static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	skb = alloc_skb(size + NET_IP_ALIGN, flags);
	if (!skb) {
		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return -ENOMEM;
	}
	if (dev->net->type != ARPHRD_RAWIP)
		skb_reserve(skb, NET_IP_ALIGN);

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach (dev->net);
			break;
		case -EHOSTUNREACH:
			retval = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			usb_mark_last_busy(dev->udev);
			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
	return retval;
}
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) { struct sk_buff *frag; struct rt6_info *rt = (struct rt6_info*)skb_dst(skb); struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; struct ipv6hdr *tmp_hdr; struct frag_hdr *fh; unsigned int mtu, hlen, left, len; __be32 frag_id = 0; int ptr, offset = 0, err=0; u8 *prevhdr, nexthdr = 0; struct net *net = dev_net(skb_dst(skb)->dev); hlen = ip6_find_1stfragopt(skb, &prevhdr); nexthdr = *prevhdr; mtu = ip6_skb_dst_mtu(skb); /* We must not fragment if the socket is set to force MTU discovery * or if the skb it not generated by a local socket. */ if (!skb->local_df) { skb->dev = skb_dst(skb)->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } if (np && np->frag_size < mtu) { if (np->frag_size) mtu = np->frag_size; } mtu -= hlen + sizeof(struct frag_hdr); if (skb_has_frags(skb)) { int first_len = skb_pagelen(skb); int truesizes = 0; if (first_len - hlen > mtu || ((first_len - hlen) & 7) || skb_cloned(skb)) goto slow_path; skb_walk_frags(skb, frag) { /* Correct geometry. */ if (frag->len > mtu || ((frag->len & 7) && frag->next) || skb_headroom(frag) < hlen) goto slow_path; /* Partially cloned skb? */ if (skb_shared(frag)) goto slow_path; BUG_ON(frag->sk); if (skb->sk) { frag->sk = skb->sk; frag->destructor = sock_wfree; truesizes += frag->truesize; } } err = 0; offset = 0; frag = skb_shinfo(skb)->frag_list; skb_frag_list_init(skb); /* BUILD HEADER */ *prevhdr = NEXTHDR_FRAGMENT; tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); if (!tmp_hdr) { IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); return -ENOMEM; } __skb_pull(skb, hlen); fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); __skb_push(skb, hlen); skb_reset_network_header(skb); memcpy(skb_network_header(skb), tmp_hdr, hlen); ipv6_select_ident(fh); fh->nexthdr = nexthdr; fh->reserved = 0; fh->frag_off = htons(IP6_MF); frag_id = fh->identification; first_len = skb_pagelen(skb); skb->data_len = first_len - skb_headlen(skb); skb->truesize -= truesizes; skb->len = first_len; ipv6_hdr(skb)->payload_len = htons(first_len - sizeof(struct ipv6hdr)); dst_hold(&rt->u.dst); for (;;) { /* Prepare header of the next frame, * before previous one went down. */ if (frag) { frag->ip_summed = CHECKSUM_NONE; skb_reset_transport_header(frag); fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr)); __skb_push(frag, hlen); skb_reset_network_header(frag); memcpy(skb_network_header(frag), tmp_hdr, hlen); offset += skb->len - hlen - sizeof(struct frag_hdr); fh->nexthdr = nexthdr; fh->reserved = 0; fh->frag_off = htons(offset); if (frag->next != NULL) fh->frag_off |= htons(IP6_MF); fh->identification = frag_id; ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); ip6_copy_metadata(frag, skb); } err = output(skb); if(!err) IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGCREATES); if (err || !frag) break; skb = frag; frag = skb->next; skb->next = NULL; } kfree(tmp_hdr); if (err == 0) { IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGOKS); dst_release(&rt->u.dst); return 0; } while (frag) { skb = frag->next; kfree_skb(frag); frag = skb; } IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGFAILS); dst_release(&rt->u.dst); return err; } slow_path: left = skb->len - hlen; /* Space per frame */ ptr = hlen; /* Where to start from */ /* * Fragment the datagram. 
*/ *prevhdr = NEXTHDR_FRAGMENT; /* * Keep copying data until we run out. */ while(left > 0) { len = left; /* IF: it doesn't fit, use 'mtu' - the data space left */ if (len > mtu) len = mtu; /* IF: we are not sending upto and including the packet end then align the next start on an eight byte boundary */ if (len < left) { len &= ~7; } /* * Allocate buffer. */ if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); err = -ENOMEM; goto fail; } /* * Set up data on packet */ ip6_copy_metadata(frag, skb); skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev)); skb_put(frag, len + hlen + sizeof(struct frag_hdr)); skb_reset_network_header(frag); fh = (struct frag_hdr *)(skb_network_header(frag) + hlen); frag->transport_header = (frag->network_header + hlen + sizeof(struct frag_hdr)); /* * Charge the memory for the fragment to any owner * it might possess */ if (skb->sk) skb_set_owner_w(frag, skb->sk); /* * Copy the packet header into the new buffer. */ skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); /* * Build fragment header. */ fh->nexthdr = nexthdr; fh->reserved = 0; if (!frag_id) { ipv6_select_ident(fh); frag_id = fh->identification; } else fh->identification = frag_id; /* * Copy a block of the IP datagram. */ if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len)) BUG(); left -= len; fh->frag_off = htons(offset); if (left > 0) fh->frag_off |= htons(IP6_MF); ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); ptr += len; offset += len; /* * Put this fragment into the sending queue. */ err = output(frag); if (err) goto fail; IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGCREATES); } IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGOKS); kfree_skb(skb); return err; fail: IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return err; }
/* Check if this packet is complete. * Returns NULL on failure by any reason, and pointer * to current nexthdr field in reassembled frame. * * It is called with locked fq, and caller must check that * queue is eligible for reassembly i.e. it is not COMPLETE, * the last and the first frames arrived and all the bits are here. */ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev, struct net_device *dev) { struct sk_buff *fp, *head = fq->q.fragments; int sum_truesize; inet_frag_kill(&fq->q, &lowpan_frags); /* Make the one we just received the head. */ if (prev) { head = prev->next; fp = skb_clone(head, GFP_ATOMIC); if (!fp) goto out_oom; fp->next = head->next; if (!fp->next) fq->q.fragments_tail = fp; prev->next = fp; skb_morph(head, fq->q.fragments); head->next = fq->q.fragments->next; consume_skb(fq->q.fragments); fq->q.fragments = head; } /* Head of list must not be cloned. */ if (skb_unclone(head, GFP_ATOMIC)) goto out_oom; /* If the first fragment is fragmented itself, we split * it to two chunks: the first with data and paged part * and the second, holding only fragments. */ if (skb_has_frag_list(head)) { struct sk_buff *clone; int i, plen = 0; clone = alloc_skb(0, GFP_ATOMIC); if (!clone) goto out_oom; clone->next = head->next; head->next = clone; skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; skb_frag_list_init(head); for (i = 0; i < skb_shinfo(head)->nr_frags; i++) plen += skb_frag_size(&skb_shinfo(head)->frags[i]); clone->len = head->data_len - plen; clone->data_len = clone->len; head->data_len -= clone->len; head->len -= clone->len; add_frag_mem_limit(&fq->q, clone->truesize); } WARN_ON(head == NULL); sum_truesize = head->truesize; for (fp = head->next; fp;) { bool headstolen; int delta; struct sk_buff *next = fp->next; sum_truesize += fp->truesize; if (skb_try_coalesce(head, fp, &headstolen, &delta)) { kfree_skb_partial(fp, headstolen); } else { if (!skb_shinfo(head)->frag_list) skb_shinfo(head)->frag_list = fp; head->data_len += fp->len; head->len += fp->len; head->truesize += fp->truesize; } fp = next; } sub_frag_mem_limit(&fq->q, sum_truesize); head->next = NULL; head->dev = dev; head->tstamp = fq->q.stamp; fq->q.fragments = NULL; fq->q.fragments_tail = NULL; return 1; out_oom: net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n"); return -1; }
static irqreturn_t W6692_interrupt(int intno, void *dev_id, struct pt_regs *regs) { struct IsdnCardState *cs = dev_id; u_char val, exval, v1; struct sk_buff *skb; u_int count; u_long flags; int icnt = 5; spin_lock_irqsave(&cs->lock, flags); val = cs->readW6692(cs, W_ISTA); if (!val) { spin_unlock_irqrestore(&cs->lock, flags); return IRQ_NONE; } StartW6692: if (cs->debug & L1_DEB_ISAC) debugl1(cs, "W6692 ISTA %x", val); if (val & W_INT_D_RME) { /* RME */ exval = cs->readW6692(cs, W_D_RSTA); if (exval & (W_D_RSTA_RDOV | W_D_RSTA_CRCE | W_D_RSTA_RMB)) { if (exval & W_D_RSTA_RDOV) if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 RDOV"); if (exval & W_D_RSTA_CRCE) if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 D-channel CRC error"); if (exval & W_D_RSTA_RMB) if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 D-channel ABORT"); cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RACK | W_D_CMDR_RRST); } else { count = cs->readW6692(cs, W_D_RBCL) & (W_D_FIFO_THRESH - 1); if (count == 0) count = W_D_FIFO_THRESH; W6692_empty_fifo(cs, count); if ((count = cs->rcvidx) > 0) { cs->rcvidx = 0; if (!(skb = alloc_skb(count, GFP_ATOMIC))) printk(KERN_WARNING "HiSax: D receive out of memory\n"); else { memcpy(skb_put(skb, count), cs->rcvbuf, count); skb_queue_tail(&cs->rq, skb); } } } cs->rcvidx = 0; schedule_event(cs, D_RCVBUFREADY); } if (val & W_INT_D_RMR) { /* RMR */ W6692_empty_fifo(cs, W_D_FIFO_THRESH); } if (val & W_INT_D_XFR) { /* XFR */ if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) schedule_event(cs, D_CLEARBUSY); if (cs->tx_skb) { if (cs->tx_skb->len) { W6692_fill_fifo(cs); goto afterXFR; } else { dev_kfree_skb_irq(cs->tx_skb); cs->tx_cnt = 0; cs->tx_skb = NULL; } } if ((cs->tx_skb = skb_dequeue(&cs->sq))) { cs->tx_cnt = 0; W6692_fill_fifo(cs); } else schedule_event(cs, D_XMTBUFREADY); } afterXFR: if (val & (W_INT_XINT0 | W_INT_XINT1)) { /* XINT0/1 - never */ if (cs->debug & L1_DEB_ISAC) debugl1(cs, "W6692 spurious XINT!"); } if (val & W_INT_D_EXI) { /* EXI */ exval = cs->readW6692(cs, W_D_EXIR); if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 D_EXIR %02x", exval); if (exval & (W_D_EXI_XDUN | W_D_EXI_XCOL)) { /* Transmit underrun/collision */ debugl1(cs, "W6692 D-chan underrun/collision"); printk(KERN_WARNING "HiSax: W6692 XDUN/XCOL\n"); if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) schedule_event(cs, D_CLEARBUSY); if (cs->tx_skb) { /* Restart frame */ skb_push(cs->tx_skb, cs->tx_cnt); cs->tx_cnt = 0; W6692_fill_fifo(cs); } else { printk(KERN_WARNING "HiSax: W6692 XDUN/XCOL no skb\n"); debugl1(cs, "W6692 XDUN/XCOL no skb"); cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_XRST); } } if (exval & W_D_EXI_RDOV) { /* RDOV */ debugl1(cs, "W6692 D-channel RDOV"); printk(KERN_WARNING "HiSax: W6692 D-RDOV\n"); cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RRST); } if (exval & W_D_EXI_TIN2) { /* TIN2 - never */ debugl1(cs, "W6692 spurious TIN2 interrupt"); } if (exval & W_D_EXI_MOC) { /* MOC - not supported */ debugl1(cs, "W6692 spurious MOC interrupt"); v1 = cs->readW6692(cs, W_MOSR); debugl1(cs, "W6692 MOSR %02x", v1); } if (exval & W_D_EXI_ISC) { /* ISC - Level1 change */ v1 = cs->readW6692(cs, W_CIR); if (cs->debug & L1_DEB_ISAC) debugl1(cs, "W6692 ISC CIR=0x%02X", v1); if (v1 & W_CIR_ICC) { cs->dc.w6692.ph_state = v1 & W_CIR_COD_MASK; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ph_state_change %x", cs->dc.w6692.ph_state); schedule_event(cs, D_L1STATECHANGE); } 
if (v1 & W_CIR_SCC) { v1 = cs->readW6692(cs, W_SQR); debugl1(cs, "W6692 SCC SQR=0x%02X", v1); } } if (exval & W_D_EXI_WEXP) { debugl1(cs, "W6692 spurious WEXP interrupt!"); } if (exval & W_D_EXI_TEXP) { debugl1(cs, "W6692 spurious TEXP interrupt!"); } } if (val & W_INT_B1_EXI) { debugl1(cs, "W6692 B channel 1 interrupt"); W6692B_interrupt(cs, 0); } if (val & W_INT_B2_EXI) { debugl1(cs, "W6692 B channel 2 interrupt"); W6692B_interrupt(cs, 1); } val = cs->readW6692(cs, W_ISTA); if (val && icnt) { icnt--; goto StartW6692; } if (!icnt) { printk(KERN_WARNING "W6692 IRQ LOOP\n"); cs->writeW6692(cs, W_IMASK, 0xff); } spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; }
/* * Create an arp packet. If (dest_hw == NULL), we create a broadcast * message. */ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, struct net_device *dev, __be32 src_ip, const unsigned char *dest_hw, const unsigned char *src_hw, const unsigned char *target_hw) { struct sk_buff *skb; struct arphdr *arp; unsigned char *arp_ptr; /* * Allocate a buffer */ skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); if (skb == NULL) return NULL; skb_reserve(skb, LL_RESERVED_SPACE(dev)); skb_reset_network_header(skb); arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev)); skb->dev = dev; skb->protocol = htons(ETH_P_ARP); if (src_hw == NULL) src_hw = dev->dev_addr; if (dest_hw == NULL) dest_hw = dev->broadcast; /* * Fill the device header for the ARP frame */ if (dev_hard_header(skb, dev, ptype, dest_hw, src_hw, skb->len) < 0) goto out; /* * Fill out the arp protocol part. * * The arp hardware type should match the device type, except for FDDI, * which (according to RFC 1390) should always equal 1 (Ethernet). */ /* * Exceptions everywhere. AX.25 uses the AX.25 PID value not the * DIX code for the protocol. Make these device structure fields. */ switch (dev->type) { default: arp->ar_hrd = htons(dev->type); arp->ar_pro = htons(ETH_P_IP); break; #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) case ARPHRD_AX25: arp->ar_hrd = htons(ARPHRD_AX25); arp->ar_pro = htons(AX25_P_IP); break; #if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE) case ARPHRD_NETROM: arp->ar_hrd = htons(ARPHRD_NETROM); arp->ar_pro = htons(AX25_P_IP); break; #endif #endif #if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE) case ARPHRD_FDDI: arp->ar_hrd = htons(ARPHRD_ETHER); arp->ar_pro = htons(ETH_P_IP); break; #endif #if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) case ARPHRD_IEEE802_TR: arp->ar_hrd = htons(ARPHRD_IEEE802); arp->ar_pro = htons(ETH_P_IP); break; #endif } arp->ar_hln = dev->addr_len; arp->ar_pln = 4; arp->ar_op = htons(type); arp_ptr = (unsigned char *)(arp + 1); memcpy(arp_ptr, src_hw, dev->addr_len); arp_ptr += dev->addr_len; memcpy(arp_ptr, &src_ip, 4); arp_ptr += 4; if (target_hw != NULL) memcpy(arp_ptr, target_hw, dev->addr_len); else memset(arp_ptr, 0, dev->addr_len); arp_ptr += dev->addr_len; memcpy(arp_ptr, &dest_ip, 4); return skb; out: kfree_skb(skb); return NULL; }
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode)
{
	struct sk_buff *skb;
	unsigned int gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, sk->sk_allocation);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE : data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
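As context for how this allocator is usually reached: in kernels of this era the linear-only helper in net/core/sock.c is just a thin wrapper that asks for everything in the linear area. A minimal sketch of that relationship, with the exact signature taken from memory and so best treated as an assumption:

```c
/* Sketch: sock_alloc_send_skb() delegates to sock_alloc_send_pskb() with
 * data_len == 0, so the page-fragment loop above is skipped entirely and
 * the whole buffer lives in the linear area. */
struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
```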
static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) { struct sk_buff *skb; struct skb_data *entry; int retval = 0; unsigned long lockflags; size_t size = dev->rx_urb_size; if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) { if (netif_msg_rx_err (dev)) devdbg (dev, "no rx skb"); usbnet_defer_kevent (dev, EVENT_RX_MEMORY); usb_free_urb (urb); return; } skb_reserve (skb, NET_IP_ALIGN); entry = (struct skb_data *) skb->cb; entry->urb = urb; entry->dev = dev; entry->state = rx_start; entry->length = 0; usb_fill_bulk_urb (urb, dev->udev, dev->in, skb->data, size, rx_complete, skb); spin_lock_irqsave (&dev->rxq.lock, lockflags); if (netif_running (dev->net) && netif_device_present (dev->net) && !test_bit (EVENT_RX_HALT, &dev->flags) && !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) { switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) { case -EPIPE: usbnet_defer_kevent (dev, EVENT_RX_HALT); break; case -ENOMEM: usbnet_defer_kevent (dev, EVENT_RX_MEMORY); break; case -ENODEV: if (netif_msg_ifdown (dev)) devdbg (dev, "device gone"); netif_device_detach (dev->net); break; default: if (netif_msg_rx_err (dev)) devdbg (dev, "rx submit, %d", retval); tasklet_schedule (&dev->bh); break; case 0: __skb_queue_tail (&dev->rxq, skb); } } else { if (netif_msg_ifdown (dev)) devdbg (dev, "rx: stopped"); retval = -ENOLINK; } spin_unlock_irqrestore (&dev->rxq.lock, lockflags); if (retval) { dev_kfree_skb_any (skb); usb_free_urb (urb); } }
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, struct net_device *dev) { struct net *net = container_of(qp->q.net, struct net, ipv4.frags); struct iphdr *iph; struct sk_buff *fp, *head = qp->q.fragments; int len; int ihlen; int err; u8 ecn; ipq_kill(qp); ecn = ip4_frag_ecn_table[qp->ecn]; if (unlikely(ecn == 0xff)) { err = -EINVAL; goto out_fail; } /* Make the one we just received the head. */ if (prev) { head = prev->next; fp = skb_clone(head, GFP_ATOMIC); if (!fp) goto out_nomem; fp->next = head->next; if (!fp->next) qp->q.fragments_tail = fp; prev->next = fp; skb_morph(head, qp->q.fragments); head->next = qp->q.fragments->next; kfree_skb(qp->q.fragments); qp->q.fragments = head; } WARN_ON(head == NULL); WARN_ON(FRAG_CB(head)->offset != 0); /* Allocate a new buffer for the datagram. */ ihlen = ip_hdrlen(head); len = ihlen + qp->q.len; err = -E2BIG; if (len > 65535) goto out_oversize; /* Head of list must not be cloned. */ if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) goto out_nomem; /* If the first fragment is fragmented itself, we split * it to two chunks: the first with data and paged part * and the second, holding only fragments. */ if (skb_has_frag_list(head)) { struct sk_buff *clone; int i, plen = 0; if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) goto out_nomem; clone->next = head->next; head->next = clone; skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; skb_frag_list_init(head); for (i = 0; i < skb_shinfo(head)->nr_frags; i++) plen += skb_frag_size(&skb_shinfo(head)->frags[i]); clone->len = clone->data_len = head->data_len - plen; head->data_len -= clone->len; head->len -= clone->len; clone->csum = 0; clone->ip_summed = head->ip_summed; atomic_add(clone->truesize, &qp->q.net->mem); } skb_shinfo(head)->frag_list = head->next; skb_push(head, head->data - skb_network_header(head)); for (fp=head->next; fp; fp = fp->next) { head->data_len += fp->len; head->len += fp->len; if (head->ip_summed != fp->ip_summed) head->ip_summed = CHECKSUM_NONE; else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; } atomic_sub(head->truesize, &qp->q.net->mem); head->next = NULL; head->dev = dev; head->tstamp = qp->q.stamp; IPCB(head)->frag_max_size = qp->q.max_size; iph = ip_hdr(head); /* max_size != 0 implies at least one fragment had IP_DF set */ iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0; iph->tot_len = htons(len); iph->tos |= ecn; ip_send_check(iph); IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); qp->q.fragments = NULL; qp->q.fragments_tail = NULL; return 0; out_nomem: LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"), qp); err = -ENOMEM; goto out_fail; out_oversize: net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr); out_fail: IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); return err; }
void st_int_recv(void *disc_data, const unsigned char *data, long count) { register char *ptr; struct hci_event_hdr *eh; struct hci_acl_hdr *ah; struct hci_sco_hdr *sh; struct fm_event_hdr *fm; struct gps_event_hdr *gps; register int len = 0, type = 0, dlen = 0; static enum proto_type protoid = ST_MAX; struct st_data_s *st_gdata = (struct st_data_s *)disc_data; ptr = (char *)data; /* tty_receive sent null ? */ if (unlikely(ptr == NULL) || (st_gdata == NULL)) { pr_err(" received null from TTY "); return; } pr_info("count %ld rx_state %ld" "rx_count %ld", count, st_gdata->rx_state, st_gdata->rx_count); /* Decode received bytes here */ while (count) { if (st_gdata->rx_count) { len = min_t(unsigned int, st_gdata->rx_count, count); memcpy(skb_put(st_gdata->rx_skb, len), ptr, len); st_gdata->rx_count -= len; count -= len; ptr += len; if (st_gdata->rx_count) continue; /* Check ST RX state machine , where are we? */ switch (st_gdata->rx_state) { /* Waiting for complete packet ? */ case ST_BT_W4_DATA: pr_info("Complete pkt received"); /* Ask ST CORE to forward * the packet to protocol driver */ st_send_frame(protoid, st_gdata); st_gdata->rx_state = ST_W4_PACKET_TYPE; st_gdata->rx_skb = NULL; protoid = ST_MAX; /* is this required ? */ continue; /* Waiting for Bluetooth event header ? */ case ST_BT_W4_EVENT_HDR: eh = (struct hci_event_hdr *)st_gdata->rx_skb-> data; pr_info("Event header: evt 0x%2.2x" "plen %d", eh->evt, eh->plen); st_check_data_len(st_gdata, protoid, eh->plen); continue; /* Waiting for Bluetooth acl header ? */ case ST_BT_W4_ACL_HDR: ah = (struct hci_acl_hdr *)st_gdata->rx_skb-> data; dlen = __le16_to_cpu(ah->dlen); pr_info("ACL header: dlen %d", dlen); st_check_data_len(st_gdata, protoid, dlen); continue; /* Waiting for Bluetooth sco header ? */ case ST_BT_W4_SCO_HDR: sh = (struct hci_sco_hdr *)st_gdata->rx_skb-> data; pr_info("SCO header: dlen %d", sh->dlen); st_check_data_len(st_gdata, protoid, sh->dlen); continue; case ST_FM_W4_EVENT_HDR: fm = (struct fm_event_hdr *)st_gdata->rx_skb-> data; pr_info("FM Header: "); st_check_data_len(st_gdata, ST_FM, fm->plen); continue; /* TODO : Add GPS packet machine logic here */ case ST_GPS_W4_EVENT_HDR: /* [0x09 pkt hdr][R/W byte][2 byte len] */ gps = (struct gps_event_hdr *)st_gdata->rx_skb-> data; pr_info("GPS Header: "); st_check_data_len(st_gdata, ST_GPS, gps->plen); continue; } /* end of switch rx_state */ } /* end of if rx_count */ /* Check first byte of packet and identify module * owner (BT/FM/GPS) */ switch (*ptr) { /* Bluetooth event packet? */ case HCI_EVENT_PKT: pr_info("Event packet"); st_gdata->rx_state = ST_BT_W4_EVENT_HDR; st_gdata->rx_count = HCI_EVENT_HDR_SIZE; type = HCI_EVENT_PKT; protoid = ST_BT; break; /* Bluetooth acl packet? */ case HCI_ACLDATA_PKT: pr_info("ACL packet"); st_gdata->rx_state = ST_BT_W4_ACL_HDR; st_gdata->rx_count = HCI_ACL_HDR_SIZE; type = HCI_ACLDATA_PKT; protoid = ST_BT; break; /* Bluetooth sco packet? */ case HCI_SCODATA_PKT: pr_info("SCO packet"); st_gdata->rx_state = ST_BT_W4_SCO_HDR; st_gdata->rx_count = HCI_SCO_HDR_SIZE; type = HCI_SCODATA_PKT; protoid = ST_BT; break; /* Channel 8(FM) packet? */ case ST_FM_CH8_PKT: pr_info("FM CH8 packet"); type = ST_FM_CH8_PKT; st_gdata->rx_state = ST_FM_W4_EVENT_HDR; st_gdata->rx_count = FM_EVENT_HDR_SIZE; protoid = ST_FM; break; /* Channel 9(GPS) packet? 
*/ case 0x9: /*ST_LL_GPS_CH9_PKT */ pr_info("GPS CH9 packet"); type = 0x9; /* ST_LL_GPS_CH9_PKT; */ protoid = ST_GPS; st_gdata->rx_state = ST_GPS_W4_EVENT_HDR; st_gdata->rx_count = 3; /* GPS_EVENT_HDR_SIZE -1*/ break; case LL_SLEEP_IND: case LL_SLEEP_ACK: case LL_WAKE_UP_IND: pr_info("PM packet"); /* this takes appropriate action based on * sleep state received -- */ st_ll_sleep_state(st_gdata, *ptr); ptr++; count--; continue; case LL_WAKE_UP_ACK: pr_info("PM packet"); /* wake up ack received */ st_wakeup_ack(st_gdata, *ptr); ptr++; count--; continue; /* Unknow packet? */ default: pr_err("Unknown packet type %2.2x", (__u8) *ptr); ptr++; count--; continue; }; ptr++; count--; switch (protoid) { case ST_BT: /* Allocate new packet to hold received data */ st_gdata->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); if (!st_gdata->rx_skb) { pr_err("Can't allocate mem for new packet"); st_gdata->rx_state = ST_W4_PACKET_TYPE; st_gdata->rx_count = 0; return; } bt_cb(st_gdata->rx_skb)->pkt_type = type; break; case ST_FM: /* for FM */ st_gdata->rx_skb = alloc_skb(FM_MAX_FRAME_SIZE, GFP_ATOMIC); if (!st_gdata->rx_skb) { pr_err("Can't allocate mem for new packet"); st_gdata->rx_state = ST_W4_PACKET_TYPE; st_gdata->rx_count = 0; return; } /* place holder 0x08 */ skb_reserve(st_gdata->rx_skb, 1); st_gdata->rx_skb->cb[0] = ST_FM_CH8_PKT; break; case ST_GPS: /* for GPS */ st_gdata->rx_skb = alloc_skb(100 /*GPS_MAX_FRAME_SIZE */ , GFP_ATOMIC); if (!st_gdata->rx_skb) { pr_err("Can't allocate mem for new packet"); st_gdata->rx_state = ST_W4_PACKET_TYPE; st_gdata->rx_count = 0; return; } /* place holder 0x09 */ skb_reserve(st_gdata->rx_skb, 1); st_gdata->rx_skb->cb[0] = 0x09; /*ST_GPS_CH9_PKT; */ break; case ST_MAX: break; } }
/* packet receiver */ static void rx(struct net_device *dev, int bufnum, struct archdr *pkthdr, int length) { struct arcnet_local *lp = netdev_priv(dev); struct sk_buff *skb; struct archdr *pkt = pkthdr; struct arc_rfc1201 *soft = &pkthdr->soft.rfc1201; int saddr = pkt->hard.source, ofs; struct Incoming *in = &lp->rfc1201.incoming[saddr]; BUGMSG(D_DURING, "it's an RFC1201 packet (length=%d)\n", length); if (length >= MinTU) ofs = 512 - length; else ofs = 256 - length; if (soft->split_flag == 0xFF) { /* Exception Packet */ if (length >= 4 + RFC1201_HDR_SIZE) BUGMSG(D_DURING, "compensating for exception packet\n"); else { BUGMSG(D_EXTRA, "short RFC1201 exception packet from %02Xh", saddr); return; } /* skip over 4-byte junkola */ length -= 4; ofs += 4; lp->hw.copy_from_card(dev, bufnum, 512 - length, soft, sizeof(pkt->soft)); } if (!soft->split_flag) { /* not split */ BUGMSG(D_RX, "incoming is not split (splitflag=%d)\n", soft->split_flag); if (in->skb) { /* already assembling one! */ BUGMSG(D_EXTRA, "aborting assembly (seq=%d) for unsplit packet (splitflag=%d, seq=%d)\n", in->sequence, soft->split_flag, soft->sequence); lp->rfc1201.aborted_seq = soft->sequence; dev_kfree_skb_irq(in->skb); dev->stats.rx_errors++; dev->stats.rx_missed_errors++; in->skb = NULL; } in->sequence = soft->sequence; skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); if (skb == NULL) { BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); dev->stats.rx_dropped++; return; } skb_put(skb, length + ARC_HDR_SIZE); skb->dev = dev; pkt = (struct archdr *) skb->data; soft = &pkt->soft.rfc1201; /* up to sizeof(pkt->soft) has already been copied from the card */ memcpy(pkt, pkthdr, sizeof(struct archdr)); if (length > sizeof(pkt->soft)) lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft), pkt->soft.raw + sizeof(pkt->soft), length - sizeof(pkt->soft)); /* * ARP packets have problems when sent from some DOS systems: the * source address is always 0! So we take the hardware source addr * (which is impossible to fumble) and insert it ourselves. */ if (soft->proto == ARC_P_ARP) { struct arphdr *arp = (struct arphdr *) soft->payload; /* make sure addresses are the right length */ if (arp->ar_hln == 1 && arp->ar_pln == 4) { uint8_t *cptr = (uint8_t *) arp + sizeof(struct arphdr); if (!*cptr) { /* is saddr = 00? */ BUGMSG(D_EXTRA, "ARP source address was 00h, set to %02Xh.\n", saddr); dev->stats.rx_crc_errors++; *cptr = saddr; } else { BUGMSG(D_DURING, "ARP source address (%Xh) is fine.\n", *cptr); } } else { BUGMSG(D_NORMAL, "funny-shaped ARP packet. (%Xh, %Xh)\n", arp->ar_hln, arp->ar_pln); dev->stats.rx_errors++; dev->stats.rx_crc_errors++; } } BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); skb->protocol = type_trans(skb, dev); netif_rx(skb); } else { /* split packet */ /* * NOTE: MSDOS ARP packet correction should only need to apply to * unsplit packets, since ARP packets are so short. * * My interpretation of the RFC1201 document is that if a packet is * received out of order, the entire assembly process should be * aborted. * * The RFC also mentions "it is possible for successfully received * packets to be retransmitted." As of 0.40 all previously received * packets are allowed, not just the most recent one. * * We allow multiple assembly processes, one for each ARCnet card * possible on the network. Seems rather like a waste of memory, * but there's no other way to be reliable. 
*/ BUGMSG(D_RX, "packet is split (splitflag=%d, seq=%d)\n", soft->split_flag, in->sequence); if (in->skb && in->sequence != soft->sequence) { BUGMSG(D_EXTRA, "wrong seq number (saddr=%d, expected=%d, seq=%d, splitflag=%d)\n", saddr, in->sequence, soft->sequence, soft->split_flag); dev_kfree_skb_irq(in->skb); in->skb = NULL; dev->stats.rx_errors++; dev->stats.rx_missed_errors++; in->lastpacket = in->numpackets = 0; } if (soft->split_flag & 1) { /* first packet in split */ BUGMSG(D_RX, "brand new splitpacket (splitflag=%d)\n", soft->split_flag); if (in->skb) { /* already assembling one! */ BUGMSG(D_EXTRA, "aborting previous (seq=%d) assembly " "(splitflag=%d, seq=%d)\n", in->sequence, soft->split_flag, soft->sequence); dev->stats.rx_errors++; dev->stats.rx_missed_errors++; dev_kfree_skb_irq(in->skb); } in->sequence = soft->sequence; in->numpackets = ((unsigned) soft->split_flag >> 1) + 2; in->lastpacket = 1; if (in->numpackets > 16) { BUGMSG(D_EXTRA, "incoming packet more than 16 segments; dropping. (splitflag=%d)\n", soft->split_flag); lp->rfc1201.aborted_seq = soft->sequence; dev->stats.rx_errors++; dev->stats.rx_length_errors++; return; } in->skb = skb = alloc_skb(508 * in->numpackets + ARC_HDR_SIZE, GFP_ATOMIC); if (skb == NULL) { BUGMSG(D_NORMAL, "(split) memory squeeze, dropping packet.\n"); lp->rfc1201.aborted_seq = soft->sequence; dev->stats.rx_dropped++; return; } skb->dev = dev; pkt = (struct archdr *) skb->data; soft = &pkt->soft.rfc1201; memcpy(pkt, pkthdr, ARC_HDR_SIZE + RFC1201_HDR_SIZE); skb_put(skb, ARC_HDR_SIZE + RFC1201_HDR_SIZE); soft->split_flag = 0; /* end result won't be split */ } else { /* not first packet */
static int start_ipc(struct link_device *ld, struct io_device *iod)
{
	struct sk_buff *skb;
	char data[1] = {'a'};
	int err;
	struct usb_link_device *usb_ld = to_usb_link_device(ld);
	struct link_pm_data *pm_data = usb_ld->link_pm_data;
	struct device *dev = &usb_ld->usbdev->dev;
	struct if_usb_devdata *pipe_data = &usb_ld->devdata[IF_USB_FMT_EP];

	if (!usb_ld->if_usb_connected) {
		mif_err("HSIC not connected, skip start ipc\n");
		err = -ENODEV;
		goto exit;
	}

retry:
	if (ld->mc->phone_state != STATE_ONLINE) {
		mif_err("MODEM is not online, skip start ipc\n");
		err = -ENODEV;
		goto exit;
	}

	/* check usb runtime pm first */
	if (dev->power.runtime_status != RPM_ACTIVE) {
		if (!pm_data->resume_requested) {
			mif_debug("QW PM\n");
			INIT_COMPLETION(pm_data->active_done);
			queue_delayed_work(pm_data->wq,
					&pm_data->link_pm_work, 0);
		}
		mif_debug("Wait pm\n");
		err = wait_for_completion_timeout(&pm_data->active_done,
				msecs_to_jiffies(500));
		/* timeout or -ERESTARTSYS */
		if (err <= 0)
			goto retry;
	}

	pm_runtime_get_sync(dev);

	mif_err("send 'a'\n");

	skb = alloc_skb(16, GFP_ATOMIC);
	if (unlikely(!skb)) {
		pm_runtime_put(dev);
		return -ENOMEM;
	}
	memcpy(skb_put(skb, 1), data, 1);
	skbpriv(skb)->iod = iod;
	skbpriv(skb)->ld = ld;

	if (!usb_ld->if_usb_connected || !usb_ld->usbdev) {
		/* device went away after the skb was allocated: release the
		 * skb and the PM reference taken above before bailing out */
		dev_kfree_skb_any(skb);
		pm_runtime_put(dev);
		return -ENODEV;
	}
	usb_mark_last_busy(usb_ld->usbdev);

	err = usb_tx_urb_with_skb(usb_ld->usbdev, skb, pipe_data);
	if (err < 0) {
		mif_err("usb_tx_urb fail\n");
		dev_kfree_skb_any(skb);
	}

	pm_runtime_put(dev);
exit:
	return err;
}
/**
 * kim_int_recv - receive function called during firmware download.
 * Firmware download responses on different UART drivers have been observed
 * to arrive split across several tty_receive() bursts, hence the state
 * machine below.
 */
static void kim_int_recv(struct kim_data_s *kim_gdata,
	const unsigned char *data, long count)
{
	const unsigned char *ptr;
	int len = 0, type = 0;
	unsigned char *plen;

	pr_debug("%s", __func__);
	/* Decode received bytes here */
	ptr = data;
	if (unlikely(ptr == NULL)) {
		pr_err(" received null from TTY ");
		return;
	}

	while (count) {
		if (kim_gdata->rx_count) {
			len = min_t(unsigned int, kim_gdata->rx_count, count);
			memcpy(skb_put(kim_gdata->rx_skb, len), ptr, len);
			kim_gdata->rx_count -= len;
			count -= len;
			ptr += len;

			if (kim_gdata->rx_count)
				continue;

			/* Check ST RX state machine , where are we? */
			switch (kim_gdata->rx_state) {
			/* Waiting for complete packet ? */
			case ST_W4_DATA:
				pr_debug("Complete pkt received");
				validate_firmware_response(kim_gdata);
				kim_gdata->rx_state = ST_W4_PACKET_TYPE;
				kim_gdata->rx_skb = NULL;
				continue;
			/* Waiting for Bluetooth event header ? */
			case ST_W4_HEADER:
				plen = (unsigned char *)
					&kim_gdata->rx_skb->data[1];
				pr_debug("event hdr: plen 0x%02x\n", *plen);
				kim_check_data_len(kim_gdata, *plen);
				continue;
			}	/* end of switch rx_state */
		}		/* end of if rx_count */

		/* Identify the packet type from its first byte */
		switch (*ptr) {
		/* Bluetooth event packet? */
		case 0x04:
			kim_gdata->rx_state = ST_W4_HEADER;
			kim_gdata->rx_count = 2;
			type = *ptr;
			break;
		default:
			pr_info("unknown packet");
			ptr++;
			count--;
			continue;
		}
		ptr++;
		count--;
		kim_gdata->rx_skb = alloc_skb(1024 + 8, GFP_ATOMIC);
		if (!kim_gdata->rx_skb) {
			pr_err("can't allocate mem for new packet");
			kim_gdata->rx_state = ST_W4_PACKET_TYPE;
			kim_gdata->rx_count = 0;
			return;
		}
		skb_reserve(kim_gdata->rx_skb, 8);
		kim_gdata->rx_skb->cb[0] = 4;
		kim_gdata->rx_skb->cb[1] = 0;
	}
}
static int tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) { int i, ret; struct nlattr *tb[TCA_ACT_MAX_PRIO+1]; struct tc_action *head = NULL, *act, *act_prev = NULL; ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL); if (ret < 0) return ret; if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) { if (tb[1] != NULL) return tca_action_flush(tb[1], n, pid); else return -EINVAL; } for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { act = tcf_action_get_1(tb[i], n, pid); if (IS_ERR(act)) { ret = PTR_ERR(act); goto err; } act->order = i; if (head == NULL) head = act; else act_prev->next = act; act_prev = act; } if (event == RTM_GETACTION) ret = act_get_notify(pid, n, head, event); else { /* delete */ struct sk_buff *skb; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) { ret = -ENOBUFS; goto err; } if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event, 0, 1) <= 0) { kfree_skb(skb); ret = -EINVAL; goto err; } /* now do the delete */ tcf_action_destroy(head, 0); ret = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); if (ret > 0) return 0; return ret; } err: cleanup_a(head); return ret; }
static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid) { struct sk_buff *skb; unsigned char *b; struct nlmsghdr *nlh; struct tcamsg *t; struct netlink_callback dcb; struct nlattr *nest; struct nlattr *tb[TCA_ACT_MAX+1]; struct nlattr *kind; struct tc_action *a = create_a(0); int err = -ENOMEM; if (a == NULL) { printk("tca_action_flush: couldnt create tc_action\n"); return err; } skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) { printk("tca_action_flush: failed skb alloc\n"); kfree(a); return err; } b = skb_tail_pointer(skb); err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); if (err < 0) goto err_out; err = -EINVAL; kind = tb[TCA_ACT_KIND]; a->ops = tc_lookup_action(kind); if (a->ops == NULL) goto err_out; nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t)); t = NLMSG_DATA(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; nest = nla_nest_start(skb, TCA_ACT_TAB); if (nest == NULL) goto nla_put_failure; err = a->ops->walk(skb, &dcb, RTM_DELACTION, a); if (err < 0) goto nla_put_failure; if (err == 0) goto noflush_out; nla_nest_end(skb, nest); nlh->nlmsg_len = skb_tail_pointer(skb) - b; nlh->nlmsg_flags |= NLM_F_ROOT; module_put(a->ops->owner); kfree(a); err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); if (err > 0) return 0; return err; nla_put_failure: nlmsg_failure: module_put(a->ops->owner); err_out: noflush_out: kfree_skb(skb); kfree(a); return err; }
/* EIF(Error in FIFO/End in Frame) handler for FIR */
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
{
	unsigned int len, stat, data;

	/* Get the current data position. */
	len = DTADR(si->rxdma) - si->dma_rx_buff_phy;

	do {
		/* Read Status, and then Data. */
		stat = ICSR1;
		rmb();
		data = ICDR;

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				dev->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				dev->stats.rx_over_errors++;
			}
		} else {
			si->dma_rx_buff[len++] = data;
		}
		/* If we hit the end of frame, there's no point in continuing. */
		if (stat & ICSR1_EOF)
			break;
	} while (ICSR0 & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		/* end of frame. */
		struct sk_buff *skb;

		if (icsr0 & ICSR0_FRE) {
			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
			dev->stats.rx_dropped++;
			return;
		}

		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			dev->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 20 bytes */
		skb_reserve(skb, 1);
		skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
		skb_put(skb, len);

		/* Feed it to IrLAP */
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}
}
struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
			   struct net_device *dev, __be32 src_ip,
			   const unsigned char *dest_hw,
			   const unsigned char *src_hw,
			   const unsigned char *target_hw)
{
	struct sk_buff *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;

	skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, hlen);
	skb_reset_network_header(skb);
	arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
	skb->dev = dev;
	skb->protocol = htons(ETH_P_ARP);
	if (src_hw == NULL)
		src_hw = dev->dev_addr;
	if (dest_hw == NULL)
		dest_hw = dev->broadcast;

	if (dev_hard_header(skb, dev, ptype, dest_hw, src_hw, skb->len) < 0)
		goto out;

	switch (dev->type) {
	default:
		arp->ar_hrd = htons(dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		break;

#if IS_ENABLED(CONFIG_AX25)
	case ARPHRD_AX25:
		arp->ar_hrd = htons(ARPHRD_AX25);
		arp->ar_pro = htons(AX25_P_IP);
		break;

#if IS_ENABLED(CONFIG_NETROM)
	case ARPHRD_NETROM:
		arp->ar_hrd = htons(ARPHRD_NETROM);
		arp->ar_pro = htons(AX25_P_IP);
		break;
#endif
#endif

#if IS_ENABLED(CONFIG_FDDI)
	case ARPHRD_FDDI:
		arp->ar_hrd = htons(ARPHRD_ETHER);
		arp->ar_pro = htons(ETH_P_IP);
		break;
#endif
#if IS_ENABLED(CONFIG_TR)
	case ARPHRD_IEEE802_TR:
		arp->ar_hrd = htons(ARPHRD_IEEE802);
		arp->ar_pro = htons(ETH_P_IP);
		break;
#endif
	}

	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);

	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr += dev->addr_len;
	memcpy(arp_ptr, &src_ip, 4);
	arp_ptr += 4;

	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, dev->addr_len);
	else
		memset(arp_ptr, 0, dev->addr_len);
	arp_ptr += dev->addr_len;
	memcpy(arp_ptr, &dest_ip, 4);

	return skb;

out:
	kfree_skb(skb);
	return NULL;
}
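For orientation, the usual caller of this helper in net/ipv4/arp.c is arp_send(), which pairs arp_create() with arp_xmit(). The sketch below mirrors that pattern rather than reproducing the kernel function itself; the wrapper name is illustrative only.

```c
/* Illustrative caller: build an ARP frame and transmit it, following the
 * arp_send()/arp_xmit() pattern. Error handling is reduced to the minimum. */
static void example_send_arp(int type, int ptype, __be32 dest_ip,
			     struct net_device *dev, __be32 src_ip,
			     const unsigned char *dest_hw,
			     const unsigned char *src_hw,
			     const unsigned char *target_hw)
{
	struct sk_buff *skb;

	if (dev->flags & IFF_NOARP)	/* device does not do ARP at all */
		return;

	skb = arp_create(type, ptype, dest_ip, dev, src_ip,
			 dest_hw, src_hw, target_hw);
	if (!skb)
		return;

	/* arp_xmit() runs the NF_ARP_OUT netfilter hook, then dev_queue_xmit() */
	arp_xmit(skb);
}
```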
static void send_unreach(struct sk_buff *skb_in, int code) { struct iphdr *iph; struct udphdr *udph; struct icmphdr *icmph; struct sk_buff *nskb; u32 saddr; u8 tos; int hh_len, length; struct rtable *rt = (struct rtable*)skb_in->dst; unsigned char *data; if (!rt) return; /* FIXME: Use sysctl number. --RR */ if (!xrlim_allow(&rt->u.dst, 1*HZ)) return; iph = skb_in->nh.iph; /* No replies to physical multicast/broadcast */ if (skb_in->pkt_type!=PACKET_HOST) return; /* Now check at the protocol level */ if (rt->rt_flags&(RTCF_BROADCAST|RTCF_MULTICAST)) return; /* Only reply to fragment 0. */ if (iph->frag_off&htons(IP_OFFSET)) return; /* Ensure we have at least 8 bytes of proto header. */ if (skb_in->len < skb_in->nh.iph->ihl*4 + 8) return; /* if UDP checksum is set, verify it's correct */ if (iph->protocol == IPPROTO_UDP && skb_in->tail-(u8*)iph >= sizeof(struct udphdr)) { int datalen = skb_in->len - (iph->ihl<<2); udph = (struct udphdr *)((char *)iph + (iph->ihl<<2)); if (udph->check && csum_tcpudp_magic(iph->saddr, iph->daddr, datalen, IPPROTO_UDP, csum_partial((char *)udph, datalen, 0)) != 0) return; } /* If we send an ICMP error to an ICMP error a mess would result.. */ if (iph->protocol == IPPROTO_ICMP && skb_in->tail-(u8*)iph >= sizeof(struct icmphdr)) { icmph = (struct icmphdr *)((char *)iph + (iph->ihl<<2)); if (skb_copy_bits(skb_in, skb_in->nh.iph->ihl*4, icmph, sizeof(*icmph)) < 0) return; /* Between echo-reply (0) and timestamp (13), everything except echo-request (8) is an error. Also, anything greater than NR_ICMP_TYPES is unknown, and hence should be treated as an error... */ if ((icmph->type < ICMP_TIMESTAMP && icmph->type != ICMP_ECHOREPLY && icmph->type != ICMP_ECHO) || icmph->type > NR_ICMP_TYPES) return; } saddr = iph->daddr; if (!(rt->rt_flags & RTCF_LOCAL)) saddr = 0; tos = (iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL; { struct flowi fl = { .nl_u = { .ip4_u = { .daddr = skb_in->nh.iph->saddr, .saddr = saddr, .tos = RT_TOS(tos) } } }; if (ip_route_output_key(&rt, &fl)) return; } /* RFC says return as much as we can without exceeding 576 bytes. */ length = skb_in->len + sizeof(struct iphdr) + sizeof(struct icmphdr); if (length > dst_pmtu(&rt->u.dst)) length = dst_pmtu(&rt->u.dst); if (length > 576) length = 576; hh_len = LL_RESERVED_SPACE(rt->u.dst.dev); nskb = alloc_skb(hh_len + length, GFP_ATOMIC); if (!nskb) { ip_rt_put(rt); return; } nskb->priority = 0; nskb->dst = &rt->u.dst; skb_reserve(nskb, hh_len); /* Set up IP header */ iph = nskb->nh.iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr)); iph->version=4; iph->ihl=5; iph->tos=tos; iph->tot_len = htons(length); /* PMTU discovery never applies to ICMP packets. */ iph->frag_off = 0; iph->ttl = MAXTTL; ip_select_ident(iph, &rt->u.dst, NULL); iph->protocol=IPPROTO_ICMP; iph->saddr=rt->rt_src; iph->daddr=rt->rt_dst; iph->check=0; iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); /* Set up ICMP header. 
*/ icmph = nskb->h.icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr)); icmph->type = ICMP_DEST_UNREACH; icmph->code = code; icmph->un.gateway = 0; icmph->checksum = 0; /* Copy as much of original packet as will fit */ data = skb_put(nskb, length - sizeof(struct iphdr) - sizeof(struct icmphdr)); skb_copy_bits(skb_in, 0, data, length - sizeof(struct iphdr) - sizeof(struct icmphdr)); icmph->checksum = ip_compute_csum((unsigned char *)icmph, length - sizeof(struct iphdr)); connection_attach(nskb, skb_in->nfct); NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev, ip_finish_output); }
/* * Check if this packet is complete. * Returns NULL on failure by any reason, and pointer * to current nexthdr field in reassembled frame. * * It is called with locked fq, and caller must check that * queue is eligible for reassembly i.e. it is not COMPLETE, * the last and the first frames arrived and all the bits are here. */ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev) { struct net *net = container_of(fq->q.net, struct net, ipv6.frags); struct sk_buff *fp, *head = fq->q.fragments; int payload_len; unsigned int nhoff; fq_kill(fq); /* Make the one we just received the head. */ if (prev) { head = prev->next; fp = skb_clone(head, GFP_ATOMIC); if (!fp) goto out_oom; fp->next = head->next; prev->next = fp; skb_morph(head, fq->q.fragments); head->next = fq->q.fragments->next; kfree_skb(fq->q.fragments); fq->q.fragments = head; } WARN_ON(head == NULL); WARN_ON(FRAG6_CB(head)->offset != 0); /* Unfragmented part is taken from the first segment. */ payload_len = ((head->data - skb_network_header(head)) - sizeof(struct ipv6hdr) + fq->q.len - sizeof(struct frag_hdr)); if (payload_len > IPV6_MAXPLEN) goto out_oversize; /* Head of list must not be cloned. */ if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) goto out_oom; /* If the first fragment is fragmented itself, we split * it to two chunks: the first with data and paged part * and the second, holding only fragments. */ if (skb_has_frags(head)) { struct sk_buff *clone; int i, plen = 0; if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) goto out_oom; clone->next = head->next; head->next = clone; skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; skb_frag_list_init(head); for (i=0; i<skb_shinfo(head)->nr_frags; i++) plen += skb_shinfo(head)->frags[i].size; clone->len = clone->data_len = head->data_len - plen; head->data_len -= clone->len; head->len -= clone->len; clone->csum = 0; clone->ip_summed = head->ip_summed; atomic_add(clone->truesize, &fq->q.net->mem); } /* We have to remove fragment header from datagram and to relocate * header in order to calculate ICV correctly. */ nhoff = fq->nhoffset; skb_network_header(head)[nhoff] = skb_transport_header(head)[0]; memmove(head->head + sizeof(struct frag_hdr), head->head, (head->data - head->head) - sizeof(struct frag_hdr)); head->mac_header += sizeof(struct frag_hdr); head->network_header += sizeof(struct frag_hdr); skb_shinfo(head)->frag_list = head->next; skb_reset_transport_header(head); skb_push(head, head->data - skb_network_header(head)); atomic_sub(head->truesize, &fq->q.net->mem); for (fp=head->next; fp; fp = fp->next) { head->data_len += fp->len; head->len += fp->len; if (head->ip_summed != fp->ip_summed) head->ip_summed = CHECKSUM_NONE; else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; atomic_sub(fp->truesize, &fq->q.net->mem); } head->next = NULL; head->dev = dev; head->tstamp = fq->q.stamp; ipv6_hdr(head)->payload_len = htons(payload_len); IP6CB(head)->nhoff = nhoff; /* Yes, and fold redundant checksum back. 
8) */ if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_partial(skb_network_header(head), skb_network_header_len(head), head->csum); rcu_read_lock(); IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS); rcu_read_unlock(); fq->q.fragments = NULL; return 1; out_oversize: if (net_ratelimit()) printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len); goto out_fail; out_oom: if (net_ratelimit()) printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n"); out_fail: rcu_read_lock(); IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); rcu_read_unlock(); return -1; }
static void b1dma_handle_rx(avmcard *card) { avmctrl_info *cinfo = &card->ctrlinfo[0]; avmcard_dmainfo *dma = card->dma; struct capi_ctr *ctrl = &cinfo->capi_ctrl; struct sk_buff *skb; void *p = dma->recvbuf.dmabuf+4; u32 ApplId, MsgLen, DataB3Len, NCCI, WindowSize; u8 b1cmd = _get_byte(&p); #ifdef AVM_B1DMA_DEBUG printk(KERN_DEBUG "rx: 0x%x %lu\n", b1cmd, (unsigned long)dma->recvlen); #endif switch (b1cmd) { case RECEIVE_DATA_B3_IND: ApplId = (unsigned) _get_word(&p); MsgLen = _get_slice(&p, card->msgbuf); DataB3Len = _get_slice(&p, card->databuf); if (MsgLen < 30) { memset(card->msgbuf+MsgLen, 0, 30-MsgLen); MsgLen = 30; CAPIMSG_SETLEN(card->msgbuf, 30); } if (!(skb = alloc_skb(DataB3Len+MsgLen, GFP_ATOMIC))) { printk(KERN_ERR "%s: incoming packet dropped\n", card->name); } else { memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen); memcpy(skb_put(skb, DataB3Len), card->databuf, DataB3Len); capi_ctr_handle_message(ctrl, ApplId, skb); } break; case RECEIVE_MESSAGE: ApplId = (unsigned) _get_word(&p); MsgLen = _get_slice(&p, card->msgbuf); if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) { printk(KERN_ERR "%s: incoming packet dropped\n", card->name); } else { memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen); if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF) { spin_lock(&card->lock); capilib_data_b3_conf(&cinfo->ncci_head, ApplId, CAPIMSG_NCCI(skb->data), CAPIMSG_MSGID(skb->data)); spin_unlock(&card->lock); } capi_ctr_handle_message(ctrl, ApplId, skb); } break; case RECEIVE_NEW_NCCI: ApplId = _get_word(&p); NCCI = _get_word(&p); WindowSize = _get_word(&p); spin_lock(&card->lock); capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize); spin_unlock(&card->lock); break; case RECEIVE_FREE_NCCI: ApplId = _get_word(&p); NCCI = _get_word(&p); if (NCCI != 0xffffffff) { spin_lock(&card->lock); capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI); spin_unlock(&card->lock); } break; case RECEIVE_START: #ifdef AVM_B1DMA_POLLDEBUG printk(KERN_INFO "%s: receive poll\n", card->name); #endif if (!suppress_pollack) queue_pollack(card); capi_ctr_resume_output(ctrl); break; case RECEIVE_STOP: capi_ctr_suspend_output(ctrl); break; case RECEIVE_INIT: cinfo->versionlen = _get_slice(&p, cinfo->versionbuf); b1_parse_version(cinfo); printk(KERN_INFO "%s: %s-card (%s) now active\n", card->name, cinfo->version[VER_CARDTYPE], cinfo->version[VER_DRIVER]); capi_ctr_ready(ctrl); break; case RECEIVE_TASK_READY: ApplId = (unsigned) _get_word(&p); MsgLen = _get_slice(&p, card->msgbuf); card->msgbuf[MsgLen] = 0; while ( MsgLen > 0 && ( card->msgbuf[MsgLen-1] == '\n' || card->msgbuf[MsgLen-1] == '\r')) { card->msgbuf[MsgLen-1] = 0; MsgLen--; } printk(KERN_INFO "%s: task %d \"%s\" ready.\n", card->name, ApplId, card->msgbuf); break; case RECEIVE_DEBUGMSG: MsgLen = _get_slice(&p, card->msgbuf); card->msgbuf[MsgLen] = 0; while ( MsgLen > 0 && ( card->msgbuf[MsgLen-1] == '\n' || card->msgbuf[MsgLen-1] == '\r')) { card->msgbuf[MsgLen-1] = 0; MsgLen--; } printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf); break; default: printk(KERN_ERR "%s: b1dma_interrupt: 0x%x ???\n", card->name, b1cmd); return; } }
int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
		  u8 *cmd_buf, u32 cmd_len,
		  u8 *rsp_buf, u32 rsp_len,
		  u32 timeout)
{
	struct ath_hw *ah = wmi->drv_priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u16 headroom = sizeof(struct htc_frame_hdr) +
		       sizeof(struct wmi_cmd_hdr);
	struct sk_buff *skb;
	u8 *data;
	int time_left, ret = 0;

	if (ah->ah_flags & AH_UNPLUGGED)
		return 0;

	skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, headroom);

	if (cmd_len != 0 && cmd_buf != NULL) {
		data = (u8 *) skb_put(skb, cmd_len);
		memcpy(data, cmd_buf, cmd_len);
	}

	mutex_lock(&wmi->op_mutex);

	/* check if wmi stopped flag is set */
	if (unlikely(wmi->stopped)) {
		ret = -EPROTO;
		goto out;
	}

	/* record the rsp buffer and length */
	wmi->cmd_rsp_buf = rsp_buf;
	wmi->cmd_rsp_len = rsp_len;

	ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len);
	if (ret)
		goto out;

	time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
	if (!time_left) {
		ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
			wmi_cmd_to_name(cmd_id));
		mutex_unlock(&wmi->op_mutex);
		return -ETIMEDOUT;
	}

	mutex_unlock(&wmi->op_mutex);

	return 0;

out:
	ath_dbg(common, WMI, "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
	mutex_unlock(&wmi->op_mutex);
	kfree_skb(skb);

	return ret;
}
static struct sk_buff *netlink_build_message(ipq_queue_element_t *e, int *errp) { unsigned char *old_tail; size_t size = 0; size_t data_len = 0; struct sk_buff *skb; ipq_packet_msg_t *pm; struct nlmsghdr *nlh; switch (nlq->peer.copy_mode) { size_t copy_range; case IPQ_COPY_META: size = NLMSG_SPACE(sizeof(*pm)); data_len = 0; break; case IPQ_COPY_PACKET: copy_range = nlq->peer.copy_range; if (copy_range == 0 || copy_range > e->skb->len) data_len = e->skb->len; else data_len = copy_range; size = NLMSG_SPACE(sizeof(*pm) + data_len); break; case IPQ_COPY_NONE: default: *errp = -EINVAL; return NULL; } skb = alloc_skb(size, GFP_ATOMIC); if (!skb) goto nlmsg_failure; old_tail = skb->tail; nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh)); pm = NLMSG_DATA(nlh); memset(pm, 0, sizeof(*pm)); pm->packet_id = (unsigned long )e; pm->data_len = data_len; pm->timestamp_sec = e->skb->stamp.tv_sec; pm->timestamp_usec = e->skb->stamp.tv_usec; pm->mark = e->skb->nfmark; pm->hook = e->info->hook; if (e->info->indev) strcpy(pm->indev_name, e->info->indev->name); else pm->indev_name[0] = '\0'; if (e->info->outdev) strcpy(pm->outdev_name, e->info->outdev->name); else pm->outdev_name[0] = '\0'; pm->hw_protocol = e->skb->protocol; if (e->info->indev && e->skb->dev) { pm->hw_type = e->skb->dev->type; if (e->skb->dev->hard_header_parse) pm->hw_addrlen = e->skb->dev->hard_header_parse(e->skb, pm->hw_addr); } if (data_len) memcpy(pm->payload, e->skb->data, data_len); nlh->nlmsg_len = skb->tail - old_tail; NETLINK_CB(skb).dst_groups = 0; return skb; nlmsg_failure: if (skb) kfree_skb(skb); *errp = 0; printk(KERN_ERR "ip_queue: error creating netlink message\n"); return NULL; }
static void __aarp_send_query(struct aarp_entry *a)
{
	static char aarp_eth_multicast[ETH_ALEN] =
		{ 0x09, 0x00, 0x07, 0xFF, 0xFF, 0xFF };
	struct net_device *dev = a->dev;
	int len = dev->hard_header_len + sizeof(struct elapaarp) +
		  aarp_dl->header_length;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
	struct elapaarp *eah;
	struct at_addr *sat = atalk_find_dev_addr(dev);

	if (skb == NULL)
		return;

	if (sat == NULL) {
		kfree_skb(skb);
		return;
	}

	/*
	 * Set up the buffer.
	 */
	skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length);
	eah = (struct elapaarp *)skb_put(skb, sizeof(struct elapaarp));
	skb->protocol = htons(ETH_P_ATALK);
	skb->nh.raw = skb->h.raw = (void *) eah;
	skb->dev = dev;

	/*
	 * Set up the ARP.
	 */
	eah->hw_type	= htons(AARP_HW_TYPE_ETHERNET);
	eah->pa_type	= htons(ETH_P_ATALK);
	eah->hw_len	= ETH_ALEN;
	eah->pa_len	= AARP_PA_ALEN;
	eah->function	= htons(AARP_REQUEST);

	memcpy(eah->hw_src, dev->dev_addr, ETH_ALEN);

	eah->pa_src_zero = 0;
	eah->pa_src_net	 = sat->s_net;
	eah->pa_src_node = sat->s_node;

	memset(eah->hw_dst, '\0', ETH_ALEN);

	eah->pa_dst_zero = 0;
	eah->pa_dst_net	 = a->target_addr.s_net;
	eah->pa_dst_node = a->target_addr.s_node;

	/*
	 * Add ELAP headers and set target to the AARP multicast.
	 */
	aarp_dl->datalink_header(aarp_dl, skb, aarp_eth_multicast);

	/*
	 * Send it.
	 */
	dev_queue_xmit(skb);

	/*
	 * Update the sending count.
	 */
	a->xmit_count++;
}
void icc_interrupt(struct IsdnCardState *cs, u_char val) { u_char exval, v1; struct sk_buff *skb; unsigned int count; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ICC interrupt %x", val); if (val & 0x80) { /* RME */ exval = cs->readisac(cs, ICC_RSTA); if ((exval & 0x70) != 0x20) { if (exval & 0x40) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "ICC RDO"); #ifdef ERROR_STATISTIC cs->err_rx++; #endif } if (!(exval & 0x20)) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "ICC CRC error"); #ifdef ERROR_STATISTIC cs->err_crc++; #endif } cs->writeisac(cs, ICC_CMDR, 0x80); } else { count = cs->readisac(cs, ICC_RBCL) & 0x1f; if (count == 0) count = 32; icc_empty_fifo(cs, count); if ((count = cs->rcvidx) > 0) { cs->rcvidx = 0; if (!(skb = alloc_skb(count, GFP_ATOMIC))) printk(KERN_WARNING "HiSax: D receive out of memory\n"); else { memcpy(skb_put(skb, count), cs->rcvbuf, count); skb_queue_tail(&cs->rq, skb); } } } cs->rcvidx = 0; schedule_event(cs, D_RCVBUFREADY); } if (val & 0x40) { /* RPF */ icc_empty_fifo(cs, 32); } if (val & 0x20) { /* RSC */ /* never */ if (cs->debug & L1_DEB_WARN) debugl1(cs, "ICC RSC interrupt"); } if (val & 0x10) { /* XPR */ if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) schedule_event(cs, D_CLEARBUSY); if (cs->tx_skb) { if (cs->tx_skb->len) { icc_fill_fifo(cs); goto afterXPR; } else { dev_kfree_skb_irq(cs->tx_skb); cs->tx_cnt = 0; cs->tx_skb = NULL; } } if ((cs->tx_skb = skb_dequeue(&cs->sq))) { cs->tx_cnt = 0; icc_fill_fifo(cs); } else schedule_event(cs, D_XMTBUFREADY); } afterXPR: if (val & 0x04) { /* CISQ */ exval = cs->readisac(cs, ICC_CIR0); if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ICC CIR0 %02X", exval ); if (exval & 2) { cs->dc.icc.ph_state = (exval >> 2) & 0xf; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ph_state change %x", cs->dc.icc.ph_state); schedule_event(cs, D_L1STATECHANGE); }
/*****************************************************************************
 Prototype    : PreIP_send_frame
 Description  : Build a PreIP frame and send it out of the configured
                interface (e.g. eth0)
 Input        : preip_all - PreIP payload to transmit
 Output       : None
 Return Value : TRUE on success, FALSE on failure
 *****************************************************************************/
A_BOOL PreIP_send_frame(FramePreIpAll_t *preip_all)
{
	u8 *pbuf = NULL;
	FrameSAPHead_t *psap;
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	u8 dstmac[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	u8 srcmac[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

#ifdef PRE_IP_DEBUG
	printk("%s: in\n", __func__);
#endif

	dev = dev_get_by_name(&init_net, PREIP_NET_DEVICE_NAME);
	if (dev == NULL) {
		printk("%s: can't find device %s!\n", __func__, PREIP_NET_DEVICE_NAME);
		return FALSE;
	}

	skb = alloc_skb(FRAME_SAP_OCTETS + FRAME_PREIP_OCTETS +
			LL_RESERVED_SPACE(dev) + sizeof(FramePreIpAll_t), GFP_ATOMIC);
	if (skb == NULL) {
		printk("alloc_skb fail\n");
		dev_put(dev);
		return FALSE;
	}

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	pbuf = skb_put(skb, (FRAME_SAP_OCTETS + FRAME_PREIP_OCTETS +
			     sizeof(FramePreIpAll_t)));
	skb->dev = dev;
	skb->protocol = htons(ETH_P_802_3);

	/* srcmac is the eth0 MAC address */
	memcpy(srcmac, dev->dev_addr, 6);

	/* build Ethernet header */
	if (dev_hard_header(skb, dev, ETH_P_802_3, dstmac, srcmac, skb->len) < 0) {
		printk("%s: dev_hard_header error\n", __func__);
		kfree_skb(skb);
		dev_put(dev);
		return FALSE;
	}

	/* build LLC */
	psap = (FrameSAPHead_t *)pbuf;
	psap->ssap = PREIP_SSAP;
	psap->dsap = PREIP_DSAP;
	psap->cmd = PREIP_SAPCMD;
	psap->vendorid[0] = PREIP_PRIV_ID1;
	psap->vendorid[1] = PREIP_PRIV_ID2;
	psap->vendorid[2] = PREIP_PRIV_ID3;
	psap->protocol = htons(PREIP_SAPPROTOCOL);

	memcpy(pbuf + FRAME_SAP_OCTETS, preip_all, sizeof(FramePreIpAll_t));

#ifdef PRE_IP_DEBUG
	printk("PreIP_Send Frame: skb->len=%d\n", skb->len);
#endif

	/* send this frame out; dev_queue_xmit() consumes the skb */
	dev_queue_xmit(skb);

	/* drop the reference taken by dev_get_by_name() */
	dev_put(dev);

	return TRUE;
}
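The allocation above budgets link-layer headroom (LL_RESERVED_SPACE) plus the LLC/SAP header and the PreIP payload. A userspace sketch of that budget, with the macro values replaced by assumptions since the PreIP header sizes are vendor-specific.

/* Size budget for the PreIP frame built above; FRAME_SAP_OCTETS,
 * FRAME_PREIP_OCTETS, the payload size and the link-layer reserve are all
 * assumed values standing in for the vendor definitions. */
#include <stdio.h>

#define LL_RESERVE         16   /* assumed LL_RESERVED_SPACE(dev) */
#define FRAME_SAP_OCTETS    8   /* assumed LLC/SAP header size */
#define FRAME_PREIP_OCTETS  4   /* assumed PreIP sub-header size */
#define PREIP_PAYLOAD      64   /* assumed sizeof(FramePreIpAll_t) */

int main(void)
{
        int alloc_len = FRAME_SAP_OCTETS + FRAME_PREIP_OCTETS +
                        LL_RESERVE + PREIP_PAYLOAD;
        int put_len = FRAME_SAP_OCTETS + FRAME_PREIP_OCTETS + PREIP_PAYLOAD;

        /* skb_reserve(LL_RESERVE) leaves room for dev_hard_header() to push
         * the Ethernet header; skb_put(put_len) covers SAP + payload. */
        printf("alloc %d, reserve %d, put %d\n", alloc_len, LL_RESERVE, put_len);
        return 0;
}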
/* * All outgoing AX.25 I frames pass via this routine. Therefore this is * where the fragmentation of frames takes place. If fragment is set to * zero then we are not allowed to do fragmentation, even if the frame * is too large. */ void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb) { struct sk_buff *skbn; unsigned char *p; int frontlen, len, fragno, ka9qfrag, first = 1; if (paclen < 16) { WARN_ON_ONCE(1); kfree_skb(skb); return; } if ((skb->len - 1) > paclen) { if (*skb->data == AX25_P_TEXT) { skb_pull(skb, 1); /* skip PID */ ka9qfrag = 0; } else { paclen -= 2; /* Allow for fragment control info */ ka9qfrag = 1; } fragno = skb->len / paclen; if (skb->len % paclen == 0) fragno--; frontlen = skb_headroom(skb); /* Address space + CTRL */ while (skb->len > 0) { spin_lock_bh(&ax25_frag_lock); if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) { spin_unlock_bh(&ax25_frag_lock); printk(KERN_CRIT "AX.25: ax25_output - out of memory\n"); return; } if (skb->sk != NULL) skb_set_owner_w(skbn, skb->sk); spin_unlock_bh(&ax25_frag_lock); len = (paclen > skb->len) ? skb->len : paclen; if (ka9qfrag == 1) { skb_reserve(skbn, frontlen + 2); skb_set_network_header(skbn, skb_network_offset(skb)); skb_copy_from_linear_data(skb, skb_put(skbn, len), len); p = skb_push(skbn, 2); *p++ = AX25_P_SEGMENT; *p = fragno--; if (first) { *p |= AX25_SEG_FIRST; first = 0; } } else { skb_reserve(skbn, frontlen + 1); skb_set_network_header(skbn, skb_network_offset(skb)); skb_copy_from_linear_data(skb, skb_put(skbn, len), len); p = skb_push(skbn, 1); *p = AX25_P_TEXT; } skb_pull(skb, len); skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */ } kfree_skb(skb); } else { skb_queue_tail(&ax25->write_queue, skb); /* Throw it on the queue */ } switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) { case AX25_PROTO_STD_SIMPLEX: case AX25_PROTO_STD_DUPLEX: ax25_kick(ax25); break; #ifdef CONFIG_AX25_DAMA_SLAVE /* * A DAMA slave is _required_ to work as normal AX.25L2V2 * if no DAMA master is available. */ case AX25_PROTO_DAMA_SLAVE: if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25); break; #endif } }
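The fragment numbering above counts down to zero, with paclen reduced by two bytes in KA9Q mode to make room for the segment control byte. A runnable sketch of that arithmetic with illustrative lengths.

/* Fragment numbering used by ax25_output() above: paclen shrinks by 2 for
 * KA9Q-style fragments, the first fragment gets the highest number, and the
 * last fragment carries number 0.  The lengths here are illustrative. */
#include <stdio.h>

int main(void)
{
        int len = 600;          /* payload length after the PID handling */
        int paclen = 256;       /* negotiated packet length */
        int ka9qfrag = 1;

        if (ka9qfrag)
                paclen -= 2;    /* allow for fragment control info */

        int fragno = len / paclen;
        if (len % paclen == 0)
                fragno--;

        printf("first fragment number %d (counts down to 0)\n", fragno);
        while (len > 0) {
                int this_len = len > paclen ? paclen : len;

                printf("fragment %d: %d bytes\n", fragno--, this_len);
                len -= this_len;
        }
        return 0;
}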
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev) { struct net *net = container_of(fq->q.net, struct net, ipv6.frags); struct sk_buff *fp, *head = fq->q.fragments; int payload_len; unsigned int nhoff; fq_kill(fq); if (prev) { head = prev->next; fp = skb_clone(head, GFP_ATOMIC); if (!fp) goto out_oom; fp->next = head->next; prev->next = fp; skb_morph(head, fq->q.fragments); head->next = fq->q.fragments->next; kfree_skb(fq->q.fragments); fq->q.fragments = head; } WARN_ON(head == NULL); WARN_ON(FRAG6_CB(head)->offset != 0); payload_len = ((head->data - skb_network_header(head)) - sizeof(struct ipv6hdr) + fq->q.len - sizeof(struct frag_hdr)); if (payload_len > IPV6_MAXPLEN) goto out_oversize; if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) goto out_oom; if (skb_has_frags(head)) { struct sk_buff *clone; int i, plen = 0; if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) goto out_oom; clone->next = head->next; head->next = clone; skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; skb_frag_list_init(head); for (i=0; i<skb_shinfo(head)->nr_frags; i++) plen += skb_shinfo(head)->frags[i].size; clone->len = clone->data_len = head->data_len - plen; head->data_len -= clone->len; head->len -= clone->len; clone->csum = 0; clone->ip_summed = head->ip_summed; atomic_add(clone->truesize, &fq->q.net->mem); } nhoff = fq->nhoffset; skb_network_header(head)[nhoff] = skb_transport_header(head)[0]; memmove(head->head + sizeof(struct frag_hdr), head->head, (head->data - head->head) - sizeof(struct frag_hdr)); head->mac_header += sizeof(struct frag_hdr); head->network_header += sizeof(struct frag_hdr); skb_shinfo(head)->frag_list = head->next; skb_reset_transport_header(head); skb_push(head, head->data - skb_network_header(head)); atomic_sub(head->truesize, &fq->q.net->mem); for (fp=head->next; fp; fp = fp->next) { head->data_len += fp->len; head->len += fp->len; if (head->ip_summed != fp->ip_summed) head->ip_summed = CHECKSUM_NONE; else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; atomic_sub(fp->truesize, &fq->q.net->mem); } head->next = NULL; head->dev = dev; head->tstamp = fq->q.stamp; ipv6_hdr(head)->payload_len = htons(payload_len); IP6CB(head)->nhoff = nhoff; if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_partial(skb_network_header(head), skb_network_header_len(head), head->csum); rcu_read_lock(); IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS); rcu_read_unlock(); fq->q.fragments = NULL; return 1; out_oversize: if (net_ratelimit()) printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len); goto out_fail; out_oom: if (net_ratelimit()) printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n"); out_fail: rcu_read_lock(); IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); rcu_read_unlock(); return -1; }
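The reassembled payload length above is the extension-header bytes already pulled into head (minus the fixed IPv6 header) plus the accumulated fragment data, minus the fragment header about to be stripped, and it must fit the 16-bit payload_len field. A small sketch of that bound with assumed offsets.

/* Payload-length check used by ip6_frag_reasm() above; the two input
 * lengths are assumed, IPV6_MAXPLEN is the real 16-bit limit. */
#include <stdio.h>

#define IPV6_MAXPLEN 65535
#define IPV6_HDR_LEN 40
#define FRAG_HDR_LEN 8

int main(void)
{
        int hdrs_len = 48;          /* assumed head->data - network header */
        int frag_data_len = 65400;  /* assumed fq->q.len over all fragments */

        int payload_len = (hdrs_len - IPV6_HDR_LEN)
                          + frag_data_len - FRAG_HDR_LEN;

        if (payload_len > IPV6_MAXPLEN)
                printf("oversize: %d > %d, drop\n", payload_len, IPV6_MAXPLEN);
        else
                printf("payload_len = %d fits\n", payload_len);
        return 0;
}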
static int rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags) { struct sk_buff *skb; int retval = -ENOMEM; size_t size = 0; struct usb_ep *out; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); if (dev->port_usb) out = dev->port_usb->out_ep; else out = NULL; spin_unlock_irqrestore(&dev->lock, flags); if (!out) return -ENOTCONN; /* Padding up to RX_EXTRA handles minor disagreements with host. * Normally we use the USB "terminate on short read" convention; * so allow up to (N*maxpacket), since that memory is normally * already allocated. Some hardware doesn't deal well with short * reads (e.g. DMA must be N*maxpacket), so for now don't trim a * byte off the end (to force hardware errors on overflow). * * RNDIS uses internal framing, and explicitly allows senders to * pad to end-of-packet. That's potentially nice for speed, but * means receivers can't recover lost synch on their own (because * new packets don't only start after a short RX). */ size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA; size += dev->port_usb->header_len; size += out->maxpacket - 1; size -= size % out->maxpacket; if (dev->port_usb->is_fixed) size = max_t(size_t, size, dev->port_usb->fixed_out_len); skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags); if (skb == NULL) { DBG(dev, "no rx skb\n"); goto enomem; } /* Some platforms perform better when IP packets are aligned, * but on at least one, checksumming fails otherwise. Note: * RNDIS headers involve variable numbers of LE32 values. */ skb_reserve(skb, NET_IP_ALIGN); req->buf = skb->data; req->length = size; req->complete = rx_complete; req->context = skb; retval = usb_ep_queue(out, req, gfp_flags); if (retval == -ENOMEM) enomem: defer_kevent(dev, WORK_RX_MEMORY); if (retval) { DBG(dev, "rx submit --> %d\n", retval); if (skb) dev_kfree_skb_any(skb); } return retval; }
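The sizing above adds the Ethernet header, MTU, slack and any function-specific header, then rounds the receive buffer up to a whole number of USB max-packets. A minimal sketch of that rounding; RX_EXTRA, header_len and wMaxPacketSize are assumed inputs rather than values from a real gadget configuration.

/* RX buffer sizing used above: Ethernet header + MTU + slack + function
 * header, rounded up to a multiple of the endpoint's wMaxPacketSize. */
#include <stdio.h>
#include <stddef.h>

#define ETH_HDR_LEN 14
#define RX_EXTRA    20          /* assumed guard/padding value */

static size_t rx_buf_size(size_t mtu, size_t header_len, size_t maxpacket)
{
        size_t size = ETH_HDR_LEN + mtu + RX_EXTRA + header_len;

        size += maxpacket - 1;    /* round up ...                        */
        size -= size % maxpacket; /* ... to a whole number of USB packets */
        return size;
}

int main(void)
{
        printf("mtu 1500, hdr 0,  maxpacket 512 -> %zu\n",
               rx_buf_size(1500, 0, 512));
        printf("mtu 1500, hdr 44, maxpacket 512 -> %zu\n",
               rx_buf_size(1500, 44, 512));
        return 0;
}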
static int netlink_trans_request(struct sk_buff *in_skb, struct genl_info *info) { char *buf; unsigned int len; uint32_t multi_flag; struct nlmsghdr *rep, *nlh = info->nlhdr; struct genlmsghdr *genlh; struct nlattr **aap = info->attrs; struct nlattr *nla; struct vr_message request, *response; struct sk_buff *skb; if (!aap || !(nla = aap[NL_ATTR_VR_MESSAGE_PROTOCOL])) return -EINVAL; request.vr_message_buf = nla_data(nla); request.vr_message_len = nla_len(nla); vr_message_request(&request); multi_flag = 0; while ((response = vr_message_dequeue_response())) { if ((multi_flag == 0) && (!vr_response_queue_empty())) multi_flag = NLM_F_MULTI; buf = response->vr_message_buf; skb = netlink_skb(buf); if (!skb) continue; len = response->vr_message_len; len += GENL_HDRLEN + NLA_HDRLEN; len = NLMSG_ALIGN(len); rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, nlh->nlmsg_type, len, multi_flag); genlh = nlmsg_data(rep); memcpy(genlh, info->genlhdr, sizeof(*genlh)); nla = (struct nlattr *)((char *)genlh + GENL_HDRLEN); nla->nla_len = response->vr_message_len; nla->nla_type = NL_ATTR_VR_MESSAGE_PROTOCOL; netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT); response->vr_message_buf = NULL; vr_message_free(response); } if (multi_flag) { skb = alloc_skb(NLMSG_HDRLEN, GFP_ATOMIC); if (!skb) return 0; __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, NLMSG_DONE, 0, 0); netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT); } return 0; }
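Each response above reserves room for the genetlink header and one attribute header in front of the payload, aligned to the netlink boundary, and a standalone NLMSG_DONE message closes an NLM_F_MULTI run. A userspace sketch of the length arithmetic using the uapi macros; the payload length is an illustrative value.

/* Length arithmetic for the per-response message above, using the uapi
 * netlink/genetlink macros; the payload length is illustrative. */
#include <stdio.h>
#include <linux/netlink.h>
#include <linux/genetlink.h>

int main(void)
{
        unsigned int payload = 300;     /* assumed vr_message_len */
        unsigned int len = payload + GENL_HDRLEN + NLA_HDRLEN;

        len = NLMSG_ALIGN(len);
        printf("nlmsg payload %u, total with nlmsghdr %u\n",
               len, (unsigned int)NLMSG_SPACE(len));
        /* A zero-length NLMSG_DONE message terminates an NLM_F_MULTI run. */
        printf("NLMSG_DONE header only: %u bytes\n",
               (unsigned int)NLMSG_HDRLEN);
        return 0;
}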
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) { struct net_device *dev; struct sk_buff *frag; struct rt6_info *rt = (struct rt6_info*)skb->dst; struct ipv6hdr *tmp_hdr; struct frag_hdr *fh; unsigned int mtu, hlen, left, len; u32 frag_id = 0; int ptr, offset = 0, err=0; u8 *prevhdr, nexthdr = 0; dev = rt->u.dst.dev; hlen = ip6_find_1stfragopt(skb, &prevhdr); nexthdr = *prevhdr; mtu = dst_mtu(&rt->u.dst) - hlen - sizeof(struct frag_hdr); if (skb_shinfo(skb)->frag_list) { int first_len = skb_pagelen(skb); if (first_len - hlen > mtu || ((first_len - hlen) & 7) || skb_cloned(skb)) goto slow_path; for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { /* Correct geometry. */ if (frag->len > mtu || ((frag->len & 7) && frag->next) || skb_headroom(frag) < hlen) goto slow_path; /* Partially cloned skb? */ if (skb_shared(frag)) goto slow_path; BUG_ON(frag->sk); if (skb->sk) { sock_hold(skb->sk); frag->sk = skb->sk; frag->destructor = sock_wfree; skb->truesize -= frag->truesize; } } err = 0; offset = 0; frag = skb_shinfo(skb)->frag_list; skb_shinfo(skb)->frag_list = NULL; /* BUILD HEADER */ tmp_hdr = kmalloc(hlen, GFP_ATOMIC); if (!tmp_hdr) { IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS); return -ENOMEM; } *prevhdr = NEXTHDR_FRAGMENT; memcpy(tmp_hdr, skb->nh.raw, hlen); __skb_pull(skb, hlen); fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); skb->nh.raw = __skb_push(skb, hlen); memcpy(skb->nh.raw, tmp_hdr, hlen); ipv6_select_ident(skb, fh); fh->nexthdr = nexthdr; fh->reserved = 0; fh->frag_off = htons(IP6_MF); frag_id = fh->identification; first_len = skb_pagelen(skb); skb->data_len = first_len - skb_headlen(skb); skb->len = first_len; skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr)); for (;;) { /* Prepare header of the next frame, * before previous one went down. */ if (frag) { frag->ip_summed = CHECKSUM_NONE; frag->h.raw = frag->data; fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr)); frag->nh.raw = __skb_push(frag, hlen); memcpy(frag->nh.raw, tmp_hdr, hlen); offset += skb->len - hlen - sizeof(struct frag_hdr); fh->nexthdr = nexthdr; fh->reserved = 0; fh->frag_off = htons(offset); if (frag->next != NULL) fh->frag_off |= htons(IP6_MF); fh->identification = frag_id; frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); ip6_copy_metadata(frag, skb); } err = output(skb); if (err || !frag) break; skb = frag; frag = skb->next; skb->next = NULL; } kfree(tmp_hdr); if (err == 0) { IP6_INC_STATS(IPSTATS_MIB_FRAGOKS); return 0; } while (frag) { skb = frag->next; kfree_skb(frag); frag = skb; } IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS); return err; } slow_path: left = skb->len - hlen; /* Space per frame */ ptr = hlen; /* Where to start from */ /* * Fragment the datagram. */ *prevhdr = NEXTHDR_FRAGMENT; /* * Keep copying data until we run out. */ while(left > 0) { len = left; /* IF: it doesn't fit, use 'mtu' - the data space left */ if (len > mtu) len = mtu; /* IF: we are not sending upto and including the packet end then align the next start on an eight byte boundary */ if (len < left) { len &= ~7; } /* * Allocate buffer. 
*/ if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS); err = -ENOMEM; goto fail; } /* * Set up data on packet */ ip6_copy_metadata(frag, skb); skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev)); skb_put(frag, len + hlen + sizeof(struct frag_hdr)); frag->nh.raw = frag->data; fh = (struct frag_hdr*)(frag->data + hlen); frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr); /* * Charge the memory for the fragment to any owner * it might possess */ if (skb->sk) skb_set_owner_w(frag, skb->sk); /* * Copy the packet header into the new buffer. */ memcpy(frag->nh.raw, skb->data, hlen); /* * Build fragment header. */ fh->nexthdr = nexthdr; fh->reserved = 0; if (!frag_id) { ipv6_select_ident(skb, fh); frag_id = fh->identification; } else fh->identification = frag_id; /* * Copy a block of the IP datagram. */ if (skb_copy_bits(skb, ptr, frag->h.raw, len)) BUG(); left -= len; fh->frag_off = htons(offset); if (left > 0) fh->frag_off |= htons(IP6_MF); frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); ptr += len; offset += len; /* * Put this fragment into the sending queue. */ IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES); err = output(frag); if (err) goto fail; } kfree_skb(skb); IP6_INC_STATS(IPSTATS_MIB_FRAGOKS); return err; fail: kfree_skb(skb); IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS); return err; }
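In the slow path above every fragment except the last must carry a multiple of eight bytes, because the fragment offset field is expressed in 8-byte units. A sketch of how the per-fragment lengths and offsets fall out for an assumed MTU and payload size.

/* Slow-path fragment sizing as above: usable space per fragment is the path
 * MTU minus the unfragmentable part and the fragment header, and every
 * fragment except the last is trimmed to a multiple of 8 bytes. */
#include <stdio.h>

#define IPV6_HDR_LEN 40
#define FRAG_HDR_LEN 8

int main(void)
{
        int path_mtu = 1280;            /* assumed link MTU */
        int hlen = IPV6_HDR_LEN;        /* assumed unfragmentable part */
        int left = 3000;                /* assumed payload still to send */
        int mtu = path_mtu - hlen - FRAG_HDR_LEN;
        int offset = 0;

        while (left > 0) {
                int len = left > mtu ? mtu : left;

                if (len < left)
                        len &= ~7;      /* not the last fragment: 8-byte align */
                printf("fragment at offset %d, %d bytes%s\n",
                       offset, len, len < left ? " (MF set)" : "");
                left -= len;
                offset += len;
        }
        return 0;
}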
/* Send a Work Request to write the filter at a specified index. We construct * a Firmware Filter Work Request to have the work done and put the indicated * filter into "pending" mode which will prevent any further actions against * it till we get a reply from the firmware on the completion status of the * request. */ int set_filter_wr(struct adapter *adapter, int fidx) { struct filter_entry *f = &adapter->tids.ftid_tab[fidx]; struct fw_filter2_wr *fwr; struct sk_buff *skb; skb = alloc_skb(sizeof(*fwr), GFP_KERNEL); if (!skb) return -ENOMEM; /* If the new filter requires loopback Destination MAC and/or VLAN * rewriting then we need to allocate a Layer 2 Table (L2T) entry for * the filter. */ if (f->fs.newdmac || f->fs.newvlan) { /* allocate L2T entry for new filter */ f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan, f->fs.eport, f->fs.dmac); if (!f->l2t) { kfree_skb(skb); return -ENOMEM; } } /* If the new filter requires loopback Source MAC rewriting then * we need to allocate a SMT entry for the filter. */ if (f->fs.newsmac) { f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac); if (!f->smt) { if (f->l2t) { cxgb4_l2t_release(f->l2t); f->l2t = NULL; } kfree_skb(skb); return -ENOMEM; } } fwr = __skb_put_zero(skb, sizeof(*fwr)); /* It would be nice to put most of the following in t4_hw.c but most * of the work is translating the cxgbtool ch_filter_specification * into the Work Request and the definition of that structure is * currently in cxgbtool.h which isn't appropriate to pull into the * common code. We may eventually try to come up with a more neutral * filter specification structure but for now it's easiest to simply * put this fairly direct code in line ... */ if (adapter->params.filter2_wr_support) fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER2_WR)); else fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR)); fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16)); fwr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(f->tid) | FW_FILTER_WR_RQTYPE_V(f->fs.type) | FW_FILTER_WR_NOREPLY_V(0) | FW_FILTER_WR_IQ_V(f->fs.iq)); fwr->del_filter_to_l2tix = htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) | FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) | FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) | FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) | FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) | FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) | FW_FILTER_WR_DMAC_V(f->fs.newdmac) | FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) | FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE || f->fs.newvlan == VLAN_REWRITE) | FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) | FW_FILTER_WR_TXCHAN_V(f->fs.eport) | FW_FILTER_WR_PRIO_V(f->fs.prio) | FW_FILTER_WR_L2TIX_V(f->l2t ? 
f->l2t->idx : 0)); fwr->ethtype = htons(f->fs.val.ethtype); fwr->ethtypem = htons(f->fs.mask.ethtype); fwr->frag_to_ovlan_vldm = (FW_FILTER_WR_FRAG_V(f->fs.val.frag) | FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) | FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) | FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) | FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) | FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld)); fwr->smac_sel = 0; fwr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_CHAN_V(0) | FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id)); fwr->maci_to_matchtypem = htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) | FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) | FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) | FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) | FW_FILTER_WR_PORT_V(f->fs.val.iport) | FW_FILTER_WR_PORTM_V(f->fs.mask.iport) | FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) | FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype)); fwr->ptcl = f->fs.val.proto; fwr->ptclm = f->fs.mask.proto; fwr->ttyp = f->fs.val.tos; fwr->ttypm = f->fs.mask.tos; fwr->ivlan = htons(f->fs.val.ivlan); fwr->ivlanm = htons(f->fs.mask.ivlan); fwr->ovlan = htons(f->fs.val.ovlan); fwr->ovlanm = htons(f->fs.mask.ovlan); memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip)); memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm)); memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip)); memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm)); fwr->lp = htons(f->fs.val.lport); fwr->lpm = htons(f->fs.mask.lport); fwr->fp = htons(f->fs.val.fport); fwr->fpm = htons(f->fs.mask.fport); if (adapter->params.filter2_wr_support) { fwr->natmode_to_ulp_type = FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) | FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode); memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip)); memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip)); fwr->newlport = htons(f->fs.nat_lport); fwr->newfport = htons(f->fs.nat_fport); } /* Mark the filter as "pending" and ship off the Filter Work Request. * When we get the Work Request Reply we'll clear the pending status. */ f->pending = 1; set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3); t4_ofld_send(adapter, skb); return 0; }
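The work request above encodes its own length in 16-byte units (the FW_WR_LEN16 field built from sizeof(*fwr) / 16), which only works because firmware work requests are sized in whole 16-byte multiples. A hedged sketch of that encoding with an assumed structure size standing in for sizeof(struct fw_filter2_wr).

/* len16 encoding used for the filter work request above; the size below is
 * an assumed stand-in for sizeof(struct fw_filter2_wr). */
#include <stdio.h>
#include <assert.h>

int main(void)
{
        size_t wr_size = 112;           /* assumed work request size, bytes */

        assert(wr_size % 16 == 0);      /* firmware WRs are 16-byte multiples */
        printf("FW_WR_LEN16 field = %zu\n", wr_size / 16);
        return 0;
}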