static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
                                u32 lower, u32 upper)
{
        struct sk_buff *buf_copy;
        struct tipc_node *n_ptr;
        u32 n_num;
        u32 tstop;

        assert(lower <= upper);
        assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
               ((lower >= LOWEST_SLAVE) && (lower <= tipc_highest_allowed_slave)));
        assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
               ((upper >= LOWEST_SLAVE) && (upper <= tipc_highest_allowed_slave)));
        assert(in_own_cluster(c_ptr->addr));

        tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
        if (tstop > upper)
                tstop = upper;
        for (n_num = lower; n_num <= tstop; n_num++) {
                n_ptr = c_ptr->nodes[n_num];
                if (n_ptr && tipc_node_has_active_links(n_ptr)) {
                        buf_copy = skb_copy(buf, GFP_ATOMIC);
                        if (buf_copy == NULL)
                                break;
                        msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
                        tipc_link_send(buf_copy, n_ptr->addr, n_ptr->addr);
                }
        }
        buf_discard(buf);
}
/*
 * This function contains logic for AP packet forwarding.
 *
 * If a packet is multicast/broadcast, it is sent to kernel/upper layer
 * as well as queued back to AP TX queue so that it can be sent to other
 * associated stations.
 * If a packet is unicast and RA is present in associated station list,
 * it is again requeued into AP TX queue.
 * If a packet is unicast and RA is not in associated station list,
 * packet is forwarded to kernel to handle routing logic.
 */
int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
                                  struct sk_buff *skb)
{
        struct mwifiex_adapter *adapter = priv->adapter;
        struct uap_rxpd *uap_rx_pd;
        struct rx_packet_hdr *rx_pkt_hdr;
        u8 ra[ETH_ALEN];
        struct sk_buff *skb_uap;

        uap_rx_pd = (struct uap_rxpd *)(skb->data);
        rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

        /* don't do packet forwarding in disconnected state */
        if (!priv->media_connected) {
                dev_err(adapter->dev, "drop packet in disconnected state.\n");
                dev_kfree_skb_any(skb);
                return 0;
        }

        memcpy(ra, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN);

        if (is_multicast_ether_addr(ra)) {
                skb_uap = skb_copy(skb, GFP_ATOMIC);
                /* skb_copy() can fail under memory pressure; only queue the
                 * bridged copy when the copy succeeded.
                 */
                if (likely(skb_uap))
                        mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
        } else {
                if (mwifiex_get_sta_entry(priv, ra)) {
                        /* Requeue Intra-BSS packet */
                        mwifiex_uap_queue_bridged_pkt(priv, skb);
                        return 0;
                }
        }

        /* Forward unicast/Inter-BSS packets to kernel. */
        return mwifiex_process_rx_packet(adapter, skb);
}
static void spx_retransmit(unsigned long data)
{
        struct sock *sk = (struct sock *)data;
        struct spx_opt *pdata = &sk->tp_pinfo.af_spx;
        struct sk_buff *skb;
        unsigned long flags;
        int err;

        del_timer(&pdata->retransmit);

        if (pdata->state == SPX_CLOSED)
                return;
        if (pdata->retransmits > RETRY_COUNT) {
                spx_close_socket(sk);   /* Unilateral Abort */
                return;
        }

        /* Need to leave skb on the queue, aye the fear */
        save_flags(flags);
        cli();
        skb = skb_peek(&pdata->retransmit_queue);
        if (skb_cloned(skb))
                skb = skb_copy(skb, GFP_ATOMIC);
        else
                skb = skb_clone(skb, GFP_ATOMIC);
        restore_flags(flags);

        pdata->retransmit.expires = jiffies + spx_calc_rtt(pdata->retransmits);
        add_timer(&pdata->retransmit);

        err = spx_route_skb(pdata, skb, RETRAN);
        pdata->retransmits++;

        return;
}
/* set ECT codepoint from IP header.
 *      return 0 in case there was no ECT codepoint
 *      return 1 in case ECT codepoint has been overwritten
 *      return < 0 in case there was error */
static int inline
set_ect_ip(struct sk_buff **pskb, struct iphdr *iph,
           const struct ipt_ECN_info *einfo)
{
        if ((iph->tos & IPT_ECN_IP_MASK)
            != (einfo->ip_ect & IPT_ECN_IP_MASK)) {
                u_int16_t diffs[2];

                /* raw socket (tcpdump) may have clone of incoming
                 * skb: don't disturb it --RR */
                if (skb_cloned(*pskb) && !(*pskb)->sk) {
                        struct sk_buff *nskb = skb_copy(*pskb, GFP_ATOMIC);
                        if (!nskb)
                                return NF_DROP;
                        kfree_skb(*pskb);
                        *pskb = nskb;
                        iph = (*pskb)->nh.iph;
                }

                diffs[0] = htons(iph->tos) ^ 0xFFFF;
                iph->tos = iph->tos & ~IPT_ECN_IP_MASK;
                iph->tos = iph->tos | (einfo->ip_ect & IPT_ECN_IP_MASK);
                diffs[1] = htons(iph->tos);
                iph->check = csum_fold(csum_partial((char *)diffs,
                                                    sizeof(diffs),
                                                    iph->check ^ 0xFFFF));
                (*pskb)->nfcache |= NFC_ALTERED;
                return 1;
        }
        return 0;
}
static int ebt_target_snat(struct sk_buff **pskb, unsigned int hooknr,
                           const struct net_device *in,
                           const struct net_device *out,
                           const void *data, unsigned int datalen)
{
        struct ebt_nat_info *info = (struct ebt_nat_info *)data;

        if (skb_shared(*pskb) || skb_cloned(*pskb)) {
                struct sk_buff *nskb;

                nskb = skb_copy(*pskb, GFP_ATOMIC);
                if (!nskb)
                        return NF_DROP;
                if ((*pskb)->sk)
                        skb_set_owner_w(nskb, (*pskb)->sk);
                kfree_skb(*pskb);
                *pskb = nskb;
        }
        memcpy(eth_hdr(*pskb)->h_source, info->mac, ETH_ALEN);
        if (!(info->target & NAT_ARP_BIT) &&
            eth_hdr(*pskb)->h_proto == htons(ETH_P_ARP)) {
                struct arphdr _ah, *ap;

                ap = skb_header_pointer(*pskb, 0, sizeof(_ah), &_ah);
                if (ap == NULL)
                        return EBT_DROP;
                if (ap->ar_hln != ETH_ALEN)
                        goto out;
                if (skb_store_bits(*pskb, sizeof(_ah), info->mac, ETH_ALEN))
                        return EBT_DROP;
        }
out:
        return info->target | ~EBT_VERDICT_BITS;
}
int ip_vs_make_skb_writable(struct sk_buff **pskb, int writable_len)
{
        struct sk_buff *skb = *pskb;

        /* skb is already used, better copy skb and its payload */
        if (unlikely(skb_shared(skb) || skb->sk))
                goto copy_skb;

        /* skb data is already used, copy it */
        if (unlikely(skb_cloned(skb)))
                goto copy_data;

        return pskb_may_pull(skb, writable_len);

copy_data:
        if (unlikely(writable_len > skb->len))
                return 0;
        return !pskb_expand_head(skb, 0, 0, GFP_ATOMIC);

copy_skb:
        if (unlikely(writable_len > skb->len))
                return 0;
        skb = skb_copy(skb, GFP_ATOMIC);
        if (!skb)
                return 0;
        BUG_ON(skb_is_nonlinear(skb));

        /* Rest of kernel will get very unhappy if we pass it a
           suddenly-orphaned skbuff */
        if ((*pskb)->sk)
                skb_set_owner_w(skb, (*pskb)->sk);
        kfree_skb(*pskb);
        *pskb = skb;
        return 1;
}
void pppoe_relay_broadcast(pppoe_if_t *src_if, struct sk_buff *m)
{
        pppoe_if_t *iter;
        int used = 0;

        for (iter = if_head; iter; iter = iter->next) {
                struct sk_buff *mc;
                struct ethhdr *eh;

                if (iter == src_if || !iter->acOK || !(iter->dev->flags & IFF_UP))
                        continue;

                if (!iter->next) {
                        mc = m;
                        used = 1;
                } else {
                        mc = skb_copy(m, GFP_ATOMIC);
                        if (!mc) {
                                if (iter->dev->get_stats)
                                        iter->dev->get_stats(iter->dev)->tx_errors++;
                                continue;
                        }
                }
                eh = MTOD(mc, struct ethhdr *);
                memcpy(eh->h_source, DEV_MAC(iter->dev), ETH_ALEN);
                relay_enqueue(iter->dev, mc);
        }
        if (!used)
                kfree_skb(m);
}
struct sk_buff __GC *
GC_copy_buff(struct GC_data *gc, struct sk_buff __GC *orig)
{
        struct sk_buff *skb;
        struct sk_buff __GC *ret;

        if (gc->pool.len >= Q_GC_POOL_QUEUE_LEN) {
                pr_devel("[PFQ] GC: pool exhausted!\n");
                ret = NULL;
                return ret;
        }

        skb = skb_copy(PFQ_SKB(orig), GFP_ATOMIC);
        if (skb == NULL) {
                pr_devel("[PFQ] GC: out of memory!\n");
                ret = NULL;
                return ret;
        }

        skb->mac_len = orig->mac_len;

        /* GC_make_buff can't fail now */
        ret = GC_make_buff(gc, skb);

        PFQ_CB(ret)->group_mask = PFQ_CB(orig)->group_mask;
        PFQ_CB(ret)->direct     = PFQ_CB(orig)->direct;
        PFQ_CB(ret)->monad      = PFQ_CB(orig)->monad;

        return ret;
}
static void ieee80211p_rx(struct sk_buff *skb)
{
        /* We copy the received buffer since we need to modify it */
        drv_priv_data.rx_skb = skb_copy(skb, GFP_ATOMIC);

        /* Schedule a tasklet to handle the received skb; skip it if the
         * copy failed under memory pressure.
         */
        if (drv_priv_data.rx_skb)
                tasklet_schedule(&drv_priv_data.rx_tq);
}
static unsigned int
target(struct sk_buff **pskb,
       const struct net_device *in, const struct net_device *out,
       unsigned int hooknum, const struct xt_target *target,
       const void *targinfo)
{
        const struct arpt_mangle *mangle = targinfo;
        struct arphdr *arp;
        unsigned char *arpptr;
        int pln, hln;

        if (skb_shared(*pskb) || skb_cloned(*pskb)) {
                struct sk_buff *nskb;

                nskb = skb_copy(*pskb, GFP_ATOMIC);
                if (!nskb)
                        return NF_DROP;
                if ((*pskb)->sk)
                        skb_set_owner_w(nskb, (*pskb)->sk);
                kfree_skb(*pskb);
                *pskb = nskb;
        }

        arp = (*pskb)->nh.arph;
        arpptr = (*pskb)->nh.raw + sizeof(*arp);
        pln = arp->ar_pln;
        hln = arp->ar_hln;
        /* We assume that pln and hln were checked in the match */
        if (mangle->flags & ARPT_MANGLE_SDEV) {
                if (ARPT_DEV_ADDR_LEN_MAX < hln ||
                    (arpptr + hln > (**pskb).tail))
                        return NF_DROP;
                memcpy(arpptr, mangle->src_devaddr, hln);
        }
        arpptr += hln;
        if (mangle->flags & ARPT_MANGLE_SIP) {
                if (ARPT_MANGLE_ADDR_LEN_MAX < pln ||
                    (arpptr + pln > (**pskb).tail))
                        return NF_DROP;
                memcpy(arpptr, &mangle->u_s.src_ip, pln);
        }
        arpptr += pln;
        if (mangle->flags & ARPT_MANGLE_TDEV) {
                if (ARPT_DEV_ADDR_LEN_MAX < hln ||
                    (arpptr + hln > (**pskb).tail))
                        return NF_DROP;
                memcpy(arpptr, mangle->tgt_devaddr, hln);
        }
        arpptr += hln;
        if (mangle->flags & ARPT_MANGLE_TIP) {
                if (ARPT_MANGLE_ADDR_LEN_MAX < pln ||
                    (arpptr + pln > (**pskb).tail))
                        return NF_DROP;
                memcpy(arpptr, &mangle->u_t.tgt_ip, pln);
        }
        return mangle->target;
}
/*
 *      This is where all valid I frames are sent to, to be dispatched to
 *      whichever protocol requires them.
 */
int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
{
        int (*func)(struct sk_buff *, ax25_cb *);
        volatile int queued = 0;
        unsigned char pid;

        if (skb == NULL)
                return 0;

        ax25_start_idletimer(ax25);

        pid = *skb->data;

#ifdef CONFIG_INET
        if (pid == AX25_P_IP) {
                /* working around a TCP bug to keep additional listeners
                 * happy. TCP re-uses the buffer and destroys the original
                 * content.
                 */
                struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);
                if (skbn != NULL) {
                        kfree_skb(skb);
                        skb = skbn;
                }

                skb_pull(skb, 1);       /* Remove PID */
                skb->h.raw    = skb->data;
                skb->nh.raw   = skb->data;
                skb->dev      = ax25->ax25_dev->dev;
                skb->pkt_type = PACKET_HOST;
                skb->protocol = htons(ETH_P_IP);
                ip_rcv(skb, skb->dev, NULL);    /* Wrong ptype */
                return 1;
        }
#endif
        if (pid == AX25_P_SEGMENT) {
                skb_pull(skb, 1);       /* Remove PID */
                return ax25_rx_fragment(ax25, skb);
        }

        if ((func = ax25_protocol_function(pid)) != NULL) {
                skb_pull(skb, 1);       /* Remove PID */
                return (*func)(skb, ax25);
        }

        if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
                if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
                    ax25->pidincl) {
                        if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
                                queued = 1;
                        else
                                ax25->condition |= AX25_COND_OWN_RX_BUSY;
                }
        }

        return queued;
}
inline struct sk_buff *_rtw_skb_copy(const struct sk_buff *skb)
{
#ifdef PLATFORM_LINUX
        return skb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
        return NULL;
#endif /* PLATFORM_FREEBSD */
}
void hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
{
        __u16 appl_id;
        int _len, _len2;
        __u8 msghead[64];

        appl_id = CAPIMSG_APPID(skb->data);
        switch (_hycapi_appCheck(appl_id, ctrl->cnr)) {
        case 0:
/*              printk(KERN_INFO "Need to register\n"); */
                hycapi_register_internal(ctrl,
                                         appl_id,
                                         &(hycapi_applications[appl_id-1].rp));
                break;
        case 1:
                break;
        default:
                printk(KERN_ERR "HYCAPI: Controller mixup!\n");
                return;
        }
        switch (CAPIMSG_CMD(skb->data)) {
        case CAPI_DISCONNECT_B3_RESP:
                ctrl->free_ncci(ctrl, appl_id,
                                CAPIMSG_NCCI(skb->data));
                break;
        case CAPI_DATA_B3_REQ:
                _len = CAPIMSG_LEN(skb->data);
                if (_len > 22) {
                        _len2 = _len - 22;
                        memcpy(msghead, skb->data, 22);
                        memcpy(skb->data + _len2, msghead, 22);
                        skb_pull(skb, _len2);
                        CAPIMSG_SETLEN(skb->data, 22);
                }
                break;
        case CAPI_LISTEN_REQ:
                if (hycapi_applications[appl_id-1].listen_req[ctrl->cnr-1]) {
                        kfree_skb(hycapi_applications[appl_id-1].listen_req[ctrl->cnr-1]);
                        hycapi_applications[appl_id-1].listen_req[ctrl->cnr-1] = NULL;
                }
                if (!(hycapi_applications[appl_id-1].listen_req[ctrl->cnr-1] =
                      skb_copy(skb, GFP_ATOMIC))) {
                        printk(KERN_ERR "HYSDN: memory squeeze in private_listen\n");
                }
                break;
        default:
                break;
        }
        hycapi_sendmsg_internal(ctrl, skb);
}
int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *ptype, struct net_device *orig_dev)
{
        struct sk_buff *nskb;
        struct x25_neigh *nb;

        if (!net_eq(dev_net(dev), &init_net))
                goto drop;

        nskb = skb_copy(skb, GFP_ATOMIC);
        if (!nskb)
                goto drop;
        kfree_skb(skb);
        skb = nskb;

        /*
         * Packet received from unrecognised device, throw it away.
         */
        nb = x25_get_neigh(dev);
        if (!nb) {
                printk(KERN_DEBUG "X.25: unknown neighbour - %s\n", dev->name);
                goto drop;
        }

        if (!pskb_may_pull(skb, 1))
                return 0;

        switch (skb->data[0]) {

        case X25_IFACE_DATA:
                skb_pull(skb, 1);
                if (x25_receive_data(skb, nb)) {
                        x25_neigh_put(nb);
                        goto out;
                }
                break;

        case X25_IFACE_CONNECT:
                x25_link_established(nb);
                break;

        case X25_IFACE_DISCONNECT:
                x25_link_terminated(nb);
                break;
        }
        x25_neigh_put(nb);
drop:
        kfree_skb(skb);
out:
        return 0;
}
int rtw_mlcst2unicst(_adapter *padapter, struct sk_buff *skb)
{
        struct sta_priv *pstapriv = &padapter->stapriv;
        struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
        _irqL irqL;
        _list *phead, *plist;
        struct sk_buff *newskb;
        struct sta_info *psta = NULL;
        s32 res;

        _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
        phead = &pstapriv->asoc_list;
        plist = get_next(phead);

        //free sta asoc_queue
        while ((rtw_end_of_queue_search(phead, plist)) == _FALSE) {
                psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);

                plist = get_next(plist);

                /* avoid come from STA1 and send back STA1 */
                if (!memcmp(psta->hwaddr, &skb->data[6], 6))
                        continue;

                newskb = skb_copy(skb, GFP_ATOMIC);

                if (newskb) {
                        memcpy(newskb->data, psta->hwaddr, 6);
                        res = rtw_xmit(padapter, &newskb);
                        if (res < 0) {
                                DBG_871X("%s()-%d: rtw_xmit() return error!\n", __FUNCTION__, __LINE__);
                                pxmitpriv->tx_drop++;
                                dev_kfree_skb_any(newskb);
                        } else
                                pxmitpriv->tx_pkts++;
                } else {
                        DBG_871X("%s-%d: skb_copy() failed!\n", __FUNCTION__, __LINE__);
                        pxmitpriv->tx_drop++;

                        _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);

                        //dev_kfree_skb_any(skb);
                        return _FALSE;  // Caller shall tx this multicast frame via normal way.
                }
        }

        _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);

        dev_kfree_skb_any(skb);
        return _TRUE;
}
static int bpf_dp_ctx_copy(struct bpf_dp_context *ctx,
                           struct bpf_dp_context *orig_ctx)
{
        struct sk_buff *skb = skb_copy(orig_ctx->skb, GFP_ATOMIC);

        if (!skb)
                return -ENOMEM;

        ctx->context = orig_ctx->context;
        ctx->skb = skb;
        ctx->dp = orig_ctx->dp;
        ctx->stack = orig_ctx->stack;

        return 0;
}
static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
{
        struct sta_priv *pstapriv = &padapter->stapriv;
        struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
        unsigned long irql;
        struct list_head *phead, *plist;
        struct sk_buff *newskb;
        struct sta_info *psta = NULL;
        s32 res;

        _enter_critical_bh(&pstapriv->asoc_list_lock, &irql);
        phead = &pstapriv->asoc_list;
        plist = get_next(phead);

        /* free sta asoc_queue */
        while (!rtw_end_of_queue_search(phead, plist)) {
                psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);

                plist = get_next(plist);

                /* avoid come from STA1 and send back STA1 */
                if (!memcmp(psta->hwaddr, &skb->data[6], 6))
                        continue;

                newskb = skb_copy(skb, GFP_ATOMIC);

                if (newskb) {
                        memcpy(newskb->data, psta->hwaddr, 6);
                        res = rtw_xmit(padapter, &newskb);
                        if (res < 0) {
                                DBG_88E("%s()-%d: rtw_xmit() return error!\n", __func__, __LINE__);
                                pxmitpriv->tx_drop++;
                                dev_kfree_skb_any(newskb);
                        } else {
                                pxmitpriv->tx_pkts++;
                        }
                } else {
                        DBG_88E("%s-%d: skb_copy() failed!\n", __func__, __LINE__);
                        pxmitpriv->tx_drop++;

                        _exit_critical_bh(&pstapriv->asoc_list_lock, &irql);

                        return false;   /* Caller shall tx this multicast frame via normal way. */
                }
        }

        _exit_critical_bh(&pstapriv->asoc_list_lock, &irql);
        dev_kfree_skb_any(skb);
        return true;
}
static int inline
set_ect_tcp(struct sk_buff **pskb, struct iphdr *iph,
            const struct ipt_ECN_info *einfo)
{
        struct tcphdr *tcph;
        u_int16_t *tcpflags;
        u_int16_t diffs[2];

        /* raw socket (tcpdump) may have clone of incoming
         * skb: don't disturb it --RR */
        if (skb_cloned(*pskb) && !(*pskb)->sk) {
                struct sk_buff *nskb = skb_copy(*pskb, GFP_ATOMIC);
                if (!nskb)
                        return NF_DROP;
                kfree_skb(*pskb);
                *pskb = nskb;
        }

        iph = (*pskb)->nh.iph;
        tcph = (void *)iph + iph->ihl * 4;
        tcpflags = (u_int16_t *)tcph + 6;

        diffs[0] = *tcpflags;

        if (einfo->operation & IPT_ECN_OP_SET_ECE
            && tcph->ece != einfo->proto.tcp.ece) {
                tcph->ece = einfo->proto.tcp.ece;
        }

        if (einfo->operation & IPT_ECN_OP_SET_CWR
            && tcph->cwr != einfo->proto.tcp.cwr) {
                tcph->cwr = einfo->proto.tcp.cwr;
        }

        if (diffs[0] != *tcpflags) {
                diffs[0] = diffs[0] ^ 0xFFFF;
                diffs[1] = *tcpflags;
                tcph->check = csum_fold(csum_partial((char *)diffs,
                                                     sizeof(diffs),
                                                     tcph->check ^ 0xFFFF));
                (*pskb)->nfcache |= NFC_ALTERED;
                return 1;
        }

        return 0;
}
static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
{
        struct sta_priv *pstapriv = &padapter->stapriv;
        struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
        struct list_head *phead, *plist;
        struct sk_buff *newskb;
        struct sta_info *psta = NULL;
        s32 res;

        spin_lock_bh(&pstapriv->asoc_list_lock);
        phead = &pstapriv->asoc_list;
        plist = phead->next;

        /* free sta asoc_queue */
        while (phead != plist) {
                psta = container_of(plist, struct sta_info, asoc_list);

                plist = plist->next;

                /* avoid come from STA1 and send back STA1 */
                if (!memcmp(psta->hwaddr, &skb->data[6], 6))
                        continue;

                newskb = skb_copy(skb, GFP_ATOMIC);

                if (newskb) {
                        memcpy(newskb->data, psta->hwaddr, 6);
                        res = rtw_xmit(padapter, &newskb);
                        if (res < 0) {
                                DBG_88E("%s()-%d: rtw_xmit() return error!\n", __func__, __LINE__);
                                pxmitpriv->tx_drop++;
                                dev_kfree_skb_any(newskb);
                        } else {
                                pxmitpriv->tx_pkts++;
                        }
                } else {
                        DBG_88E("%s-%d: skb_copy() failed!\n", __func__, __LINE__);
                        pxmitpriv->tx_drop++;

                        spin_unlock_bh(&pstapriv->asoc_list_lock);
                        return false;   /* Caller shall tx this multicast frame via normal way. */
                }
        }

        spin_unlock_bh(&pstapriv->asoc_list_lock);
        dev_kfree_skb_any(skb);
        return true;
}
static int ebt_target_vlan(struct sk_buff **pskb, unsigned int hooknr,
                           const struct net_device *in,
                           const struct net_device *out,
                           const void *data, unsigned int datalen)
{
        struct ebt_vlan_t_info *info = (struct ebt_vlan_t_info *)data;
        struct vlan_hdr *vh, frame;
        unsigned short TCITmp;

        /* raw socket (tcpdump) may have clone of incoming
           skb: don't disturb it --RR */
        if (skb_shared(*pskb) || (skb_cloned(*pskb) && !(*pskb)->sk)) {
                struct sk_buff *nskb = skb_copy(*pskb, GFP_ATOMIC);
                if (!nskb)
                        return NF_DROP;
                kfree_skb(*pskb);
                *pskb = nskb;
        }

        if ((*pskb)->protocol == __constant_htons(ETH_P_8021Q)) {
                vh = skb_header_pointer(*pskb, 0, sizeof(frame), &frame);

                if (GET_BITMASK(EBT_VLAN_TARGET_ID)) {
                        TCITmp = ntohs(vh->h_vlan_TCI) & ~VLAN_TARGET_VID_MASK;
                        vh->h_vlan_TCI = htons(TCITmp | info->id);
                }

                if (GET_BITMASK(EBT_VLAN_TARGET_PRIO)) {
                        TCITmp = ntohs(vh->h_vlan_TCI) & ~VLAN_TARGET_PRI_MASK;
                        vh->h_vlan_TCI = htons(TCITmp | (info->prio << 13));
                }
        }
        (*pskb)->nfcache |= NFC_ALTERED;
        return info->target;
}
/*
 *      This is where all valid I frames are sent to, to be dispatched to
 *      whichever protocol requires them.
 */
static int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
{
        int (*func)(struct sk_buff *, ax25_cb *);
        struct sk_buff *skbn;
        volatile int queued = 0;
        unsigned char pid;

        if (skb == NULL)
                return 0;

        ax25->idletimer = ax25->idle;

        pid = *skb->data;

#ifdef CONFIG_INET
        if (pid == AX25_P_IP) {
                if ((skbn = skb_copy(skb, GFP_ATOMIC)) != NULL) {
                        kfree_skb(skb, FREE_READ);
                        skb = skbn;
                }
                skb_pull(skb, 1);       /* Remove PID */
                skb->h.raw = skb->data;
                ip_rcv(skb, ax25->device, NULL);        /* Wrong ptype */
                return 1;
        }
#endif
        if (pid == AX25_P_SEGMENT) {
                skb_pull(skb, 1);       /* Remove PID */
                return ax25_rx_fragment(ax25, skb);
        }

        if ((func = ax25_protocol_function(pid)) != NULL) {
                skb_pull(skb, 1);       /* Remove PID */
                return (*func)(skb, ax25);
        }

        if (ax25->sk != NULL && ax25_dev_get_value(ax25->device, AX25_VALUES_CONMODE) == 2) {
                if ((!ax25->pidincl && ax25->sk->protocol == pid) || ax25->pidincl) {
                        if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
                                queued = 1;
                        else
                                ax25->condition |= AX25_COND_OWN_RX_BUSY;
                }
        }

        return queued;
}
int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *ptype, struct net_device *orig_dev)
{
        struct sk_buff *nskb;
        struct x25_neigh *nb;

        if (!net_eq(dev_net(dev), &init_net))
                goto drop;

        nskb = skb_copy(skb, GFP_ATOMIC);
        if (!nskb)
                goto drop;
        kfree_skb(skb);
        skb = nskb;

        /*
         * Packet received from unrecognised device, throw it away.
         */
        nb = x25_get_neigh(dev);
        if (!nb) {
                printk(KERN_DEBUG "X.25: unknown neighbour - %s\n", dev->name);
                goto drop;
        }

        switch (skb->data[0]) {
        case 0x00:
                skb_pull(skb, 1);
                if (x25_receive_data(skb, nb)) {
                        x25_neigh_put(nb);
                        goto out;
                }
                break;
        case 0x01:
                x25_link_established(nb);
                break;
        case 0x02:
                x25_link_terminated(nb);
                break;
        }
        x25_neigh_put(nb);
drop:
        kfree_skb(skb);
out:
        return 0;
}
static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
{
        if (VLAN_DEV_INFO(skb->dev)->flags & 1) {
                if (skb_shared(skb) || skb_cloned(skb)) {
                        struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
                        kfree_skb(skb);
                        skb = nskb;
                }
                if (skb) {
                        /* Lifted from Gleb's VLAN code... */
                        memmove(skb->data - ETH_HLEN,
                                skb->data - VLAN_ETH_HLEN, 12);
                        skb->mac.raw += VLAN_HLEN;
                }
        }
        return skb;
}
static void hycapi_restart_internal(struct capi_ctr *ctrl)
{
        int i;
        struct sk_buff *skb;
#ifdef HYCAPI_PRINTFNAMES
        printk(KERN_WARNING "HYSDN: hycapi_restart_internal");
#endif
        for (i = 0; i < CAPI_MAXAPPL; i++) {
                if (_hycapi_appCheck(i + 1, ctrl->cnr) == 1) {
                        hycapi_register_internal(ctrl, i + 1,
                                                 &hycapi_applications[i].rp);
                        if (hycapi_applications[i].listen_req[ctrl->cnr - 1]) {
                                skb = skb_copy(hycapi_applications[i].listen_req[ctrl->cnr - 1],
                                               GFP_ATOMIC);
                                hycapi_sendmsg_internal(ctrl, skb);
                        }
                }
        }
}
static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
{
        if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
                if (skb_shared(skb) || skb_cloned(skb)) {
                        struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
                        kfree_skb(skb);
                        skb = nskb;
                }
                if (skb) {
                        /* Lifted from Gleb's VLAN code... */
                        memmove(skb->data - ETH_HLEN,
                                skb->data - VLAN_ETH_HLEN, 12);
                        skb->mac_header += VLAN_HLEN;
                }
        }
        return skb;
}
/***** Rx *****/
void zfLnxRecv80211(zdev_t *dev, zbuf_t *buf, struct zsAdditionInfo *addInfo)
{
        u16_t frameType;
        u16_t frameCtrl;
        u16_t frameSubtype;
        zbuf_t *skb1;
        struct usbdrv_private *macp = dev->priv;

        //frameCtrl = zmw_buf_readb(dev, buf, 0);
        frameCtrl = *(u8_t *)((u8_t *)buf->data);
        frameType = frameCtrl & 0xf;
        frameSubtype = frameCtrl & 0xf0;

        if ((frameType == 0x0) && (macp->forwardMgmt)) {
                switch (frameSubtype) {
                /* Beacon */
                case 0x80:
                /* Probe response */
                case 0x50:
                        skb1 = skb_copy(buf, GFP_ATOMIC);
                        if (skb1 != NULL) {
                                skb1->dev = dev;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
                                skb1->mac.raw = skb1->data;
#else
                                skb1->mac_header = skb1->data;
#endif
                                skb1->ip_summed = CHECKSUM_NONE;
                                skb1->pkt_type = PACKET_OTHERHOST;
                                skb1->protocol = __constant_htons(0x0019);      /* ETH_P_80211_RAW */
                                netif_rx(skb1);
                        }
                        break;
                default:
                        break;
                }
        }

        zfiRecv80211(dev, buf, addInfo);
        return;
}
static int ebt_target_dnat(struct sk_buff **pskb, unsigned int hooknr,
                           const struct net_device *in,
                           const struct net_device *out,
                           const void *data, unsigned int datalen)
{
        struct ebt_nat_info *info = (struct ebt_nat_info *)data;

        if (skb_shared(*pskb) || skb_cloned(*pskb)) {
                struct sk_buff *nskb;

                nskb = skb_copy(*pskb, GFP_ATOMIC);
                if (!nskb)
                        return NF_DROP;
                if ((*pskb)->sk)
                        skb_set_owner_w(nskb, (*pskb)->sk);
                kfree_skb(*pskb);
                *pskb = nskb;
        }
        memcpy(eth_hdr(*pskb)->h_dest, info->mac, ETH_ALEN);
        return info->target;
}
static int mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
                                   struct sk_buff *skb)
{
        struct mac80211_hwsim_data *data = hw->priv;
        int i, ack = 0;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_rx_status rx_status;

        memset(&rx_status, 0, sizeof(rx_status));
        /* TODO: set mactime */
        rx_status.freq = data->channel->center_freq;
        rx_status.band = data->channel->band;
        rx_status.rate_idx = info->tx_rate_idx;
        /* TODO: simulate signal strength (and optional packet drop) */

        /* Copy skb to all enabled radios that are on the current frequency */
        for (i = 0; i < hwsim_radio_count; i++) {
                struct mac80211_hwsim_data *data2;
                struct sk_buff *nskb;

                if (hwsim_radios[i] == NULL || hwsim_radios[i] == hw)
                        continue;

                data2 = hwsim_radios[i]->priv;
                if (!data2->started || !data2->radio_enabled ||
                    data->channel->center_freq != data2->channel->center_freq)
                        continue;

                nskb = skb_copy(skb, GFP_ATOMIC);
                if (nskb == NULL)
                        continue;

                if (memcmp(hdr->addr1, hwsim_radios[i]->wiphy->perm_addr,
                           ETH_ALEN) == 0)
                        ack = 1;
                ieee80211_rx_irqsafe(hwsim_radios[i], nskb, &rx_status);
        }

        return ack;
}
int goose_enhan_retrans(struct sk_buff *__skb)
{
        struct sk_buff *skb_cp, *skb = __skb;
        unsigned int waiting_time = retran_intvl;       /* ms */
        unsigned int total_waiting_time = 0;            /* ms */
        unsigned trans_count = 0;
        int ret;

goose_enhan_retrans_redo:
        /* Make a skb copy for retransmission */
        skb_cp = skb_copy(skb, GFP_ATOMIC);

        ret = dev_queue_xmit(skb);
        skb = skb_cp;
        if (unlikely(ret != 0))
                goto goose_enhan_retrans_exit;

        /* skb_copy() may fail under memory pressure; without a copy there is
         * nothing left to retransmit, so stop here.
         */
        if (unlikely(!skb))
                goto goose_enhan_retrans_exit;

        /* Compute the overall delay */
        total_waiting_time += waiting_time;
        waiting_time += retran_incre;
        if (waiting_time > max_retran_intvl)
                waiting_time = max_retran_intvl;

        /* Sleep for a while */
        msleep_interruptible(waiting_time);

        /* It is necessary to set an upper limit for number of retransmissions */
        if (unlikely((total_waiting_time < delay_thre) &&
                     (trans_count++ < MAX_GOOSE_TRANS_NUM)))
                goto goose_enhan_retrans_redo;

        /* Retransmission finishes.
         * Increase the number of packets transmitted by Enhanced Transmission
         */
        num_pkt_trans++;

goose_enhan_retrans_exit:
        kfree_skb(skb);
        return ret;
}
void tipc_cltr_broadcast(struct sk_buff *buf)
{
        struct sk_buff *buf_copy;
        struct cluster *c_ptr;
        struct tipc_node *n_ptr;
        u32 n_num;
        u32 tstart;
        u32 tstop;
        u32 node_type;

        if (tipc_mode == TIPC_NET_MODE) {
                c_ptr = tipc_cltr_find(tipc_own_addr);
                assert(in_own_cluster(c_ptr->addr));    /* For now */

                /* Send to standard nodes, then repeat loop sending to slaves */
                tstart = 1;
                tstop = c_ptr->highest_node;
                for (node_type = 1; node_type <= 2; node_type++) {
                        for (n_num = tstart; n_num <= tstop; n_num++) {
                                n_ptr = c_ptr->nodes[n_num];
                                if (n_ptr && tipc_node_has_active_links(n_ptr)) {
                                        buf_copy = skb_copy(buf, GFP_ATOMIC);
                                        if (buf_copy == NULL)
                                                goto exit;
                                        msg_set_destnode(buf_msg(buf_copy),
                                                         n_ptr->addr);
                                        tipc_link_send(buf_copy, n_ptr->addr,
                                                       n_ptr->addr);
                                }
                        }
                        tstart = LOWEST_SLAVE;
                        tstop = c_ptr->highest_slave;
                }
        }
exit:
        buf_discard(buf);
}