/***
 *  rt_packet_rcv
 */
int rt_packet_rcv(struct rtskb *skb, struct rtpacket_type *pt)
{
    /* Recover the socket from its embedded packet type - an open-coded
     * container_of(), see the note after the next variant. */
    struct rtsocket *sock = (struct rtsocket *)(((u8 *)pt) -
        ((u8 *)&((struct rtsocket *)0)->prot.packet));
    int             ifindex = sock->prot.packet.ifindex;
    void            (*callback_func)(struct rtdm_dev_context *, void *);
    void            *callback_arg;
    unsigned long   flags;

    /* Wrong incoming device, or no replacement buffer left in the socket
     * pool => drop. */
    if (((ifindex != 0) && (ifindex != skb->rtdev->ifindex)) ||
        (rtskb_acquire(skb, &sock->skb_pool) != 0))
        kfree_rtskb(skb);
    else {
        rtdev_reference(skb->rtdev);

        rtskb_queue_tail(&sock->incoming, skb);
        rtos_event_sem_signal(&sock->wakeup_event);

        rtos_spin_lock_irqsave(&sock->param_lock, flags);
        callback_func = sock->callback_func;
        callback_arg  = sock->callback_arg;
        rtos_spin_unlock_irqrestore(&sock->param_lock, flags);

        if (callback_func)
            callback_func(rt_socket_context(sock), callback_arg);
    }
    return 0;
}
/***
 *  rt_packet_rcv
 */
int rt_packet_rcv(struct rtskb *skb, struct rtpacket_type *pt)
{
    struct rtsocket *sock = (struct rtsocket *)(((u8 *)pt) -
        ((u8 *)&((struct rtsocket *)0)->prot.packet));
    int             ifindex = sock->prot.packet.ifindex;
    void            (*callback_func)(struct rtdm_dev_context *, void *);
    void            *callback_arg;
    rtdm_lockctx_t  context;

    if (((ifindex != 0) && (ifindex != skb->rtdev->ifindex)) ||
        (rtskb_acquire(skb, &sock->skb_pool) != 0))
        kfree_rtskb(skb);
    else {
        rtdev_reference(skb->rtdev);

        rtskb_queue_tail(&sock->incoming, skb);
        rtdm_sem_up(&sock->pending_sem);

        rtdm_lock_get_irqsave(&sock->param_lock, context);
        callback_func = sock->callback_func;
        callback_arg  = sock->callback_arg;
        rtdm_lock_put_irqrestore(&sock->param_lock, context);

        if (callback_func)
            callback_func(rt_socket_context(sock), callback_arg);
    }
    return 0;
}
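/*
 * Side note on the pointer arithmetic in the two variants above: both
 * open-code what the kernel's container_of() macro expresses directly,
 * namely recovering the enclosing rtsocket from a pointer to its embedded
 * packet-type member (the later RTDM-fd variant below uses container_of()
 * as-is). A minimal sketch of the equivalent idiom:
 */
#include <stddef.h> /* offsetof */

#define my_container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* ...which would reduce the cast chain above to:
 *
 *   struct rtsocket *sock = my_container_of(pt, struct rtsocket, prot.packet);
 */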
static int rtcfg_rx_handler(struct rtskb *rtskb, struct rtpacket_type *pt)
{
    if (rtskb_acquire(rtskb, &rtcfg_pool) == 0) {
        rtdev_reference(rtskb->rtdev);

        rtskb_queue_tail(&rx_queue, rtskb);
        rtos_event_sem_signal(&rx_event);
    } else
        kfree_rtskb(rtskb);

    return 0;
}
/*
 * This function returns an rtskb that contains the complete, accumulated
 * IP message. If not all fragments of the IP message have been received
 * yet, it returns NULL.
 * Note: the IP header must already have been pulled from the rtskb!
 */
struct rtskb *rt_ip_defrag(struct rtskb *skb, struct rtinet_protocol *ipprot)
{
    unsigned int    more_frags;
    unsigned int    offset;
    struct rtsocket *sock;
    struct iphdr    *iph = skb->nh.iph;
    int             ret;

    counter++;

    /* Parse the IP header */
    offset = ntohs(iph->frag_off);
    more_frags = offset & IP_MF;
    offset &= IP_OFFSET;
    offset <<= 3;   /* offset is in 8-byte chunks */

    /* First fragment? */
    if (offset == 0) {
        /* Get the destination socket */
        if ((sock = ipprot->dest_socket(skb)) == NULL) {
            /* Drop the rtskb */
            kfree_rtskb(skb);
            return NULL;
        }

        /* Acquire the rtskb at the expense of the protocol pool */
        ret = rtskb_acquire(skb, &sock->skb_pool);

        /* Socket is now implicitly locked by the rtskb */
        rt_socket_dereference(sock);

        if (ret != 0) {
            /* Drop the rtskb */
            kfree_rtskb(skb);
        } else {
            /* Allocate a new collector */
            alloc_collector(skb, sock);
        }
        return NULL;
    } else {
        /* Add to an existing collector */
        return add_to_collector(skb, offset, more_frags);
    }
}
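/*
 * A minimal sketch illustrating the frag_off decoding above, under the
 * usual Ethernet assumptions (1500-byte MTU, 20-byte IP header): each full
 * fragment then carries 1480 payload bytes = 185 eight-byte units, so the
 * second fragment of a large datagram arrives with frag_off = IP_MF | 185.
 * Values and helper name are illustrative only.
 */
#include <assert.h>

#define IP_MF     0x2000    /* "more fragments" flag */
#define IP_OFFSET 0x1FFF    /* mask for the 13-bit fragment offset */

static void frag_off_example(void)
{
    unsigned int offset = IP_MF | 185;          /* host order, after ntohs() */
    unsigned int more_frags = offset & IP_MF;   /* != 0: more fragments follow */

    offset &= IP_OFFSET;                        /* 185 eight-byte units */
    offset <<= 3;                               /* = 1480 bytes into the message */

    assert(more_frags != 0 && offset == 1480);
}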
/***
 *  rt_packet_rcv
 */
int rt_packet_rcv(struct rtskb *skb, struct rtpacket_type *pt)
{
    struct rtsocket *sock = (struct rtsocket *)(((u8 *)pt) -
        ((u8 *)&((struct rtsocket *)0)->prot.packet));

    if (((sock->prot.packet.ifindex != 0) &&
         (sock->prot.packet.ifindex != skb->rtdev->ifindex)) ||
        (rtskb_acquire(skb, &sock->skb_pool) != 0))
        kfree_rtskb(skb);
    else {
        rtdev_reference(skb->rtdev);

        rtskb_queue_tail(&sock->incoming, skb);
        rtos_event_signal(&sock->wakeup_event);

        if (sock->wakeup != NULL)
            sock->wakeup(sock->fd, sock->wakeup_arg);
    }
    return 0;
}
int rtmac_vnic_rx(struct rtskb *skb, u16 type)
{
    struct rtmac_priv *mac_priv = skb->rtdev->mac_priv;
    struct rtskb_queue *pool = &mac_priv->vnic_skb_pool;

    if (rtskb_acquire(skb, pool) != 0) {
        mac_priv->vnic_stats.rx_dropped++;
        kfree_rtskb(skb);
        return -1;
    }

    skb->protocol = type;
    rtskb_queue_tail(&rx_queue, skb);

    rt_pend_linux_srq(vnic_srq);

    return 0;
}
int rtmac_vnic_rx(struct rtskb *rtskb, u16 type)
{
    struct rtmac_priv *mac_priv = rtskb->rtdev->mac_priv;
    struct rtskb_pool *pool = &mac_priv->vnic_skb_pool;

    if (rtskb_acquire(rtskb, pool) != 0) {
        mac_priv->vnic_stats.rx_dropped++;
        kfree_rtskb(rtskb);
        return -1;
    }

    rtskb->protocol = type;
    rtskb_queue_tail(&rx_queue, rtskb);

    rtdm_nrtsig_pend(&vnic_signal);

    return 0;
}
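/*
 * Both vnic variants only queue the packet and signal; the actual hand-over
 * to Linux happens later, outside real-time context. A minimal sketch of
 * that non-RT counterpart, assuming a Xenomai-3-style nrtsig handler
 * signature; vnic_netdev_of() is a hypothetical helper standing in for
 * however the driver maps an rtskb back to its virtual Linux device.
 */
static void vnic_signal_handler(rtdm_nrtsig_t *nrt_sig, void *arg)
{
    struct rtskb *rtskb;

    while ((rtskb = rtskb_dequeue(&rx_queue)) != NULL) {
        struct net_device *dev = vnic_netdev_of(rtskb); /* hypothetical */
        struct sk_buff *skb = dev_alloc_skb(rtskb->len + 2);

        if (skb) {
            skb_reserve(skb, 2);        /* align the IP header */
            memcpy(skb_put(skb, rtskb->len), rtskb->data, rtskb->len);
            skb->dev = dev;
            skb->protocol = rtskb->protocol;
            netif_rx(skb);              /* inject into the Linux stack */
        }
        kfree_rtskb(rtskb);             /* return the buffer to the vnic pool */
    }
}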
/***
 *  rt_packet_rcv
 */
static int rt_packet_rcv(struct rtskb *skb, struct rtpacket_type *pt)
{
    struct rtsocket *sock = container_of(pt, struct rtsocket,
                                         prot.packet.packet_type);
    int             ifindex = sock->prot.packet.ifindex;
    void            (*callback_func)(struct rtdm_fd *, void *);
    void            *callback_arg;
    rtdm_lockctx_t  context;

    if (unlikely((ifindex != 0) && (ifindex != skb->rtdev->ifindex)))
        /* not bound to this device - leave the skb to the caller */
        return -EUNATCH;

#ifdef CONFIG_XENO_DRIVERS_NET_ETH_P_ALL
    if (pt->type == htons(ETH_P_ALL)) {
        /* deliver a private copy; the original skb continues through
         * the stack to the real protocol handler */
        struct rtskb *clone_skb = rtskb_clone(skb, &sock->skb_pool);
        if (clone_skb == NULL)
            goto out;
        skb = clone_skb;
    } else
#endif /* CONFIG_XENO_DRIVERS_NET_ETH_P_ALL */
    if (unlikely(rtskb_acquire(skb, &sock->skb_pool) < 0)) {
        kfree_rtskb(skb);
        goto out;
    }

    rtskb_queue_tail(&sock->incoming, skb);
    rtdm_sem_up(&sock->pending_sem);

    rtdm_lock_get_irqsave(&sock->param_lock, context);
    callback_func = sock->callback_func;
    callback_arg  = sock->callback_arg;
    rtdm_lock_put_irqrestore(&sock->param_lock, context);

    if (callback_func)
        callback_func(rt_socket_fd(sock), callback_arg);

out:
    return 0;
}
/* ************************************************************************
 * This function runs in real-time context.
 *
 * It is called from inside rtnet whenever a packet has been received that
 * has to be processed by rtnetproxy.
 * ************************************************************************ */
static int rtnetproxy_recv(struct rtskb *rtskb)
{
    /* Acquire rtskb (JK) */
    if (rtskb_acquire(rtskb, &rtskb_pool) != 0) {
        rtdm_printk("rtnetproxy_recv: No free rtskb in pool\n");
        kfree_rtskb(rtskb);
    }
    /* Place the rtskb in the ringbuffer: */
    else if (write_to_ringbuffer(&ring_rtskb_rtnet_kernel, rtskb)) {
        /* Switch over to kernel context: */
        rtdm_nrtsig_pend(&rtnetproxy_signal);
    } else {
        /* No space in ringbuffer => free rtskb here... */
        rtdm_printk("rtnetproxy_recv: No space in queue\n");
        kfree_rtskb(rtskb);
    }

    return 0;
}
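/*
 * write_to_ringbuffer() is not part of this excerpt. For this kind of
 * RT-to-Linux handoff (exactly one writer in real-time context, one reader
 * in the nrtsig handler) a lock-free single-producer/single-consumer ring
 * suffices; a minimal sketch with illustrative names and size (memory
 * barriers omitted for brevity - real code must order head/tail updates):
 */
#define RING_SIZE 32    /* must be a power of two */

struct rtskb_ring {
    struct rtskb *slot[RING_SIZE];
    volatile unsigned int head;     /* written only by the producer */
    volatile unsigned int tail;     /* written only by the consumer */
};

static int ring_put(struct rtskb_ring *ring, struct rtskb *skb)
{
    unsigned int next = (ring->head + 1) & (RING_SIZE - 1);

    if (next == ring->tail)
        return 0;                   /* full => caller must free the rtskb */

    ring->slot[ring->head] = skb;
    ring->head = next;
    return 1;                       /* queued; the consumer side frees it */
}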
/***
 *  rt_ip_local_deliver
 */
static inline int rt_ip_local_deliver(struct rtskb *skb)
{
    struct iphdr            *iph = skb->nh.iph;
    unsigned short          protocol = iph->protocol;
    struct rtinet_protocol  *ipprot;
    struct rtsocket         *sock;
    int                     ret;

    ipprot = rt_inet_protocols[rt_inet_hashkey(protocol)];

    /* Check if we are supporting the protocol */
    if ((ipprot != NULL) && (ipprot->protocol == protocol)) {
        __rtskb_pull(skb, iph->ihl*4);

        /* Point into the IP datagram, just past the header. */
        skb->h.raw = skb->data;

        /* Reassemble IP fragments */
        if (iph->frag_off & htons(IP_MF|IP_OFFSET)) {
            skb = rt_ip_defrag(skb, ipprot);
            if (!skb)
                return 0;
        } else {
            /* Get the destination socket */
            if ((sock = ipprot->dest_socket(skb)) == NULL) {
                kfree_rtskb(skb);
                return 0;
            }

            /* Acquire the rtskb at the expense of the protocol pool */
            ret = rtskb_acquire(skb, &sock->skb_pool);

            /* Socket is now implicitly locked by the rtskb */
            rt_socket_dereference(sock);

            if (ret != 0) {
                kfree_rtskb(skb);
                return 0;
            }
        }

        /* Deliver the packet to the next layer */
        ret = ipprot->rcv_handler(skb);
    } else {
#ifdef CONFIG_RTNET_ADDON_PROXY
        /* If a fallback handler for the IP protocol has been installed,
         * call it! */
        if (ip_fallback_handler) {
            ret = ip_fallback_handler(skb);
            if (ret) {
                rtos_print("RTnet: fallback handler failed\n");
            }
            return ret;
        }
#endif /* CONFIG_RTNET_ADDON_PROXY */

        rtos_print("RTnet: no protocol found\n");
        kfree_rtskb(skb);
        ret = 0;
    }

    return ret;
}
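/*
 * rt_inet_protocols[] above is a small hash table rather than a full
 * 256-entry array, which is why the handler is re-checked against the
 * protocol ID after the lookup. A sketch of the matching registration side,
 * assuming a simple mask-based hash (table size and hash are illustrative,
 * not the RTnet originals):
 */
#define MAX_RT_INET_PROTOCOLS 32
#define rt_inet_hashkey(id)   ((id) & (MAX_RT_INET_PROTOCOLS - 1))

struct rtinet_protocol *rt_inet_protocols[MAX_RT_INET_PROTOCOLS];

static void rt_inet_add_protocol(struct rtinet_protocol *prot)
{
    unsigned int hash = rt_inet_hashkey(prot->protocol);

    /* Only one protocol per slot: a colliding ID simply stays unsupported. */
    if (rt_inet_protocols[hash] == NULL)
        rt_inet_protocols[hash] = prot;
}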
/*
 * Add the fragment to the collector that matches the IP header of the
 * passed rtskb. Returns the first rtskb of the now complete message, or
 * NULL if the message is still incomplete or had to be dropped.
 */
static struct rtskb *add_to_collector(struct rtskb *skb, unsigned int offset, int more_frags)
{
    int                 i;
    unsigned int        flags;
    struct ip_collector *p_coll;
    struct iphdr        *iph = skb->nh.iph;
    struct rtskb        *first_skb;

    /* Search in existing collectors */
    for (i = 0; i < COLLECTOR_COUNT; i++) {
        p_coll = &collector[i];

        flags = rt_spin_lock_irqsave(&p_coll->frags.lock);
        if (p_coll->in_use &&
            (iph->saddr == p_coll->saddr) && (iph->daddr == p_coll->daddr) &&
            (iph->id == p_coll->id) && (iph->protocol == p_coll->protocol)) {
            first_skb = p_coll->frags.first;

            /* Acquire the rtskb at the expense of the protocol pool */
            if (rtskb_acquire(skb, &p_coll->sock->skb_pool) != 0) {
                /* We have to drop this fragment => clean up the whole chain */
                p_coll->in_use = 0;
                rt_spin_unlock_irqrestore(flags, &p_coll->frags.lock);

                rt_printk("RTnet: Compensation pool empty - IP fragments "
                          "dropped (saddr:%x, daddr:%x)\n",
                          iph->saddr, iph->daddr);
                kfree_rtskb(first_skb);
                kfree_rtskb(skb);
                return NULL;
            }

            /* Optimized version of __rtskb_queue_tail */
            skb->next = NULL;
            p_coll->frags.last->next = skb;
            p_coll->frags.last = skb;

            /* Extend the chain */
            first_skb->chain_end = skb;

            /* Sanity check: unordered fragments are not allowed! */
            if (offset != p_coll->buf_size) {
                /* We have to drop this fragment => clean up the whole chain
                 * (the new fragment is already chained in, so freeing
                 * first_skb below releases it as well) */
                p_coll->in_use = 0;
                skb = first_skb;
                rt_spin_unlock_irqrestore(flags, &p_coll->frags.lock);
                break; /* leave the for loop */
            }

            p_coll->last_accessed = counter;
            p_coll->buf_size += skb->len;

            if (!more_frags) {
                first_skb->nh.iph->tot_len =
                    htons(p_coll->buf_size + sizeof(struct iphdr));
                p_coll->in_use = 0;
                rt_spin_unlock_irqrestore(flags, &p_coll->frags.lock);
                return first_skb;
            } else {
                rt_spin_unlock_irqrestore(flags, &p_coll->frags.lock);
                return NULL;
            }
        }
        rt_spin_unlock_irqrestore(flags, &p_coll->frags.lock);
    }

    rt_printk("RTnet: Unordered IP fragment (saddr:%x, daddr:%x)"
              " - dropped\n", iph->saddr, iph->daddr);
    kfree_rtskb(skb);

    return NULL;
}
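/*
 * A short illustration of the in-order constraint above: buf_size counts
 * the payload bytes collected so far, so a fragment is accepted only if it
 * starts exactly where the previous one ended. With 1480-byte fragments
 * (Ethernet), the offsets 0, 1480, 2960, ... must arrive in exactly that
 * order; should the fragment at offset 2960 overtake the one at 1480, the
 * check "offset != p_coll->buf_size" (2960 != 1480) trips and the whole
 * chain is dropped instead of being buffered out of order.
 */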