/*
 * rtcap_kfree_rtskb - return an rtskb (or its capture companion) to its pool.
 *
 * If the buffer is currently shared with the capture subsystem
 * (RTSKB_CAP_SHARED), only the companion skb is released; the original
 * stays with the capturer. Otherwise the skb itself goes back to its pool.
 * The cap_flags test-and-clear is done under rtcap_lock; the queueing
 * happens after the lock is dropped.
 */
void rtcap_kfree_rtskb(struct rtskb *rtskb)
{
    unsigned long flags;
    struct rtskb *target = rtskb;

    rtos_spin_lock_irqsave(&rtcap_lock, flags);

    if (rtskb->cap_flags & RTSKB_CAP_SHARED) {
        rtskb->cap_flags &= ~RTSKB_CAP_SHARED;
        target = rtskb->cap_comp_skb;   /* free the companion instead */
    }

    rtos_spin_unlock_irqrestore(&rtcap_lock, flags);

    if (target == rtskb)
        rtskb->chain_end = rtskb;       /* reset chain before pooling */

    rtskb_queue_tail(target->pool, target);
#ifdef CONFIG_RTNET_CHECKED
    target->pool->pool_balance++;
#endif
}
/*** * rt_packet_rcv */ int rt_packet_rcv(struct rtskb *skb, struct rtpacket_type *pt) { struct rtsocket *sock = (struct rtsocket *)(((u8 *)pt) - ((u8 *)&((struct rtsocket *)0)->prot.packet)); int ifindex = sock->prot.packet.ifindex; void (*callback_func)(struct rtdm_dev_context *, void *); void *callback_arg; rtdm_lockctx_t context; if (((ifindex != 0) && (ifindex != skb->rtdev->ifindex)) || (rtskb_acquire(skb, &sock->skb_pool) != 0)) kfree_rtskb(skb); else { rtdev_reference(skb->rtdev); rtskb_queue_tail(&sock->incoming, skb); rtdm_sem_up(&sock->pending_sem); rtdm_lock_get_irqsave(&sock->param_lock, context); callback_func = sock->callback_func; callback_arg = sock->callback_arg; rtdm_lock_put_irqrestore(&sock->param_lock, context); if (callback_func) callback_func(rt_socket_context(sock), callback_arg); } return 0; }
/***
 * rt_packet_rcv - deliver an incoming packet to the owning packet socket.
 *
 * The socket is recovered from the embedded rtpacket_type via manual
 * pointer arithmetic (container_of style). Packets for the wrong
 * interface, or that cannot be moved into the socket's pool, are freed;
 * otherwise they are queued, the wakeup event is signalled, and the
 * optional user callback is invoked. Always returns 0.
 */
int rt_packet_rcv(struct rtskb *skb, struct rtpacket_type *pt)
{
    struct rtsocket *sock =
        (struct rtsocket *)(((u8 *)pt) -
                            ((u8 *)&((struct rtsocket *)0)->prot.packet));
    int ifindex = sock->prot.packet.ifindex;
    void (*cb)(struct rtdm_dev_context *, void *);
    void *cb_arg;
    unsigned long flags;

    /* wrong interface, or no room in the socket pool -> drop */
    if (((ifindex != 0) && (ifindex != skb->rtdev->ifindex)) ||
        (rtskb_acquire(skb, &sock->skb_pool) != 0)) {
        kfree_rtskb(skb);
        return 0;
    }

    rtdev_reference(skb->rtdev);
    rtskb_queue_tail(&sock->incoming, skb);
    rtos_event_sem_signal(&sock->wakeup_event);

    /* snapshot the callback under the param lock, call it outside */
    rtos_spin_lock_irqsave(&sock->param_lock, flags);
    cb     = sock->callback_func;
    cb_arg = sock->callback_arg;
    rtos_spin_unlock_irqrestore(&sock->param_lock, flags);

    if (cb)
        cb(rt_socket_context(sock), cb_arg);

    return 0;
}
/***
 * rt_udp_rcv - queue an incoming UDP rtskb on its destination socket.
 *
 * Enqueues the skb, signals the wakeup event, optionally wakes a
 * pending select() waiter, and invokes the socket's user callback if
 * one is registered. Always returns 0.
 */
int rt_udp_rcv (struct rtskb *skb)
{
    struct rtsocket *sock = skb->sk;
    void (*cb)(struct rtdm_dev_context *, void *);
    void *cb_arg;
    unsigned long flags;

    rtskb_queue_tail(&sock->incoming, skb);
    rtos_event_sem_signal(&sock->wakeup_event);

    rtos_spin_lock_irqsave(&sock->param_lock, flags);

#ifdef CONFIG_RTNET_RTDM_SELECT
    /* wake a thread blocked in select() on this socket, if any */
    if (sock->wakeup_select != NULL) {
        wq_wakeup(sock->wakeup_select);
    }
#endif /* CONFIG_RTNET_RTDM_SELECT */

    /* snapshot the callback under the param lock, call it outside */
    cb     = sock->callback_func;
    cb_arg = sock->callback_arg;

    rtos_spin_unlock_irqrestore(&sock->param_lock, flags);

    if (cb)
        cb(rt_socket_context(sock), cb_arg);

    return 0;
}
int nomac_nrt_packet_tx(struct rtskb *rtskb) { struct nomac_priv *nomac; struct rtnet_device *rtdev; int ret; nomac = (struct nomac_priv *)rtskb->rtdev->mac_priv->disc_priv; rtcap_mark_rtmac_enqueue(rtskb); /* note: this routine may be called both in rt and non-rt context * => detect and wrap the context if necessary */ if (!rtos_in_rt_context()) { rtskb_queue_tail(&nrt_rtskb_queue, rtskb); rtos_event_sem_signal(&wakeup_sem); return 0; } else { rtdev = rtskb->rtdev; /* no MAC: we simply transmit the packet under xmit_lock */ rtos_res_lock(&rtdev->xmit_lock); ret = rtmac_xmit(rtskb); rtos_res_unlock(&rtdev->xmit_lock); return ret; } }
/*
 * rtcfg_rx_handler - receive hook for RTcfg frames.
 *
 * Moves the skb into the RTcfg pool and hands it to the worker via
 * rx_queue/rx_event; frees the skb if the pool exchange fails.
 * Always returns 0.
 */
static int rtcfg_rx_handler(struct rtskb *rtskb, struct rtpacket_type *pt)
{
    if (rtskb_acquire(rtskb, &rtcfg_pool) != 0) {
        /* no replacement buffer available -> drop */
        kfree_rtskb(rtskb);
        return 0;
    }

    rtdev_reference(rtskb->rtdev);
    rtskb_queue_tail(&rx_queue, rtskb);
    rtos_event_sem_signal(&rx_event);

    return 0;
}
/***
 * kfree_rtskb - return an rtskb to the global pool.
 * @skb rtskb (NULL is tolerated and ignored)
 *
 * Clears the user count, wipes the buffer contents, and re-queues the
 * skb on rtskb_pool. If the pool has grown beyond rtskb_pool_max, the
 * shrink SRQ is pended so Linux context can trim it.
 */
void kfree_rtskb(struct rtskb *skb)
{
    if (!skb)
        return;

    skb->users = 0;
    memset(skb->buf_start, 0, skb->buf_len);
    rtskb_queue_tail(&rtskb_pool, skb);

    if (rtskb_pool.qlen > rtskb_pool_max)
        rt_pend_linux_srq(dec_pool_srq);
}
/***
 * rt_udp_rcv - queue an incoming UDP rtskb on its destination socket.
 *
 * Enqueues the skb, broadcasts the wakeup event, and invokes the
 * socket's wakeup hook if one is registered. Always returns 0.
 */
int rt_udp_rcv (struct rtskb *skb)
{
    struct rtsocket *sk = skb->sk;

    rtskb_queue_tail(&sk->incoming, skb);
    rtos_event_broadcast(&sk->wakeup_event);

    if (sk->wakeup != NULL)
        sk->wakeup(sk->fd, sk->wakeup_arg);

    return 0;
}
/***
 * inc_pool_handler - grow the global rtskb pool back to its default size.
 *
 * Allocates new rtskbs until rtskb_pool.qlen reaches rtskb_pool_default,
 * stopping early (with a diagnostic) if new_rtskb() fails.
 */
void inc_pool_handler(void)
{
    while (rtskb_pool.qlen < rtskb_pool_default) {
        struct rtskb *skb = new_rtskb();    /* might return NULL */

        if (!skb) {
            printk("%s(): new_rtskb() returned NULL, qlen=%d\n",
                   __FUNCTION__, rtskb_pool.qlen);
            break;
        }

        rtskb_queue_tail(&rtskb_pool, skb);
    }
}
/***
 * rtnetif_rx: will be called from the driver
 * and send a message to rtdev-owned stack-manager
 *
 * @skb - the packet
 *
 * Fix: the original pasted __FUNCTION__ into the format string via
 * literal concatenation ("..."__FUNCTION__"()"). Since GCC 3.x /
 * C99, __FUNCTION__ is a predefined identifier (like __func__), not a
 * string literal, so that concatenation no longer compiles. Pass it as
 * a %s argument instead.
 */
void rtnetif_rx(struct rtskb *skb)
{
    struct rtnet_device *rtdev;

    if (!skb || !(rtdev = skb->rtdev)) {
        rt_printk("RTnet: called %s() with skb=<NULL>\n", __FUNCTION__);
        return;
    }

    if (rtskb_queue_len(&rtdev->rxqueue) < DROPPING_RTSKB) {
        rtskb_queue_tail(&rtdev->rxqueue, skb);
    } else {
        /* queue full: drop instead of overrunning the stack manager */
        rt_printk("RTnet: dropping packet in %s()\n", __FUNCTION__);
        kfree_rtskb(skb);
    }
}
/*** * rt_packet_rcv */ int rt_packet_rcv(struct rtskb *skb, struct rtpacket_type *pt) { struct rtsocket *sock = (struct rtsocket *)(((u8 *)pt) - ((u8 *)&((struct rtsocket *)0)->prot.packet)); if (((sock->prot.packet.ifindex != 0) && (sock->prot.packet.ifindex != skb->rtdev->ifindex)) || (rtskb_acquire(skb, &sock->skb_pool) != 0)) kfree_rtskb(skb); else { rtdev_reference(skb->rtdev); rtskb_queue_tail(&sock->incoming, skb); rtos_event_signal(&sock->wakeup_event); if (sock->wakeup != NULL) sock->wakeup(sock->fd, sock->wakeup_arg); } return 0; }
/*
 * rtmac_vnic_rx - hand a captured frame to the virtual NIC.
 *
 * Moves the skb into the VNIC pool, tags it with the given protocol
 * type, queues it, and pends the VNIC SRQ so Linux context can pick it
 * up. Returns 0 on success, -1 (with rx_dropped bumped) if the pool
 * exchange fails.
 */
int rtmac_vnic_rx(struct rtskb *skb, u16 type)
{
    struct rtmac_priv *priv = skb->rtdev->mac_priv;

    if (rtskb_acquire(skb, &priv->vnic_skb_pool) != 0) {
        priv->vnic_stats.rx_dropped++;
        kfree_rtskb(skb);
        return -1;
    }

    skb->protocol = type;
    rtskb_queue_tail(&rx_queue, skb);
    rt_pend_linux_srq(vnic_srq);

    return 0;
}
/*
 * rtmac_vnic_rx - hand a captured frame to the virtual NIC.
 *
 * Moves the skb into the VNIC pool, tags it with the given protocol
 * type, queues it, and pends the non-RT signal so Linux context can
 * pick it up. Returns 0 on success, -1 (with rx_dropped bumped) if the
 * pool exchange fails.
 */
int rtmac_vnic_rx(struct rtskb *rtskb, u16 type)
{
    struct rtmac_priv *priv = rtskb->rtdev->mac_priv;

    if (rtskb_acquire(rtskb, &priv->vnic_skb_pool) != 0) {
        priv->vnic_stats.rx_dropped++;
        kfree_rtskb(rtskb);
        return -1;
    }

    rtskb->protocol = type;
    rtskb_queue_tail(&rx_queue, rtskb);
    rtdm_nrtsig_pend(&vnic_signal);

    return 0;
}
/***
 * rt_udp_rcv - queue an incoming UDP rtskb on its destination socket.
 *
 * Enqueues the skb, signals the wakeup event, and invokes the socket's
 * user callback if one is registered. Always returns 0.
 */
int rt_udp_rcv (struct rtskb *skb)
{
    struct rtsocket *sock = skb->sk;
    void (*cb)(struct rtdm_dev_context *, void *);
    void *cb_arg;
    unsigned long flags;

    rtskb_queue_tail(&sock->incoming, skb);
    rtos_event_sem_signal(&sock->wakeup_event);

    /* snapshot the callback under the param lock, call it outside */
    rtos_spin_lock_irqsave(&sock->param_lock, flags);
    cb     = sock->callback_func;
    cb_arg = sock->callback_arg;
    rtos_spin_unlock_irqrestore(&sock->param_lock, flags);

    if (cb)
        cb(rt_socket_context(sock), cb_arg);

    return 0;
}
/***
 * rt_packet_rcv - deliver an incoming packet to the owning packet socket.
 *
 * @skb incoming buffer
 * @pt  packet type embedded in the socket (used to recover the socket)
 *
 * Returns -EUNATCH when the socket is bound to a different interface,
 * 0 otherwise (including drop paths).
 */
static int rt_packet_rcv(struct rtskb *skb, struct rtpacket_type *pt)
{
    /* recover the owning socket from the embedded packet_type */
    struct rtsocket *sock =
        container_of(pt, struct rtsocket, prot.packet.packet_type);
    int ifindex = sock->prot.packet.ifindex;
    void (*callback_func)(struct rtdm_fd *, void *);
    void *callback_arg;
    rtdm_lockctx_t context;

    /* socket bound to a specific interface and this is not it */
    if (unlikely((ifindex != 0) && (ifindex != skb->rtdev->ifindex)))
        return -EUNATCH;

#ifdef CONFIG_XENO_DRIVERS_NET_ETH_P_ALL
    if (pt->type == htons(ETH_P_ALL)) {
        /* ETH_P_ALL sockets get a clone; the original skb stays with
         * the stack for other consumers */
        struct rtskb *clone_skb = rtskb_clone(skb, &sock->skb_pool);
        if (clone_skb == NULL)
            goto out;           /* no clone buffer -> silently drop */
        skb = clone_skb;
    } else
#endif /* CONFIG_XENO_DRIVERS_NET_ETH_P_ALL */
    /* NOTE: this `if` chains as the `else` branch of the #ifdef above */
    if (unlikely(rtskb_acquire(skb, &sock->skb_pool) < 0)) {
        /* no replacement buffer in the socket pool -> drop */
        kfree_rtskb(skb);
        goto out;
    }

    rtskb_queue_tail(&sock->incoming, skb);
    rtdm_sem_up(&sock->pending_sem);

    /* snapshot the callback under the param lock, call it outside */
    rtdm_lock_get_irqsave(&sock->param_lock, context);
    callback_func = sock->callback_func;
    callback_arg = sock->callback_arg;
    rtdm_lock_put_irqrestore(&sock->param_lock, context);

    if (callback_func)
        callback_func(rt_socket_fd(sock), callback_arg);
out:
    return 0;
}