static void rtcfg_rx_task(void *arg) { struct rtskb *rtskb; struct rtcfg_frm_head *frm_head; struct rtnet_device *rtdev; while (rtdm_event_wait(&rx_event) == 0) while ((rtskb = rtskb_dequeue(&rx_queue))) { rtdev = rtskb->rtdev; if (rtskb->pkt_type == PACKET_OTHERHOST) { rtdev_dereference(rtdev); kfree_rtskb(rtskb); continue; } if (rtskb->len < sizeof(struct rtcfg_frm_head)) { RTCFG_DEBUG(1, "RTcfg: %s() received an invalid frame\n", __FUNCTION__); rtdev_dereference(rtdev); kfree_rtskb(rtskb); continue; } frm_head = (struct rtcfg_frm_head *)rtskb->data; if (rtcfg_do_main_event(rtskb->rtdev->ifindex, frm_head->id + RTCFG_FRM_STAGE_1_CFG, rtskb) < 0) kfree_rtskb(rtskb); rtdev_dereference(rtdev); } }
/*** * alloc_rtskb * @size: i will need it later. */ struct rtskb *alloc_rtskb(unsigned int size) { struct rtskb *skb; if ( rtskb_pool.qlen>0 ) skb = rtskb_dequeue(&rtskb_pool); else { // skb = new_rtskb(); /* might return NULL and not be safe in this context */ rt_pend_linux_srq(inc_pool_srq); return NULL; } /* Load the data pointers. */ skb->data = skb->buf_start; skb->tail = skb->buf_start; skb->end = skb->buf_start + size; /* Set up other state */ skb->len = 0; skb->cloned = 0; skb->data_len = 0; skb->users = 1; if ( rtskb_pool.qlen<rtskb_pool_min ) rt_pend_linux_srq(inc_pool_srq); return (skb); }
int rtcap_xmit_hook(struct rtskb *rtskb, struct rtnet_device *rtdev) { struct tap_device_t *tap_dev = &tap_device[rtskb->rtdev->ifindex]; unsigned long flags; if ((rtskb->cap_comp_skb = rtskb_dequeue(&cap_pool)) == 0) { tap_dev->tap_dev_stats.rx_dropped++; return tap_dev->orig_xmit(rtskb, rtdev); } #ifdef CONFIG_RTNET_CHECKED cap_pool.pool_balance--; #endif rtskb->cap_next = NULL; rtskb->cap_start = rtskb->data; rtskb->cap_len = rtskb->len; rtskb->cap_flags |= RTSKB_CAP_SHARED; rtos_get_time(&rtskb->time_stamp); rtos_spin_lock_irqsave(&rtcap_lock, flags); if (cap_queue.first == NULL) cap_queue.first = rtskb; else cap_queue.last->cap_next = rtskb; cap_queue.last = rtskb; rtos_spin_unlock_irqrestore(&rtcap_lock, flags); rtos_pend_nrt_signal(&cap_signal); return tap_dev->orig_xmit(rtskb, rtdev); }
/*** * rt_packet_close */ int rt_packet_close(struct rtdm_dev_context *context, int call_flags) { struct rtsocket *sock = (struct rtsocket *)&context->dev_private; struct rtpacket_type *pt = &sock->prot.packet.packet_type; struct rtskb *del; int ret = 0; unsigned long flags; rtos_spin_lock_irqsave(&sock->param_lock, flags); if ((pt->type != 0) && ((ret = rtdev_remove_pack(pt)) == 0)) pt->type = 0; rtos_spin_unlock_irqrestore(&sock->param_lock, flags); /* free packets in incoming queue */ while ((del = rtskb_dequeue(&sock->incoming)) != NULL) { rtdev_dereference(del->rtdev); kfree_rtskb(del); } if (ret == 0) ret = rt_socket_cleanup(context); return ret; }
/*** * do_stacktask */ static void do_stacktask(int mgr_id) { struct rtnet_msg msg; struct rtnet_mgr *mgr = (struct rtnet_mgr *)mgr_id; rt_printk("RTnet: stack-mgr started\n"); while(1) { rt_mbx_receive(&(mgr->mbx), &msg, sizeof(struct rtnet_msg)); if ( (msg.rtdev) && (msg.msg_type==Rx_PACKET) ) { while ( !rtskb_queue_empty(&msg.rtdev->rxqueue) ) { struct rtskb *skb = rtskb_dequeue(&msg.rtdev->rxqueue); if ( skb ) { unsigned short hash = ntohs(skb->protocol) & (MAX_RT_PROTOCOLS-1); struct rtpacket_type *pt = rt_packets[hash]; skb->nh.raw = skb->data; if (pt) { pt->handler (skb, skb->rtdev, pt); } else { rt_printk("RTnet: undefined Layer-3-Protokoll\n"); kfree_rtskb(skb); } } } } } }
/***
 * rt_udp_close - close a UDP socket (rtdm_user_info variant)
 *
 * Marks the socket closed and returns its bound port to the bitmap
 * under udp_socket_base_lock, invalidates collected IP fragments,
 * drops pending incoming packets and releases the socket.
 */
int rt_udp_close(struct rtdm_dev_context *sockctx, rtdm_user_info_t *user_info)
{
    struct rtsocket *sock = (struct rtsocket *)&sockctx->dev_private;
    struct rtskb    *skb;
    rtdm_lockctx_t   context;
    int              index;

    rtdm_lock_get_irqsave(&udp_socket_base_lock, context);

    sock->prot.inet.state = TCP_CLOSE;

    /* return the bound port to the bitmap, if any */
    index = sock->prot.inet.reg_index;
    if (index >= 0) {
        clear_bit(index % 32, &port_bitmap[index / 32]);
        free_ports++;
        sock->prot.inet.reg_index = -1;
    }

    rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);

    /* cleanup already collected fragments */
    rt_ip_frag_invalidate_socket(sock);

    /* free packets in incoming queue */
    while ((skb = rtskb_dequeue(&sock->incoming)) != NULL)
        kfree_rtskb(skb);

    return rt_socket_cleanup(sockctx);
}
/*** * rt_udp_close */ int rt_udp_close(struct rtdm_dev_context *context, int call_flags) { struct rtsocket *sock = (struct rtsocket *)&context->dev_private; struct rtskb *del; int port; unsigned long flags; rtos_spin_lock_irqsave(&udp_socket_base_lock, flags); sock->prot.inet.state = TCP_CLOSE; if (sock->prot.inet.reg_index >= 0) { port = sock->prot.inet.reg_index; clear_bit(port % 32, &port_bitmap[port / 32]); sock->prot.inet.reg_index = -1; } rtos_spin_unlock_irqrestore(&udp_socket_base_lock, flags); /* cleanup already collected fragments */ rt_ip_frag_invalidate_socket(sock); /* free packets in incoming queue */ while ((del = rtskb_dequeue(&sock->incoming)) != NULL) kfree_rtskb(del); return rt_socket_cleanup(context); }
/***
 * rt_packet_close - close a packet socket (rtdm lockctx variant)
 *
 * Unregisters the socket's packet type under param_lock, frees any
 * queued incoming packets (dropping the device reference each carries)
 * and finally releases the socket itself.
 */
int rt_packet_close(struct rtdm_dev_context *sockctx, rtdm_user_info_t *user_info)
{
    struct rtsocket      *sock = (struct rtsocket *)&sockctx->dev_private;
    struct rtpacket_type *pt   = &sock->prot.packet.packet_type;
    struct rtskb         *skb;
    rtdm_lockctx_t        context;
    int                   ret = 0;

    rtdm_lock_get_irqsave(&sock->param_lock, context);

    /* unregister the packet type unless it was never registered */
    if (pt->type != 0) {
        ret = rtdev_remove_pack(pt);
        if (ret == 0)
            pt->type = 0;
    }

    rtdm_lock_put_irqrestore(&sock->param_lock, context);

    /* free packets in incoming queue */
    while ((skb = rtskb_dequeue(&sock->incoming)) != NULL) {
        rtdev_dereference(skb->rtdev);
        kfree_rtskb(skb);
    }

    if (ret == 0)
        ret = rt_socket_cleanup(sockctx);

    return ret;
}
static void rtmac_vnic_signal_handler(rtdm_nrtsig_t nrtsig, void *arg) { struct rtskb *rtskb; struct sk_buff *skb; unsigned hdrlen; struct net_device_stats *stats; struct rtnet_device *rtdev; while (1) { rtskb = rtskb_dequeue(&rx_queue); if (!rtskb) break; rtdev = rtskb->rtdev; hdrlen = rtdev->hard_header_len; skb = dev_alloc_skb(hdrlen + rtskb->len + 2); if (skb) { /* the rtskb stamp is useless (different clock), get new one */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) __net_timestamp(skb); #else do_gettimeofday(&skb->stamp); #endif skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ /* copy Ethernet header */ memcpy(skb_put(skb, hdrlen), rtskb->data - hdrlen - sizeof(struct rtmac_hdr), hdrlen); /* patch the protocol field in the original Ethernet header */ ((struct ethhdr*)skb->data)->h_proto = rtskb->protocol; /* copy data */ memcpy(skb_put(skb, rtskb->len), rtskb->data, rtskb->len); skb->dev = rtskb->rtdev->mac_priv->vnic; skb->protocol = eth_type_trans(skb, skb->dev); stats = &rtskb->rtdev->mac_priv->vnic_stats; kfree_rtskb(rtskb); stats->rx_packets++; stats->rx_bytes += skb->len; netif_rx(skb); } else { printk("RTmac: VNIC fails to allocate linux skb\n"); kfree_rtskb(rtskb); } rtdev_dereference(rtdev); } }
static void rtmac_vnic_srq(void) { struct rtskb *rtskb; struct sk_buff *skb; unsigned hdrlen; struct net_device_stats *stats; while (1) { rtskb = rtskb_dequeue(&rx_queue); if (!rtskb) break; hdrlen = rtskb->rtdev->hard_header_len; skb = dev_alloc_skb(hdrlen + rtskb->len + 2); if (skb) { skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ /* copy Ethernet header */ memcpy(skb_put(skb, hdrlen), rtskb->data - hdrlen - sizeof(struct rtmac_hdr), hdrlen); /* patch the protocol field in the original Ethernet header */ ((struct ethhdr*)skb->data)->h_proto = rtskb->protocol; /* copy data */ memcpy(skb_put(skb, rtskb->len), rtskb->data, rtskb->len); skb->dev = &rtskb->rtdev->mac_priv->vnic; skb->protocol = eth_type_trans(skb, skb->dev); count2timeval(rtskb->rx, &skb->stamp); stats = &rtskb->rtdev->mac_priv->vnic_stats; kfree_rtskb(rtskb); stats->rx_packets++; stats->rx_bytes += skb->len; netif_rx(skb); } else { printk("RTmac: VNIC fails to allocate linux skb\n"); kfree_rtskb(rtskb); } } }
/***
 * nrt_xmit_task - non-RT transmit task for queued packets
 * @arg: unused task argument
 *
 * Loops until the shutdown flag is set: dequeues one packet from
 * nrt_rtskb_queue, transmits it under the owning device's xmit_lock,
 * then blocks on wakeup_sem until signalled again.
 *
 * NOTE(review): at most one packet is processed per wakeup before the
 * task blocks on wakeup_sem again - this assumes wakeup_sem is signalled
 * once per enqueued packet (counting semantics). Verify against the
 * enqueue side; with binary-event semantics packets could linger in the
 * queue until the next wakeup.
 */
void nrt_xmit_task(int arg)
{
    struct rtskb *rtskb;
    struct rtnet_device *rtdev;

    while (!shutdown) {
        if ((rtskb = rtskb_dequeue(&nrt_rtskb_queue))) {
            rtdev = rtskb->rtdev;

            /* no MAC: we simply transmit the packet under xmit_lock */
            rtos_res_lock(&rtdev->xmit_lock);
            rtmac_xmit(rtskb);
            rtos_res_unlock(&rtdev->xmit_lock);
        }

        /* sleep until more work is signalled */
        rtos_event_sem_wait(&wakeup_sem);
    }
}
/***
 * rt_packet_close - close a packet socket (plain rtsocket variant)
 *
 * Unregisters the socket's packet type if one is registered and frees
 * all packets still waiting in the incoming queue. Returns the result
 * of rtdev_remove_pack(), or 0 if nothing was registered.
 */
int rt_packet_close(struct rtsocket *sock)
{
    struct rtpacket_type *pt = &sock->prot.packet.packet_type;
    struct rtskb         *skb;
    int                   ret = 0;

    /* unregister the packet type unless it was never registered */
    if ((pt->type != 0) && ((ret = rtdev_remove_pack(pt)) == 0))
        pt->type = 0;

    /* free packets in incoming queue */
    while ((skb = rtskb_dequeue(&sock->incoming)) != NULL)
        kfree_rtskb(skb);

    return ret;
}
void rtcap_rx_hook(struct rtskb *rtskb) { if ((rtskb->cap_comp_skb = rtskb_dequeue(&cap_pool)) == 0) { tap_device[rtskb->rtdev->ifindex].tap_dev_stats.rx_dropped++; return; } #ifdef CONFIG_RTNET_CHECKED cap_pool.pool_balance--; #endif if (cap_queue.first == NULL) cap_queue.first = rtskb; else cap_queue.last->cap_next = rtskb; cap_queue.last = rtskb; rtskb->cap_next = NULL; rtskb->cap_flags |= RTSKB_CAP_SHARED; rtos_pend_nrt_signal(&cap_signal); }
void rtcfg_cleanup_frames(void) { struct rtskb *rtskb; while (rtdev_remove_pack(&rtcfg_packet_type) == -EAGAIN) { RTCFG_DEBUG(3, "RTcfg: waiting for protocol unregistration\n"); set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(1*HZ); /* wait a second */ } rtos_event_sem_delete(&rx_event); rtos_task_delete(&rx_task); while ((rtskb = rtskb_dequeue(&rx_queue)) != NULL) { rtdev_dereference(rtskb->rtdev); kfree_rtskb(rtskb); } rtskb_pool_release(&rtcfg_pool); }
/*** * rt_udp_close */ int rt_udp_close(struct rtsocket *s) { unsigned long flags; struct rtsocket *prev=s->prev; struct rtsocket *next=s->next; struct rtskb *del; s->state=TCP_CLOSE; rtos_spin_lock_irqsave(&udp_socket_base_lock, flags); prev=s->prev; next=s->next; if (prev != NULL) prev->next = next; if (next != NULL) next->prev = prev; if (s == udp_sockets) udp_sockets = next; rtos_spin_unlock_irqrestore(&udp_socket_base_lock, flags); s->next = NULL; s->prev = NULL; /* cleanup already collected fragments */ rt_ip_frag_invalidate_socket(s); /* free packets in incoming queue */ while ((del = rtskb_dequeue(&s->incoming)) != NULL) kfree_rtskb(del); return 0; }
/*** * rt_packet_close */ static void rt_packet_close(struct rtdm_fd *fd) { struct rtsocket *sock = rtdm_fd_to_private(fd); struct rtpacket_type *pt = &sock->prot.packet.packet_type; struct rtskb *del; rtdm_lockctx_t context; rtdm_lock_get_irqsave(&sock->param_lock, context); if (pt->type != 0) { rtdev_remove_pack(pt); pt->type = 0; } rtdm_lock_put_irqrestore(&sock->param_lock, context); /* free packets in incoming queue */ while ((del = rtskb_dequeue(&sock->incoming)) != NULL) { kfree_rtskb(del); } rt_socket_cleanup(fd); }
/***
 * rtskb_queue_purge - free every rtskb still linked into the queue
 * @list: queue to drain
 */
void rtskb_queue_purge(struct rtskb_head *list)
{
    struct rtskb *skb;

    while ((skb = rtskb_dequeue(list)) != NULL)
        kfree_rtskb(skb);
}
/*** * dec_pool_handler */ void dec_pool_handler(void) { while ( rtskb_pool.qlen>rtskb_pool_default ) dispose_rtskb(rtskb_dequeue(&rtskb_pool)); }
/*** * alloc_rtskb - allocate an rtskb from a pool * @size: required buffer size (to check against maximum boundary) * @pool: pool to take the rtskb from */ struct rtskb *alloc_rtskb(unsigned int size, struct rtskb_queue *pool) { struct rtskb *skb; RTNET_ASSERT(size <= SKB_DATA_ALIGN(RTSKB_SIZE), return NULL;); skb = rtskb_dequeue(pool); if (!skb) return NULL; #ifdef CONFIG_RTNET_CHECKED pool->pool_balance--; skb->chain_len = 1; #endif /* Load the data pointers. */ skb->data = skb->buf_start; skb->tail = skb->buf_start; skb->end = skb->buf_start + size; /* Set up other states */ skb->chain_end = skb; skb->len = 0;