/* RTcfg receive task: blocks on rx_event, then drains rx_queue and feeds
 * every frame to the RTcfg main state machine. Runs until the event object
 * is destroyed (rtdm_event_wait returns non-zero). */
static void rtcfg_rx_task(void *arg)
{
    struct rtskb           *rtskb;
    struct rtcfg_frm_head  *frm_head;
    struct rtnet_device    *rtdev;

    while (rtdm_event_wait(&rx_event) == 0)
        while ((rtskb = rtskb_dequeue(&rx_queue))) {
            /* The queued rtskb carries a device reference taken by the
             * enqueuing handler; we must drop it on every exit path. */
            rtdev = rtskb->rtdev;

            /* Frames addressed to another host are silently dropped. */
            if (rtskb->pkt_type == PACKET_OTHERHOST) {
                rtdev_dereference(rtdev);
                kfree_rtskb(rtskb);
                continue;
            }

            /* Too short to contain even the common frame header. */
            if (rtskb->len < sizeof(struct rtcfg_frm_head)) {
                RTCFG_DEBUG(1, "RTcfg: %s() received an invalid frame\n",
                            __FUNCTION__);
                rtdev_dereference(rtdev);
                kfree_rtskb(rtskb);
                continue;
            }

            frm_head = (struct rtcfg_frm_head *)rtskb->data;

            /* The state machine takes ownership of the rtskb when it
             * accepts the event; free it here only on rejection. */
            if (rtcfg_do_main_event(rtskb->rtdev->ifindex,
                                    frm_head->id + RTCFG_FRM_STAGE_1_CFG,
                                    rtskb) < 0)
                kfree_rtskb(rtskb);

            rtdev_dereference(rtdev);
        }
}
/* Notes:
 *  o On success, rtcfg_client_recv_ready returns without releasing the
 *    device lock (the caller is expected to unlock); on error the lock
 *    is released here before returning.
 */
static int rtcfg_client_recv_ready(int ifindex, struct rtskb *rtskb)
{
    struct rtcfg_frm_simple *ready_frm;
    struct rtcfg_device     *rtcfg_dev = &device[ifindex];
    u32                     i;

    ready_frm = (struct rtcfg_frm_simple *)rtskb->data;

    /* Validate the frame length before trusting any field. */
    if (rtskb->len < sizeof(struct rtcfg_frm_simple)) {
        rtos_res_unlock(&rtcfg_dev->dev_lock);
        RTCFG_DEBUG(1, "RTcfg: received invalid ready frame\n");
        kfree_rtskb(rtskb);
        return -EINVAL;
    }

    /* Look up the sending station by source MAC and mark it ready,
     * counting each station only once. */
    for (i = 0; i < rtcfg_dev->stations_found; i++)
        /* Ethernet-specific! */
        if (memcmp(rtcfg_dev->spec.clt.station_addr_list[i].mac_addr,
                   rtskb->mac.ethernet->h_source, ETH_ALEN) == 0) {
            if ((rtcfg_dev->spec.clt.station_addr_list[i].flags &
                 RTCFG_FLAG_READY) == 0) {
                rtcfg_dev->spec.clt.station_addr_list[i].flags |=
                    RTCFG_FLAG_READY;
                rtcfg_dev->stations_ready++;
            }
            break;
        }

    kfree_rtskb(rtskb);
    return 0;
}
/* VNIC transmit handler (legacy API variant): copies the payload of a
 * Linux sk_buff into a freshly allocated rtskb, prepends the RTmac
 * header, and hands the buffer to the discipline's non-real-time
 * transmit hook.
 * NOTE(review): this function appears truncated in this chunk — the
 * code following the RTNET_ASSERT is not visible here. */
static int rtmac_vnic_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rtnet_device     *rtdev = (struct rtnet_device*)dev->priv;
    struct net_device_stats *stats = &rtdev->mac_priv->vnic_stats;
    struct rtskb_queue      *pool = &rtdev->mac_priv->vnic_skb_pool;
    struct ethhdr           *ethernet = (struct ethhdr*)skb->data;
    struct rtskb            *rtskb;
    int                     res;
    int                     data_len;

    /* Buffer size rounded up to the next multiple of 16 bytes. */
    rtskb = alloc_rtskb((skb->len + sizeof(struct rtmac_hdr) + 15) & ~15,
                        pool);
    if (!rtskb) {
        stats->tx_dropped++;
        return -ENOMEM;
    }

    /* Leave headroom for the link-layer header plus the RTmac header. */
    rtskb_reserve(rtskb, rtdev->hard_header_len + sizeof(struct rtmac_hdr));

    /* Copy only the payload; the Ethernet header is rebuilt below. */
    data_len = skb->len - dev->hard_header_len;
    memcpy(rtskb_put(rtskb, data_len), skb->data + dev->hard_header_len,
           data_len);

    res = rtmac_add_header(rtdev, ethernet->h_dest, rtskb,
                           ntohs(ethernet->h_proto));
    if (res < 0) {
        stats->tx_dropped++;
        kfree_rtskb(rtskb);
        return res;
    }

    RTNET_ASSERT(rtdev->mac_disc->nrt_packet_tx != NULL,
                 kfree_rtskb(rtskb); return -1;);
/* VNIC transmit handler (newer API variant): copies the payload of a
 * Linux sk_buff into an rtskb, prepends the RTmac tunnel header, and
 * passes it to the discipline's non-real-time transmit hook.
 * Returns NETDEV_TX_BUSY when no rtskb is available so the stack may
 * retry the frame.
 * NOTE(review): this function appears truncated in this chunk — the
 * "done" label targeted by the gotos is not visible here. */
int rtmac_vnic_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rtnet_device     *rtdev =
        *(struct rtnet_device **)netdev_priv(dev);
    struct net_device_stats *stats = &rtdev->mac_priv->vnic_stats;
    struct rtskb_pool       *pool = &rtdev->mac_priv->vnic_skb_pool;
    struct ethhdr           *ethernet = (struct ethhdr*)skb->data;
    struct rtskb            *rtskb;
    int                     res;
    int                     data_len;

    /* Buffer size rounded up to the next multiple of 16 bytes. */
    rtskb = alloc_rtskb((skb->len + sizeof(struct rtmac_hdr) + 15) & ~15,
                        pool);
    if (!rtskb)
        return NETDEV_TX_BUSY;

    /* Leave headroom for the link-layer header plus the RTmac header. */
    rtskb_reserve(rtskb, rtdev->hard_header_len + sizeof(struct rtmac_hdr));

    /* Copy only the payload; the Ethernet header is rebuilt below. */
    data_len = skb->len - dev->hard_header_len;
    memcpy(rtskb_put(rtskb, data_len), skb->data + dev->hard_header_len,
           data_len);

    res = rtmac_add_header(rtdev, ethernet->h_dest, rtskb,
                           ntohs(ethernet->h_proto), RTMAC_FLAG_TUNNEL);
    if (res < 0) {
        stats->tx_dropped++;
        kfree_rtskb(rtskb);
        goto done;
    }

    RTNET_ASSERT(rtdev->mac_disc->nrt_packet_tx != NULL,
                 kfree_rtskb(rtskb); goto done;);
/* Non-real-time signal handler: moves received rtskbs from rx_queue into
 * regular Linux sk_buffs and injects them into the Linux network stack
 * via netif_rx(). Runs in Linux (non-RT) context. */
static void rtmac_vnic_signal_handler(rtdm_nrtsig_t nrtsig, void *arg)
{
    struct rtskb            *rtskb;
    struct sk_buff          *skb;
    unsigned                hdrlen;
    struct net_device_stats *stats;
    struct rtnet_device     *rtdev;

    while (1) {
        rtskb = rtskb_dequeue(&rx_queue);
        if (!rtskb)
            break;

        rtdev  = rtskb->rtdev;
        hdrlen = rtdev->hard_header_len;

        /* +2 keeps room for the IP-alignment reserve below. */
        skb = dev_alloc_skb(hdrlen + rtskb->len + 2);
        if (skb) {
            /* the rtskb stamp is useless (different clock), get new one */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
            __net_timestamp(skb);
#else
            do_gettimeofday(&skb->stamp);
#endif

            skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */

            /* copy Ethernet header; it sits in front of the RTmac header
             * relative to rtskb->data */
            memcpy(skb_put(skb, hdrlen),
                   rtskb->data - hdrlen - sizeof(struct rtmac_hdr),
                   hdrlen);

            /* patch the protocol field in the original Ethernet header */
            ((struct ethhdr*)skb->data)->h_proto = rtskb->protocol;

            /* copy data */
            memcpy(skb_put(skb, rtskb->len), rtskb->data, rtskb->len);

            skb->dev      = rtskb->rtdev->mac_priv->vnic;
            skb->protocol = eth_type_trans(skb, skb->dev);

            /* Grab the stats pointer before freeing the rtskb — the
             * rtskb must not be touched afterwards. */
            stats = &rtskb->rtdev->mac_priv->vnic_stats;
            kfree_rtskb(rtskb);

            stats->rx_packets++;
            stats->rx_bytes += skb->len;

            netif_rx(skb);
        } else {
            printk("RTmac: VNIC fails to allocate linux skb\n");
            kfree_rtskb(rtskb);
        }

        /* Drop the device reference held while the rtskb was queued. */
        rtdev_dereference(rtdev);
    }
}
/* Claim a free IP-fragment collector for the message started by @skb and
 * initialize it from the (already parsed) IP header. Frees @skb when no
 * collector is available. */
static void alloc_collector(struct rtskb *skb, struct rtsocket *sock)
{
    int                 i;
    unsigned int        flags;
    struct ip_collector *p_coll;
    struct iphdr        *iph = skb->nh.iph;

    /* Find free collector */
    for (i = 0; i < COLLECTOR_COUNT; i++) {
        p_coll = &collector[i];

        /* Each collector is protected by its own fragment-queue lock. */
        rtos_spin_lock_irqsave(&p_coll->frags.lock, flags);

        /*
         * This is a very simple version of a garbage collector.
         * Whenever the last access to any of the collectors is a while
         * ago, the collector will be freed...
         * Under normal conditions, it should never be necessary to
         * collect the garbage.
         */
        if (p_coll->in_use &&
            (counter - p_coll->last_accessed > GARBAGE_COLLECT_LIMIT)) {
            kfree_rtskb(p_coll->frags.first);
            p_coll->in_use = 0;
#ifdef FRAG_DBG
            rtos_print("RTnet: IP fragmentation garbage collection "
                       "(saddr:%x, daddr:%x)\n",
                       p_coll->saddr, p_coll->daddr);
#endif
        }

        /* Collector (now) free? */
        if (!p_coll->in_use) {
            /* Record identity of the IP message (src, dst, id, proto)
             * so later fragments can be matched to this collector. */
            p_coll->in_use        = 1;
            p_coll->last_accessed = counter;
            p_coll->buf_size      = skb->len;
            p_coll->frags.first   = skb;
            p_coll->frags.last    = skb;
            p_coll->saddr         = iph->saddr;
            p_coll->daddr         = iph->daddr;
            p_coll->id            = iph->id;
            p_coll->protocol      = iph->protocol;
            p_coll->sock          = sock;

            rtos_spin_unlock_irqrestore(&p_coll->frags.lock, flags);
            return;
        }

        rtos_spin_unlock_irqrestore(&p_coll->frags.lock, flags);
    }

    rtos_print("RTnet: IP fragmentation - no collector available\n");
    kfree_rtskb(skb);
}
/* RTcfg client state handler for CLIENT_ALL_KNOWN: dispatches commands
 * and received frames. event_data is either an rtskb (FRM_* events) or a
 * proc call (CMD_* events) — both casts are prepared up front.
 * Lock convention: the callee either keeps the device lock (return 0)
 * or releases it; frames are freed by the respective handler unless
 * freed explicitly below. */
int rtcfg_main_state_client_all_known(int ifindex, RTCFG_EVENT event_id,
                                      void* event_data)
{
    struct rtskb        *rtskb = (struct rtskb *)event_data;
    struct rt_proc_call *call = (struct rt_proc_call *)event_data;

    switch (event_id) {
        case RTCFG_CMD_ANNOUNCE:
            return rtcfg_client_get_frag(ifindex, call);

        case RTCFG_CMD_DETACH:
            rtcfg_client_detach(ifindex, call);
            break;

        case RTCFG_FRM_STAGE_2_CFG_FRAG:
            rtcfg_client_recv_stage_2_frag(ifindex, rtskb);
            break;

        case RTCFG_FRM_READY:
            /* recv_ready frees the rtskb itself; on success it leaves
             * the device lock held, so release it here. */
            if (rtcfg_client_recv_ready(ifindex, rtskb) == 0)
                rtos_res_unlock(&device[ifindex].dev_lock);
            break;

        case RTCFG_FRM_ANNOUNCE_NEW:
            /* Reply to the new station before unlocking, then free the
             * frame in either case. */
            if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
                rtcfg_send_announce_reply(ifindex,
                                          rtskb->mac.ethernet->h_source);
                rtos_res_unlock(&device[ifindex].dev_lock);
            }
            kfree_rtskb(rtskb);
            break;

        case RTCFG_FRM_DEAD_STATION:
            rtcfg_client_recv_dead_station(ifindex, rtskb);
            break;

        case RTCFG_FRM_STAGE_1_CFG:
            /* ignore */
            rtos_res_unlock(&device[ifindex].dev_lock);
            kfree_rtskb(rtskb);
            break;

        default:
            rtos_res_unlock(&device[ifindex].dev_lock);
            RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
                        rtcfg_event[event_id], ifindex, __FUNCTION__);
            return -EINVAL;
    }
    return 0;
}
/* SRQ (service request) handler: converts queued rtskbs into Linux
 * sk_buffs and hands them to the Linux stack via netif_rx(). Older
 * variant of the VNIC rx path — here `vnic` is an embedded struct
 * (note the address-of) and the rtskb rx stamp is converted to a
 * timeval for skb->stamp. */
static void rtmac_vnic_srq(void)
{
    struct rtskb            *rtskb;
    struct sk_buff          *skb;
    unsigned                hdrlen;
    struct net_device_stats *stats;

    while (1) {
        rtskb = rtskb_dequeue(&rx_queue);
        if (!rtskb)
            break;

        hdrlen = rtskb->rtdev->hard_header_len;

        /* +2 keeps room for the IP-alignment reserve below. */
        skb = dev_alloc_skb(hdrlen + rtskb->len + 2);
        if (skb) {
            skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */

            /* copy Ethernet header; it sits in front of the RTmac
             * header relative to rtskb->data */
            memcpy(skb_put(skb, hdrlen),
                   rtskb->data - hdrlen - sizeof(struct rtmac_hdr),
                   hdrlen);

            /* patch the protocol field in the original Ethernet header */
            ((struct ethhdr*)skb->data)->h_proto = rtskb->protocol;

            /* copy data */
            memcpy(skb_put(skb, rtskb->len), rtskb->data, rtskb->len);

            skb->dev      = &rtskb->rtdev->mac_priv->vnic;
            skb->protocol = eth_type_trans(skb, skb->dev);

            /* Convert the RT clock stamp for the Linux skb. */
            count2timeval(rtskb->rx, &skb->stamp);

            /* Grab the stats pointer before freeing the rtskb — the
             * rtskb must not be touched afterwards. */
            stats = &rtskb->rtdev->mac_priv->vnic_stats;
            kfree_rtskb(rtskb);

            stats->rx_packets++;
            stats->rx_bytes += skb->len;

            netif_rx(skb);
        } else {
            printk("RTmac: VNIC fails to allocate linux skb\n");
            kfree_rtskb(rtskb);
        }
    }
}
/* TDMA receive handler: validates the RTmac header, extracts the TDMA
 * event id, dispatches it to the TDMA state machine, and frees the
 * buffer. */
int tdma_packet_rx(struct rtskb *skb, struct rtnet_device *rtdev,
                   struct rtpacket_type *pt)
{
    struct rtmac_device *rtmac = rtdev->rtmac;
    struct rtmac_tdma   *tdma = (struct rtmac_tdma *)rtmac->priv;
    struct rtmac_hdr    *rtmac_ptr;
    struct tdma_hdr     *tdma_ptr;
    TDMA_EVENT          event;
    int                 ret = 0;

    /*
     * set pointers in skb
     *
     * network layer pointer (->nh) to rtmac header
     * transport layer pointer (->h) to tdma header
     * data pointer (->data) to beginning of data
     */
    skb->nh.raw = skb->data;
    rtmac_ptr = (struct rtmac_hdr *)skb->nh.raw;
    skb->data += sizeof(struct rtmac_hdr);

    skb->h.raw = skb->data;
    tdma_ptr = (struct tdma_hdr *)skb->h.raw;
    skb->data += sizeof(struct tdma_hdr);

    /*
     * test if the received packet is a valid tdma packet...
     */
    if (rtmac_ptr->type != __constant_htons(ETH_TDMA) ||
        rtmac_ptr->ver != RTMAC_VERSION) {
        rt_printk("RTmac: tdma: received packet on interface %s is not tdma ;(\n",
                  rtdev->name);
        kfree_rtskb(skb);
        return -1;
    }

    event = ntohl(tdma_ptr->msg);
    /* NOTE(review): the skb is freed unconditionally below — assumes
     * tdma_do_event never frees or keeps the buffer itself; confirm
     * against the event handlers. */
    ret = tdma_do_event(tdma, event, (void *)skb);

    /*
     * dispose socket buffer
     */
    kfree_rtskb(skb);

    return ret;
}
/*
 * This function returns an rtskb that contains the complete, accumulated
 * IP message. If not all fragments of the IP message have been received
 * yet, it returns NULL.
 * Note: the IP header must have already been pulled from the rtskb!
 */
struct rtskb *rt_ip_defrag(struct rtskb *skb, struct rtinet_protocol *ipprot)
{
    unsigned int    more_frags;
    unsigned int    offset;
    struct rtsocket *sock;
    struct iphdr    *iph = skb->nh.iph;
    int             ret;

    /* Drives the collector garbage-collection ageing. */
    counter++;

    /* Parse the IP header */
    offset = ntohs(iph->frag_off);
    more_frags = offset & IP_MF;
    offset &= IP_OFFSET;
    offset <<= 3; /* offset is in 8-byte chunks */

    /* First fragment? */
    if (offset == 0) {
        /* Get the destination socket */
        if ((sock = ipprot->dest_socket(skb)) == NULL) {
            /* Drop the rtskb */
            kfree_rtskb(skb);
            return NULL;
        }

        /* Acquire the rtskb at the expense of the protocol pool */
        ret = rtskb_acquire(skb, &sock->skb_pool);

        /* socket is now implicitely locked by the rtskb */
        rt_socket_dereference(sock);

        if (ret != 0) {
            /* Drop the rtskb */
            kfree_rtskb(skb);
        } else {
            /* Allocates a new collector; the collector takes ownership
             * of the skb from here on. */
            alloc_collector(skb, sock);
        }
        return NULL;
    } else {
        /* Add to an existing collector */
        return add_to_collector(skb, offset, more_frags);
    }
}
/***
 *  rt_packet_close
 *
 *  Unregisters the socket's packet type (if still registered), drains the
 *  incoming queue (dropping the device reference each rtskb holds), and
 *  finally cleans up the socket.
 */
int rt_packet_close(struct rtdm_dev_context *context, int call_flags)
{
    struct rtsocket       *sock = (struct rtsocket *)&context->dev_private;
    struct rtpacket_type  *pt = &sock->prot.packet.packet_type;
    struct rtskb          *del;
    int                   ret = 0;
    unsigned long         flags;

    rtos_spin_lock_irqsave(&sock->param_lock, flags);

    /* Only mark the type unregistered when the removal succeeded; a
     * failure is propagated so cleanup can be retried later. */
    if ((pt->type != 0) && ((ret = rtdev_remove_pack(pt)) == 0))
        pt->type = 0;

    rtos_spin_unlock_irqrestore(&sock->param_lock, flags);

    /* free packets in incoming queue */
    while ((del = rtskb_dequeue(&sock->incoming)) != NULL) {
        rtdev_dereference(del->rtdev);
        kfree_rtskb(del);
    }

    if (ret == 0)
        ret = rt_socket_cleanup(context);

    return ret;
}
/*** * rt_udp_close */ int rt_udp_close(struct rtdm_dev_context *context, int call_flags) { struct rtsocket *sock = (struct rtsocket *)&context->dev_private; struct rtskb *del; int port; unsigned long flags; rtos_spin_lock_irqsave(&udp_socket_base_lock, flags); sock->prot.inet.state = TCP_CLOSE; if (sock->prot.inet.reg_index >= 0) { port = sock->prot.inet.reg_index; clear_bit(port % 32, &port_bitmap[port / 32]); sock->prot.inet.reg_index = -1; } rtos_spin_unlock_irqrestore(&udp_socket_base_lock, flags); /* cleanup already collected fragments */ rt_ip_frag_invalidate_socket(sock); /* free packets in incoming queue */ while ((del = rtskb_dequeue(&sock->incoming)) != NULL) kfree_rtskb(del); return rt_socket_cleanup(context); }
/***
 *  rt_udp_close  (RTDM API variant)
 *
 *  Marks the socket closed, releases its registered UDP port (returning
 *  it to the free-port count), discards pending fragments and queued
 *  packets, then performs the generic socket cleanup.
 */
int rt_udp_close(struct rtdm_dev_context *sockctx,
                 rtdm_user_info_t *user_info)
{
    struct rtsocket *sock = (struct rtsocket *)&sockctx->dev_private;
    struct rtskb    *del;
    int             port;
    rtdm_lockctx_t  context;

    rtdm_lock_get_irqsave(&udp_socket_base_lock, context);

    sock->prot.inet.state = TCP_CLOSE;

    if (sock->prot.inet.reg_index >= 0) {
        port = sock->prot.inet.reg_index;
        clear_bit(port % 32, &port_bitmap[port / 32]);
        /* Port returns to the pool of allocatable ports. */
        free_ports++;
        sock->prot.inet.reg_index = -1;
    }

    rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);

    /* cleanup already collected fragments */
    rt_ip_frag_invalidate_socket(sock);

    /* free packets in incoming queue */
    while ((del = rtskb_dequeue(&sock->incoming)) != NULL)
        kfree_rtskb(del);

    return rt_socket_cleanup(sockctx);
}
/* NoMAC control-packet handler. The NoMAC discipline defines no control
 * traffic at all, so any packet that arrives here is simply discarded. */
int nomac_packet_rx(struct rtskb *rtskb)
{
    kfree_rtskb(rtskb);

    return 0;
}
/* Process a pending calibration-reply job. Called with tdma->lock held
 * (and returns with it held), but temporarily drops the lock while
 * sleeping until the assigned reply slot and transmitting.
 * Returns the job to continue with: the same job if its cycle has not
 * arrived yet, otherwise the predecessor in the job list. */
static struct tdma_job *do_reply_cal_job(struct tdma_priv *tdma,
                                         struct tdma_reply_cal *job,
                                         rtdm_lockctx_t lockctx)
{
    struct tdma_job *prev_job;

    /* Not our cycle yet — keep the job queued. */
    if (job->reply_cycle > tdma->current_cycle)
        return &job->head;

    /* remove the job from the list and step back to its predecessor,
     * transferring the list reference accordingly */
    __list_del(job->head.entry.prev, job->head.entry.next);
    job->head.ref_count--;
    prev_job = tdma->current_job =
        list_entry(job->head.entry.prev, struct tdma_job, entry);
    prev_job->ref_count++;
    tdma->job_list_revision++;

    /* Drop the lock: we may sleep and transmit below. */
    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    if (job->reply_cycle == tdma->current_cycle) {
        /* send reply in the assigned slot */
        rtdm_task_sleep_abs(tdma->current_cycle_start + job->reply_offset,
                            RTDM_TIMERMODE_REALTIME);
        rtmac_xmit(job->reply_rtskb);
    } else {
        /* cleanup if cycle already passed */
        kfree_rtskb(job->reply_rtskb);
    }

    /* Re-acquire before returning to the caller's locked context. */
    rtdm_lock_get_irqsave(&tdma->lock, lockctx);

    return prev_job;
}
/***
 *  rt_packet_rcv  (RTDM variant)
 *
 *  Packet-socket receive hook: queues the rtskb on the owning socket and
 *  wakes any waiter, invoking the user callback if one is registered.
 */
int rt_packet_rcv(struct rtskb *skb, struct rtpacket_type *pt)
{
    /* Manual container_of: recover the socket from the embedded
     * prot.packet member. */
    struct rtsocket *sock = (struct rtsocket *)(((u8 *)pt) -
        ((u8 *)&((struct rtsocket *)0)->prot.packet));
    int             ifindex = sock->prot.packet.ifindex;
    void            (*callback_func)(struct rtdm_dev_context *, void *);
    void            *callback_arg;
    rtdm_lockctx_t  context;

    /* Drop the frame when bound to a different interface or when the
     * socket pool cannot take over the buffer. */
    if (((ifindex != 0) && (ifindex != skb->rtdev->ifindex)) ||
        (rtskb_acquire(skb, &sock->skb_pool) != 0))
        kfree_rtskb(skb);
    else {
        /* Hold the device while the rtskb sits in the socket queue. */
        rtdev_reference(skb->rtdev);
        rtskb_queue_tail(&sock->incoming, skb);
        rtdm_sem_up(&sock->pending_sem);

        /* Snapshot the callback under the lock, call it outside. */
        rtdm_lock_get_irqsave(&sock->param_lock, context);
        callback_func = sock->callback_func;
        callback_arg  = sock->callback_arg;
        rtdm_lock_put_irqrestore(&sock->param_lock, context);

        if (callback_func)
            callback_func(rt_socket_context(sock), callback_arg);
    }
    return 0;
}
/***
 *  rt_packet_rcv  (rtos variant)
 *
 *  Packet-socket receive hook: queues the rtskb on the owning socket and
 *  signals the wakeup event, invoking the user callback if registered.
 */
int rt_packet_rcv(struct rtskb *skb, struct rtpacket_type *pt)
{
    /* Manual container_of: recover the socket from the embedded
     * prot.packet member. */
    struct rtsocket *sock = (struct rtsocket *)(((u8 *)pt) -
        ((u8 *)&((struct rtsocket *)0)->prot.packet));
    int             ifindex = sock->prot.packet.ifindex;
    void            (*callback_func)(struct rtdm_dev_context *, void *);
    void            *callback_arg;
    unsigned long   flags;

    /* Drop the frame when bound to a different interface or when the
     * socket pool cannot take over the buffer. */
    if (((ifindex != 0) && (ifindex != skb->rtdev->ifindex)) ||
        (rtskb_acquire(skb, &sock->skb_pool) != 0))
        kfree_rtskb(skb);
    else {
        /* Hold the device while the rtskb sits in the socket queue. */
        rtdev_reference(skb->rtdev);
        rtskb_queue_tail(&sock->incoming, skb);
        rtos_event_sem_signal(&sock->wakeup_event);

        /* Snapshot the callback under the lock, call it outside. */
        rtos_spin_lock_irqsave(&sock->param_lock, flags);
        callback_func = sock->callback_func;
        callback_arg  = sock->callback_arg;
        rtos_spin_unlock_irqrestore(&sock->param_lock, flags);

        if (callback_func)
            callback_func(rt_socket_context(sock), callback_arg);
    }
    return 0;
}
/***
 *  do_stacktask
 *
 *  Stack manager task: waits on the manager's mailbox for Rx
 *  notifications and dispatches every queued rtskb to the layer-3
 *  protocol handler matching its Ethernet protocol id.
 *  NOTE(review): mgr_id carries a pointer through an int parameter —
 *  verify this cast is safe on the target platform.
 */
static void do_stacktask(int mgr_id)
{
    struct rtnet_msg msg;
    struct rtnet_mgr *mgr = (struct rtnet_mgr *)mgr_id;

    rt_printk("RTnet: stack-mgr started\n");

    while(1) {
        /* Blocks until a message arrives. */
        rt_mbx_receive(&(mgr->mbx), &msg, sizeof(struct rtnet_msg));

        if ( (msg.rtdev) && (msg.msg_type==Rx_PACKET) ) {
            /* Drain the device's receive queue completely. */
            while ( !rtskb_queue_empty(&msg.rtdev->rxqueue) ) {
                struct rtskb *skb = rtskb_dequeue(&msg.rtdev->rxqueue);
                if ( skb ) {
                    /* Hash the protocol id into the handler table. */
                    unsigned short hash =
                        ntohs(skb->protocol) & (MAX_RT_PROTOCOLS-1);
                    struct rtpacket_type *pt = rt_packets[hash];

                    skb->nh.raw = skb->data;
                    if (pt) {
                        /* Handler takes ownership of the skb. */
                        pt->handler (skb, skb->rtdev, pt);
                    } else {
                        rt_printk("RTnet: undefined Layer-3-Protokoll\n");
                        kfree_rtskb(skb);
                    }
                }
            }
        }
    }
}
/***
 *  rt_packet_close  (RTDM API variant)
 *
 *  Unregisters the socket's packet type (if still registered), drains
 *  the incoming queue (dropping the device reference each rtskb holds),
 *  and finally cleans up the socket.
 */
int rt_packet_close(struct rtdm_dev_context *sockctx,
                    rtdm_user_info_t *user_info)
{
    struct rtsocket      *sock = (struct rtsocket *)&sockctx->dev_private;
    struct rtpacket_type *pt = &sock->prot.packet.packet_type;
    struct rtskb         *del;
    int                  ret = 0;
    rtdm_lockctx_t       context;

    rtdm_lock_get_irqsave(&sock->param_lock, context);

    /* Only mark the type unregistered when the removal succeeded; a
     * failure is propagated so cleanup can be retried later. */
    if ((pt->type != 0) && ((ret = rtdev_remove_pack(pt)) == 0))
        pt->type = 0;

    rtdm_lock_put_irqrestore(&sock->param_lock, context);

    /* free packets in incoming queue */
    while ((del = rtskb_dequeue(&sock->incoming)) != NULL) {
        rtdev_dereference(del->rtdev);
        kfree_rtskb(del);
    }

    if (ret == 0)
        ret = rt_socket_cleanup(sockctx);

    return ret;
}
/* Attach link-layer headers to an RTcfg frame and transmit it on @rtdev.
 * Consumes the caller's device reference on every path; the rtskb is
 * consumed by rtdev_xmit on the transmit path and freed here on error. */
int rtcfg_send_frame(struct rtskb *rtskb, struct rtnet_device *rtdev,
                     u8 *dest_addr)
{
    int ret;

    rtskb->rtdev    = rtdev;
    /* Lowest real-time priority for configuration traffic. */
    rtskb->priority = QUEUE_MIN_PRIO-1;

    /* Build the link-layer header if the device provides a builder. */
    if (rtdev->hard_header) {
        ret = rtdev->hard_header(rtskb, rtdev, ETH_RTCFG, dest_addr,
                                 rtdev->dev_addr, rtskb->len);
        if (ret < 0)
            goto err;
    }

    if ((rtdev->flags & IFF_UP) != 0) {
        ret = 0;
        /* rtdev_xmit consumes the rtskb even when it fails. */
        if (rtdev_xmit(rtskb) != 0)
            ret = -EAGAIN;
    } else {
        ret = -ENETDOWN;
        goto err;
    }

    rtdev_dereference(rtdev);
    return ret;

  err:
    kfree_rtskb(rtskb);
    rtdev_dereference(rtdev);
    return ret;
}
/* RTcfg client state handler for CLIENT_0 (initial client state):
 * dispatches commands and received frames. event_data is either an
 * rtskb (FRM_* events) or a proc call (CMD_* events) — both casts are
 * prepared up front. */
int rtcfg_main_state_client_0(int ifindex, RTCFG_EVENT event_id,
                              void* event_data)
{
    struct rtskb        *rtskb = (struct rtskb *)event_data;
    struct rt_proc_call *call = (struct rt_proc_call *)event_data;

    switch (event_id) {
        case RTCFG_CMD_DETACH:
            rtcfg_client_detach(ifindex, call);
            break;

        case RTCFG_FRM_STAGE_1_CFG:
            rtcfg_client_recv_stage_1(ifindex, rtskb);
            break;

        case RTCFG_FRM_ANNOUNCE_NEW:
            /* recv_announce leaves the lock held on success; the frame
             * is freed here in either case. */
            if (rtcfg_client_recv_announce(ifindex, rtskb) == 0)
                rtos_res_unlock(&device[ifindex].dev_lock);
            kfree_rtskb(rtskb);
            break;

        case RTCFG_FRM_READY:
            /* recv_ready frees the rtskb itself; on success it leaves
             * the device lock held, so release it here. */
            if (rtcfg_client_recv_ready(ifindex, rtskb) == 0)
                rtos_res_unlock(&device[ifindex].dev_lock);
            break;

        default:
            rtos_res_unlock(&device[ifindex].dev_lock);
            RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
                        rtcfg_event[event_id], ifindex, __FUNCTION__);
            return -EINVAL;
    }
    return 0;
}
/* RTmac protocol demultiplexer: strips the RTmac header and routes the
 * frame either to the attached discipline or to the VNIC path.
 * NOTE(review): the header fields are read without an explicit check
 * that skb->len >= sizeof(struct rtmac_hdr) — confirm the caller
 * guarantees a minimum frame length. */
int rtmac_proto_rx(struct rtskb *skb, struct rtpacket_type *pt)
{
    struct rtmac_disc *disc = skb->rtdev->mac_disc;
    struct rtmac_hdr  *hdr;

    /* RTmac frames on a device without a discipline are dropped. */
    if (disc == NULL) {
        rtos_print("RTmac: received RTmac packet on unattached device %s\n",
                   skb->rtdev->name);
        goto error;
    }

    hdr = (struct rtmac_hdr *)skb->data;
    rtskb_pull(skb, sizeof(struct rtmac_hdr));

    if (hdr->ver != RTMAC_VERSION) {
        rtos_print("RTmac: received unsupported RTmac protocol version on "
                   "device %s\n", skb->rtdev->name);
        goto error;
    }

    /* Discipline traffic goes to its own handler; anything else may be
     * tunneled to Linux through the VNIC when it is active. */
    if (disc->disc_type == hdr->type)
        return disc->packet_rx(skb);
    else if (skb->rtdev->mac_priv->vnic_used)
        return rtmac_vnic_rx(skb, hdr->type);

  error:
    kfree_rtskb(skb);
    return -1;
}
/* ************************************************************************
 *  This function runs in rtai context.
 *  It is called from rtnetproxy_user_srq whenever there is a frame to
 *  send out. Copy the standard linux sk_buff buffer to a rtnet buffer
 *  and send it out using rtnet functions.
 * ************************************************************************ */
static inline void send_data_out(struct sk_buff *skb)
{
    struct rtskb     *rtskb;
    struct rt_rtable *rt;

    /* Overlay describing the on-wire frame layout just enough to reach
     * the IPv4 source/destination addresses. */
    struct skb_data_format
    {
        struct ethhdr ethhdr;
        char reserved[12]; /* Ugly but it works... All the
                              not-interesting header bytes */
        u32 ip_src;
        u32 ip_dst;
    } __attribute__ ((packed));  /* Important to have this structure
                                  * packed! It represents the ethernet
                                  * frame on the line and thus no spaces
                                  * are allowed! */

    struct skb_data_format *pData;
    int rc;

    /* Copy the data from the standard sk_buff to the realtime sk_buff:
     * Both have the same length. */
    rtskb = alloc_rtskb(skb->len);
    if (NULL == rtskb) {
        return;
    }

    memcpy(rtskb->data, skb->data, skb->len);
    rtskb->len = skb->len;

    pData = (struct skb_data_format*) rtskb->data;

    /* Determine the device to use: Only ip routing is used here.
     * Non-ip protocols are not supported... */
    rc = rt_ip_route_output(&rt, pData->ip_dst, pData->ip_src);
    if (rc == 0) {
        struct rtnet_device *rtdev = rt->rt_dev;
        rtskb->dst   = rt;
        rtskb->rtdev = rt->rt_dev;

        /* Fill in the ethernet headers: There is already space for the
         * header but they contain zeros only => Fill it */
        memcpy(pData->ethhdr.h_source, rtdev->dev_addr, rtdev->addr_len);
        memcpy(pData->ethhdr.h_dest, rt->rt_dst_mac_addr, rtdev->addr_len);

        /* Call the actual transmit function (this function is semaphore
         * protected): */
        rtdev_xmit(rtskb);

        /* The rtskb is freed somewhere deep in the driver...
         * No need to do it here. */
    } else {
        /* Routing failed => Free rtskb here... */
        kfree_rtskb(rtskb);
    }
}
/* Module teardown: detach from RTnet, destroy the RT objects, drain and
 * free all ring buffers, unregister the proxy net device, and release
 * the rtskb pool. Teardown order mirrors module init in reverse. */
static void __exit rtnetproxy_cleanup_module(void)
{
    /* Unregister the fallback at rtnet */
    rt_ip_register_fallback(0);

    /* free the rtai srq */
    rtdm_nrtsig_destroy(&rtnetproxy_signal);

    rtdm_task_destroy(&rtnetproxy_thread);
    rtdm_sem_destroy(&rtnetproxy_sem);

    /* Free the ringbuffers... */
    {
        struct sk_buff *del_skb;  /* standard skb */
        while ((del_skb = read_from_ringbuffer(&ring_skb_rtnet_kernel)) != 0) {
            dev_kfree_skb(del_skb);
        }
        while ((del_skb = read_from_ringbuffer(&ring_skb_kernel_rtnet)) != 0) {
            dev_kfree_skb(del_skb);
        }
    }
    {
        struct rtskb *del; /* rtnet skb */
        while ((del=read_from_ringbuffer(&ring_rtskb_kernel_rtnet))!=0) {
            kfree_rtskb(del); // Although this is kernel mode, freeing should work...
        }
        while ((del=read_from_ringbuffer(&ring_rtskb_rtnet_kernel))!=0) {
            kfree_rtskb(del); // Although this is kernel mode, freeing should work...
        }
    }

    /* Unregister the net device: */
    unregister_netdev(&dev_rtnetproxy);
    kfree(dev_rtnetproxy.priv);

    /* Reset the device structure so a reload starts from clean state;
     * re-install the init hook cleared by the memset. */
    memset(&dev_rtnetproxy, 0, sizeof(dev_rtnetproxy));
    dev_rtnetproxy.init = rtnetproxy_init;

    rtskb_pool_release(&rtskb_pool);
}
/***
 *  rt_loopback_xmit  -  begin packet transmission  (list-based variant)
 *  @skb: packet to be sent
 *  @dev: network device to which packet is sent
 *
 *  Loops a transmitted packet straight back into the receive path: the
 *  Ethernet header is re-parsed and the frame dispatched to the matching
 *  layer-3 handler from the rt_packets hash lists.
 */
static int rt_loopback_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
    unsigned short       hash;
    struct rtpacket_type *pt_entry;
    unsigned long        flags;
    rtos_time_t          time;

    /* write transmission stamp - in case any protocol ever gets the idea
       to ask the lookback device for this service... */
    if (skb->xmit_stamp) {
        rtos_get_time(&time);
        *skb->xmit_stamp =
            cpu_to_be64(rtos_time_to_nanosecs(&time) + *skb->xmit_stamp);
    }

    /* make sure that critical fields are re-intialised */
    skb->chain_end = skb;

    /* parse the Ethernet header as usual */
    skb->protocol = rt_eth_type_trans(skb, rtdev);
    skb->nh.raw   = skb->data;

    rtdev_reference(rtdev);

    rtcap_report_incoming(skb);

    hash = ntohs(skb->protocol) & RTPACKET_HASH_KEY_MASK;

    rtos_spin_lock_irqsave(&rt_packets_lock, flags);

    list_for_each_entry(pt_entry, &rt_packets[hash], list_entry)
        if (pt_entry->type == skb->protocol) {
            /* Pin the handler with a refcount so it cannot be removed
             * while we call it outside the lock. */
            pt_entry->refcount++;
            rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);

            /* Handler takes ownership of the skb. */
            pt_entry->handler(skb, pt_entry);

            rtos_spin_lock_irqsave(&rt_packets_lock, flags);
            pt_entry->refcount--;
            rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);

            goto out;
        }

    rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);

    /* don't warn if running in promiscuous mode (RTcap...?) */
    if ((rtdev->flags & IFF_PROMISC) == 0)
        rtos_print("RTnet: unknown layer-3 protocol\n");

    kfree_rtskb(skb);

  out:
    rtdev_dereference(rtdev);
    return 0;
}
/* Handle a stage-2 configuration data fragment on the client side:
 * validate it, and either queue its payload (in-order fragment) or
 * re-request via ack (out-of-order). Called with the device lock held;
 * releases it before returning. */
static void rtcfg_client_recv_stage_2_frag(int ifindex, struct rtskb *rtskb)
{
    struct rtcfg_frm_stage_2_cfg_frag *stage_2_frag;
    struct rtcfg_device               *rtcfg_dev = &device[ifindex];
    size_t                            data_len;

    /* Validate the frame length before trusting any field. */
    if (rtskb->len < sizeof(struct rtcfg_frm_stage_2_cfg_frag)) {
        rtos_res_unlock(&rtcfg_dev->dev_lock);
        RTCFG_DEBUG(1, "RTcfg: received invalid stage_2_cfg_frag frame\n");
        kfree_rtskb(rtskb);
        return;
    }

    stage_2_frag = (struct rtcfg_frm_stage_2_cfg_frag *)rtskb->data;
    __rtskb_pull(rtskb, sizeof(struct rtcfg_frm_stage_2_cfg_frag));

    /* Clamp to the remaining expected configuration data. */
    data_len = MIN(rtcfg_dev->spec.clt.cfg_len - rtcfg_dev->spec.clt.cfg_offs,
                   rtskb->len);

    if ((rtcfg_dev->flags & RTCFG_FLAG_STAGE_2_DATA) == 0) {
        RTCFG_DEBUG(1, "RTcfg: unexpected stage 2 fragment, we did not "
                    "request any data!\n");
    } else if (rtcfg_dev->spec.clt.cfg_offs !=
               ntohl(stage_2_frag->frag_offs)) {
        /* Out-of-order fragment: ack current position so the server
         * retransmits from there. */
        RTCFG_DEBUG(1, "RTcfg: unexpected stage 2 fragment (expected: %d, "
                    "received: %d)\n", rtcfg_dev->spec.clt.cfg_offs,
                    ntohl(stage_2_frag->frag_offs));
        rtcfg_send_ack(ifindex);
        rtcfg_dev->spec.clt.packet_counter = 0;
    } else {
        /* In-order: the queue takes ownership of the rtskb; NULL-ing the
         * local pointer prevents the free below. */
        rtcfg_client_queue_frag(ifindex, rtskb, data_len);
        rtskb = NULL;
    }

    rtos_res_unlock(&rtcfg_dev->dev_lock);

    if (rtskb != NULL)
        kfree_rtskb(rtskb);
}
/* RTcfg receive hook: re-charges the incoming rtskb against the RTcfg
 * buffer pool, queues it for the receive task, and signals the task.
 * Buffers are dropped when the pool is exhausted. */
static int rtcfg_rx_handler(struct rtskb *rtskb, struct rtpacket_type *pt)
{
    /* Pool exhausted — drop the frame. */
    if (rtskb_acquire(rtskb, &rtcfg_pool) != 0) {
        kfree_rtskb(rtskb);
        return 0;
    }

    /* Hold the device while the frame sits in the receive queue, then
     * wake the RTcfg receive task. */
    rtdev_reference(rtskb->rtdev);
    rtskb_queue_tail(&rx_queue, rtskb);
    rtos_event_sem_signal(&rx_event);

    return 0;
}
/***
 *  rt_loopback_xmit  -  begin packet transmission  (array-based variant)
 *  @skb: packet to be sent
 *  @dev: network device to which packet is sent
 *
 *  Loops a transmitted packet straight back into the receive path: the
 *  Ethernet header is re-parsed and the frame dispatched to the matching
 *  layer-3 handler from the rt_packets array.
 */
static int rt_loopback_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
    unsigned short       hash;
    struct rtpacket_type *pt;
    unsigned long        flags;
    rtos_time_t          time;

    /* write transmission stamp - in case any protocol ever gets the idea
       to ask the lookback device for this service... */
    if (skb->xmit_stamp) {
        rtos_get_time(&time);
        *skb->xmit_stamp =
            cpu_to_be64(rtos_time_to_nanosecs(&time) + *skb->xmit_stamp);
    }

    /* make sure that critical fields are re-intialised */
    skb->chain_end = skb;

    /* parse the Ethernet header as usual */
    skb->protocol = rt_eth_type_trans(skb, rtdev);
    skb->nh.raw   = skb->data;

    rtdev_reference(rtdev);

    rtcap_report_incoming(skb);

    hash = ntohs(skb->protocol) & (MAX_RT_PROTOCOLS-1);

    rtos_spin_lock_irqsave(&rt_packets_lock, flags);

    pt = rt_packets[hash];

    if ((pt != NULL) && (pt->type == skb->protocol)) {
        /* Pin the handler with a refcount so it cannot be removed while
         * we call it outside the lock. */
        pt->refcount++;
        rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);

        /* Handler takes ownership of the skb. */
        pt->handler(skb, pt);

        rtos_spin_lock_irqsave(&rt_packets_lock, flags);
        pt->refcount--;
        rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);
    } else {
        rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);

        rtos_print("RTnet: unknown layer-3 protocol\n");

        kfree_rtskb(skb);
    }

    rtdev_dereference(rtdev);
    return 0;
}
/***
 *  rt_ip_rcv
 *
 *  IPv4 receive entry point: validates the IP header per RFC 1122,
 *  resolves the input route, reassembles fragments if necessary, and
 *  delivers the datagram to the local transport handler.
 */
int rt_ip_rcv(struct rtskb *skb, struct rtnet_device *rtdev,
              struct rtpacket_type *pt)
{
    struct iphdr *iph;

    /* When the interface is in promisc. mode, drop all the crap
     * that it receives, do not try to analyse it. */
    if (skb->pkt_type == PACKET_OTHERHOST)
        goto drop;

    iph = skb->nh.iph;

    /*
     * RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails
     * the checksum.
     *
     * Is the datagram acceptable?
     *
     * 1. Length at least the size of an ip header
     * 2. Version of 4
     * 3. Checksums correctly. [Speed optimisation for later, skip
     *    loopback checksums]
     * 4. Doesn't have a bogus length
     */
    if (iph->ihl < 5 || iph->version != 4)
        goto drop;

    if ( ip_fast_csum((u8 *)iph, iph->ihl)!=0 )
        goto drop;

    {
        __u32 len = ntohs(iph->tot_len);
        /* Reject frames shorter than the IP total length or with a
         * total length smaller than the header itself, then trim any
         * link-layer padding. */
        if ( (skb->len<len) || (len<(iph->ihl<<2)) )
            goto drop;

        rtskb_trim(skb, len);
    }

    /* Resolve the input route unless one is already attached. */
    if (skb->dst == NULL)
        if ( rt_ip_route_input(skb, iph->daddr, iph->saddr, skb->rtdev) )
            goto drop;

    /* ip_local_deliver: reassemble fragmented datagrams first. */
    if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
        skb = rt_ip_defrag(skb);
        if (!skb)
            return 0;
    }

    return rt_ip_local_deliver_finish(skb);

  drop:
    kfree_rtskb(skb);
    return NET_RX_DROP;
}
/* ************************************************************************ * This function runs in rtai context. * * It is called from inside rtnet whenever a packet has been received that * has to be processed by rtnetproxy. * ************************************************************************ */ static int rtnetproxy_recv(struct rtskb *rtskb) { /* Acquire rtskb (JK) */ if (rtskb_acquire(rtskb, &rtskb_pool) != 0) { rtdm_printk("rtnetproxy_recv: No free rtskb in pool\n"); kfree_rtskb(rtskb); } /* Place the rtskb in the ringbuffer: */ else if (write_to_ringbuffer(&ring_rtskb_rtnet_kernel, rtskb)) { /* Switch over to kernel context: */ rtdm_nrtsig_pend(&rtnetproxy_signal); } else { /* No space in ringbuffer => Free rtskb here... */ rtdm_printk("rtnetproxy_recv: No space in queue\n"); kfree_rtskb(rtskb); } return 0; }