void send_msg(void *arg) { int ret; struct msghdr msg; struct iovec iov[2]; unsigned short msgsize = size; while(1) { iov[0].iov_base = &msgsize; iov[0].iov_len = sizeof(msgsize); iov[1].iov_base = buffer_out; iov[1].iov_len = size; memset(&msg, 0, sizeof(msg)); msg.msg_name = &dest_addr; msg.msg_namelen = sizeof(dest_addr); msg.msg_iov = iov; msg.msg_iovlen = 2; rtos_print("Sending message of %d+2 bytes\n", size); ret = rt_dev_sendmsg(sock, &msg, 0); if (ret != (int)(sizeof(msgsize) + size)) rtos_print(" rt_dev_sendmsg() = %d!\n", ret); rtos_task_wait_period(&rt_xmit_task); } }
int rtmac_proto_rx(struct rtskb *skb, struct rtpacket_type *pt) { struct rtmac_disc *disc = skb->rtdev->mac_disc; struct rtmac_hdr *hdr; if (disc == NULL) { rtos_print("RTmac: received RTmac packet on unattached device %s\n", skb->rtdev->name); goto error; } hdr = (struct rtmac_hdr *)skb->data; rtskb_pull(skb, sizeof(struct rtmac_hdr)); if (hdr->ver != RTMAC_VERSION) { rtos_print("RTmac: received unsupported RTmac protocol version on " "device %s\n", skb->rtdev->name); goto error; } if (disc->disc_type == hdr->type) return disc->packet_rx(skb); else if (skb->rtdev->mac_priv->vnic_used) return rtmac_vnic_rx(skb, hdr->type); error: kfree_rtskb(skb); return -1; }
static void alloc_collector(struct rtskb *skb, struct rtsocket *sock) { int i; unsigned int flags; struct ip_collector *p_coll; struct iphdr *iph = skb->nh.iph; /* Find free collector */ for (i = 0; i < COLLECTOR_COUNT; i++) { p_coll = &collector[i]; rtos_spin_lock_irqsave(&p_coll->frags.lock, flags); /* * This is a very simple version of a garbage collector. * Whenver the last access to any of the collectors is a while ago, * the collector will be freed... * Under normal conditions, it should never be necessary to collect * the garbage. * */ if (p_coll->in_use && (counter - p_coll->last_accessed > GARBAGE_COLLECT_LIMIT)) { kfree_rtskb(p_coll->frags.first); p_coll->in_use = 0; #ifdef FRAG_DBG rtos_print("RTnet: IP fragmentation garbage collection " "(saddr:%x, daddr:%x)\n", p_coll->saddr, p_coll->daddr); #endif } /* Collector (now) free? */ if (!p_coll->in_use) { p_coll->in_use = 1; p_coll->last_accessed = counter; p_coll->buf_size = skb->len; p_coll->frags.first = skb; p_coll->frags.last = skb; p_coll->saddr = iph->saddr; p_coll->daddr = iph->daddr; p_coll->id = iph->id; p_coll->protocol = iph->protocol; p_coll->sock = sock; rtos_spin_unlock_irqrestore(&p_coll->frags.lock, flags); return; } rtos_spin_unlock_irqrestore(&p_coll->frags.lock, flags); } rtos_print("RTnet: IP fragmentation - no collector available\n"); kfree_rtskb(skb); }
/***
 * rt_arp_table_add - insert a new IP-to-MAC mapping, unless an entry for
 *                    @ip_addr already exists (existing entries are left
 *                    untouched). New entries are taken from the free list
 *                    and pushed onto the head of the active list.
 */
void rt_arp_table_add(u32 ip_addr, unsigned char *hw_addr)
{
    struct rt_arp_table_struct *entry = rt_arp_table_lookup(ip_addr);

    /*rt_sem_wait(&arp_sem);*/
    if (entry == NULL) {
        entry = free_arp_list;
        if (!entry) {
            rtos_print("RTnet: %s(): no free arp entries\n", __FUNCTION__);
            return;
        }
        free_arp_list = free_arp_list->next;

        entry->ip_addr = ip_addr;
        memcpy(entry->hw_addr, hw_addr, RT_ARP_ADDR_LEN);

        /* push onto the head of the active list */
        entry->next = arp_list;
        if (arp_list)
            arp_list->prev = entry;
        arp_list = entry;
        /* Billa: for the rt_arp_table_del() not to crash */
        arp_list->prev = NULL;
    }
    /*rt_sem_signal(&arp_sem);*/
}
/*** * rtdev_add_pack: add protocol (Layer 3) * @pt: the new protocol */ int rtdev_add_pack(struct rtpacket_type *pt) { int hash; unsigned long flags; if (pt->type == htons(ETH_P_ALL)) return -EINVAL; hash = ntohs(pt->type) & (MAX_RT_PROTOCOLS-1); rtos_spin_lock_irqsave(&rt_packets_lock, flags); if (rt_packets[hash] == NULL) { rt_packets[hash] = pt; pt->refcount = 0; rtos_spin_unlock_irqrestore(&rt_packets_lock, flags); return 0; } else { rtos_spin_unlock_irqrestore(&rt_packets_lock, flags); rtos_print("RTnet: protocol place %d is already in use\n", hash); return -EADDRNOTAVAIL; } }
/*
 * Unregister the RTmac packet hook, sleeping in one-second steps while
 * the protocol is still referenced (rtdev_remove_pack returns -EAGAIN).
 */
void rtmac_proto_release(void)
{
    for (;;) {
        if (rtdev_remove_pack(&rtmac_packet_type) != -EAGAIN)
            break;

        rtos_print("RTmac: waiting for protocol unregistration\n");

        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(1*HZ); /* wait a second */
    }
}
/*** * rt_unregister_rtnetdev: unregister a rtnet_device * @rtdev: the device */ int rt_unregister_rtnetdev(struct rtnet_device *rtdev) { unsigned long flags_nrt, flags_rt; RTNET_ASSERT(rtdev->ifindex != 0, rtos_print("RTnet: device %s/%p was not registered\n", rtdev->name, rtdev); return -ENODEV;);
/***
 * rt_loopback_xmit - begin packet transmission
 * @skb: packet to be sent
 * @dev: network device to which packet is sent
 *
 * Loops the outgoing packet straight back into the receive path: stamps
 * it (if requested), re-parses the Ethernet header, then looks up and
 * invokes the layer-3 handler registered for the packet's protocol.
 * Always returns 0; unmatched packets are dropped.
 */
static int rt_loopback_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
    unsigned short hash;
    struct rtpacket_type *pt_entry;
    unsigned long flags;
    rtos_time_t time;

    /* write transmission stamp - in case any protocol ever gets the idea to
       ask the lookback device for this service... */
    if (skb->xmit_stamp) {
        rtos_get_time(&time);
        *skb->xmit_stamp =
            cpu_to_be64(rtos_time_to_nanosecs(&time) + *skb->xmit_stamp);
    }

    /* make sure that critical fields are re-intialised */
    skb->chain_end = skb;

    /* parse the Ethernet header as usual */
    skb->protocol = rt_eth_type_trans(skb, rtdev);
    skb->nh.raw = skb->data;

    rtdev_reference(rtdev);

    rtcap_report_incoming(skb);

    hash = ntohs(skb->protocol) & RTPACKET_HASH_KEY_MASK;

    rtos_spin_lock_irqsave(&rt_packets_lock, flags);

    list_for_each_entry(pt_entry, &rt_packets[hash], list_entry)
        if (pt_entry->type == skb->protocol) {
            /* bump refcount so the entry cannot vanish while the handler
               runs outside the lock; the handler consumes skb */
            pt_entry->refcount++;
            rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);

            pt_entry->handler(skb, pt_entry);

            rtos_spin_lock_irqsave(&rt_packets_lock, flags);
            pt_entry->refcount--;
            rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);

            goto out;
        }

    rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);

    /* don't warn if running in promiscuous mode (RTcap...?) */
    if ((rtdev->flags & IFF_PROMISC) == 0)
        rtos_print("RTnet: unknown layer-3 protocol\n");

    kfree_rtskb(skb);

  out:
    rtdev_dereference(rtdev);
    return 0;
}
/**
 * skb_over_panic - private function
 * @skb: buffer
 * @sz: size
 * @here: address
 *
 * Out of line support code for rtskb_put(). Not user callable.
 * Logs the overrun location, lengths, and owning device name.
 */
void rtskb_over_panic(struct rtskb *skb, int sz, void *here)
{
    char *name = skb->rtdev ? skb->rtdev->name : "<NULL>";

    rtos_print("RTnet: rtskb_put :over: %p:%d put:%d dev:%s\n",
               here, skb->len, sz, name);
}
/***
 * rt_loopback_xmit - begin packet transmission
 * @skb: packet to be sent
 * @dev: network device to which packet is sent
 *
 * Loops the outgoing packet back into the receive path: stamps it (if
 * requested), re-parses the Ethernet header, then dispatches to the
 * layer-3 handler found in the flat rt_packets[] table. Always returns 0;
 * unmatched packets are dropped with a warning.
 */
static int rt_loopback_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
    unsigned short hash;
    struct rtpacket_type *pt;
    unsigned long flags;
    rtos_time_t time;

    /* write transmission stamp - in case any protocol ever gets the idea to
       ask the lookback device for this service... */
    if (skb->xmit_stamp) {
        rtos_get_time(&time);
        *skb->xmit_stamp =
            cpu_to_be64(rtos_time_to_nanosecs(&time) + *skb->xmit_stamp);
    }

    /* make sure that critical fields are re-intialised */
    skb->chain_end = skb;

    /* parse the Ethernet header as usual */
    skb->protocol = rt_eth_type_trans(skb, rtdev);
    skb->nh.raw = skb->data;

    rtdev_reference(rtdev);

    rtcap_report_incoming(skb);

    hash = ntohs(skb->protocol) & (MAX_RT_PROTOCOLS-1);

    rtos_spin_lock_irqsave(&rt_packets_lock, flags);

    pt = rt_packets[hash];

    if ((pt != NULL) && (pt->type == skb->protocol)) {
        /* hold a refcount while the handler runs outside the lock;
           the handler consumes skb */
        pt->refcount++;
        rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);

        pt->handler(skb, pt);

        rtos_spin_lock_irqsave(&rt_packets_lock, flags);
        pt->refcount--;
        rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);
    } else {
        rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);

        rtos_print("RTnet: unknown layer-3 protocol\n");
        kfree_rtskb(skb);
    }

    rtdev_dereference(rtdev);

    return 0;
}
/*
 * Module teardown: release the real-time socket first (retrying while
 * buffers are still outstanding), then destroy the worker task.
 */
void cleanup_module(void)
{
    /* Important: First close the socket! */
    for (;;) {
        if (close_rt(sock) != -EAGAIN)
            break;
        rtos_print("rt_server: Not all buffers freed yet - waiting...\n");
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(1*HZ); /* wait a second */
    }

    rtos_task_delete(&rt_task);
}
/*
 * Create the /proc/rtai/rtnet entry and attach its read handler.
 * Returns 0 on success, -1 if the proc entry could not be created.
 *
 * Fix: the local pointer was declared `static` for no reason — it is never
 * read after registration, so the storage was only kept alive needlessly.
 */
static int rtnet_proc_register(void)
{
    struct proc_dir_entry *proc_rtnet_mgr;

    proc_rtnet_mgr = create_proc_entry(RTNET_PROC_NAME,
                                       S_IFREG | S_IRUGO | S_IWUSR,
                                       rtai_proc_root);
    if (!proc_rtnet_mgr) {
        rtos_print("Unable to initialize /proc/rtai/rtnet\n");
        return -1;
    }

    proc_rtnet_mgr->read_proc = rtnet_mgr_read_proc;

    return 0;
}
static int rt_icmp_glue_reply_bits(const void *p, char *to, unsigned int offset, unsigned int fraglen) { struct icmp_bxm *icmp_param = (struct icmp_bxm *)p; struct icmphdr *icmph; unsigned long csum; RTNET_ASSERT(offset == 0, rtos_print("RTnet: %s() does not support fragmentation.", __FUNCTION__); return -1;);
/*
 * PNIC2 media timer callback. The entire body is compiled out (#if 0):
 * the RTnet port deliberately disables the periodic media-sense timer,
 * so this function is currently a no-op kept for interface compatibility.
 */
void pnic2_timer(unsigned long data)
{
#if 0
    /*RTnet*/struct rtnet_device *rtdev = (/*RTnet*/struct rtnet_device *)data;
    struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
    long ioaddr = rtdev->base_addr;
    int next_tick = 60*HZ;

    if (tulip_debug > 3)
        /*RTnet*/rtos_print(KERN_INFO"%s: PNIC2 negotiation status %8.8x.\n",
            rtdev->name,inl(ioaddr + CSR12));

    if (next_tick) {
        /*RTnet*/MUST_REMOVE_mod_timer(&tp->timer, RUN_AT(next_tick));
    }
#endif
}
static void alloc_collector(struct rtskb *skb, struct rtsocket *sock) { int i; unsigned long flags; struct ip_collector *p_coll; struct iphdr *iph = skb->nh.iph; /* * Find a free collector * * Note: We once used to clean up probably outdated chains, but the * algorithm was not stable enough and could cause incorrect drops even * under medium load. If we run in overload, we will loose data anyhow. * What we should do in the future is to account collectors per socket or * socket owner and set quotations. * Garbage collection is now performed only on socket close. */ for (i = 0; i < COLLECTOR_COUNT; i++) { p_coll = &collector[i]; rtos_spin_lock_irqsave(&p_coll->frags.lock, flags); if (!p_coll->in_use) { p_coll->in_use = 1; p_coll->buf_size = skb->len; p_coll->frags.first = skb; p_coll->frags.last = skb; p_coll->saddr = iph->saddr; p_coll->daddr = iph->daddr; p_coll->id = iph->id; p_coll->protocol = iph->protocol; p_coll->sock = sock; rtos_spin_unlock_irqrestore(&p_coll->frags.lock, flags); return; } rtos_spin_unlock_irqrestore(&p_coll->frags.lock, flags); } rtos_print("RTnet: IP fragmentation - no collector available\n"); kfree_rtskb(skb); }
/*** * common reply function */ static void rt_icmp_send_reply(struct icmp_bxm *icmp_param, struct rtskb *skb) { struct dest_route rt; u32 daddr; int err; daddr = skb->nh.iph->saddr; icmp_param->head.icmph.checksum = 0; icmp_param->csum = 0; if (rt_ip_route_output(&rt, daddr) != 0) return; err = rt_ip_build_xmit(&icmp_socket, rt_icmp_glue_reply_bits, icmp_param, sizeof(struct icmphdr) + icmp_param->data_len, &rt, MSG_DONTWAIT); rtdev_dereference(rt.rtdev); RTNET_ASSERT(err == 0, rtos_print("RTnet: %s() error in xmit\n", __FUNCTION__););
/***
 * rt_udp_connect - bind the socket's default destination address/port.
 *
 * The socket must be closed and @addrlen large enough for a sockaddr_in.
 * A non-INET family resets the socket's addresses and fails with
 * -EAFNOSUPPORT; otherwise the socket moves to TCP_ESTABLISHED with the
 * given destination.
 */
int rt_udp_connect(struct rtsocket *s, const struct sockaddr *serv_addr,
                   socklen_t addrlen)
{
    struct sockaddr_in *usin = (struct sockaddr_in *)serv_addr;

    if (s->state != TCP_CLOSE)
        return -EINVAL;
    if (addrlen < (int)sizeof(struct sockaddr_in))
        return -EINVAL;

    if (usin->sin_family && (usin->sin_family != AF_INET)) {
        /* unsupported family: fall back to a fully unbound state */
        s->prot.inet.saddr = INADDR_ANY;
        s->prot.inet.daddr = INADDR_ANY;
        s->state           = TCP_CLOSE;
        return -EAFNOSUPPORT;
    }

    s->state           = TCP_ESTABLISHED;
    s->prot.inet.daddr = usin->sin_addr.s_addr;
    s->prot.inet.dport = usin->sin_port;

#ifdef DEBUG
    rtos_print("connect socket to %x:%d\n",
               ntohl(s->prot.inet.daddr), ntohs(s->prot.inet.dport));
#endif

    return 0;
}
void recv_msg(void *arg) { int ret; struct msghdr msg; struct iovec iov[2]; unsigned short msgsize = size; struct sockaddr_in addr; while(1) { iov[0].iov_base = &msgsize; iov[0].iov_len = sizeof(msgsize); iov[1].iov_base = buffer_in; iov[1].iov_len = size; memset(&msg, 0, sizeof(msg)); msg.msg_name = &addr; msg.msg_namelen = sizeof(addr); msg.msg_iov = iov; msg.msg_iovlen = 2; ret = rt_dev_recvmsg(sock, &msg, 0); if (ret <= 0) { rtos_print(" rt_dev_recvmsg() = %d\n", ret); return; } else { unsigned long ip = ntohl(addr.sin_addr.s_addr); rtos_print("received packet from %lu.%lu.%lu.%lu, length: %d+2, " "encoded length: %d,\n flags: %X, content %s\n", ip >> 24, (ip >> 16) & 0xFF, (ip >> 8) & 0xFF, ip & 0xFF, ret-sizeof(msgsize), msgsize, msg.msg_flags, (memcmp(buffer_in, buffer_out, ret-sizeof(msgsize)) == 0) ? "ok" : "corrupted"); } } }
static void rtcfg_client_recv_stage_2_cfg(int ifindex, struct rtskb *rtskb) { struct rtcfg_frm_stage_2_cfg *stage_2_cfg; struct rtcfg_device *rtcfg_dev = &device[ifindex]; size_t data_len; int ret; if (rtskb->len < sizeof(struct rtcfg_frm_stage_2_cfg)) { rtos_res_unlock(&rtcfg_dev->dev_lock); RTCFG_DEBUG(1, "RTcfg: received invalid stage_2_cfg frame\n"); kfree_rtskb(rtskb); return; } stage_2_cfg = (struct rtcfg_frm_stage_2_cfg *)rtskb->data; __rtskb_pull(rtskb, sizeof(struct rtcfg_frm_stage_2_cfg)); if (stage_2_cfg->heartbeat_period) { ret = rtos_task_init_periodic(&rtcfg_dev->timer_task, rtcfg_timer, (void *)ifindex, RTOS_LOWEST_RT_PRIORITY, ((nanosecs_t)ntohs(stage_2_cfg->heartbeat_period)) * 1000000); if (ret < 0) /*ERRMSG*/rtos_print("RTcfg: unable to create timer task\n"); else rtcfg_dev->flags |= FLAG_TIMER_STARTED; } /* add server to station list */ if (rtcfg_add_to_station_list(rtcfg_dev, rtskb->mac.ethernet->h_source, stage_2_cfg->flags) < 0) { rtos_res_unlock(&rtcfg_dev->dev_lock); RTCFG_DEBUG(1, "RTcfg: unable to process stage_2_cfg frage\n"); kfree_rtskb(rtskb); return; } rtcfg_dev->other_stations = ntohl(stage_2_cfg->stations); rtcfg_dev->spec.clt.cfg_len = ntohl(stage_2_cfg->cfg_len); data_len = MIN(rtcfg_dev->spec.clt.cfg_len, rtskb->len); if (((rtcfg_dev->flags & RTCFG_FLAG_STAGE_2_DATA) != 0) && (data_len > 0)) { rtcfg_client_queue_frag(ifindex, rtskb, data_len); rtskb = NULL; if (rtcfg_dev->stations_found == rtcfg_dev->other_stations) rtcfg_next_main_state(ifindex, RTCFG_MAIN_CLIENT_ALL_KNOWN); } else { if (rtcfg_dev->stations_found == rtcfg_dev->other_stations) { rtcfg_complete_cmd(ifindex, RTCFG_CMD_ANNOUNCE, 0); rtcfg_next_main_state(ifindex, ((rtcfg_dev->flags & RTCFG_FLAG_READY) != 0) ? RTCFG_MAIN_CLIENT_READY : RTCFG_MAIN_CLIENT_2); } else rtcfg_next_main_state(ifindex, RTCFG_MAIN_CLIENT_ALL_FRAMES); rtcfg_send_ack(ifindex); } rtos_res_unlock(&rtcfg_dev->dev_lock); if (rtskb != NULL) kfree_rtskb(rtskb); }
/***
 * rt_udp_rcv_err - UDP error-report hook; currently only logs that an
 *                  error was delivered for a UDP packet. @skb is not
 *                  examined or freed here.
 */
void rt_udp_rcv_err (struct rtskb *skb)
{
    rtos_print("RTnet: rt_udp_rcv err\n");
}
/***
 * rt_udp_sendmsg - transmit a UDP datagram described by @msg.
 *
 * The destination comes from msg->msg_name (if given and valid) or from
 * the connected socket state. Returns @len on success or a negative
 * error code.
 *
 * Fix: @len has type size_t, so the old "(len < 0)" test was always
 * false (tautological comparison) and has been removed; the upper-bound
 * check alone rejects oversized requests.
 */
int rt_udp_sendmsg(struct rtsocket *s, const struct msghdr *msg, size_t len,
                   int flags)
{
    int ulen = len + sizeof(struct udphdr);
    struct udpfakehdr ufh;
    struct rt_rtable *rt = NULL;

    u32 daddr;
    u16 dport;

    int err;

    if (len > 0xFFFF - sizeof(struct iphdr) - sizeof(struct udphdr))
        return -EMSGSIZE;

    if (flags & MSG_OOB) /* Mirror BSD error message compatibility */
        return -EOPNOTSUPP;

    if (flags & ~(MSG_DONTROUTE|MSG_DONTWAIT))
        return -EINVAL;

    if ((msg->msg_name) && (msg->msg_namelen == sizeof(struct sockaddr_in))) {
        struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;

        if ((usin->sin_family != AF_INET) && (usin->sin_family != AF_UNSPEC))
            return -EINVAL;

        daddr = usin->sin_addr.s_addr;
        dport = usin->sin_port;
    } else {
        /* no explicit destination: socket must be connected */
        if (s->state != TCP_ESTABLISHED)
            return -ENOTCONN;

        daddr = s->prot.inet.daddr;
        dport = s->prot.inet.dport;
    }

#ifdef DEBUG
    rtos_print("sendmsg to %x:%d\n", ntohl(daddr), ntohs(dport));
#endif
    if ((daddr == 0) || (dport == 0))
        return -EINVAL;

    err = rt_ip_route_output(&rt, daddr, s->prot.inet.saddr);
    if (err)
        goto out;

    /* we found a route, remember the routing dest-addr could be the
       netmask */
    ufh.saddr     = rt->rt_src;
    ufh.daddr     = daddr;
    ufh.uh.source = s->prot.inet.sport;
    ufh.uh.dest   = dport;
    ufh.uh.len    = htons(ulen);
    ufh.uh.check  = 0;
    ufh.iov       = msg->msg_iov;
    ufh.iovlen    = msg->msg_iovlen;
    ufh.wcheck    = 0;

    err = rt_ip_build_xmit(s, rt_udp_getfrag, &ufh, ulen, rt, flags);

  out:
    if (!err)
        return len;
    else
        return err;
}
/***
 * rt_ip_route_add_host: add or update host route
 *
 * If a route for @addr already exists, its device and hardware address
 * are updated in place; otherwise a freshly allocated route is inserted
 * into the hash bucket. PRIV_FLAG_ADDING_ROUTE serialises concurrent
 * adders on the same device; a device that is down or already mid-add
 * yields -EBUSY.
 *
 * NOTE(review): when allocation fails AND no existing route matches, the
 * function still returns 0 after printing an error — callers cannot
 * detect that the route was not installed. Presumably intentional
 * (best-effort), but worth confirming.
 */
int rt_ip_route_add_host(u32 addr, unsigned char *dev_addr,
                         struct rtnet_device *rtdev)
{
    unsigned long       flags;
    struct host_route   *new_route;
    struct host_route   *rt;
    unsigned int        key;

    rtos_spin_lock_irqsave(&rtdev->rtdev_lock, flags);

    /* refuse if the device is down, or if another add is in flight */
    if ((!test_bit(PRIV_FLAG_UP, &rtdev->priv_flags) ||
        test_and_set_bit(PRIV_FLAG_ADDING_ROUTE, &rtdev->priv_flags))) {
        rtos_spin_unlock_irqrestore(&rtdev->rtdev_lock, flags);
        return -EBUSY;
    }

    rtos_spin_unlock_irqrestore(&rtdev->rtdev_lock, flags);

    /* allocate optimistically before taking the table lock */
    if ((new_route = rt_alloc_host_route()) != NULL) {
        new_route->dest_host.ip    = addr;
        new_route->dest_host.rtdev = rtdev;
        memcpy(new_route->dest_host.dev_addr, dev_addr, rtdev->addr_len);
    }

    key = ntohl(addr) & HOST_HASH_KEY_MASK;

    rtos_spin_lock_irqsave(&host_table_lock, flags);

    rt = host_table[key];
    while (rt != NULL) {
        if (rt->dest_host.ip == addr) {
            /* existing entry: update in place, discard the spare */
            rt->dest_host.rtdev = rtdev;
            memcpy(rt->dest_host.dev_addr, dev_addr, rtdev->addr_len);

            if (new_route)
                rt_free_host_route(new_route);

            rtos_spin_unlock_irqrestore(&host_table_lock, flags);

            goto out;
        }

        rt = rt->next;
    }

    if (new_route) {
        /* insert at the head of the hash bucket */
        new_route->next = host_table[key];
        host_table[key] = new_route;

        rtos_spin_unlock_irqrestore(&host_table_lock, flags);
    } else {
        rtos_spin_unlock_irqrestore(&host_table_lock, flags);

        /*ERRMSG*/rtos_print("RTnet: no more host routes available\n");
    }

  out:
    clear_bit(PRIV_FLAG_ADDING_ROUTE, &rtdev->priv_flags);

    return 0;
}
/*
 * tulip_rx - drain received packets from the descriptor ring.
 *
 * Walks the Rx ring while descriptors are owned by the host, accounts
 * errors, and passes good packets up via rtnetif_rx() with @time_stamp
 * copied into each skb. The original copybreak path is compiled out
 * (#if 0) in this RTnet port: the skb already on the ring is always
 * passed up. Returns the number of packets delivered.
 */
static int tulip_rx(/*RTnet*/struct rtnet_device *rtdev, rtos_time_t *time_stamp)
{
    struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
    int entry = tp->cur_rx % RX_RING_SIZE;
    /* limit one call to at most a full ring's worth of work */
    int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
    int received = 0;

    if (tulip_debug > 4)
        /*RTnet*/rtos_print(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n",
            entry, tp->rx_ring[entry].status);
    /* If we own the next entry, it is a new packet. Send it up. */
    while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
        s32 status = le32_to_cpu(tp->rx_ring[entry].status);

        if (tulip_debug > 5)
            /*RTnet*/rtos_print(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                rtdev->name, entry, status);
        if (--rx_work_limit < 0)
            break;
        if ((status & 0x38008300) != 0x0300) {
            if ((status & 0x38000300) != 0x0300) {
                /* Ingore earlier buffers. */
                if ((status & 0xffff) != 0x7fff) {
                    if (tulip_debug > 1)
                        /*RTnet*/rtos_print(KERN_WARNING
                            "%s: Oversized Ethernet frame "
                            "spanned multiple buffers, status %8.8x!\n",
                            rtdev->name, status);
                    tp->stats.rx_length_errors++;
                }
            } else if (status & RxDescFatalErr) {
                /* There was a fatal error. */
                if (tulip_debug > 2)
                    /*RTnet*/rtos_print(KERN_DEBUG
                        "%s: Receive error, Rx status %8.8x.\n",
                        rtdev->name, status);
                tp->stats.rx_errors++; /* end of a packet.*/
                if (status & 0x0890) tp->stats.rx_length_errors++;
                if (status & 0x0004) tp->stats.rx_frame_errors++;
                if (status & 0x0002) tp->stats.rx_crc_errors++;
                if (status & 0x0001) tp->stats.rx_fifo_errors++;
            }
        } else {
            /* Omit the four octet CRC from the length. */
            short pkt_len = ((status >> 16) & 0x7ff) - 4;
            struct /*RTnet*/rtskb *skb;

#ifndef final_version
            if (pkt_len > 1518) {
                /*RTnet*/rtos_print(KERN_WARNING
                    "%s: Bogus packet size of %d (%#x).\n",
                    rtdev->name, pkt_len, pkt_len);
                pkt_len = 1518;
                tp->stats.rx_length_errors++;
            }
#endif

#if 0 /*RTnet*/
            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < tulip_rx_copybreak
                && (skb = /*RTnet*/dev_alloc_rtskb(pkt_len + 2)) != NULL) {
                skb->rtdev = rtdev;
                /*RTnet*/rtskb_reserve(skb, 2); /* 16 byte align the IP header */
                pci_dma_sync_single(tp->pdev,
                    tp->rx_buffers[entry].mapping,
                    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                //eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
                //    pkt_len, 0);
                memcpy(rtskb_put(skb, pkt_len),
                    tp->rx_buffers[entry].skb->tail, pkt_len);
#else
                memcpy(/*RTnet*/rtskb_put(skb, pkt_len),
                    tp->rx_buffers[entry].skb->tail, pkt_len);
#endif
            } else { /* Pass up the skb already on the Rx ring. */
#endif /*RTnet*/
            {
                char *temp = /*RTnet*/rtskb_put(skb = tp->rx_buffers[entry].skb,
                    pkt_len);

#ifndef final_version
                if (tp->rx_buffers[entry].mapping !=
                    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                    /*RTnet*/rtos_print(KERN_ERR
                        "%s: Internal fault: The skbuff addresses "
                        "do not match in tulip_rx: %08x vs. %08x ? / %p.\n",
                        rtdev->name,
                        le32_to_cpu(tp->rx_ring[entry].buffer1),
                        tp->rx_buffers[entry].mapping, temp);/*RTnet*/
                }
#endif

                pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                    PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                /* the ring slot gives up ownership of this skb */
                tp->rx_buffers[entry].skb = NULL;
                tp->rx_buffers[entry].mapping = 0;
            }
            skb->protocol = /*RTnet*/rt_eth_type_trans(skb, rtdev);
            /* propagate the IRQ-time stamp taken by the interrupt handler */
            memcpy(&skb->time_stamp, time_stamp, sizeof(rtos_time_t));
            /*RTnet*/rtnetif_rx(skb);

            tp->stats.rx_packets++;
            tp->stats.rx_bytes += pkt_len;
        }
        received++;
        entry = (++tp->cur_rx) % RX_RING_SIZE;
    }
    return received;
}

/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread.
*/ void tulip_interrupt(unsigned int irq, void *__data) { /*RTnet*/struct rtnet_device *rtdev = (/*RTnet*/struct rtnet_device *)__data; struct tulip_private *tp = (struct tulip_private *)rtdev->priv; long ioaddr = rtdev->base_addr; unsigned int csr5; int entry; int missed; int rx = 0; int tx = 0; int oi = 0; int maxrx = RX_RING_SIZE; int maxtx = TX_RING_SIZE; int maxoi = TX_RING_SIZE; unsigned int work_count = tulip_max_interrupt_work; rtos_time_t time_stamp; /* Read current time ASAP. It's used with RTmac. * Note: More than one packet may arrive us within one interrupt. * These packets will get the same time stamp. * WY */ rtos_get_time(&time_stamp); /* Let's see whether the interrupt really is for us */ csr5 = inl(ioaddr + CSR5); if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) { rtos_print("%s: unexpected IRQ!\n",rtdev->name); return; } tp->nir++; do { /* Acknowledge all of the current interrupt sources ASAP. */ outl(csr5 & 0x0001ffff, ioaddr + CSR5); if (tulip_debug > 4) /*RTnet*/rtos_print(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n", rtdev->name, csr5, inl(rtdev->base_addr + CSR5)); if (csr5 & (RxIntr | RxNoBuf)) { rx += tulip_rx(rtdev, &time_stamp); tulip_refill_rx(rtdev); } if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) { unsigned int dirty_tx; rtos_spin_lock(&tp->lock); for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0; dirty_tx++) { int entry = dirty_tx % TX_RING_SIZE; int status = le32_to_cpu(tp->tx_ring[entry].status); if (status < 0) break; /* It still has not been Txed */ /* Check for Rx filter setup frames. */ if (tp->tx_buffers[entry].skb == NULL) { /* test because dummy frames not mapped */ if (tp->tx_buffers[entry].mapping) pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, sizeof(tp->setup_frame), PCI_DMA_TODEVICE); continue; } if (status & 0x8000) { /* There was an major error, log it. 
*/ #ifndef final_version if (tulip_debug > 1) /*RTnet*/rtos_print(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", rtdev->name, status); #endif tp->stats.tx_errors++; if (status & 0x4104) tp->stats.tx_aborted_errors++; if (status & 0x0C00) tp->stats.tx_carrier_errors++; if (status & 0x0200) tp->stats.tx_window_errors++; if (status & 0x0002) tp->stats.tx_fifo_errors++; if ((status & 0x0080) && tp->full_duplex == 0) tp->stats.tx_heartbeat_errors++; } else { tp->stats.tx_bytes += tp->tx_buffers[entry].skb->len; tp->stats.collisions += (status >> 3) & 15; tp->stats.tx_packets++; } pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, tp->tx_buffers[entry].skb->len, PCI_DMA_TODEVICE); /* Free the original skb. */ /*RTnet*/dev_kfree_rtskb(tp->tx_buffers[entry].skb); tp->tx_buffers[entry].skb = NULL; tp->tx_buffers[entry].mapping = 0; tx++; rtnetif_tx(rtdev); } #ifndef final_version if (tp->cur_tx - dirty_tx > TX_RING_SIZE) { /*RTnet*/rtos_print(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n", rtdev->name, dirty_tx, tp->cur_tx); dirty_tx += TX_RING_SIZE; } #endif if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2) /*RTnet*/rtnetif_wake_queue(rtdev); tp->dirty_tx = dirty_tx; if (csr5 & TxDied) { if (tulip_debug > 2) /*RTnet*/rtos_print(KERN_WARNING "%s: The transmitter stopped." " CSR5 is %x, CSR6 %x, new CSR6 %x.\n", rtdev->name, csr5, inl(ioaddr + CSR6), tp->csr6); tulip_restart_rxtx(tp); } rtos_spin_unlock(&tp->lock); } /* Log errors. */ if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */ if (csr5 == 0xffffffff) break; #if 0 /*RTnet*/ if (csr5 & TxJabber) tp->stats.tx_errors++; if (csr5 & TxFIFOUnderflow) { if ((tp->csr6 & 0xC000) != 0xC000) tp->csr6 += 0x4000; /* Bump up the Tx threshold */ else tp->csr6 |= 0x00200000; /* Store-n-forward. */ /* Restart the transmit process. 
*/ tulip_restart_rxtx(tp); outl(0, ioaddr + CSR1); } if (csr5 & (RxDied | RxNoBuf)) { if (tp->flags & COMET_MAC_ADDR) { outl(tp->mc_filter[0], ioaddr + 0xAC); outl(tp->mc_filter[1], ioaddr + 0xB0); } } if (csr5 & RxDied) { /* Missed a Rx frame. */ tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff; tp->stats.rx_errors++; tulip_start_rxtx(tp); } /* * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this * call is ever done under the spinlock */ if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) { if (tp->link_change) (tp->link_change)(rtdev, csr5); } if (csr5 & SytemError) { int error = (csr5 >> 23) & 7; /* oops, we hit a PCI error. The code produced corresponds * to the reason: * 0 - parity error * 1 - master abort * 2 - target abort * Note that on parity error, we should do a software reset * of the chip to get it back into a sane state (according * to the 21142/3 docs that is). * -- rmk */ /*RTnet*/rtos_print(KERN_ERR "%s: (%lu) System Error occured (%d)\n", rtdev->name, tp->nir, error); } #endif /*RTnet*/ /*RTnet*/rtos_print(KERN_ERR "%s: Error detected, device may not work any more!\n", rtdev->name); /* Clear all error sources, included undocumented ones! */ outl(0x0800f7ba, ioaddr + CSR5); oi++; } if (csr5 & TimerInt) { if (tulip_debug > 2) /*RTnet*/rtos_print(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n", rtdev->name, csr5); outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7); tp->ttimer = 0; oi++; } if (tx > maxtx || rx > maxrx || oi > maxoi) { if (tulip_debug > 1) /*RTnet*/rtos_print(KERN_WARNING "%s: Too much work during an interrupt, " "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", rtdev->name, csr5, tp->nir, tx, rx, oi); /* Acknowledge all interrupt sources. 
*/ outl(0x8001ffff, ioaddr + CSR5); if (tp->flags & HAS_INTR_MITIGATION) { /* Josip Loncaric at ICASE did extensive experimentation to develop a good interrupt mitigation setting.*/ outl(0x8b240000, ioaddr + CSR11); } else if (tp->chip_id == LC82C168) { /* the LC82C168 doesn't have a hw timer.*/ outl(0x00, ioaddr + CSR7); /*RTnet*/ //MUST_REMOVE_mod_timer(&tp->timer, RUN_AT(HZ/50)); } else { /* Mask all interrupting sources, set timer to re-enable. */ } break; } work_count--; if (work_count == 0) break; csr5 = inl(ioaddr + CSR5); } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
int init_module(void) { unsigned int add_rtskbs = 30; int ret; unsigned long local_ip; unsigned long client_ip; if (strlen(local_ip_s) != 0) local_ip = rt_inet_aton(local_ip_s); else local_ip = INADDR_ANY; client_ip = rt_inet_aton(client_ip_s); if (reply_size < sizeof(nanosecs_t)) reply_size = sizeof(nanosecs_t); rtos_print("local ip address %s(%8x):%d\n", local_ip_s, (unsigned int)local_ip, RCV_PORT); rtos_print("client ip address %s(%8x):%d\n", client_ip_s, (unsigned int)client_ip, XMT_PORT); rtos_print("reply message size=%d\n", reply_size); /* create rt-socket */ rtos_print("create rtsocket\n"); if ((sock = socket_rt(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) { rtos_print("socket not created\n"); return sock; } /* bind the rt-socket to local_addr */ rtos_print("bind rtsocket to local address:port\n"); memset(&local_addr, 0, sizeof(struct sockaddr_in)); local_addr.sin_family = AF_INET; local_addr.sin_port = htons(RCV_PORT); local_addr.sin_addr.s_addr = local_ip; if ((ret = bind_rt(sock, (struct sockaddr *)&local_addr, sizeof(struct sockaddr_in))) < 0) { close_rt(sock); rtos_print("can't bind rtsocket\n"); return ret; } /* set client-addr */ rtos_print("connect rtsocket to client address:port\n"); memset(&client_addr, 0, sizeof(struct sockaddr_in)); client_addr.sin_family = AF_INET; client_addr.sin_port = htons(XMT_PORT); client_addr.sin_addr.s_addr = client_ip; if ((ret = connect_rt(sock, (struct sockaddr *)&client_addr, sizeof(struct sockaddr_in))) < 0) { close_rt(sock); rtos_print("can't connect rtsocket\n"); return ret; } /* extend the socket pool */ ret = ioctl_rt(sock, RTNET_RTIOC_EXTPOOL, &add_rtskbs); if (ret != (int)add_rtskbs) { close_rt(sock); rtos_print("ioctl_rt(RTNET_RTIOC_EXTPOOL) = %d\n", ret); return -1; } ret = rtos_task_init(&rt_task, (void *)process, 0, 10); return ret; }
/*
 * pnic2_start_nway - (re)start IEEE 802.3u autonegotiation on the PNIC2.
 *
 * Programs CSR14 with the capabilities to advertise (derived from
 * tp->sym_advertise), puts CSR6 into NWAY mode, and finally kicks the
 * negotiation off through CSR12. Completion is handled asynchronously by
 * pnic2_lnk_change().
 */
void pnic2_start_nway(/*RTnet*/struct rtnet_device *rtdev)
{
    struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
    long ioaddr = rtdev->base_addr;
    int csr14;
    int csr12;

    /* set up what to advertise during the negotiation */

    /* load in csr14 and mask off bits not to touch
     * comment at top of file explains mask value
     */
    csr14 = (inl(ioaddr + CSR14) & 0xfff0ee39);

    /* bit 17 - advetise 100baseTx-FD */
    if (tp->sym_advertise & 0x0100) csr14 |= 0x00020000;

    /* bit 16 - advertise 100baseTx-HD */
    if (tp->sym_advertise & 0x0080) csr14 |= 0x00010000;

    /* bit 6 - advertise 10baseT-HD */
    if (tp->sym_advertise & 0x0020) csr14 |= 0x00000040;

    /* Now set bit 12 Link Test Enable, Bit 7 Autonegotiation Enable
     * and bit 0 Don't PowerDown 10baseT
     */
    csr14 |= 0x00001184;

    if (tulip_debug > 1)
        /*RTnet*/rtos_print(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, "
            "csr14=%8.8x.\n", rtdev->name, csr14);

    /* tell pnic2_lnk_change we are doing an nway negotiation */
    rtdev->if_port = 0;
    tp->nway = tp->mediasense = 1;
    tp->nwayset = tp->lpar = 0;

    /* now we have to set up csr6 for NWAY state */

    tp->csr6 = inl(ioaddr + CSR6);
    if (tulip_debug > 1)
        /*RTnet*/rtos_print(KERN_DEBUG "%s: On Entry to Nway, "
            "csr6=%8.8x.\n", rtdev->name, tp->csr6);

    /* mask off any bits not to touch
     * comment at top of file explains mask value
     */
    tp->csr6 = tp->csr6 & 0xfe3bd1fd;

    /* don't forget that bit 9 is also used for advertising */
    /* advertise 10baseT-FD for the negotiation (bit 9) */
    if (tp->sym_advertise & 0x0040) tp->csr6 |= 0x00000200;

    /* set bit 24 for nway negotiation mode ...
     * see Data Port Selection comment at top of file
     * and "Stop" - reset both Transmit (bit 13) and Receive (bit 1)
     */
    tp->csr6 |= 0x01000000;
    outl(csr14, ioaddr + CSR14);
    outl(tp->csr6, ioaddr + CSR6);
    udelay(100);

    /* all set up so now force the negotiation to begin */

    /* read in current values and mask off all but the
     * Autonegotiation bits 14:12.  Writing a 001 to those bits
     * should start the autonegotiation
     */
    csr12 = (inl(ioaddr + CSR12) & 0xffff8fff);
    csr12 |= 0x1000;
    outl(csr12, ioaddr + CSR12);
}
int init_module(void) { unsigned int nonblock = 1; struct sockaddr_in local_addr; struct rtnet_callback callback = {sync_callback, NULL}; rtos_print("rt_event is using the following parameters:\n" " mode = %s\n" " io = 0x%04X\n" " irq = %d\n" " my_ip = %s\n" " dest_ip = %s\n", (mode == MODE_PAR) ? "parallel port" : "serial port", io, irq, my_ip, dest_ip); tdma = open_rt(rtmac_dev, O_RDONLY); if (tdma < 0) { rtos_print("ERROR: RTmac/TDMA not loaded!\n"); return -ENODEV; } sock = socket_rt(AF_INET,SOCK_DGRAM,0); memset(&local_addr, 0, sizeof(struct sockaddr_in)); local_addr.sin_family = AF_INET; local_addr.sin_port = htons(SYNC_PORT); local_addr.sin_addr.s_addr = (strlen(my_ip) != 0) ? rt_inet_aton(my_ip) : INADDR_ANY; bind_rt(sock, (struct sockaddr*)&local_addr, sizeof(struct sockaddr_in)); /* switch to non-blocking */ ioctl_rt(sock, RTNET_RTIOC_NONBLOCK, &nonblock); memset(&dest_addr, 0, sizeof(struct sockaddr_in)); dest_addr.sin_family = AF_INET; dest_addr.sin_port = htons(REPORT_PORT); dest_addr.sin_addr.s_addr = rt_inet_aton(dest_ip); ioctl_rt(sock, RTNET_RTIOC_CALLBACK, &callback); rtos_event_sem_init(&event_sem); if (rtos_irq_request(&irq_handle, irq, irq_handler, NULL) != 0) { rtos_print("ERROR: irq not available!\n"); rtos_event_sem_delete(&event_sem); return -EINVAL; } if (mode == MODE_PAR) { /* trigger interrupt on Acknowledge pin (10) */ outb(0x10, PAR_CONTROL); } else { /* don't forget to specify io and irq (e.g. 0x3F8 / 4) */ outb(0x00, SER_LCR); outb(0x00, SER_IER); /* clear irq sources */ while ((inb(SER_IIR) & 0x01) == 0) { rtos_print("Loop init\n"); inb(SER_LSR); inb(SER_DATA); inb(SER_MSR); } /* enable RTS output and set OUT2 */ outb(0x0A, SER_MCR); /* trigger interrupt on modem status line change */ outb(0x00, SER_LCR); outb(0x0D, SER_IER); } rtos_irq_enable(&irq_handle); return rtos_task_init(&task, event_handler, 0, 10); }
void pnic2_lnk_change(/*RTnet*/struct rtnet_device *rtdev, int csr5) { struct tulip_private *tp = (struct tulip_private *)rtdev->priv; long ioaddr = rtdev->base_addr; int csr14; /* read the staus register to find out what is up */ int csr12 = inl(ioaddr + CSR12); if (tulip_debug > 1) /*RTnet*/rtos_print(KERN_INFO"%s: PNIC2 link status interrupt %8.8x, " " CSR5 %x, %8.8x.\n", rtdev->name, csr12, csr5, inl(ioaddr + CSR14)); /* If NWay finished and we have a negotiated partner capability. * check bits 14:12 for bit pattern 101 - all is good */ if (tp->nway && !tp->nwayset) { /* we did an auto negotiation */ if ((csr12 & 0x7000) == 0x5000) { /* negotiation ended successfully */ /* get the link partners reply and mask out all but * bits 24-21 which show the partners capabilites * and match those to what we advertised * * then begin to interpret the results of the negotiation. * Always go in this order : (we are ignoring T4 for now) * 100baseTx-FD, 100baseTx-HD, 10baseT-FD, 10baseT-HD */ int negotiated = ((csr12 >> 16) & 0x01E0) & tp->sym_advertise; tp->lpar = (csr12 >> 16); tp->nwayset = 1; if (negotiated & 0x0100) rtdev->if_port = 5; else if (negotiated & 0x0080) rtdev->if_port = 3; else if (negotiated & 0x0040) rtdev->if_port = 4; else if (negotiated & 0x0020) rtdev->if_port = 0; else { if (tulip_debug > 1) /*RTnet*/rtos_print(KERN_INFO "%s: funny autonegotiate result " "csr12 %8.8x advertising %4.4x\n", rtdev->name, csr12, tp->sym_advertise); tp->nwayset = 0; /* so check if 100baseTx link state is okay */ if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180)) rtdev->if_port = 3; } /* now record the duplex that was negotiated */ tp->full_duplex = 0; if ((rtdev->if_port == 4) || (rtdev->if_port == 5)) tp->full_duplex = 1; if (tulip_debug > 1) { if (tp->nwayset) /*RTnet*/rtos_print(KERN_INFO "%s: Switching to %s based on link " "negotiation %4.4x & %4.4x = %4.4x.\n", rtdev->name, medianame[rtdev->if_port], tp->sym_advertise, tp->lpar, negotiated); } /* remember to turn 
off bit 7 - autonegotiate * enable so we can properly end nway mode and * set duplex (ie. use csr6<9> again) */ csr14 = (inl(ioaddr + CSR14) & 0xffffff7f); outl(csr14,ioaddr + CSR14); /* now set the data port and operating mode * (see the Data Port Selection comments at * the top of the file */ /* get current csr6 and mask off bits not to touch */ /* see comment at top of file */ tp->csr6 = (inl(ioaddr + CSR6) & 0xfe3bd1fd); /* so if using if_port 3 or 5 then select the 100baseT * port else select the 10baseT port. * See the Data Port Selection table at the top * of the file which was taken from the PNIC_II.PDF * datasheet */ if (rtdev->if_port & 1) tp->csr6 |= 0x01840000; else tp->csr6 |= 0x00400000; /* now set the full duplex bit appropriately */ if (tp->full_duplex) tp->csr6 |= 0x00000200; outl(1, ioaddr + CSR13); if (tulip_debug > 2) /*RTnet*/rtos_print(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 " "%8.8x.\n", rtdev->name, tp->csr6, inl(ioaddr + CSR6), inl(ioaddr + CSR12)); /* now the following actually writes out the * new csr6 values */ tulip_start_rxtx(tp); return; } else {
/***
 * rt_ip_local_deliver - hand a received IP packet to the matching
 * transport-layer protocol, reassembling fragments first if needed.
 *
 * @skb: received rtskb; skb->nh.iph must already point at the IP header.
 *
 * Returns the receive handler's result, the fallback handler's result
 * (if CONFIG_RTNET_ADDON_PROXY is enabled), or 0 when the packet was
 * consumed/dropped here.  Ownership of @skb is always taken: it is
 * either passed on or freed before returning.
 */
static inline int rt_ip_local_deliver(struct rtskb *skb)
{
    struct iphdr *iph = skb->nh.iph;
    unsigned short protocol = iph->protocol;
    struct rtinet_protocol *ipprot;
    struct rtsocket *sock;
    int ret;

    /* Protocol table lookup is by hash key; the entry may belong to a
     * different protocol, hence the explicit compare below. */
    ipprot = rt_inet_protocols[rt_inet_hashkey(protocol)];

    /* Check if we are supporting the protocol */
    if ((ipprot != NULL) && (ipprot->protocol == protocol)) {
        /* Strip the IP header (ihl is measured in 32-bit words). */
        __rtskb_pull(skb, iph->ihl*4);

        /* Point into the IP datagram, just past the header. */
        skb->h.raw = skb->data;

        /* Reassemble IP fragments */
        if (iph->frag_off & htons(IP_MF|IP_OFFSET)) {
            skb = rt_ip_defrag(skb, ipprot);
            /* NULL means the datagram is not complete yet (or was
             * dropped) — nothing more to deliver for now. */
            if (!skb)
                return 0;
        } else {
            /* Get the destination socket */
            if ((sock = ipprot->dest_socket(skb)) == NULL) {
                kfree_rtskb(skb);
                return 0;
            }

            /* Acquire the rtskb at the expense of the protocol pool */
            ret = rtskb_acquire(skb, &sock->skb_pool);

            /* Socket is now implicitely locked by the rtskb */
            rt_socket_dereference(sock);

            if (ret != 0) {
                /* Socket pool exhausted — drop the packet. */
                kfree_rtskb(skb);
                return 0;
            }
        }

        /* Deliver the packet to the next layer */
        ret = ipprot->rcv_handler(skb);
    } else {
#ifdef CONFIG_RTNET_ADDON_PROXY
        /* If a fallback handler for IP protocol has been installed,
         * call it! */
        if (ip_fallback_handler) {
            ret = ip_fallback_handler(skb);

            if (ret) {
                rtos_print("RTnet: fallback handler failed\n");
            }
            return ret;
        }
#endif /* CONFIG_RTNET_ADDON_PROXY */

        rtos_print("RTnet: no protocol found\n");
        kfree_rtskb(skb);
        ret = 0;
    }

    return ret;
}
/*
 * Append an IP fragment to the collector matching its header.
 *
 * Looks up the collector whose (saddr, daddr, id, protocol) tuple fits
 * the iphdr of @skb and queues the fragment at the tail of that
 * collector's chain.  Fragments must arrive strictly in order: @offset
 * has to equal the payload bytes accumulated so far (buf_size).
 *
 * @skb:        the fragment to add (ownership is taken in all cases)
 * @offset:     fragment offset taken from the IP header, in bytes
 * @more_frags: non-zero while further fragments are expected (IP_MF)
 *
 * Returns the first rtskb of the completed chain once the last fragment
 * arrived, or NULL (fragment stored, or chain dropped on error).
 */
static struct rtskb *add_to_collector(struct rtskb *skb, unsigned int offset, int more_frags)
{
    int i;
    unsigned int flags;
    struct ip_collector *p_coll;
    struct iphdr *iph = skb->nh.iph;
    struct rtskb *first_skb;

    /* Search in existing collectors */
    for (i = 0; i < COLLECTOR_COUNT; i++) {
        p_coll = &collector[i];

        /* Each collector has its own lock; hold it across the whole
         * match-and-append sequence. */
        rtos_spin_lock_irqsave(&p_coll->frags.lock, flags);

        if (p_coll->in_use &&
            (iph->saddr == p_coll->saddr) && (iph->daddr == p_coll->daddr) &&
            (iph->id == p_coll->id) && (iph->protocol == p_coll->protocol)) {
            first_skb = p_coll->frags.first;

            /* Acquire the rtskb at the expense of the protocol pool */
            if (rtskb_acquire(skb, &p_coll->sock->skb_pool) != 0) {
                /* We have to drop this fragment => clean up the whole chain */
                p_coll->in_use = 0;
                rtos_spin_unlock_irqrestore(&p_coll->frags.lock, flags);
#ifdef FRAG_DBG
                rtos_print("RTnet: Compensation pool empty - IP fragments "
                           "dropped (saddr:%x, daddr:%x)\n",
                           iph->saddr, iph->daddr);
#endif
                /* The new fragment is not chained yet, so both the old
                 * chain and the fragment must be freed separately. */
                kfree_rtskb(first_skb);
                kfree_rtskb(skb);
                return NULL;
            }

            /* Optimized version of __rtskb_queue_tail */
            skb->next = NULL;
            p_coll->frags.last->next = skb;
            p_coll->frags.last = skb;

            /* Extend the chain */
            first_skb->chain_end = skb;

            /* Sanity check: unordered fragments are not allowed! */
            if (offset != p_coll->buf_size) {
                /* We have to drop this fragment => clean up the whole chain.
                 * The fragment is already linked in, so freeing first_skb
                 * below (via skb) releases it as well. */
                p_coll->in_use = 0;
                skb = first_skb;
                rtos_spin_unlock_irqrestore(&p_coll->frags.lock, flags);
                break; /* leave the for loop */
            }

            /* Refresh the garbage-collection timestamp and account the
             * newly added payload. */
            p_coll->last_accessed = counter;
            p_coll->buf_size += skb->len;

            if (!more_frags) {
                /* Last fragment: patch the reassembled datagram's total
                 * length into the first fragment's IP header and release
                 * the collector. */
                first_skb->nh.iph->tot_len =
                    htons(p_coll->buf_size + sizeof(struct iphdr));
                p_coll->in_use = 0;
                rtos_spin_unlock_irqrestore(&p_coll->frags.lock, flags);
                return first_skb;
            } else {
                rtos_spin_unlock_irqrestore(&p_coll->frags.lock, flags);
                return NULL;
            }
        }
        rtos_spin_unlock_irqrestore(&p_coll->frags.lock, flags);
    }

#ifdef FRAG_DBG
    /* NOTE(review): this message is also reached when no collector
     * matched at all (not only for out-of-order fragments) — the text
     * is slightly misleading in that case. */
    rtos_print("RTnet: Unordered IP fragment (saddr:%x, daddr:%x)"
               " - dropped\n", iph->saddr, iph->daddr);
#endif

    kfree_rtskb(skb);
    return NULL;
}