/* NOTE(review): this chunk starts mid-way through the kvblade service
 * thread's main loop and ends mid-way through kvblade_module_init();
 * the enclosing definitions are not fully visible here. */
			ktrcv(iskb);
			/* Drain the output queue onto the wire. */
			if ((oskb = skb_dequeue(&skb_outq)))
				dev_queue_xmit(oskb);
		} while (iskb || oskb);
		/* Both queues empty: sleep until new work arrives. */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ktwaitq, &wait);
		schedule();
		remove_wait_queue(&ktwaitq, &wait);
	} while (!kthread_should_stop());
	__set_current_state(TASK_RUNNING);
	complete(&ktrendez);	/* rendezvous with whoever is stopping us */
	return 0;
}

/* Receive hook for ATA-over-Ethernet frames. */
static struct packet_type pt = {
	.type = __constant_htons(ETH_P_AOE),
	.func = rcv,
};

/* Module init: set up the queues/locks and start the service thread. */
static int __init kvblade_module_init(void)
{
	skb_queue_head_init(&skb_outq);
	skb_queue_head_init(&skb_inq);

	spin_lock_init(&lock);

	init_completion(&ktrendez);
	init_waitqueue_head(&ktwaitq);

	task = kthread_run(kthread, NULL, "kvblade");
	if (task == NULL || IS_ERR(task))
/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow: output flow_keys; zeroed here and filled in on success
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the struct flow_keys from either the skbuff
 * or a raw buffer specified by the rest parameters.
 * Returns false when the packet cannot be dissected.
 */
bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
			void *data, __be16 proto, int nhoff, int hlen)
{
	u8 ip_proto;

	/* Default all dissection parameters from the skb when no raw
	 * buffer was supplied. */
	if (!data) {
		data = skb->data;
		proto = skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
	}

	memset(flow, 0, sizeof(*flow));

again:
	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5)
			return false;
		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;
		if (ip_is_fragment(iph))
			ip_proto = 0;	/* fragments carry no usable L4 header */

		/* skip the address processing if skb is NULL. The assumption
		 * here is that if there is no skb we are not looking for flow
		 * info but lengths and protocols.
		 */
		if (!skb)
			break;

		iph_to_flow_copy_addrs(flow, iph);
		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
		__be32 flow_label;
ipv6:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		/* see comment above in IPv4 section */
		if (!skb)
			break;

		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);

		flow_label = ip6_flowlabel(iph);
		if (flow_label) {
			/* Awesome, IPv6 packet has a flow label so we can
			 * use that to represent the ports without any
			 * further dissection.
			 */
			flow->n_proto = proto;
			flow->ip_proto = ip_proto;
			flow->ports = flow_label;
			flow->thoff = (u16)nhoff;

			return true;
		}

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan);
		if (!vlan)
			return false;

		/* Step past the VLAN tag and re-dissect the inner protocol. */
		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		/* Only IP/IPv6 inside PPPoE can be dissected further. */
		switch (proto) {
		case htons(PPP_IP):
			goto ip;
		case htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	case __constant_htons(ETH_P_MAP): {
		struct {
			struct rmnet_map_header_s map;
			uint8_t proto;
		} *map, _map;
		unsigned int maplen;

		/* NOTE(review): unlike every other case this uses
		 * skb_header_pointer() and skb->len directly, so it would
		 * dereference a NULL @skb when only raw @data is supplied
		 * -- confirm callers never pass ETH_P_MAP that way. */
		map = skb_header_pointer(skb, nhoff, sizeof(_map), &_map);
		if (!map)
			return false;

		/* Is MAP command? */
		if (map->map.cd_bit)
			return false;

		/* Is aggregated frame?
		 */
		maplen = ntohs(map->map.pkt_len);
		maplen += map->map.pad_len;
		maplen += sizeof(struct rmnet_map_header_s);
		if (maplen < skb->len)
			return false;

		nhoff += sizeof(struct rmnet_map_header_s);
		switch (map->proto & RMNET_IP_VER_MASK) {
		case RMNET_IPV4:
			proto = htons(ETH_P_IP);
			goto ip;
		case RMNET_IPV6:
			proto = htons(ETH_P_IPV6);
			goto ipv6;
		default:
			return false;
		}
	}
	case htons(ETH_P_FCOE):
		flow->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
		/* fall through */
	default:
		return false;
	}

	/* Second stage: look through tunnels for the real L4 protocol. */
	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
			proto = hdr->proto;
			nhoff += 4;
			/* Each optional GRE field is 4 bytes. */
			if (hdr->flags & GRE_CSUM)
				nhoff += 4;
			if (hdr->flags & GRE_KEY)
				nhoff += 4;
			if (hdr->flags & GRE_SEQ)
				nhoff += 4;
			if (proto == htons(ETH_P_TEB)) {
				/* Transparent Ethernet Bridging: skip the
				 * inner ethernet header too. */
				const struct ethhdr *eth;
				struct ethhdr _eth;

				eth = __skb_header_pointer(skb, nhoff,
							   sizeof(_eth),
							   data, hlen, &_eth);
				if (!eth)
					return false;
				proto = eth->h_proto;
				nhoff += sizeof(*eth);
			}
			goto again;
		}
		break;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);
		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);
		goto ipv6;
	default:
		break;
	}

	flow->n_proto = proto;
	flow->ip_proto = ip_proto;
	flow->thoff = (u16) nhoff;

	/* unless skb is set we don't need to record port info */
	if (skb)
		flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
						   data, hlen);

	return true;
}
/* Forward declarations for the FSM virtual network device callbacks. */
void __fsm_dev_init(struct net_device *dev);
int __fsm_dev_open(struct net_device *dev);
int __fsm_dev_stop(struct net_device *dev);
int __fsm_dev_set_config(struct net_device *dev, struct ifmap *p_ifmap);
int __fsm_dev_recv(struct sk_buff *skb ,struct net_device *dev1,
		   struct packet_type *pktype, struct net_device *dev2);
int __fsm_dev_tx(struct sk_buff *skb, struct net_device *dev);
int __fsm_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
struct net_device_stats* __fsm_dev_get_stats(struct net_device *dev);
void __fsm_dev_tx_timeout(struct net_device *p_net_device);
/* NOTE(review): duplicate prototype of __fsm_dev_init above. */
void __fsm_dev_init(struct net_device *dev);
/*---------------------------------------------------------------------------*/
/* this is used for register the special packet type that processed by DYS */
struct packet_type pk_t = {
	.type = __constant_htons(MY_PROTOCOL),
	.func = __fsm_dev_recv,
};

/* net_device_ops vector wiring the callbacks above into the stack. */
static const struct net_device_ops fsm_netdev_ops = {
	.ndo_open = __fsm_dev_open,
	.ndo_stop = __fsm_dev_stop,
	.ndo_start_xmit = __fsm_dev_tx,
	.ndo_get_stats = __fsm_dev_get_stats,
	.ndo_do_ioctl = __fsm_dev_ioctl,
	.ndo_set_config = __fsm_dev_set_config,
	.ndo_tx_timeout = __fsm_dev_tx_timeout,
};

/* Per-device private state (definition continues past this chunk). */
typedef struct fsm_priv {
* @sap: SAP to be closed.
*
* Close interface function to upper layer. Each one who wants to
* close an open SAP (for example NetBEUI) should call this function.
* Removes this sap from the list of saps in the station and then
* frees the memory for this sap.
*/
void llc_sap_close(struct llc_sap *sap)
{
	/* A SAP must not be closed while sockets are still bound to it. */
	WARN_ON(!hlist_empty(&sap->sk_list.list));
	llc_del_sap(sap);
	kfree(sap);
}

/* 802.2 LLC over Ethernet. */
static struct packet_type llc_packet_type = {
	.type = __constant_htons(ETH_P_802_2),
	.func = llc_rcv,
};

/* 802.2 LLC over Token Ring. */
static struct packet_type llc_tr_packet_type = {
	.type = __constant_htons(ETH_P_TR_802_2),
	.func = llc_rcv,
};

/* Module init (truncated in this chunk). */
static int __init llc_init(void)
{
	struct net_device *dev;

	/* Start from the second device, if any (skips the first). */
	dev = first_net_device(&init_net);
	if (dev != NULL)
		dev = next_net_device(dev);
/** 根据tcp数据生成数据包 根据tcp数据,生成数据包,并填充 mac/ip/tcp 头部信息 @param skb 原始的sk_buff结构地址 @param names 网卡名称结构首地址 @param num 网卡个数 @param tcpdata tcp数据地址 @param tcpdatalen tcp数据长度 @return 成功返回数据包地址,失败返回NULL。 */ struct sk_buff *pkg_skbuff_generate(struct sk_buff *skb, struct client_nicname *names, int num, char *tcpdata, int tcpdatalen) { struct sk_buff *new_skb = NULL; struct net_device *dev = NULL; struct iphdr *iph = NULL,*new_iph = NULL; struct tcphdr *tcph = NULL,*new_tcph = NULL; struct ethhdr *ethdr = NULL; char *newpdata = NULL; unsigned char * mac_header_addr = NULL; int i = 0; if(!skb || !names) { goto out; } iph = ip_hdr(skb); if(iph == NULL) { goto out; } tcph = (struct tcphdr *)((char *)iph + iph->ihl*4); if(tcph == NULL) { goto out; } ethdr = eth_hdr(skb); if(ethdr == NULL) { goto out; } for (i=0; names[i].index != -1; i++) { #if (LINUX_VERSION_CODE < KERNEL_VERSION (2, 6, 24))//不确定版本号是否应该更早 dev = dev_get_by_name(names[i].name); #else dev = dev_get_by_name(&init_net, names[i].name); #endif if (dev != NULL) break; } if (dev == NULL) { goto out; } new_skb = alloc_skb(tcpdatalen + iph->ihl*4 + tcph->doff*4 + 14, GFP_ATOMIC); if(new_skb == NULL) { goto out; } #if (LINUX_VERSION_CODE < KERNEL_VERSION (3, 11, 0)) new_skb->mac_header = new_skb->data; skb_reserve(new_skb,14); new_skb->transport_header = new_skb->data; new_skb->network_header = new_skb->data; //get_route_mac(iph->saddr, iph->daddr); memcpy(&new_skb->mac_header[0], ethdr->h_source, 6); memcpy(&new_skb->mac_header[6], ethdr->h_dest, 6); new_skb->mac_header[12] = 0x08; new_skb->mac_header[13] = 0x00; #else skb_reset_mac_header(new_skb); skb_reserve(new_skb,14); skb_reset_transport_header(new_skb); skb_reset_network_header(new_skb); mac_header_addr=skb_mac_header(new_skb); if(mac_header_addr==NULL) { printk("Can't get header address!\n"); goto out; } //get_route_mac(iph->saddr, iph->daddr); memcpy(mac_header_addr, ethdr->h_source, 6); memcpy(mac_header_addr+6, ethdr->h_dest, 6); mac_header_addr[12] = 0x08; 
mac_header_addr[13] = 0x00; #endif skb_put(new_skb, iph->ihl*4 + tcph->doff*4); new_skb->mac_len = 14; new_skb->dev = dev; new_skb->pkt_type = PACKET_OTHERHOST; new_skb->protocol = __constant_htons(ETH_P_IP); new_skb->ip_summed = CHECKSUM_NONE; new_skb->priority = 0; /* *IP set */ new_iph = (struct iphdr *)new_skb->data; memset((char *)new_iph, 0, iph->ihl*4); new_iph->version = iph->version; new_iph->ihl = iph->ihl; new_iph->tos = iph->tos; new_iph->id = iph->id; new_iph->ttl = iph->ttl; new_iph->frag_off = iph->frag_off; new_iph->protocol = IPPROTO_TCP; //new_iph->saddr = iph->saddr; new_iph->saddr = iph->daddr; new_iph->daddr = iph->saddr; new_iph->tot_len = htons(tcpdatalen + iph->ihl*4 + tcph->doff*4); new_iph->check = 0; /* *TCP set */ new_tcph = (struct tcphdr *)(new_skb->data + iph->ihl*4); memset((char *)new_tcph, 0, tcph->doff*4); new_tcph->source = tcph->dest; new_tcph->dest = tcph->source; new_tcph->seq = tcph->ack_seq; new_tcph->ack_seq = htonl(ntohl(tcph->seq) + (ntohs(iph->tot_len) - iph->ihl*4 - tcph->doff*4)); new_tcph->doff = tcph->doff; new_tcph->fin = tcph->fin; new_tcph->ack = tcph->ack; new_tcph->psh = tcph->psh; new_tcph->window = tcph->window; new_tcph->check = 0; if (tcpdatalen > 0) { newpdata = skb_put(new_skb, tcpdatalen); if (newpdata != NULL) { if (tcpdata != NULL) memcpy(newpdata, tcpdata, tcpdatalen); } } refresh_skb_checksum(new_skb); return new_skb; out: if (NULL != skb) { dev_put (dev); kfree_skb (skb); } return NULL; }
/* * Determine the packet's protocol ID. The rule here is that we * assume 802.3 if the type field is short enough to be a length. * This is normal practice and works for any 'now in use' protocol. * * Also, at this point we assume that we ARE dealing exclusively with * VLAN packets, or packets that should be made into VLAN packets based * on a default VLAN ID. * * NOTE: Should be similar to ethernet/eth.c. * * SANITY NOTE: This method is called when a packet is moving up the stack * towards userland. To get here, it would have already passed * through the ethernet/eth.c eth_type_trans() method. * SANITY NOTE 2: We are referencing to the VLAN_HDR frields, which MAY be * stored UNALIGNED in the memory. RISC systems don't like * such cases very much... * SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be aligned, * so there doesn't need to be any of the unaligned stuff. It has * been commented out now... --Ben * */ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype) { unsigned char *rawp = NULL; struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data); unsigned short vid; struct net_device_stats *stats; unsigned short vlan_TCI; unsigned short proto; /* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */ vlan_TCI = ntohs(vhdr->h_vlan_TCI); vid = (vlan_TCI & VLAN_VID_MASK); #ifdef VLAN_DEBUG printk(VLAN_DBG "%s: skb: %p vlan_id: %hx\n", __FUNCTION__, skb, vid); #endif /* Ok, we will find the correct VLAN device, strip the header, * and then go on as usual. */ /* We have 12 bits of vlan ID. * * We must not drop the vlan_group_lock until we hold a * reference to the device (netif_rx does that) or we * fail. 
*/ spin_lock_bh(&vlan_group_lock); skb->dev = __find_vlan_dev(dev, vid); if (!skb->dev) { spin_unlock_bh(&vlan_group_lock); #ifdef VLAN_DEBUG printk(VLAN_DBG "%s: ERROR: No net_device for VID: %i on dev: %s [%i]\n", __FUNCTION__, (unsigned int)(vid), dev->name, dev->ifindex); #endif kfree_skb(skb); return -1; } skb->dev->last_rx = jiffies; /* Bump the rx counters for the VLAN device. */ stats = vlan_dev_get_stats(skb->dev); stats->rx_packets++; stats->rx_bytes += skb->len; skb_pull(skb, VLAN_HLEN); /* take off the VLAN header (4 bytes currently) */ /* Ok, lets check to make sure the device (dev) we * came in on is what this VLAN is attached to. */ if (dev != VLAN_DEV_INFO(skb->dev)->real_dev) { spin_unlock_bh(&vlan_group_lock); #ifdef VLAN_DEBUG printk(VLAN_DBG "%s: dropping skb: %p because came in on wrong device, dev: %s real_dev: %s, skb_dev: %s\n", __FUNCTION__ skb, dev->name, VLAN_DEV_INFO(skb->dev)->real_dev->name, skb->dev->name); #endif kfree_skb(skb); stats->rx_errors++; return -1; } /* * Deal with ingress priority mapping. */ skb->priority = vlan_get_ingress_priority(skb->dev, ntohs(vhdr->h_vlan_TCI)); #ifdef VLAN_DEBUG printk(VLAN_DBG "%s: priority: %lu for TCI: %hu (hbo)\n", __FUNCTION__, (unsigned long)(skb->priority), ntohs(vhdr->h_vlan_TCI)); #endif /* The ethernet driver already did the pkt_type calculations * for us... */ switch (skb->pkt_type) { case PACKET_BROADCAST: /* Yeah, stats collect these together.. */ // stats->broadcast ++; // no such counter :-( break; case PACKET_MULTICAST: stats->multicast++; break; case PACKET_OTHERHOST: /* Our lower layer thinks this is not local, let's make sure. * This allows the VLAN to have a different MAC than the underlying * device, and still route correctly. */ if (memcmp(skb->mac.ethernet->h_dest, skb->dev->dev_addr, ETH_ALEN) == 0) { /* It is for our (changed) MAC-address! 
*/ skb->pkt_type = PACKET_HOST; } break; default: break; }; /* Was a VLAN packet, grab the encapsulated protocol, which the layer * three protocols care about. */ /* proto = get_unaligned(&vhdr->h_vlan_encapsulated_proto); */ proto = vhdr->h_vlan_encapsulated_proto; skb->protocol = proto; if (ntohs(proto) >= 1536) { /* place it back on the queue to be handled by * true layer 3 protocols. */ /* See if we are configured to re-write the VLAN header * to make it look like ethernet... */ skb = vlan_check_reorder_header(skb); /* Can be null if skb-clone fails when re-ordering */ if (skb) { netif_rx(skb); } else { /* TODO: Add a more specific counter here. */ stats->rx_errors++; } spin_unlock_bh(&vlan_group_lock); return 0; } rawp = skb->data; /* * This is a magic hack to spot IPX packets. Older Novell breaks * the protocol design and runs IPX over 802.3 without an 802.2 LLC * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This * won't work for fault tolerant netware but does for the rest. */ if (*(unsigned short *)rawp == 0xFFFF) { skb->protocol = __constant_htons(ETH_P_802_3); /* place it back on the queue to be handled by true layer 3 protocols. */ /* See if we are configured to re-write the VLAN header * to make it look like ethernet... */ skb = vlan_check_reorder_header(skb); /* Can be null if skb-clone fails when re-ordering */ if (skb) { netif_rx(skb); } else { /* TODO: Add a more specific counter here. */ stats->rx_errors++; } spin_unlock_bh(&vlan_group_lock); return 0; } /* * Real 802.2 LLC */ skb->protocol = __constant_htons(ETH_P_802_2); /* place it back on the queue to be handled by upper layer protocols. */ /* See if we are configured to re-write the VLAN header * to make it look like ethernet... */ skb = vlan_check_reorder_header(skb); /* Can be null if skb-clone fails when re-ordering */ if (skb) { netif_rx(skb); } else { /* TODO: Add a more specific counter here. */ stats->rx_errors++; } spin_unlock_bh(&vlan_group_lock); return 0; }
		/* NOTE(review): chunk starts inside an xfrm4 dst-ifdown
		 * handler; the enclosing function is not fully visible. */
		do {
			/* Re-point each bundle entry's idev at loopback. */
			in_dev_put(xdst->u.rt.idev);
			xdst->u.rt.idev = loopback_idev;
			in_dev_hold(loopback_idev);
			xdst = (struct xfrm_dst *)xdst->u.dst.child;
		} while (xdst->u.dst.xfrm);

		/* Release the hold on loopback_idev taken before this
		 * chunk (not visible here). */
		__in_dev_put(loopback_idev);
	}

	xfrm_dst_ifdown(dst, dev);
}

/* dst_ops for IPv4 xfrm bundles. */
static struct dst_ops xfrm4_dst_ops = {
	.family = AF_INET,
	.protocol = __constant_htons(ETH_P_IP),
	.gc = xfrm4_garbage_collect,
	.update_pmtu = xfrm4_update_pmtu,
	.destroy = xfrm4_dst_destroy,
	.ifdown = xfrm4_dst_ifdown,
	.gc_thresh = 1024,
	.entry_size = sizeof(struct xfrm_dst),
};

/* AF_INET policy hooks (definition continues past this chunk). */
static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
	.family = AF_INET,
	.dst_ops = &xfrm4_dst_ops,
	.dst_lookup = xfrm4_dst_lookup,
	.get_saddr = xfrm4_get_saddr,
	.find_bundle = __xfrm4_find_bundle,
	.bundle_create = __xfrm4_bundle_create,
/*
 * __netpoll_rx - inspect an incoming frame on behalf of the netpoll client
 *
 * Returns 1 when the skb was consumed (queued for ARP handling, delivered
 * to the rx_hook, or dropped while "trapped"); returns 0 to let the normal
 * network stack process it.
 */
int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npi = skb->dev->npinfo;
	struct netpoll *np = npi->rx_np;

	if (!np)
		goto out;
	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == __constant_htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npi->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	/* NOTE(review): iph is taken from skb->data before pskb_may_pull(),
	 * which may relocate the header -- confirm this is safe here. */
	iph = (struct iphdr *)skb->data;
	/* Validate the IP header before trusting any of its fields. */
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;
	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);
	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	/* Match the configured endpoint; a value of 0 matches anything. */
	if (np->local_ip && np->local_ip != ntohl(iph->daddr))
		goto out;
	if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
		goto out;
	if (np->local_port && np->local_port != ntohs(uh->dest))
		goto out;

	/* Hand the UDP payload to the netpoll client. */
	np->rx_hook(np, ntohs(uh->source),
		    (char *)(uh+1),
		    ulen - sizeof(struct udphdr));

	kfree_skb(skb);
	return 1;

out:
	/* While trapped, swallow every frame we did not handle. */
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}
MODULE_DESCRIPTION("ltemac kernel module");
MODULE_AUTHOR("");
MODULE_LICENSE("GPL");

#include "../fsm/fsmdec.h"
#include "../lte_system.h"
#include "../debug.h"
#include "virtual_srio.h"

/* this is used for register the special packet type that processed by DYS */
struct packet_type pk_t = {
	.type = __constant_htons(DEV_PROTO_SRIO),
	.func = fsm_dev_recv,
};

/* net_device_ops for the virtual SRIO ethernet-like device. */
static const struct net_device_ops srio_netdev_ops = {
	.ndo_open = fsm_dev_open,
	.ndo_stop = fsm_dev_stop,
	.ndo_start_xmit = fsm_dev_tx,
	.ndo_get_stats = fsm_dev_get_stats,
	.ndo_do_ioctl = fsm_dev_ioctl,
	.ndo_set_config = fsm_dev_set_config,
	.ndo_tx_timeout = fsm_dev_tx_timeout,
};
/***
 * arp_rcv: Receive an arp request by the device layer.
 *
 * Validates the ARP header against the receiving rtnet device, learns the
 * sender's address for requests/replies aimed at our IP, and answers
 * requests.  The rtskb is always consumed.
 */
int rt_arp_rcv(struct rtskb *skb, struct rtpacket_type *pt)
{
	struct rtnet_device *rtdev = skb->rtdev;
	struct arphdr *arp = skb->nh.arph;
	unsigned char *arp_ptr= (unsigned char *)(arp+1);
	unsigned char *sha, *tha;
	u32 sip, tip;
	u16 dev_type = rtdev->type;

	/*
	 *	The hardware length of the packet should match the hardware length
	 *	of the device. Similarly, the hardware types should match. The
	 *	device should be ARP-able. Also, if pln is not 4, then the lookup
	 *	is not from an IP number. We can't currently handle this, so toss
	 *	it.
	 */
	if ((arp->ar_hln != rtdev->addr_len) ||
	    (rtdev->flags & IFF_NOARP) ||
	    (skb->pkt_type == PACKET_OTHERHOST) ||
	    (skb->pkt_type == PACKET_LOOPBACK) ||
	    (arp->ar_pln != 4))
		goto out;

	switch (dev_type) {
	default:
		/* Generic devices: accept IP protocol, or a hardware type
		 * matching the device type exactly. */
		if ((arp->ar_pro != __constant_htons(ETH_P_IP)) &&
		    (htons(dev_type) != arp->ar_hrd))
			goto out;
		break;

	case ARPHRD_ETHER:
		/*
		 * ETHERNET devices will accept ARP hardware types of either
		 * 1 (Ethernet) or 6 (IEEE 802.2).
		 */
		if ((arp->ar_hrd != __constant_htons(ARPHRD_ETHER)) &&
		    (arp->ar_hrd != __constant_htons(ARPHRD_IEEE802))) {
			goto out;
		}
		if (arp->ar_pro != __constant_htons(ETH_P_IP)) {
			goto out;
		}
		break;
	}

	/* Understand only these message types */
	if ((arp->ar_op != __constant_htons(ARPOP_REPLY)) &&
	    (arp->ar_op != __constant_htons(ARPOP_REQUEST)))
		goto out;

	/*
	 * Extract fields: sender hw/IP then target hw/IP, laid out
	 * back-to-back after the arphdr.  (tha is parsed but unused.)
	 */
	sha=arp_ptr;
	arp_ptr += rtdev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	tha=arp_ptr;
	arp_ptr += rtdev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* process only requests/replies directed to us */
	if (tip == rtdev->local_ip) {
		/* Learn the sender's mapping, then answer requests. */
		rt_ip_route_add_host(sip, sha, rtdev);

		if (arp->ar_op == __constant_htons(ARPOP_REQUEST))
			rt_arp_send(ARPOP_REPLY, ETH_P_ARP, sip, rtdev, tip,
				    sha, rtdev->dev_addr, sha);
	}

out:
	/* rtskbs are always consumed here, accepted or not. */
	kfree_rtskb(skb);
	return 0;
}
/***
 * arp_send: Create and send an arp packet. If (dest_hw == NULL),
 * we create a broadcast message.
 *
 * @type/@ptype: ARP operation and link-layer protocol type.
 * @src_hw/@dest_hw default to the device address / broadcast address
 * when NULL; @target_hw of NULL is encoded as all-zero.
 */
void rt_arp_send(int type, int ptype, u32 dest_ip,
		 struct rtnet_device *rtdev, u32 src_ip,
		 unsigned char *dest_hw, unsigned char *src_hw,
		 unsigned char *target_hw)
{
	struct rtskb *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;

	if (rtdev->flags & IFF_NOARP)
		return;

	/* Room for the ARP payload plus a 16-byte-aligned hard header. */
	if (!(skb=alloc_rtskb(sizeof(struct arphdr) + 2*(rtdev->addr_len+4) +
			      rtdev->hard_header_len+15, &global_pool)))
		return;

	rtskb_reserve(skb, (rtdev->hard_header_len+15)&~15);

	skb->nh.raw = skb->data;
	arp = (struct arphdr *)rtskb_put(skb, sizeof(struct arphdr) +
					 2*(rtdev->addr_len+4));

	skb->rtdev = rtdev;
	skb->protocol = __constant_htons (ETH_P_ARP);
	skb->priority = RT_ARP_SKB_PRIO;
	if (src_hw == NULL)
		src_hw = rtdev->dev_addr;
	if (dest_hw == NULL)
		dest_hw = rtdev->broadcast;

	/*
	 * Fill the device header for the ARP frame
	 */
	if (rtdev->hard_header &&
	    (rtdev->hard_header(skb,rtdev,ptype,dest_hw,src_hw,skb->len) < 0))
		goto out;

	arp->ar_hrd = htons(rtdev->type);
	arp->ar_pro = __constant_htons(ETH_P_IP);
	arp->ar_hln = rtdev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	/* Payload layout: sender hw, sender ip, target hw, target ip. */
	arp_ptr=(unsigned char *)(arp+1);

	memcpy(arp_ptr, src_hw, rtdev->addr_len);
	arp_ptr+=rtdev->addr_len;
	memcpy(arp_ptr, &src_ip,4);
	arp_ptr+=4;

	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, rtdev->addr_len);
	else
		memset(arp_ptr, 0, rtdev->addr_len);
	arp_ptr+=rtdev->addr_len;

	memcpy(arp_ptr, &dest_ip, 4);

	/* send the frame */
	rtdev_xmit(skb);

	return;

out:
	kfree_rtskb(skb);
}
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *net) { #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)) struct pdp_info *dev = (struct pdp_info *)net->ml_priv; #else struct pdp_info *dev = (struct pdp_info *)net->priv; #endif //printk("============> vnet_start_xmit() Called .. !!\n"); #ifdef USE_LOOPBACK_PING int ret; struct sk_buff *skb2; struct icmphdr *icmph; struct iphdr *iph; #endif #ifdef USE_LOOPBACK_PING dev->vn_dev.stats.tx_bytes += skb->len; dev->vn_dev.stats.tx_packets++; skb2 = alloc_skb(skb->len, GFP_ATOMIC); if (skb2 == NULL) { DPRINTK(1, "alloc_skb() failed\n"); dev_kfree_skb_any(skb); return -ENOMEM; } memcpy(skb2->data, skb->data, skb->len); skb_put(skb2, skb->len); dev_kfree_skb_any(skb); icmph = (struct icmphdr *)(skb2->data + sizeof(struct iphdr)); iph = (struct iphdr *)skb2->data; icmph->type = __constant_htons(ICMP_ECHOREPLY); ret = iph->daddr; iph->daddr = iph->saddr; iph->saddr = ret; iph->check = 0; iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); skb2->dev = net; skb2->protocol = __constant_htons(ETH_P_IP); netif_rx(skb2); dev->vn_dev.stats.rx_packets++; dev->vn_dev.stats.rx_bytes += skb->len; #else if (vnet_start_xmit_flag != 0) { return NETDEV_TX_BUSY; } vnet_start_xmit_flag = 1; workqueue_data = (unsigned long)skb; PREPARE_WORK(&dev->vn_dev.xmit_task,vnet_defer_xmit); schedule_work(&dev->vn_dev.xmit_task); netif_stop_queue(net); #endif return 0; }
/*
 * Module parameters
 */
#ifdef CONFIG_IRDA_DEBUG
/* Runtime-tunable debug verbosity for the IrDA stack. */
unsigned int irda_debug = IRDA_DEBUG_LEVEL;
module_param_named(debug, irda_debug, uint, 0);
MODULE_PARM_DESC(debug, "IRDA debugging level");
EXPORT_SYMBOL(irda_debug);
#endif

/* Packet type handler.
 * Tell the kernel how IrDA packets should be handled.
 */
static struct packet_type irda_packet_type = {
	.type = __constant_htons(ETH_P_IRDA),
	.func = irlap_driver_rcv, /* Packet type handler irlap_frame.c */
};

/*
 * Function irda_notify_init (notify)
 *
 *    Used for initializing the notify structure
 *    (definition continues past this chunk).
 */
void irda_notify_init(notify_t *notify)
{
	notify->data_indication = NULL;
	notify->udata_indication = NULL;
	notify->connect_confirm = NULL;
	notify->connect_indication = NULL;
int cp_dev_xmit_tcp (char * eth, u_char * smac, u_char * dmac, u_char * pkt, int pkt_len, u_long sip, u_long dip, u_short sport, u_short dport, u_long seq, u_long ack_seq, u_char psh, u_char fin) { struct sk_buff * skb = NULL; struct net_device * dev = NULL; struct ethhdr * ethdr = NULL; struct iphdr * iph = NULL; struct tcphdr * tcph = NULL; u_char * pdata = NULL; int nret = 1; if (NULL == smac || NULL == dmac) goto out; //dev = dev_get_by_name(eth); dev = dev_get_by_name(&init_net, eth); if (NULL == dev) goto out; printk("dev name: %s\n", dev->name); //skb = alloc_skb (ETH_HLEN + pkt_len + sizeof (struct iphdr) + sizeof (struct tcphdr) + LL_RESERVED_SPACE (dev), GFP_ATOMIC); skb = alloc_skb (pkt_len + sizeof (struct iphdr) + sizeof (struct tcphdr) + ETH_HLEN, GFP_ATOMIC); if (NULL == skb) goto out; //skb_reserve (skb, LL_RESERVED_SPACE (dev)); skb_reserve (skb, 2); skb->dev = dev; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = __constant_htons(ETH_P_IP); skb->ip_summed = CHECKSUM_NONE; skb->priority = 0; //skb->nh.iph = (struct iphdr*)skb_put(skb, sizeof (struct iphdr)); //skb->h.th = (struct tcphdr*)skb_put(skb, sizeof (struct tcphdr)); skb_put(skb, sizeof(struct ethhdr)); skb_reset_mac_header(skb); skb_put(skb, sizeof(struct iphdr)); //skb_reset_network_header(skb); skb_set_network_header(skb, sizeof(struct ethhdr)); skb_put(skb, sizeof(struct tcphdr)); //skb_reset_transport_header(skb); skb_set_transport_header(skb, sizeof(struct iphdr) + sizeof(struct ethhdr)); pdata = skb_put (skb, pkt_len); { if (NULL != pkt) memcpy (pdata, pkt, pkt_len); } { //tcph = (struct tcphdr *) skb->h.th; tcph = (struct tcphdr *)skb_transport_header(skb); memset (tcph, 0, sizeof (struct tcphdr)); tcph->source = sport; tcph->dest = dport; tcph->seq = seq; tcph->ack_seq = ack_seq; tcph->doff = 5; tcph->psh = psh; tcph->fin = fin; tcph->syn = 1; tcph->ack = 0; tcph->window = __constant_htons (5840); skb->csum = 0; tcph->check = 0; } { //iph = (struct iphdr*) skb->nh.iph; iph = 
(struct iphdr*)skb_network_header(skb); memset(iph, 0, sizeof(struct iphdr)); iph->version = 4; iph->ihl = sizeof(struct iphdr)>>2; iph->frag_off = 0; iph->protocol = IPPROTO_TCP; iph->tos = 0; iph->daddr = dip; iph->saddr = sip; iph->ttl = 0x40; iph->tot_len = __constant_htons(skb->len); iph->check = 0; iph->check = ip_fast_csum(iph, iph->ihl); } { int i = 0; printk("len0: %02x\n\n", skb->len); for (; i < skb->len; i++) { if (i != 0 && i % 16 == 0) { printk("\n"); } //printk("%02x ", ((unsigned char *)ethdr)[i]); printk("%02x ", skb->data[i]); } printk("\n"); } //skb->csum = skb_checksum (skb, ETH_HLEN + iph->ihl*4, skb->len - iph->ihl * 4, 0); //tcph->check = csum_tcpudp_magic (sip, dip, skb->len - iph->ihl * 4, IPPROTO_TCP, skb->csum); skb->csum = skb_checksum (skb, ETH_HLEN + iph->ihl*4, pkt_len + sizeof(struct tcphdr), 0); tcph->check = csum_tcpudp_magic (sip, dip, pkt_len + sizeof(struct tcphdr), IPPROTO_TCP, skb->csum); { int i = 0; printk("len1: %02x\n\n", skb->len); for (; i < skb->len; i++) { if (i != 0 && i % 16 == 0) { printk("\n"); } //printk("%02x ", ((unsigned char *)ethdr)[i]); printk("%02x ", skb->data[i]); } printk("\n"); } //skb->mac.raw = skb_push (skb, 14); //skb_push(skb, 14); //skb_reset_mac_header(skb); { //ethdr = (struct ethhdr *)skb->mac.raw; ethdr = (struct ethhdr *)skb_mac_header(skb); memcpy (ethdr->h_dest, dmac, ETH_ALEN); memcpy (ethdr->h_source, smac, ETH_ALEN); ethdr->h_proto = __constant_htons (ETH_P_IP); } { int i = 0; printk("len2: %02x\n\n", skb->len); for (; i < skb->len; i++) { if (i != 0 && i % 16 == 0) { printk("\n"); } //printk("%02x ", ((unsigned char *)ethdr)[i]); printk("%02x ", skb->data[i]); } printk("\n"); } if (0 > dev_queue_xmit(skb)) goto out; printk("aaaaaaaaaa1\n"); nret = 0; out: if (0 != nret && NULL != skb) {dev_put (dev); kfree_skb (skb);} printk("aaaaaaaaaaaa2\n"); return (nret); }
/*
 * It is hooked at the NF_IP_FORWARD chain, used only for VS/NAT.
 * Check if outgoing packet belongs to the established ip_vs_conn,
 * rewrite addresses of the packet and send it on its way...
 */
static unsigned int
ip_vs_out(unsigned int hooknum, struct sk_buff **pskb,
	  const struct net_device *in, const struct net_device *out,
	  int (*okfn)(struct sk_buff *))
{
	struct sk_buff *skb = *pskb;
	struct iphdr *iph;
	struct ip_vs_protocol *pp;
	struct ip_vs_conn *cp;
	int ihl;

	EnterFunction(11);

	/* Already handled by IPVS once; don't process it twice. */
	if (skb->ipvs_property)
		return NF_ACCEPT;

	iph = skb->nh.iph;
	if (unlikely(iph->protocol == IPPROTO_ICMP)) {
		int related, verdict = ip_vs_out_icmp(pskb, &related);

		if (related)
			return verdict;
		/* ip_vs_out_icmp() may have replaced the skb. */
		skb = *pskb;
		iph = skb->nh.iph;
	}

	pp = ip_vs_proto_get(iph->protocol);
	if (unlikely(!pp))
		return NF_ACCEPT;

	/* reassemble IP fragments */
	if (unlikely(iph->frag_off & __constant_htons(IP_MF|IP_OFFSET) &&
		     !pp->dont_defrag)) {
		skb = ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT);
		if (!skb)
			return NF_STOLEN;
		iph = skb->nh.iph;
		*pskb = skb;
	}

	ihl = iph->ihl << 2;

	/*
	 * Check if the packet belongs to an existing entry
	 */
	cp = pp->conn_out_get(skb, pp, iph, ihl, 0);

	if (unlikely(!cp)) {
		if (sysctl_ip_vs_nat_icmp_send &&
		    (pp->protocol == IPPROTO_TCP ||
		     pp->protocol == IPPROTO_UDP)) {
			__u16 _ports[2], *pptr;

			pptr = skb_header_pointer(skb, ihl,
						  sizeof(_ports), _ports);
			if (pptr == NULL)
				return NF_ACCEPT;	/* Not for me */
			if (ip_vs_lookup_real_service(iph->protocol,
						      iph->saddr, pptr[0])) {
				/*
				 * Notify the real server: there is no
				 * existing entry if it is not RST
				 * packet or not TCP packet.
				 */
				if (iph->protocol != IPPROTO_TCP
				    || !is_tcp_reset(skb)) {
					icmp_send(skb,ICMP_DEST_UNREACH,
						  ICMP_PORT_UNREACH, 0);
					return NF_DROP;
				}
			}
		}
		IP_VS_DBG_PKT(12, pp, skb, 0,
			      "packet continues traversal as normal");
		return NF_ACCEPT;
	}

	IP_VS_DBG_PKT(11, pp, skb, 0, "Outgoing packet");

	if (!ip_vs_make_skb_writable(pskb, ihl))
		goto drop;

	/* mangle the packet */
	if (pp->snat_handler && !pp->snat_handler(pskb, pp, cp))
		goto drop;
	skb = *pskb;
	/* Rewrite the source to the virtual address and fix the checksum. */
	skb->nh.iph->saddr = cp->vaddr;
	ip_send_check(skb->nh.iph);

	/* For policy routing, packets originating from this
	 * machine itself may be routed differently to packets
	 * passing through. We want this packet to be routed as
	 * if it came from this machine itself. So re-compute
	 * the routing information.
	 */
	if (ip_route_me_harder(pskb, RTN_LOCAL) != 0)
		goto drop;
	skb = *pskb;

	IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT");

	ip_vs_out_stats(cp, skb);
	ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
	ip_vs_conn_put(cp);

	skb->ipvs_property = 1;

	LeaveFunction(11);
	return NF_ACCEPT;

drop:
	ip_vs_conn_put(cp);
	kfree_skb(*pskb);
	return NF_STOLEN;
}
/* NOTE(review): chunk starts inside the amanda help() handler;
 * the enclosing function is not fully visible here. */
		ip_conntrack_expect_put(exp);
	}

 out:
	spin_unlock_bh(&amanda_buffer_lock);
	return ret;
}

/* conntrack helper matching Amanda backup traffic (UDP port 10080). */
static struct ip_conntrack_helper amanda_helper = {
	.max_expected = ARRAY_SIZE(conns),
	.timeout = 180,
	.me = THIS_MODULE,
	.help = help,
	.name = "amanda",

	.tuple = { .src = { .u = { __constant_htons(10080) } },
		   .dst = { .protonum = IPPROTO_UDP },
	},
	.mask = { .src = { .u = { 0xFFFF } },
		  .dst = { .protonum = 0xFF },
	},
};

static void __exit fini(void)
{
	/* Unregister the helper, then free the shared scratch buffer. */
	ip_conntrack_helper_unregister(&amanda_helper);
	kfree(amanda_buffer);
}

/* Module init (truncated in this chunk). */
static int __init init(void)
{
/*
 * Handle ICMP messages in the outside-to-inside direction (incoming).
 * Find any that might be relevant, check against existing connections,
 * forward to the right destination host if relevant.
 * Currently handles error types - unreachable, quench, ttl exceeded.
 */
static int
ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum)
{
	struct sk_buff *skb = *pskb;
	struct iphdr *iph;
	struct icmphdr _icmph, *ic;
	struct iphdr _ciph, *cih;	/* The ip header contained within the ICMP */
	struct ip_vs_conn *cp;
	struct ip_vs_protocol *pp;
	unsigned int offset, ihl, verdict;

	*related = 1;

	/* reassemble IP fragments */
	if (skb->nh.iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) {
		skb = ip_vs_gather_frags(skb,
					 hooknum == NF_IP_LOCAL_IN ?
					 IP_DEFRAG_VS_IN : IP_DEFRAG_VS_FWD);
		if (!skb)
			return NF_STOLEN;
		*pskb = skb;
	}

	iph = skb->nh.iph;
	offset = ihl = iph->ihl * 4;
	ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
	if (ic == NULL)
		return NF_DROP;

	IP_VS_DBG(12, "Incoming ICMP (%d,%d) %u.%u.%u.%u->%u.%u.%u.%u\n",
		  ic->type, ntohs(icmp_id(ic)),
		  NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));

	/*
	 * Work through seeing if this is for us.
	 * These checks are supposed to be in an order that means easy
	 * things are checked first to speed up processing.... however
	 * this means that some packets will manage to get a long way
	 * down this stack and then be rejected, but that's life.
	 */
	if ((ic->type != ICMP_DEST_UNREACH) &&
	    (ic->type != ICMP_SOURCE_QUENCH) &&
	    (ic->type != ICMP_TIME_EXCEEDED)) {
		*related = 0;
		return NF_ACCEPT;
	}

	/* Now find the contained IP header */
	offset += sizeof(_icmph);
	cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
	if (cih == NULL)
		return NF_ACCEPT; /* The packet looks wrong, ignore */

	pp = ip_vs_proto_get(cih->protocol);
	if (!pp)
		return NF_ACCEPT;

	/* Is the embedded protocol header present?
	 */
	if (unlikely(cih->frag_off & __constant_htons(IP_OFFSET) &&
		     pp->dont_defrag))
		return NF_ACCEPT;

	IP_VS_DBG_PKT(11, pp, skb, offset, "Checking incoming ICMP for");

	offset += cih->ihl * 4;

	/* The embedded headers contain source and dest in reverse order */
	cp = pp->conn_in_get(skb, pp, cih, offset, 1);
	if (!cp)
		return NF_ACCEPT;

	verdict = NF_DROP;

	/* Ensure the checksum is correct */
	if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
	    ip_vs_checksum_complete(skb, ihl)) {
		/* Failed checksum! */
		IP_VS_DBG(1, "Incoming ICMP: failed checksum from %d.%d.%d.%d!\n",
			  NIPQUAD(iph->saddr));
		goto out;
	}

	/* do the statistics and put it back */
	ip_vs_in_stats(cp, skb);
	/* For TCP/UDP, also skip the embedded port pair. */
	if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
		offset += 2 * sizeof(__u16);
	verdict = ip_vs_icmp_xmit(skb, cp, pp, offset);
	/* do not touch skb anymore */

out:
	__ip_vs_conn_put(cp);

	return verdict;
}
void * packet_init(struct sk_buff *skb, const struct net_device *out) { struct sk_buff *newskb = NULL; struct ethhdr *ethh = NULL; struct tcphdr *tcph = NULL; struct iphdr *iph = NULL; unsigned char *pdata = NULL; struct tcphdr *old_tcph = NULL; struct iphdr *old_iph = NULL; struct ethhdr *old_ethh = NULL; struct net_device *dev = NULL; unsigned short old_data_len = 0; unsigned char dest[6] = {0x08, 0x00, 0x27, 0xc4, 0xe6, 0x3b}; unsigned char src[6] = {0x52, 0x54, 0x00, 0x12, 0x35, 0x02}; char pkt302[] = "HTTP/1.1 302 Found\r\n" "Location: http://www.126.com/\r\n" "Content-Length: 0\r\n" "Connection: close\r\n\r\n"; //char pkt301[] = //"HTTP/1.1 301 Moved Permanently\r\n" //"Location: http://www.jd.com\r\n" //"Content-Type: text/html; charset=iso-8859-1\r\n" //"Content-length: 0\r\n" //"Cache-control: no-cache\r\n" //"\r\n"; // malloc skb space // l4 // l3 // l2 // return newskb dev = dev_get_by_name(&init_net, "eth0"); { // old skb info old_tcph = (struct tcphdr *)skb_transport_header(skb); old_iph = (struct iphdr *)skb_network_header(skb); old_ethh = (struct ethhdr *)skb_mac_header(skb); } newskb = alloc_skb(strlen(pkt302) + sizeof(struct tcphdr) + sizeof(struct iphdr) + ETH_HLEN + 2, GFP_ATOMIC); if (newskb == NULL) { return NULL; } skb_reserve(skb, 2); // skb padding newskb->dev = out; //newskb->dev = dev; newskb->pkt_type = PACKET_HOST; newskb->protocol = __constant_htons(ETH_P_IP); newskb->ip_summed = CHECKSUM_NONE; newskb->priority = 0; skb_put(newskb, sizeof(struct ethhdr)); skb_reset_mac_header(newskb); skb_put(newskb, sizeof(struct iphdr)); skb_set_network_header(newskb, sizeof(struct ethhdr)); skb_put(newskb, sizeof(struct tcphdr)); skb_set_transport_header(newskb, sizeof(struct iphdr) + sizeof(struct ethhdr)); //skb_put(newskb, sizeof(struct ethhdr) + sizeof(struct iphdr) + sizeof(struct tcphdr)); pdata = skb_put(newskb, strlen(pkt302)); if (pdata != NULL) { memcpy(pdata, pkt302, strlen(pkt302)); } { //fill l4 tcph = (struct tcphdr 
*)skb_transport_header(newskb); memset(tcph, 0, sizeof(struct tcphdr)); tcph->source = old_tcph->dest; tcph->dest = old_tcph->source; //tcph->seq = old_tcph->seq; //tcph->ack_seq = old_tcph->ack_seq; old_data_len = __constant_ntohs(old_iph->tot_len) - old_iph->ihl * 4 - old_tcph->doff * 4; printk("---------old seq : %08x\r\n", old_tcph->seq); printk("---------old ack : %08x\r\n", old_tcph->ack_seq); printk("---------old data_len : %d\r\n", old_data_len); tcph->seq = old_tcph->ack_seq; //tcph->ack_seq = __constant_htonl(__constant_ntohl(old_tcph->seq) + strlen(pkt302)); tcph->ack_seq = __constant_htonl(__constant_ntohl(old_tcph->seq) + old_data_len); tcph->doff = 5; tcph->psh = 1; tcph->ack = 1; tcph->window = old_tcph->window; newskb->csum = 0; tcph->check = 0; tcph->urg_ptr = 0; } { //fill l3 iph = (struct iphdr *)skb_network_header(newskb); memset(iph, 0, sizeof(struct iphdr)); iph->version = 4; iph->ihl = sizeof(struct iphdr)>>2; iph->frag_off = __constant_htons(0x4000); iph->protocol = IPPROTO_TCP; iph->tos = 0; iph->daddr = old_iph->saddr; iph->saddr = old_iph->daddr; iph->ttl = 0x40; iph->tot_len = __constant_htons(strlen(pkt302) + sizeof(struct tcphdr) + sizeof(struct iphdr)); iph->check = 0; iph->check = ip_fast_csum(iph, iph->ihl); } newskb->csum = skb_checksum (newskb, ETH_HLEN + iph->ihl*4, strlen(pkt302) + sizeof(struct tcphdr), 0); tcph->check = csum_tcpudp_magic (iph->saddr, iph->daddr, strlen(pkt302) + sizeof(struct tcphdr), IPPROTO_TCP, newskb->csum); { ethh = (struct ethhdr *)skb_mac_header(newskb); //fill l2 if (skb->mac_len > 0) { memcpy(ethh->h_dest, old_ethh->h_source, ETH_ALEN); memcpy(ethh->h_source, old_ethh->h_dest, ETH_ALEN); } else { //memcpy(ethh->h_dest, old_ethh->h_source, ETH_ALEN); //memcpy(ethh->h_source, old_ethh->h_dest, ETH_ALEN); //memset(ethh->h_dest, 0, ETH_ALEN); //memset(ethh->h_source, 0, ETH_ALEN); memcpy(ethh->h_dest, dest, ETH_ALEN); memcpy(ethh->h_source, src, ETH_ALEN); } ethh->h_proto = __constant_htons (ETH_P_IP); } 
//skb_pull(newskb, ETH_HLEN); return newskb; }
/*
 * vlan_dev_hard_start_xmit - transmit entry point for a VLAN pseudo-device.
 * @skb: outgoing frame; may be replaced if headroom must be grown or the
 *       buffer unshared.
 * @dev: the VLAN net_device.
 *
 * Encapsulates non-802.1Q frames with a VLAN tag (proto + TCI) and hands the
 * result to the underlying real device.  Always returns 0; on internal
 * failure the frame is dropped and counted in tx_dropped.
 */
int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = vlan_dev_get_stats(dev);
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);

	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
	 *
	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
	 */
	if (veth->h_vlan_proto != __constant_htons(ETH_P_8021Q)) {
		unsigned short veth_TCI;

		/* This is not a VLAN frame...but we can fix that! */
		VLAN_DEV_INFO(dev)->cnt_encap_on_xmit++;
#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: proto to encap: 0x%hx (hbo)\n",
		       __FUNCTION__, htons(veth->h_vlan_proto));
#endif
		if (skb_headroom(skb) < VLAN_HLEN) {
			/* not enough headroom for the 4-byte tag: copy into a
			 * bigger skb and drop the original */
			struct sk_buff *sk_tmp = skb;
			skb = skb_realloc_headroom(sk_tmp, VLAN_HLEN);
			kfree_skb(sk_tmp);
			if (skb == NULL) {
				stats->tx_dropped++;
				return 0;
			}
			VLAN_DEV_INFO(dev)->cnt_inc_headroom_on_tx++;
		} else {
			/* we will modify the header in place, so make sure we
			 * own the data */
			if (!(skb = skb_unshare(skb, GFP_ATOMIC))) {
				printk(KERN_ERR "vlan: failed to unshare skbuff\n");
				stats->tx_dropped++;
				return 0;
			}
		}
		veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);

		/* Move the mac addresses to the beginning of the new header. */
		memmove(skb->data, skb->data + VLAN_HLEN, 12);

		/* first, the ethernet type */
		/* put_unaligned(__constant_htons(ETH_P_8021Q), &veth->h_vlan_proto); */
		veth->h_vlan_proto = __constant_htons(ETH_P_8021Q);

		/* Now, construct the second two bytes. This field looks
		 * something like:
		 *   usr_priority: 3 bits (high bits)
		 *   CFI           1 bit
		 *   VLAN ID      12 bits (low bits)
		 */
		veth_TCI = VLAN_DEV_INFO(dev)->vlan_id;
		veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);
		veth->h_vlan_TCI = htons(veth_TCI);
	}

	/* hand the (now tagged) frame to the underlying physical device */
	skb->dev = VLAN_DEV_INFO(dev)->real_dev;
#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: about to send skb: %p to dev: %s\n",
	       __FUNCTION__, skb, skb->dev->name);
	printk(VLAN_DBG "  %2hx.%2hx.%2hx.%2xh.%2hx.%2hx %2hx.%2hx.%2hx.%2hx.%2hx.%2hx %4hx %4hx %4hx\n",
	       veth->h_dest[0], veth->h_dest[1], veth->h_dest[2],
	       veth->h_dest[3], veth->h_dest[4], veth->h_dest[5],
	       veth->h_source[0], veth->h_source[1], veth->h_source[2],
	       veth->h_source[3], veth->h_source[4], veth->h_source[5],
	       veth->h_vlan_proto, veth->h_vlan_TCI,
	       veth->h_vlan_encapsulated_proto);
#endif
	stats->tx_packets++; /* for statics only */
	stats->tx_bytes += skb->len;
	dev_queue_xmit(skb);
	return 0;
}
printk("send skb size[%d]maclen[%d]\n", nskb->len, nskb->mac_len); for (i=0; i<nskb->len; i++) { printk("%.2x ", nskb->data[i]); } printk("count[%d]\n", i); dev_queue_xmit(nskb); } out: kfree_skb (skb); return NET_RX_SUCCESS; } static struct packet_type mirror_proto = { /* Capture all protocols. */ .type = __constant_htons(ETH_P_ALL), .func = mirror_func, }; int option_setup (char *opt) { char *from, *to, *cur, tmp; struct net_device *dev; int count = 0; printk ("args: %s\n", opt); /* Get mirror port. */ if ((to = strchr(opt, '@')) == NULL) return -EINVAL; *to = '\0';
/*
 * ak_client_inform_port - notify the firewall that a user has bound a port.
 * @dev:      output net_device (must be non-NULL; its primary address is the
 *            source of the notification packets).
 * @port_src: source port being registered (network byte order assumed —
 *            TODO confirm against callers).
 * @protocol: IP protocol of the bound port.
 * @uid:      local user id; every active logon of this user is notified.
 *
 * For each logon returned by ak_client_get_user_list() a UDP packet
 * (IP + UDP + fwprofd_header + fwprofd_port_ctl, both MD5-authenticated)
 * is hand-built and pushed straight to the neighbour output path, bypassing
 * the normal UDP stack.  Heavy #if ladders keep this working from 2.4-era
 * kernels through >= 3.6.
 *
 * Returns 0 if at least one packet was sent, a negative code otherwise.
 */
static int ak_client_inform_port(const struct net_device *dev, aku16 port_src,
				 aku8 protocol, unsigned int uid)
{
	ak_client_logon_array user_logon[AK_CLIENT_MAX_LOGONS_PER_USER];
	struct sk_buff *skb;		// packet sent to inform the firewall
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
	struct flowi flp;
#else
	struct flowi4 flp;
#endif
	struct in_device *idev;
	struct rtable *rt;		// route used to send the packet
	struct iphdr *ip;		// IP header of the outgoing packet
	struct udphdr *udp;		// UDP header of the outgoing packet
	struct dst_entry *dst;
#if (((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,41)) && \
      (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))) || \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)))
	struct neighbour *neigh;
#endif
	MD5_CTX contexto;		// context for the MD5 computations
	int pkt_sent = 0;		// did we send at least one packet?
	fwprofd_header *header;
	fwprofd_port_ctl *port_ctl;
	ak_client_logon_array *logon;
	int size;
	int count;
	int i;

	if (!dev) {
		PRINT("Device de saida NULL\n");
		return -2;
	}
	count = ak_client_get_user_list(uid, user_logon);
	size = sizeof(struct iphdr) + sizeof(struct udphdr) +
	       sizeof(fwprofd_header) + sizeof(fwprofd_port_ctl);

	for (i = 0, logon = user_logon; i < count; i++, logon++) {
		PRINT("Enviando pacote %d/%d - ", i + 1, count);
		/* +16 leaves room in front of the IP header for the L2
		 * header prepended by the neighbour output path */
		skb = alloc_skb(size + 16, GFP_ATOMIC);
		if (!skb) {
			PRINT("Nao consegui alocar skbuff para enviar pacote\n");
			return -3;
		}
		skb->data += 16;
		skb->len = size;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
		skb->tail = skb->data + size;
		skb->nh.iph = (struct iphdr *) skb->data;
		skb->h.uh = (struct udphdr *) (skb->data + sizeof(struct iphdr));
		ip = skb->nh.iph;
#else
		skb_set_tail_pointer(skb, size);
		skb_reset_network_header(skb);
		skb_set_transport_header(skb, sizeof(struct iphdr));
		ip = ip_hdr(skb);
#endif
		udp = (struct udphdr *) ((char *) ip + sizeof(struct iphdr));
		header = (fwprofd_header *) (udp + 1);
		port_ctl = (fwprofd_port_ctl *) (header + 1);

		// Get the output interface's IP so we can allocate an output route
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
		idev = in_dev_get(dev);
#else
		rcu_read_lock();
		idev = __in_dev_get_rcu(dev);
#endif
		if (!idev) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
			rcu_read_unlock();
#endif
			kfree_skb(skb);
			PRINT("Device de saida sem IP (1)\n");
			return -4;
		}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
		read_lock(&idev->lock);
#endif
		if (!idev->ifa_list) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
			read_unlock(&idev->lock);
			in_dev_put(idev);
#else
			rcu_read_unlock();
#endif
			kfree_skb(skb);
			PRINT("Device de saida sem IP (2)\n");
			return -5;
		}
		/* use the interface's primary address as source */
		ip->saddr = idev->ifa_list->ifa_address;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
		read_unlock(&idev->lock);
		in_dev_put(idev);
#else
		rcu_read_unlock();
#endif

		/* build the flow key for the route lookup */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
		flp.oif = 0;
		flp.nl_u.ip4_u.saddr = ip->saddr;
		flp.nl_u.ip4_u.daddr = logon->logon_data.ip.s_addr;
		flp.nl_u.ip4_u.tos = 0;
		flp.uli_u.ports.sport = ntohs(AKER_PROF_PORT);
		flp.uli_u.ports.dport = ntohs(AKER_PROF_PORT);
		flp.proto = IPPROTO_UDP;
#else
		flp.flowi4_oif = 0;
		flp.saddr = ip->saddr;
		flp.daddr = logon->logon_data.ip.s_addr;
		flp.flowi4_tos = 0;
		flp.fl4_sport = ntohs(AKER_PROF_PORT);
		flp.fl4_dport = ntohs(AKER_PROF_PORT);
		flp.flowi4_proto = IPPROTO_UDP;
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
		if (ip_route_output_key(&rt, &flp))
#else
		if (ip_route_output_key(&init_net, &rt, &flp))
#endif
		{
			kfree_skb(skb);
			PRINT("Erro ao alocar rota de saida\n");
			continue;
		}
#else
		rt = ip_route_output_key(&init_net, &flp);
		if (IS_ERR(rt)) {
			kfree_skb(skb);
			PRINT("Erro ao alocar rota de saida\n");
			continue;
		}
#endif
		/* attach the route's dst to the skb (API changed over time) */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
		skb->dst = dst_clone(&rt->u.dst);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
		skb_dst_set(skb, dst_clone(&rt->u.dst));
#else
		skb_dst_set(skb, dst_clone(&rt->dst));
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
		skb->dev = rt->u.dst.dev;
#else
		skb->dev = rt->dst.dev;
#endif
		skb->protocol = __constant_htons(ETH_P_IP);

		// Fill in the user data
		port_ctl->ip_src.s_addr = 0;
		port_ctl->seq = ntohl(logon->seq);	// ak_client_get_user_list() already incremented seq
		port_ctl->user_num = ntohl(logon->logon_data.ak_user_num);
		port_ctl->port = port_src;
		port_ctl->protocol = protocol;
		port_ctl->reserved = 0;

		/* per-logon authentication hash over the port_ctl fields */
		MD5Init(&contexto);
		MD5Update(&contexto, (u_char *) logon->logon_data.secret, 16);
		MD5Update(&contexto, (u_char *) &port_ctl->ip_src, sizeof(struct in_addr));
		MD5Update(&contexto, (u_char *) &port_ctl->seq, sizeof(aku32));
		MD5Update(&contexto, (u_char *) &port_ctl->user_num, sizeof(aku32));
		MD5Update(&contexto, (u_char *) &port_ctl->port, sizeof(aku16));
		MD5Update(&contexto, (u_char *) &port_ctl->protocol, sizeof(aku8));
		MD5Update(&contexto, (u_char *) &port_ctl->reserved, sizeof(aku8));
		MD5Final((u_char *) port_ctl->hash, &contexto);

		// Fill in the remaining packet fields
		header->ip_dst = logon->logon_data.ip;
		header->versao = AKER_PROF_VERSION;
		header->tipo_req = APROF_BIND_PORT;
		memset(header->md5, 0, 16);

		/* whole-message digest (md5 field zeroed while hashing) */
		MD5Init(&contexto);
		MD5Update(&contexto, (void *) header, sizeof(fwprofd_header));
		MD5Update(&contexto, (void *) port_ctl, sizeof(fwprofd_port_ctl));
		MD5Final(header->md5, &contexto);

		udp->dest = udp->source = ntohs(AKER_PROF_PORT);
		udp->len = ntohs(size - sizeof(struct iphdr));
		udp->check = 0;

		ip->ihl = sizeof(struct iphdr) >> 2;
		ip->version = IPVERSION;
		ip->ttl = IPDEFTTL;
		ip->tos = 0;
		ip->daddr = header->ip_dst.s_addr;
		ip->protocol = IPPROTO_UDP;
		ip->frag_off = 0;
		ip->tot_len = htons(size);
		ip->id = 0;
		ip->check = 0;
		ip->check = ip_fast_csum((u_char *) ip, ip->ihl);
		PRINT("%s -> %s\n", ip2a(ip->saddr), ip2a(ip->daddr));

		// Send the packet
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
		dst = skb->dst;
#else
		dst = skb_dst(skb);
#endif
		/* emit via the neighbour layer; the mechanism differs per
		 * kernel generation (noref accessor / lookup / hh cache) */
#if (((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,41)) && \
      (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))) || \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)) && \
     LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0))
		rcu_read_lock();
		neigh = dst_get_neighbour_noref(dst);
		if (neigh) {
			neigh->output(neigh, skb);
			ip_rt_put(rt);
			pkt_sent++;
		}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
		rcu_read_lock();
		neigh = dst_neigh_lookup_skb(dst, skb);
		if (neigh) {
			neigh->output(neigh, skb);
			ip_rt_put(rt);
			pkt_sent++;
		}
#else
		if (dst->hh) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
			int hh_alen;

			read_lock_bh(dst->hh->hh_lock);
			hh_alen = HH_DATA_ALIGN(dst->hh->hh_len);
			memcpy(skb->data - hh_alen, dst->hh->hh_data, hh_alen);
			read_unlock_bh(dst->hh->hh_lock);
			skb_push(skb, dst->hh->hh_len);
			dst->hh->hh_output(skb);
#else
			neigh_hh_output(dst->hh, skb);
#endif
			ip_rt_put(rt);
			pkt_sent++;
		} else if (dst->neighbour) {
			dst->neighbour->output(skb);
			ip_rt_put(rt);
			pkt_sent++;
		}
#endif
		else {
			kfree_skb(skb);
			ip_rt_put(rt);
			PRINT("Nao sei como enviar pacote de saida\n");
		}
#if (((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,41)) && \
      (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))) || \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)))
		rcu_read_unlock();
#endif
	}
	if (!pkt_sent)
		return -1;
	return 0;
}
/* Perform route lookup using linux ip_route_input.

   The route lookup will use the following selectors:
   dst, src, inbound ifnum, ip protocol, tos, and fwmark.

   The source address is expected to be non-local and it must be defined.

   The following selectors are ignored:
   dst port, src port, icmp type, icmp code, ipsec spi.

   Works by building a minimal dummy skb carrying only an IPv4 header and
   feeding it to ip_route_input(); the resulting rtable is then decoded
   into 'result' (gateway, mtu, ifnum).  Returns TRUE only for route types
   that are acceptable for forwarding/local delivery. */
Boolean ssh_interceptor_route_input_ipv4(SshInterceptor interceptor,
					 SshInterceptorRouteKey key,
					 SshUInt16 selector,
					 SshInterceptorRouteResult result)
{
	u32 daddr, saddr;
	u8 ipproto;
	u8 tos;
	u32 fwmark;
	struct sk_buff *skbp;
	struct net_device *dev;
	struct rtable *rt;
	int rval = 0;
	u16 rt_type;
	struct iphdr *iph = NULL;
#ifdef DEBUG_LIGHT
	unsigned char *rt_type_str;
#endif /* DEBUG_LIGHT */

	SSH_INTERCEPTOR_STACK_MARK();

	SSH_IP4_ENCODE(&key->dst, (unsigned char *) &daddr);

	/* Initialize */
	saddr = 0;
	ipproto = 0;
	tos = 0;
	fwmark = 0;
	dev = NULL;

	/* pick up only the selectors the caller declared valid */
	if (selector & SSH_INTERCEPTOR_ROUTE_KEY_SRC)
		SSH_IP4_ENCODE(&key->src, (unsigned char *) &saddr);

	if (selector & SSH_INTERCEPTOR_ROUTE_KEY_IN_IFNUM) {
		SSH_LINUX_ASSERT_VALID_IFNUM(key->ifnum);
		/* takes a reference on the net_device; released below */
		dev = ssh_interceptor_ifnum_to_netdev(interceptor, key->ifnum);
	}

	if (selector & SSH_INTERCEPTOR_ROUTE_KEY_IPPROTO)
		ipproto = key->ipproto;

	if (selector & SSH_INTERCEPTOR_ROUTE_KEY_IP4_TOS)
		tos = key->nh.ip4.tos;

#if (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0)
#ifdef SSH_LINUX_FWMARK_EXTENSION_SELECTOR
	/* Use linux fw_mark in routing */
	if (selector & SSH_INTERCEPTOR_ROUTE_KEY_EXTENSION)
		fwmark = key->extension[SSH_LINUX_FWMARK_EXTENSION_SELECTOR];
#endif /* SSH_LINUX_FWMARK_EXTENSION_SELECTOR */
#endif /* (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0) */

	/* Build dummy skb */
	skbp = alloc_skb(SSH_IPH4_HDRLEN, GFP_ATOMIC);
	if (skbp == NULL)
		goto fail;
	SSH_SKB_RESET_MACHDR(skbp);
	iph = (struct iphdr *) skb_put(skbp, SSH_IPH4_HDRLEN);
	if (iph == NULL) {
		dev_kfree_skb(skbp);
		goto fail;
	}
	SSH_SKB_SET_NETHDR(skbp, (unsigned char *) iph);
	skb_dst_set(skbp, NULL);
	skbp->protocol = __constant_htons(ETH_P_IP);
	SSH_SKB_MARK(skbp) = fwmark;
	iph->protocol = ipproto;

	SSH_DEBUG(SSH_D_LOWOK,
		  ("Route lookup: "
		   "dst %@ src %@ ifnum %d[%s] ipproto %d tos 0x%02x fwmark 0x%x",
		   ssh_ipaddr_render, &key->dst,
		   ssh_ipaddr_render,
		   ((selector & SSH_INTERCEPTOR_ROUTE_KEY_SRC) ? &key->src : NULL),
		   (dev ? dev->ifindex : -1),
		   (dev ? dev->name : "none"),
		   ipproto, tos, fwmark));

	/* Perform route lookup */
	rval = ip_route_input(skbp, daddr, saddr, tos, dev);
	if (rval < 0 || skb_dst(skbp) == NULL) {
		dev_kfree_skb(skbp);
		goto fail;
	}

	/* Get the gateway, mtu and ifnum */
	rt = (struct rtable *) skb_dst(skbp);
	SSH_IP4_DECODE(result->gw, &rt->rt_gateway);
	result->mtu = SSH_LINUX_DST_MTU(skb_dst(skbp));
	result->ifnum = skb_dst(skbp)->dev->ifindex;
	rt_type = rt->rt_type;

#ifdef DEBUG_LIGHT
	/* human-readable route type for the debug trace below */
	switch (rt_type) {
	case RTN_UNSPEC: rt_type_str = "unspec"; break;
	case RTN_UNICAST: rt_type_str = "unicast"; break;
	case RTN_LOCAL: rt_type_str = "local"; break;
	case RTN_BROADCAST: rt_type_str = "broadcast"; break;
	case RTN_ANYCAST: rt_type_str = "anycast"; break;
	case RTN_MULTICAST: rt_type_str = "multicast"; break;
	case RTN_BLACKHOLE: rt_type_str = "blackhole"; break;
	case RTN_UNREACHABLE: rt_type_str = "unreachable"; break;
	case RTN_PROHIBIT: rt_type_str = "prohibit"; break;
	case RTN_THROW: rt_type_str = "throw"; break;
	case RTN_NAT: rt_type_str = "nat"; break;
	case RTN_XRESOLVE: rt_type_str = "xresolve"; break;
	default: rt_type_str = "unknown";
	}
#endif /* DEBUG_LIGHT */

	SSH_DEBUG(SSH_D_LOWOK,
		  ("Route result: dst %@ via %@ ifnum %d[%s] mtu %d type %s [%d]",
		   ssh_ipaddr_render, &key->dst,
		   ssh_ipaddr_render, result->gw,
		   result->ifnum,
		   (rt->u.dst.dev->name ? rt->u.dst.dev->name : "none"),
		   result->mtu, rt_type_str, rt_type));

#ifdef SSH_IPSEC_IP_ONLY_INTERCEPTOR
#ifdef LINUX_FRAGMENTATION_AFTER_NF_POST_ROUTING
	/* Check if need to create a child dst_entry with interface MTU. */
	if ((selector & SSH_INTERCEPTOR_ROUTE_KEY_FLAG_TRANSFORM_APPLIED) &&
	    skb_dst(skbp)->child == NULL) {
		if (interceptor_route_create_child_dst(skb_dst(skbp)) == NULL)
			SSH_DEBUG(SSH_D_FAIL,
				  ("Could not create child dst_entry for dst %p",
				   skb_dst(skbp)));
	}
#endif /* LINUX_FRAGMENTATION_AFTER_NF_POST_ROUTING */
#endif /* SSH_IPSEC_IP_ONLY_INTERCEPTOR */

	/* Release the routing table entry ; otherwise a memory leak occurs
	   in the route entry table. */
	dst_release(skb_dst(skbp));
	skb_dst_set(skbp, NULL);
	dev_kfree_skb(skbp);

	/* Assert that ifnum fits into the SshInterceptorIfnum data type. */
	SSH_LINUX_ASSERT_IFNUM(result->ifnum);

	/* Check that ifnum does not collide with
	   SSH_INTERCEPTOR_INVALID_IFNUM. */
	if (result->ifnum == SSH_INTERCEPTOR_INVALID_IFNUM)
		goto fail;

	/* Accept only unicast, broadcast, anycast, multicast and local
	   routes. */
	if (rt_type == RTN_UNICAST ||
	    rt_type == RTN_BROADCAST ||
	    rt_type == RTN_ANYCAST ||
	    rt_type == RTN_MULTICAST ||
	    rt_type == RTN_LOCAL) {
		ssh_interceptor_release_netdev(dev);
		SSH_LINUX_ASSERT_VALID_IFNUM(result->ifnum);
		return TRUE;
	}

	/* Fail route lookup for other route types. */
 fail:
	if (dev)
		ssh_interceptor_release_netdev(dev);
	SSH_DEBUG(SSH_D_FAIL,
		  ("Route lookup for %@ failed with code %d",
		   ssh_ipaddr_render, &key->dst, rval));
	return FALSE;
}
#endif nf_nat_sl = rcu_dereference(nf_nat_sl_hook); ret = nf_nat_sl(pskb, ctinfo, exp, host_offset, dataoff, datalen, user_data); return ret; } static struct nf_conntrack_helper sl_helper __read_mostly = { .name = "sl", .max_expected = 0, .timeout = 60, .tuple.src.l3num = AF_INET, .tuple.dst.protonum = IPPROTO_TCP, .tuple.src.u.tcp.port = __constant_htons(SL_PORT), .me = THIS_MODULE, .help = sl_help, }; /* don't make this __exit, since it's called from __init ! */ static void nf_conntrack_sl_fini(void) { #ifdef SL_DEBUG printk(KERN_DEBUG " unregistering for port %d\n", SL_PORT); #endif nf_conntrack_helper_unregister(&sl_helper); }
/* Route IPv4 packet 'skbp', using the route key selectors in
   'route_selector' and the interface number 'ifnum_in'.

   Two strategies are used: when a non-local source address and an inbound
   interface are selected, ip_route_input() reroutes the skb as if it were
   being received (temporarily clearing header fields that must not take
   part in the lookup); otherwise an output route is looked up with
   ip_route_output_key() and its dst is attached to the skb.

   Returns TRUE when skbp ends up with a valid dst_entry, FALSE otherwise.
   On failure, header-field modifications are intentionally NOT undone —
   the caller frees the skb. */
Boolean ssh_interceptor_reroute_skb_ipv4(SshInterceptor interceptor,
					 struct sk_buff *skbp,
					 SshUInt16 route_selector,
					 SshUInt32 ifnum_in)
{
	struct iphdr *iph;
	int rval = 0;

	/* Recalculate the route info as the engine might have touched the
	   destination address. This can happen for example if we are in
	   tunnel mode. */
	iph = (struct iphdr *) SSH_SKB_GET_NETHDR(skbp);
	if (iph == NULL) {
		SSH_DEBUG(SSH_D_ERROR, ("Could not access IP header"));
		return FALSE;
	}

	/* Release old dst_entry */
	if (skb_dst(skbp))
		dst_release(skb_dst(skbp));
	skb_dst_set(skbp, NULL);

	if ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_SRC)
	    && (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_FLAG_LOCAL_SRC) == 0
	    && (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_IN_IFNUM)) {
		/* input-path reroute */
		u32 saddr = 0;
		u8 ipproto = 0;
		u8 tos = 0;
#if (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0)
#ifdef SSH_LINUX_FWMARK_EXTENSION_SELECTOR
		u32 fwmark = 0;
#endif /* SSH_LINUX_FWMARK_EXTENSION_SELECTOR */
#endif /* (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0) */
		struct net_device *dev;

		SSH_ASSERT(skbp->protocol == __constant_htons(ETH_P_IP));

		if (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_SRC)
			saddr = iph->saddr;

		/* Map 'ifnum_in' to a net_device. */
		SSH_LINUX_ASSERT_VALID_IFNUM(ifnum_in);
		dev = ssh_interceptor_ifnum_to_netdev(interceptor, ifnum_in);

		/* Clear the IP protocol, if selector does not define it.
		   Ugly, but necessary to make sure the skb gets rerouted
		   like engine expects.  The original value is restored
		   after the lookup. */
		if ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_IPPROTO) == 0) {
			ipproto = iph->protocol;
			iph->protocol = 0;
		}

		if (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_IP4_TOS)
			tos = RT_TOS(iph->tos);

#if (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0)
#ifdef SSH_LINUX_FWMARK_EXTENSION_SELECTOR
		/* Clear the nfmark, if selector does not define it. Ugly,
		   but necessary to make sure the skb gets rerouted like
		   engine expects. */
		if ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_EXTENSION) == 0) {
			fwmark = SSH_SKB_MARK(skbp);
			SSH_SKB_MARK(skbp) = 0;
		}
#endif /* SSH_LINUX_FWMARK_EXTENSION_SELECTOR */
#endif /* (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0) */

		/* Call ip_route_input */
		if (ip_route_input(skbp, iph->daddr, saddr, tos, dev) < 0) {
			SSH_DEBUG(SSH_D_FAIL,
				  ("ip_route_input failed. (0x%08x -> 0x%08x)",
				   iph->saddr, iph->daddr));
			SSH_DEBUG(SSH_D_NICETOKNOW,
				  ("dst 0x%08x src 0x%08x iif %d[%s] proto %d tos 0x%02x "
				   "fwmark 0x%x",
				   iph->daddr, saddr,
				   (dev ? dev->ifindex : -1),
				   (dev ? dev->name : "none"),
				   iph->protocol, tos, SSH_SKB_MARK(skbp)));

			/* Release netdev reference */
			if (dev)
				ssh_interceptor_release_netdev(dev);

			/* Note, skb modifications are not un-done as the
			   caller frees the skb. If this is changed then the
			   modifications should be un-done here before
			   returning. */
			return FALSE;
		}

		/* Write original IP protocol back to skb */
		if (ipproto)
			iph->protocol = ipproto;

#if (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0)
#ifdef SSH_LINUX_FWMARK_EXTENSION_SELECTOR
		/* Write original fwmark back to skb */
		if (fwmark)
			SSH_SKB_MARK(skbp) = fwmark;
#endif /* SSH_LINUX_FWMARK_EXTENSION_SELECTOR */
#endif /* (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0) */

		/* Release netdev reference */
		if (dev)
			ssh_interceptor_release_netdev(dev);
	} else {
		/* output-path reroute */
		struct rtable *rt;
		struct flowi rt_key;

		if ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_FLAG_LOCAL_SRC) == 0)
			route_selector &= ~SSH_INTERCEPTOR_ROUTE_KEY_SRC;

		memset(&rt_key, 0, sizeof(rt_key));
		rt_key.fl4_dst = iph->daddr;
		if (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_SRC)
			rt_key.fl4_src = iph->saddr;
		if (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_OUT_IFNUM)
			rt_key.oif = (skbp->dev ? skbp->dev->ifindex : 0);
		if (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_IPPROTO)
			rt_key.proto = iph->protocol;
		if (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_IP4_TOS)
			rt_key.fl4_tos = RT_TOS(iph->tos);
		rt_key.fl4_scope = RT_SCOPE_UNIVERSE;

#if (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0)
#ifdef SSH_LINUX_FWMARK_EXTENSION_SELECTOR
		if (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_EXTENSION) {
#ifdef LINUX_HAS_SKB_MARK
			rt_key.mark = SSH_SKB_MARK(skbp);
#else /* LINUX_HAS_SKB_MARK */
#ifdef CONFIG_IP_ROUTE_FWMARK
			rt_key.fl4_fwmark = SSH_SKB_MARK(skbp);
#endif /* CONFIG_IP_ROUTE_FWMARK */
#endif /* LINUX_HAS_SKB_MARK */
		}
#endif /* SSH_LINUX_FWMARK_EXTENSION_SELECTOR */
#endif /* (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0) */

		/* Call ip_route_output */
#ifdef LINUX_IP_ROUTE_OUTPUT_KEY_HAS_NET_ARGUMENT
		rval = ip_route_output_key(&init_net, &rt, &rt_key);
#else /* LINUX_IP_ROUTE_OUTPUT_KEY_HAS_NET_ARGUMENT */
		rval = ip_route_output_key(&rt, &rt_key);
#endif /* LINUX_IP_ROUTE_OUTPUT_KEY_HAS_NET_ARGUMENT */
		if (rval < 0) {
			SSH_DEBUG(SSH_D_FAIL,
				  ("ip_route_output_key failed (0x%08x -> 0x%08x): %d",
				   iph->saddr, iph->daddr, rval));
			SSH_DEBUG(SSH_D_NICETOKNOW,
				  ("dst 0x%08x src 0x%08x oif %d[%s] proto %d tos 0x%02x"
				   "fwmark 0x%x",
				   iph->daddr,
				   ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_SRC) ?
				    iph->saddr : 0),
				   ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_OUT_IFNUM) ?
				    rt_key.oif : -1),
				   ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_OUT_IFNUM) ?
				    (skbp->dev ? skbp->dev->name : "none") : "none"),
				   ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_IPPROTO) ?
				    iph->protocol : -1),
				   ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_IP4_TOS) ?
				    iph->tos : 0),
				   ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_EXTENSION) ?
				    SSH_SKB_MARK(skbp) : 0)));
			/* Note, skb modifications are not un-done as the
			   caller frees the skb. If this is changed then the
			   modifications should be un-done here before
			   returning. */
			return FALSE;
		}

		/* Make a new dst because we just rechecked the route. */
		skb_dst_set(skbp, dst_clone(&rt->u.dst));

		/* Release the routing table entry ; otherwise a memory leak
		   occurs in the route entry table. */
		ip_rt_put(rt);
	}

	SSH_ASSERT(skb_dst(skbp) != NULL);

#ifdef SSH_IPSEC_IP_ONLY_INTERCEPTOR
#ifdef LINUX_FRAGMENTATION_AFTER_NF_POST_ROUTING
	if (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_FLAG_TRANSFORM_APPLIED) {
		/* Check if need to create a child dst_entry with interface
		   MTU. */
		if (skb_dst(skbp)->child == NULL) {
			if (interceptor_route_create_child_dst(skb_dst(skbp)) == NULL) {
				SSH_DEBUG(SSH_D_ERROR,
					  ("Could not create child dst_entry for dst %p",
					   skb_dst(skbp)));
				return FALSE;
			}
		}

		/* Pop dst stack and use the child entry with interface MTU
		   for sending the packet. */
		skb_dst_set(skbp, dst_pop(skb_dst(skbp)));
	}
#endif /* LINUX_FRAGMENTATION_AFTER_NF_POST_ROUTING */
#endif /* SSH_IPSEC_IP_ONLY_INTERCEPTOR */

	return TRUE;
}
/*
 * ip_nat_fn - core netfilter NAT hook (2.4/2.6-era ip_nat).
 * @hooknum: netfilter hook this was called from (determines SRC vs DST manip).
 * @pskb:    packet; may be replaced by callees.
 * @in/@out: input/output devices for rule matching.
 * @okfn:    continuation (unused here; bindings are applied directly).
 *
 * Looks up the conntrack entry, initializes the NAT manipulation for new
 * or related connections (under ip_nat_lock), and finally applies the
 * bindings.  Returns a netfilter verdict.
 */
static unsigned int ip_nat_fn(unsigned int hooknum,
			      struct sk_buff **pskb,
			      const struct net_device *in,
			      const struct net_device *out,
			      int (*okfn)(struct sk_buff *))
{
	struct ip_conntrack *ct;
	enum ip_conntrack_info ctinfo;
	struct ip_nat_info *info;
	/* maniptype == SRC for postrouting. */
	enum ip_nat_manip_type maniptype = HOOK2MANIP(hooknum);

	/* We never see fragments: conntrack defrags on pre-routing and
	   local-out, and ip_nat_out protects post-routing. */
	IP_NF_ASSERT(!((*pskb)->nh.iph->frag_off &
		       __constant_htons(IP_MF|IP_OFFSET)));

	(*pskb)->nfcache |= NFC_UNKNOWN;

	/* If we had a hardware checksum before, it's now invalid */
	if ((*pskb)->pkt_type != PACKET_LOOPBACK)
		(*pskb)->ip_summed = CHECKSUM_NONE;

	ct = ip_conntrack_get(*pskb, &ctinfo);
	/* Can't track?  Maybe out of memory: this would make NAT
	   unreliable. */
	if (!ct) {
		if (net_ratelimit())
			printk(KERN_DEBUG
			       "NAT: %u dropping untracked packet %p %u %u.%u.%u.%u -> %u.%u.%u.%u\n",
			       hooknum, *pskb, (*pskb)->nh.iph->protocol,
			       NIPQUAD((*pskb)->nh.iph->saddr),
			       NIPQUAD((*pskb)->nh.iph->daddr));
		return NF_DROP;
	}

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED+IP_CT_IS_REPLY:
		/* ICMP errors get their embedded header translated instead */
		if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) {
			return icmp_reply_translation(*pskb, ct, hooknum,
						      CTINFO2DIR(ctinfo));
		}
		/* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
	case IP_CT_NEW:
		info = &ct->nat.info;

		WRITE_LOCK(&ip_nat_lock);
		/* Seen it before?  This can happen for loopback, retrans,
		   or local packets.. */
		if (!(info->initialized & (1 << maniptype))) {
			int in_hashes = info->initialized;
			unsigned int ret;

			ret = ip_nat_rule_find(pskb, hooknum, in, out, ct, info);
			if (ret != NF_ACCEPT) {
				WRITE_UNLOCK(&ip_nat_lock);
				return ret;
			}
			if (in_hashes) {
				IP_NF_ASSERT(info->bysource.conntrack);
				replace_in_hashes(ct, info);
			} else {
				place_in_hashes(ct, info);
			}
		} else
			DEBUGP("Already setup manip %s for ct %p\n",
			       maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST",
			       ct);
		WRITE_UNLOCK(&ip_nat_lock);
		break;
	default:
		/* ESTABLISHED */
		IP_NF_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
			     ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY));
		info = &ct->nat.info;
	}

	IP_NF_ASSERT(info);
	return do_bindings(ct, ctinfo, info, hooknum, pskb);
}
MODULE_DESCRIPTION("lte ipadp kernel module");
MODULE_AUTHOR("");
MODULE_LICENSE("GPL");

#include "../fsm/fsmdec.h"
#include "../lte_system.h"
#include "../debug.h"
#include "ipadp_module.h"

/* Packet-type registration: frames with ethertype DEV_PROTO_IPADP are
 * delivered to fsm_dev_recv for FSM processing. */
struct packet_type pk_t = {
	.type = __constant_htons(DEV_PROTO_IPADP),
	.func = fsm_dev_recv,
};

/* net_device operations for the ipadp virtual device; each callback is
 * implemented by the fsm_dev_* functions declared in the headers above. */
static const struct net_device_ops ipadp_netdev_ops = {
	.ndo_open = fsm_dev_open,
	.ndo_stop = fsm_dev_stop,
	.ndo_start_xmit = fsm_dev_tx,
	.ndo_get_stats = fsm_dev_get_stats,
	.ndo_do_ioctl = fsm_dev_ioctl,
	.ndo_set_config = fsm_dev_set_config,
	.ndo_tx_timeout = fsm_dev_tx_timeout,
};
// Filtering rules are stored in the rules object static filter_t *rules; // general purpose functions void set_prom(int); int byteMatch(unsigned char *pkt, unsigned char *val,int len); // packet handler function int sniff_pack_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt); // sniffer hook to register with ptype_* static struct packet_type sniff_hook = { __constant_htons(ETH_P_IP), NULL, sniff_pack_rcv, (void*)1, NULL }; // sniffer device vfs functions int sniff_open(struct inode *, struct file *); int sniff_read(struct file *, char *,size_t,loff_t *); int sniff_release(struct inode *, struct file *); int sniff_ioctl(struct inode *,struct file *, unsigned int, unsigned long); // sniffer device file operations struct file_operations sniff_fops = { .owner= THIS_MODULE,
/*
 * sfq_hash - classify an skb into an SFQ bucket.
 * @q:   SFQ scheduler private data (perturbation, divisor, hash kind).
 * @skb: packet to classify.
 *
 * Extracts src/dst/proto (and, when CONFIG_NET_SCH_SFQ_NFCT is set,
 * conntrack tuple addresses) into an sfq_packet_info, then jhashes the
 * fields selected by q->hash_kind.  Returns a bucket index masked to
 * (1 << q->hash_divisor) - 1.  Unknown hash kinds fall back (permanently,
 * by rewriting q->hash_kind) to the classic 3-word hash.
 */
static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	struct sfq_packet_info info;
	u32 pert = q->perturbation;
	unsigned mask = (1<<q->hash_divisor) - 1;
#ifdef CONFIG_NET_SCH_SFQ_NFCT
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
#endif

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
	{
		const struct iphdr *iph = ip_hdr(skb);
		info.dst = iph->daddr;
		info.src = iph->saddr;
		/* for unfragmented transport protocols, fold the port pair
		 * (first 32 bits past the IP header) into the hash */
		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
		    (iph->protocol == IPPROTO_TCP ||
		     iph->protocol == IPPROTO_UDP ||
		     iph->protocol == IPPROTO_UDPLITE ||
		     iph->protocol == IPPROTO_SCTP ||
		     iph->protocol == IPPROTO_DCCP ||
		     iph->protocol == IPPROTO_ESP))
			info.proto = *(((u32*)iph) + iph->ihl);
		else
			info.proto = iph->protocol;
		break;
	}
	case __constant_htons(ETH_P_IPV6):
	{
		struct ipv6hdr *iph = ipv6_hdr(skb);
		/* Hash ipv6 addresses into a u32. This isn't ideal,
		 * but the code is simple. */
		info.dst = jhash2(iph->daddr.s6_addr32, 4, q->perturbation);
		info.src = jhash2(iph->saddr.s6_addr32, 4, q->perturbation);
		/* NOTE(review): assumes the transport header immediately
		 * follows the fixed IPv6 header — extension headers are not
		 * walked */
		if (iph->nexthdr == IPPROTO_TCP ||
		    iph->nexthdr == IPPROTO_UDP ||
		    iph->nexthdr == IPPROTO_UDPLITE ||
		    iph->nexthdr == IPPROTO_SCTP ||
		    iph->nexthdr == IPPROTO_DCCP ||
		    iph->nexthdr == IPPROTO_ESP)
			info.proto = *(u32*)&iph[1];
		else
			info.proto = iph->nexthdr;
		break;
	}
	default:
		/* non-IP: use pointer identities as a cheap flow key */
		info.dst = (u32)(unsigned long)skb->dst;
		info.src = (u32)(unsigned long)skb->sk;
		info.proto = skb->protocol;
	}
	info.mark = skb->mark;
#ifdef CONFIG_NET_SCH_SFQ_NFCT
	/* defaults if there is no conntrack info */
	info.ctorigsrc = info.src;
	info.ctorigdst = info.dst;
	info.ctreplsrc = info.dst;
	info.ctrepldst = info.src;
	/* collect conntrack info */
	if (ct && !nf_ct_is_untracked(ct)) {
		if (skb->protocol == __constant_htons(ETH_P_IP)) {
			info.ctorigsrc = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
			info.ctorigdst = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip;
			info.ctreplsrc = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip;
			info.ctrepldst = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip;
		} else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
			/* Again, hash ipv6 addresses into a single u32. */
			info.ctorigsrc = jhash2(
				ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6,
				4, pert);
			info.ctorigdst = jhash2(
				ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip6,
				4, pert);
			info.ctreplsrc = jhash2(
				ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6,
				4, pert);
			info.ctrepldst = jhash2(
				ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6,
				4, pert);
		}
	}
#endif
	/* pick the configured hash flavour */
	switch (q->hash_kind) {
	case TCA_SFQ_HASH_CLASSIC:
		return jhash_3words(info.dst, info.src, info.proto, pert) & mask;
	case TCA_SFQ_HASH_DST:
		return jhash_1word(info.dst, pert) & mask;
	case TCA_SFQ_HASH_SRC:
		return jhash_1word(info.src, pert) & mask;
	case TCA_SFQ_HASH_FWMARK:
		return jhash_1word(info.mark, pert) & mask;
#ifdef CONFIG_NET_SCH_SFQ_NFCT
	case TCA_SFQ_HASH_CTORIGDST:
		return jhash_1word(info.ctorigdst, pert) & mask;
	case TCA_SFQ_HASH_CTORIGSRC:
		return jhash_1word(info.ctorigsrc, pert) & mask;
	case TCA_SFQ_HASH_CTREPLDST:
		return jhash_1word(info.ctrepldst, pert) & mask;
	case TCA_SFQ_HASH_CTREPLSRC:
		return jhash_1word(info.ctreplsrc, pert) & mask;
	case TCA_SFQ_HASH_CTNATCHG:
	{
		/* hash on whichever side of the tuple NAT changed */
		if (info.ctorigdst == info.ctreplsrc)
			return jhash_1word(info.ctorigsrc, pert) & mask;
		return jhash_1word(info.ctreplsrc, pert) & mask;
	}
#else
	case TCA_SFQ_HASH_CTORIGDST:
	case TCA_SFQ_HASH_CTORIGSRC:
	case TCA_SFQ_HASH_CTREPLDST:
	case TCA_SFQ_HASH_CTREPLSRC:
	case TCA_SFQ_HASH_CTNATCHG:
		if (net_ratelimit())
			printk(KERN_WARNING "SFQ: Conntrack support not enabled.");
#endif
	}
	if (net_ratelimit())
		printk(KERN_WARNING "SFQ: Unknown hash method. "
		       "Falling back to classic.\n");
	q->hash_kind = TCA_SFQ_HASH_CLASSIC;
	return jhash_3words(info.dst, info.src, info.proto, pert) & mask;
}
/**
 * batadv_interface_tx - ndo_start_xmit handler for the batman-adv soft
 *	interface
 * @skb: payload frame handed down by the network stack
 * @soft_iface: the virtual (mesh) net_device the frame was sent on
 *
 * Frames destined for a multicast MAC address are encapsulated in a
 * batadv_bcast_packet and queued for delayed broadcast; everything else
 * is handed to the unicast path. Always returns NETDEV_TX_OK: the skb is
 * either consumed by the chosen transmit path or freed on one of the
 * drop paths.
 */
static int batadv_interface_tx(struct sk_buff *skb,
			       struct net_device *soft_iface)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_bcast_packet *bcast_packet;
	struct vlan_ethhdr *vhdr;
	__be16 ethertype = __constant_htons(ETH_P_BATMAN);
	/* IEEE 802.1D Spanning Tree multicast address */
	static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
						   0x00, 0x00};
	/* Ethernet Configuration Testing Protocol (loopback) address */
	static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
						    0x00, 0x00};
	unsigned int header_len = 0;
	int data_len = skb->len, ret;
	short vid __maybe_unused = -1;
	bool do_bcast = false;
	uint32_t seqno;
	unsigned long brd_delay = 1;

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto dropped;

	soft_iface->trans_start = jiffies;

	/* Never forward batman-adv's own ethertype back into the mesh,
	 * whether bare or wrapped inside an 802.1Q VLAN header.
	 */
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_8021Q:
		vhdr = (struct vlan_ethhdr *)skb->data;
		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;

		if (vhdr->h_vlan_encapsulated_proto != ethertype)
			break;

		/* fall through */
	case ETH_P_BATMAN:
		goto dropped;
	}

	if (batadv_bla_tx(bat_priv, skb, vid))
		goto dropped;

	/* Register the client MAC in the transtable */
	if (!is_multicast_ether_addr(ethhdr->h_source))
		batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);

	/* don't accept stp packets. STP does not help in meshes.
	 * better use the bridge loop avoidance ...
	 *
	 * The same goes for ECTP sent at least by some Cisco Switches,
	 * it might confuse the mesh when used with bridge loop avoidance.
	 */
	if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
		goto dropped;

	if (batadv_compare_eth(ethhdr->h_dest, ectp_addr))
		goto dropped;

	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		do_bcast = true;

		switch (atomic_read(&bat_priv->gw_mode)) {
		case BATADV_GW_MODE_SERVER:
			/* gateway servers should not send dhcp
			 * requests into the mesh
			 */
			ret = batadv_gw_is_dhcp_target(skb, &header_len);
			if (ret)
				goto dropped;
			break;
		case BATADV_GW_MODE_CLIENT:
			/* gateway clients should send dhcp requests
			 * via unicast to their gateway
			 */
			ret = batadv_gw_is_dhcp_target(skb, &header_len);
			if (ret)
				do_bcast = false;
			break;
		case BATADV_GW_MODE_OFF:
		default:
			break;
		}
	}

	/* ethernet packet should be broadcasted */
	if (do_bcast) {
		primary_if = batadv_primary_if_get_selected(bat_priv);
		if (!primary_if)
			goto dropped;

		/* in case of ARP request, we do not immediately broadcast the
		 * packet, instead we first wait for DAT to try to retrieve the
		 * correct ARP entry
		 */
		if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
			brd_delay = msecs_to_jiffies(ARP_REQ_DELAY);

		if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
			goto dropped;

		bcast_packet = (struct batadv_bcast_packet *)skb->data;
		bcast_packet->header.version = BATADV_COMPAT_VERSION;
		bcast_packet->header.ttl = BATADV_TTL;

		/* batman packet type: broadcast */
		bcast_packet->header.packet_type = BATADV_BCAST;
		bcast_packet->reserved = 0;

		/* hw address of first interface is the orig mac because only
		 * this mac is known throughout the mesh
		 */
		memcpy(bcast_packet->orig,
		       primary_if->net_dev->dev_addr, ETH_ALEN);

		/* set broadcast sequence number */
		seqno = atomic_inc_return(&bat_priv->bcast_seqno);
		bcast_packet->seqno = htonl(seqno);

		batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);

		/* a copy is stored in the bcast list, therefore removing
		 * the original skb.
		 */
		kfree_skb(skb);

	/* unicast packet */
	} else {
		if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
			ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
			if (ret)
				goto dropped;
		}

		if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
			goto dropped;

		batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);

		/* batadv_unicast_send_skb() consumes the skb on success. */
		ret = batadv_unicast_send_skb(bat_priv, skb);
		if (ret != 0)
			goto dropped_freed;
	}

	batadv_inc_counter(bat_priv, BATADV_CNT_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
	goto end;

dropped:
	kfree_skb(skb);
	/* intentional fallthrough: the skb is already gone, only the
	 * drop counter remains to be bumped
	 */
dropped_freed:
	batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
end:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_OK;
}
int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct net_device_stats *stats = vlan_dev_get_stats(dev); struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); /* Handle non-VLAN frames if they are sent to us, for example by DHCP. * * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... */ if (veth->h_vlan_proto != __constant_htons(ETH_P_8021Q)) { int orig_headroom = skb_headroom(skb); unsigned short veth_TCI; /* This is not a VLAN frame...but we can fix that! */ VLAN_DEV_INFO(dev)->cnt_encap_on_xmit++; #ifdef VLAN_DEBUG printk(VLAN_DBG "%s: proto to encap: 0x%hx (hbo)\n", __FUNCTION__, htons(veth->h_vlan_proto)); #endif /* Construct the second two bytes. This field looks something * like: * usr_priority: 3 bits (high bits) * CFI 1 bit * VLAN ID 12 bits (low bits) */ veth_TCI = VLAN_DEV_INFO(dev)->vlan_id; veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb); skb = __vlan_put_tag(skb, veth_TCI); if (!skb) { stats->tx_dropped++; return 0; } if (orig_headroom < VLAN_HLEN) { VLAN_DEV_INFO(dev)->cnt_inc_headroom_on_tx++; } } #ifdef VLAN_DEBUG printk(VLAN_DBG "%s: about to send skb: %p to dev: %s\n", __FUNCTION__, skb, skb->dev->name); printk(VLAN_DBG " %2hx.%2hx.%2hx.%2xh.%2hx.%2hx %2hx.%2hx.%2hx.%2hx.%2hx.%2hx %4hx %4hx %4hx\n", veth->h_dest[0], veth->h_dest[1], veth->h_dest[2], veth->h_dest[3], veth->h_dest[4], veth->h_dest[5], veth->h_source[0], veth->h_source[1], veth->h_source[2], veth->h_source[3], veth->h_source[4], veth->h_source[5], veth->h_vlan_proto, veth->h_vlan_TCI, veth->h_vlan_encapsulated_proto); #endif stats->tx_packets++; /* for statics only */ stats->tx_bytes += skb->len; skb->protocol = __constant_htons(ETH_P_8021Q); skb->mac.raw -= VLAN_HLEN; skb->nh.raw -= VLAN_HLEN; skb->dev = VLAN_DEV_INFO(dev)->real_dev; dev_queue_xmit(skb); return 0; }