static __u32 tcp_v6_init_sequence(const struct sk_buff *skb) { return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, ipv6_hdr(skb)->saddr.s6_addr32, tcp_hdr(skb)->dest, tcp_hdr(skb)->source); }
/* * Verify that our various assumptions about sk_buffs and the conditions * under which TSO will be attempted hold true. Return the protocol number. */ static __be16 efx_tso_check_protocol(struct sk_buff *skb) { __be16 protocol = skb->protocol; EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != protocol); if (protocol == htons(ETH_P_8021Q)) { /* Find the encapsulated protocol; reset network header * and transport header based on that. */ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; protocol = veh->h_vlan_encapsulated_proto; skb_set_network_header(skb, sizeof(*veh)); if (protocol == htons(ETH_P_IP)) skb_set_transport_header(skb, sizeof(*veh) + 4 * ip_hdr(skb)->ihl); else if (protocol == htons(ETH_P_IPV6)) skb_set_transport_header(skb, sizeof(*veh) + sizeof(struct ipv6hdr)); } if (protocol == htons(ETH_P_IP)) { EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); } else { EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6)); EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP); } EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) + (tcp_hdr(skb)->doff << 2u)) > skb_headlen(skb)); return protocol; }
static verdict ipv4_tcp(struct sk_buff *skb, struct tuple *tuple4) { tuple4->src.addr4.l3.s_addr = ip_hdr(skb)->saddr; tuple4->src.addr4.l4 = be16_to_cpu(tcp_hdr(skb)->source); tuple4->dst.addr4.l3.s_addr = ip_hdr(skb)->daddr; tuple4->dst.addr4.l4 = be16_to_cpu(tcp_hdr(skb)->dest); tuple4->l3_proto = L3PROTO_IPV4; tuple4->l4_proto = L4PROTO_TCP; return VER_CONTINUE; }
/* * Verify that our various assumptions about sk_buffs and the conditions * under which TSO will be attempted hold true. */ static inline void efx_tso_check_safe(const struct sk_buff *skb) { EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP)); EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != skb->protocol); EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) + (tcp_hdr(skb)->doff << 2u)) > skb_headlen(skb)); }
int __ip_local_out(struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = htons(skb->len); ip_send_check(iph); /* Mark skb to identify SMB data packet */ if ((ip_hdr(skb)->protocol == IPPROTO_TCP) && tcp_hdr(skb)) skb->tcpf_smb = (tcp_hdr(skb)->source == htons(0x01bd)); return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev, dst_output); }
unsigned int main_hook(unsigned int hooknum, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff*)) { struct iphdr *iph = NULL; struct tcphdr *tcph = NULL; if ((skb != NULL) && skb->pkt_type == PACKET_HOST && skb->protocol == htons(ETH_P_IP)) { iph = ip_hdr(skb); if (iph == NULL) printk( KERN_ALERT "iph was null\n"); else { if (iph->protocol == IPPROTO_TCP) { tcph = tcp_hdr(skb); if (tcph == NULL) printk(KERN_ALERT "tcph was null\n"); else { if (in) printk("In: %s\n", in->name); if (out) printk("Out: %s\n", out->name); printk(KERN_ALERT "Source IP: %u.%u.%u.%u\nDestination IP: %u.%u.%u.%u\nSource Port: %u\nDestination Port: %u\n\n\n", NIPQUAD(iph->saddr), NIPQUAD(iph->daddr), ntohs(tcph->source), ntohs(tcph->dest)); } } } } return NF_ACCEPT; }
/*
 * Rewrite an IPv4 address field and fix up every checksum that covers
 * it: the TCP/UDP pseudo-header checksum (when the transport header is
 * fully present) and the IP header checksum. Invalidates the cached
 * rxhash since the flow key changed.
 */
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	switch (nh->protocol) {
	case IPPROTO_TCP:
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
		break;
	case IPPROTO_UDP:
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			/* The UDP checksum is optional: only touch it when
			 * in use (or still to be computed by hardware).
			 */
			if (uh->check || get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 *addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
		break;
	}

	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_rxhash(skb);
	*addr = new_addr;
}
/** * "ars" means add, remove, send */ static bool test_pkt_queue_ars(void) { struct session_entry *session; struct sk_buff *skb; struct tuple tuple4; struct tcphdr *hdr_tcp; bool success = true; /* Prepare */ if (is_error(init_ipv4_tuple(&tuple4, "5.6.7.8", 5678, "192.168.2.1", 8765, L4PROTO_TCP))) return false; session = session_create_str_tcp("1::2", 1212, "3::4", 3434, "192.168.2.1", 8765, "5.6.7.8", 5678, V4_INIT); /* The session entry that is supposed to be created in "tcp_close_state_handle". */ if (!session) return false; if (is_error(create_skb4_tcp(&tuple4, &skb, 100, 32))) { session_return(session); return false; } hdr_tcp = tcp_hdr(skb); hdr_tcp->syn = true; hdr_tcp->rst = false; hdr_tcp->fin = false; success &= assert_equals_int(0, pktqueue_add(session, skb), "pktqueue_add 1"); success &= assert_equals_int(0, pktqueue_remove(session), "pktqueue_remove 1"); success &= assert_equals_int(-ENOENT, pktqueue_send(session), "pktqueue_send 1"); success &= assert_equals_int(0, icmp64_pop(), "pktqueue not sent an icmp error"); session_return(session); /* kfree_skb(skb); "skb" kfreed when pktqueue_remove is executed */ return success; }
/*
 * Build a minimal TCP packet (IPv4 or IPv6) with the given flag bits.
 *
 * @skb: out parameter; receives the newly allocated packet on success.
 * @l3_proto: L3PROTO_IPV4 or L3PROTO_IPV6.
 * @syn/@rst/@fin: values for the corresponding TCP header flags.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int create_tcp_packet(struct sk_buff **skb, l3_protocol l3_proto,
		bool syn, bool rst, bool fin)
{
	struct tcphdr *hdr_tcp;
	struct tuple tuple;
	int error;

	switch (l3_proto) {
	case L3PROTO_IPV4:
		error = init_tuple4(&tuple, "8.7.6.5", 8765, "5.6.7.8", 5678,
				L4PROTO_TCP);
		if (error)
			return error;
		error = create_skb4_tcp(&tuple, skb, 100, 32);
		if (error)
			return error;
		break;
	case L3PROTO_IPV6:
		error = init_tuple6(&tuple, "1::2", 1212, "3::4", 3434,
				L4PROTO_TCP);
		if (error)
			return error;
		error = create_skb6_tcp(&tuple, skb, 100, 32);
		if (error)
			return error;
		break;
	default:
		/* BUGFIX: the original switch had no default, so an
		 * unexpected protocol value fell through and dereferenced
		 * the uninitialized *skb via tcp_hdr() - undefined
		 * behavior. Fail explicitly instead.
		 */
		return -EINVAL;
	}

	hdr_tcp = tcp_hdr(*skb);
	hdr_tcp->syn = syn;
	hdr_tcp->rst = rst;
	hdr_tcp->fin = fin;

	return 0;
}
/*
 * After rewriting an IPv6 address, patch the transport-layer checksum
 * (TCP, UDP or ICMPv6) that covers it, provided the full transport
 * header is present in the packet.
 */
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	switch (l4_proto) {
	case NEXTHDR_TCP:
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, 1);
		break;
	case NEXTHDR_UDP:
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			/* The UDP checksum is optional: only update it when
			 * present (or still to be computed by hardware).
			 */
			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
		break;
	case NEXTHDR_ICMP:
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, 1);
		break;
	}
}
/*
 * After rewriting an IPv4 address, patch the TCP/UDP checksum that
 * covers it. Non-first fragments carry no transport header and are
 * left alone.
 */
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	switch (nh->protocol) {
	case IPPROTO_TCP:
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, 1);
		break;
	case IPPROTO_UDP:
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			/* Optional UDP checksum: only fix it when in use. */
			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
		break;
	}
}
/* Can't use icsk->icsk_af_ops->send_check here because the ip addresses * might have been changed by NAT. */ static void update_chksum(struct sk_buff *skb, int headln) { struct tcphdr *th = tcp_hdr(skb); int datalen = skb->len - headln; const struct ipv6hdr *ipv6h; const struct iphdr *iph; /* We only changed the payload so if we are using partial we don't * need to update anything. */ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) return; skb->ip_summed = CHECKSUM_PARTIAL; skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct tcphdr, check); if (skb->sk->sk_family == AF_INET6) { ipv6h = ipv6_hdr(skb); th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, datalen, IPPROTO_TCP, 0); } else { iph = ip_hdr(skb); th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen, IPPROTO_TCP, 0); } }
/**
 * ipv4_skb_to_auditdata : fill auditdata from skb
 * @skb : the skb
 * @ad : the audit data to fill
 * @proto : the layer 4 protocol
 *
 * return 0 on success
 */
int ipv4_skb_to_auditdata(struct sk_buff *skb,
			  struct common_audit_data *ad, u8 *proto)
{
	struct iphdr *ih = ip_hdr(skb);

	if (ih == NULL)
		return -EINVAL;

	ad->u.net->v4info.saddr = ih->saddr;
	ad->u.net->v4info.daddr = ih->daddr;

	if (proto)
		*proto = ih->protocol;

	/* Port numbers only exist in the initial fragment. */
	if (ntohs(ih->frag_off) & IP_OFFSET)
		return 0;

	switch (ih->protocol) {
	case IPPROTO_TCP: {
		struct tcphdr *th = tcp_hdr(skb);

		if (th == NULL)
			break;
		ad->u.net->sport = th->source;
		ad->u.net->dport = th->dest;
		break;
	}
	case IPPROTO_UDP: {
		struct udphdr *uh = udp_hdr(skb);

		if (uh == NULL)
			break;
		ad->u.net->sport = uh->source;
		ad->u.net->dport = uh->dest;
		break;
	}
	case IPPROTO_DCCP: {
		struct dccp_hdr *dh = dccp_hdr(skb);

		if (dh == NULL)
			break;
		ad->u.net->sport = dh->dccph_sport;
		ad->u.net->dport = dh->dccph_dport;
		break;
	}
	case IPPROTO_SCTP: {
		struct sctphdr *sh = sctp_hdr(skb);

		if (sh == NULL)
			break;
		ad->u.net->sport = sh->source;
		ad->u.net->dport = sh->dest;
		break;
	}
	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * Fill in the IPv4 header for a TCP segment, seed the TCP checksum
 * with the pseudo-header sum, run the optional route-out hook, and
 * hand the packet to the mbuf layer for transmission.
 */
int ip_format_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			   __be32 saddr, __be32 daddr, u16 tcp_len)
{
	struct net *pnet = sock_net(sk);
	struct netns_ipv4 *n_ipv4 = &pnet->ipv4;
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);
	struct iphdr *iph = ip_hdr(skb);
	int ret;

	iph->tos = inet->tos;
	iph->tot_len = htons(sizeof(struct iphdr) + tcp_len);
	iph->frag_off = htons(IP_DF);
	ip_select_ident(skb, sk);
	iph->ttl = ip_select_ttl(sk);
	iph->protocol = sk->sk_protocol;
	iph->daddr = daddr;
	iph->saddr = saddr;
	iph->check = 0;

	th->check = 0;
	th->check = get_ipv4_psd_sum(iph);

	if (n_ipv4->aft_route_out) {
		ret = n_ipv4->aft_route_out(skb);
		if (ret < 0)
			return US_ENETUNREACH;
	}

	return mbuf_format_and_send(skb, tcp_len);
}
/* Parse an HTTP request header out of an skb into @http_info. */
u32 cloud_wlan_http_skb_parse_request(struct sk_buff *skb, http_t *http_info)
{
	u32 ret;
	u32 payload_len;
	struct iphdr *iphdr;
	struct tcphdr *tcphdr;

	iphdr = ip_hdr(skb);
	tcphdr = tcp_hdr(skb);

	/* Only TCP segments destined to an HTTP port are of interest. */
	if ((IPPROTO_TCP != iphdr->protocol)
		|| (PROTO_HTTP != ntohs(tcphdr->dest) && PROTO_HTTP2 != ntohs(tcphdr->dest)))
	{
		return CWLAN_FAIL;
	}

	/* BUGFIX: iphdr->tot_len is big-endian and also covers the IP and
	 * TCP headers. The original used "iphdr->tot_len - tcphdr->doff*4",
	 * which is byte-swapped garbage on little-endian hosts and never
	 * subtracted the IP header length. Compute the true payload length.
	 */
	payload_len = ntohs(iphdr->tot_len) - iphdr->ihl * 4 - tcphdr->doff * 4;

	/* Check the header for a GET request (result currently unused). */
	ret = is_http_get_pkt((void *)tcphdr + tcphdr->doff * 4, payload_len);
	if (ret != CWLAN_OK)
	{
		return CWLAN_FAIL;
	}

	ret = cloud_wlan_get_http_header_info((u8 *)tcphdr + tcphdr->doff * 4, http_info);
	if (ret != CWLAN_OK)
	{
		return CWLAN_FAIL;
	}

	return CWLAN_OK;
}
int ip_queue_xmit(struct sk_buff *skb) { //smallboy: We delete all the route here; //smallboy: ROUTE ROUTE ROUTE ROUTE !!!!! struct sock *sk = skb->sk; struct inet_sock *inet = inet_sk(sk); struct tcphdr *th = tcp_hdr(skb); struct net *pnet = sock_net(sk); struct netns_ipv4 *n_ipv4 = &pnet->ipv4; struct iphdr *iph; u32 ihl; s32 ret = 0; //ihl = sizeof(struct iphdr) + (inet_opt ? inet_opt->optlen : 0); ihl = sizeof(struct iphdr); skb_push(skb, ihl); skb_reset_network_header(skb); iph = ip_hdr(skb); iph->version = 4; //smallboy: Attention for ipv6; iph->ihl = ihl>>2; iph->tos = inet->tos; iph->tot_len = htons(skb->len); iph->frag_off = htons(IP_DF); iph->ttl = ip_select_ttl(sk); iph->protocol = sk->sk_protocol; iph->daddr = inet->inet_daddr; // iph->saddr = inet->inet_rcv_saddr; //smallboy:Attention here; ip_select_ident_more(iph, sk, (skb->gso_segs ?: 1) - 1); th->check = 0; //th->check = get_ipv4_udptcp_checksum(iph , th); iph->check = 0; th->check = get_ipv4_psd_sum(iph); //iph->check = ip_fast_csum(iph, iph->ihl); if(n_ipv4->aft_route_out){ ret = n_ipv4->aft_route_out(skb); if(ret < 0){ skb_reset_data_header(skb); return ret; } } ret = ip_local_out(skb); return ret; }
static void stt_rcv(struct stt_sock *stt_sock, struct sk_buff *skb) { struct vport *vport = stt_sock->rcv_data; struct stthdr *stth = stt_hdr(skb); struct ovs_tunnel_info tun_info; struct sk_buff *next; ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), tcp_hdr(skb)->source, tcp_hdr(skb)->dest, get_unaligned(&stth->key), TUNNEL_KEY | TUNNEL_CSUM, NULL, 0); do { next = skb->next; skb->next = NULL; ovs_vport_receive(vport, skb, &tun_info); } while ((skb = next)); }
static void __tcp_v6_send_check(struct sk_buff *skb, const struct in6_addr *saddr, const struct in6_addr *daddr) { struct tcphdr *th = tcp_hdr(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) { th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0); skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct tcphdr, check); } else {
/* GRO completion for IPv4/TCP: restore the pseudo-header checksum of
 * the merged super-packet and mark it as TSO-v4 before the generic TCP
 * completion step.
 */
static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
/* Debug helper: log the IPv6 addresses and TCP ports of @skb, tagged
 * with the calling hook's name @fn. Prints a separator after the
 * postrouting6 hook.
 */
inline void dumpIp6Hdr(const char *fn, const struct sk_buff *skb)
{
	const struct ipv6hdr *ip6 = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);

	printk(KERN_ALERT "%s, saddr:%pI6:%hu, daddr:%pI6:%hu\n",
	       fn, &ip6->saddr, ntohs(th->source),
	       &ip6->daddr, ntohs(th->dest));
	if (!strcmp(fn, "postrouting6"))
		printk(KERN_ALERT "---------------------------------\n");
}
/* Continue using Toeplitz hash function.
 * This implementation is different from the current upstream code.
 * See more info from this upstream commit:
 * 757647e10e55c01fb7a9c4356529442e316a7c72
 */
bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
{
	struct iphdr *ip4;
	struct ipv6hdr *ip6;
	__be32 dbuf[9];
	int data_len;

	if (eth_hdr(skb)->h_proto != htons(ETH_P_IP) &&
	    eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))
		return false;

	ip4 = ip_hdr(skb);
	ip6 = ipv6_hdr(skb);

	if (ip4->version == 4) {
		dbuf[0] = ip4->saddr;
		dbuf[1] = ip4->daddr;
		if (ip4->protocol == IPPROTO_TCP) {
			/* One 32-bit read covering both TCP ports. */
			dbuf[2] = *(__be32 *)&tcp_hdr(skb)->source;
			data_len = 12;
		} else {
			data_len = 8;
		}
	} else if (ip6->version == 6) {
		memcpy(dbuf, &ip6->saddr, 32);
		if (ip6->nexthdr == IPPROTO_TCP) {
			/* One 32-bit read covering both TCP ports. */
			dbuf[8] = *(__be32 *)&tcp_hdr(skb)->source;
			data_len = 36;
		} else {
			data_len = 32;
		}
	} else {
		return false;
	}

	*hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, dbuf, data_len);
	return true;
}
/*
 * Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);
	struct net *pnet = sock_net(sk);
	struct netns_ipv4 *n_ipv4 = &pnet->ipv4;
	struct iphdr *iph;
	int ret;

	/* Build the IP header (no IP options supported). */
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = htons(IP_DF);	/* must precede ip_select_ident */
	ip_select_ident(skb, sk);
	iph->ttl = ip_select_ttl(sk);
	iph->protocol = sk->sk_protocol;
	iph->daddr = daddr;
	iph->saddr = saddr;
	iph->check = 0;

	/* Seed the TCP checksum with the pseudo-header sum only. */
	th->check = 0;
	th->check = get_ipv4_psd_sum(iph);

	if (n_ipv4->aft_route_out) {
		ret = n_ipv4->aft_route_out(skb);
		if (ret < 0)
			return US_ENETUNREACH;
	}

	/* Send it out. */
	return ip_local_out(skb);
}
/**
 * Filtering and updating done during the CLOSED state of the TCP state machine.
 * Part of RFC 6146 section 3.5.2.2.
 *
 * SYN packets start a new handshake and are delegated to the per-family
 * handlers; any other packet is only allowed through if a BIB entry for
 * its tuple already exists.
 */
static verdict tcp_closed_state_handle(struct sk_buff *skb, struct tuple *tuple)
{
	struct bib_entry *bib;
	verdict result;
	int error;

	switch (skb_l3_proto(skb)) {
	case L3PROTO_IPV6:
		if (tcp_hdr(skb)->syn) {
			result = is_error(tcp_closed_v6_syn(skb, tuple))
					? VER_DROP : VER_CONTINUE;
			goto syn_out;
		}
		break;
	case L3PROTO_IPV4:
		if (tcp_hdr(skb)->syn) {
			result = tcp_closed_v4_syn(skb, tuple);
			goto syn_out;
		}
		break;
	}

	/* Not a SYN: legal only when a BIB entry already exists. */
	error = bibdb_get(tuple, &bib);
	if (error) {
		log_debug("Closed state: Packet is not SYN and there is no BIB entry, so discarding. "
				"ERRcode %d", error);
		inc_stats(skb, IPSTATS_MIB_INNOROUTES);
		return VER_DROP;
	}
	bib_return(bib);

	return VER_CONTINUE;

syn_out:
	/* A dropped SYN counts as a discard for SNMP purposes. */
	if (result == VER_DROP)
		inc_stats(skb, IPSTATS_MIB_INDISCARDS);
	return result;
}
/* Compute the TCP-MD5 signature of @skb into @md5_hash (16 bytes).
 * The IPv6 addresses for the pseudo-header are taken from the full
 * socket, the request socket, or the packet itself - in that order.
 * Returns 0 on success; on failure the hash is zeroed and 1 returned.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	/* Hash order: pseudo-header, TCP header, payload, then the key. */
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	/* Never leave a partial digest behind on failure. */
	memset(md5_hash, 0, 16);
	return 1;
}
/* Source-NAT rewrite: replace the sender address of @p_psoSKB with the
 * address previously recorded for this destination/port pair, fixing
 * up both the IP and TCP checksums along the way.
 */
void modisg_snat_oper(struct sk_buff *p_psoSKB, struct iphdr *p_psoIP)
{
	struct tcphdr *psoTCP = tcp_hdr(p_psoSKB);
	__be32 uiSrcAddr;

	if (psoTCP == NULL)
		return;

	/* Look up the replacement source address. */
	uiSrcAddr = modisg_dnat_get(p_psoIP->daddr, psoTCP->dest);

	/* Patch the checksums before swapping the address in. */
	csum_replace4(&p_psoIP->check, p_psoIP->saddr, uiSrcAddr);
	inet_proto_csum_replace4(&psoTCP->check, p_psoSKB,
				 p_psoIP->saddr, uiSrcAddr, 1);
	p_psoIP->saddr = uiSrcAddr;
}
void modisg_dnat_oper(struct sk_buff *p_psoSKB, struct iphdr *p_psoIP) { struct tcphdr *psoTCP = tcp_hdr(p_psoSKB); if(psoTCP == NULL) return; /* сохраняем значение адреса назначения в таблице */ modisg_dnat_set(p_psoIP->saddr, psoTCP->source, p_psoIP->daddr); /* пересчитываем контрольные суммы */ csum_replace4(&p_psoIP->check, p_psoIP->daddr, L4R_ADDR); inet_proto_csum_replace4(&psoTCP->check, p_psoSKB, p_psoIP->daddr, L4R_ADDR, 1); p_psoIP->daddr = L4R_ADDR; }
static int tcp4_gro_complete(struct sk_buff *skb, int thoff) { const struct iphdr *iph = ip_hdr(skb); struct tcphdr *th = tcp_hdr(skb); th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr, iph->daddr, 0); skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; if (NAPI_GRO_CB(skb)->is_atomic) skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; return tcp_gro_complete(skb); }
static unsigned int switch_hook_forward( unsigned int hook, struct sk_buff *skb, const struct net_device *dev_in, const struct net_device *dev_out, int (*okfn)(struct sk_buff *) ) { // layer 2 // //-------------// skb->data // layer 3 // unsigned int result = NF_ACCEPT; struct ethhdr *eth_header = eth_hdr(skb); if (ntohs(eth_header->h_proto) == ETH_P_IP) { struct iphdr *ip_header = ip_hdr(skb); unsigned int ip_header_length = ip_hdrlen(skb); unsigned int ip_packet_length = ntohs(ip_header->tot_len); if (ip_header->protocol == IPPROTO_TCP) { skb_pull(skb, ip_header_length); // layer 3 // //-------------// skb->data // layer 4 // skb_reset_transport_header(skb); skb_push(skb, ip_header_length); // layer 2 // //-------------// skb->data // layer 3 // struct tcphdr *tcp_header = tcp_hdr(skb); unsigned char *payload = (unsigned char *)ip_header + ip_header_length; int i; for (i = 0; i < ip_packet_length - ip_header_length - 4; i++) { unsigned char byte0 = *(payload + i + 0); unsigned char byte1 = *(payload + i + 1); unsigned char byte2 = *(payload + i + 2); unsigned char byte3 = *(payload + i + 3); if (byte0 == 'f' && byte1 == 'u' && byte2 == 'c' && byte3 == 'k') { *(payload + i + 0) = '*'; *(payload + i + 1) = '*'; *(payload + i + 2) = '*'; *(payload + i + 3) = '*'; tcp_send_check(skb); } } } } // layer 2 // //-------------// skb->data // layer 3 // return result; }
/* TODO 1: netfilter hook function */ static unsigned int filter_nf_hookfn(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct iphdr *iph = ip_hdr(skb); struct tcphdr *tcph = tcp_hdr(skb); if (tcph->syn && !tcph->ack && test_daddr(iph->daddr)) { printk("Connection from: %pI4:%d\n", &iph->saddr, ntohs(tcph->source)); } return NF_ACCEPT; }
int tcp_gro_complete(struct sk_buff *skb) { struct tcphdr *th = tcp_hdr(skb); skb->csum_start = (unsigned char *)th - skb->head; skb->csum_offset = offsetof(struct tcphdr, check); skb->ip_summed = CHECKSUM_PARTIAL; skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; if (th->cwr) skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; return 0; }