static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* Stolen from ip_finish_output2 */
static int ip_direct_send(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "TARPIT ip_direct_send: no header cache and no neighbor!\n");
	kfree_skb(skb);
	return -EINVAL;
}

static int ip6_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
		    ((mroute6_socket(dev_net(dev), skb) &&
		      !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, newskb,
					NULL, newskb->dev,
					ip6_dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(dev_net(dev), idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
				 skb->len);
	}

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst),
			 IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address,
 * and this is a tricky process.
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let us first consider the case that ip_route_input() succeeds:
 *
 * If skb->dst->dev equals the logical bridge device the packet
 * came in on, we can consider this bridging. The packet is passed
 * through the neighbour output function to build a new destination
 * MAC address, which will make the packet enter br_nf_local_out()
 * not much later. In that function it is assured that the iptables
 * FORWARD chain is traversed for the packet.
 *
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back localhost as the output
 * device, which differs from the bridge device.
 *
 * Let us now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * After an "echo '0' > /proc/sys/net/ipv4/ip_forward", ip_route_input()
 * will fail, while __ip_route_output_key() will return success. The source
 * address for __ip_route_output_key() is set to zero, so __ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * whether IP forwarding is allowed. We send a warning message to the user's
 * log telling her to turn IP forwarding on.
 *
 * ip_route_input() will also fail if there is no route available.
 * In that case we just drop the packet.
 *
 * --Lennert, 20020411
 * --Bart, 20020416 (updated)
 * --Bart, 20021007 (updated)
 * --Bart, 20062711 (updated)
 */
static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
{
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		skb->nf_bridge->mask |= BRNF_PKT_TYPE;
	}
	skb->nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;

	skb->dev = bridge_parent(skb->dev);
	if (skb->dev) {
		struct dst_entry *dst = skb->dst;

		nf_bridge_pull_encap_header(skb);

		if (dst->hh)
			return neigh_hh_output(dst->hh, skb);
		else if (dst->neighbour)
			return dst->neighbour->output(skb);
	}
	kfree_skb(skb);
	return 0;
}

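/* Minimal sketch (not kernel code) of the decision described in the comment
 * above, assuming a 2.6.2x-era kernel where skb->dst is still a direct member:
 * after DNAT, ip_route_input() rewrites skb->dst, and comparing its device
 * with the bridge device separates "still bridgeable" from "must be routed".
 * The function name is ours, purely illustrative. */
static int brnf_bridge_or_route_sketch(struct sk_buff *skb,
				       struct net_device *bridge)
{
	struct iphdr *iph = ip_hdr(skb);

	if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, bridge)) {
		/* Martian address, no route, or IP forwarding disabled: drop. */
		kfree_skb(skb);
		return -EHOSTUNREACH;
	}

	if (skb->dst->dev == bridge)
		return 0;	/* case 1: DNAT target sits behind the same bridge */

	return 1;		/* case 2: different device, hand the packet to the IP stack */
}
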
/*
 * Direct send packets to output.
 * Stolen from ip_finish_output2.
 */
static inline int bcm_fast_path_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);
	int ret = 0;

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, hh_len);
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (dst->hh) {
		ret = neigh_hh_output(dst->hh, skb);
	} else if (dst->neighbour) {
		ret = dst->neighbour->output(skb);
	} else {
#ifdef DEBUG
		if (net_ratelimit())
			printk(KERN_DEBUG "bcm_fast_path_output: No header cache and no neighbour!\n");
#endif
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Don't return 1 */
	return (ret == 1) ? 0 : ret;
}

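/* Hypothetical caller sketch showing how a helper like bcm_fast_path_output()
 * is typically fed: attach the routed dst and the output device to the skb,
 * then let the helper build the link-layer header. Field names assume the
 * same pre-2.6.31 kernels as the function above (skb->dst, rt->u.dst). */
static int bcm_send_on_route_sketch(struct sk_buff *skb, struct rtable *rt)
{
	skb->dst = dst_clone(&rt->u.dst);	/* helper reads dst->hh / dst->neighbour */
	skb->dev = rt->u.dst.dev;
	skb->protocol = htons(ETH_P_IP);

	return bcm_fast_path_output(skb);
}
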
static int ak_client_inform_port(const struct net_device *dev, aku16 port_src,
				 aku8 protocol, unsigned int uid)
{
	ak_client_logon_array user_logon[AK_CLIENT_MAX_LOGONS_PER_USER];
	struct sk_buff *skb;		// Packet sent to notify the firewall
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
	struct flowi flp;
#else
	struct flowi4 flp;
#endif
	struct in_device *idev;
	struct rtable *rt;		// Route used to send the packet
	struct iphdr *ip;		// IP header of the packet to send
	struct udphdr *udp;		// UDP header of the packet to send
	struct dst_entry *dst;
#if (((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,41)) && \
      (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))) || \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)))
	struct neighbour *neigh;
#endif
	MD5_CTX contexto;		// Context used to compute the MD5 digests
	int pkt_sent = 0;		// Was at least one packet sent?
	fwprofd_header *header;
	fwprofd_port_ctl *port_ctl;
	ak_client_logon_array *logon;
	int size;
	int count;
	int i;

	if (!dev) {
		PRINT("Output device is NULL\n");
		return -2;
	}

	count = ak_client_get_user_list(uid, user_logon);
	size = sizeof(struct iphdr) + sizeof(struct udphdr) +
	       sizeof(fwprofd_header) + sizeof(fwprofd_port_ctl);

	for (i = 0, logon = user_logon; i < count; i++, logon++) {
		PRINT("Sending packet %d/%d - ", i + 1, count);

		skb = alloc_skb(size + 16, GFP_ATOMIC);
		if (!skb) {
			PRINT("Could not allocate skbuff to send packet\n");
			return -3;
		}
		skb->data += 16;
		skb->len = size;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
		skb->tail = skb->data + size;
		skb->nh.iph = (struct iphdr *) skb->data;
		skb->h.uh = (struct udphdr *) (skb->data + sizeof(struct iphdr));
		ip = skb->nh.iph;
#else
		skb_set_tail_pointer(skb, size);
		skb_reset_network_header(skb);
		skb_set_transport_header(skb, sizeof(struct iphdr));
		ip = ip_hdr(skb);
#endif
		udp = (struct udphdr *) ((char *) ip + sizeof(struct iphdr));
		header = (fwprofd_header *) (udp + 1);
		port_ctl = (fwprofd_port_ctl *) (header + 1);

		// Get the IP address of the output interface in order to allocate the output route
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
		idev = in_dev_get(dev);
#else
		rcu_read_lock();
		idev = __in_dev_get_rcu(dev);
#endif
		if (!idev) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
			rcu_read_unlock();
#endif
			kfree_skb(skb);
			PRINT("Output device has no IP address (1)\n");
			return -4;
		}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
		read_lock(&idev->lock);
#endif
		if (!idev->ifa_list) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
			read_unlock(&idev->lock);
			in_dev_put(idev);
#else
			rcu_read_unlock();
#endif
			kfree_skb(skb);
			PRINT("Output device has no IP address (2)\n");
			return -5;
		}
		ip->saddr = idev->ifa_list->ifa_address;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
		read_unlock(&idev->lock);
		in_dev_put(idev);
#else
		rcu_read_unlock();
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
		flp.oif = 0;
		flp.nl_u.ip4_u.saddr = ip->saddr;
		flp.nl_u.ip4_u.daddr = logon->logon_data.ip.s_addr;
		flp.nl_u.ip4_u.tos = 0;
		flp.uli_u.ports.sport = ntohs(AKER_PROF_PORT);
		flp.uli_u.ports.dport = ntohs(AKER_PROF_PORT);
		flp.proto = IPPROTO_UDP;
#else
		flp.flowi4_oif = 0;
		flp.saddr = ip->saddr;
		flp.daddr = logon->logon_data.ip.s_addr;
		flp.flowi4_tos = 0;
		flp.fl4_sport = ntohs(AKER_PROF_PORT);
		flp.fl4_dport = ntohs(AKER_PROF_PORT);
		flp.flowi4_proto = IPPROTO_UDP;
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
		if (ip_route_output_key(&rt, &flp))
#else
		if (ip_route_output_key(&init_net, &rt, &flp))
#endif
		{
			kfree_skb(skb);
			PRINT("Error allocating output route\n");
			continue;
		}
#else
		rt = ip_route_output_key(&init_net, &flp);
		if (IS_ERR(rt)) {
			kfree_skb(skb);
			PRINT("Error allocating output route\n");
			continue;
		}
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
		skb->dst = dst_clone(&rt->u.dst);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
		skb_dst_set(skb, dst_clone(&rt->u.dst));
#else
		skb_dst_set(skb, dst_clone(&rt->dst));
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
		skb->dev = rt->u.dst.dev;
#else
		skb->dev = rt->dst.dev;
#endif
		skb->protocol = __constant_htons(ETH_P_IP);

		// Fill in the user data
		port_ctl->ip_src.s_addr = 0;
		port_ctl->seq = ntohl(logon->seq);	// ak_client_get_user_list() already incremented seq
		port_ctl->user_num = ntohl(logon->logon_data.ak_user_num);
		port_ctl->port = port_src;
		port_ctl->protocol = protocol;
		port_ctl->reserved = 0;

		MD5Init(&contexto);
		MD5Update(&contexto, (u_char *) logon->logon_data.secret, 16);
		MD5Update(&contexto, (u_char *) &port_ctl->ip_src, sizeof(struct in_addr));
		MD5Update(&contexto, (u_char *) &port_ctl->seq, sizeof(aku32));
		MD5Update(&contexto, (u_char *) &port_ctl->user_num, sizeof(aku32));
		MD5Update(&contexto, (u_char *) &port_ctl->port, sizeof(aku16));
		MD5Update(&contexto, (u_char *) &port_ctl->protocol, sizeof(aku8));
		MD5Update(&contexto, (u_char *) &port_ctl->reserved, sizeof(aku8));
		MD5Final((u_char *) port_ctl->hash, &contexto);

		// Fill in the remaining packet fields
		header->ip_dst = logon->logon_data.ip;
		header->versao = AKER_PROF_VERSION;
		header->tipo_req = APROF_BIND_PORT;
		memset(header->md5, 0, 16);

		MD5Init(&contexto);
		MD5Update(&contexto, (void *) header, sizeof(fwprofd_header));
		MD5Update(&contexto, (void *) port_ctl, sizeof(fwprofd_port_ctl));
		MD5Final(header->md5, &contexto);

		udp->dest = udp->source = ntohs(AKER_PROF_PORT);
		udp->len = ntohs(size - sizeof(struct iphdr));
		udp->check = 0;

		ip->ihl = sizeof(struct iphdr) >> 2;
		ip->version = IPVERSION;
		ip->ttl = IPDEFTTL;
		ip->tos = 0;
		ip->daddr = header->ip_dst.s_addr;
		ip->protocol = IPPROTO_UDP;
		ip->frag_off = 0;
		ip->tot_len = htons(size);
		ip->id = 0;
		ip->check = 0;
		ip->check = ip_fast_csum((u_char *) ip, ip->ihl);

		PRINT("%s -> %s\n", ip2a(ip->saddr), ip2a(ip->daddr));

		// Send the packet
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
		dst = skb->dst;
#else
		dst = skb_dst(skb);
#endif
#if (((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,41)) && \
      (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))) || \
     ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)) && \
      (LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0))))
		rcu_read_lock();
		neigh = dst_get_neighbour_noref(dst);
		if (neigh) {
			neigh->output(neigh, skb);
			ip_rt_put(rt);
			pkt_sent++;
		}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
		rcu_read_lock();
		neigh = dst_neigh_lookup_skb(dst, skb);
		if (neigh) {
			neigh->output(neigh, skb);
			ip_rt_put(rt);
			pkt_sent++;
		}
#else
		if (dst->hh) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
			int hh_alen;

			read_lock_bh(&dst->hh->hh_lock);
			hh_alen = HH_DATA_ALIGN(dst->hh->hh_len);
			memcpy(skb->data - hh_alen, dst->hh->hh_data, hh_alen);
			read_unlock_bh(&dst->hh->hh_lock);
			skb_push(skb, dst->hh->hh_len);
			dst->hh->hh_output(skb);
#else
			neigh_hh_output(dst->hh, skb);
#endif
			ip_rt_put(rt);
			pkt_sent++;
		} else if (dst->neighbour) {
			dst->neighbour->output(skb);
			ip_rt_put(rt);
			pkt_sent++;
		}
#endif
		else {
			kfree_skb(skb);
			ip_rt_put(rt);
			PRINT("Don't know how to send the output packet\n");
		}
#if (((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,41)) && \
      (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))) || \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)))
		rcu_read_unlock();
#endif
	}

	if (!pkt_sent)
		return -1;

	return 0;
}

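/* Minimal sketch, assuming a kernel >= 3.6: the version-dependent send logic
 * above collapses into one neighbour lookup plus neigh->output(). Only
 * dst_neigh_lookup_skb(), neigh_release() and rcu_read_lock()/unlock() are
 * relied on here; the helper name itself is ours, not part of any kernel API. */
static int ak_direct_xmit_sketch(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct neighbour *neigh;
	int ret = -EINVAL;

	rcu_read_lock();
	neigh = dst_neigh_lookup_skb(dst, skb);
	if (neigh) {
		ret = neigh->output(neigh, skb);	/* builds the L2 header and transmits */
		neigh_release(neigh);			/* drop the reference taken by the lookup */
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();
	return ret;
}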