/*
 * inetdev_by_index - find the IPv4-specific state of an interface.
 *
 * Looks the net_device up by @ifindex under dev_base_lock and, when it
 * exists, takes a reference on its in_device.  The caller owns that
 * reference and must drop it with in_dev_put().  Returns NULL when the
 * index does not resolve (or the device has no IPv4 state).
 */
struct in_device *inetdev_by_index(int ifindex)
{
	struct in_device *idev = NULL;
	struct net_device *netdev;

	read_lock(&dev_base_lock);
	netdev = __dev_get_by_index(ifindex);
	if (netdev != NULL)
		idev = in_dev_get(netdev);
	read_unlock(&dev_base_lock);

	return idev;
}
/*
 * inetdev_by_index - namespace-aware lookup of an interface's IPv4 state.
 *
 * Resolves @ifindex inside namespace @net under an RCU read-side critical
 * section and grabs a reference on the matching in_device.  The caller
 * must release the reference with in_dev_put().  Returns NULL when no
 * such interface exists.
 */
struct in_device *inetdev_by_index(struct net *net, int ifindex)
{
	struct in_device *idev = NULL;
	struct net_device *netdev;

	rcu_read_lock();
	netdev = dev_get_by_index_rcu(net, ifindex);
	if (netdev != NULL)
		idev = in_dev_get(netdev);
	rcu_read_unlock();

	return idev;
}
/*
 * ip_rcv_options - parse and validate the IP options of a received packet.
 *
 * Returns 0 on success, -1 when the packet must be dropped (the relevant
 * SNMP counter has already been bumped).  Packets carrying a source-route
 * option are refused when the receiving device disallows source routing.
 */
static inline int ip_rcv_options(struct sk_buff *skb)
{
	struct ip_options *opt;
	struct iphdr *iph;
	struct net_device *dev = skb->dev;

	/* It looks as overkill, because not all
	   IP options require packet mangling.
	   But it is the easiest for now, especially taking
	   into account that combination of IP options
	   and running sniffer is extremely rare condition.
					      --ANK (980813)
	*/
	if (skb_cow(skb, skb_headroom(skb))) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);
	/* Option bytes = total header length minus the fixed 20-byte header. */
	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);

	if (ip_options_compile(dev_net(dev), opt, skb)) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	if (unlikely(opt->srr)) {
		/* Source routing requested: allowed only if the device permits it. */
		struct in_device *in_dev = in_dev_get(dev);
		if (in_dev) {
			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
				if (IN_DEV_LOG_MARTIANS(in_dev) &&
				    net_ratelimit())
					printk(KERN_INFO "source route option "
					       NIPQUAD_FMT " -> " NIPQUAD_FMT "\n",
					       NIPQUAD(iph->saddr),
					       NIPQUAD(iph->daddr));
				in_dev_put(in_dev);
				goto drop;
			}
			in_dev_put(in_dev);
		}

		if (ip_options_rcv_srr(skb))
			goto drop;
	}

	return 0;
drop:
	return -1;
}
static void ltt_enumerate_device(struct net_device *dev) { struct in_device *in_dev; struct in_ifaddr *ifa; if (dev->flags & IFF_UP) { in_dev = in_dev_get(dev); if (in_dev) { for (ifa = in_dev->ifa_list; ifa != NULL; ifa = ifa->ifa_next) { trace_statedump_enumerate_network_ip_interface( dev->name, ifa->ifa_address, LTTNG_UP); } in_dev_put(in_dev); } } else trace_statedump_enumerate_network_ip_interface(dev->name, 0, LTTNG_DOWN); }
/*
 * arp_fwd_proxy - decide whether to proxy-ARP a request toward route @rt.
 *
 * Returns non-zero when this device should answer on behalf of the real
 * destination.  Medium IDs partition interfaces that share a physical
 * medium: 0 means "always proxy", -1 means "never proxy", and any other
 * value means "proxy only between different media".
 */
static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
{
	struct in_device *odev;
	int in_medium, out_medium = -1;

	if (!IN_DEV_PROXY_ARP(in_dev))
		return 0;

	in_medium = IN_DEV_MEDIUM_ID(in_dev);
	if (in_medium == 0)
		return 1;
	if (in_medium == -1)
		return 0;

	/* place to check for proxy_arp for routes */
	odev = in_dev_get(rt->u.dst.dev);
	if (odev != NULL) {
		out_medium = IN_DEV_MEDIUM_ID(odev);
		in_dev_put(odev);
	}

	return out_medium != -1 && out_medium != in_medium;
}
/*
 * lttng_enumerate_device - record the network state of one interface.
 *
 * An UP interface produces one event per attached IPv4 address; a DOWN
 * interface produces a single event with a NULL address entry.  The
 * in_device reference is held only for the duration of the address walk.
 */
static void lttng_enumerate_device(struct lttng_session *session,
		struct net_device *dev)
{
	struct in_device *idev;
	struct in_ifaddr *addr;

	if (!(dev->flags & IFF_UP)) {
		trace_lttng_statedump_network_interface(session, dev, NULL);
		return;
	}

	idev = in_dev_get(dev);
	if (!idev)
		return;

	for (addr = idev->ifa_list; addr; addr = addr->ifa_next)
		trace_lttng_statedump_network_interface(session, dev, addr);

	in_dev_put(idev);
}
static void ltt_enumerate_device(struct ltt_probe_private_data *call_data, struct net_device *dev) { struct in_device *in_dev; struct in_ifaddr *ifa; if (dev->flags & IFF_UP) { in_dev = in_dev_get(dev); if (in_dev) { for (ifa = in_dev->ifa_list; ifa != NULL; ifa = ifa->ifa_next) __trace_mark(0, list_network_ipv4_interface, call_data, "name %s address #4u%lu up %d", dev->name, (unsigned long)ifa->ifa_address, 0); in_dev_put(in_dev); } } else __trace_mark(0, list_network_ip_interface, call_data, "name %s address #4u%lu up %d", dev->name, 0UL, 0); }
static int netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr) { struct in_device *in_dev; /* ALPS00409409406 */ UINT_8 ip[4] = { 0 }; UINT_32 u4NumIPv4 = 0; //#ifdef CONFIG_IPV6 #if 0 UINT_8 ip6[16] = { 0 }; // FIX ME: avoid to allocate large memory in stack UINT_32 u4NumIPv6 = 0; #endif struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; struct net_device *prDev = ifa->ifa_dev->dev; UINT_32 i; P_PARAM_NETWORK_ADDRESS_IP prParamIpAddr; P_GLUE_INFO_T prGlueInfo = NULL; if (prDev == NULL) { DBGLOG(REQ, INFO, ("netdev_event: device is empty.\n")); return NOTIFY_DONE; } if ((strncmp(prDev->name, "p2p", 3) != 0) && (strncmp(prDev->name, "wlan", 4) != 0)) { DBGLOG(REQ, INFO, ("netdev_event: xxx\n")); return NOTIFY_DONE; } prGlueInfo = *((P_GLUE_INFO_T *) netdev_priv(prDev)); if (prGlueInfo == NULL) { DBGLOG(REQ, INFO, ("netdev_event: prGlueInfo is empty.\n")); return NOTIFY_DONE; } ASSERT(prGlueInfo); #ifdef FIX_ALPS00409409406 // <3> get the IPv4 address in_dev = in_dev_get(prDev); if (!in_dev) return; //rtnl_lock(); if(!in_dev->ifa_list ||!in_dev->ifa_list->ifa_local) { //rtnl_unlock(); in_dev_put(in_dev); DBGLOG(REQ, INFO, ("ip is not avaliable.\n")); return; } // <4> copy the IPv4 address kalMemCopy(ip, &(in_dev->ifa_list->ifa_local), sizeof(ip)); //rtnl_unlock(); in_dev_put(in_dev); DBGLOG(REQ, INFO, ("ip is %d.%d.%d.%d\n", ip[0],ip[1],ip[2],ip[3])); #else // <3> get the IPv4 address if(!prDev || !(prDev->ip_ptr)||\ !((struct in_device *)(prDev->ip_ptr))->ifa_list||\ !(&(((struct in_device *)(prDev->ip_ptr))->ifa_list->ifa_local))){ DBGLOG(REQ, INFO, ("ip is not avaliable.\n")); return NOTIFY_DONE; } kalMemCopy(ip, &(((struct in_device *)(prDev->ip_ptr))->ifa_list->ifa_local), sizeof(ip)); DBGLOG(REQ, INFO, ("ip is %d.%d.%d.%d\n", ip[0],ip[1],ip[2],ip[3])); #endif // todo: traverse between list to find whole sets of IPv4 addresses if (!((ip[0] == 0) && (ip[1] == 0) && (ip[2] == 0) && (ip[3] == 0))) { u4NumIPv4++; } #if 
defined(MTK_WLAN_ARP_OFFLOAD) if(NETDEV_UP == notification && PARAM_MEDIA_STATE_CONNECTED == prGlueInfo->eParamMediaStateIndicated){ PARAM_CUSTOM_SW_CTRL_STRUC_T SwCtrlInfo; UINT_32 u4SetInfoLen; WLAN_STATUS rStatus = WLAN_STATUS_FAILURE; SwCtrlInfo.u4Id = 0x90110000; SwCtrlInfo.u4Data = 1; rStatus = kalIoctl(prGlueInfo, wlanoidSetSwCtrlWrite, (PVOID)&SwCtrlInfo, sizeof(SwCtrlInfo), FALSE, FALSE, TRUE, FALSE, &u4SetInfoLen); if (rStatus != WLAN_STATUS_SUCCESS) { DBGLOG(REQ, INFO, ("ARP OFFLOAD fail 0x%lx\n", rStatus)); }else{ DBGLOG(REQ, INFO, ("ARP OFFLOAD success\n")); } } #endif #ifdef FIX_ALPS00409409406 if(atomic_read(&fgIsUnderEarlierSuspend)==0){ #else if (fgIsUnderEarlierSuspend == false) { #endif DBGLOG(REQ, INFO, ("netdev_event: PARAM_MEDIA_STATE_DISCONNECTED. (%d)\n", prGlueInfo->eParamMediaStateIndicated)); return NOTIFY_DONE; } //#ifdef CONFIG_IPV6 #if 0 if(!prDev || !(prDev->ip6_ptr)||\ !((struct in_device *)(prDev->ip6_ptr))->ifa_list||\ !(&(((struct in_device *)(prDev->ip6_ptr))->ifa_list->ifa_local))){ printk(KERN_INFO "ipv6 is not avaliable.\n"); return NOTIFY_DONE; } kalMemCopy(ip6, &(((struct in_device *)(prDev->ip6_ptr))->ifa_list->ifa_local), sizeof(ip6)); printk(KERN_INFO"ipv6 is %d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d\n", ip6[0],ip6[1],ip6[2],ip6[3], ip6[4],ip6[5],ip6[6],ip6[7], ip6[8],ip6[9],ip6[10],ip6[11], ip6[12],ip6[13],ip6[14],ip6[15] ); // todo: traverse between list to find whole sets of IPv6 addresses if (!((ip6[0] == 0) && (ip6[1] == 0) && (ip6[2] == 0) && (ip6[3] == 0) && (ip6[4] == 0) && (ip6[5] == 0))) { //u4NumIPv6++; } #endif // here we can compare the dev with other network's netdev to // set the proper arp filter // // IMPORTANT: please make sure if the context can sleep, if the context can't sleep // we should schedule a kernel thread to do this for us // <7> set up the ARP filter { WLAN_STATUS rStatus = WLAN_STATUS_FAILURE; UINT_32 u4SetInfoLen = 0; UINT_8 aucBuf[32] = {0}; UINT_32 u4Len = 
OFFSET_OF(PARAM_NETWORK_ADDRESS_LIST, arAddress); P_PARAM_NETWORK_ADDRESS_LIST prParamNetAddrList = (P_PARAM_NETWORK_ADDRESS_LIST)aucBuf; P_PARAM_NETWORK_ADDRESS prParamNetAddr = prParamNetAddrList->arAddress; //#ifdef CONFIG_IPV6 #if 0 prParamNetAddrList->u4AddressCount = u4NumIPv4 + u4NumIPv6; #else prParamNetAddrList->u4AddressCount = u4NumIPv4; #endif prParamNetAddrList->u2AddressType = PARAM_PROTOCOL_ID_TCP_IP; for (i = 0; i < u4NumIPv4; i++) { prParamNetAddr->u2AddressLength = sizeof(PARAM_NETWORK_ADDRESS_IP);//4;; prParamNetAddr->u2AddressType = PARAM_PROTOCOL_ID_TCP_IP;; #if 0 kalMemCopy(prParamNetAddr->aucAddress, ip, sizeof(ip)); prParamNetAddr = (P_PARAM_NETWORK_ADDRESS)((UINT_32)prParamNetAddr + sizeof(ip)); u4Len += OFFSET_OF(PARAM_NETWORK_ADDRESS, aucAddress) + sizeof(ip); #else prParamIpAddr = (P_PARAM_NETWORK_ADDRESS_IP)prParamNetAddr->aucAddress; kalMemCopy(&prParamIpAddr->in_addr, ip, sizeof(ip)); prParamNetAddr = (P_PARAM_NETWORK_ADDRESS)((UINT_32)prParamNetAddr + sizeof(PARAM_NETWORK_ADDRESS)); u4Len += OFFSET_OF(PARAM_NETWORK_ADDRESS, aucAddress) + sizeof(PARAM_NETWORK_ADDRESS); #endif } //#ifdef CONFIG_IPV6 #if 0 for (i = 0; i < u4NumIPv6; i++) { prParamNetAddr->u2AddressLength = 6;; prParamNetAddr->u2AddressType = PARAM_PROTOCOL_ID_TCP_IP;; kalMemCopy(prParamNetAddr->aucAddress, ip6, sizeof(ip6)); prParamNetAddr = (P_PARAM_NETWORK_ADDRESS)((UINT_32)prParamNetAddr + sizeof(ip6)); u4Len += OFFSET_OF(PARAM_NETWORK_ADDRESS, aucAddress) + sizeof(ip6); } #endif ASSERT(u4Len <= sizeof(aucBuf)); DBGLOG(REQ, INFO, ("kalIoctl (0x%x, 0x%x)\n", prGlueInfo, prParamNetAddrList)); rStatus = kalIoctl(prGlueInfo, wlanoidSetNetworkAddress, (PVOID)prParamNetAddrList, u4Len, FALSE, FALSE, TRUE, FALSE, &u4SetInfoLen); if (rStatus != WLAN_STATUS_SUCCESS) { DBGLOG(REQ, INFO, ("set HW pattern filter fail 0x%lx\n", rStatus)); } } return NOTIFY_DONE; } static struct notifier_block inetaddr_notifier = { .notifier_call = netdev_event, }; void wlanRegisterNotifier(void) 
{ register_inetaddr_notifier(&inetaddr_notifier); }
static int c2_up(struct net_device *netdev) { struct c2_port *c2_port = netdev_priv(netdev); struct c2_dev *c2dev = c2_port->c2dev; struct c2_element *elem; struct c2_rxp_hdr *rxp_hdr; struct in_device *in_dev; size_t rx_size, tx_size; int ret, i; unsigned int netimr0; if (netif_msg_ifup(c2_port)) pr_debug("%s: enabling interface\n", netdev->name); c2_set_rxbufsize(c2_port); rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc); tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc); c2_port->mem_size = tx_size + rx_size; c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size, &c2_port->dma); if (c2_port->mem == NULL) { pr_debug("Unable to allocate memory for " "host descriptor rings\n"); return -ENOMEM; } memset(c2_port->mem, 0, c2_port->mem_size); if ((ret = c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma, c2dev->mmio_rxp_ring))) { pr_debug("Unable to create RX ring\n"); goto bail0; } if (c2_rx_fill(c2_port)) { pr_debug("Unable to fill RX ring\n"); goto bail1; } if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size, c2_port->dma + rx_size, c2dev->mmio_txp_ring))) { pr_debug("Unable to create TX ring\n"); goto bail1; } c2_port->tx_avail = c2_port->tx_ring.count - 1; c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean = c2_port->tx_ring.start + c2dev->cur_tx; BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean); c2_reset(c2_port); for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count; i++, elem++) { rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data; rxp_hdr->flags = 0; __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS); } netif_start_queue(netdev); writel(0, c2dev->regs + C2_IDIS); netimr0 = readl(c2dev->regs + C2_NIMR0); netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT); writel(netimr0, c2dev->regs + C2_NIMR0); in_dev = in_dev_get(netdev); IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1); in_dev_put(in_dev); return 0; bail1: c2_rx_clean(c2_port); 
kfree(c2_port->rx_ring.start); bail0: pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem, c2_port->dma); return ret; }
/*
 * arp_constructor - neighbour-table constructor for IPv4 ARP entries.
 *
 * Classifies the neighbour's address, picks the per-device ARP parameters,
 * and selects the neigh_ops / output function appropriate for the device
 * type.  Returns 0 on success, -EINVAL when the device has no IPv4 state.
 */
static int arp_constructor(struct neighbour *neigh)
{
	u32 addr = *(u32*)neigh->primary_key;
	struct net_device *dev = neigh->dev;
	struct in_device *in_dev = in_dev_get(dev);

	if (in_dev == NULL)
		return -EINVAL;

	/* unicast / broadcast / multicast classification of the address */
	neigh->type = inet_addr_type(addr);
	if (in_dev->arp_parms)
		neigh->parms = in_dev->arp_parms;
	in_dev_put(in_dev);

	if (dev->hard_header == NULL) {
		/* No link-layer header: ARP resolution is pointless. */
		neigh->nud_state = NUD_NOARP;
		neigh->ops = &arp_direct_ops;
		neigh->output = neigh->ops->queue_xmit;
	} else {
		/* Good devices (checked by reading texts, but only Ethernet is
		   tested)

		   ARPHRD_ETHER: (ethernet, apfddi)
		   ARPHRD_FDDI: (fddi)
		   ARPHRD_IEEE802: (tr)
		   ARPHRD_METRICOM: (strip)
		   ARPHRD_ARCNET:
		   etc. etc. etc.

		   ARPHRD_IPDDP will also work, if author repairs it.
		   I did not it, because this driver does not work even
		   in old paradigm.
		 */

#if 1
		/* So... these "amateur" devices are hopeless.
		   The only thing, that I can say now:
		   It is very sad that we need to keep ugly obsolete
		   code to make them happy.

		   They should be moved to more reasonable state, now
		   they use rebuild_header INSTEAD OF hard_start_xmit!!!
		   Besides that, they are sort of out of date
		   (a lot of redundant clones/copies, useless in 2.1),
		   I wonder why people believe that they work.
		 */
		switch (dev->type) {
		default:
			break;
		case ARPHRD_ROSE:
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
		case ARPHRD_AX25:
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
		case ARPHRD_NETROM:
#endif
			/* Legacy packet-radio devices need the "broken" ops. */
			neigh->ops = &arp_broken_ops;
			neigh->output = neigh->ops->output;
			return 0;
#endif
		;}
#endif
		if (neigh->type == RTN_MULTICAST) {
			neigh->nud_state = NUD_NOARP;
			arp_mc_map(addr, neigh->ha, dev, 1);
		} else if (dev->flags&(IFF_NOARP|IFF_LOOPBACK)) {
			neigh->nud_state = NUD_NOARP;
			memcpy(neigh->ha, dev->dev_addr, dev->addr_len);
		} else if (neigh->type == RTN_BROADCAST ||
			   dev->flags&IFF_POINTOPOINT) {
			neigh->nud_state = NUD_NOARP;
			memcpy(neigh->ha, dev->broadcast, dev->addr_len);
		}
		if (dev->hard_header_cache)
			neigh->ops = &arp_hh_ops;
		else
			neigh->ops = &arp_generic_ops;
		if (neigh->nud_state&NUD_VALID)
			neigh->output = neigh->ops->connected_output;
		else
			neigh->output = neigh->ops->output;
	}
	return 0;
}
/*
 * arp_process - handle one received ARP packet.
 *
 * Validates the hardware/protocol types against the receiving device,
 * answers requests addressed to us (or to hosts we proxy for), and
 * opportunistically updates the neighbour cache from replies and requests.
 *
 * NOTE(review): this chunk appears truncated -- the `out:` label targeted
 * by the many `goto out` statements (which presumably drops the in_dev
 * reference and consumes the skb) is not visible here; only the
 * pneigh_enqueue path visibly releases in_dev.  Confirm against the full
 * file before editing the tail.
 */
static int arp_process(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct in_device *in_dev = in_dev_get(dev);
	struct arphdr *arp;
	unsigned char *arp_ptr;
	struct rtable *rt;
	unsigned char *sha;
	__be32 sip, tip;
	u16 dev_type = dev->type;
	int addr_type;
	struct neighbour *n;
	struct net *net = dev_net(dev);

	/* arp_rcv below verifies the ARP header and verifies the device
	 * is ARP'able.
	 */

	if (in_dev == NULL)
		goto out;

	arp = arp_hdr(skb);

	switch (dev_type) {
	default:
		if (arp->ar_pro != htons(ETH_P_IP) ||
		    htons(dev_type) != arp->ar_hrd)
			goto out;
		break;
	case ARPHRD_ETHER:
	case ARPHRD_IEEE802_TR:
	case ARPHRD_FDDI:
	case ARPHRD_IEEE802:
		/*
		 * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802
		 * devices, according to RFC 2625) devices will accept ARP
		 * hardware types of either 1 (Ethernet) or 6 (IEEE 802.2).
		 * This is the case also of FDDI, where the RFC 1390 says that
		 * FDDI devices should accept ARP hardware of (1) Ethernet,
		 * however, to be more robust, we'll accept both 1 (Ethernet)
		 * or 6 (IEEE 802.2)
		 */
		if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
		     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
		    arp->ar_pro != htons(ETH_P_IP))
			goto out;
		break;
	case ARPHRD_AX25:
		if (arp->ar_pro != htons(AX25_P_IP) ||
		    arp->ar_hrd != htons(ARPHRD_AX25))
			goto out;
		break;
	case ARPHRD_NETROM:
		if (arp->ar_pro != htons(AX25_P_IP) ||
		    arp->ar_hrd != htons(ARPHRD_NETROM))
			goto out;
		break;
	}

	/* Understand only these message types */
	if (arp->ar_op != htons(ARPOP_REPLY) &&
	    arp->ar_op != htons(ARPOP_REQUEST))
		goto out;

	/*
	 * Extract fields
	 */
	arp_ptr= (unsigned char *)(arp+1);
	sha = arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	arp_ptr += dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/*
	 * Check for bad requests for 127.x.x.x and requests for multicast
	 * addresses.  If this is one such, delete it.
	 */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		goto out;

	/*
	 * Special case: We must set Frame Relay source Q.922 address
	 */
	if (dev_type == ARPHRD_DLCI)
		sha = dev->broadcast;

	/*
	 * Process entry.  The idea here is we want to send a reply if it is a
	 * request for us or if it is a request for someone else that we hold
	 * a proxy for.  We want to add an entry to our cache if it is a reply
	 * to us or if it is a request for our address.
	 * (The assumption for this last is that if someone is requesting our
	 * address, they are probably intending to talk to us, so it saves time
	 * if we cache their address.  Their address is also probably not in
	 * our cache, since ours is not in their cache.)
	 *
	 * Putting this another way, we only care about replies if they are to
	 * us, in which case we add them to the cache.  For requests, we care
	 * about those for us and those for our proxies.  We reply to both,
	 * and in the case of requests for us we add the requester to the arp
	 * cache.
	 */

	/* Special case: IPv4 duplicate address detection packet (RFC2131) */
	if (sip == 0) {
		if (arp->ar_op == htons(ARPOP_REQUEST) &&
		    inet_addr_type(net, tip) == RTN_LOCAL &&
		    !arp_ignore(in_dev, sip, tip))
			arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				 dev->dev_addr, sha);
		goto out;
	}

	if (arp->ar_op == htons(ARPOP_REQUEST) &&
	    ip_route_input(skb, tip, sip, 0, dev) == 0) {

		rt = skb->rtable;
		addr_type = rt->rt_type;

		if (addr_type == RTN_LOCAL) {
			/* Request for one of our own addresses: maybe reply. */
			n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
			if (n) {
				int dont_send = 0;

				if (!dont_send)
					dont_send |= arp_ignore(in_dev,sip,tip);
				if (!dont_send && IN_DEV_ARPFILTER(in_dev))
					dont_send |= arp_filter(sip,tip,dev);
				if (!dont_send)
					arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);

				neigh_release(n);
			}
			goto out;
		} else if (IN_DEV_FORWARD(in_dev)) {
			/* Possibly a proxy-ARP case for a forwarded route. */
			if (addr_type == RTN_UNICAST && rt->u.dst.dev != dev &&
			    (arp_fwd_proxy(in_dev, rt) ||
			     pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
				n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
				if (n)
					neigh_release(n);

				if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
				    skb->pkt_type == PACKET_HOST ||
				    in_dev->arp_parms->proxy_delay == 0) {
					arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
				} else {
					/* Delayed proxy reply: the queue now owns the skb. */
					pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb);
					in_dev_put(in_dev);
					return 0;
				}
				goto out;
			}
		}
	}

	/* Update our ARP tables */
	n = __neigh_lookup(&arp_tbl, &sip, dev, 0);

	if (IPV4_DEVCONF_ALL(dev_net(dev), ARP_ACCEPT)) {
		/* Unsolicited ARP is not accepted by default.
		   It is possible, that this option should be enabled for some
		   devices (strip is candidate)
		 */
		if (n == NULL &&
		    arp->ar_op == htons(ARPOP_REPLY) &&
		    inet_addr_type(net, sip) == RTN_UNICAST)
			n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
	}

	if (n) {
		int state = NUD_REACHABLE;
		int override;

		/* If several different ARP replies follows back-to-back,
		   use the FIRST one. It is possible, if several proxy
		   agents are active. Taking the first reply prevents
		   arp trashing and chooses the fastest router.
		 */
		override = time_after(jiffies, n->updated + n->parms->locktime);

		/* Broadcast replies and request packets
		   do not assert neighbour reachability.
		 */
		if (arp->ar_op != htons(ARPOP_REPLY) ||
		    skb->pkt_type != PACKET_HOST)
			state = NUD_STALE;
		neigh_update(n, sha, state,
			     override ? NEIGH_UPDATE_F_OVERRIDE : 0);
		neigh_release(n);
	}
/* Sends the updated interface information to the engine.  This is called
   at the initialization of the interceptor and whenever the status of any
   interface changes.  This function grabs 'if_table_lock' (for reading)
   and 'interceptor_lock'. */
static void ssh_interceptor_send_interfaces(SshInterceptor interceptor)
{
  SshInterceptorInternalInterface iface;
  SshInterceptorInterface *ifarray;
  SshInterceptorInterfacesCB interfaces_callback;
  struct net_device *dev;
  struct in_device *inet_dev = NULL;
  struct in_ifaddr *addr;
  int num, n, count, i, hashvalue;
#ifdef SSH_LINUX_INTERCEPTOR_IPV6
  struct inet6_dev *inet6_dev = NULL;
  struct inet6_ifaddr *addr6;
#endif /* SSH_LINUX_INTERCEPTOR_IPV6 */

  /* Grab 'if_table_lock' for reading the interface table. */
  read_lock(&interceptor->nf->if_table_lock);

  num = interceptor->nf->if_table_size;
  if (!num)
    {
      read_unlock(&interceptor->nf->if_table_lock);
      SSH_DEBUG(4, ("No interfaces to report."));
      return;
    }

  /* Allocate temporary memory for the table that is passed to the
     engine.  This is mallocated, since we want to minimise stack
     usage. */
  ifarray = ssh_malloc(sizeof(*ifarray) * num);
  if (ifarray == NULL)
    {
      read_unlock(&interceptor->nf->if_table_lock);
      return;
    }
  memset(ifarray, 0, sizeof(*ifarray) * num);

  /* Iterate over the slots of the iface hashtable. */
  n = 0;
  for (hashvalue = 0; hashvalue < SSH_LINUX_IFACE_HASH_SIZE; hashvalue++)
    {
      /* Iterate over the chain of iface entries in a hashtable slot. */
      for (iface = interceptor->nf->if_hash[hashvalue];
           iface != NULL;
           iface = iface->next)
        {
          /* Ignore devices that are not up. */
          dev = iface->dev;
          if (dev == NULL || !(dev->flags & IFF_UP))
            continue;

          /* Disable net_device features that quicksec does not support */
          if (dev->features & NETIF_F_TSO)
            {
              ssh_warning("Warning: Interface %d [%s], dropping unsupported "
                          "feature NETIF_F_TSO",
                          iface->ifindex,
                          (dev->name ? dev->name : "<none>"));
              dev->features &= ~NETIF_F_TSO;
            }

#ifdef LINUX_HAS_NETIF_F_GSO
          /* Disable net_device features that quicksec does not support */
          if (dev->features & NETIF_F_GSO)
            {
              ssh_warning("Warning: Interface %d [%s], dropping unsupported "
                          "feature NETIF_F_GSO",
                          iface->ifindex,
                          (dev->name ? dev->name : "<none>"));
              dev->features &= ~NETIF_F_GSO;
            }
#endif /* LINUX_HAS_NETIF_F_GSO */

#ifdef LINUX_HAS_NETIF_F_TSO6
          /* Disable net_device features that quicksec does not support */
          if (dev->features & NETIF_F_TSO6)
            {
              ssh_warning("Warning: Interface %d [%s], dropping unsupported "
                          "feature NETIF_F_TSO6",
                          iface->ifindex,
                          (dev->name ? dev->name : "<none>"));
              dev->features &= ~NETIF_F_TSO6;
            }
#endif /* LINUX_HAS_NETIF_F_TSO6 */

#ifdef LINUX_HAS_NETIF_F_TSO_ECN
          /* Disable net_device features that quicksec does not support */
          if (dev->features & NETIF_F_TSO_ECN)
            {
              ssh_warning("Warning: Interface %d [%s], dropping unsupported "
                          "feature NETIF_F_TSO_ECN",
                          iface->ifindex,
                          (dev->name ? dev->name : "<none>"));
              dev->features &= ~NETIF_F_TSO_ECN;
            }
#endif /* LINUX_HAS_NETIF_F_TSO_ECN */

#ifdef LINUX_HAS_NETIF_F_GSO_ROBUST
          /* Disable net_device features that quicksec does not support */
          if (dev->features & NETIF_F_GSO_ROBUST)
            {
              ssh_warning("Warning: Interface %d [%s], dropping unsupported "
                          "feature NETIF_F_GSO_ROBUST",
                          iface->ifindex,
                          (dev->name ? dev->name : "<none>"));
              dev->features &= ~NETIF_F_GSO_ROBUST;
            }
#endif /* LINUX_HAS_NETIF_F_GSO_ROBUST */

#ifdef LINUX_HAS_NETIF_F_UFO
          if (dev->features & NETIF_F_UFO)
            {
              ssh_warning("Warning: Interface %d [%s], dropping unsupported "
                          "feature NETIF_F_UFO",
                          iface->ifindex,
                          (dev->name ? dev->name : "<none>"));
              dev->features &= ~NETIF_F_UFO;
            }
#endif /* LINUX_HAS_NETIF_F_UFO */

          /* Count addresses */
          count = 0;

          /* Increment refcount to make sure the device does not
             disappear. */
          inet_dev = in_dev_get(dev);
          if (inet_dev)
            {
              /* Count the device's IPv4 addresses */
              for (addr = inet_dev->ifa_list;
                   addr != NULL;
                   addr = addr->ifa_next)
                {
                  count++;
                }
            }

#ifdef SSH_LINUX_INTERCEPTOR_IPV6
          /* Increment refcount to make sure the device does not
             disappear. */
          inet6_dev = in6_dev_get(dev);
          if (inet6_dev)
            {
              /* Count the device's IPv6 addresses */
              for (addr6 = inet6_dev->addr_list ;
                   addr6 != NULL;
                   addr6 = addr6->if_next)
                {
                  count++;
                }
            }
#endif /* SSH_LINUX_INTERCEPTOR_IPV6 */

          /* Fill interface entry. */
          ifarray[n].ifnum = iface->ifindex;
          ifarray[n].to_protocol.flags =
            SSH_INTERCEPTOR_MEDIA_INFO_NO_FRAGMENT;
          ifarray[n].to_protocol.mtu_ipv4 = dev->mtu;
          ifarray[n].to_adapter.flags = 0;
          ifarray[n].to_adapter.mtu_ipv4 = dev->mtu;

#ifdef WITH_IPV6
          ifarray[n].to_adapter.mtu_ipv6 = dev->mtu;
          ifarray[n].to_protocol.mtu_ipv6 = dev->mtu;
#endif /* WITH_IPV6 */

#ifndef SSH_IPSEC_IP_ONLY_INTERCEPTOR
          ifarray[n].to_adapter.media = ssh_interceptor_media_type(dev->type);
          ifarray[n].to_protocol.media = ssh_interceptor_media_type(dev->type);
#else /* !SSH_IPSEC_IP_ONLY_INTERCEPTOR */
          ifarray[n].to_adapter.media = SSH_INTERCEPTOR_MEDIA_PLAIN;
          ifarray[n].to_protocol.media = SSH_INTERCEPTOR_MEDIA_PLAIN;
#endif /* !SSH_IPSEC_IP_ONLY_INTERCEPTOR */

          strncpy(ifarray[n].name, dev->name, 15);

          /* Set interface type and link status. */
          if (dev->flags & IFF_POINTOPOINT)
            ifarray[n].flags |= SSH_INTERFACE_FLAG_POINTOPOINT;
          if (dev->flags & IFF_BROADCAST)
            ifarray[n].flags |= SSH_INTERFACE_FLAG_BROADCAST;
          if (!netif_carrier_ok(dev))
            ifarray[n].flags |= SSH_INTERFACE_FLAG_LINK_DOWN;

          ifarray[n].num_addrs = count;
          ifarray[n].addrs = NULL;

          /* Add addresses to interface entry. */
          if (count > 0)
            {
              ifarray[n].addrs = ssh_malloc(sizeof(*ifarray[n].addrs) * count);
              if (ifarray[n].addrs == NULL)
                {
                  /* Release INET/INET6 devices */
                  if (inet_dev)
                    in_dev_put(inet_dev);
#ifdef SSH_LINUX_INTERCEPTOR_IPV6
                  if (inet6_dev)
                    in6_dev_put(inet6_dev);
#endif /* SSH_LINUX_INTERCEPTOR_IPV6 */
                  read_unlock(&interceptor->nf->if_table_lock);
                  goto out;
                }

              count = 0;
              if (inet_dev)
                {
                  /* Put the IPv4 addresses */
                  for (addr = inet_dev->ifa_list;
                       addr != NULL;
                       addr = addr->ifa_next)
                    {
                      ifarray[n].addrs[count].protocol = SSH_PROTOCOL_IP4;
                      SSH_IP4_DECODE(&ifarray[n].addrs[count].addr.ip.ip,
                                     &addr->ifa_local);
                      SSH_IP4_DECODE(&ifarray[n].addrs[count].addr.ip.mask,
                                     &addr->ifa_mask);
#if 0
                      if (dev->flags & IFF_POINTOPOINT)
                        SSH_IP4_DECODE(&ifarray[n].addrs[count].
                                       addr.ip.broadcast,
                                       &addr->ifa_address);
                      else
#endif /* 0 */
                        SSH_IP4_DECODE(&ifarray[n].addrs[count].
                                       addr.ip.broadcast,
                                       &addr->ifa_broadcast);
                      count++;
                    }
                }

#ifdef SSH_LINUX_INTERCEPTOR_IPV6
              if (inet6_dev)
                {
                  /* Put the IPv6 addresses */
                  for (addr6 = inet6_dev->addr_list;
                       addr6 != NULL;
                       addr6 = addr6->if_next)
                    {
                      ifarray[n].addrs[count].protocol = SSH_PROTOCOL_IP6;
                      SSH_IP6_DECODE(&ifarray[n].addrs[count].addr.ip.ip,
                                     &addr6->addr);
                      /* Generate mask from prefix length and IPv6 addr */
                      SSH_IP6_DECODE(&ifarray[n].addrs[count].addr.ip.mask,
                                     "\xff\xff\xff\xff\xff\xff\xff\xff"
                                     "\xff\xff\xff\xff\xff\xff\xff\xff");
                      ssh_ipaddr_set_bits(&ifarray[n].addrs[count].
                                          addr.ip.mask,
                                          &ifarray[n].addrs[count].
                                          addr.ip.mask,
                                          addr6->prefix_len, 0);
                      /* Set the broadcast address to the IPv6
                         undefined address */
                      SSH_IP6_DECODE(&ifarray[n].addrs[count].
                                     addr.ip.broadcast,
                                     "\x00\x00\x00\x00\x00\x00\x00\x00"
                                     "\x00\x00\x00\x00\x00\x00\x00\x00");
                      /* Copy the ifnum in case of ipv6 to scope_id, since
                         in linux scope_id == ifnum. */
                      ifarray[n].addrs[count].addr.ip.ip.
                        scope_id.scope_id_union.ui32 = ifarray[n].ifnum;
                      count++;
                    }
                }
#endif /* SSH_LINUX_INTERCEPTOR_IPV6 */
            }

#ifndef SSH_IPSEC_IP_ONLY_INTERCEPTOR
          /* Grab the MAC address */
          ifarray[n].media_addr_len = dev->addr_len;
          SSH_ASSERT(dev->addr_len <= sizeof(ifarray[n].media_addr));
          memcpy(&ifarray[n].media_addr[0], dev->dev_addr, dev->addr_len);
#else /* !SSH_IPSEC_IP_ONLY_INTERCEPTOR */
          ifarray[n].media_addr[0] = 0;
          ifarray[n].media_addr_len = 0;
#endif /* !SSH_IPSEC_IP_ONLY_INTERCEPTOR */

          /* Release INET/INET6 devices */
          if (inet_dev)
            in_dev_put(inet_dev);
          inet_dev = NULL;
#ifdef SSH_LINUX_INTERCEPTOR_IPV6
          if (inet6_dev)
            in6_dev_put(inet6_dev);
          inet6_dev = NULL;
#endif /* SSH_LINUX_INTERCEPTOR_IPV6 */

          /* Update the flags to the interface information.  First check if
             this is virtual adapter, and then check all the other possible
             flags (e.g. broadcast, pointopoint).  Other flags checking not
             implemented yet. */
#ifdef SSHDIST_IPSEC_VIRTUAL_ADAPTERS
          ssh_kernel_mutex_lock(interceptor->interceptor_lock);
          for (i = 0; i < SSH_LINUX_MAX_VIRTUAL_ADAPTERS; i++)
            {
              SshVirtualAdapter adapter =
                interceptor->nf->virtual_adapters[i];
              if (adapter && adapter->dev->ifindex == ifarray[n].ifnum)
                {
                  ifarray[n].flags |= SSH_INTERFACE_FLAG_VIP;
                  break;
                }
            }
          ssh_kernel_mutex_unlock(interceptor->interceptor_lock);
#endif /* SSHDIST_IPSEC_VIRTUAL_ADAPTERS */

          /* Done with the interface entry, increase ifarray index
             and continue with next iface. */
          n++;
        } /* for (iface entry chain iteration) */
    } /* for (hashtable iteration) */

  /* Release if_table lock. */
  read_unlock(&interceptor->nf->if_table_lock);

  SSH_ASSERT(n < num);

  /* 'interceptor_lock' protects 'num_interface_callbacks' and
     'interfaces_callback'. */
  ssh_kernel_mutex_lock(interceptor->interceptor_lock);
  interceptor->nf->num_interface_callbacks++;
  interfaces_callback = interceptor->nf->interfaces_callback;
  ssh_kernel_mutex_unlock(interceptor->interceptor_lock);

  /* Call the interface callback. */
  (interfaces_callback)(n, ifarray, interceptor->nf->callback_context);

  ssh_kernel_mutex_lock(interceptor->interceptor_lock);
  interceptor->nf->num_interface_callbacks--;
  ssh_kernel_mutex_unlock(interceptor->interceptor_lock);

 out:
  /* Free the array. */
  for (i = 0; i < n; i++)
    {
      if (ifarray[i].addrs != NULL)
        ssh_free(ifarray[i].addrs);
    }
  ssh_free(ifarray);

  return;
}
/*
 * kaodv_init - module init: set up queues, netlink, netfilter hooks and
 * prefetch per-interface addressing info for the configured interfaces.
 *
 * Returns 0 on success; on failure unwinds already-registered resources
 * via the goto cleanup chain and returns the negative errno.
 */
static int __init kaodv_init(void)
{
	struct net_device *dev = NULL;
	struct in_device *indev;
	struct in_ifaddr **ifap = NULL;
	struct in_ifaddr *ifa = NULL;
	int i, ret = -ENOMEM;

#ifndef KERNEL26
	EXPORT_NO_SYMBOLS;
#endif

	kaodv_expl_init();

	ret = kaodv_queue_init();
	if (ret < 0)
		return ret;

	ret = kaodv_netlink_init();
	if (ret < 0)
		goto cleanup_queue;

	ret = nf_register_hook(&kaodv_ops[0]);
	if (ret < 0)
		goto cleanup_netlink;

	ret = nf_register_hook(&kaodv_ops[1]);
	if (ret < 0)
		goto cleanup_hook0;

	ret = nf_register_hook(&kaodv_ops[2]);
	if (ret < 0)
		goto cleanup_hook1;

	/* Prefetch network device info (ip, broadcast address, ifindex). */
	for (i = 0; i < MAX_INTERFACES; i++) {
		if (!ifname[i])
			break;
		dev = dev_get_by_name(ifname[i]);
		if (!dev) {
			printk("No device %s available, ignoring!\n", ifname[i]);
			continue;
		}
		netdevs[nif].ifindex = dev->ifindex;

		/* indev = inetdev_by_index(dev->ifindex); */
		indev = in_dev_get(dev);
		if (indev) {
			/* Find the address entry whose label matches the device name. */
			for (ifap = &indev->ifa_list; (ifa = *ifap) != NULL;
			     ifap = &ifa->ifa_next)
				if (!strcmp(dev->name, ifa->ifa_label))
					break;

			if (ifa) {
				netdevs[nif].ip_addr = ifa->ifa_address;
				netdevs[nif].bc_addr = ifa->ifa_broadcast;
				/* printk("dev ip=%s bc=%s\n", print_ip(netdevs[nif].ip_addr), print_ip(netdevs[nif].bc_addr)); */
			}
			in_dev_put(indev);
		}
		/* NOTE(review): nif is advanced even when no address was found,
		 * leaving that slot with only the ifindex filled -- presumably
		 * intentional; confirm against users of netdevs[]. */
		nif++;

		dev_put(dev);
	}

	proc_net_create("kaodv", 0, kaodv_proc_info);

	return ret;

cleanup_hook1:
	nf_unregister_hook(&kaodv_ops[1]);
cleanup_hook0:
	nf_unregister_hook(&kaodv_ops[0]);
cleanup_netlink:
	kaodv_netlink_fini();
cleanup_queue:
	kaodv_queue_fini();
	return ret;
}
/*
 * econet_sendmsg - transmit a message on an Econet socket.
 *
 * Resolves the destination from @msg (or the socket's bound address when
 * msg_name is NULL), then sends either over real Econet hardware
 * (CONFIG_ECONET_NATIVE) or as an AUN UDP datagram (CONFIG_ECONET_AUNUDP).
 * Returns the number of bytes sent or a negative errno.
 *
 * NOTE(review): both transmit paths read saddr->cookie / *saddr without a
 * NULL check, although saddr may legitimately be NULL (the in-source "BUG"
 * comment below acknowledges this) -- confirm before relying on the
 * msg_name == NULL path.
 */
static int econet_sendmsg(struct socket *sock, struct msghdr *msg, int len,
			  struct scm_cookie *scm)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
	struct net_device *dev;
	struct ec_addr addr;
	int err;
	unsigned char port, cb;
	struct sk_buff *skb;
	struct ec_cb *eb;
#ifdef CONFIG_ECONET_NATIVE
	unsigned short proto = 0;
#endif
#ifdef CONFIG_ECONET_AUNUDP
	struct msghdr udpmsg;
	struct iovec iov[msg->msg_iovlen+1];
	struct aunhdr ah;
	struct sockaddr_in udpdest;
	__kernel_size_t size;
	int i;
	mm_segment_t oldfs;
#endif

	/*
	 *	Check the flags.
	 */
	if (msg->msg_flags&~MSG_DONTWAIT)
		return(-EINVAL);

	/*
	 *	Get and verify the address.
	 */
	if (saddr == NULL) {
		/* Unconnected-style send: use the socket's bound values. */
		addr.station = sk->protinfo.af_econet->station;
		addr.net = sk->protinfo.af_econet->net;
		port = sk->protinfo.af_econet->port;
		cb = sk->protinfo.af_econet->cb;
	} else {
		if (msg->msg_namelen < sizeof(struct sockaddr_ec))
			return -EINVAL;
		addr.station = saddr->addr.station;
		addr.net = saddr->addr.net;
		port = saddr->port;
		cb = saddr->cb;
	}

	/* Look for a device with the right network number. */
	dev = net2dev_map[addr.net];

	/* If not directly reachable, use some default */
	if (dev == NULL) {
		dev = net2dev_map[0];
		/* No interfaces at all? */
		if (dev == NULL)
			return -ENETDOWN;
	}

	if (dev->type == ARPHRD_ECONET) {
		/* Real hardware Econet.  We're not worthy etc. */
#ifdef CONFIG_ECONET_NATIVE
		atomic_inc(&dev->refcnt);

		skb = sock_alloc_send_skb(sk, len+dev->hard_header_len+15,
					  msg->msg_flags & MSG_DONTWAIT, &err);
		if (skb==NULL)
			goto out_unlock;

		skb_reserve(skb, (dev->hard_header_len+15)&~15);
		skb->nh.raw = skb->data;

		eb = (struct ec_cb *)&skb->cb;

		/* BUG: saddr may be NULL */
		eb->cookie = saddr->cookie;
		eb->sec = *saddr;
		eb->sent = ec_tx_done;

		if (dev->hard_header) {
			int res;
			struct ec_framehdr *fh;
			err = -EINVAL;
			res = dev->hard_header(skb, dev, ntohs(proto),
					       &addr, NULL, len);
			/* Poke in our control byte and
			   port number.  Hack, hack.  */
			fh = (struct ec_framehdr *)(skb->data);
			fh->cb = cb;
			fh->port = port;
			if (sock->type != SOCK_DGRAM) {
				skb->tail = skb->data;
				skb->len = 0;
			} else if (res < 0)
				goto out_free;
		}

		/* Copy the data.  Returns -EFAULT on error */
		err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
		skb->protocol = proto;
		skb->dev = dev;
		skb->priority = sk->priority;
		if (err)
			goto out_free;

		err = -ENETDOWN;
		if (!(dev->flags & IFF_UP))
			goto out_free;

		/*
		 *	Now send it
		 */
		dev_queue_xmit(skb);
		dev_put(dev);
		return(len);

	out_free:
		kfree_skb(skb);
	out_unlock:
		if (dev)
			dev_put(dev);
#else
		err = -EPROTOTYPE;
#endif
		return err;
	}

#ifdef CONFIG_ECONET_AUNUDP
	/* AUN virtual Econet. */

	if (udpsock == NULL)
		return -ENETDOWN;		/* No socket - can't send */

	/* Make up a UDP datagram and hand it off to some higher intellect. */

	memset(&udpdest, 0, sizeof(udpdest));
	udpdest.sin_family = AF_INET;
	udpdest.sin_port = htons(AUN_PORT);

	/* At the moment we use the stupid Acorn scheme of Econet address
	   y.x maps to IP a.b.c.x.  This should be replaced with something
	   more flexible and more aware of subnet masks.  */
	{
		struct in_device *idev = in_dev_get(dev);
		unsigned long network = 0;
		if (idev) {
			read_lock(&idev->lock);
			if (idev->ifa_list)
				network = ntohl(idev->ifa_list->ifa_address) &
					0xffffff00;		/* !!! */
			read_unlock(&idev->lock);
			in_dev_put(idev);
		}
		udpdest.sin_addr.s_addr = htonl(network | addr.station);
	}

	ah.port = port;
	ah.cb = cb & 0x7f;
	ah.code = 2;		/* magic */
	ah.pad = 0;

	/* tack our header on the front of the iovec */
	size = sizeof(struct aunhdr);
	iov[0].iov_base = (void *)&ah;
	iov[0].iov_len = size;
	for (i = 0; i < msg->msg_iovlen; i++) {
		void *base = msg->msg_iov[i].iov_base;
		size_t len = msg->msg_iov[i].iov_len;
		/* Check it now since we switch to KERNEL_DS later. */
		if ((err = verify_area(VERIFY_READ, base, len)) < 0)
			return err;
		iov[i+1].iov_base = base;
		iov[i+1].iov_len = len;
		size += len;
	}

	/* Get a skbuff (no data, just holds our cb information) */
	if ((skb = sock_alloc_send_skb(sk, 0, msg->msg_flags & MSG_DONTWAIT,
				       &err)) == NULL)
		return err;

	eb = (struct ec_cb *)&skb->cb;

	eb->cookie = saddr->cookie;
	eb->timeout = (5*HZ);
	eb->start = jiffies;
	ah.handle = aun_seq;
	eb->seq = (aun_seq++);
	eb->sec = *saddr;

	skb_queue_tail(&aun_queue, skb);

	udpmsg.msg_name = (void *)&udpdest;
	udpmsg.msg_namelen = sizeof(udpdest);
	udpmsg.msg_iov = &iov[0];
	udpmsg.msg_iovlen = msg->msg_iovlen + 1;
	udpmsg.msg_control = NULL;
	udpmsg.msg_controllen = 0;
	udpmsg.msg_flags=0;

	oldfs = get_fs(); set_fs(KERNEL_DS);	/* More privs :-) */
	err = sock_sendmsg(udpsock, &udpmsg, size);
	set_fs(oldfs);
#else
	err = -EPROTOTYPE;
#endif
	return err;
}
static int br_nf_pre_routing_finish(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct iphdr *iph = ip_hdr(skb); struct nf_bridge_info *nf_bridge = skb->nf_bridge; int err; if (nf_bridge->mask & BRNF_PKT_TYPE) { skb->pkt_type = PACKET_OTHERHOST; nf_bridge->mask ^= BRNF_PKT_TYPE; } nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; if (dnat_took_place(skb)) { if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) { struct rtable *rt; struct flowi fl = { .nl_u = { .ip4_u = { .daddr = iph->daddr, .saddr = 0, .tos = RT_TOS(iph->tos) }, }, .proto = 0, }; struct in_device *in_dev = in_dev_get(dev); /* If err equals -EHOSTUNREACH the error is due to a * martian destination or due to the fact that * forwarding is disabled. For most martian packets, * ip_route_output_key() will fail. It won't fail for 2 types of * martian destinations: loopback destinations and destination * 0.0.0.0. In both cases the packet will be dropped because the * destination is the loopback device and not the bridge. */ if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev)) goto free_skb; if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { /* - Bridged-and-DNAT'ed traffic doesn't * require ip_forwarding. */ if (((struct dst_entry *)rt)->dev == dev) { skb->dst = (struct dst_entry *)rt; goto bridged_dnat; } /* we are sure that forwarding is disabled, so printing * this message is no problem. 
Note that the packet could * still have a martian destination address, in which case * the packet could be dropped even if forwarding were enabled */ __br_dnat_complain(); dst_release((struct dst_entry *)rt); } free_skb: kfree_skb(skb); return 0; } else { if (skb->dst->dev == dev) { bridged_dnat: /* Tell br_nf_local_out this is a * bridged frame */ nf_bridge->mask |= BRNF_BRIDGED_DNAT; skb->dev = nf_bridge->physindev; nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_nf_pre_routing_finish_bridge, 1); return 0; } memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN); skb->pkt_type = PACKET_HOST; } } else {
/*
 * ip_rcv_finish - second half of IPv4 packet reception, run after the
 * NF_IP_PRE_ROUTING netfilter hook.
 *
 * Routes the packet (filling skb->dst if not already set), performs
 * optional routing-classifier accounting, validates/handles IP options
 * (including strict/loose source routing), and finally hands the packet
 * to the per-route input handler (local delivery or forwarding).
 *
 * Returns the result of dst->input() on success, NET_RX_DROP on any
 * error (the skb is consumed in every path).
 */
static inline int ip_rcv_finish(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = skb->nh.iph;

	DEENC_CHECK();

	/*
	 *	Initialise the virtual path cache for the packet. It describes
	 *	how the packet travels inside Linux networking.
	 */
	if (skb->dst == NULL) {
		DEENC_CHECK();
		/* No route (or martian address): drop silently. */
		if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))
			goto drop;
	}

#ifdef CONFIG_NET_CLS_ROUTE
	/* Per-CPU byte/packet accounting keyed by the route classifier tag:
	 * low byte of tclassid indexes the output counters, bits 16-23 the
	 * input counters. */
	if (skb->dst->tclassid) {
		struct ip_rt_acct *st = ip_rt_acct + 256*smp_processor_id();
		u32 idx = skb->dst->tclassid;
		st[idx&0xFF].o_packets++;
		st[idx&0xFF].o_bytes+=skb->len;
		st[(idx>>16)&0xFF].i_packets++;
		st[(idx>>16)&0xFF].i_bytes+=skb->len;
	}
#endif

	/* An IHL greater than 5 (20 bytes) means IP options are present. */
	if (iph->ihl > 5) {
		struct ip_options *opt;

		DEENC_CHECK();

		/* It looks as overkill, because not all IP options require
		   packet mangling. But it is the easiest for now, especially
		   taking into account that combination of IP options and
		   running sniffer is extremely rare condition.
						--ANK (980813) */
		if (skb_cow(skb, skb_headroom(skb)))
			goto drop;
		/* skb_cow() may have copied the data: re-read the header pointer. */
		iph = skb->nh.iph;

		if (ip_options_compile(NULL, skb))
			goto inhdr_error;

		opt = &(IPCB(skb)->opt);
		if (opt->srr) {
			/* Source-routing option: only honoured if the inbound
			 * device allows it; otherwise log (rate-limited) and drop.
			 * in_dev_get() takes a reference that must be released
			 * on every exit from this scope. */
			struct in_device *in_dev = in_dev_get(dev);
			if (in_dev) {
				if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
					if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
						printk(KERN_INFO "source route option %u.%u.%u.%u -> %u.%u.%u.%u\n", NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));
					in_dev_put(in_dev);
					goto drop;
				}
				in_dev_put(in_dev);
			}
			if (ip_options_rcv_srr(skb))
				goto drop;
		}
	}

	DEENC_CHECK();
	/* Hand off to the route's input function (ip_local_deliver / ip_forward). */
	return skb->dst->input(skb);

inhdr_error:
	IP_INC_STATS_BH(IpInHdrErrors);
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
int cpt_dump_ifaddr(struct cpt_context * ctx) { struct net *net = get_exec_env()->ve_netns; struct net_device *dev; cpt_open_section(ctx, CPT_SECT_NET_IFADDR); for_each_netdev(net, dev) { struct in_device *idev = in_dev_get(dev); struct in_ifaddr *ifa; if (!idev) continue; for (ifa = idev->ifa_list; ifa; ifa = ifa->ifa_next) { struct cpt_ifaddr_image v; cpt_open_object(NULL, ctx); v.cpt_next = CPT_NULL; v.cpt_object = CPT_OBJ_NET_IFADDR; v.cpt_hdrlen = sizeof(v); v.cpt_content = CPT_CONTENT_VOID; v.cpt_index = dev->ifindex; v.cpt_family = AF_INET; v.cpt_masklen = ifa->ifa_prefixlen; v.cpt_flags = ifa->ifa_flags; v.cpt_scope = ifa->ifa_scope; memset(&v.cpt_address, 0, sizeof(v.cpt_address)); memset(&v.cpt_peer, 0, sizeof(v.cpt_peer)); memset(&v.cpt_broadcast, 0, sizeof(v.cpt_broadcast)); v.cpt_address[0] = ifa->ifa_local; v.cpt_peer[0] = ifa->ifa_address; v.cpt_broadcast[0] = ifa->ifa_broadcast; memcpy(v.cpt_label, ifa->ifa_label, IFNAMSIZ); ctx->write(&v, sizeof(v), ctx); cpt_close_object(ctx); } in_dev_put(idev); } #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) for_each_netdev(net, dev) { struct inet6_dev *idev = in6_dev_get(dev); struct inet6_ifaddr *ifa; if (!idev) continue; for (ifa = idev->addr_list; ifa; ifa = ifa->if_next) { struct cpt_ifaddr_image v; if (dev == net->loopback_dev && ifa->prefix_len == 128 && ifa->addr.s6_addr32[0] == 0 && ifa->addr.s6_addr32[1] == 0 && ifa->addr.s6_addr32[2] == 0 && ifa->addr.s6_addr32[3] == htonl(1)) continue; cpt_open_object(NULL, ctx); v.cpt_next = CPT_NULL; v.cpt_object = CPT_OBJ_NET_IFADDR; v.cpt_hdrlen = sizeof(v); v.cpt_content = CPT_CONTENT_VOID; v.cpt_index = dev->ifindex; v.cpt_family = AF_INET6; v.cpt_masklen = ifa->prefix_len; v.cpt_flags = ifa->flags; v.cpt_scope = ifa->scope; v.cpt_valid_lft = ifa->valid_lft; v.cpt_prefered_lft = ifa->prefered_lft; memcpy(&v.cpt_address, &ifa->addr, 16); memcpy(&v.cpt_peer, &ifa->addr, 16); memset(&v.cpt_broadcast, 0, sizeof(v.cpt_broadcast)); memcpy(v.cpt_label, 
dev->name, IFNAMSIZ); ctx->write(&v, sizeof(v), ctx); cpt_close_object(ctx); } in6_dev_put(idev); } #endif cpt_close_section(ctx); return 0; }
/*
 * ak_client_inform_port - notify the Aker firewall daemon (fwprofd) that a
 * local user has bound a source port.
 *
 * For every active logon of @uid, hand-builds an IP/UDP packet containing a
 * fwprofd_header + fwprofd_port_ctl (both MD5-authenticated with the logon
 * secret), routes it, and transmits it directly through the neighbour layer,
 * bypassing the socket API. Heavily #ifdef'd to span kernel APIs from 2.4
 * through 3.6+.
 *
 * @dev:      outgoing net device (its first IPv4 address becomes the source)
 * @port_src: source port being announced (already in the wire byte order
 *            expected by the daemon — passed through unchanged)
 * @protocol: L4 protocol number being announced
 * @uid:      local user whose logons are notified
 *
 * Returns 0 if at least one packet was sent, -1 if none was, and -2..-5 for
 * argument/allocation/address failures (aborting the whole loop).
 */
static int ak_client_inform_port(const struct net_device *dev, aku16 port_src, aku8 protocol, unsigned int uid)
{
	ak_client_logon_array user_logon[AK_CLIENT_MAX_LOGONS_PER_USER];
	struct sk_buff *skb;		// packet sent to notify the firewall
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
	struct flowi flp;
#else
	struct flowi4 flp;
#endif
	struct in_device *idev;
	struct rtable *rt;		// route used to send the packet
	struct iphdr *ip;		// IP header of the packet being built
	struct udphdr *udp;		// UDP header of the packet being built
	struct dst_entry *dst;
#if (((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,41)) && \
      (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))) || \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)))
	struct neighbour *neigh;
#endif
	MD5_CTX contexto;		// context for the MD5 computations
	int pkt_sent = 0;		// did we send at least one packet?
	fwprofd_header *header;
	fwprofd_port_ctl *port_ctl;
	ak_client_logon_array *logon;
	int size;
	int count;
	int i;

	if (!dev) {
		PRINT("Device de saida NULL\n");
		return -2;
	}

	count = ak_client_get_user_list(uid, user_logon);

	/* Total on-wire size: IP + UDP + fwprofd header + port control blob. */
	size = sizeof(struct iphdr) + sizeof(struct udphdr) + sizeof(fwprofd_header) + sizeof(fwprofd_port_ctl);

	/* One packet per active logon of this user. */
	for (i = 0, logon = user_logon; i < count; i++, logon++) {
		PRINT("Enviando pacote %d/%d - ", i + 1, count);

		skb = alloc_skb(size + 16, GFP_ATOMIC);
		if (!skb) {
			PRINT("Nao consegui alocar skbuff para enviar pacote\n");
			return -3;
		}
		/* NOTE(review): manual headroom reservation; skb_reserve(skb, 16)
		 * would be the conventional way to do this — verify equivalence
		 * on all supported kernels. */
		skb->data += 16;
		skb->len = size;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
		skb->tail = skb->data + size;
		skb->nh.iph = (struct iphdr *) skb->data;
		skb->h.uh = (struct udphdr *) (skb->data + sizeof(struct iphdr));
		ip = skb->nh.iph;
#else
		skb_set_tail_pointer(skb, size);
		skb_reset_network_header(skb);
		skb_set_transport_header(skb, sizeof(struct iphdr));
		ip = ip_hdr(skb);
#endif
		/* Lay the payload structures out back-to-back after the headers. */
		udp = (struct udphdr *) ((char *) ip + sizeof(struct iphdr));
		header = (fwprofd_header *) (udp + 1);
		port_ctl = (fwprofd_port_ctl *) (header + 1);

		// Grab the outgoing interface's IP to allocate the output route
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
		idev = in_dev_get(dev);
#else
		rcu_read_lock();
		idev = __in_dev_get_rcu(dev);
#endif
		if (!idev) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
			rcu_read_unlock();
#endif
			kfree_skb(skb);
			PRINT("Device de saida sem IP (1)\n");
			return -4;
		}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
		read_lock(&idev->lock);
#endif
		if (!idev->ifa_list) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
			read_unlock(&idev->lock);
			in_dev_put(idev);
#else
			rcu_read_unlock();
#endif
			kfree_skb(skb);
			PRINT("Device de saida sem IP (2)\n");
			return -5;
		}
		/* Use the device's primary address as the packet source. */
		ip->saddr = idev->ifa_list->ifa_address;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
		read_unlock(&idev->lock);
		in_dev_put(idev);
#else
		rcu_read_unlock();
#endif

		/* Build the flow key for routing towards the logon's address.
		 * NOTE(review): ntohs(AKER_PROF_PORT) is used where htons() is
		 * conventional — the two are the same operation on every
		 * supported arch, but htons() would read correctly. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
		flp.oif = 0;
		flp.nl_u.ip4_u.saddr = ip->saddr;
		flp.nl_u.ip4_u.daddr = logon->logon_data.ip.s_addr;
		flp.nl_u.ip4_u.tos = 0;
		flp.uli_u.ports.sport = ntohs(AKER_PROF_PORT);
		flp.uli_u.ports.dport = ntohs(AKER_PROF_PORT);
		flp.proto = IPPROTO_UDP;
#else
		flp.flowi4_oif = 0;
		flp.saddr = ip->saddr;
		flp.daddr = logon->logon_data.ip.s_addr;
		flp.flowi4_tos = 0;
		flp.fl4_sport = ntohs(AKER_PROF_PORT);
		flp.fl4_dport = ntohs(AKER_PROF_PORT);
		flp.flowi4_proto = IPPROTO_UDP;
#endif

		/* Route lookup failure is per-logon: skip this target, keep going. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
		if (ip_route_output_key(&rt, &flp))
#else
		if (ip_route_output_key(&init_net, &rt, &flp))
#endif
		{
			kfree_skb(skb);
			PRINT("Erro ao alocar rota de saida\n");
			continue;
		}
#else
		rt = ip_route_output_key(&init_net, &flp);
		if (IS_ERR(rt)) {
			kfree_skb(skb);
			PRINT("Erro ao alocar rota de saida\n");
			continue;
		}
#endif
		/* Attach a cloned dst to the skb; the route itself is released
		 * with ip_rt_put() after transmission. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
		skb->dst = dst_clone(&rt->u.dst);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
		skb_dst_set(skb, dst_clone(&rt->u.dst));
#else
		skb_dst_set(skb, dst_clone(&rt->dst));
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
		skb->dev = rt->u.dst.dev;
#else
		skb->dev = rt->dst.dev;
#endif
		skb->protocol = __constant_htons(ETH_P_IP);

		// Fill in the user data
		port_ctl->ip_src.s_addr = 0;
		port_ctl->seq = ntohl(logon->seq); // ak_client_get_user_list() already bumped seq
		port_ctl->user_num = ntohl(logon->logon_data.ak_user_num);
		port_ctl->port = port_src;
		port_ctl->protocol = protocol;
		port_ctl->reserved = 0;

		/* Inner MD5: authenticates the port_ctl fields with the
		 * per-logon shared secret. */
		MD5Init(&contexto);
		MD5Update(&contexto, (u_char *) logon->logon_data.secret, 16);
		MD5Update(&contexto, (u_char *) &port_ctl->ip_src, sizeof(struct in_addr));
		MD5Update(&contexto, (u_char *) &port_ctl->seq, sizeof(aku32));
		MD5Update(&contexto, (u_char *) &port_ctl->user_num, sizeof(aku32));
		MD5Update(&contexto, (u_char *) &port_ctl->port, sizeof(aku16));
		MD5Update(&contexto, (u_char *) &port_ctl->protocol, sizeof(aku8));
		MD5Update(&contexto, (u_char *) &port_ctl->reserved, sizeof(aku8));
		MD5Final((u_char *) port_ctl->hash, &contexto);

		// Fill in the remaining packet fields
		header->ip_dst = logon->logon_data.ip;
		header->versao = AKER_PROF_VERSION;
		header->tipo_req = APROF_BIND_PORT;
		/* Outer MD5 is computed over the header with its md5 field zeroed. */
		memset(header->md5, 0, 16);
		MD5Init(&contexto);
		MD5Update(&contexto, (void *) header, sizeof(fwprofd_header));
		MD5Update(&contexto, (void *) port_ctl, sizeof(fwprofd_port_ctl));
		MD5Final(header->md5, &contexto);

		udp->dest = udp->source = ntohs(AKER_PROF_PORT);
		udp->len = ntohs(size - sizeof(struct iphdr));
		udp->check = 0;		/* UDP checksum deliberately not computed */

		ip->ihl = sizeof(struct iphdr) >> 2;
		ip->version = IPVERSION;
		ip->ttl = IPDEFTTL;
		ip->tos = 0;
		ip->daddr = header->ip_dst.s_addr;
		ip->protocol = IPPROTO_UDP;
		ip->frag_off = 0;
		ip->tot_len = htons(size);
		ip->id = 0;
		ip->check = 0;
		ip->check = ip_fast_csum((u_char *) ip, ip->ihl);

		PRINT("%s -> %s\n", ip2a(ip->saddr), ip2a(ip->daddr));

		// Send the packet
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
		dst = skb->dst;
#else
		dst = skb_dst(skb);
#endif
		/* Transmit via the neighbour layer. The trailing `else` after the
		 * #endif below deliberately binds to the last `if` of whichever
		 * branch was compiled in. */
#if (((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,41)) && \
      (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))) || \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)) && \
     LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0))
		rcu_read_lock();
		neigh = dst_get_neighbour_noref(dst);
		if (neigh) {
			neigh->output(neigh, skb);
			ip_rt_put(rt);
			pkt_sent++;
		}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
		rcu_read_lock();
		/* NOTE(review): dst_neigh_lookup_skb() takes a reference on the
		 * neighbour that does not appear to be released with
		 * neigh_release() anywhere in this function — verify; this
		 * looks like a neighbour refcount leak on >= 3.6 kernels. */
		neigh = dst_neigh_lookup_skb(dst, skb);
		if (neigh) {
			neigh->output(neigh, skb);
			ip_rt_put(rt);
			pkt_sent++;
		}
#else
		if (dst->hh) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
			int hh_alen;

			/* Copy the cached hardware header in front of the data
			 * under the hh lock, then emit directly. */
			read_lock_bh(dst->hh->hh_lock);
			hh_alen = HH_DATA_ALIGN(dst->hh->hh_len);
			memcpy(skb->data - hh_alen, dst->hh->hh_data, hh_alen);
			read_unlock_bh(dst->hh->hh_lock);
			skb_push(skb, dst->hh->hh_len);
			dst->hh->hh_output(skb);
#else
			neigh_hh_output(dst->hh, skb);
#endif
			ip_rt_put(rt);
			pkt_sent++;
		} else if (dst->neighbour) {
			dst->neighbour->output(skb);
			ip_rt_put(rt);
			pkt_sent++;
		}
#endif
		else {
			/* No usable neighbour/hh: give up on this packet. */
			kfree_skb(skb);
			ip_rt_put(rt);
			PRINT("Nao sei como enviar pacote de saida\n");
		}
#if (((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,41)) && \
      (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))) || \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)))
		rcu_read_unlock();
#endif
	}

	if (!pkt_sent)
		return -1;
	return 0;
}
static int c2_up(struct net_device *netdev) { struct c2_port *c2_port = netdev_priv(netdev); struct c2_dev *c2dev = c2_port->c2dev; struct c2_element *elem; struct c2_rxp_hdr *rxp_hdr; struct in_device *in_dev; size_t rx_size, tx_size; int ret, i; unsigned int netimr0; if (netif_msg_ifup(c2_port)) pr_debug("%s: enabling interface\n", netdev->name); /* Set the Rx buffer size based on MTU */ c2_set_rxbufsize(c2_port); /* Allocate DMA'able memory for Tx/Rx host descriptor rings */ rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc); tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc); c2_port->mem_size = tx_size + rx_size; c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size, &c2_port->dma); if (c2_port->mem == NULL) { pr_debug("Unable to allocate memory for " "host descriptor rings\n"); return -ENOMEM; } memset(c2_port->mem, 0, c2_port->mem_size); /* Create the Rx host descriptor ring */ if ((ret = c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma, c2dev->mmio_rxp_ring))) { pr_debug("Unable to create RX ring\n"); goto bail0; } /* Allocate Rx buffers for the host descriptor ring */ if (c2_rx_fill(c2_port)) { pr_debug("Unable to fill RX ring\n"); goto bail1; } /* Create the Tx host descriptor ring */ if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size, c2_port->dma + rx_size, c2dev->mmio_txp_ring))) { pr_debug("Unable to create TX ring\n"); goto bail1; } /* Set the TX pointer to where we left off */ c2_port->tx_avail = c2_port->tx_ring.count - 1; c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean = c2_port->tx_ring.start + c2dev->cur_tx; /* missing: Initialize MAC */ BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean); /* Reset the adapter, ensures the driver is in sync with the RXP */ c2_reset(c2_port); /* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */ for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count; i++, elem++) { rxp_hdr = (struct c2_rxp_hdr *) 
elem->skb->data; rxp_hdr->flags = 0; __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS); } /* Enable network packets */ netif_start_queue(netdev); /* Enable IRQ */ writel(0, c2dev->regs + C2_IDIS); netimr0 = readl(c2dev->regs + C2_NIMR0); netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT); writel(netimr0, c2dev->regs + C2_NIMR0); /* Tell the stack to ignore arp requests for ipaddrs bound to * other interfaces. This is needed to prevent the host stack * from responding to arp requests to the ipaddr bound on the * rdma interface. */ in_dev = in_dev_get(netdev); IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1); in_dev_put(in_dev); return 0; bail1: c2_rx_clean(c2_port); kfree(c2_port->rx_ring.start); bail0: pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem, c2_port->dma); return ret; }
int igmp_rcv(struct sk_buff *skb) { /* This basically follows the spec line by line -- see RFC1112 */ struct igmphdr *ih = skb->h.igmph; struct in_device *in_dev = in_dev_get(skb->dev); int len = skb->len; if (in_dev==NULL) { kfree_skb(skb); return 0; } if (skb_is_nonlinear(skb)) { if (skb_linearize(skb, GFP_ATOMIC) != 0) { kfree_skb(skb); return -ENOMEM; } ih = skb->h.igmph; } if (len < sizeof(struct igmphdr) || ip_compute_csum((void *)ih, len)) { in_dev_put(in_dev); kfree_skb(skb); return 0; } #ifdef CONFIG_RG_IGMP_PROXY igmprx_recv(skb); #endif #ifdef CONFIG_RG_IGMP_PROXY_MODULE if (igmp_proxy_recv) igmp_proxy_recv(skb); #endif switch (ih->type) { case IGMP_HOST_MEMBERSHIP_QUERY: igmp_heard_query(in_dev, ih->code, ih->group); break; case IGMP_HOST_MEMBERSHIP_REPORT: case IGMP_HOST_NEW_MEMBERSHIP_REPORT: /* Is it our report looped back? */ if (((struct rtable*)skb->dst)->key.iif == 0) break; igmp_heard_report(in_dev, ih->group); break; case IGMP_PIM: #ifdef CONFIG_IP_PIMSM_V1 in_dev_put(in_dev); return pim_rcv_v1(skb); #endif case IGMP_DVMRP: case IGMP_TRACE: case IGMP_HOST_LEAVE_MESSAGE: case IGMP_MTRACE: case IGMP_MTRACE_RESP: break; default: // NETDEBUG(printk(KERN_DEBUG "New IGMP type=%d, why we do not know about it?\n", ih->type)); ; } in_dev_put(in_dev); kfree_skb(skb); return 0; }