/* check for PPLive & PPStream */
/*
 * search_pp() - classify a TCP payload as one of several Chinese P2P
 * streaming protocols (PPLive, PPStream, UUSee, QQLive, feidian, POCO,
 * QVOD).
 *
 * @payload: start of the TCP payload
 * @plen:    payload length in bytes
 *
 * Returns IPP2P_PP * 100 + <sub-rule id> on a match, 0 otherwise.
 *
 * NOTE(review): most tests dereference fixed offsets (up to payload+30)
 * without first checking plen -- presumably the caller guarantees a
 * minimum payload length; verify against the caller.
 */
int search_pp (const unsigned char *payload, const u16 plen)
{
	/* message pplive: plain HTTP request for the PPLive channel list */
	if ( memcmp(payload,"GET /zh-cn/xml/default.xml",26)==0 ) return (IPP2P_PP *100 + 1 );
	/* message PPStream: literal protocol banner */
	if ( memcmp(payload,"PSProtocol",10)==0 ) return (IPP2P_PP * 100 + 2);
	/* message PPLive: 0xe903 magic with 0x98ab0102 at offsets 0/4 ... */
	if (get_u16(payload,0) == __constant_htons(0xe903) && get_u32(payload,4) == __constant_htonl(0x98ab0102)) return (IPP2P_PP * 100 + 3);
	/* ... or the same pair shifted to offsets 4/8 */
	if (get_u16(payload,4) == __constant_htons(0xe903) && get_u32(payload,8) == __constant_htonl(0x98ab0102)) return (IPP2P_PP * 100 + 4);
	/* message UUSee: bytes 0-1 hold a little-endian length that must
	 * equal plen-4 exactly, or be smaller with a second little-endian
	 * length field (located at first_len+4/+5) accounting for the
	 * remainder; bytes 2-3 must be zero. */
	if (((*payload + *(payload+1) * 256) == (plen-4) || ((*payload + *(payload+1) * 256) < (plen-4) && *payload + *(payload+1) * 256 + *(payload + *payload + *(payload+1) * 256 + 4) + *(payload + *payload + *(payload+1) * 256 + 5) * 256 + 8 == plen )) && get_u16(payload,2) == __constant_htons(0x0000))
	{
		/* "h.t.t.p.:././" spread over even offsets 18..30 means this
		 * is really an HTTP URL, not UUSee -- refuse the match */
		if (*(payload+18) == 0x68 && *(payload+20) == 0x74 && *(payload+22) == 0x74 && *(payload+24) == 0x70 && *(payload+26)==0x3a && *(payload+28) == 0x2f && *(payload+30) == 0x2f) return 0;
		return (IPP2P_PP * 100 + 5);
	}
	/* QQLive: short packet, 0xfe marker, byte 1 echoed at byte 4 */
	if ( (plen < 150) && (*payload == 0xfe) && (*(payload+1) == *(payload+4)) && get_u16(payload,2) == __constant_htons(0x0000) ) return (IPP2P_PP * 100 + 6);
	/* feidian: 4-byte probe or 61-byte request, both starting with the
	 * magic 0x291c3201 */
	if ( (plen == 4 && get_u32(payload,0) == __constant_htonl(0x291c3201)) || (plen == 61 && get_u32(payload,0) == __constant_htonl(0x291c3201) && get_u32(payload,4) == __constant_htonl(0x39000000))) return (IPP2P_PP * 100 + 7);
	/* POCO: little-endian length at bytes 0-1 equals plen; a second
	 * length at bytes 4-5 relates to plen-13.
	 * NOTE(review): the second sum adds *(payload+5) WITHOUT the *256
	 * weighting used in every other little-endian length here -- looks
	 * like a possible typo in the original rule; confirm against the
	 * POCO wire format before changing. */
	if ( (*payload + *(payload+1)*256) == plen && get_u16(payload,2) == __constant_htons(0x0000) && (*(payload+4) + *(payload+5)) == (plen - 13) && get_u16(payload,6) == __constant_htons(0x0000)) return (IPP2P_PP * 100 + 8);
	/* QVOD: protocol banner at offset 1 */
	if(memcmp((payload + 1),"QVOD protocol",13) == 0) return (IPP2P_PP * 100 + 9);
	return 0;
}
/*
 * udp_search_xunlei - recognise Xunlei / Baidu Xiaba UDP handshakes.
 *
 * @t:          start of the UDP header
 * @packet_len: total UDP length in bytes
 *
 * Both signatures share the same words at offsets 4 and 8
 * (0xfefffeff, 0x00000000); they differ only in the total length and
 * the leading word.  Returns IPP2P_XUNLEI * 100 + <sub-rule id> on a
 * match, 0 otherwise.
 */
int udp_search_xunlei (unsigned char *t, int packet_len)
{
	switch (packet_len) {
	case 24:
		/* baidu xiaba */
		if (get_u32(t,0) == __constant_htonl(0x01000101) &&
		    get_u32(t,4) == __constant_htonl(0xfefffeff) &&
		    get_u32(t,8) == __constant_htonl(0x00))
			return (IPP2P_XUNLEI * 100 + 11);
		break;
	case 38:
		if (get_u32(t,0) == __constant_htonl(0x010011a0) &&
		    get_u32(t,4) == __constant_htonl(0xfefffeff) &&
		    get_u32(t,8) == __constant_htonl(0x00))
			return (IPP2P_XUNLEI * 100 + 12);
		break;
	}
	return 0;
}
int ipv6_addr_type(struct in6_addr *addr) { u32 st; st = addr->s6_addr32[0]; /* Consider all addresses with the first three bits different of 000 and 111 as unicasts. */ if ((st & htonl(0xE0000000)) != htonl(0x00000000) && (st & htonl(0xE0000000)) != htonl(0xE0000000)) return IPV6_ADDR_UNICAST; if ((st & htonl(0xFF000000)) == htonl(0xFF000000)) { int type = IPV6_ADDR_MULTICAST; switch((st & htonl(0x00FF0000))) { case __constant_htonl(0x00010000): type |= IPV6_ADDR_LOOPBACK; break; case __constant_htonl(0x00020000): type |= IPV6_ADDR_LINKLOCAL; break; case __constant_htonl(0x00050000): type |= IPV6_ADDR_SITELOCAL; break; }; return type; } if ((st & htonl(0xFFC00000)) == htonl(0xFE800000)) return (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST); if ((st & htonl(0xFFC00000)) == htonl(0xFEC00000)) return (IPV6_ADDR_SITELOCAL | IPV6_ADDR_UNICAST); if ((addr->s6_addr32[0] | addr->s6_addr32[1]) == 0) { if (addr->s6_addr32[2] == 0) { if (addr->s6_addr32[3] == 0) return IPV6_ADDR_ANY; if (addr->s6_addr32[3] == htonl(0x00000001)) return (IPV6_ADDR_LOOPBACK | IPV6_ADDR_UNICAST); return (IPV6_ADDR_COMPATv4 | IPV6_ADDR_UNICAST); } if (addr->s6_addr32[2] == htonl(0x0000ffff)) return IPV6_ADDR_MAPPED; } return IPV6_ADDR_RESERVED; }
static void addrconf_add_lroute(struct net_device *dev) { struct in6_addr addr; ipv6_addr_set(&addr, __constant_htonl(0xFE800000), 0, 0, 0); addrconf_prefix_route(&addr, 10, dev, 0, RTF_ADDRCONF); }
/*
 * sit_add_v4_addrs - derive IPv6 addresses for a SIT (IPv6-in-IPv4
 * tunnel) device from the host's IPv4 addresses.
 *
 * For point-to-point tunnels the device's embedded IPv4 address
 * becomes a fe80:: link-local address; otherwise it becomes a
 * v4-compatible address.  If the device carries no IPv4 address, one
 * IPv6 address is generated per IPv4 address of every interface that
 * is UP.
 */
static void sit_add_v4_addrs(struct inet6_dev *idev)
{
	struct inet6_ifaddr * ifp;
	struct in6_addr addr;
	struct device *dev;
	int scope;

	/* place the device's own IPv4 address in the low 32 bits */
	memset(&addr, 0, sizeof(struct in6_addr));
	memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);

	if (idev->dev->flags&IFF_POINTOPOINT) {
		addr.s6_addr32[0] = __constant_htonl(0xfe800000);
		scope = IFA_LINK;
	} else {
		scope = IPV6_ADDR_COMPATv4;
	}

	if (addr.s6_addr32[3]) {
		/* device has an embedded IPv4 address: add just that one */
		ifp = ipv6_add_addr(idev, &addr, scope);
		if (ifp) {
			ifp->flags |= ADDR_PERMANENT;
			ifp->prefix_len = 128;
			ipv6_ifa_notify(RTM_NEWADDR, ifp);
		}
		return;
	}

	/* no embedded address: mirror every IPv4 address of every UP
	 * interface into an IPv6 address on the tunnel */
	for (dev = dev_base; dev != NULL; dev = dev->next) {
		if (dev->ip_ptr && (dev->flags & IFF_UP)) {
			struct in_device * in_dev = dev->ip_ptr;
			struct in_ifaddr * ifa;
			int flag = scope;

			for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
				addr.s6_addr32[3] = ifa->ifa_local;

				if (ifa->ifa_scope == RT_SCOPE_LINK)
					continue;
				if (ifa->ifa_scope >= RT_SCOPE_HOST) {
					/* host-scoped IPv4 addresses make no
					 * sense on a p2p tunnel */
					if (idev->dev->flags&IFF_POINTOPOINT)
						continue;
					flag |= IFA_HOST;
				}

				ifp = ipv6_add_addr(idev, &addr, flag);
				if (ifp == NULL)
					continue;

				if (idev->dev->flags&IFF_POINTOPOINT)
					ifp->prefix_len = 10;
				else
					ifp->prefix_len = 96;
				ifp->flags |= ADDR_PERMANENT;
				ipv6_ifa_notify(RTM_NEWADDR, ifp);
			}
		}
	}
}
/*
 * xdr_encode_sattr - XDR-encode an NFS sattr structure from an iattr.
 *
 * Attributes absent from attr->ia_valid are written as the all-ones
 * "don't change" sentinel.  Each timestamp occupies two words.
 * Returns the advanced write pointer.
 */
static inline u32 *
xdr_encode_sattr(u32 *p, struct iattr *attr)
{
	const u32 unset = __constant_htonl(0xFFFFFFFF);

	if (attr->ia_valid & ATTR_MODE)
		*p++ = htonl(attr->ia_mode);
	else
		*p++ = unset;
	if (attr->ia_valid & ATTR_UID)
		*p++ = htonl(attr->ia_uid);
	else
		*p++ = unset;
	if (attr->ia_valid & ATTR_GID)
		*p++ = htonl(attr->ia_gid);
	else
		*p++ = unset;
	if (attr->ia_valid & ATTR_SIZE)
		*p++ = htonl(attr->ia_size);
	else
		*p++ = unset;

	/* atime: explicit value, "server now", or unchanged */
	if (attr->ia_valid & ATTR_ATIME_SET) {
		p = xdr_encode_time(p, &attr->ia_atime);
	} else if (attr->ia_valid & ATTR_ATIME) {
		p = xdr_encode_current_server_time(p, &attr->ia_atime);
	} else {
		*p++ = unset;
		*p++ = unset;
	}

	/* mtime: same three-way choice */
	if (attr->ia_valid & ATTR_MTIME_SET) {
		p = xdr_encode_time(p, &attr->ia_mtime);
	} else if (attr->ia_valid & ATTR_MTIME) {
		p = xdr_encode_current_server_time(p, &attr->ia_mtime);
	} else {
		*p++ = unset;
		*p++ = unset;
	}
	return p;
}
/*
 * nfs4_callback_compound - decode, process and encode an NFSv4
 * CB_COMPOUND request.
 *
 * Decodes the compound header, validates the client (for minor
 * version 0, by callback ident and GSS principal), then runs each
 * operation in turn until one fails or all are done, encoding the
 * results in place in the reply buffer.
 */
static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *resp)
{
	struct cb_compound_hdr_arg hdr_arg = { 0 };
	struct cb_compound_hdr_res hdr_res = { NULL };
	struct xdr_stream xdr_in, xdr_out;
	__be32 *p, status;
	struct cb_process_state cps = {
		.drc_status = 0,
		.clp = NULL,
		.slotid = NFS4_NO_SLOT,
		.net = rqstp->rq_xprt->xpt_net,
	};
	unsigned int nops = 0;

	dprintk("%s: start\n", __func__);

	xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);

	/* encode the reply directly after whatever is already in the head */
	p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
	xdr_init_encode(&xdr_out, &rqstp->rq_res, p);

	status = decode_compound_hdr_arg(&xdr_in, &hdr_arg);
	if (status == __constant_htonl(NFS4ERR_RESOURCE))
		return rpc_garbage_args;

	if (hdr_arg.minorversion == 0) {
		/* v4.0 identifies the client by callback ident; drop the
		 * request if unknown or if the GSS principal mismatches */
		cps.clp = nfs4_find_client_ident(rqstp->rq_xprt->xpt_net, hdr_arg.cb_ident);
		if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
			return rpc_drop_reply;
	}

	hdr_res.taglen = hdr_arg.taglen;
	hdr_res.tag = hdr_arg.tag;
	if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
		return rpc_system_err;

	while (status == 0 && nops != hdr_arg.nops) {
		status = process_op(hdr_arg.minorversion, nops, rqstp, &xdr_in, argp, &xdr_out, resp, &cps);
		nops++;
	}

	/* header-level buffer overflow: report a resource error in the
	 * compound status without counting the failed op */
	if (unlikely(status == htonl(NFS4ERR_RESOURCE_HDR))) {
		status = htonl(NFS4ERR_RESOURCE);
		nops--;
	}

	*hdr_res.status = status;
	*hdr_res.nops = htonl(nops);
	nfs4_cb_free_slot(&cps);
	nfs_put_client(cps.clp);
	dprintk("%s: done, status = %u\n", __func__, ntohl(status));
	return rpc_success;
}

/* per-operation dispatch table (definition continues beyond this chunk) */
static struct callback_op callback_ops[] = { [0] = { .res_maxsize = CB_OP_HDR_RES_MAXSZ, },
/* check for PPLive & PPStream UDP pkg */
/*
 * udp_search_pp - classify a UDP payload as PPLive / PPStream / QQLive /
 * feidian / POCO / QVOD traffic.
 *
 * @t:          start of the UDP header
 * @packet_len: total UDP length in bytes
 *
 * Returns IPP2P_PP * 100 + <sub-rule id> on a match, 0 otherwise.
 * NOTE(review): fixed-offset reads (up to t+26) are not length-checked
 * here -- presumably the caller guarantees a minimum size; verify.
 */
int udp_search_pp (unsigned char *t, int packet_len)
{
	/* PPLive: 0xe903 magic with 0x98ab0102 at offset 4 */
	if (get_u16(t,0) == __constant_htons(0xe903) && get_u32(t,4) == __constant_htonl(0x98ab0102))
		return (IPP2P_PP * 100 + 11);
	/* PPStream: "[bsinfo]" CRLF "mf=" text block starting at offset 8 */
	if ( (memcmp(t+8,"[bsinfo]",8) ==0) && (*(t+16) == 0x0d) && (*(t+17) == 0x0a) && (memcmp(t+18,"mf=",3) ==0) )
		return (IPP2P_PP * 100 + 12);
	/* short 0x0909 control packets */
	if ( (packet_len == 22 || packet_len == 8) && get_u16(t,0) == __constant_htons(0x0909) && (*(t+2) == 0x08 || *(t+2) == 0x09) && ((*(t+3) == 0x00) || (*(t+3) == 0x01)) )
		return (IPP2P_PP * 100 + 13);
	/* length-prefixed packet: first byte + 4 must equal the total
	 * length (note the unaligned 32-bit read at offset 1) */
	if ( packet_len == (*t+4) && get_u32(t,1) == __constant_htonl(0x00430000))
		return (IPP2P_PP * 100 + 15 );
	/* QQLive */
	if ( (packet_len < 150) && (*t == 0xfe) && (*(t+1) == *(t+4)) && get_u16(t,2) == __constant_htons(0x0000) )
		return (IPP2P_PP * 100 + 14);
	/* feidian */
	if ( (packet_len == 112 || packet_len == 116) && get_u32(t,0) == __constant_htonl(0x1c1c3201) && (get_u16(t,4) == __constant_htons(0x0b00) || get_u16(t,4) == __constant_htons(0x0c00)))
		return (IPP2P_PP * 100 + 16);
	/* POCO */
	if (packet_len == 6 && get_u16(t,0) == __constant_htons(0x8095) && (get_u16(t,2) == __constant_htons(0x0462) || get_u16(t,2) == __constant_htons(0x0565)) )
		return (IPP2P_PP * 100 + 17);
	if (packet_len == 22 && get_u16(t,0) == __constant_htons(0x8094) && (get_u16(t,2) == __constant_htons(0x0429) || get_u16(t,4) == __constant_htons(0x0429)) )
		return (IPP2P_PP * 100 + 18);
	/* QVOD: first word is the big-endian total length, banner at +14 */
	if (packet_len == ntohl(get_u32(t,0)) && memcmp((t + 14),"QVOD protocol",13) == 0)
		return (IPP2P_PP * 100 + 19);
	return 0;
}
static void addrconf_add_mroute(struct net_device *dev) { struct in6_rtmsg rtmsg; memset(&rtmsg, 0, sizeof(rtmsg)); ipv6_addr_set(&rtmsg.rtmsg_dst, __constant_htonl(0xFF000000), 0, 0, 0); rtmsg.rtmsg_dst_len = 8; rtmsg.rtmsg_metric = IP6_RT_PRIO_ADDRCONF; rtmsg.rtmsg_ifindex = dev->ifindex; rtmsg.rtmsg_flags = RTF_UP|RTF_ADDRCONF; rtmsg.rtmsg_type = RTMSG_NEWROUTE; ip6_route_add(&rtmsg); }
/*
 * ip_wccp_rcv - receive handler for WCCP GRE-encapsulated packets.
 *
 * Strips the GRE/WCCP header (detecting the 4-octet-longer WCCPv2
 * form heuristically), clears outer-packet state from the skb and
 * re-injects the inner IPv4 packet into the stack.  Always returns 0;
 * the skb is consumed on every path.
 */
int ip_wccp_rcv(struct sk_buff *skb)
{
	u32  *gre_hdr;
	struct iphdr *iph;

	/* need the GRE header plus enough to peek at the inner packet */
	if (!pskb_may_pull(skb, 16))
		goto drop;

	iph = skb->nh.iph;
	gre_hdr = (u32 *)skb->h.raw;
	if(*gre_hdr != __constant_htonl(WCCP_PROTOCOL_TYPE))
		goto drop;

	skb->mac.raw = skb->nh.raw;

	/* WCCP2 puts an extra 4 octets into the header, but uses the same
	 * encapsulation type; if it looks as if the first octet of the packet
	 * isn't the beginning of an IPv4 header, assume it's WCCP2.
	 * This should be safe as these bits are reserved in the WCCPv2 header
	 * and always zero in WCCPv2.
	 */
	if ((skb->h.raw[WCCP_GRE_LEN] & 0xF0) != 0x40) {
		skb->nh.raw = pskb_pull(skb, WCCP_GRE_LEN + WCCP2_GRE_EXTRA);
	} else {
		skb->nh.raw = pskb_pull(skb, WCCP_GRE_LEN);
	}
	if (skb->len <= 0)
		goto drop;

	/* forget all outer-packet metadata before re-injecting */
	memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
	skb->protocol = __constant_htons(ETH_P_IP);
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	skb->dst = NULL;
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#ifdef CONFIG_NETFILTER_DEBUG
	skb->nf_debug = 0;
#endif
#endif
	/* propagate ECN from the outer header, then hand the inner packet
	 * back to the network stack */
	ip_wccp_ecn_decapsulate(iph, skb);
	netif_rx(skb);
	return(0);

drop:
	kfree_skb(skb);
	return(0);
}
/*
 * hippi_header - build the HIPPI hardware header for an outgoing frame.
 *
 * Pushes HIPPI_HLEN bytes and fills in the FP, LE and SNAP parts.
 * Returns HIPPI_HLEN when the destination is known; returns
 * -HIPPI_HLEN when @daddr is NULL, signalling that the header is
 * incomplete and the I-field must be filled in later.
 */
static int hippi_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type, void *daddr, void *saddr,
			unsigned len)
{
	struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN);
	struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;

	if (!len){
		/* fall back to the data already queued in the skb */
		len = skb->len - HIPPI_HLEN;
		printk("hippi_header(): length not supplied\n");
	}

	/*
	 * Due to the stupidity of the little endian byte-order we
	 * have to set the fp field this way.
	 */
	hip->fp.fixed		= __constant_htonl(0x04800018);
	hip->fp.d2_size		= htonl(len + 8);
	hip->le.fc		= 0;
	hip->le.double_wide	= 0;	/* only HIPPI 800 for the time being */
	hip->le.message_type	= 0;	/* Data PDU */

	hip->le.dest_addr_type	= 2;	/* 12 bit SC address */
	hip->le.src_addr_type	= 2;	/* 12 bit SC address */

	memcpy(hip->le.src_switch_addr, dev->dev_addr + 3, 3);
	memset(&hip->le.reserved, 0, 16);

	hip->snap.dsap		= HIPPI_EXTENDED_SAP;
	hip->snap.ssap		= HIPPI_EXTENDED_SAP;
	hip->snap.ctrl		= HIPPI_UI_CMD;
	hip->snap.oui[0]	= 0x00;
	hip->snap.oui[1]	= 0x00;
	hip->snap.oui[2]	= 0x00;
	hip->snap.ethertype	= htons(type);

	if (daddr)
	{
		memcpy(hip->le.dest_switch_addr, daddr + 3, 3);
		memcpy(&hcb->ifield, daddr + 2, 4);
		return HIPPI_HLEN;
	}
	/* destination unknown: negative return marks the header as
	 * unfinished for the caller */
	hcb->ifield = 0;
	return -((int)HIPPI_HLEN);
}
/*
 * Decode, process and encode a COMPOUND
 */
static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *resp)
{
	struct cb_compound_hdr_arg hdr_arg = { 0 };
	struct cb_compound_hdr_res hdr_res = { NULL };
	struct xdr_stream xdr_in, xdr_out;
	__be32 *p;
	__be32 status, drc_status = 0;
	unsigned int nops = 0;

	dprintk("%s: start\n", __func__);

	xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);

	/* encode the reply directly after whatever is already in the head */
	p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
	xdr_init_encode(&xdr_out, &rqstp->rq_res, p);

	status = decode_compound_hdr_arg(&xdr_in, &hdr_arg);
	if (status == __constant_htonl(NFS4ERR_RESOURCE))
		return rpc_garbage_args;

	hdr_res.taglen = hdr_arg.taglen;
	hdr_res.tag = hdr_arg.tag;
	if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
		return rpc_system_err;

	/* run each op in turn until one fails or all are processed */
	while (status == 0 && nops != hdr_arg.nops) {
		status = process_op(hdr_arg.minorversion, nops, rqstp, &xdr_in, argp, &xdr_out, resp, &drc_status);
		nops++;
	}

	/* Buffer overflow in decode_ops_hdr or encode_ops_hdr. Return
	 * resource error in cb_compound status without returning op */
	if (unlikely(status == htonl(NFS4ERR_RESOURCE_HDR))) {
		status = htonl(NFS4ERR_RESOURCE);
		nops--;
	}

	*hdr_res.status = status;
	*hdr_res.nops = htonl(nops);
	dprintk("%s: done, status = %u\n", __func__, ntohl(status));
	return rpc_success;
}
/*
 * ebt_target_idnat - ebtables target: rewrite the destination MAC
 * according to the last octet of the packet's destination IPv4 address.
 *
 * The destination IP must fall inside info->ip_subnet (a /24 prefix);
 * its final octet indexes info->a[] to find the replacement MAC and
 * the verdict.  Packets that do not match fall through with
 * info->target.  For ARP packets the target hardware address inside
 * the ARP body is rewritten as well.
 */
static int ebt_target_idnat(struct sk_buff **pskb, unsigned int hooknr,
   const struct net_device *in, const struct net_device *out,
   const void *data, unsigned int datalen)
{
	struct ebt_inat_info *info = (struct ebt_inat_info *)data;
	uint32_t ip;
	int index;
	struct ebt_inat_tuple *tuple;

	if (!get_ip_dst(*pskb, &ip)) {
		return info->target;
	}
	/* ip is in network byte order; the mask keeps the /24 prefix */
	if ((ip & __constant_htonl(0xffffff00)) != info->ip_subnet) {
		return info->target;
	}
	/* final octet (the host part) selects the NAT tuple */
	index = ((unsigned char*)&ip)[3];
	tuple = &info->a[index];
	if (!tuple->enabled) {
		return info->target;
	}
	memcpy(((**pskb).mac.ethernet)->h_dest, tuple->mac, ETH_ALEN * sizeof(unsigned char));
	if ((**pskb).mac.ethernet->h_proto == __constant_htons(ETH_P_ARP)) {
		/* also patch the target hardware address in the ARP payload:
		 * it sits after the fixed header, sender MAC and sender IP */
		memcpy(
			(**pskb).nh.raw + sizeof(struct arphdr) + (**pskb).nh.arph->ar_hln + (**pskb).nh.arph->ar_pln,
			tuple->mac, ETH_ALEN);
	}
	return tuple->target;
}
/*Search for BitTorrent commands*/
/*
 * search_bittorrent - classify a TCP payload as BitTorrent traffic:
 * the handshake banner, tracker HTTP GET requests, or (for short
 * packets) bitcomet block-request packets.
 *
 * @payload: start of the TCP payload
 * @plen:    payload length in bytes
 *
 * Returns IPP2P_BIT * 100 + <sub-rule id> on a match, 0 otherwise.
 */
int search_bittorrent (const unsigned char *payload, const u16 plen)
{
	if (plen > 20) {
		/* test for match 0x13+"BitTorrent protocol" */
		if (*payload == 0x13) {
			if (memcmp(payload+1, "BitTorrent protocol", 19) == 0)
				return (IPP2P_BIT * 100);
		}

		/* get tracker commandos, all starts with GET /
		 * then it can follow: scrape| announce
		 * and then ?hash_info=
		 */
		if (memcmp(payload,"GET /",5) == 0) {
			/* message scrape */
			if ( memcmp(payload+5,"scrape?info_hash=",17)==0 )
				return (IPP2P_BIT * 100 + 1);
			/* message announce */
			if ( memcmp(payload+5,"announce",8)==0 )
				return (IPP2P_BIT * 100 + 2);
			if ( memcmp(payload+5,"?info_hash=",11)==0 )
				return (IPP2P_BIT * 100 + 3);
			if ( memcmp(payload+5,"data?fid=",9)==0 )
				return (IPP2P_BIT * 100 + 5);
		}
	} else {
		/* bitcomet encryptes the first packet, so we have to detect another
		 * one later in the flow */
		/* first try failed, too many missdetections */
		//if ( size == 5 && get_u32(t,0) == __constant_htonl(1) && t[4] < 3) return (IPP2P_BIT * 100 + 3);

		/* second try: block request packets */
		if ( plen == 17 && get_u32(payload,0) == __constant_htonl(0x0d) && *(payload+4) == 0x06 && get_u32(payload,13) == __constant_htonl(0x4000) )
			return (IPP2P_BIT * 100 + 4);
	}
	return 0;
}
/*Search for UDP BitTorrent commands*/
/*
 * udp_search_bit - classify a UDP payload as BitTorrent DHT / UDP
 * tracker traffic.  Offsets count from the start of the UDP header,
 * so the payload proper begins at offset 8.
 *
 * Returns IPP2P_BIT * 100 + <sub-rule id> on a match, 0 otherwise.
 */
int udp_search_bit (unsigned char *haystack, int packet_len)
{
	switch(packet_len)
	{
	case 24:
		/* ^ 00 00 04 17 27 10 19 80 */
		if ((ntohl(get_u32(haystack, 8)) == 0x00000417) && (ntohl(get_u32(haystack, 12)) == 0x27101980))
			return (IPP2P_BIT * 100 + 50);
		break;
	case 44:
		if (get_u32(haystack, 16) == __constant_htonl(0x00000400) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
			return (IPP2P_BIT * 100 + 51);
		if (get_u32(haystack, 16) == __constant_htonl(0x00000400))
			return (IPP2P_BIT * 100 + 61);
		break;
	case 65:
		if (get_u32(haystack, 16) == __constant_htonl(0x00000404) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
			return (IPP2P_BIT * 100 + 52);
		if (get_u32(haystack, 16) == __constant_htonl(0x00000404))
			return (IPP2P_BIT * 100 + 62);
		break;
	case 67:
		if (get_u32(haystack, 16) == __constant_htonl(0x00000406) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
			return (IPP2P_BIT * 100 + 53);
		if (get_u32(haystack, 16) == __constant_htonl(0x00000406))
			return (IPP2P_BIT * 100 + 63);
		break;
	case 211:
		if (get_u32(haystack, 8) == __constant_htonl(0x00000405))
			return (IPP2P_BIT * 100 + 54);
		break;
	case 29:
		if ((get_u32(haystack, 8) == __constant_htonl(0x00000401)))
			return (IPP2P_BIT * 100 + 55);
		break;
	case 52:
		if (get_u32(haystack,8) == __constant_htonl(0x00000827) && get_u32(haystack,12) == __constant_htonl(0x37502950))
			return (IPP2P_BIT * 100 + 80);
		break;
	default:
		/* this packet does not have a constant size */
		if (packet_len >= 40 && get_u32(haystack, 16) == __constant_htonl(0x00000402) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
			return (IPP2P_BIT * 100 + 56);
		break;
	}

	/* some extra-bitcomet rules:
	 * "d1:" [a|r] "d2:id20:"
	 */
	if (packet_len > 30 && get_u8(haystack, 8) == 'd' && get_u8(haystack, 9) == '1' && get_u8(haystack, 10) == ':' )
	{
		if (get_u8(haystack, 11) == 'a' || get_u8(haystack, 11) == 'r')
		{
			if (memcmp(haystack+12,"d2:id20:",8)==0)
				return (IPP2P_BIT * 100 + 57);
		}
	}

#if 0
	/* bitlord rules */
	/* packetlen must be bigger than 40 */
	/* first 4 bytes are zero */
	if (packet_len > 40 && get_u32(haystack, 8) == 0x00000000)
	{
		/* first rule: 00 00 00 00 01 00 00 xx xx xx xx 00 00 00 00*/
		if (get_u32(haystack, 12) == 0x00000000 && get_u32(haystack, 16) == 0x00010000 && get_u32(haystack, 24) == 0x00000000 )
			return (IPP2P_BIT * 100 + 71);
		/* 00 01 00 00 0d 00 00 xx xx xx xx 00 00 00 00*/
		if (get_u32(haystack, 12) == 0x00000001 && get_u32(haystack, 16) == 0x000d0000 && get_u32(haystack, 24) == 0x00000000 )
			return (IPP2P_BIT * 100 + 71);
	}
#endif
	return 0;
}/*udp_search_bit*/
/*
 * tcp_sack - scan the TCP options of a segment for SACK blocks and
 * record the highest right edge seen.
 *
 * @skb:     the segment
 * @dataoff: offset of the TCP header within the skb
 * @tcph:    decoded TCP header (doff gives the option length)
 * @sack:    in/out: highest SACK right edge (host byte order); only
 *           raised, never lowered
 */
static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
		     struct tcphdr *tcph, __u32 *sack)
{
	/* options occupy at most 40 bytes (60-byte max header) */
	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
	unsigned char *ptr;
	int length = (tcph->doff*4) - sizeof(struct tcphdr);
	__u32 tmp;

	if (!length)
		return;

	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
				 length, buff);
	BUG_ON(ptr == NULL);

	/* Fast path for timestamp-only option.
	 * BUGFIX: this compared length against TCPOLEN_TSTAMP_ALIGNED*4
	 * (= 48), which exceeds the 40-byte option-space maximum, so the
	 * fast path could never trigger.  The aligned timestamp option is
	 * TCPOLEN_TSTAMP_ALIGNED (12) bytes: NOP, NOP, TIMESTAMP, len. */
	if (length == TCPOLEN_TSTAMP_ALIGNED
	    && *(__be32 *)ptr == __constant_htonl((TCPOPT_NOP << 24)
						  | (TCPOPT_NOP << 16)
						  | (TCPOPT_TIMESTAMP << 8)
						  | TCPOLEN_TIMESTAMP))
		return;

	while (length > 0) {
		int opcode = *ptr++;
		int opsize, i;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			/* BUGFIX: without this guard, a lone trailing
			 * opcode byte made us read opsize past the end
			 * of the option block */
			if (length < 2)
				return;
			opsize = *ptr++;
			if (opsize < 2) /* "silly options" */
				return;
			if (opsize > length)
				/* BUGFIX: was "break", which left length
				 * untouched and kept scanning beyond the
				 * option block; a truncated option must
				 * terminate parsing entirely */
				return;

			if (opcode == TCPOPT_SACK
			    && opsize >= (TCPOLEN_SACK_BASE
					  + TCPOLEN_SACK_PERBLOCK)
			    && !((opsize - TCPOLEN_SACK_BASE)
				 % TCPOLEN_SACK_PERBLOCK)) {
				/* the right edge is the second 32-bit word
				 * of each 8-byte SACK block */
				for (i = 0; i < (opsize - TCPOLEN_SACK_BASE);
				     i += TCPOLEN_SACK_PERBLOCK) {
					tmp = ntohl(*((__be32 *)(ptr+i)+1));
					if (after(tmp, *sack))
						*sack = tmp;
				}
				return;
			}
			ptr += opsize - 2;
			length -= opsize;
		}
	}
}
/*
 * ip6_xmit - transmit an IPv6 packet for a socket.
 *
 * Pushes any extension headers from @opt (reallocating headroom if
 * needed -- the original skb is freed in that case), builds the IPv6
 * header from @fl, and hands the packet to netfilter/output.  Packets
 * larger than the path MTU are dropped with an ICMPV6_PKT_TOOBIG sent
 * back to ourselves.  Returns 0/NF_HOOK result on success, -ENOBUFS
 * or -EMSGSIZE on failure.
 */
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, struct ipv6_txoptions *opt)
{
	struct ipv6_pinfo * np = sk ? &sk->net_pinfo.af_inet6 : NULL;
	struct in6_addr *first_hop = fl->nl_u.ip6_u.daddr;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8  proto = fl->proto;
	int seg_len = skb->len;
	int hlimit;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + ((dst->dev->hard_header_len + 15)&~15);

		if (skb_headroom(skb) < head_room) {
			/* the original skb is always freed here; the copy
			 * (if any) takes over */
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			kfree_skb(skb);
			skb = skb2;
			if (skb == NULL)
				return -ENOBUFS;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			/* may redirect first_hop (e.g. routing header) */
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */
	/* version 6 in the top nibble, combined with the flow label */
	*(u32*)hdr = __constant_htonl(0x60000000) | fl->fl6_flowlabel;
	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		/* socket gave no hop limit: use the route's default */
		hlimit = ((struct rt6_info*)dst)->rt6i_hoplimit;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, fl->nl_u.ip6_u.saddr);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	if (skb->len <= dst->pmtu) {
		IP6_INC_STATS(Ip6OutRequests);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, ip6_maybe_reroute);
	}

	/* packet exceeds the path MTU: notify ourselves and drop it */
	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst->pmtu, skb->dev);
	kfree_skb(skb);
	return -EMSGSIZE;
}
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <infiniband/ssa_db.h> #include <infiniband/ssa_ipdb.h> #include <asm/byteorder.h> const struct db_table_def ip_def_tbl[] = { { DBT_DEF_VERSION, sizeof(struct db_table_def), DBT_TYPE_DATA, 0, { 0, IPDB_TBL_ID_IPv4, 0 }, "IPv4", __constant_htonl(sizeof(struct ipdb_ipv4)), 0 }, { DBT_DEF_VERSION, sizeof(struct db_table_def), DBT_TYPE_DEF, 0, { 0, IPDB_TBL_ID_MAX + IPDB_TBL_ID_IPv4, 0 }, "IPv4 fields", __constant_htonl(sizeof(struct db_field_def)), __constant_htonl(IPDB_TBL_ID_IPv4) }, { DBT_DEF_VERSION, sizeof(struct db_table_def), DBT_TYPE_DATA, 0, { 0, IPDB_TBL_ID_IPv6, 0 }, "IPv6", __constant_htonl(sizeof(struct ipdb_ipv6)), 0 }, { DBT_DEF_VERSION, sizeof(struct db_table_def), DBT_TYPE_DEF, 0, { 0, IPDB_TBL_ID_MAX + IPDB_TBL_ID_IPv6, 0 }, "IPv6 fields", __constant_htonl(sizeof(struct db_field_def)), __constant_htonl(IPDB_TBL_ID_IPv6) }, { DBT_DEF_VERSION, sizeof(struct db_table_def), DBT_TYPE_DATA, 0, { 0, IPDB_TBL_ID_NAME, 0 }, "NAME", __constant_htonl(sizeof(struct ipdb_name)), 0 }, { DBT_DEF_VERSION, sizeof(struct db_table_def), DBT_TYPE_DEF, 0, { 0, IPDB_TBL_ID_MAX + IPDB_TBL_ID_NAME, 0 }, "NAME fields", __constant_htonl(sizeof(struct db_field_def)), __constant_htonl(IPDB_TBL_ID_NAME)
/*Search for UDP eDonkey/eMule/Kad commands*/
/*
 * udp_search_edk - classify a UDP payload as eDonkey / eMule / Kad /
 * Vagaa traffic.  The first payload byte selects the protocol family
 * (0xf1 Vagaa-extended, 0xe3 eDonkey, 0xe4 eMule/Kad); the second byte
 * is the opcode.
 *
 * @t:          start of the UDP payload
 * @packet_len: payload length in bytes
 *
 * Returns IPP2P_EDK * 100 + <sub-rule id> on a match, 0 otherwise.
 */
int udp_search_edk (unsigned char *t, int packet_len)
{
	/* Vagaa */
	if(packet_len == 4 && get_u32(t,0) == __constant_htonl(0xff0a0000))
		return (IPP2P_EDK * 100 + 42);
	if(packet_len == 8 && get_u16(t,0) == __constant_htons(0xff0a) && get_u16(t,4) == __constant_htons(0x0c02))
		return (IPP2P_EDK * 100 + 47);
	if(memcmp(t+12,"POST / HTTP/1.1",15) == 0 && memcmp(t+29,"Host: vagaa.com",15) == 0 && memcmp(t+46,"VAGAA-OPERATION:",16) == 0)
		return (IPP2P_EDK * 100 + 48);

	switch (*t) {
	case 0xf1:
	{
		if ( get_u16(t,3) == __constant_htons(0x0000) ) {
			/* Search Result */
			if(packet_len == 22 && *(t+1) == 0x11)
				return (IPP2P_EDK * 100 + 40);
			if(packet_len == 32 && *(t+1) == 0x00)
				return (IPP2P_EDK * 100 + 41);
			/* Identify Reply */
			if(packet_len == 26 && *(t+1) == 0x15)
				return (IPP2P_EDK * 100 + 43);
			/* Identify Ack */
			if(packet_len == 27 && *(t+1) == 0x16)
				return (IPP2P_EDK * 100 + 44);
			if(packet_len == 6 && *(t+1) == 0x01)
				return (IPP2P_EDK * 100 + 45);
			if(packet_len == 144 && *(t+1) == 0x8b)
				return (IPP2P_EDK * 100 + 46);
		}
		break;
	}
	case 0xe3:
	{	/*edonkey*/
		switch (*(t+1)) {
		/* client -> server status request */
		case 0x96:
			if (packet_len > 5) return ((IPP2P_EDK * 100) + 50);
			break;
		/* server -> client status request */
		case 0x97:
			if (packet_len > 33) return ((IPP2P_EDK * 100) + 51);
			break;
		/* server description request */
		/* e3 2a ff f0 .. | size == 6 */
		case 0xa2:
			if ( (packet_len > 5) && ( get_u16(t,2) == __constant_htons(0xfff0) ) ) return ((IPP2P_EDK * 100) + 52);
			break;
		/* server description response */
		/* e3 a3 ff f0 .. | size > 40 && size < 200 */
		//case 0xa3: return ((IPP2P_EDK * 100) + 53);
		//	break;
		case 0x9a:
			if (packet_len > 17) return ((IPP2P_EDK * 100) + 54);
			break;
		case 0x9b:
			if (packet_len > 24) return ((IPP2P_EDK * 100) + 56);
			break;
		case 0x92:
			if (packet_len > 9) return ((IPP2P_EDK * 100) + 55);
			break;
		}
		break;
	}
	case 0xe4:
	{	/* eMule extended / Kad opcodes */
		switch (*(t+1)) {
		case 0x01:
			if (packet_len == 2) return ((IPP2P_EDK * 100) + 70);
			break;
		/* e4 19 .. Firewall Connection ACK */
		case 0x19:
			if (packet_len > 21 ) return ((IPP2P_EDK * 100) + 71);
			break;
		/* e4 20 .. | size == 43 */
		case 0x20:
			if ((packet_len > 33) && (*(t+2) != 0x00) && (*(t+33) != 0x00)) return ((IPP2P_EDK * 100) + 60);
			break;
		case 0x21:
			if ((packet_len > 33) && (*(t+2) != 0x00) && (*(t+33) != 0x00)) return ((IPP2P_EDK * 100) + 60);
			break;
		/* e4 00 .. 00 | size == 35 ? */
		case 0x00:
			if ((packet_len > 26) && (*(t+26) == 0x00)) return ((IPP2P_EDK * 100) + 61);
			break;
		/* e4 10 .. 00 | size == 35 ? Search Info */
		case 0x10:
			if ((packet_len > 26) && (*(t+26) == 0x00)) return ((IPP2P_EDK * 100) + 62);
			break;
		/* e4 11 .. Search Result */
		case 0x11:
			if ((packet_len > 26) && (*(t+26) == 0x00)) return ((IPP2P_EDK * 100) + 62);
			break;
		/* e4 18 .. 00 | size == 35 ? */
		case 0x18:
			if ((packet_len > 26) && (*(t+26) == 0x00)) return ((IPP2P_EDK * 100) + 63);
			break;
		/* e4 52 .. | size = 44 */
		case 0x52:
			if (packet_len > 35 ) return ((IPP2P_EDK * 100) + 64);
			break;
		/* e4 58 .. | size == 6 */
		case 0x58:
			if (packet_len > 5 ) return ((IPP2P_EDK * 100) + 65);
			break;
		/* e4 59 .. | size == 2 */
		case 0x59:
			if (packet_len > 1 )return ((IPP2P_EDK * 100) + 66);
			break;
		/* e4 28 .. | packet_len == 52,77,102,127... */
		case 0x28:
			if (((packet_len-44) % 25) == 0) return ((IPP2P_EDK * 100) + 67);
			break;
		case 0x29:
			if (((packet_len-44) % 25) == 0) return ((IPP2P_EDK * 100) + 67);
			break;
		/* e4 50 xx xx | size == 4 */
		case 0x50:
			if (packet_len > 3) return ((IPP2P_EDK * 100) + 68);
			break;
		/* e4 40 xx xx | size == 48 */
		case 0x40:
			if (packet_len > 47) return ((IPP2P_EDK * 100) + 69);
			break;
		}
		break;
	}
	} /* end of switch (*t) */
	return 0;
}/*udp_search_edk*/
void qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked) { int rval; uint32_t cnt; uint32_t risc_address; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; uint32_t __iomem *dmp_reg; uint32_t *iter_reg; uint16_t __iomem *mbx_reg; unsigned long flags; struct qla25xx_fw_dump *fw; uint32_t ext_mem_cnt; void *nxt; risc_address = ext_mem_cnt = 0; flags = 0; if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); if (!ha->fw_dump) { qla_printk(KERN_WARNING, ha, "No buffer available for dump!!!\n"); goto qla25xx_fw_dump_failed; } if (ha->fw_dumped) { qla_printk(KERN_WARNING, ha, "Firmware has been previously dumped (%p) -- ignoring " "request...\n", ha->fw_dump); goto qla25xx_fw_dump_failed; } fw = &ha->fw_dump->isp.isp25; qla2xxx_prep_dump(ha, ha->fw_dump); ha->fw_dump->version = __constant_htonl(2); fw->host_status = htonl(RD_REG_DWORD(®->host_status)); /* Pause RISC. */ rval = qla24xx_pause_risc(reg); if (rval != QLA_SUCCESS) goto qla25xx_fw_dump_failed_0; /* Host/Risc registers. */ iter_reg = fw->host_risc_reg; iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg); qla24xx_read_window(reg, 0x7010, 16, iter_reg); /* PCIe registers. */ WRT_REG_DWORD(®->iobase_addr, 0x7C00); RD_REG_DWORD(®->iobase_addr); WRT_REG_DWORD(®->iobase_window, 0x01); dmp_reg = ®->iobase_c4; fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++)); fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window)); WRT_REG_DWORD(®->iobase_window, 0x00); RD_REG_DWORD(®->iobase_window); /* Host interface registers. */ dmp_reg = ®->flash_addr; for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); /* Disable interrupts. */ WRT_REG_DWORD(®->ictrl, 0); RD_REG_DWORD(®->ictrl); /* Shadow registers. 
*/ WRT_REG_DWORD(®->iobase_addr, 0x0F70); RD_REG_DWORD(®->iobase_addr); WRT_REG_DWORD(®->iobase_select, 0xB0000000); fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); WRT_REG_DWORD(®->iobase_select, 0xB0100000); fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); WRT_REG_DWORD(®->iobase_select, 0xB0200000); fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); WRT_REG_DWORD(®->iobase_select, 0xB0300000); fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); WRT_REG_DWORD(®->iobase_select, 0xB0400000); fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); WRT_REG_DWORD(®->iobase_select, 0xB0500000); fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); WRT_REG_DWORD(®->iobase_select, 0xB0600000); fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); WRT_REG_DWORD(®->iobase_select, 0xB0700000); fw->shadow_reg[7] = htonl(RD_REG_DWORD(®->iobase_sdata)); WRT_REG_DWORD(®->iobase_select, 0xB0800000); fw->shadow_reg[8] = htonl(RD_REG_DWORD(®->iobase_sdata)); WRT_REG_DWORD(®->iobase_select, 0xB0900000); fw->shadow_reg[9] = htonl(RD_REG_DWORD(®->iobase_sdata)); WRT_REG_DWORD(®->iobase_select, 0xB0A00000); fw->shadow_reg[10] = htonl(RD_REG_DWORD(®->iobase_sdata)); /* RISC I/O register. */ WRT_REG_DWORD(®->iobase_addr, 0x0010); fw->risc_io_reg = htonl(RD_REG_DWORD(®->iobase_window)); /* Mailbox registers. */ mbx_reg = ®->mailbox0; for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); /* Transfer sequence registers. 
*/ iter_reg = fw->xseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); qla24xx_read_window(reg, 0xBF70, 16, iter_reg); iter_reg = fw->xseq_0_reg; iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg); qla24xx_read_window(reg, 0xBFE0, 16, iter_reg); qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); /* Receive sequence registers. */ iter_reg = fw->rseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); qla24xx_read_window(reg, 0xFF70, 16, iter_reg); iter_reg = fw->rseq_0_reg; iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); /* Auxiliary sequence registers. 
*/ iter_reg = fw->aseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); qla24xx_read_window(reg, 0xB070, 16, iter_reg); iter_reg = fw->aseq_0_reg; iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); /* Command DMA registers. */ qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); /* Queues. */ iter_reg = fw->req0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); dmp_reg = ®->iobase_q; for (cnt = 0; cnt < 7; cnt++) *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); iter_reg = fw->resp0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); dmp_reg = ®->iobase_q; for (cnt = 0; cnt < 7; cnt++) *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); iter_reg = fw->req1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); dmp_reg = ®->iobase_q; for (cnt = 0; cnt < 7; cnt++) *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); /* Transmit DMA registers. 
*/ iter_reg = fw->xmt0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); qla24xx_read_window(reg, 0x7610, 16, iter_reg); iter_reg = fw->xmt1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); qla24xx_read_window(reg, 0x7630, 16, iter_reg); iter_reg = fw->xmt2_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); qla24xx_read_window(reg, 0x7650, 16, iter_reg); iter_reg = fw->xmt3_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); qla24xx_read_window(reg, 0x7670, 16, iter_reg); iter_reg = fw->xmt4_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); qla24xx_read_window(reg, 0x7690, 16, iter_reg); qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); /* Receive DMA registers. */ iter_reg = fw->rcvt0_data_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); qla24xx_read_window(reg, 0x7710, 16, iter_reg); iter_reg = fw->rcvt1_data_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); qla24xx_read_window(reg, 0x7730, 16, iter_reg); /* RISC registers. */ iter_reg = fw->risc_gp_reg; iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); qla24xx_read_window(reg, 0x0F70, 16, iter_reg); /* Local memory controller registers. 
*/ iter_reg = fw->lmc_reg; iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); qla24xx_read_window(reg, 0x3070, 16, iter_reg); /* Fibre Protocol Module registers. */ iter_reg = fw->fpm_hdw_reg; iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); qla24xx_read_window(reg, 0x40B0, 16, iter_reg); /* Frame Buffer registers. 
*/ iter_reg = fw->fb_hdw_reg; iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); qla24xx_read_window(reg, 0x6F00, 16, iter_reg); rval = qla24xx_soft_reset(ha); if (rval != QLA_SUCCESS) goto qla25xx_fw_dump_failed_0; rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), fw->ext_mem, &nxt); if (rval != QLA_SUCCESS) goto qla25xx_fw_dump_failed_0; nxt = qla2xxx_copy_queues(ha, nxt); if (ha->eft) memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size)); qla25xx_fw_dump_failed_0: if (rval != QLA_SUCCESS) { qla_printk(KERN_WARNING, ha, "Failed to dump firmware (%x)!!!\n", rval); ha->fw_dumped = 0; } else { qla_printk(KERN_INFO, ha, "Firmware dump saved to temp buffer (%ld/%p).\n", ha->host_no, ha->fw_dump); ha->fw_dumped = 1; } qla25xx_fw_dump_failed: if (!hardware_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); }
/*
 * sit_add_v4_addrs - derive IPv6 addresses for a SIT (IPv6-in-IPv4) device
 * from the host's IPv4 addresses and install them as permanent addresses.
 *
 * The device's dev_addr (an IPv4 address) is embedded into the low 32 bits
 * of an otherwise-zero in6_addr.  Point-to-point devices get a link-local
 * (fe80::/10) address; others get an IPv4-compatible address.
 * Must be called with the RTNL held (asserted below).
 */
static void sit_add_v4_addrs(struct inet6_dev *idev)
{
	struct inet6_ifaddr * ifp;
	struct in6_addr addr;
	struct net_device *dev;
	int scope;

	ASSERT_RTNL();

	/* Build ::<v4-addr> from the device's 4-byte hardware address. */
	memset(&addr, 0, sizeof(struct in6_addr));
	memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);

	if (idev->dev->flags&IFF_POINTOPOINT) {
		/* fe80::/10 link-local prefix for ptp tunnels */
		addr.s6_addr32[0] = __constant_htonl(0xfe800000);
		scope = IFA_LINK;
	} else {
		scope = IPV6_ADDR_COMPATv4;
	}

	if (addr.s6_addr32[3]) {
		/* Device has a concrete v4 address: add the single /128 and
		 * mark it non-tentative (no DAD on tunnels), then notify. */
		ifp = ipv6_add_addr(idev, &addr, 128, scope, IFA_F_PERMANENT);
		if (ifp) {
			spin_lock_bh(&ifp->lock);
			ifp->flags &= ~IFA_F_TENTATIVE;
			spin_unlock_bh(&ifp->lock);
			ipv6_ifa_notify(RTM_NEWADDR, ifp);
			in6_ifa_put(ifp);
		}
		return;
	}

	/* No address on the tunnel device itself: walk every UP device in
	 * the system and map each of its IPv4 addresses.  Walking dev_base
	 * directly is safe here because the RTNL is held. */
	for (dev = dev_base; dev != NULL; dev = dev->next) {
		struct in_device * in_dev = __in_dev_get(dev);
		if (in_dev && (dev->flags & IFF_UP)) {
			struct in_ifaddr * ifa;
			int flag = scope;

			for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
				int plen;

				addr.s6_addr32[3] = ifa->ifa_local;

				/* Link-scoped v4 addresses are never mapped. */
				if (ifa->ifa_scope == RT_SCOPE_LINK)
					continue;
				if (ifa->ifa_scope >= RT_SCOPE_HOST) {
					/* Host-scoped addresses only make
					 * sense on non-ptp devices. */
					if (idev->dev->flags&IFF_POINTOPOINT)
						continue;
					flag |= IFA_HOST;
				}

				/* fe80::/10 for ptp, ::v4/96 otherwise. */
				if (idev->dev->flags&IFF_POINTOPOINT)
					plen = 10;
				else
					plen = 96;

				ifp = ipv6_add_addr(idev, &addr, plen, flag,
						    IFA_F_PERMANENT);
				if (ifp) {
					spin_lock_bh(&ifp->lock);
					ifp->flags &= ~IFA_F_TENTATIVE;
					spin_unlock_bh(&ifp->lock);
					ipv6_ifa_notify(RTM_NEWADDR, ifp);
					in6_ifa_put(ifp);
				}
			}
		}
	}
}
int tcpdiag_bc_run(char *bc, int len, struct sock *sk) { while (len > 0) { int yes = 1; struct tcpdiag_bc_op *op = (struct tcpdiag_bc_op*)bc; switch (op->code) { case TCPDIAG_BC_NOP: break; case TCPDIAG_BC_JMP: yes = 0; break; case TCPDIAG_BC_S_GE: yes = (sk->num >= op[1].no); break; case TCPDIAG_BC_S_LE: yes = (sk->num <= op[1].no); break; case TCPDIAG_BC_D_GE: yes = (ntohs(sk->dport) >= op[1].no); break; case TCPDIAG_BC_D_LE: yes = (ntohs(sk->dport) <= op[1].no); break; case TCPDIAG_BC_AUTO: yes = !(sk->userlocks&SOCK_BINDPORT_LOCK); break; case TCPDIAG_BC_S_COND: case TCPDIAG_BC_D_COND: { struct tcpdiag_hostcond *cond = (struct tcpdiag_hostcond*)(op+1); u32 *addr; if (cond->port != -1 && cond->port != (op->code == TCPDIAG_BC_S_COND ? sk->num : ntohs(sk->dport))) { yes = 0; break; } if (cond->prefix_len == 0) break; #ifdef CONFIG_IPV6 if (sk->family == AF_INET6) { if (op->code == TCPDIAG_BC_S_COND) addr = (u32*)&sk->net_pinfo.af_inet6.rcv_saddr; else addr = (u32*)&sk->net_pinfo.af_inet6.daddr; } else #endif { if (op->code == TCPDIAG_BC_S_COND) addr = &sk->rcv_saddr; else addr = &sk->daddr; } if (bitstring_match(addr, cond->addr, cond->prefix_len)) break; if (sk->family == AF_INET6 && cond->family == AF_INET) { if (addr[0] == 0 && addr[1] == 0 && addr[2] == __constant_htonl(0xffff) && bitstring_match(addr+3, cond->addr, cond->prefix_len)) break; } yes = 0; break; } } if (yes) { len -= op->yes; bc += op->yes; } else { len -= op->no; bc += op->no; } } return (len == 0); }
int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6; struct in6_addr *daddr; struct in6_addr saddr; struct dst_entry *dst; struct flowi fl; struct ip6_flowlabel *flowlabel = NULL; int addr_type; int err; if (usin->sin6_family == AF_INET) { err = udp_connect(sk, uaddr, addr_len); goto ipv4_connected; } if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; if (usin->sin6_family != AF_INET6) return -EAFNOSUPPORT; fl.fl6_flowlabel = 0; if (np->sndflow) { fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK; if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) { flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel); if (flowlabel == NULL) return -EINVAL; ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst); } } addr_type = ipv6_addr_type(&usin->sin6_addr); if (addr_type == IPV6_ADDR_ANY) { /* * connect to self */ usin->sin6_addr.s6_addr[15] = 0x01; } daddr = &usin->sin6_addr; if (addr_type == IPV6_ADDR_MAPPED) { struct sockaddr_in sin; sin.sin_family = AF_INET; sin.sin_addr.s_addr = daddr->s6_addr32[3]; sin.sin_port = usin->sin6_port; err = udp_connect(sk, (struct sockaddr*) &sin, sizeof(sin)); ipv4_connected: if (err < 0) return err; ipv6_addr_set(&np->daddr, 0, 0, __constant_htonl(0x0000ffff), sk->daddr); if(ipv6_addr_any(&np->saddr)) { ipv6_addr_set(&np->saddr, 0, 0, __constant_htonl(0x0000ffff), sk->saddr); } if(ipv6_addr_any(&np->rcv_saddr)) { ipv6_addr_set(&np->rcv_saddr, 0, 0, __constant_htonl(0x0000ffff), sk->rcv_saddr); } return 0; } if (addr_type&IPV6_ADDR_LINKLOCAL) { if (addr_len >= sizeof(struct sockaddr_in6) && usin->sin6_scope_id) { if (sk->bound_dev_if && sk->bound_dev_if != usin->sin6_scope_id) { fl6_sock_release(flowlabel); return -EINVAL; } sk->bound_dev_if = usin->sin6_scope_id; } /* Connect to link-local address requires an interface */ if (sk->bound_dev_if == 0) return -EINVAL; } ipv6_addr_copy(&np->daddr, daddr); np->flow_label = 
fl.fl6_flowlabel; sk->dport = usin->sin6_port; /* * Check for a route to destination an obtain the * destination cache for it. */ fl.proto = IPPROTO_UDP; fl.fl6_dst = &np->daddr; fl.fl6_src = &saddr; fl.oif = sk->bound_dev_if; fl.uli_u.ports.dport = sk->dport; fl.uli_u.ports.sport = sk->sport; if (flowlabel) { if (flowlabel->opt && flowlabel->opt->srcrt) { struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt; fl.fl6_dst = rt0->addr; } } else if (np->opt && np->opt->srcrt) { struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt; fl.fl6_dst = rt0->addr; } dst = ip6_route_output(sk, &fl); if ((err = dst->error) != 0) { dst_release(dst); fl6_sock_release(flowlabel); return err; } ip6_dst_store(sk, dst, fl.fl6_dst); /* get the source adddress used in the apropriate device */ err = ipv6_get_saddr(dst, daddr, &saddr); if (err == 0) { if(ipv6_addr_any(&np->saddr)) ipv6_addr_copy(&np->saddr, &saddr); if(ipv6_addr_any(&np->rcv_saddr)) { ipv6_addr_copy(&np->rcv_saddr, &saddr); sk->rcv_saddr = LOOPBACK4_IPV6; } sk->state = TCP_ESTABLISHED; } fl6_sock_release(flowlabel); return err; }
/*
 * udpv6_recvmsg - receive one datagram on a UDPv6 socket.
 *
 * Copies up to @len payload bytes (past the UDP header) into the caller's
 * iovec, verifying the checksum on the copy path when the hardware has not
 * already done so.  Fills in msg_name with the sender's address (mapping
 * v4 senders to ::ffff:a.b.c.d).  Returns the number of bytes copied, or
 * a negative errno.
 */
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, int len, int noblock, int flags, int *addr_len)
{
	struct sk_buff *skb;
	int copied, err;

	/* Report the size of the address we may write to msg_name. */
	if (addr_len)
		*addr_len=sizeof(struct sockaddr_in6);

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	/* Payload length excludes the UDP header; truncate to the
	 * caller's buffer and flag it. */
	copied = skb->len - sizeof(struct udphdr);
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
		/* Checksum already verified (e.g. by hardware). */
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
					      msg->msg_iov, copied);
	} else if (msg->msg_flags&MSG_TRUNC) {
		/* Truncated read: we cannot checksum while copying, so
		 * verify the whole packet first. */
		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len,
							   skb->csum)))
			goto csum_copy_err;
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
					      msg->msg_iov, copied);
	} else {
		/* Verify the checksum while copying; -EINVAL signals a
		 * checksum failure from the combined routine. */
		err = skb_copy_and_csum_datagram_iovec(skb,
						       sizeof(struct udphdr),
						       msg->msg_iov);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *) msg->msg_name;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = skb->h.uh->source;
		sin6->sin6_flowinfo = 0;
		sin6->sin6_scope_id = 0;

		if (skb->protocol == __constant_htons(ETH_P_IP)) {
			/* v4 sender: present as a v4-mapped v6 address. */
			ipv6_addr_set(&sin6->sin6_addr, 0, 0,
				      __constant_htonl(0xffff),
				      skb->nh.iph->saddr);
			if (sk->protinfo.af_inet.cmsg_flags)
				ip_cmsg_recv(msg, skb);
		} else {
			memcpy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr,
			       sizeof(struct in6_addr));

			if (sk->net_pinfo.af_inet6.rxopt.all)
				datagram_recv_ctl(sk, msg, skb);

			/* Link-local senders also need the arrival
			 * interface as the scope id. */
			if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
				struct inet6_skb_parm *opt =
					(struct inet6_skb_parm *) skb->cb;
				sin6->sin6_scope_id = opt->iif;
			}
		}
	}
	err = copied;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	/* Clear queue. */
	if (flags&MSG_PEEK) {
		/* MSG_PEEK left the corrupt datagram on the queue;
		 * unlink and free it so it is not seen again. */
		int clear = 0;
		spin_lock_irq(&sk->receive_queue.lock);
		if (skb == skb_peek(&sk->receive_queue)) {
			__skb_unlink(skb, &sk->receive_queue);
			clear = 1;
		}
		spin_unlock_irq(&sk->receive_queue.lock);
		if (clear)
			kfree_skb(skb);
	}

	/* Error for blocking case is chosen to masquerade
	   as some normal condition. */
	err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
	UDP6_INC_STATS_USER(UdpInErrors);
	goto out_free;
}
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <ssa_smdb.h> #include <asm/byteorder.h> static const struct db_table_def def_tbl[] = { { DBT_DEF_VERSION, sizeof(struct db_table_def), DBT_TYPE_DATA, 0, { 0, SSA_TABLE_ID_SUBNET_OPTS, 0 }, "SUBNET OPTS", __constant_htonl(sizeof(struct ep_subnet_opts_tbl_rec)), 0 }, { DBT_DEF_VERSION, sizeof(struct db_table_def), DBT_TYPE_DEF, 0, { 0, SSA_TABLE_ID_SUBNET_OPTS_FIELD_DEF, 0 }, "SUBNET OPTS fields", __constant_htonl(sizeof(struct db_field_def)), __constant_htonl(SSA_TABLE_ID_SUBNET_OPTS) }, { DBT_DEF_VERSION, sizeof(struct db_table_def), DBT_TYPE_DATA, 0, { 0, SSA_TABLE_ID_GUID_TO_LID, 0 }, "GUID to LID", __constant_htonl(sizeof(struct ep_guid_to_lid_tbl_rec)), 0 }, { DBT_DEF_VERSION, sizeof(struct db_table_def), DBT_TYPE_DEF, 0, { 0, SSA_TABLE_ID_GUID_TO_LID_FIELD_DEF, 0 }, "GUID to LID fields", __constant_htonl(sizeof(struct db_field_def)), __constant_htonl(SSA_TABLE_ID_GUID_TO_LID) }, { DBT_DEF_VERSION, sizeof(struct db_table_def), DBT_TYPE_DATA, 0, { 0, SSA_TABLE_ID_NODE, 0 }, "NODE", __constant_htonl(sizeof(struct ep_node_tbl_rec)), 0 }, { DBT_DEF_VERSION, sizeof(struct db_table_def), DBT_TYPE_DEF, 0, { 0, SSA_TABLE_ID_NODE_FIELD_DEF, 0 }, "NODE fields", __constant_htonl(sizeof(struct db_field_def)), __constant_htonl(SSA_TABLE_ID_NODE) }, { DBT_DEF_VERSION, sizeof(struct db_table_def), DBT_TYPE_DATA, 0, { 0, SSA_TABLE_ID_LINK, 0 }, "LINK", __constant_htonl(sizeof(struct ep_link_tbl_rec)), 0 }, { DBT_DEF_VERSION, sizeof(struct db_table_def), DBT_TYPE_DEF, 0, { 0, SSA_TABLE_ID_LINK_FIELD_DEF, 0 }, "LINK fields", __constant_htonl(sizeof(struct 
db_field_def)), __constant_htonl(SSA_TABLE_ID_LINK) }, { DBT_DEF_VERSION, sizeof(struct db_table_def), DBT_TYPE_DATA, 0, { 0, SSA_TABLE_ID_PORT, 0 },
void * packet_init(struct sk_buff *skb, const struct net_device *out) { struct sk_buff *newskb = NULL; struct ethhdr *ethh = NULL; struct tcphdr *tcph = NULL; struct iphdr *iph = NULL; unsigned char *pdata = NULL; struct tcphdr *old_tcph = NULL; struct iphdr *old_iph = NULL; struct ethhdr *old_ethh = NULL; struct net_device *dev = NULL; unsigned short old_data_len = 0; unsigned char dest[6] = {0x08, 0x00, 0x27, 0xc4, 0xe6, 0x3b}; unsigned char src[6] = {0x52, 0x54, 0x00, 0x12, 0x35, 0x02}; char pkt302[] = "HTTP/1.1 302 Found\r\n" "Location: http://www.126.com/\r\n" "Content-Length: 0\r\n" "Connection: close\r\n\r\n"; //char pkt301[] = //"HTTP/1.1 301 Moved Permanently\r\n" //"Location: http://www.jd.com\r\n" //"Content-Type: text/html; charset=iso-8859-1\r\n" //"Content-length: 0\r\n" //"Cache-control: no-cache\r\n" //"\r\n"; // malloc skb space // l4 // l3 // l2 // return newskb dev = dev_get_by_name(&init_net, "eth0"); { // old skb info old_tcph = (struct tcphdr *)skb_transport_header(skb); old_iph = (struct iphdr *)skb_network_header(skb); old_ethh = (struct ethhdr *)skb_mac_header(skb); } newskb = alloc_skb(strlen(pkt302) + sizeof(struct tcphdr) + sizeof(struct iphdr) + ETH_HLEN + 2, GFP_ATOMIC); if (newskb == NULL) { return NULL; } skb_reserve(skb, 2); // skb padding newskb->dev = out; //newskb->dev = dev; newskb->pkt_type = PACKET_HOST; newskb->protocol = __constant_htons(ETH_P_IP); newskb->ip_summed = CHECKSUM_NONE; newskb->priority = 0; skb_put(newskb, sizeof(struct ethhdr)); skb_reset_mac_header(newskb); skb_put(newskb, sizeof(struct iphdr)); skb_set_network_header(newskb, sizeof(struct ethhdr)); skb_put(newskb, sizeof(struct tcphdr)); skb_set_transport_header(newskb, sizeof(struct iphdr) + sizeof(struct ethhdr)); //skb_put(newskb, sizeof(struct ethhdr) + sizeof(struct iphdr) + sizeof(struct tcphdr)); pdata = skb_put(newskb, strlen(pkt302)); if (pdata != NULL) { memcpy(pdata, pkt302, strlen(pkt302)); } { //fill l4 tcph = (struct tcphdr 
*)skb_transport_header(newskb); memset(tcph, 0, sizeof(struct tcphdr)); tcph->source = old_tcph->dest; tcph->dest = old_tcph->source; //tcph->seq = old_tcph->seq; //tcph->ack_seq = old_tcph->ack_seq; old_data_len = __constant_ntohs(old_iph->tot_len) - old_iph->ihl * 4 - old_tcph->doff * 4; printk("---------old seq : %08x\r\n", old_tcph->seq); printk("---------old ack : %08x\r\n", old_tcph->ack_seq); printk("---------old data_len : %d\r\n", old_data_len); tcph->seq = old_tcph->ack_seq; //tcph->ack_seq = __constant_htonl(__constant_ntohl(old_tcph->seq) + strlen(pkt302)); tcph->ack_seq = __constant_htonl(__constant_ntohl(old_tcph->seq) + old_data_len); tcph->doff = 5; tcph->psh = 1; tcph->ack = 1; tcph->window = old_tcph->window; newskb->csum = 0; tcph->check = 0; tcph->urg_ptr = 0; } { //fill l3 iph = (struct iphdr *)skb_network_header(newskb); memset(iph, 0, sizeof(struct iphdr)); iph->version = 4; iph->ihl = sizeof(struct iphdr)>>2; iph->frag_off = __constant_htons(0x4000); iph->protocol = IPPROTO_TCP; iph->tos = 0; iph->daddr = old_iph->saddr; iph->saddr = old_iph->daddr; iph->ttl = 0x40; iph->tot_len = __constant_htons(strlen(pkt302) + sizeof(struct tcphdr) + sizeof(struct iphdr)); iph->check = 0; iph->check = ip_fast_csum(iph, iph->ihl); } newskb->csum = skb_checksum (newskb, ETH_HLEN + iph->ihl*4, strlen(pkt302) + sizeof(struct tcphdr), 0); tcph->check = csum_tcpudp_magic (iph->saddr, iph->daddr, strlen(pkt302) + sizeof(struct tcphdr), IPPROTO_TCP, newskb->csum); { ethh = (struct ethhdr *)skb_mac_header(newskb); //fill l2 if (skb->mac_len > 0) { memcpy(ethh->h_dest, old_ethh->h_source, ETH_ALEN); memcpy(ethh->h_source, old_ethh->h_dest, ETH_ALEN); } else { //memcpy(ethh->h_dest, old_ethh->h_source, ETH_ALEN); //memcpy(ethh->h_source, old_ethh->h_dest, ETH_ALEN); //memset(ethh->h_dest, 0, ETH_ALEN); //memset(ethh->h_source, 0, ETH_ALEN); memcpy(ethh->h_dest, dest, ETH_ALEN); memcpy(ethh->h_source, src, ETH_ALEN); } ethh->h_proto = __constant_htons (ETH_P_IP); } 
//skb_pull(newskb, ETH_HLEN); return newskb; }
/*
 * Update out-in sack seqs, and also correct th->check
 *
 * Walks the TCP options of an out->in segment and rewrites every SACK
 * block edge by subtracting the syn-proxy sequence delta, folding each
 * 32-bit change into the TCP checksum via syn_proxy_seq_csum_update().
 */
static inline void syn_proxy_filter_opt_outin(struct tcphdr *th, struct ip_vs_seq *sp_seq)
{
	unsigned char *ptr;
	/* Length of the option area beyond the fixed TCP header. */
	int length = (th->doff * 4) - sizeof(struct tcphdr);
	__be32 *tmp;
	__u32 old_ack_seq;

	if (!length)
		return;

	ptr = (unsigned char *)(th + 1);

	/* Fast path for timestamp-only option */
	if (length == TCPOLEN_TSTAMP_ALIGNED &&
	    *(__be32 *) ptr == __constant_htonl((TCPOPT_NOP << 24) |
						(TCPOPT_NOP << 16) |
						(TCPOPT_TIMESTAMP << 8) |
						TCPOLEN_TIMESTAMP))
		return;

	while (length > 0) {
		int opcode = *ptr++;
		int opsize, i;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2)	/* "silly options" */
				return;
			if (opsize > length)
				break;	/* don't parse partial options */
			if (opcode == TCPOPT_SACK &&
			    opsize >= (TCPOLEN_SACK_BASE +
				       TCPOLEN_SACK_PERBLOCK) &&
			    !((opsize - TCPOLEN_SACK_BASE) %
			      TCPOLEN_SACK_PERBLOCK)) {
				/* Rewrite each SACK block: left edge then
				 * right edge, adjusting the checksum for
				 * every 32-bit word changed. */
				for (i = 0; i < (opsize - TCPOLEN_SACK_BASE);
				     i += TCPOLEN_SACK_PERBLOCK) {
					tmp = (__be32 *) (ptr + i);
					old_ack_seq = ntohl(*tmp);
					*tmp = htonl((__u32) (old_ack_seq -
							      sp_seq->delta));
					syn_proxy_seq_csum_update(th,
								  htonl
								  (old_ack_seq),
								  *tmp);
					IP_VS_DBG(6, "syn_proxy_filter_opt_outin: sack_left_seq %u => %u, delta = %u \n", old_ack_seq, ntohl(*tmp), sp_seq->delta);
					tmp++;
					old_ack_seq = ntohl(*tmp);
					*tmp = htonl((__u32) (old_ack_seq -
							      sp_seq->delta));
					syn_proxy_seq_csum_update(th,
								  htonl
								  (old_ack_seq),
								  *tmp);
					IP_VS_DBG(6, "syn_proxy_filter_opt_outin: sack_right_seq %u => %u, delta = %u \n", old_ack_seq, ntohl(*tmp), sp_seq->delta);
				}
				/* SACK appears at most once; done. */
				return;
			}
			/* Skip over any other option (opcode and length
			 * bytes were already consumed above). */
			ptr += opsize - 2;
			length -= opsize;
		}
	}
}
/*
 * cisco_rx - receive handler for Cisco HDLC control traffic.
 *
 * Validates the HDLC header, then dispatches on the protocol field:
 * SYS_INFO packets are dropped, keepalives are parsed further (address
 * request/reply and keepalive request).  The skb is always consumed.
 * Returns NET_RX_SUCCESS or NET_RX_DROP.
 */
static int cisco_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct cisco_state *st = state(hdlc);
	struct hdlc_header *data = (struct hdlc_header*)skb->data;
	struct cisco_packet *cisco_data;
	struct in_device *in_dev;
	__be32 addr, mask;

	/* Sanity: must at least hold an HDLC header with a known
	 * address byte. */
	if (skb->len < sizeof(struct hdlc_header))
		goto rx_error;
	if (data->address != CISCO_MULTICAST &&
	    data->address != CISCO_UNICAST)
		goto rx_error;

	switch(ntohs(data->protocol)) {
	case CISCO_SYS_INFO:
		/* Packet is not needed, drop it. */
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;

	case CISCO_KEEPALIVE:
		/* Keepalives come in exactly two sizes. */
		if ((skb->len != sizeof(struct hdlc_header) +
		     CISCO_PACKET_LEN) &&
		    (skb->len != sizeof(struct hdlc_header) +
		     CISCO_BIG_PACKET_LEN)) {
			printk(KERN_INFO "%s: Invalid length of Cisco control"
			       " packet (%d bytes)\n", dev->name, skb->len);
			goto rx_error;
		}

		cisco_data = (struct cisco_packet*)(skb->data + sizeof
						    (struct hdlc_header));

		switch(ntohl (cisco_data->type)) {
		case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
			/* Peer asked for our IPv4 address: answer with the
			 * address labelled with this device's name. */
			in_dev = dev->ip_ptr;
			addr = 0;
			mask = __constant_htonl(~0); /* is the mask correct? */

			if (in_dev != NULL) {
				struct in_ifaddr **ifap = &in_dev->ifa_list;

				while (*ifap != NULL) {
					if (strcmp(dev->name,
						   (*ifap)->ifa_label) == 0) {
						addr = (*ifap)->ifa_local;
						mask = (*ifap)->ifa_mask;
						break;
					}
					ifap = &(*ifap)->ifa_next;
				}

				cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
						     addr, mask);
			}
			dev_kfree_skb_any(skb);
			return NET_RX_SUCCESS;

		case CISCO_ADDR_REPLY:
			printk(KERN_INFO "%s: Unexpected Cisco IP address "
			       "reply\n", dev->name);
			goto rx_error;

		case CISCO_KEEPALIVE_REQ:
			/* Track the peer's sequence numbers; declare the
			 * link up once the peer echoes our tx sequence. */
			spin_lock(&st->lock);
			st->rxseq = ntohl(cisco_data->par1);
			if (st->request_sent &&
			    ntohl(cisco_data->par2) == st->txseq) {
				st->last_poll = jiffies;
				if (!st->up) {
					u32 sec, min, hrs, days;
					/* Peer uptime arrives in ms. */
					sec = ntohl(cisco_data->time) / 1000;
					min = sec / 60; sec -= min * 60;
					hrs = min / 60; min -= hrs * 60;
					days = hrs / 24; hrs -= days * 24;
					printk(KERN_INFO "%s: Link up (peer "
					       "uptime %ud%uh%um%us)\n",
					       dev->name, days, hrs, min, sec);
					netif_dormant_off(dev);
					st->up = 1;
				}
			}
			spin_unlock(&st->lock);

			dev_kfree_skb_any(skb);
			return NET_RX_SUCCESS;
		} /* switch(keepalive type) */
	} /* switch(protocol) */

	printk(KERN_INFO "%s: Unsupported protocol %x\n", dev->name,
	       ntohs(data->protocol));
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;

rx_error:
	dev_to_hdlc(dev)->stats.rx_errors++; /* Mark error */
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}