static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) { struct sock *sk=icmp_socket->sk; struct ipcm_cookie ipc; struct rtable *rt = (struct rtable*)skb->dst; u32 daddr; if (ip_options_echo(&icmp_param->replyopts, skb)) return; icmp_param->icmph.checksum=0; icmp_param->csum=0; icmp_out_count(icmp_param->icmph.type); sk->ip_tos = skb->nh.iph->tos; daddr = ipc.addr = rt->rt_src; ipc.opt = &icmp_param->replyopts; if (ipc.opt->srr) daddr = icmp_param->replyopts.faddr; if (ip_route_output(&rt, daddr, rt->rt_spec_dst, RT_TOS(skb->nh.iph->tos), 0)) return; ip_build_xmit(sk, icmp_glue_bits, icmp_param, icmp_param->data_len+sizeof(struct icmphdr), &ipc, rt, MSG_DONTWAIT); ip_rt_put(rt); }
/*
 * Transmit a pre-assembled ICMP message described by 'icmp_param'
 * from 'saddr' to 'daddr' with the given type-of-service byte.
 */
static void icmp_build_xmit(struct icmp_bxm *icmp_param, __u32 saddr, __u32 daddr, __u8 tos)
{
	struct sock *sk = icmp_socket.data;

	/* Reset both checksum accumulators; they are filled in during
	 * transmission (presumably by icmp_glue_bits — not visible here). */
	icmp_param->icmph.checksum = 0;
	icmp_param->csum = 0;
	icmp_out_count(icmp_param->icmph.type);

	sk->ip_tos = tos;
	ip_build_xmit(sk, icmp_glue_bits, icmp_param,
		      icmp_param->data_len + sizeof(struct icmphdr),
		      daddr, saddr, &icmp_param->replyopts, 0,
		      IPPROTO_ICMP, 1);
}
/*
 * Transmit a raw IPv4 packet whose IP header was supplied by userspace
 * (IP_HDRINCL).  'from' is an iovec holding 'length' bytes of header +
 * payload.  On success, ownership of the route in *rtp is transferred
 * into the skb and *rtp is cleared; on failure the caller keeps it.
 *
 * Returns 0 on success or a negative errno.  -ENOBUFS is suppressed
 * when the socket has not enabled IP_RECVERR.
 */
static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, void *from,
			   size_t length, struct rtable **rtp,
			   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct iphdr *iph;
	struct sk_buff *skb;
	unsigned int iphlen;
	int err;
	struct rtable *rt = *rtp;
	int hlen, tlen;

	/* Header-include sockets never fragment: refuse oversized packets. */
	if (length > rt->dst.dev->mtu) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       rt->dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (flags & MSG_PROBE)
		goto out;

	hlen = LL_RESERVED_SPACE(rt->dst.dev);
	tlen = rt->dst.dev->needed_tailroom;
	skb = sock_alloc_send_skb(sk,
				  length + hlen + tlen + 15,
				  flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto error;
	skb_reserve(skb, hlen);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb_dst_set(skb, &rt->dst);
	*rtp = NULL;	/* route reference now owned by the skb */

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	/* FIX: reserve room for the entire user-supplied packet, not just
	 * sizeof(*iph) — the copy below writes 'length' bytes and would
	 * otherwise run past the skb's tail. */
	skb_put(skb, length);

	skb->ip_summed = CHECKSUM_NONE;

	skb->transport_header = skb->network_header;
	err = -EFAULT;
	/* FIX: memcpy_fromiovecend() returns 0 on success; the previous
	 * negated test ("!memcpy_fromiovecend2(...)") took the error path
	 * on a successful copy and continued with garbage on failure. */
	if (memcpy_fromiovecend((void *)iph, from, 0, length))
		goto error_free;

	iphlen = iph->ihl * 4;

	/*
	 * We don't want to modify the ip header, but we do need to
	 * be sure that it won't cause problems later along the network
	 * stack.  Specifically we want to make sure that iph->ihl is a
	 * sane value.  If ihl points beyond the length of the buffer passed
	 * in, reject the frame as invalid.
	 */
	err = -EINVAL;
	if (iphlen > length)
		goto error_free;

	if (iphlen >= sizeof(*iph)) {
		/* Header is at least minimally sized: fill in the fields
		 * the kernel owns and recompute the checksum. */
		if (!iph->saddr)
			iph->saddr = fl4->saddr;
		iph->check = 0;
		iph->tot_len = htons(length);
		if (!iph->id)
			ip_select_ident(skb, &rt->dst, NULL);

		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	}
	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		      rt->dst.dev, dst_output);
	if (err > 0)
		err = net_xmit_errno(err);
	if (err)
		goto error;
out:
	return 0;

error_free:
	kfree_skb(skb);
error:
	IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	if (err == -ENOBUFS && !inet->recverr)
		err = 0;
	return err;
}
/*
 * 2.6-era variant of raw_send_hdrinc(): transmit a raw IPv4 packet whose
 * IP header was built by userspace (IP_HDRINCL).  Unlike the flowi4
 * variant, the route 'rt' is only borrowed here — the skb takes its own
 * reference via dst_clone() — so the caller remains responsible for
 * releasing it.  Headers with iphlen < sizeof(*iph) or iphlen > length
 * are transmitted unmodified; otherwise the kernel fills in saddr (if
 * zero), tot_len and id, and recomputes the header checksum.  -ENOBUFS
 * is suppressed unless the socket enabled IP_RECVERR.
 * NOTE(review): this duplicates the flowi4-based raw_send_hdrinc above —
 * presumably kept for comparison; confirm which version the build uses.
 */
static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, struct rtable *rt, unsigned int flags) { struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); struct iphdr *iph; struct sk_buff *skb; unsigned int iphlen; int err; if (length > rt->u.dst.dev->mtu) { ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, rt->u.dst.dev->mtu); return -EMSGSIZE; } if (flags&MSG_PROBE) goto out; skb = sock_alloc_send_skb(sk, length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto error; skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev)); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; skb_dst_set(skb, dst_clone(&rt->u.dst)); skb_reset_network_header(skb); iph = ip_hdr(skb); skb_put(skb, length); skb->ip_summed = CHECKSUM_NONE; skb->transport_header = skb->network_header; err = memcpy_fromiovecend((void *)iph, from, 0, length); if (err) goto error_fault; /* We don't modify invalid header */ iphlen = iph->ihl * 4; if (iphlen >= sizeof(*iph) && iphlen <= length) { if (!iph->saddr) iph->saddr = rt->rt_src; iph->check = 0; iph->tot_len = htons(length); if (!iph->id) ip_select_ident(iph, &rt->u.dst, NULL); iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); } if (iph->protocol == IPPROTO_ICMP) icmp_out_count(net, ((struct icmphdr *) skb_transport_header(skb))->type); err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev, dst_output); if (err > 0) err = net_xmit_errno(err); if (err) goto error; out: return 0; error_fault: err = -EFAULT; kfree_skb(skb); error: IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS); if (err == -ENOBUFS && !inet->recverr) err = 0; return err; }
/*
 * Build and transmit an ICMP error/control message of the given type and
 * code in response to 'skb_in'; 'info' becomes the un.gateway field of the
 * ICMP header (MTU for frag-needed, gateway for redirects, etc. — per type).
 *
 * Replies are suppressed when: the packet was not addressed to this host
 * (pkt_type != PACKET_HOST), it has no route attached, it arrived via
 * broadcast/multicast, it is a non-first fragment, or it is itself an ICMP
 * error (including any unknown ICMP type, treated as an error to avoid
 * error-about-error loops).  Transmission is rate-limited through
 * icmpv4_xrlim_allow() after routing.  Under CONFIG_IP_ROUTE_NAT /
 * CONFIG_IP_MASQUERADE the original addresses are restored first.
 *
 * For error types the TOS is forced to INTERNETCONTROL precedence; the
 * quoted payload is capped so the datagram stays within 576 bytes as the
 * RFC requires.  The route obtained here is released on every exit path
 * after the first ip_route_output() succeeds (note the early 'return'
 * inside the srr re-route keeps that invariant: the old route was already
 * put and the new lookup failed, so there is nothing to release).
 */
void icmp_send(struct sk_buff *skb_in, int type, int code, unsigned long info) { struct iphdr *iph; struct icmphdr *icmph; int room; struct icmp_bxm icmp_param; struct rtable *rt = (struct rtable*)skb_in->dst; struct ipcm_cookie ipc; u32 saddr; u8 tos; /* * Find the original header */ iph = skb_in->nh.iph; /* * No replies to physical multicast/broadcast */ if (skb_in->pkt_type!=PACKET_HOST) return; /* * Now check at the protocol level */ if (!rt) { if (sysctl_ip_always_defrag == 0 && net_ratelimit()) printk(KERN_DEBUG "icmp_send: destinationless packet\n"); return; } if (rt->rt_flags&(RTCF_BROADCAST|RTCF_MULTICAST)) return; /* * Only reply to fragment 0. We byte re-order the constant * mask for efficiency. */ if (iph->frag_off&htons(IP_OFFSET)) return; /* * If we send an ICMP error to an ICMP error a mess would result.. */ if (icmp_pointers[type].error) { /* * We are an error, check if we are replying to an ICMP error */ if (iph->protocol==IPPROTO_ICMP) { icmph = (struct icmphdr *)((char *)iph + (iph->ihl<<2)); /* * Assume any unknown ICMP type is an error. This isn't * specified by the RFC, but think about it.. */ if (icmph->type>NR_ICMP_TYPES || icmp_pointers[icmph->type].error) return; } } /* * Construct source address and options. */ #ifdef CONFIG_IP_ROUTE_NAT /* * Restore original addresses if packet has been translated. */ if (rt->rt_flags&RTCF_NAT && IPCB(skb_in)->flags&IPSKB_TRANSLATED) { iph->daddr = rt->key.dst; iph->saddr = rt->key.src; } #endif #ifdef CONFIG_IP_MASQUERADE if (type==ICMP_DEST_UNREACH && IPCB(skb_in)->flags&IPSKB_MASQUERADED) { ip_fw_unmasq_icmp(skb_in); } #endif saddr = iph->daddr; if (!(rt->rt_flags & RTCF_LOCAL)) saddr = 0; tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL) : iph->tos; /* XXX: use a more aggressive expire for routes created by * this call (not longer than the rate limit timeout). * It could be also worthwhile to not put them into ipv4 * fast routing cache at first. 
Otherwise an attacker can * grow the routing table. */ if (ip_route_output(&rt, iph->saddr, saddr, RT_TOS(tos), 0)) return; if (ip_options_echo(&icmp_param.replyopts, skb_in)) goto ende; /* * Prepare data for ICMP header. */ icmp_param.icmph.type=type; icmp_param.icmph.code=code; icmp_param.icmph.un.gateway = info; icmp_param.icmph.checksum=0; icmp_param.csum=0; icmp_param.data_ptr=iph; icmp_out_count(icmp_param.icmph.type); icmp_socket->sk->ip_tos = tos; ipc.addr = iph->saddr; ipc.opt = &icmp_param.replyopts; if (icmp_param.replyopts.srr) { ip_rt_put(rt); if (ip_route_output(&rt, icmp_param.replyopts.faddr, saddr, RT_TOS(tos), 0)) return; } if (!icmpv4_xrlim_allow(rt, type, code)) goto ende; /* RFC says return as much as we can without exceeding 576 bytes. */ room = rt->u.dst.pmtu; if (room > 576) room = 576; room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen; room -= sizeof(struct icmphdr); icmp_param.data_len=(iph->ihl<<2)+skb_in->len; if (icmp_param.data_len > room) icmp_param.data_len = room; ip_build_xmit(icmp_socket->sk, icmp_glue_bits, &icmp_param, icmp_param.data_len+sizeof(struct icmphdr), &ipc, rt, MSG_DONTWAIT); ende: ip_rt_put(rt); }