int rtmac_vnic_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rtnet_device     *rtdev = *(struct rtnet_device **)netdev_priv(dev);
    struct net_device_stats *stats = &rtdev->mac_priv->vnic_stats;
    struct rtskb_pool       *pool  = &rtdev->mac_priv->vnic_skb_pool;
    struct ethhdr           *ethernet = (struct ethhdr *)skb->data;
    struct rtskb            *rtskb;
    int                     res;
    int                     data_len;

    rtskb = alloc_rtskb((skb->len + sizeof(struct rtmac_hdr) + 15) & ~15, pool);
    if (!rtskb)
        return NETDEV_TX_BUSY;

    rtskb_reserve(rtskb, rtdev->hard_header_len + sizeof(struct rtmac_hdr));

    data_len = skb->len - dev->hard_header_len;
    memcpy(rtskb_put(rtskb, data_len), skb->data + dev->hard_header_len, data_len);

    res = rtmac_add_header(rtdev, ethernet->h_dest, rtskb,
                           ntohs(ethernet->h_proto), RTMAC_FLAG_TUNNEL);
    if (res < 0) {
        stats->tx_dropped++;
        kfree_rtskb(rtskb);
        goto done;
    }

    RTNET_ASSERT(rtdev->mac_disc->nrt_packet_tx != NULL,
                 kfree_rtskb(rtskb); goto done;);
int rtcfg_send_simple_frame(int ifindex, int frame_id, u8 *dest_addr)
{
    struct rtnet_device     *rtdev;
    struct rtskb            *rtskb;
    unsigned int            rtskb_size;
    struct rtcfg_frm_simple *simple_frm;

    rtdev = rtdev_get_by_index(ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    rtskb_size = rtdev->hard_header_len + sizeof(struct rtcfg_frm_simple);

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    simple_frm = (struct rtcfg_frm_simple *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_simple));

    simple_frm->head.id      = frame_id;
    simple_frm->head.version = 0;

    return rtcfg_send_frame(rtskb, rtdev,
                            (dest_addr) ? dest_addr : rtdev->broadcast);
}
int rtcfg_send_ready(int ifindex)
{
    struct rtnet_device    *rtdev;
    struct rtskb           *rtskb;
    unsigned int           rtskb_size;
    struct rtcfg_frm_ready *ready_frm;

    rtdev = rtdev_get_by_index(ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    rtskb_size = rtdev->hard_header_len + sizeof(struct rtcfg_frm_ready);

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    ready_frm = (struct rtcfg_frm_ready *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_ready));

    ready_frm->head.id      = RTCFG_ID_READY;
    ready_frm->head.version = 0;

    return rtcfg_send_frame(rtskb, rtdev, eth_broadcast);
}
int rtcfg_send_ack(int ifindex)
{
    struct rtnet_device      *rtdev;
    struct rtskb             *rtskb;
    unsigned int             rtskb_size;
    struct rtcfg_frm_ack_cfg *ack_frm;

    rtdev = rtdev_get_by_index(ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    rtskb_size = rtdev->hard_header_len + sizeof(struct rtcfg_frm_ack_cfg);

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    ack_frm = (struct rtcfg_frm_ack_cfg *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_ack_cfg));

    ack_frm->head.id      = RTCFG_ID_ACK_CFG;
    ack_frm->head.version = 0;
    ack_frm->ack_len      = htonl(device[ifindex].cfg_offs);

    return rtcfg_send_frame(rtskb, rtdev, device[ifindex].srv_mac_addr);
}
int rtcfg_send_stage_1(struct rtcfg_connection *conn)
{
    struct rtnet_device          *rtdev;
    struct rtskb                 *rtskb;
    unsigned int                 rtskb_size;
    struct rtcfg_frm_stage_1_cfg *stage_1_frm;

    rtdev = rtdev_get_by_index(conn->ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    rtskb_size = rtdev->hard_header_len +
        sizeof(struct rtcfg_frm_stage_1_cfg) + conn->stage1_size +
#ifdef CONFIG_RTNET_RTIPV4
        (((conn->addr_type & RTCFG_ADDR_MASK) == RTCFG_ADDR_IP) ?
            2*RTCFG_ADDRSIZE_IP : 0);
#else /* !CONFIG_RTNET_RTIPV4 */
        0;
#endif /* CONFIG_RTNET_RTIPV4 */

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    stage_1_frm = (struct rtcfg_frm_stage_1_cfg *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_stage_1_cfg));

    stage_1_frm->head.id      = RTCFG_ID_STAGE_1_CFG;
    stage_1_frm->head.version = 0;
    stage_1_frm->addr_type    = conn->addr_type & RTCFG_ADDR_MASK;

#ifdef CONFIG_RTNET_RTIPV4
    if (stage_1_frm->addr_type == RTCFG_ADDR_IP) {
        rtskb_put(rtskb, 2*RTCFG_ADDRSIZE_IP);
        memcpy(stage_1_frm->client_addr, &(conn->addr.ip_addr), 4);

        stage_1_frm = (struct rtcfg_frm_stage_1_cfg *)
            (((u8 *)stage_1_frm) + RTCFG_ADDRSIZE_IP);

        memcpy(stage_1_frm->server_addr, &(rtdev->local_ip), 4);

        stage_1_frm = (struct rtcfg_frm_stage_1_cfg *)
            (((u8 *)stage_1_frm) + RTCFG_ADDRSIZE_IP);
    }
#endif /* CONFIG_RTNET_RTIPV4 */

    stage_1_frm->burstrate = device[conn->ifindex].burstrate;
    stage_1_frm->cfg_len   = htons(conn->stage1_size);

    memcpy(rtskb_put(rtskb, conn->stage1_size), conn->stage1_data,
           conn->stage1_size);

    return rtcfg_send_frame(rtskb, rtdev, conn->mac_addr);
}
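/*
 * Note: the rtcfg_send_* helpers above and further below all share the same
 * prologue: look up the device, allocate a frame from rtcfg_pool, and reserve
 * room for the link-layer header, which rtcfg_send_frame() presumably fills in
 * later. A minimal sketch of that shared pattern follows; rtcfg_alloc_frame()
 * is a hypothetical helper for illustration only, not an existing RTnet
 * function.
 */
static struct rtskb *rtcfg_alloc_frame(struct rtnet_device *rtdev,
                                       unsigned int payload_size)
{
    struct rtskb *rtskb;

    /* room for the hardware header plus the RTcfg frame payload */
    rtskb = alloc_rtskb(rtdev->hard_header_len + payload_size, &rtcfg_pool);
    if (rtskb == NULL)
        return NULL;

    /* keep the header space in front; the RTcfg payload starts behind it */
    rtskb_reserve(rtskb, rtdev->hard_header_len);

    return rtskb;
}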
static int rtmac_vnic_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rtnet_device     *rtdev = (struct rtnet_device *)dev->priv;
    struct net_device_stats *stats = &rtdev->mac_priv->vnic_stats;
    struct rtskb_queue      *pool  = &rtdev->mac_priv->vnic_skb_pool;
    struct ethhdr           *ethernet = (struct ethhdr *)skb->data;
    struct rtskb            *rtskb;
    int                     res;
    int                     data_len;

    rtskb = alloc_rtskb((skb->len + sizeof(struct rtmac_hdr) + 15) & ~15, pool);
    if (!rtskb) {
        stats->tx_dropped++;
        return -ENOMEM;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len + sizeof(struct rtmac_hdr));

    data_len = skb->len - dev->hard_header_len;
    memcpy(rtskb_put(rtskb, data_len), skb->data + dev->hard_header_len, data_len);

    res = rtmac_add_header(rtdev, ethernet->h_dest, rtskb,
                           ntohs(ethernet->h_proto));
    if (res < 0) {
        stats->tx_dropped++;
        kfree_rtskb(rtskb);
        return res;
    }

    RTNET_ASSERT(rtdev->mac_disc->nrt_packet_tx != NULL,
                 kfree_rtskb(rtskb); return -1;);
/* ************************************************************************
 * This function runs in RTAI context.
 * It is called from rtnetproxy_user_srq whenever there is a frame to be
 * sent out. It copies the standard Linux sk_buff to an RTnet buffer and
 * sends it out using RTnet functions.
 * ************************************************************************ */
static inline void send_data_out(struct sk_buff *skb)
{
    struct rtskb     *rtskb;
    struct rt_rtable *rt;

    struct skb_data_format {
        struct ethhdr ethhdr;
        char          reserved[12]; /* Ugly but it works... All the
                                       not-interesting header bytes */
        u32           ip_src;
        u32           ip_dst;
    } __attribute__((packed)); /* Important to have this structure packed!
                                * It represents the ethernet frame on the line
                                * and thus no padding is allowed! */

    struct skb_data_format *pData;
    int rc;

    /* Copy the data from the standard sk_buff to the realtime rtskb:
     * both have the same length. */
    rtskb = alloc_rtskb(skb->len);
    if (NULL == rtskb)
        return;

    memcpy(rtskb->data, skb->data, skb->len);
    rtskb->len = skb->len;

    pData = (struct skb_data_format *)rtskb->data;

    /* Determine the device to use: only IP routing is used here.
     * Non-IP protocols are not supported... */
    rc = rt_ip_route_output(&rt, pData->ip_dst, pData->ip_src);
    if (rc == 0) {
        struct rtnet_device *rtdev = rt->rt_dev;

        rtskb->dst   = rt;
        rtskb->rtdev = rt->rt_dev;

        /* Fill in the ethernet header: there is already space for it,
         * but it contains only zeros => fill it. */
        memcpy(pData->ethhdr.h_source, rtdev->dev_addr, rtdev->addr_len);
        memcpy(pData->ethhdr.h_dest, rt->rt_dst_mac_addr, rtdev->addr_len);

        /* Call the actual transmit function (this function is semaphore
         * protected): */
        rtdev_xmit(rtskb);

        /* The rtskb is freed somewhere deep in the driver...
         * No need to do it here. */
    } else {
        /* Routing failed => free the rtskb here... */
        kfree_rtskb(rtskb);
    }
}
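/*
 * Note on the packed skb_data_format above: it only works because the 14-byte
 * Ethernet header is followed by exactly 12 "uninteresting" IPv4 header bytes
 * before the source and destination addresses (IP header offsets 12 and 16),
 * i.e. it assumes IPv4 without options. The stand-alone user-space sketch
 * below (assuming the Linux uapi headers and a C11 compiler; not part of the
 * RTnet sources) makes those layout assumptions explicit.
 */
#include <assert.h>
#include <stddef.h>
#include <linux/if_ether.h>
#include <linux/ip.h>

int main(void)
{
    /* Ethernet header: 6 + 6 + 2 = 14 bytes */
    static_assert(sizeof(struct ethhdr) == 14, "unexpected ethhdr size");
    /* the 12 reserved bytes cover version/ihl up to the header checksum */
    static_assert(offsetof(struct iphdr, saddr) == 12, "saddr not at offset 12");
    static_assert(offsetof(struct iphdr, daddr) == 16, "daddr not at offset 16");
    return 0;
}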
int rtcfg_send_dead_station(struct rtcfg_connection *conn)
{
    struct rtnet_device           *rtdev;
    struct rtskb                  *rtskb;
    unsigned int                  rtskb_size;
    struct rtcfg_frm_dead_station *dead_station_frm;

    rtdev = rtdev_get_by_index(conn->ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    rtskb_size = rtdev->hard_header_len +
        sizeof(struct rtcfg_frm_dead_station) +
#ifdef CONFIG_RTNET_RTIPV4
        (((conn->addr_type & RTCFG_ADDR_MASK) == RTCFG_ADDR_IP) ?
            RTCFG_ADDRSIZE_IP : 0);
#else /* !CONFIG_RTNET_RTIPV4 */
        0;
#endif /* CONFIG_RTNET_RTIPV4 */

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    dead_station_frm = (struct rtcfg_frm_dead_station *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_dead_station));

    dead_station_frm->head.id      = RTCFG_ID_DEAD_STATION;
    dead_station_frm->head.version = 0;
    dead_station_frm->addr_type    = conn->addr_type & RTCFG_ADDR_MASK;

#ifdef CONFIG_RTNET_RTIPV4
    if (dead_station_frm->addr_type == RTCFG_ADDR_IP) {
        rtskb_put(rtskb, RTCFG_ADDRSIZE_IP);
        memcpy(dead_station_frm->logical_addr, &(conn->addr.ip_addr), 4);

        dead_station_frm = (struct rtcfg_frm_dead_station *)
            (((u8 *)dead_station_frm) + RTCFG_ADDRSIZE_IP);
    }
#endif /* CONFIG_RTNET_RTIPV4 */

    /* Ethernet-specific! */
    memcpy(dead_station_frm->physical_addr, conn->mac_addr, ETH_ALEN);
    memset(&dead_station_frm->physical_addr[ETH_ALEN], 0,
           sizeof(dead_station_frm->physical_addr) - ETH_ALEN);

    return rtcfg_send_frame(rtskb, rtdev, rtdev->broadcast);
}
int rtcfg_send_announce_reply(int ifindex, u8 *dest_mac_addr)
{
    struct rtcfg_device       *rtcfg_dev = &device[ifindex];
    struct rtnet_device       *rtdev;
    struct rtskb              *rtskb;
    unsigned int              rtskb_size;
    struct rtcfg_frm_announce *announce_rpl;

    rtdev = rtdev_get_by_index(ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    rtskb_size = rtdev->hard_header_len + sizeof(struct rtcfg_frm_announce) +
#ifdef CONFIG_RTNET_RTIPV4
        ((rtcfg_dev->spec.clt.addr_type == RTCFG_ADDR_IP) ?
            RTCFG_ADDRSIZE_IP : 0);
#else /* !CONFIG_RTNET_RTIPV4 */
        0;
#endif /* CONFIG_RTNET_RTIPV4 */

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    announce_rpl = (struct rtcfg_frm_announce *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_announce));

    announce_rpl->head.id      = RTCFG_ID_ANNOUNCE_REPLY;
    announce_rpl->head.version = 0;
    announce_rpl->addr_type    = rtcfg_dev->spec.clt.addr_type;

#ifdef CONFIG_RTNET_RTIPV4
    if (announce_rpl->addr_type == RTCFG_ADDR_IP) {
        rtskb_put(rtskb, RTCFG_ADDRSIZE_IP);
        memcpy(announce_rpl->addr, &(rtdev->local_ip), 4);

        announce_rpl = (struct rtcfg_frm_announce *)
            (((u8 *)announce_rpl) + RTCFG_ADDRSIZE_IP);
    }
#endif /* CONFIG_RTNET_RTIPV4 */

    announce_rpl->flags     = rtcfg_dev->flags & RTCFG_FLAG_READY;
    announce_rpl->burstrate = 0; /* padding field */

    return rtcfg_send_frame(rtskb, rtdev, dest_mac_addr);
}
int rtcfg_send_stage_2(struct rtcfg_connection *conn, int send_data)
{
    struct rtnet_device          *rtdev;
    struct rtcfg_device          *rtcfg_dev = &device[conn->ifindex];
    struct rtskb                 *rtskb;
    unsigned int                 rtskb_size;
    struct rtcfg_frm_stage_2_cfg *stage_2_frm;
    size_t                       total_size;
    size_t                       frag_size;

    rtdev = rtdev_get_by_index(conn->ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    if (send_data) {
        total_size = conn->stage2_file->size;
        frag_size  = MIN(rtdev->mtu - sizeof(struct rtcfg_frm_stage_2_cfg),
                         total_size);
    } else {
        total_size = 0;
        frag_size  = 0;
    }

    rtskb_size = rtdev->hard_header_len +
        sizeof(struct rtcfg_frm_stage_2_cfg) + frag_size;

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    stage_2_frm = (struct rtcfg_frm_stage_2_cfg *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_stage_2_cfg));

    stage_2_frm->head.id          = RTCFG_ID_STAGE_2_CFG;
    stage_2_frm->head.version     = 0;
    stage_2_frm->flags            = rtcfg_dev->flags;
    stage_2_frm->stations         = htonl(rtcfg_dev->other_stations);
    stage_2_frm->heartbeat_period = htons(0);
    stage_2_frm->cfg_len          = htonl(total_size);

    if (send_data)
        memcpy(rtskb_put(rtskb, frag_size), conn->stage2_file->buffer,
               frag_size);
    conn->cfg_offs = frag_size;

    return rtcfg_send_frame(rtskb, rtdev, conn->mac_addr);
}
int rtcfg_send_announce_new(int ifindex)
{
    struct rtcfg_device       *rtcfg_dev = &device[ifindex];
    struct rtnet_device       *rtdev;
    struct rtskb              *rtskb;
    unsigned int              rtskb_size;
    struct rtcfg_frm_announce *announce_new;

    rtdev = rtdev_get_by_index(ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    rtskb_size = rtdev->hard_header_len + sizeof(struct rtcfg_frm_announce) +
#ifdef CONFIG_RTNET_RTIPV4
        (((rtcfg_dev->spec.clt.addr_type & RTCFG_ADDR_MASK) == RTCFG_ADDR_IP) ?
            RTCFG_ADDRSIZE_IP : 0);
#else /* !CONFIG_RTNET_RTIPV4 */
        0;
#endif /* CONFIG_RTNET_RTIPV4 */

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    announce_new = (struct rtcfg_frm_announce *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_announce));

    announce_new->head.id      = RTCFG_ID_ANNOUNCE_NEW;
    announce_new->head.version = 0;
    announce_new->addr_type    = rtcfg_dev->spec.clt.addr_type;

#ifdef CONFIG_RTNET_RTIPV4
    if (announce_new->addr_type == RTCFG_ADDR_IP) {
        rtskb_put(rtskb, RTCFG_ADDRSIZE_IP);
        memcpy(announce_new->addr, &(rtdev->local_ip), 4);

        announce_new = (struct rtcfg_frm_announce *)
            (((u8 *)announce_new) + RTCFG_ADDRSIZE_IP);
    }
#endif /* CONFIG_RTNET_RTIPV4 */

    announce_new->flags     = rtcfg_dev->flags;
    announce_new->burstrate = rtcfg_dev->burstrate;

    return rtcfg_send_frame(rtskb, rtdev, rtdev->broadcast);
}
int rtcfg_send_announce_new(int ifindex)
{
    struct rtcfg_device       *rtcfg_dev = &device[ifindex];
    struct rtnet_device       *rtdev;
    struct rtskb              *rtskb;
    unsigned int              rtskb_size;
    struct rtcfg_frm_announce *announce_new;

    rtdev = rtdev_get_by_index(ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    rtskb_size = rtdev->hard_header_len + sizeof(struct rtcfg_frm_announce) +
        ((rtcfg_dev->addr_type == RTCFG_ADDR_IP) ? RTCFG_ADDRSIZE_IP : 0);

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    announce_new = (struct rtcfg_frm_announce *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_announce));

    announce_new->head.id      = RTCFG_ID_ANNOUNCE_NEW;
    announce_new->head.version = 0;
    announce_new->addr_type    = rtcfg_dev->addr_type;

    if (announce_new->addr_type == RTCFG_ADDR_IP) {
        rtskb_put(rtskb, RTCFG_ADDRSIZE_IP);
        *(u32 *)announce_new->addr = rtdev->local_ip;

        announce_new = (struct rtcfg_frm_announce *)
            (((u8 *)announce_new) + RTCFG_ADDRSIZE_IP);
    }

    announce_new->flags     = rtcfg_dev->flags;
    announce_new->burstrate = rtcfg_dev->burstrate;

    return rtcfg_send_frame(rtskb, rtdev, rtdev->broadcast);
}
int rtcfg_send_stage_2_frag(struct rtcfg_connection *conn)
{
    struct rtnet_device               *rtdev;
    struct rtskb                      *rtskb;
    unsigned int                      rtskb_size;
    struct rtcfg_frm_stage_2_cfg_frag *stage_2_frm;
    size_t                            frag_size;

    rtdev = rtdev_get_by_index(conn->ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    frag_size = MIN(rtdev->get_mtu(rtdev, RTCFG_SKB_PRIO) -
                        sizeof(struct rtcfg_frm_stage_2_cfg_frag),
                    conn->stage2_file->size - conn->cfg_offs);

    rtskb_size = rtdev->hard_header_len +
        sizeof(struct rtcfg_frm_stage_2_cfg_frag) + frag_size;

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    stage_2_frm = (struct rtcfg_frm_stage_2_cfg_frag *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_stage_2_cfg_frag));

    stage_2_frm->head.id      = RTCFG_ID_STAGE_2_CFG_FRAG;
    stage_2_frm->head.version = 0;
    stage_2_frm->frag_offs    = htonl(conn->cfg_offs);

    memcpy(rtskb_put(rtskb, frag_size),
           conn->stage2_file->buffer + conn->cfg_offs, frag_size);
    conn->cfg_offs += frag_size;

    return rtcfg_send_frame(rtskb, rtdev, conn->mac_addr);
}
/***
 * Slow path for fragmented packets
 */
int rt_ip_build_xmit_slow(struct rtsocket *sk,
        int getfrag(const void *, char *, unsigned int, unsigned int),
        const void *frag, unsigned length, struct rt_rtable *rt, int msg_flags)
{
    int err, next_err;
    struct rtskb *skb;
    struct rtskb *next_skb;
    struct iphdr *iph;

    struct rtnet_device *rtdev = rt->rt_dev;
    int mtu = rtdev->mtu;

    unsigned int fragdatalen;
    unsigned int offset = 0;
    u16 msg_rt_ip_id;
    unsigned long flags;
    unsigned int rtskb_size;
    int hh_len = (rtdev->hard_header_len + 15) & ~15;

#define FRAGHEADERLEN sizeof(struct iphdr)

    fragdatalen = ((mtu - FRAGHEADERLEN) & ~7);

    /* Store id in local variable */
    rtos_spin_lock_irqsave(&rt_ip_id_lock, flags);
    msg_rt_ip_id = rt_ip_id_count++;
    rtos_spin_unlock_irqrestore(&rt_ip_id_lock, flags);

    rtskb_size = mtu + hh_len + 15;

    /* Preallocate first rtskb */
    skb = alloc_rtskb(rtskb_size, &sk->skb_pool);
    if (skb == NULL)
        return -ENOBUFS;

    for (offset = 0; offset < length; offset += fragdatalen) {
        int fraglen; /* The length (IP, including ip-header) of this
                        very fragment */
        __u16 frag_off = offset >> 3;

        next_err = 0;
        if (offset >= length - fragdatalen) {
            /* last fragment */
            fraglen  = FRAGHEADERLEN + length - offset;
            next_skb = NULL;
        } else {
            fraglen = FRAGHEADERLEN + fragdatalen;
            frag_off |= IP_MF;

            next_skb = alloc_rtskb(rtskb_size, &sk->skb_pool);
            if (next_skb == NULL) {
                frag_off &= ~IP_MF; /* cut the chain */
                next_err = -ENOBUFS;
            }
        }

        rtskb_reserve(skb, hh_len);

        skb->dst      = rt;
        skb->rtdev    = rt->rt_dev;
        skb->nh.iph   = iph = (struct iphdr *) rtskb_put(skb, fraglen);
        skb->priority = sk->priority;

        iph->version  = 4;
        iph->ihl      = 5; /* 20 byte header - no options */
        iph->tos      = sk->prot.inet.tos;
        iph->tot_len  = htons(fraglen);
        iph->id       = htons(msg_rt_ip_id);
        iph->frag_off = htons(frag_off);
        iph->ttl      = 255;
        iph->protocol = sk->protocol;
        iph->saddr    = rt->rt_src;
        iph->daddr    = rt->rt_dst;
        iph->check    = 0; /* required! */
        iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

        if ( (err = getfrag(frag, ((char *)iph) + iph->ihl*4, offset,
                            fraglen - FRAGHEADERLEN)) )
            goto error;

        if (!(rtdev->hard_header) ||
            (rtdev->hard_header(skb, rtdev, ETH_P_IP, rt->rt_dst_mac_addr,
                                rtdev->dev_addr, skb->len) < 0))
            goto error;

        err = rtdev_xmit(skb);

        skb = next_skb;

        if (err != 0) {
            err = -EAGAIN;
            goto error;
        }

        if (next_err != 0)
            return next_err;
    }
    return 0;

error:
    if (skb != NULL) {
        kfree_rtskb(skb);

        if (next_skb != NULL)
            kfree_rtskb(next_skb);
    }
    return err;
}
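/*
 * Worked example of the fragment sizing in rt_ip_build_xmit_slow() above:
 * with mtu = 1500 and no IP options, fragdatalen = (1500 - 20) & ~7 = 1480,
 * and frag_off counts in 8-byte units. The stand-alone sketch below (plain
 * user-space C, values chosen for illustration; not part of the RTnet
 * sources) prints the fragments generated for a 4000-byte payload.
 */
#include <stdio.h>

#define FRAGHEADERLEN 20      /* sizeof(struct iphdr), no options */
#define IP_MF         0x2000  /* "more fragments" flag */

int main(void)
{
    unsigned int mtu = 1500, length = 4000, offset;
    unsigned int fragdatalen = (mtu - FRAGHEADERLEN) & ~7; /* 1480 */

    for (offset = 0; offset < length; offset += fragdatalen) {
        unsigned short frag_off = offset >> 3; /* offset in 8-byte units */
        unsigned int   payload;

        if (offset >= length - fragdatalen) {
            payload = length - offset;         /* last fragment */
        } else {
            payload = fragdatalen;
            frag_off |= IP_MF;                 /* more fragments follow */
        }
        printf("offset=%4u  frag_off=0x%04x  payload=%u\n",
               offset, frag_off, payload);
    }
    return 0;
}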
/*
 * Fast path for unfragmented packets.
 */
int rt_ip_build_xmit(struct rtsocket *sk,
        int getfrag(const void *, char *, unsigned int, unsigned int),
        const void *frag, unsigned length, struct rt_rtable *rt, int flags)
{
    int err = 0;
    struct rtskb *skb;
    int df;
    struct iphdr *iph;

    struct rtnet_device *rtdev = rt->rt_dev;

    /*
     * Try the simple case first. This leaves fragmented frames, and by
     * choice RAW frames within 20 bytes of maximum size (rare), to the
     * long path.
     */
    length += sizeof(struct iphdr);

    df = htons(IP_DF);

    {
        int hh_len = (rtdev->hard_header_len + 15) & ~15;

        skb = alloc_rtskb(length + hh_len + 15);
        if (skb == NULL)
            goto no_rtskb;
        rtskb_reserve(skb, hh_len);
    }

    skb->dst    = rt;
    skb->rtdev  = rt->rt_dev;
    skb->nh.iph = iph = (struct iphdr *) rtskb_put(skb, length);

    iph->version  = 4;
    iph->ihl      = 5;
    iph->tos      = sk->tos;
    iph->tot_len  = htons(length);
    iph->id       = htons(rt_ip_id_count++);
    iph->frag_off = df;
    iph->ttl      = 255;
    iph->protocol = sk->protocol;
    iph->saddr    = rt->rt_src;
    iph->daddr    = rt->rt_dst;
    iph->check    = 0;
    iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

    if ( (err = getfrag(frag, ((char *)iph) + iph->ihl*4, 0,
                        length - iph->ihl*4)) )
        goto error;

    if ( !(rtdev->hard_header) ) {
        goto error;
    } else if (rtdev->hard_header(skb, rtdev, ETH_P_IP, rt->rt_dst_mac_addr,
                                  rtdev->dev_addr, skb->len) < 0) {
        goto error;
    }

    if ((skb->rtdev->rtmac) &&                  /* These code lines are crappy! */
        (skb->rtdev->rtmac->disc_type) &&
        (skb->rtdev->rtmac->disc_type->rt_packet_tx)) {
        err = skb->rtdev->rtmac->disc_type->rt_packet_tx(skb, skb->rtdev);
    } else {
        err = rtdev_xmit(skb);
    }

    if (err)
        return -EAGAIN;
    else
        return 0;

error:
    kfree_rtskb(skb);

no_rtskb:
    return err;
}
/***
 * rt_packet_sendmsg
 */
int rt_packet_sendmsg(struct rtsocket *sock, const struct msghdr *msg,
                      size_t len, int flags)
{
    struct sockaddr_ll  *sll = (struct sockaddr_ll *)msg->msg_name;
    struct rtnet_device *rtdev;
    struct rtskb        *rtskb;
    int                 ret = 0;

    if (flags & MSG_OOB) /* Mirror BSD error message compatibility */
        return -EOPNOTSUPP;

    /* a lot of sanity checks */
    if ((flags & ~MSG_DONTWAIT) ||
        (sll == NULL) ||
        (msg->msg_namelen != sizeof(struct sockaddr_ll)) ||
        ((sll->sll_family != AF_PACKET) && (sll->sll_family != AF_UNSPEC)) ||
        (sll->sll_ifindex <= 0))
        return -EINVAL;

    if ((rtdev = rtdev_get_by_index(sll->sll_ifindex)) == NULL)
        return -ENODEV;

    rtskb = alloc_rtskb(rtdev->hard_header_len + len, &sock->skb_pool);
    if (rtskb == NULL) {
        ret = -ENOBUFS;
        goto out;
    }

    if ((len < 0) || (len > rtdev->mtu)) {
        ret = -EMSGSIZE;
        goto err;
    }

    if (sll->sll_halen != rtdev->addr_len) {
        ret = -EINVAL;
        goto err;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    rt_memcpy_fromkerneliovec(rtskb_put(rtskb, len), msg->msg_iov, len);

    rtskb->rtdev    = rtdev;
    rtskb->priority = sock->priority;

    if (rtdev->hard_header) {
        ret = rtdev->hard_header(rtskb, rtdev, ntohs(sll->sll_protocol),
                                 sll->sll_addr, rtdev->dev_addr, rtskb->len);
        if (ret < 0)
            goto err;
    }

    if ((rtdev->flags & IFF_UP) != 0) {
        if (rtdev_xmit(rtskb) == 0)
            ret = len;
        else
            ret = -EAGAIN;
    } else {
        ret = -ENETDOWN;
        goto err;
    }

out:
    rtdev_dereference(rtdev);
    return ret;

err:
    kfree_rtskb(rtskb);
    rtdev_dereference(rtdev);
    return ret;
}
/***
 * Fast path for unfragmented packets.
 */
int rt_ip_build_xmit(struct rtsocket *sk,
        int getfrag(const void *, char *, unsigned int, unsigned int),
        const void *frag, unsigned length, struct rt_rtable *rt, int msg_flags)
{
    int err = 0;
    struct rtskb *skb;
    struct iphdr *iph;
    int hh_len;
    u16 msg_rt_ip_id;
    unsigned long flags;

    struct rtnet_device *rtdev = rt->rt_dev;

    /*
     * Try the simple case first. This leaves fragmented frames, and by
     * choice RAW frames within 20 bytes of maximum size (rare), to the
     * long path.
     */
    length += sizeof(struct iphdr);

    if (length > rtdev->mtu)
        return rt_ip_build_xmit_slow(sk, getfrag, frag,
                                     length - sizeof(struct iphdr), rt,
                                     msg_flags);

    /* Store id in local variable */
    rtos_spin_lock_irqsave(&rt_ip_id_lock, flags);
    msg_rt_ip_id = rt_ip_id_count++;
    rtos_spin_unlock_irqrestore(&rt_ip_id_lock, flags);

    hh_len = (rtdev->hard_header_len + 15) & ~15;

    skb = alloc_rtskb(length + hh_len + 15, &sk->skb_pool);
    if (skb == NULL)
        return -ENOBUFS;

    rtskb_reserve(skb, hh_len);

    skb->dst      = rt;
    skb->rtdev    = rt->rt_dev;
    skb->nh.iph   = iph = (struct iphdr *) rtskb_put(skb, length);
    skb->priority = sk->priority;

    iph->version  = 4;
    iph->ihl      = 5;
    iph->tos      = sk->prot.inet.tos;
    iph->tot_len  = htons(length);
    iph->id       = htons(msg_rt_ip_id);
    iph->frag_off = htons(IP_DF);
    iph->ttl      = 255;
    iph->protocol = sk->protocol;
    iph->saddr    = rt->rt_src;
    iph->daddr    = rt->rt_dst;
    iph->check    = 0; /* required! */
    iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

    if ( (err = getfrag(frag, ((char *)iph) + iph->ihl*4, 0,
                        length - iph->ihl*4)) )
        goto error;

    if (!(rtdev->hard_header) ||
        (rtdev->hard_header(skb, rtdev, ETH_P_IP, rt->rt_dst_mac_addr,
                            rtdev->dev_addr, skb->len) < 0))
        goto error;

    err = rtdev_xmit(skb);

    if (err)
        return -EAGAIN;
    else
        return 0;

error:
    kfree_rtskb(skb);
    return err;
}
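/*
 * Both IP transmit paths above round the reserved headroom up to a multiple
 * of 16: hh_len = (hard_header_len + 15) & ~15. For Ethernet (14-byte hard
 * header) this yields 16 bytes of headroom, so the IP header alignment does
 * not depend on the odd 14-byte link-layer header length. A trivial
 * stand-alone illustration (values assumed; not RTnet code):
 */
#include <stdio.h>

int main(void)
{
    unsigned int hard_header_len = 14; /* ETH_HLEN, assumed */
    unsigned int hh_len = (hard_header_len + 15) & ~15;

    printf("reserved headroom: %u bytes\n", hh_len); /* prints 16 */
    return 0;
}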
/***
 * arp_send: Create and send an arp packet. If (dest_hw == NULL),
 * we create a broadcast message.
 */
void rt_arp_send(int type, int ptype, u32 dest_ip, struct rtnet_device *rtdev,
                 u32 src_ip, unsigned char *dest_hw, unsigned char *src_hw,
                 unsigned char *target_hw)
{
    struct net_device *dev = dev_get_by_rtdev(rtdev);
    struct rtskb *skb;
    struct arphdr *arp;
    unsigned char *arp_ptr;

    if ( dev->flags & IFF_NOARP )
        return;

    if ( !(skb = alloc_rtskb(sizeof(struct arphdr) + 2*(dev->addr_len + 4) +
                             dev->hard_header_len + 15)) )
        return;

    rtskb_reserve(skb, (dev->hard_header_len + 15) & ~15);

    skb->nh.raw = skb->data;
    arp = (struct arphdr *) rtskb_put(skb, sizeof(struct arphdr) +
                                      2*(dev->addr_len + 4));

    skb->rtdev    = rtdev;
    skb->protocol = __constant_htons(ETH_P_ARP);

    if (src_hw == NULL)
        src_hw = dev->dev_addr;
    if (dest_hw == NULL)
        dest_hw = dev->broadcast;

    /*
     * Fill the device header for the ARP frame
     */
    if (rtdev->hard_header &&
        rtdev->hard_header(skb, rtdev, ptype, dest_hw, src_hw, skb->len) < 0)
        goto out;

    arp->ar_hrd = htons(dev->type);
    arp->ar_pro = __constant_htons(ETH_P_IP);
    arp->ar_hln = dev->addr_len;
    arp->ar_pln = 4;
    arp->ar_op  = htons(type);

    arp_ptr = (unsigned char *)(arp + 1);

    memcpy(arp_ptr, src_hw, dev->addr_len);
    arp_ptr += dev->addr_len;

    memcpy(arp_ptr, &src_ip, 4);
    arp_ptr += 4;

    if (target_hw != NULL)
        memcpy(arp_ptr, target_hw, dev->addr_len);
    else
        memset(arp_ptr, 0, dev->addr_len);
    arp_ptr += dev->addr_len;

    memcpy(arp_ptr, &dest_ip, 4);

    /* send the frame */
    rtdev_xmit_if(skb);

    return;

out:
    kfree_rtskb(skb);
}