Example no. 1
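/***
 *  rtcfg_send_ack - build and send an RTCFG_ID_ACK_CFG frame on the given
 *  interface, reporting the current configuration offset
 *  (device[ifindex].cfg_offs) back to the server's MAC address.
 */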
int rtcfg_send_ack(int ifindex)
{
    struct rtnet_device      *rtdev;
    struct rtskb             *rtskb;
    unsigned int             rtskb_size;
    struct rtcfg_frm_ack_cfg *ack_frm;


    rtdev = rtdev_get_by_index(ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    rtskb_size = rtdev->hard_header_len + sizeof(struct rtcfg_frm_ack_cfg);

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    ack_frm = (struct rtcfg_frm_ack_cfg *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_ack_cfg));

    ack_frm->head.id      = RTCFG_ID_ACK_CFG;
    ack_frm->head.version = 0;
    ack_frm->ack_len      = htonl(device[ifindex].cfg_offs);

    return rtcfg_send_frame(rtskb, rtdev, device[ifindex].srv_mac_addr);
}
Example no. 2
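/***
 *  rtcfg_send_simple_frame - build and send a header-only RTcfg frame
 *  carrying the given frame_id; if dest_addr is NULL, the frame goes to
 *  the device broadcast address.
 */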
int rtcfg_send_simple_frame(int ifindex, int frame_id, u8 *dest_addr)
{
    struct rtnet_device     *rtdev;
    struct rtskb            *rtskb;
    unsigned int            rtskb_size;
    struct rtcfg_frm_simple *simple_frm;


    rtdev = rtdev_get_by_index(ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    rtskb_size = rtdev->hard_header_len + sizeof(struct rtcfg_frm_simple);

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    simple_frm = (struct rtcfg_frm_simple *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_simple));

    simple_frm->head.id      = frame_id;
    simple_frm->head.version = 0;

    return rtcfg_send_frame(rtskb, rtdev,
                            (dest_addr) ? dest_addr : rtdev->broadcast);
}
Example no. 3
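/***
 *  rtcfg_send_ready - build and send an RTCFG_ID_READY frame to the
 *  Ethernet broadcast address on the given interface.
 */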
int rtcfg_send_ready(int ifindex)
{
    struct rtnet_device    *rtdev;
    struct rtskb           *rtskb;
    unsigned int           rtskb_size;
    struct rtcfg_frm_ready *ready_frm;


    rtdev = rtdev_get_by_index(ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    rtskb_size = rtdev->hard_header_len + sizeof(struct rtcfg_frm_ready);

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    ready_frm = (struct rtcfg_frm_ready *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_ready));

    ready_frm->head.id      = RTCFG_ID_READY;
    ready_frm->head.version = 0;

    return rtcfg_send_frame(rtskb, rtdev, eth_broadcast);
}
Example no. 4
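/***
 *  rtmac_vnic_xmit - VNIC transmit handler: copies the Linux skb payload
 *  into an rtskb from the VNIC pool, prepends the RTmac header, and hands
 *  the frame to the MAC discipline's non-real-time transmit hook.
 */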
static int rtmac_vnic_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rtnet_device     *rtdev = (struct rtnet_device*)dev->priv;
    struct net_device_stats *stats = &rtdev->mac_priv->vnic_stats;
    struct rtskb_queue      *pool = &rtdev->mac_priv->vnic_skb_pool;
    struct ethhdr           *ethernet = (struct ethhdr*)skb->data;
    struct rtskb            *rtskb;
    int                     res;
    int                     data_len;


    rtskb =
        alloc_rtskb((skb->len + sizeof(struct rtmac_hdr) + 15) & ~15, pool);
    if (!rtskb) {
        stats->tx_dropped++;
        return -ENOMEM;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len + sizeof(struct rtmac_hdr));

    data_len = skb->len - dev->hard_header_len;
    memcpy(rtskb_put(rtskb, data_len), skb->data + dev->hard_header_len,
           data_len);

    res = rtmac_add_header(rtdev, ethernet->h_dest, rtskb,
                           ntohs(ethernet->h_proto));
    if (res < 0) {
        stats->tx_dropped++;
        kfree_rtskb(rtskb);
        return res;
    }

    RTNET_ASSERT(rtdev->mac_disc->nrt_packet_tx != NULL, kfree_rtskb(rtskb);
                 return -1;);
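
    /* (completion sketch, assuming the usual RTmac VNIC pattern, as in the
       next example: hand the frame to the discipline's non-RT transmit
       hook, account for the result, and release the Linux skb) */
    res = rtdev->mac_disc->nrt_packet_tx(rtskb);
    if (res < 0) {
        stats->tx_dropped++;
        kfree_rtskb(rtskb);
    } else {
        stats->tx_packets++;
        stats->tx_bytes += skb->len;
    }

    dev_kfree_skb(skb);
    return 0;
}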
Example no. 5
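/***
 *  rtmac_vnic_xmit - variant of the VNIC transmit handler above: reports
 *  NETDEV_TX_BUSY when the pool is exhausted and passes the additional
 *  RTMAC_FLAG_TUNNEL flag when building the RTmac header.
 */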
int rtmac_vnic_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rtnet_device     *rtdev = *(struct rtnet_device **)netdev_priv(dev);
    struct net_device_stats *stats = &rtdev->mac_priv->vnic_stats;
    struct rtskb_pool       *pool = &rtdev->mac_priv->vnic_skb_pool;
    struct ethhdr           *ethernet = (struct ethhdr*)skb->data;
    struct rtskb            *rtskb;
    int                     res;
    int                     data_len;


    rtskb =
	alloc_rtskb((skb->len + sizeof(struct rtmac_hdr) + 15) & ~15, pool);
    if (!rtskb)
	return NETDEV_TX_BUSY;

    rtskb_reserve(rtskb, rtdev->hard_header_len + sizeof(struct rtmac_hdr));

    data_len = skb->len - dev->hard_header_len;
    memcpy(rtskb_put(rtskb, data_len), skb->data + dev->hard_header_len,
	   data_len);

    res = rtmac_add_header(rtdev, ethernet->h_dest, rtskb,
			   ntohs(ethernet->h_proto), RTMAC_FLAG_TUNNEL);
    if (res < 0) {
	stats->tx_dropped++;
	kfree_rtskb(rtskb);
	goto done;
    }

    RTNET_ASSERT(rtdev->mac_disc->nrt_packet_tx != NULL, kfree_rtskb(rtskb);
		 goto done;);
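
    /* (completion sketch, assuming the same pattern as the previous
       example: pass the frame to the discipline's non-RT transmit hook
       and account for the result) */
    res = rtdev->mac_disc->nrt_packet_tx(rtskb);
    if (res < 0) {
	stats->tx_dropped++;
	kfree_rtskb(rtskb);
    } else {
	stats->tx_packets++;
	stats->tx_bytes += skb->len;
    }

done:
    dev_kfree_skb(skb);
    return NETDEV_TX_OK;
}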
Example no. 6
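/***
 *  rtcfg_send_stage_1 - build and send a stage 1 configuration frame for
 *  the given connection; with RTipv4 support compiled in, the client and
 *  server IP addresses are embedded in front of the stage 1 data.
 */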
int rtcfg_send_stage_1(struct rtcfg_connection *conn)
{
    struct rtnet_device          *rtdev;
    struct rtskb                 *rtskb;
    unsigned int                 rtskb_size;
    struct rtcfg_frm_stage_1_cfg *stage_1_frm;


    rtdev = rtdev_get_by_index(conn->ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    rtskb_size = rtdev->hard_header_len +
        sizeof(struct rtcfg_frm_stage_1_cfg) + conn->stage1_size +
#ifdef CONFIG_RTNET_RTIPV4
        (((conn->addr_type & RTCFG_ADDR_MASK) == RTCFG_ADDR_IP) ?
        2*RTCFG_ADDRSIZE_IP : 0);
#else /* !CONFIG_RTNET_RTIPV4 */
        0;
#endif /* CONFIG_RTNET_RTIPV4 */

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    stage_1_frm = (struct rtcfg_frm_stage_1_cfg *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_stage_1_cfg));

    stage_1_frm->head.id      = RTCFG_ID_STAGE_1_CFG;
    stage_1_frm->head.version = 0;
    stage_1_frm->addr_type    = conn->addr_type & RTCFG_ADDR_MASK;

#ifdef CONFIG_RTNET_RTIPV4
    if (stage_1_frm->addr_type == RTCFG_ADDR_IP) {
        rtskb_put(rtskb, 2*RTCFG_ADDRSIZE_IP);

        memcpy(stage_1_frm->client_addr, &(conn->addr.ip_addr), 4);

        stage_1_frm = (struct rtcfg_frm_stage_1_cfg *)
            (((u8 *)stage_1_frm) + RTCFG_ADDRSIZE_IP);

        memcpy(stage_1_frm->server_addr, &(rtdev->local_ip), 4);

        stage_1_frm = (struct rtcfg_frm_stage_1_cfg *)
            (((u8 *)stage_1_frm) + RTCFG_ADDRSIZE_IP);
    }
#endif /* CONFIG_RTNET_RTIPV4 */

    stage_1_frm->burstrate = device[conn->ifindex].burstrate;
    stage_1_frm->cfg_len   = htons(conn->stage1_size);

    memcpy(rtskb_put(rtskb, conn->stage1_size), conn->stage1_data,
           conn->stage1_size);

    return rtcfg_send_frame(rtskb, rtdev, conn->mac_addr);
}
Example no. 7
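/***
 *  rtcfg_send_dead_station - broadcast an RTCFG_ID_DEAD_STATION frame
 *  carrying the logical (IP) and physical (MAC) address of a station
 *  that is considered lost.
 */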
int rtcfg_send_dead_station(struct rtcfg_connection *conn)
{
    struct rtnet_device           *rtdev;
    struct rtskb                  *rtskb;
    unsigned int                  rtskb_size;
    struct rtcfg_frm_dead_station *dead_station_frm;


    rtdev = rtdev_get_by_index(conn->ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    rtskb_size = rtdev->hard_header_len +
        sizeof(struct rtcfg_frm_dead_station) +
#ifdef CONFIG_RTNET_RTIPV4
        (((conn->addr_type & RTCFG_ADDR_MASK) == RTCFG_ADDR_IP) ?
        RTCFG_ADDRSIZE_IP : 0);
#else /* !CONFIG_RTNET_RTIPV4 */
        0;
#endif /* CONFIG_RTNET_RTIPV4 */

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    dead_station_frm = (struct rtcfg_frm_dead_station *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_dead_station));

    dead_station_frm->head.id      = RTCFG_ID_DEAD_STATION;
    dead_station_frm->head.version = 0;
    dead_station_frm->addr_type    = conn->addr_type & RTCFG_ADDR_MASK;

#ifdef CONFIG_RTNET_RTIPV4
    if (dead_station_frm->addr_type == RTCFG_ADDR_IP) {
        rtskb_put(rtskb, RTCFG_ADDRSIZE_IP);

        memcpy(dead_station_frm->logical_addr, &(conn->addr.ip_addr), 4);

        dead_station_frm = (struct rtcfg_frm_dead_station *)
            (((u8 *)dead_station_frm) + RTCFG_ADDRSIZE_IP);
    }
#endif /* CONFIG_RTNET_RTIPV4 */

    /* Ethernet-specific! */
    memcpy(dead_station_frm->physical_addr, conn->mac_addr, ETH_ALEN);
    memset(&dead_station_frm->physical_addr[ETH_ALEN], 0,
        sizeof(dead_station_frm->physical_addr) - ETH_ALEN);

    return rtcfg_send_frame(rtskb, rtdev, rtdev->broadcast);
}
Example no. 8
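/***
 *  rtcfg_send_announce_reply - answer a received announcement with an
 *  RTCFG_ID_ANNOUNCE_REPLY frame sent to dest_mac_addr, including the
 *  local IP address when RTipv4 is enabled.
 */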
int rtcfg_send_announce_reply(int ifindex, u8 *dest_mac_addr)
{
    struct rtcfg_device       *rtcfg_dev = &device[ifindex];
    struct rtnet_device       *rtdev;
    struct rtskb              *rtskb;
    unsigned int              rtskb_size;
    struct rtcfg_frm_announce *announce_rpl;


    rtdev = rtdev_get_by_index(ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    rtskb_size = rtdev->hard_header_len +
        sizeof(struct rtcfg_frm_announce) +
#ifdef CONFIG_RTNET_RTIPV4
        ((rtcfg_dev->spec.clt.addr_type == RTCFG_ADDR_IP) ?
        RTCFG_ADDRSIZE_IP : 0);
#else /* !CONFIG_RTNET_RTIPV4 */
        0;
#endif /* CONFIG_RTNET_RTIPV4 */

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    announce_rpl = (struct rtcfg_frm_announce *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_announce));

    announce_rpl->head.id      = RTCFG_ID_ANNOUNCE_REPLY;
    announce_rpl->head.version = 0;
    announce_rpl->addr_type    = rtcfg_dev->spec.clt.addr_type;

#ifdef CONFIG_RTNET_RTIPV4
    if (announce_rpl->addr_type == RTCFG_ADDR_IP) {
        rtskb_put(rtskb, RTCFG_ADDRSIZE_IP);

        memcpy(announce_rpl->addr, &(rtdev->local_ip), 4);

        announce_rpl = (struct rtcfg_frm_announce *)
            (((u8 *)announce_rpl) + RTCFG_ADDRSIZE_IP);
    }
#endif /* CONFIG_RTNET_RTIPV4 */

    announce_rpl->flags     = rtcfg_dev->flags & RTCFG_FLAG_READY;
    announce_rpl->burstrate = 0; /* padding field */

    return rtcfg_send_frame(rtskb, rtdev, dest_mac_addr);
}
Example no. 9
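/***
 *  rtcfg_send_stage_2 - build and send the stage 2 configuration frame;
 *  if send_data is set, as much of the stage 2 file as fits into the MTU
 *  is appended as the first fragment.
 */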
int rtcfg_send_stage_2(struct rtcfg_connection *conn, int send_data)
{
    struct rtnet_device          *rtdev;
    struct rtcfg_device          *rtcfg_dev = &device[conn->ifindex];
    struct rtskb                 *rtskb;
    unsigned int                 rtskb_size;
    struct rtcfg_frm_stage_2_cfg *stage_2_frm;
    size_t                       total_size;
    size_t                       frag_size;


    rtdev = rtdev_get_by_index(conn->ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    if (send_data) {
        total_size = conn->stage2_file->size;
        frag_size  = MIN(rtdev->mtu - sizeof(struct rtcfg_frm_stage_2_cfg),
                         total_size);
    } else {
        total_size = 0;
        frag_size  = 0;
    }

    rtskb_size = rtdev->hard_header_len +
        sizeof(struct rtcfg_frm_stage_2_cfg) + frag_size;

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    stage_2_frm = (struct rtcfg_frm_stage_2_cfg *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_stage_2_cfg));

    stage_2_frm->head.id          = RTCFG_ID_STAGE_2_CFG;
    stage_2_frm->head.version     = 0;
    stage_2_frm->flags            = rtcfg_dev->flags;
    stage_2_frm->stations         = htonl(rtcfg_dev->other_stations);
    stage_2_frm->heartbeat_period = htons(0);
    stage_2_frm->cfg_len          = htonl(total_size);

    if (send_data)
        memcpy(rtskb_put(rtskb, frag_size), conn->stage2_file->buffer,
               frag_size);
    conn->cfg_offs = frag_size;

    return rtcfg_send_frame(rtskb, rtdev, conn->mac_addr);
}
Example no. 10
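/***
 *  rtcfg_send_announce_new - broadcast an RTCFG_ID_ANNOUNCE_NEW frame
 *  announcing this client on the given interface, including its IP
 *  address when RTipv4 is enabled.
 */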
int rtcfg_send_announce_new(int ifindex)
{
    struct rtcfg_device       *rtcfg_dev = &device[ifindex];
    struct rtnet_device       *rtdev;
    struct rtskb              *rtskb;
    unsigned int              rtskb_size;
    struct rtcfg_frm_announce *announce_new;


    rtdev = rtdev_get_by_index(ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    rtskb_size = rtdev->hard_header_len + sizeof(struct rtcfg_frm_announce) +
#ifdef CONFIG_RTNET_RTIPV4
        (((rtcfg_dev->spec.clt.addr_type & RTCFG_ADDR_MASK) == RTCFG_ADDR_IP) ?
        RTCFG_ADDRSIZE_IP : 0);
#else /* !CONFIG_RTNET_RTIPV4 */
        0;
#endif /* CONFIG_RTNET_RTIPV4 */

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    announce_new = (struct rtcfg_frm_announce *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_announce));

    announce_new->head.id      = RTCFG_ID_ANNOUNCE_NEW;
    announce_new->head.version = 0;
    announce_new->addr_type    = rtcfg_dev->spec.clt.addr_type;

#ifdef CONFIG_RTNET_RTIPV4
    if (announce_new->addr_type == RTCFG_ADDR_IP) {
        rtskb_put(rtskb, RTCFG_ADDRSIZE_IP);

        memcpy(announce_new->addr, &(rtdev->local_ip), 4);

        announce_new = (struct rtcfg_frm_announce *)
            (((u8 *)announce_new) + RTCFG_ADDRSIZE_IP);
    }
#endif /* CONFIG_RTNET_RTIPV4 */

    announce_new->flags     = rtcfg_dev->flags;
    announce_new->burstrate = rtcfg_dev->burstrate;

    return rtcfg_send_frame(rtskb, rtdev, rtdev->broadcast);
}
Example no. 11
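/***
 *  rt2x00_interrupt_rxdone - walk the rx descriptor ring, copy every frame
 *  owned by the host into a freshly allocated rtskb, pass it to the rtwlan
 *  stack and to RTnet, and hand the descriptor back to the NIC.
 */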
static void rt2x00_interrupt_rxdone(struct _data_ring * ring, nanosecs_t *time_stamp) {

    struct _rt2x00_pci	 * rt2x00pci  = rt2x00_priv(ring->device);
    struct rtnet_device  * rtnet_dev  = ring->device->rtnet_dev; 
    struct rtwlan_device * rtwlan     = rtnetdev_priv(rtnet_dev);
    struct _rxd		 * rxd = NULL;
    struct rtskb         * rtskb;
    void		 * data = NULL;
    u16			   size = 0x0000;
    /*    u16                    rssi = 0x0000; */

    while(1){

        rxd = DESC_ADDR(ring);
        data = DATA_ADDR(ring);

        if(rt2x00_get_field32(rxd->word0, RXD_W0_OWNER_NIC))
            break;

        size = rt2x00_get_field32(rxd->word0, RXD_W0_DATABYTE_COUNT);
        /*	rssi = rt2x00_get_field32(rxd->word2, RXD_W2_RSSI); */

        /* prepare rtskb */
        rtskb = dev_alloc_rtskb(size + NET_IP_ALIGN, &rtwlan->skb_pool);
        if(!rtskb){
            ERROR("Couldn't allocate rtskb, packet dropped.\n");
            break;
        }
        rtskb->rtdev = rtnet_dev;
        rtskb->time_stamp = *time_stamp;
        rtskb_reserve(rtskb, NET_IP_ALIGN);

        memcpy(rtskb->data, data, size);
        rtskb_put(rtskb, size);

        /* give incoming frame to rtwlan stack */
        rtwlan_rx(rtskb, rtnet_dev);

        /* forward rtskb to rtnet */
        rtnetif_rx(rtskb);

        rtwlan->stats.rx_packets++;

        rt2x00_set_field32(&rxd->word0, RXD_W0_OWNER_NIC, 1);
        rt2x00_ring_index_inc(&rt2x00pci->rx);
    }
}
Example no. 12
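/***
 *  rtcfg_send_announce_new - variant of the announcement sender without
 *  the RTipv4 conditional compilation; the IP address is appended whenever
 *  the address type is RTCFG_ADDR_IP.
 */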
int rtcfg_send_announce_new(int ifindex)
{
    struct rtcfg_device       *rtcfg_dev = &device[ifindex];
    struct rtnet_device       *rtdev;
    struct rtskb              *rtskb;
    unsigned int              rtskb_size;
    struct rtcfg_frm_announce *announce_new;


    rtdev = rtdev_get_by_index(ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    rtskb_size = rtdev->hard_header_len + sizeof(struct rtcfg_frm_announce) +
        ((rtcfg_dev->addr_type == RTCFG_ADDR_IP) ? RTCFG_ADDRSIZE_IP : 0);

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    announce_new = (struct rtcfg_frm_announce *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_announce));

    announce_new->head.id      = RTCFG_ID_ANNOUNCE_NEW;
    announce_new->head.version = 0;
    announce_new->addr_type    = rtcfg_dev->addr_type;

    if (announce_new->addr_type == RTCFG_ADDR_IP) {
        rtskb_put(rtskb, RTCFG_ADDRSIZE_IP);

        *(u32*)announce_new->addr = rtdev->local_ip;

        announce_new = (struct rtcfg_frm_announce *)
            (((u8 *)announce_new) + RTCFG_ADDRSIZE_IP);
    }

    announce_new->flags     = rtcfg_dev->flags;
    announce_new->burstrate = rtcfg_dev->burstrate;

    return rtcfg_send_frame(rtskb, rtdev, rtdev->broadcast);
}
Example no. 13
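/***
 *  rtcfg_send_stage_2_frag - send the next fragment of the stage 2
 *  configuration file, sized to the priority-specific MTU, and advance
 *  the connection's transfer offset accordingly.
 */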
int rtcfg_send_stage_2_frag(struct rtcfg_connection *conn)
{
    struct rtnet_device               *rtdev;
    struct rtskb                      *rtskb;
    unsigned int                      rtskb_size;
    struct rtcfg_frm_stage_2_cfg_frag *stage_2_frm;
    size_t                            frag_size;


    rtdev = rtdev_get_by_index(conn->ifindex);
    if (rtdev == NULL)
        return -ENODEV;

    frag_size = MIN(rtdev->get_mtu(rtdev, RTCFG_SKB_PRIO) -
                    sizeof(struct rtcfg_frm_stage_2_cfg_frag),
                    conn->stage2_file->size - conn->cfg_offs);

    rtskb_size = rtdev->hard_header_len +
        sizeof(struct rtcfg_frm_stage_2_cfg_frag) + frag_size;

    rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
    if (rtskb == NULL) {
        rtdev_dereference(rtdev);
        return -ENOBUFS;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    stage_2_frm = (struct rtcfg_frm_stage_2_cfg_frag *)
        rtskb_put(rtskb, sizeof(struct rtcfg_frm_stage_2_cfg_frag));

    stage_2_frm->head.id      = RTCFG_ID_STAGE_2_CFG_FRAG;
    stage_2_frm->head.version = 0;
    stage_2_frm->frag_offs    = htonl(conn->cfg_offs);

    memcpy(rtskb_put(rtskb, frag_size),
           conn->stage2_file->buffer + conn->cfg_offs, frag_size);
    conn->cfg_offs += frag_size;

    return rtcfg_send_frame(rtskb, rtdev, conn->mac_addr);
}
Example no. 14
/*
 *	Fast path for unfragmented packets.
 */
int rt_ip_build_xmit(struct rtsocket *sk, 
	            int getfrag (const void *, char *, unsigned int, unsigned int),
		    const void *frag, 
		    unsigned length, 
		    struct rt_rtable *rt, 
		    int flags)
{
	int	err=0;
	struct	rtskb *skb;
	int	df;
	struct	iphdr *iph;
	
	struct	rtnet_device *rtdev=rt->rt_dev;

	/*
	 *	Try the simple case first. This leaves fragmented frames, and by
	 *	choice RAW frames within 20 bytes of maximum size (rare) to the long path
	 */
	length += sizeof(struct iphdr);
	
	df = htons(IP_DF);

	{
		int hh_len = (rtdev->hard_header_len+15)&~15;

		skb = alloc_rtskb(length+hh_len+15);
		if (skb==NULL) {
			err = -ENOBUFS;	/* report the failed allocation */
			goto no_rtskb;
		}
		rtskb_reserve(skb, hh_len);
	}
	
	skb->dst=rt; 
	skb->rtdev=rt->rt_dev;
	skb->nh.iph = iph = (struct iphdr *) rtskb_put(skb, length);
	
	
	iph->version=4;
	iph->ihl=5;
	iph->tos=sk->tos;
	iph->tot_len = htons(length);
	iph->id=htons(rt_ip_id_count++);
	iph->frag_off = df;
	iph->ttl=255;
	iph->protocol=sk->protocol;
	iph->saddr=rt->rt_src;
	iph->daddr=rt->rt_dst;
	iph->check=0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	
	if ( (err=getfrag(frag, ((char *)iph)+iph->ihl*4, 0, length-iph->ihl*4)) )
		goto error;

	if ( !(rtdev->hard_header) ||
	     (rtdev->hard_header(skb, rtdev, ETH_P_IP, rt->rt_dst_mac_addr, rtdev->dev_addr, skb->len)<0) ) {
		err = -EINVAL;	/* no way to build the link-layer header */
		goto error;
	}

	if ((skb->rtdev->rtmac) && /* These code lines are crappy! */
	    (skb->rtdev->rtmac->disc_type) &&
	    (skb->rtdev->rtmac->disc_type->rt_packet_tx)) {
	    err = skb->rtdev->rtmac->disc_type->rt_packet_tx(skb, skb->rtdev);
	} else {
	    err = rtdev_xmit(skb);
	}

	if (err) {
		return -EAGAIN;
	} else {
		return 0;
	}
	
error:
	kfree_rtskb(skb);
no_rtskb:
	return err; 
}
Example no. 15
/***
 *  rt_packet_sendmsg - send a packet-socket message over the interface
 *  selected by the sockaddr_ll destination address.
 */
int rt_packet_sendmsg(struct rtsocket *sock, const struct msghdr *msg,
                      size_t len, int flags)
{
    struct sockaddr_ll *sll = (struct sockaddr_ll*)msg->msg_name;
    struct rtnet_device *rtdev;
    struct rtskb *rtskb;
    int ret = 0;


    if (flags & MSG_OOB)   /* Mirror BSD error message compatibility */
        return -EOPNOTSUPP;

    /* a lot of sanity checks */
    if ((flags & ~MSG_DONTWAIT) ||
        (sll == NULL) || (msg->msg_namelen != sizeof(struct sockaddr_ll)) ||
        ((sll->sll_family != AF_PACKET) && (sll->sll_family != AF_UNSPEC)) ||
        (sll->sll_ifindex <= 0))
        return -EINVAL;

    if ((rtdev = rtdev_get_by_index(sll->sll_ifindex)) == NULL)
        return -ENODEV;

    if (len > rtdev->mtu) {    /* len is a size_t, it cannot be negative */
        ret = -EMSGSIZE;
        goto out;
    }

    rtskb = alloc_rtskb(rtdev->hard_header_len + len, &sock->skb_pool);
    if (rtskb == NULL) {
        ret = -ENOBUFS;
        goto out;
    }

    if (sll->sll_halen != rtdev->addr_len) {
        ret = -EINVAL;
        goto err;
    }

    rtskb_reserve(rtskb, rtdev->hard_header_len);

    rt_memcpy_fromkerneliovec(rtskb_put(rtskb, len), msg->msg_iov, len);

    rtskb->rtdev    = rtdev;
    rtskb->priority = sock->priority;

    if (rtdev->hard_header) {
        ret = rtdev->hard_header(rtskb, rtdev, ntohs(sll->sll_protocol),
                                 sll->sll_addr, rtdev->dev_addr, rtskb->len);
        if (ret < 0)
            goto err;
    }

    if ((rtdev->flags & IFF_UP) != 0) {
        if (rtdev_xmit(rtskb) == 0)
            ret = len;
        else
            ret = -EAGAIN;
    } else {
        ret = -ENETDOWN;
        goto err;
    }

out:
    rtdev_dereference(rtdev);
    return ret;

err:
    kfree_rtskb(rtskb);
    rtdev_dereference(rtdev);
    return ret;
}
Example no. 16
/***
 *	arp_send:	Create and send an arp packet. If (dest_hw == NULL),
 *			we create a broadcast message.
 */
void rt_arp_send(int type, 
		 int ptype, 
		 u32 dest_ip, 
		 struct rtnet_device *rtdev, 
		 u32 src_ip, 
		 unsigned char *dest_hw, 
		 unsigned char *src_hw,
		 unsigned char *target_hw)
{
	struct net_device *dev = dev_get_by_rtdev(rtdev);
	struct rtskb *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	
	if ( dev->flags & IFF_NOARP )
		return;

	if ( !(skb=alloc_rtskb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)+dev->hard_header_len+15)) )
		return;

	rtskb_reserve(skb, (dev->hard_header_len+15)&~15);

	skb->nh.raw = skb->data;
	arp = (struct arphdr *) rtskb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));

	skb->rtdev = rtdev;
	skb->protocol = __constant_htons (ETH_P_ARP);
	if (src_hw == NULL)
		src_hw = dev->dev_addr;
	if (dest_hw == NULL)
		dest_hw = dev->broadcast;

	/*
	 *	Fill the device header for the ARP frame
	 */
	if (rtdev->hard_header &&
	    rtdev->hard_header(skb,rtdev,ptype,dest_hw,src_hw,skb->len) < 0)
		goto out;

	arp->ar_hrd = htons(dev->type);
	arp->ar_pro = __constant_htons(ETH_P_IP);
	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr=(unsigned char *)(arp+1);

	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr+=dev->addr_len;

	memcpy(arp_ptr, &src_ip,4);
	arp_ptr+=4;

	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, dev->addr_len);
	else
		memset(arp_ptr, 0, dev->addr_len);
	arp_ptr+=dev->addr_len;

	memcpy(arp_ptr, &dest_ip, 4);


	/* send the frame */
	rtdev_xmit_if(skb);

	return;

out:
	kfree_rtskb(skb);
}
Example no. 17
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct rtnet_device *ndev, int *packets, nanosecs_abs_t *time_stamp)
{
	struct fec_enet_private *fep = rtnetdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	unsigned short status;
	struct	rtskb	*skb;
	ushort	pkt_len;
	__u8 *data;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif
	rtdm_lock_get(&fep->hw_lock);

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				fep->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				fep->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				fep->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				fep->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			fep->stats.rx_errors++;
			fep->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		fep->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		fep->stats.rx_bytes += pkt_len;
		data = (__u8*)__va(bdp->cbd_bufaddr);

		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);

		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, pkt_len);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_rtskb(pkt_len - 4 + NET_IP_ALIGN,
				      &fep->skb_pool); /* RTnet */

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					ndev->name);
			fep->stats.rx_dropped++;
		} else {
			rtskb_reserve(skb, NET_IP_ALIGN);
			rtskb_put(skb, pkt_len - 4);	/* Make room */
			memcpy(skb->data, data, pkt_len - 4);
			skb->protocol = rt_eth_type_trans(skb, ndev);
			skb->rtdev = ndev;
			skb->time_stamp = *time_stamp;
			rtnetif_rx(skb);
			(*packets)++; /* RTnet */
		}

		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	rtdm_lock_put(&fep->hw_lock);
}
Example no. 18
/***
 *  Fast path for unfragmented packets.
 */
int rt_ip_build_xmit(struct rtsocket *sk,
        int getfrag(const void *, char *, unsigned int, unsigned int),
        const void *frag, unsigned length, struct rt_rtable *rt, int msg_flags)
{
    int                     err=0;
    struct rtskb            *skb;
    struct iphdr            *iph;
    int                     hh_len;
    u16                     msg_rt_ip_id;
    unsigned long           flags;
    struct  rtnet_device    *rtdev=rt->rt_dev;


    /*
     *  Try the simple case first. This leaves fragmented frames, and by choice
     *  RAW frames within 20 bytes of maximum size (rare) to the long path
     */
    length += sizeof(struct iphdr);

    if (length > rtdev->mtu)
        return rt_ip_build_xmit_slow(sk, getfrag, frag,
                                length - sizeof(struct iphdr), rt, msg_flags);

    /* Store id in local variable */
    rtos_spin_lock_irqsave(&rt_ip_id_lock, flags);
    msg_rt_ip_id = rt_ip_id_count++;
    rtos_spin_unlock_irqrestore(&rt_ip_id_lock, flags);

    hh_len = (rtdev->hard_header_len+15)&~15;

    skb = alloc_rtskb(length+hh_len+15, &sk->skb_pool);
    if (skb==NULL)
        return -ENOBUFS;

    rtskb_reserve(skb, hh_len);

    skb->dst      = rt;
    skb->rtdev    = rt->rt_dev;
    skb->nh.iph   = iph = (struct iphdr *) rtskb_put(skb, length);
    skb->priority = sk->priority;

    iph->version  = 4;
    iph->ihl      = 5;
    iph->tos      = sk->prot.inet.tos;
    iph->tot_len  = htons(length);
    iph->id       = htons(msg_rt_ip_id);
    iph->frag_off = htons(IP_DF);
    iph->ttl      = 255;
    iph->protocol = sk->protocol;
    iph->saddr    = rt->rt_src;
    iph->daddr    = rt->rt_dst;
    iph->check    = 0; /* required! */
    iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

    if ( (err=getfrag(frag, ((char *)iph)+iph->ihl*4, 0, length-iph->ihl*4)) )
        goto error;

    if (!(rtdev->hard_header) ||
        (rtdev->hard_header(skb, rtdev, ETH_P_IP, rt->rt_dst_mac_addr,
                            rtdev->dev_addr, skb->len) < 0)) {
        err = -EINVAL; /* no way to build the link-layer header */
        goto error;
    }

    err = rtdev_xmit(skb);

    if (err)
        return -EAGAIN;
    else
        return 0;

error:
    kfree_rtskb(skb);
    return err;
}
Example no. 19
/***
 * rt_loopback_xmit - begin packet transmission
 * @skb: packet to be sent
 * @dev: network device to which packet is sent
 *
 */
static int rt_loopback_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
	int err=0;
	struct rtskb *new_skb;

	if ( (new_skb=dev_alloc_rtskb(skb->len + 2))==NULL ) 
	{
		rt_printk("RTnet %s: couldn't allocate a rtskb of size %d.\n", rtdev->name, skb->len);
		err = -ENOMEM;
		goto rt_loopback_xmit_end;
	}
	else 
	{
		new_skb->rx = rt_get_time();
		new_skb->rtdev = rtdev;
		rtskb_reserve(new_skb,2);
		memcpy(new_skb->buf_start, skb->buf_start, SKB_DATA_ALIGN(ETH_FRAME_LEN));
		rtskb_put(new_skb, skb->len);
		new_skb->protocol = rt_eth_type_trans(new_skb, rtdev);

#ifdef DEBUG_LOOPBACK_DRIVER
		{
			int i, cuantos;
			rt_printk("\n\nPACKET:");
			rt_printk("\nskb->protocol = %d", 		skb->protocol);
			rt_printk("\nskb->pkt_type = %d", 		skb->pkt_type);
			rt_printk("\nskb->users = %d", 			skb->users);
			rt_printk("\nskb->cloned = %d", 		skb->cloned);
			rt_printk("\nskb->csum = %d",	 		skb->csum);
			rt_printk("\nskb->len = %d", 			skb->len);
			
			rt_printk("\nnew_skb->protocol = %d", 	new_skb->protocol);
			rt_printk("\nnew_skb->pkt_type = %d", 	new_skb->pkt_type);
			rt_printk("\nnew_skb->users = %d", 		new_skb->users);
			rt_printk("\nnew_skb->cloned = %d", 	new_skb->cloned);
			rt_printk("\nnew_skb->csum = %d",	 	new_skb->csum);
			rt_printk("\nnew_skb->len = %d", 		new_skb->len);
			
			rt_printk("\n\nETHERNET HEADER:");
			rt_printk("\nMAC dest: "); for(i=0;i<6;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+2]); }
			rt_printk("\nMAC orig: "); for(i=0;i<6;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+8]); }
			rt_printk("\nPROTOCOL: "); for(i=0;i<2;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+14]); }
		
			rt_printk("\n\nIP HEADER:");
			rt_printk("\nVERSIZE : "); for(i=0;i<1;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+16]); }
			rt_printk("\nPRIORITY: "); for(i=0;i<1;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+17]); }
			rt_printk("\nLENGTH  : "); for(i=0;i<2;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+18]); }
			rt_printk("\nIDENT   : "); for(i=0;i<2;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+20]); }
			rt_printk("\nFRAGMENT: "); for(i=0;i<2;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+22]); }
			rt_printk("\nTTL     : "); for(i=0;i<1;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+24]); }
			rt_printk("\nPROTOCOL: "); for(i=0;i<1;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+25]); }
			rt_printk("\nCHECKSUM: "); for(i=0;i<2;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+26]); }
			rt_printk("\nIP ORIGE: "); for(i=0;i<4;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+28]); }
			rt_printk("\nIP DESTI: "); for(i=0;i<4;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+32]); }
		
			cuantos = (int)(*(unsigned short *)(new_skb->buf_start+18)) - 20;
			rt_printk("\n\nDATA (%d):", cuantos);  
			rt_printk("\n:");  		   for(i=0;i<cuantos;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+36]); }		
		}
#endif

		rtnetif_rx(new_skb);
		rt_mark_stack_mgr(rtdev);
	}
	
rt_loopback_xmit_end:
	kfree_rtskb(skb);
	return err;
}
Example no. 20
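/* tulip_rx: receive handler called from the interrupt path. It walks the
   rx ring while descriptors are owned by the host, accounts length/CRC/FIFO
   errors, and passes good frames to RTnet together with the interrupt
   time stamp. */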
static int tulip_rx(/*RTnet*/struct rtnet_device *rtdev, nanosecs_t *time_stamp)
{
	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		/*RTnet*/rtdm_printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				   rtdev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						/*RTnet*/rtdm_printk(KERN_WARNING "%s: Oversized Ethernet frame "
							   "spanned multiple buffers, status %8.8x!\n",
							   rtdev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   rtdev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct /*RTnet*/rtskb *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				/*RTnet*/rtdm_printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					   rtdev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

#if 0 /*RTnet*/
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
				&& (skb = /*RTnet*/dev_alloc_rtskb(pkt_len + 2)) != NULL) {
				skb->rtdev = rtdev;
				/*RTnet*/rtskb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(tp->pdev,
						    tp->rx_buffers[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				//eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
				//		 pkt_len, 0);
				memcpy(rtskb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#else
				memcpy(/*RTnet*/rtskb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#endif
			} else { 	/* Pass up the skb already on the Rx ring. */
#endif /*RTnet*/
			{
				char *temp = /*RTnet*/rtskb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					/*RTnet*/rtdm_printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %08x ? / %p.\n",
					       rtdev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       tp->rx_buffers[entry].mapping,
					       temp);/*RTnet*/
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = /*RTnet*/rt_eth_type_trans(skb, rtdev);
			skb->time_stamp = *time_stamp;
			/*RTnet*/rtnetif_rx(skb);

			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
int tulip_interrupt(rtdm_irq_t *irq_handle)
{
	nanosecs_t time_stamp = rtdm_clock_read();/*RTnet*/
	struct rtnet_device *rtdev =
	    rtdm_irq_get_arg(irq_handle, struct rtnet_device);/*RTnet*/
	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
	long ioaddr = rtdev->base_addr;
	unsigned int csr5;
	int entry;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
	unsigned int work_count = tulip_max_interrupt_work;

	/* Let's see whether the interrupt really is for us */
	csr5 = inl(ioaddr + CSR5);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) {
		rtdm_printk("%s: unexpected IRQ!\n",rtdev->name);
		return 0;
	}

	tp->nir++;

	do {
		/* Acknowledge all of the current interrupt sources ASAP. */
		outl(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (tulip_debug > 4)
			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
				   rtdev->name, csr5, inl(rtdev->base_addr + CSR5));

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(rtdev, &time_stamp);
			tulip_refill_rx(rtdev);
		}

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			rtdm_lock_get(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;			/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
							   rtdev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				/*RTnet*/dev_kfree_rtskb(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
				rtnetif_tx(rtdev);
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				/*RTnet*/rtdm_printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
					   rtdev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				/*RTnet*/rtnetif_wake_queue(rtdev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					/*RTnet*/rtdm_printk(KERN_WARNING "%s: The transmitter stopped."
						   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
						   rtdev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			rtdm_lock_put(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
#if 0 /*RTnet*/
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				outl(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					outl(tp->mc_filter[0], ioaddr + 0xAC);
					outl(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
                                tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(rtdev, csr5);
			}
			if (csr5 & SytemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				/*RTnet*/rtdm_printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
					rtdev->name, tp->nir, error);
			}
#endif /*RTnet*/
			/*RTnet*/rtdm_printk(KERN_ERR "%s: Error detected, "
			    "device may not work any more (csr5=%08x)!\n", rtdev->name, csr5);
			/* Clear all error sources, included undocumented ones! */
			outl(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
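
		/* (completion sketch, assuming the standard loop epilogue:
		   stop once the work budget is exhausted, otherwise re-read
		   the status register and iterate while interrupt sources
		   are still pending; the RTDM return value is an assumption) */
		if (--work_count == 0)
			break;

		csr5 = inl(ioaddr + CSR5);
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	return RTDM_IRQ_HANDLED;
}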
Example no. 21
/***
 *  Slow path for fragmented packets
 */
int rt_ip_build_xmit_slow(struct rtsocket *sk,
        int getfrag(const void *, char *, unsigned int, unsigned int),
        const void *frag, unsigned length, struct rt_rtable *rt, int msg_flags)
{
    int             err, next_err;
    struct rtskb    *skb;
    struct rtskb    *next_skb;
    struct          iphdr *iph;

    struct          rtnet_device *rtdev=rt->rt_dev;
    int             mtu = rtdev->mtu;
    unsigned int    fragdatalen;
    unsigned int    offset = 0;
    u16             msg_rt_ip_id;
    unsigned long   flags;
    unsigned int    rtskb_size;
    int             hh_len = (rtdev->hard_header_len + 15) & ~15;


    #define FRAGHEADERLEN sizeof(struct iphdr)

    fragdatalen  = ((mtu - FRAGHEADERLEN) & ~7);

    /* Store id in local variable */
    rtos_spin_lock_irqsave(&rt_ip_id_lock, flags);
    msg_rt_ip_id = rt_ip_id_count++;
    rtos_spin_unlock_irqrestore(&rt_ip_id_lock, flags);

    rtskb_size = mtu + hh_len + 15;

    /* Preallocate first rtskb */
    skb = alloc_rtskb(rtskb_size, &sk->skb_pool);
    if (skb == NULL)
        return -ENOBUFS;

    for (offset = 0; offset < length; offset += fragdatalen)
    {
        int fraglen; /* The length (IP, including ip-header) of this
                        very fragment */
        __u16 frag_off = offset >> 3 ;


        next_err = 0;
        if (offset >= length - fragdatalen)
        {
            /* last fragment */
            fraglen  = FRAGHEADERLEN + length - offset ;
            next_skb = NULL;
        }
        else
        {
            fraglen = FRAGHEADERLEN + fragdatalen;
            frag_off |= IP_MF;

            next_skb = alloc_rtskb(rtskb_size, &sk->skb_pool);
            if (next_skb == NULL) {
                frag_off &= ~IP_MF; /* cut the chain */
                next_err = -ENOBUFS;
            }
        }

        rtskb_reserve(skb, hh_len);

        skb->dst      = rt;
        skb->rtdev    = rt->rt_dev;
        skb->nh.iph   = iph = (struct iphdr *) rtskb_put(skb, fraglen);
        skb->priority = sk->priority;

        iph->version  = 4;
        iph->ihl      = 5;    /* 20 byte header - no options */
        iph->tos      = sk->prot.inet.tos;
        iph->tot_len  = htons(fraglen);
        iph->id       = htons(msg_rt_ip_id);
        iph->frag_off = htons(frag_off);
        iph->ttl      = 255;
        iph->protocol = sk->protocol;
        iph->saddr    = rt->rt_src;
        iph->daddr    = rt->rt_dst;
        iph->check    = 0; /* required! */
        iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

        if ( (err=getfrag(frag, ((char *)iph)+iph->ihl*4, offset,
                          fraglen - FRAGHEADERLEN)) )
            goto error;

        if (!(rtdev->hard_header) ||
            (rtdev->hard_header(skb, rtdev, ETH_P_IP, rt->rt_dst_mac_addr,
                                rtdev->dev_addr, skb->len) < 0)) {
            err = -EINVAL; /* no way to build the link-layer header */
            goto error;
        }

        err = rtdev_xmit(skb);

        skb = next_skb;

        if (err != 0) {
            err = -EAGAIN;
            goto error;
        }

        if (next_err != 0)
            return next_err;
    }
    return 0;

error:
    if (skb != NULL) {
        kfree_rtskb(skb);

        /* do not free the follow-up buffer twice if skb already points to
           it (transmission error after skb = next_skb) */
        if ((next_skb != NULL) && (next_skb != skb))
            kfree_rtskb(next_skb);
    }
    return err;
}