Example #1
/*
 *	This is where all NET/ROM frames pass, except for IP-over-NET/ROM which
 *	cannot be fragmented in this manner.
 */
void nr_output(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char transport[NR_TRANSPORT_LEN];
	int err, frontlen, len;

	if (skb->len - NR_TRANSPORT_LEN > NR_MAX_PACKET_SIZE) {
		/* Save a copy of the Transport Header */
		skb_copy_from_linear_data(skb, transport, NR_TRANSPORT_LEN);
		skb_pull(skb, NR_TRANSPORT_LEN);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			if ((skbn = sock_alloc_send_skb(sk, frontlen + NR_MAX_PACKET_SIZE, 0, &err)) == NULL)
				return;

			skb_reserve(skbn, frontlen);

			len = (NR_MAX_PACKET_SIZE > skb->len) ? skb->len : NR_MAX_PACKET_SIZE;

			/* Copy the user data */
			skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
			skb_pull(skb, len);

			/* Duplicate the Transport Header */
			skb_push(skbn, NR_TRANSPORT_LEN);
			skb_copy_to_linear_data(skbn, transport,
						NR_TRANSPORT_LEN);
			if (skb->len > 0)
				skbn->data[4] |= NR_MORE_FLAG;

			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb);		/* Throw it on the queue */
	}

	nr_kick(sk);
}
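Every call site in these examples leans on the same small helper. For reference, this is essentially its inline definition from include/linux/skbuff.h (a sketch; const qualifiers vary slightly across kernel versions). It copies from the skb's linear area only, so callers must keep len within skb_headlen(skb).

/* Sketch of the helper used throughout these examples: a plain memcpy
 * starting at skb->data. Paged fragments in skb_shinfo(skb)->frags are
 * NOT copied, so 'len' must not exceed skb_headlen(skb).
 */
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to, const unsigned int len)
{
	memcpy(to, skb->data, len);
}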
Example #2
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
			     int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

	if (skb->len <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
		if (skb_shinfo(skb)->nr_frags)
			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
			       skb_frag_size(&skb_shinfo(skb)->frags[0]));

	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		if (skb_headlen(skb) <= spc) {
			skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
			if (skb_headlen(skb) < spc) {
				memcpy(((void *)(inl + 1)) + skb_headlen(skb),
					fragptr, spc - skb_headlen(skb));
				fragptr +=  spc - skb_headlen(skb);
			}
			inl = (void *) (inl + 1) + spc;
			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
		} else {
			skb_copy_from_linear_data(skb, inl + 1, spc);
			inl = (void *) (inl + 1) + spc;
			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
					skb_headlen(skb) - spc);
			if (skb_shinfo(skb)->nr_frags)
				memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
					fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0]));
		}

		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
	}
	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
		(!!vlan_tx_tag_present(skb));
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
Example #3
/* Send data from current skb to the device. */
static int write_modem(struct cardstate *cs)
{
	int ret = 0;
	int count;
	struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
	struct usb_cardstate *ucs = cs->hw.usb;
	unsigned long flags;

	gig_dbg(DEBUG_OUTPUT, "len: %d...", bcs->tx_skb->len);

	if (!bcs->tx_skb->len) {
		dev_kfree_skb_any(bcs->tx_skb);
		bcs->tx_skb = NULL;
		return -EINVAL;
	}

	/* Copy data to bulk out buffer and transmit data */
	count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
	skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count);
	skb_pull(bcs->tx_skb, count);
	ucs->busy = 1;
	gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count);

	spin_lock_irqsave(&cs->lock, flags);
	if (cs->connected) {
		usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
				  usb_sndbulkpipe(ucs->udev,
						  ucs->bulk_out_endpointAddr &
						  0x0f),
				  ucs->bulk_out_buffer, count,
				  gigaset_write_bulk_callback, cs);
		ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC);
	} else {
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(&cs->lock, flags);

	if (ret) {
		dev_err(cs->dev, "could not submit urb (error %d)\n", -ret);
		ucs->busy = 0;
	}

	if (!bcs->tx_skb->len) {
		/* skb sent completely */
		gigaset_skb_sent(bcs, bcs->tx_skb);

		gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!",
			(unsigned long) bcs->tx_skb);
		dev_kfree_skb_any(bcs->tx_skb);
		bcs->tx_skb = NULL;
	}

	return ret;
}
Example #4
/*
 * Au1000 transmit routine.
 */
static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct net_device_stats *ps = &dev->stats;
	volatile tx_dma_t *ptxd;
	u32 buff_stat;
	db_dest_t *pDB;
	int i;

	if (au1000_debug > 5)
		printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
				dev->name, (unsigned)aup, skb->len,
				skb->data, aup->tx_head);

	ptxd = aup->tx_dma_ring[aup->tx_head];
	buff_stat = ptxd->buff_stat;
	if (buff_stat & TX_DMA_ENABLE) {
		/* We've wrapped around and the transmitter is still busy */
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return NETDEV_TX_BUSY;
	} else if (buff_stat & TX_T_DONE) {
		update_tx_stats(dev, ptxd->status);
		ptxd->len = 0;
	}

	if (aup->tx_full) {
		aup->tx_full = 0;
		netif_wake_queue(dev);
	}

	pDB = aup->tx_db_inuse[aup->tx_head];
	skb_copy_from_linear_data(skb, pDB->vaddr, skb->len);
	if (skb->len < ETH_ZLEN) {
		for (i=skb->len; i<ETH_ZLEN; i++) {
			((char *)pDB->vaddr)[i] = 0;
		}
		ptxd->len = ETH_ZLEN;
	} else
		ptxd->len = skb->len;

	ps->tx_packets++;
	ps->tx_bytes += ptxd->len;

	ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
	au_sync();
	dev_kfree_skb(skb);
	aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
	dev->trans_start = jiffies;
	return NETDEV_TX_OK;
}
Example #5
static int dtl1_hci_send_frame(struct sk_buff *skb)
{
	dtl1_info_t *info;
	struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
	struct sk_buff *s;
	nsh_t nsh;

	if (!hdev) {
		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
		return -ENODEV;
	}

	info = (dtl1_info_t *)(hdev->driver_data);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		hdev->stat.cmd_tx++;
		nsh.type = 0x81;
		break;
	case HCI_ACLDATA_PKT:
		hdev->stat.acl_tx++;
		nsh.type = 0x82;
		break;
	case HCI_SCODATA_PKT:
		hdev->stat.sco_tx++;
		nsh.type = 0x83;
		break;
	default:
		return -EILSEQ;
	}

	nsh.zero = 0;
	nsh.len = skb->len;

	s = bt_skb_alloc(NSHL + skb->len + 1, GFP_ATOMIC);
	if (!s)
		return -ENOMEM;

	skb_reserve(s, NSHL);
	skb_copy_from_linear_data(skb, skb_put(s, skb->len), skb->len);
	if (skb->len & 0x0001)
		*skb_put(s, 1) = 0;	/* PAD */

	/* Prepend skb with Nokia frame header and queue */
	memcpy(skb_push(s, NSHL), &nsh, NSHL);
	skb_queue_tail(&(info->txq), s);

	dtl1_write_wakeup(info);

	kfree_skb(skb);

	return 0;
}
Example #6
static inline int
write_modem(struct BCState *bcs) {
	int ret=0;
	struct IsdnCardState *cs = bcs->cs;
	int count, len, fp;
	
	if (!bcs->tx_skb)
		return 0;
	if (bcs->tx_skb->len <= 0)
		return 0;
	len = bcs->tx_skb->len;
	if (len > MAX_MODEM_BUF - cs->hw.elsa.transcnt)
		len = MAX_MODEM_BUF - cs->hw.elsa.transcnt;
	fp = cs->hw.elsa.transcnt + cs->hw.elsa.transp;
	fp &= (MAX_MODEM_BUF -1);
	count = len;
	if (count > MAX_MODEM_BUF - fp) {
		count = MAX_MODEM_BUF - fp;
		skb_copy_from_linear_data(bcs->tx_skb,
					  cs->hw.elsa.transbuf + fp, count);
		skb_pull(bcs->tx_skb, count);
		cs->hw.elsa.transcnt += count;
		ret = count;
		count = len - count;
		fp = 0;
	}
	skb_copy_from_linear_data(bcs->tx_skb,
				  cs->hw.elsa.transbuf + fp, count);
	skb_pull(bcs->tx_skb, count);
	cs->hw.elsa.transcnt += count;
	ret += count;
	
	if (cs->hw.elsa.transcnt &&
	    !(cs->hw.elsa.IER & UART_IER_THRI)) {
		cs->hw.elsa.IER |= UART_IER_THRI;
		serial_outp(cs, UART_IER, cs->hw.elsa.IER);
	}
	return(ret);
}
Example #7
/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	skb_copy_from_linear_data(skb, txbuf, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	dev->trans_start = jiffies;
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.
		 * When the tx queue empties the tx irq handler
		 * will wake up the queue and return us back to
		 * the lazy tx reclaim scheme.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
Example #8
/*
 * Au1000 transmit routine.
 */
static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);
	volatile struct ring_dest *ptxd;
	struct db_dest *pDB;
	u32 len, flags;

	if (speed != aup->speed && speed != -1)
		aup->newspeed = speed;

	if ((skb->len == 0) && (aup->newspeed)) {
		if (aup->tx_tail == aup->tx_head) {
			au1k_irda_set_speed(dev, speed);
			aup->newspeed = 0;
		}
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	ptxd = aup->tx_ring[aup->tx_head];
	flags = ptxd->flags;

	if (flags & AU_OWN) {
		printk(KERN_DEBUG "%s: tx_full\n", dev->name);
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return 1;
	} else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
		printk(KERN_DEBUG "%s: tx_full\n", dev->name);
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return 1;
	}

	pDB = aup->tx_db_inuse[aup->tx_head];

#if 0
	if (irda_read(aup, IR_RX_BYTE_CNT) != 0) {
		printk(KERN_DEBUG "tx warning: rx byte cnt %x\n",
				irda_read(aup, IR_RX_BYTE_CNT));
	}
#endif

	if (aup->speed == 4000000) {
		/* FIR */
		skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
		ptxd->count_0 = skb->len & 0xff;
		ptxd->count_1 = (skb->len >> 8) & 0xff;
	} else {
Example #9
static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);

	if (speed != si->speed && speed != -1)
		si->newspeed = speed;

	if (skb->len == 0) {
		if (si->newspeed) {
			si->newspeed = 0;
			pxa_irda_set_speed(si, speed);
		}
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	netif_stop_queue(dev);

	if (!IS_FIR(si)) {
		si->tx_buff.data = si->tx_buff.head;
		si->tx_buff.len  = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);

		STIER = 0;
		STISR = IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6;

		STIER = IER_UUE | IER_TIE;
	} else {
		unsigned long mtt = irda_get_mtt(skb);

		si->dma_tx_buff_len = skb->len;
		skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);

		if (mtt)
			while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
				cpu_relax();

		DCSR(si->rxdma) &= ~DCSR_RUN;
		ICCR0 = 0;

		pxa_irda_fir_dma_tx_start(si);
		ICCR0 = ICCR0_ITR | ICCR0_TXE;
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
Example #10
/*
 * This routine is called to send an error reply.
 */
void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags)
{
	struct sk_buff *skbn;
	unsigned char *dptr;
	int len;

	len = NR_NETWORK_LEN + NR_TRANSPORT_LEN + 1;

	if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL)
		return;

	skb_reserve(skbn, 0);

	dptr = skb_put(skbn, NR_NETWORK_LEN + NR_TRANSPORT_LEN);

	skb_copy_from_linear_data_offset(skb, 7, dptr, AX25_ADDR_LEN);
	dptr[6] &= ~AX25_CBIT;
	dptr[6] &= ~AX25_EBIT;
	dptr[6] |= AX25_SSSID_SPARE;
	dptr += AX25_ADDR_LEN;

	skb_copy_from_linear_data(skb, dptr, AX25_ADDR_LEN);
	dptr[6] &= ~AX25_CBIT;
	dptr[6] |= AX25_EBIT;
	dptr[6] |= AX25_SSSID_SPARE;
	dptr += AX25_ADDR_LEN;

	*dptr++ = sysctl_netrom_network_ttl_initialiser;

	if (mine) {
		*dptr++ = 0;
		*dptr++ = 0;
		*dptr++ = skb->data[15];
		*dptr++ = skb->data[16];
	} else {
		*dptr++ = skb->data[15];
		*dptr++ = skb->data[16];
		*dptr++ = 0;
		*dptr++ = 0;
	}

	*dptr++ = cmdflags;
	*dptr++ = 0;

	if (!nr_route_frame(skbn, NULL))
		kfree_skb(skbn);
}
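This example also uses the offset variant, skb_copy_from_linear_data_offset. Its skbuff.h definition is the same memcpy, starting offset bytes into the linear area (again a sketch of the inline):

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}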
Example #11
static int ack_tx(struct net_device *dev, int acked)
{
  struct arcnet_local *lp = netdev_priv(dev);
  struct sk_buff *ackskb;
  struct archdr *ackpkt;
  int length=sizeof(struct arc_cap);

  BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n",
	 lp->outgoing.skb->protocol, acked);

  BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx");

  /* Now alloc a skb to send back up through the layers: */
  ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC);
  if (ackskb == NULL) {
	  BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n");
	  goto free_outskb;
  }

  skb_put(ackskb, length + ARC_HDR_SIZE );
  ackskb->dev = dev;

  skb_reset_mac_header(ackskb);
  ackpkt = (struct archdr *)skb_mac_header(ackskb);
  /* skb_pull(ackskb, ARC_HDR_SIZE); */

  skb_copy_from_linear_data(lp->outgoing.skb, ackpkt,
		ARC_HDR_SIZE + sizeof(struct arc_cap));
  ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */
  ackpkt->soft.cap.mes.ack=acked;

  BUGMSG(D_PROTO, "Acknowledge for cap packet %x.\n",
	 *((int*)&ackpkt->soft.cap.cookie[0]));

  ackskb->protocol = __constant_htons(ETH_P_ARCNET);

  BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv");
  netif_rx(ackskb);

 free_outskb:
  dev_kfree_skb_irq(lp->outgoing.skb);
  lp->outgoing.proto = NULL; /* We are always finished when in this protocol */

  return 0;
}
Example #12
static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
	u8 *buf = ctl->buf;

	if (ctl->adesc) {
		netdev_dbg(netdev, "%s: TX ongoing\n", __func__);
		/* transfer ongoing */
		return NETDEV_TX_BUSY;
	}

	sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);

	/* copy data to the TX buffer */
	/* the control word, enable IRQ, port 1 and the length */
	*buf++ = 0x00;
	*buf++ = 0x01; /* Port 1 */
	*buf++ = skb->len & 0xff;
	*buf++ = (skb->len >> 8) & 0xff;
	skb_copy_from_linear_data(skb, buf, skb->len);

	dma_sync_single_range_for_device(adapter->dev,
		sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg),
		DMA_TO_DEVICE);

	/* make sure the length is a multiple of 4 */
	if (sg_dma_len(&ctl->sg) % 4)
		sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;

	ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
		&ctl->sg, 1, DMA_TO_DEVICE,
		DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
	if (!ctl->adesc)
		return NETDEV_TX_BUSY;

	ctl->adesc->callback_param = netdev;
	ctl->adesc->callback = ks8842_dma_tx_cb;
	ctl->adesc->tx_submit(ctl->adesc);

	netdev->stats.tx_bytes += skb->len;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
Example #13
int nr_loopback_queue(struct sk_buff *skb)
{
	struct sk_buff *skbn;

	if ((skbn = alloc_skb(skb->len, GFP_ATOMIC)) != NULL) {
		skb_copy_from_linear_data(skb, skb_put(skbn, skb->len), skb->len);
		skb_reset_transport_header(skbn);

		skb_queue_tail(&loopback_queue, skbn);

		if (!nr_loopback_running())
			mod_timer(&loopback_timer, jiffies + 10);
	}

	kfree_skb(skb);
	return 1;
}
Example #14
File: z85230.c Project: 274914765/C
int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
    unsigned long flags;
    
    netif_stop_queue(c->netdevice);
    if(c->tx_next_skb)
    {
        return 1;
    }
    
    /* PC SPECIFIC - DMA limits */
    
    /*
     *    If we will DMA the transmit and its gone over the ISA bus
     *    limit, then copy to the flip buffer
     */
     
    if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
    {
        /* 
         *    Send the flip buffer, and flip the flippy bit.
         *    We don't care which is used when just so long as
         *    we never use the same buffer twice in a row. Since
         *    only one buffer can be going out at a time the other
         *    has to be safe.
         */
        c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
        c->tx_dma_used^=1;    /* Flip temp buffer */
        skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
    }
    else
        c->tx_next_ptr=skb->data;    
    RT_LOCK;
    c->tx_next_skb=skb;
    RT_UNLOCK;
    
    spin_lock_irqsave(c->lock, flags);
    z8530_tx_begin(c);
    spin_unlock_irqrestore(c->lock, flags);
    
    return 0;
}
Example #15
int sbd_pio_tx(struct sbd_ring_buffer *rb, struct sk_buff *skb)
{
	int ret;
	unsigned int qlen = rb->len;
	unsigned int in = *rb->wp;
	unsigned int out = *rb->rp;
	unsigned int count = skb->len;
	unsigned int space = (rb->buff_size - rb->payload_offset);
	u8 *dst;

	pktlog_tx_bottom_skb(rb->sl, skb);

	ret = check_rb_space(rb, qlen, in, out);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(count > space)) {
		mif_err("ERR! {id:%d ch:%d} count %d > space %d\n",
			rb->id, rb->ch, count, space);
		return -ENOSPC;
	}

	barrier();

	dst = rb->buff[in] + rb->payload_offset;

	barrier();

	skb_copy_from_linear_data(skb, dst, count);

	rb->size_v[in] = skb->len;

	barrier();

	*rb->wp = circ_new_ptr(qlen, in, 1);

	/* Commit the item before incrementing the head */
	smp_mb();

	return count;
}
Example #16
static int
net_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;

	if (net_debug > 3)
		printk("%s: sent %d byte packet of type %x\n",
		       dev->name, skb->len,
		       (skb->data[ETH_ALEN+ETH_ALEN] << 8)
		       | skb->data[ETH_ALEN+ETH_ALEN+1]);

	/* keep the upload from being interrupted, since we
	   ask the chip to start transmitting before the
	   whole packet has been completely uploaded. */
	local_irq_save(flags);
	netif_stop_queue(dev);

	/* initiate a transmit sequence */
	writereg(dev, PP_TxCMD, lp->send_cmd);
	writereg(dev, PP_TxLength, skb->len);

	/* Test to see if the chip has allocated memory for the packet */
	if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
		/* Gasp!  It hasn't.  But that shouldn't happen since
		   we're waiting for TxOk, so return 1 and requeue this packet. */
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}

	/* Write the contents of the packet */
	skb_copy_from_linear_data(skb, (void *)(dev->mem_start + PP_TxFrame),
				  skb->len+1);

	local_irq_restore(flags);
	dev->trans_start = jiffies;
	dev_kfree_skb (skb);

	return NETDEV_TX_OK;
}
Example #17
/*
 * MPC5121 FEC requires 4-byte alignment for the TX data buffer!
 */
static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct sk_buff *new_skb;

	/* Alloc new skb */
	new_skb = netdev_alloc_skb(dev, skb->len + 4);
	if (!new_skb)
		return NULL;

	/* Make sure new skb is properly aligned */
	skb_align(new_skb, 4);

	/* Copy data to new skb ... */
	skb_copy_from_linear_data(skb, new_skb->data, skb->len);
	skb_put(new_skb, skb->len);

	/* ... and free an old one */
	dev_kfree_skb_any(skb);

	return new_skb;
}
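Note the ordering here: the data is copied into new_skb->data first, and skb_put() only advances the tail afterwards. The two steps can be fused by letting skb_put() hand back the write pointer, which is the idiom most of the other examples use. A minimal equivalent sketch (tx_skb_align_copy is a hypothetical name; skb_align() is the driver-local helper used above):

/* Hypothetical equivalent of tx_skb_align_workaround() above, fusing
 * the copy and the tail adjustment: skb_put() reserves skb->len bytes
 * and returns the pointer to copy into.
 */
static struct sk_buff *tx_skb_align_copy(struct net_device *dev,
					 struct sk_buff *skb)
{
	struct sk_buff *new_skb = netdev_alloc_skb(dev, skb->len + 4);

	if (!new_skb)
		return NULL;

	skb_align(new_skb, 4);	/* 4-byte alignment for the FEC DMA engine */
	skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
	dev_kfree_skb_any(skb);
	return new_skb;
}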
Example #18
/*
 *	This is where all X.25 information frames pass.
 *
 *      Returns the amount of user data bytes sent on success
 *      or a negative error code on failure.
 */
int x25_output(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char header[X25_EXT_MIN_LEN];
	int err, frontlen, len;
	int sent=0, noblock = X25_SKB_CB(skb)->flags & MSG_DONTWAIT;
	struct x25_sock *x25 = x25_sk(sk);
	int header_len = x25->neighbour->extended ? X25_EXT_MIN_LEN :
						    X25_STD_MIN_LEN;
	int max_len = x25_pacsize_to_bytes(x25->facilities.pacsize_out);

	if (skb->len - header_len > max_len) {
		/* Save a copy of the Header */
		skb_copy_from_linear_data(skb, header, header_len);
		skb_pull(skb, header_len);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			release_sock(sk);
			skbn = sock_alloc_send_skb(sk, frontlen + max_len,
						   noblock, &err);
			lock_sock(sk);
			if (!skbn) {
				if (err == -EWOULDBLOCK && noblock){
					kfree_skb(skb);
					return sent;
				}
				SOCK_DEBUG(sk, "x25_output: fragment alloc"
					       " failed, err=%d, %d bytes "
					       "sent\n", err, sent);
				return err;
			}
Example #19
static int nr_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
{
	struct sk_buff *skbo, *skbn = skb;
	struct nr_sock *nr = nr_sk(sk);

	skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);

	nr_start_idletimer(sk);

	if (more) {
		nr->fraglen += skb->len;
		skb_queue_tail(&nr->frag_queue, skb);
		return 0;
	}

	if (!more && nr->fraglen > 0) {	/* End of fragment */
		nr->fraglen += skb->len;
		skb_queue_tail(&nr->frag_queue, skb);

		if ((skbn = alloc_skb(nr->fraglen, GFP_ATOMIC)) == NULL)
			return 1;

		skb_reset_transport_header(skbn);

		while ((skbo = skb_dequeue(&nr->frag_queue)) != NULL) {
			skb_copy_from_linear_data(skbo,
						  skb_put(skbn, skbo->len),
						  skbo->len);
			kfree_skb(skbo);
		}

		nr->fraglen = 0;
	}

	return sock_queue_rcv_skb(sk, skbn);
}
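This is the receive-side mirror of Example #1: fragments pile up on frag_queue until the final (non-"more") piece arrives, then they are glued back together. A condensed sketch of just the recombination step (function name is illustrative):

/* Illustrative recombination step: drain 'frag_queue' into one skb of
 * 'total_len' bytes, appending each fragment's linear data in order.
 */
static struct sk_buff *reassemble_frags(struct sk_buff_head *frag_queue,
					unsigned int total_len)
{
	struct sk_buff *skbn, *skbo;

	if ((skbn = alloc_skb(total_len, GFP_ATOMIC)) == NULL)
		return NULL;

	while ((skbo = skb_dequeue(frag_queue)) != NULL) {
		skb_copy_from_linear_data(skbo, skb_put(skbn, skbo->len),
					  skbo->len);
		kfree_skb(skbo);
	}

	return skbn;
}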
Example #20
static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const unsigned char *buf, int count)
{
	struct sk_buff *skb = session->reassembly[id], *nskb;
	int size;

	BT_DBG("session %p buf %p count %d", session, buf, count);

	size = (skb) ? skb->len + count : count;

	nskb = alloc_skb(size, GFP_ATOMIC);
	if (!nskb) {
		BT_ERR("Can't allocate memory for CAPI message");
		return;
	}

	if (skb && (skb->len > 0))
		skb_copy_from_linear_data(skb, skb_put(nskb, skb->len), skb->len);

	memcpy(skb_put(nskb, count), buf, count);

	session->reassembly[id] = nskb;

	kfree_skb(skb);
}
Example #21
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* Stop the queue since there's only the one buffer */

	local_irq_save(flags);
	netif_stop_queue(dev);
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;
	local_irq_restore(flags);

	mp->stats.tx_packets++;
	mp->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */
	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32)  mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

	mp->tx_slot ^= 0x10;

	dev_kfree_skb(skb);

	dev->trans_start = jiffies;
	return NETDEV_TX_OK;
}
Example #22
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	unsigned long correlator;
	unsigned long flags;
	unsigned int retry_count;
	unsigned int tx_dropped = 0;
	unsigned int tx_bytes = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	int used_bounce = 0;
	unsigned long data_dma_addr;

	desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
		ibmveth_error_printk("tx: failed to checksum packet\n");
		tx_dropped++;
		goto out;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) + skb->csum_offset;

		desc.fields.flags_len |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		buf[0] = 0;
		buf[1] = 0;
	}

	data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				       skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			ibmveth_error_printk("tx: unable to map xmit buffer\n");
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);
		desc.fields.address = adapter->bounce_buffer_dma;
		tx_map_failed++;
		used_bounce = 1;
		wmb();
	} else
		desc.fields.address = data_dma_addr;

	correlator = 0;
	retry_count = 1024;
	do {
		lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
					     desc.desc, 0, 0, 0, 0, 0,
					     correlator, &correlator);
	} while ((lpar_rc == H_BUSY) && (retry_count--));

	if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
		ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
		ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n",
				     (desc.fields.flags_len & IBMVETH_BUF_VALID) ? 1 : 0,
				     skb->len, desc.fields.address);
		tx_send_failed++;
		tx_dropped++;
	} else {
		tx_packets++;
		tx_bytes += skb->len;
		netdev->trans_start = jiffies;
	}

	if (!used_bounce)
		dma_unmap_single(&adapter->vdev->dev, data_dma_addr,
				 skb->len, DMA_TO_DEVICE);

out:	spin_lock_irqsave(&adapter->stats_lock, flags);
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	spin_unlock_irqrestore(&adapter->stats_lock, flags);

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
Example #23
/**
 * Unpack a just received skb and hand it over to
 * upper layers.
 *
 *  ch		The channel where this skb has been received.
 *  pskb	The received skb.
 */
void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	__u16 len = *((__u16 *) pskb->data);

	skb_put(pskb, 2 + LL_HEADER_LENGTH);
	skb_pull(pskb, 2);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_UNNECESSARY;
	while (len > 0) {
		struct sk_buff *skb;
		int skblen;
		struct ll_header *header = (struct ll_header *)pskb->data;

		skb_pull(pskb, LL_HEADER_LENGTH);
		if ((ch->protocol == CTCM_PROTO_S390) &&
		    (header->type != ETH_P_IP)) {
			if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
				ch->logflags |= LOG_FLAG_ILLEGALPKT;
				/*
				 * Check packet type only if we stick strictly
				 * to S/390's protocol of OS390. This only
				 * supports IP. Otherwise allow any packet
				 * type.
				 */
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): Illegal packet type 0x%04x"
					" - dropping",
					CTCM_FUNTAIL, dev->name, header->type);
			}
			priv->stats.rx_dropped++;
			priv->stats.rx_frame_errors++;
			return;
		}
		pskb->protocol = ntohs(header->type);
		if ((header->length <= LL_HEADER_LENGTH) ||
		    (len <= LL_HEADER_LENGTH)) {
			if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): Illegal packet size %d(%d,%d)"
					"- dropping",
					CTCM_FUNTAIL, dev->name,
					header->length, dev->mtu, len);
				ch->logflags |= LOG_FLAG_ILLEGALSIZE;
			}

			priv->stats.rx_dropped++;
			priv->stats.rx_length_errors++;
			return;
		}
		header->length -= LL_HEADER_LENGTH;
		len -= LL_HEADER_LENGTH;
		if ((header->length > skb_tailroom(pskb)) ||
			(header->length > len)) {
			if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): Packet size %d (overrun)"
					" - dropping", CTCM_FUNTAIL,
						dev->name, header->length);
				ch->logflags |= LOG_FLAG_OVERRUN;
			}

			priv->stats.rx_dropped++;
			priv->stats.rx_length_errors++;
			return;
		}
		skb_put(pskb, header->length);
		skb_reset_mac_header(pskb);
		len -= header->length;
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			if (!(ch->logflags & LOG_FLAG_NOMEM)) {
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): MEMORY allocation error",
						CTCM_FUNTAIL, dev->name);
				ch->logflags |= LOG_FLAG_NOMEM;
			}
			priv->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		skblen = skb->len;
		/*
		 * reset logflags
		 */
		ch->logflags = 0;
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += skblen;
		netif_rx_ni(skb);
		if (len > 0) {
			skb_pull(pskb, header->length);
			if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
				CTCM_DBF_DEV_NAME(TRACE, dev,
					"Overrun in ctcm_unpack_skb");
				ch->logflags |= LOG_FLAG_OVERRUN;
				return;
			}
			skb_put(pskb, LL_HEADER_LENGTH);
		}
	}
}
Example #24
/**
 * Transmit a packet.
 * This is a helper function for ctcm_tx().
 *
 *  ch		Channel to be used for sending.
 *  skb		Pointer to struct sk_buff of packet to send.
 *            The linklevel header has already been set up
 *            by ctcm_tx().
 *
 * returns 0 on success, -ERRNO on failure. (Never fails.)
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	/* we need to acquire the lock for testing the state
	 * otherwise we can have an IRQ changing the state to
	 * TXIDLE after the test but before acquiring the lock.
	 */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		int l = skb->len + LL_HEADER_LENGTH;

		if (ch->collect_len + l > ch->max_bufsize - 2) {
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	/*
	 * Protect skb against being freed by upper layers.
	 */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;

	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skb's above 2G ourselves.
	 */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed, try via copying to
		 * trans_skb. trans_skb usually has a pre-allocated
		 * idal.
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/*
			 * Remove our header. It gets added
			 * again on retransmit.
			 */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
					(unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		/*
		 * Remove our header. It gets added
		 * again on retransmit.
		 */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}
Example #25
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err=0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb was not generated by a local socket.
	 */
	if (!skb->local_df && skb->len > mtu) {
		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(fh, &rt->rt6i_dst.addr);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if(!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->dst);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while(left > 0)	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(fh, &rt->rt6i_dst.addr);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
Example #26
/*
 *	All outgoing AX.25 I frames pass via this routine. Therefore this is
 *	where the fragmentation of frames takes place. If fragment is set to
 *	zero then we are not allowed to do fragmentation, even if the frame
 *	is too large.
 */
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
    struct sk_buff *skbn;
    unsigned char *p;
    int frontlen, len, fragno, ka9qfrag, first = 1;

    if (paclen < 16) {
        WARN_ON_ONCE(1);
        kfree_skb(skb);
        return;
    }

    if ((skb->len - 1) > paclen) {
        if (*skb->data == AX25_P_TEXT) {
            skb_pull(skb, 1); /* skip PID */
            ka9qfrag = 0;
        } else {
            paclen -= 2;	/* Allow for fragment control info */
            ka9qfrag = 1;
        }

        fragno = skb->len / paclen;
        if (skb->len % paclen == 0) fragno--;

        frontlen = skb_headroom(skb);	/* Address space + CTRL */

        while (skb->len > 0) {
            spin_lock_bh(&ax25_frag_lock);
            if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
                spin_unlock_bh(&ax25_frag_lock);
                printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
                return;
            }

            if (skb->sk != NULL)
                skb_set_owner_w(skbn, skb->sk);

            spin_unlock_bh(&ax25_frag_lock);

            len = (paclen > skb->len) ? skb->len : paclen;

            if (ka9qfrag == 1) {
                skb_reserve(skbn, frontlen + 2);
                skb_set_network_header(skbn,
                                       skb_network_offset(skb));
                skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                p = skb_push(skbn, 2);

                *p++ = AX25_P_SEGMENT;

                *p = fragno--;
                if (first) {
                    *p |= AX25_SEG_FIRST;
                    first = 0;
                }
            } else {
                skb_reserve(skbn, frontlen + 1);
                skb_set_network_header(skbn,
                                       skb_network_offset(skb));
                skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                p = skb_push(skbn, 1);
                *p = AX25_P_TEXT;
            }

            skb_pull(skb, len);
            skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
        }

        kfree_skb(skb);
    } else {
        skb_queue_tail(&ax25->write_queue, skb);	  /* Throw it on the queue */
    }

    switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
    case AX25_PROTO_STD_SIMPLEX:
    case AX25_PROTO_STD_DUPLEX:
        ax25_kick(ax25);
        break;

#ifdef CONFIG_AX25_DAMA_SLAVE
    /*
     * A DAMA slave is _required_ to work as normal AX.25L2V2
     * if no DAMA master is available.
     */
    case AX25_PROTO_DAMA_SLAVE:
        if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
        break;
#endif
    }
}
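Examples #1 (NET/ROM), #18 (X.25) and this one all follow the same skeleton: save the protocol header, strip it, peel off fixed-size chunks, and re-prepend the saved header on every fragment, flagging "more follows" until the last piece. A condensed, protocol-neutral sketch of that shared pattern (HDR_LEN, MAX_DATA, MORE_FLAG and the function name are illustrative stand-ins, not any one protocol's wire format):

#define HDR_LEN   5	/* illustrative header length */
#define MAX_DATA  216	/* illustrative max payload per fragment */
#define MORE_FLAG 0x80	/* illustrative "more fragments" bit */

static void proto_fragment(struct sock *sk, struct sk_buff *skb,
			   struct sk_buff_head *queue)
{
	unsigned char header[HDR_LEN];
	struct sk_buff *skbn;
	int err, frontlen, len;

	/* 1. Save the header, then strip it from the original skb. */
	skb_copy_from_linear_data(skb, header, HDR_LEN);
	skb_pull(skb, HDR_LEN);
	frontlen = skb_headroom(skb);

	/* 2. Peel off MAX_DATA-sized chunks until nothing is left. */
	while (skb->len > 0) {
		skbn = sock_alloc_send_skb(sk, frontlen + MAX_DATA, 0, &err);
		if (skbn == NULL)
			return;
		skb_reserve(skbn, frontlen);

		len = (MAX_DATA > skb->len) ? skb->len : MAX_DATA;
		skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
		skb_pull(skb, len);

		/* 3. Re-prepend the saved header on every fragment and
		 *    mark "more fragments follow" while data remains.
		 */
		memcpy(skb_push(skbn, HDR_LEN), header, HDR_LEN);
		if (skb->len > 0)
			skbn->data[HDR_LEN - 1] |= MORE_FLAG;

		skb_queue_tail(queue, skbn);
	}

	kfree_skb(skb);
}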
Example #27
static int send_packet(struct net_device *dev, struct sk_buff *skb)
{
	elp_device *adapter = dev->priv;
	unsigned long target;
	unsigned long flags;

	/*
	 * make sure the length is even and no shorter than 60 bytes
	 */
	unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);

	if (test_and_set_bit(0, (void *) &adapter->busy)) {
		if (elp_debug >= 2)
			printk(KERN_DEBUG "%s: transmit blocked\n", dev->name);
		return FALSE;
	}

	adapter->stats.tx_bytes += nlen;

	/*
	 * send the adapter a transmit packet command. Ignore segment and offset
	 * and make sure the length is even
	 */
	adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
	adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
	adapter->tx_pcb.data.xmit_pkt.buf_ofs
	    = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0;	/* Unused */
	adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;

	if (!send_pcb(dev, &adapter->tx_pcb)) {
		adapter->busy = 0;
		return FALSE;
	}
	/* if this happens, we die */
	if (test_and_set_bit(0, (void *) &adapter->dmaing))
		printk(KERN_DEBUG "%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);

	adapter->current_dma.direction = 1;
	adapter->current_dma.start_time = jiffies;

	if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
		skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen);
		memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
		target = isa_virt_to_bus(adapter->dma_buffer);
	} else {
		target = isa_virt_to_bus(skb->data);
	}
	adapter->current_dma.skb = skb;

	flags=claim_dma_lock();
	disable_dma(dev->dma);
	clear_dma_ff(dev->dma);
	set_dma_mode(dev->dma, 0x48);	/* dma memory -> io */
	set_dma_addr(dev->dma, target);
	set_dma_count(dev->dma, nlen);
	outb_control(adapter->hcr_val | DMAE | TCEN, dev);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	if (elp_debug >= 3)
		printk(KERN_DEBUG "%s: DMA transfer started\n", dev->name);

	return TRUE;
}
Example #28
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
	struct mlx5_wq_cyc       *wq   = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg;

	u8  opcode = MLX5_OPCODE_SEND;
	dma_addr_t dma_addr = 0;
	bool bf = false;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;
	int i;

	memset(wqe, 0, sizeof(*wqe));

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		eseg->cs_flags	= MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
	else
		sq->stats.csum_offload_none++;

	if (sq->cc != sq->prev_cc) {
		sq->prev_cc = sq->cc;
		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
	}

	if (skb_is_gso(skb)) {
		u32 payload_len;

		eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);
		opcode       = MLX5_OPCODE_LSO;
		ihs          = skb_transport_offset(skb) + tcp_hdrlen(skb);
		payload_len  = skb->len - ihs;
		MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
					(skb_shinfo(skb)->gso_segs - 1) * ihs;
		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		bf = sq->bf_budget &&
		     !skb->xmit_more &&
		     !skb_shinfo(skb)->nr_frags;
		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
		MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
							ETH_ZLEN);
	}

	skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
	skb_pull_inline(skb, ihs);

	eseg->inline_hdr_sz = cpu_to_be16(ihs);

	ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
			       MLX5_SEND_WQE_DS);
	dseg    = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;

	MLX5E_TX_SKB_CB(skb)->num_dma = 0;

	headlen = skb_headlen(skb);
	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen);
		MLX5E_TX_SKB_CB(skb)->num_dma++;

		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz);
		MLX5E_TX_SKB_CB(skb)->num_dma++;

		dseg++;
	}

	ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	sq->skb[pi] = skb;

	MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
							MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;

	netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);

	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
		int bf_sz = 0;

		if (bf && sq->uar_bf_map)
			bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;

		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
		mlx5e_tx_notify_hw(sq, wqe, bf_sz);
	}

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((sq->pc & wq->sz_m1) > sq->edge)
		mlx5e_send_nop(sq, false);

	sq->bf_budget = bf ? sq->bf_budget - 1 : 0;

	sq->stats.packets++;
	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, skb);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
Example #29
int rpl_ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       int (*output)(OVS_VPORT_OUTPUT_PARAMS))
{
	struct iphdr *iph;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	dev = rt->dst.dev;

	/* for offloaded checksums cleanup checksum before fragmentation */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(skb);
	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
		mtu = IPCB(skb)->frag_max_size;

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when see the first bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = OUTPUT(net, sk, skb);

			if (!err)
				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	iph = ip_hdr(skb);

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;		/* Where to start from */

	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}

		/* Allocate buffer */
		skb2 = alloc_skb(len + hlen + ll_rs, GFP_ATOMIC);
		if (!skb2) {
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		if (IPCB(skb)->flags & IPSKB_FRAG_PMTU)
			iph->frag_off |= htons(IP_DF);

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF on each bit
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = OUTPUT(net, sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	return err;
}
Example #30
static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	int len,i;
#ifndef NO_NOPCOMMANDS
	int next_nop;
#endif
	struct priv *p = netdev_priv(dev);

	if(skb->len > XMIT_BUFF_SIZE)
	{
		printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len);
		return NETDEV_TX_OK;
	}

	netif_stop_queue(dev);

#if(NUM_XMIT_BUFFS > 1)
	if(test_and_set_bit(0,(void *) &p->lock)) {
		printk("%s: Queue was locked\n",dev->name);
		return NETDEV_TX_BUSY;
	}
	else
#endif
	{
		len = skb->len;
		if (len < ETH_ZLEN) {
			memset((void *)p->xmit_cbuffs[p->xmit_count], 0,
			       ETH_ZLEN);
			len = ETH_ZLEN;
		}
		skb_copy_from_linear_data(skb, (void *)p->xmit_cbuffs[p->xmit_count], skb->len);

#if (NUM_XMIT_BUFFS == 1)
#	ifdef NO_NOPCOMMANDS

#ifdef DEBUG
		if(p->scb->cus & CU_ACTIVE)
		{
			printk("%s: Hmmm .. CU is still running and we wanna send a new packet.\n",dev->name);
			printk("%s: stat: %04x %04x\n",dev->name,p->scb->cus,swab16(p->xmit_cmds[0]->cmd_status));
		}
#endif

		p->xmit_buffs[0]->size = swab16(TBD_LAST | len);
		for(i=0;i<16;i++)
		{
			p->xmit_cmds[0]->cmd_status = 0;
			WAIT_4_SCB_CMD();
			if( (p->scb->cus & CU_STATUS) == CU_SUSPEND)
				p->scb->cmd_cuc = CUC_RESUME;
			else
			{
				p->scb->cbl_offset = make16(p->xmit_cmds[0]);
				p->scb->cmd_cuc = CUC_START;
			}

			sun3_attn586();
			if(!i)
				dev_kfree_skb(skb);
			WAIT_4_SCB_CMD();
			if( (p->scb->cus & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */
				break;
			if(p->xmit_cmds[0]->cmd_status)
				break;
			if(i==15)
				printk("%s: Can't start transmit-command.\n",dev->name);
		}
#	else
		next_nop = (p->nop_point + 1) & 0x1;
		p->xmit_buffs[0]->size = swab16(TBD_LAST | len);

		p->xmit_cmds[0]->cmd_link	 = p->nop_cmds[next_nop]->cmd_link
			= make16((p->nop_cmds[next_nop]));
		p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;

		p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
		p->nop_point = next_nop;
		dev_kfree_skb(skb);
#	endif
#else
		p->xmit_buffs[p->xmit_count]->size = swab16(TBD_LAST | len);
		if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS )
			next_nop = 0;

		p->xmit_cmds[p->xmit_count]->cmd_status	= 0;
		/* linkpointer of xmit-command already points to next nop cmd */
		p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop]));
		p->nop_cmds[next_nop]->cmd_status = 0;

		p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
		p->xmit_count = next_nop;

		{
			unsigned long flags;
			local_irq_save(flags);
			if(p->xmit_count != p->xmit_last)
				netif_wake_queue(dev);
			p->lock = 0;
			local_irq_restore(flags);
		}
		dev_kfree_skb(skb);
#endif
	}
	return NETDEV_TX_OK;
}
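Finally, for readers without a kernel tree at hand, here is a toy user-space analogue (purely illustrative; the struct and names are invented) that captures the one property every example above relies on: the helper is a bounded memcpy out of the packet's linear buffer, nothing more.

#include <assert.h>
#include <string.h>

/* Toy stand-in for the kernel's sk_buff linear area (illustration only). */
struct toy_skb {
	unsigned char *data;	/* start of valid data */
	unsigned int len;	/* bytes of valid data */
};

static void toy_copy_from_linear_data(const struct toy_skb *skb,
				      void *to, unsigned int len)
{
	assert(len <= skb->len);	/* callers must bound len themselves */
	memcpy(to, skb->data, len);
}

int main(void)
{
	unsigned char payload[] = "abcdef";
	struct toy_skb skb = { payload, sizeof(payload) - 1 };
	unsigned char out[4];

	toy_copy_from_linear_data(&skb, out, sizeof(out));
	assert(memcmp(out, "abcd", 4) == 0);
	return 0;
}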