Example #1
0
static int bfusb_send_bulk(struct bfusb_data *data, struct sk_buff *skb)
{
	struct bfusb_data_scb *scb = (void *) skb->cb;
	struct urb *urb = bfusb_get_completed(data);
	int err, pipe;

	BT_DBG("bfusb %p skb %p len %d", data, skb, skb->len);

	if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC)))
		return -ENOMEM;

	pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep);

	usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len,
			bfusb_tx_complete, skb);

	scb->urb = urb;

	skb_queue_tail(&data->pending_q, skb);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err) {
		BT_ERR("%s bulk tx submit failed urb %p err %d", 
					data->hdev->name, urb, err);
		skb_unlink(skb, &data->pending_q);
		usb_free_urb(urb);
	} else
		atomic_inc(&data->pending_tx);

	return err;
}
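/*
 * A minimal sketch (hypothetical helper, not from the driver) of the
 * queue-then-submit pattern above: the skb goes on pending_q *before*
 * usb_submit_urb(), because the completion handler may run immediately
 * and expects to find the skb on the queue; a failed submit unwinds the
 * enqueue with skb_unlink().
 */
static int sketch_send(struct usb_device *udev, unsigned int pipe,
		       struct sk_buff_head *pending_q, struct sk_buff *skb,
		       usb_complete_t complete_fn)
{
	struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
	int err;

	if (!urb)
		return -ENOMEM;

	usb_fill_bulk_urb(urb, udev, pipe, skb->data, skb->len,
			  complete_fn, skb);
	skb_queue_tail(pending_q, skb);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err) {
		skb_unlink(skb, pending_q);	/* undo the enqueue */
		usb_free_urb(urb);
	}

	return err;
}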
Example #2
0
static void aun_tx_ack(unsigned long seq, int result)
{
	struct sk_buff *skb;
	unsigned long flags;
	struct ec_cb *eb;

	spin_lock_irqsave(&aun_queue_lock, flags);
	skb = skb_peek(&aun_queue);
	while (skb && skb != (struct sk_buff *)&aun_queue)
	{
		struct sk_buff *newskb = skb->next;
		eb = (struct ec_cb *)&skb->cb;
		if (eb->seq == seq)
			goto foundit;

		skb = newskb;
	}
	spin_unlock_irqrestore(&aun_queue_lock, flags);
	printk(KERN_DEBUG "AUN: unknown sequence %ld\n", seq);
	return;

foundit:
	tx_result(skb->sk, eb->cookie, result);
	skb_unlink(skb);
	spin_unlock_irqrestore(&aun_queue_lock, flags);
}
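/*
 * A minimal sketch of the same lookup written against the list helpers
 * of newer kernels, where skb_queue_walk() replaces the hand-rolled
 * head-sentinel loop and skb_unlink() takes the queue as a second
 * argument; sketch_tx_ack() and its parameters are hypothetical.
 */
static void sketch_tx_ack(struct sk_buff_head *q, spinlock_t *lock,
			  unsigned long seq, int result)
{
	struct sk_buff *skb, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	skb_queue_walk(q, skb) {
		struct ec_cb *eb = (struct ec_cb *)&skb->cb;

		if (eb->seq == seq) {
			found = skb;
			break;
		}
	}
	if (found) {
		tx_result(found->sk, ((struct ec_cb *)&found->cb)->cookie,
			  result);
		skb_unlink(found, q);	/* two-argument form */
	}
	spin_unlock_irqrestore(lock, flags);
}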
Example #3
0
static void bfusb_tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *) urb->context;
	struct bfusb_data *data = (struct bfusb_data *) skb->dev;

	BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len);

	atomic_dec(&data->pending_tx);

	if (!test_bit(HCI_RUNNING, &data->hdev->flags))
		return;

	if (!urb->status)
		data->hdev->stat.byte_tx += skb->len;
	else
		data->hdev->stat.err_tx++;

	read_lock(&data->lock);

	skb_unlink(skb, &data->pending_q);
	skb_queue_tail(&data->completed_q, skb);

	bfusb_tx_wakeup(data);

	read_unlock(&data->lock);
}
Example #4
0
static void try_fill_recv(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	struct scatterlist sg[1+MAX_SKB_FRAGS];
	int num, err;

	sg_init_table(sg, 1+MAX_SKB_FRAGS);
	for (;;) {
		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
		if (unlikely(!skb))
			break;

		skb_put(skb, MAX_PACKET_LEN);
		vnet_hdr_to_sg(sg, skb);
		num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
		skb_queue_head(&vi->recv, skb);
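		/*
		 * The skb sits on vi->recv before add_buf() so the receive
		 * path can find it once the device fills the buffer; if the
		 * ring is full, add_buf() fails and the enqueue is unwound
		 * below.
		 */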

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
		if (err) {
			skb_unlink(skb, &vi->recv);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	}
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
}
Example #5
0
int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff *skb2, *n, *ack = NULL;
	int wakeup = 0;
	int try_retrans = 0;
	unsigned long reftime = cb->stamp;
	unsigned long pkttime;
	unsigned short xmit_count;
	unsigned short segnum;

	skb_queue_walk_safe(q, skb2, n) {
		struct dn_skb_cb *cb2 = DN_SKB_CB(skb2);

		if (dn_before_or_equal(cb2->segnum, acknum))
			ack = skb2;

		if (ack == NULL)
			continue;

		try_retrans = 0;
		wakeup = 1;
		pkttime = cb2->stamp;
		xmit_count = cb2->xmit_count;
		segnum = cb2->segnum;
		skb_unlink(ack, q);
		kfree_skb(ack);
		ack = NULL;

		WARN_ON(xmit_count == 0);

		if (xmit_count == 1) {
			if (dn_equal(segnum, acknum))
				dn_nsp_rtt(sk, (long)(pkttime - reftime));

			if (scp->snd_window < scp->max_window)
				scp->snd_window++;
		}

		if (xmit_count > 1)
			try_retrans = 1;
	}

	if (try_retrans)
		dn_nsp_output(sk);

	return wakeup;
}
Example #6
0
static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int num, err;
	struct scatterlist sg[1+MAX_SKB_FRAGS];
	struct virtio_net_hdr *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	DECLARE_MAC_BUF(mac);

	sg_init_table(sg, 1+MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p %s\n", dev->name, skb, print_mac(mac, dest));

	free_old_xmit_skbs(vi);

	/* Encode metadata header at front. */
	hdr = skb_vnet_hdr(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = skb->csum_start - skb_headroom(skb);
		hdr->csum_offset = skb->csum_offset;
	} else {
		hdr->flags = 0;
		hdr->csum_offset = hdr->csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4_ECN;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
	} else {
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->gso_size = 0;
	}

	vnet_hdr_to_sg(sg, skb);
	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
	__skb_queue_head(&vi->send, skb);
	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
	if (err) {
		pr_debug("%s: virtio not prepared to send\n", dev->name);
		skb_unlink(skb, &vi->send);
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	vi->svq->vq_ops->kick(vi->svq);

	return 0;
}
Example #7
0
/**
 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame
 *
 * @next_hop: output argument for next hop address
 * @skb: frame to be sent
 * @dev: network device the frame will be sent through
 *
 * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is
 * found, the function will start a path discovery and queue the frame so it is
 * sent when the path is resolved. This means the caller must not free the skb
 * in this case.
 */
int mesh_nexthop_lookup(u8 *next_hop, struct sk_buff *skb,
		struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct sk_buff *skb_to_free = NULL;
	struct mesh_path *mpath;
	int err = 0;

	rcu_read_lock();
	mpath = mesh_path_lookup(skb->data, dev);

	if (!mpath) {
		mesh_path_add(skb->data, dev);
		mpath = mesh_path_lookup(skb->data, dev);
		if (!mpath) {
			dev_kfree_skb(skb);
			sdata->u.sta.mshstats.dropped_frames_no_route++;
			err = -ENOSPC;
			goto endlookup;
		}
	}

	if (mpath->flags & MESH_PATH_ACTIVE) {
		if (time_after(jiffies, mpath->exp_time -
			msecs_to_jiffies(sdata->u.sta.mshcfg.path_refresh_time))
				&& skb->pkt_type != PACKET_OTHERHOST
				&& !(mpath->flags & MESH_PATH_RESOLVING)
				&& !(mpath->flags & MESH_PATH_FIXED)) {
			mesh_queue_preq(mpath,
					PREQ_Q_F_START | PREQ_Q_F_REFRESH);
		}
		memcpy(next_hop, mpath->next_hop->addr,
				ETH_ALEN);
	} else {
		if (!(mpath->flags & MESH_PATH_RESOLVING)) {
			/* Start discovery only if it is not running yet */
			mesh_queue_preq(mpath, PREQ_Q_F_START);
		}

		if (skb_queue_len(&mpath->frame_queue) >=
				MESH_FRAME_QUEUE_LEN) {
			skb_to_free = mpath->frame_queue.next;
			skb_unlink(skb_to_free, &mpath->frame_queue);
		}

		skb_queue_tail(&mpath->frame_queue, skb);
		if (skb_to_free)
			mesh_path_discard_frame(skb_to_free, dev);
		err = -ENOENT;
	}

endlookup:
	rcu_read_unlock();
	return err;
}
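/* hypothetical stand-in for the normal transmit path */
static int sketch_deliver(struct sk_buff *skb, struct net_device *dev,
			  const u8 *next_hop);

/*
 * A minimal caller sketch for the contract documented above: on a
 * nonzero return the frame was queued for path discovery (or dropped)
 * inside mesh_nexthop_lookup(), so the caller must not free it.
 * sketch_mesh_tx() and sketch_deliver() are hypothetical.
 */
static int sketch_mesh_tx(struct sk_buff *skb, struct net_device *dev)
{
	u8 next_hop[ETH_ALEN];

	if (mesh_nexthop_lookup(next_hop, skb, dev))
		return 0;	/* skb is now owned by the mesh path code */

	return sketch_deliver(skb, dev, next_hop);
}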
Example #8
0
void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
{
	struct sk_buff *skb, *pnext;
	struct brcmf_if *ifp;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	u8 ifidx;
	int ret;

	brcmf_dbg(DATA, "Enter\n");

	skb_queue_walk_safe(skb_list, skb, pnext) {
		skb_unlink(skb, skb_list);

		/* process and remove protocol-specific header */
		ret = brcmf_proto_hdrpull(drvr, drvr->fw_signals, &ifidx, skb);
		ifp = drvr->iflist[ifidx];

		if (ret || !ifp || !ifp->ndev) {
			if ((ret != -ENODATA) && ifp)
				ifp->stats.rx_errors++;
			brcmu_pkt_buf_free_skb(skb);
			continue;
		}

		skb->dev = ifp->ndev;
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (skb->pkt_type == PACKET_MULTICAST)
			ifp->stats.multicast++;

		/* Process special event packets */
		brcmf_fweh_process_skb(drvr, skb);

		if (!(ifp->ndev->flags & IFF_UP)) {
			brcmu_pkt_buf_free_skb(skb);
			continue;
		}

		ifp->stats.rx_bytes += skb->len;
		ifp->stats.rx_packets++;

		if (in_interrupt())
			netif_rx(skb);
		else
			/* If the receive is not processed inside an ISR,
			 * the softirqd must be woken explicitly to service the
			 * NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
			 */
			netif_rx_ni(skb);
	}
}
Example #9
0
static void p54u_rx_cb(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *) urb->context;
	struct p54u_rx_info *info = (struct p54u_rx_info *)skb->cb;
	struct ieee80211_hw *dev = info->dev;
	struct p54u_priv *priv = dev->priv;

	if (unlikely(urb->status)) {
		info->urb = NULL;
		usb_free_urb(urb);
		return;
	}

	skb_unlink(skb, &priv->rx_queue);
	skb_put(skb, urb->actual_length);
	if (!priv->hw_type)
		skb_pull(skb, sizeof(struct net2280_tx_hdr));

	if (p54_rx(dev, skb)) {
		skb = dev_alloc_skb(MAX_RX_SIZE);
		if (unlikely(!skb)) {
			usb_free_urb(urb);
			/* TODO check rx queue length and refill *somewhere* */
			return;
		}

		info = (struct p54u_rx_info *) skb->cb;
		info->urb = urb;
		info->dev = dev;
		urb->transfer_buffer = skb_tail_pointer(skb);
		urb->context = skb;
		skb_queue_tail(&priv->rx_queue, skb);
	} else {
		if (!priv->hw_type)
			skb_push(skb, sizeof(struct net2280_tx_hdr));

		skb_reset_tail_pointer(skb);
		skb_trim(skb, 0);
		if (urb->transfer_buffer != skb_tail_pointer(skb)) {
			/* this should not happen */
			WARN_ON(1);
			urb->transfer_buffer = skb_tail_pointer(skb);
		}

		skb_queue_tail(&priv->rx_queue, skb);
	}

	usb_submit_urb(urb, GFP_ATOMIC);
}
Example #10
0
void
brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
                  bool (*fn)(struct sk_buff *, void *), void *arg)
{
    struct sk_buff_head *q;
    struct sk_buff *p, *next;

    q = &pq->q[prec].skblist;
    skb_queue_walk_safe(q, p, next) {
        if (fn == NULL || (*fn) (p, arg)) {
            skb_unlink(p, q);
            brcmu_pkt_buf_free_skb(p);
            pq->len--;
        }
    }
}
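/*
 * A minimal usage sketch for the flush filter above; the cb-resident tag
 * and both sketch_* helpers are hypothetical. Passing NULL instead of a
 * match function would flush every packet of that precedence.
 */
struct sketch_pkt_tag { int ifidx; };	/* hypothetical tag in skb->cb */

static bool sketch_match_ifidx(struct sk_buff *p, void *arg)
{
	return ((struct sketch_pkt_tag *)p->cb)->ifidx == *(int *)arg;
}

static void sketch_flush_ifidx(struct pktq *pq, int prec, int ifidx)
{
	brcmu_pktq_pflush(pq, prec, true, sketch_match_ifidx, &ifidx);
}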
Example #11
0
/**
 * tx_urb_complete - completes the execution of a URB
 * @urb: a URB
 *
 * This function is called if the URB has been transferred to a device or an
 * error has happened.
 */
static void tx_urb_complete(struct urb *urb)
{
	int r;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;
	struct zd_usb *usb;
	struct zd_usb_tx *tx;

	skb = (struct sk_buff *)urb->context;
	info = IEEE80211_SKB_CB(skb);
	/*
	 * grab 'usb' pointer before handing off the skb (since
	 * it might be freed by zd_mac_tx_to_dev or mac80211)
	 */
	usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
	tx = &usb->tx;

	switch (urb->status) {
	case 0:
		break;
	case -ESHUTDOWN:
	case -EINVAL:
	case -ENODEV:
	case -ENOENT:
	case -ECONNRESET:
	case -EPIPE:
		dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
		break;
	default:
		dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
		goto resubmit;
	}
free_urb:
	skb_unlink(skb, &usb->tx.submitted_skbs);
	zd_mac_tx_to_dev(skb, urb->status);
	usb_free_urb(urb);
	tx_dec_submitted_urbs(usb);
	return;
resubmit:
	usb_anchor_urb(urb, &tx->submitted);
	r = usb_submit_urb(urb, GFP_ATOMIC);
	if (r) {
		usb_unanchor_urb(urb);
		dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r);
		goto free_urb;
	}
}
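/*
 * A hedged helper sketch (not part of the driver) restating the switch
 * above: endpoints or devices that are gone for good get reported and
 * freed, anything else is worth a resubmit.
 */
static bool sketch_urb_error_is_fatal(int status)
{
	switch (status) {
	case -ESHUTDOWN:	/* host controller shut down */
	case -ENODEV:		/* device disconnected */
	case -ENOENT:		/* urb was killed */
	case -ECONNRESET:	/* urb was unlinked */
	case -EPIPE:		/* endpoint stalled */
	case -EINVAL:
		return true;
	default:
		return false;	/* transient: try resubmitting */
	}
}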
Example #12
0
static void shaper_kick(struct shaper *shaper)
{
	struct sk_buff *skb;

	/*
	 *	Walk the list (may be empty)
	 */

	while((skb=skb_peek(&shaper->sendq))!=NULL)
	{
		/*
		 *	Each packet due to go out by now (within an error
		 *	of SHAPER_BURST) gets kicked onto the link
		 */

		if(sh_debug)
			printk("Clock = %ld, jiffies = %ld\n", SHAPERCB(skb)->shapeclock, jiffies);
		if(time_before_eq(SHAPERCB(skb)->shapeclock, jiffies + SHAPER_BURST))
		{
			/*
			 *	Pull the frame and get interrupts back on.
			 */

			skb_unlink(skb, &shaper->sendq);
			if (shaper->recovery <
			    SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
				shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
			/*
			 *	Pass on to the physical target device via
			 *	our low level packet thrower.
			 */

			SHAPERCB(skb)->shapepend=0;
			shaper_queue_xmit(shaper, skb);	/* Fire */
		}
		else
			break;
	}

	/*
	 *	Next kick.
	 */

	if(skb!=NULL)
		mod_timer(&shaper->timer, SHAPERCB(skb)->shapeclock);
}
Example #13
0
/*
 * precedence based dequeue with match function. Passing a NULL pointer
 * for the match function parameter is considered a wildcard, so the
 * first packet on the queue is returned. In that case it is no different
 * from brcmu_pktq_pdeq() above.
 */
struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
                                      bool (*match_fn)(struct sk_buff *skb,
                                              void *arg), void *arg)
{
    struct sk_buff_head *q;
    struct sk_buff *p, *next;

    q = &pq->q[prec].skblist;
    skb_queue_walk_safe(q, p, next) {
        if (match_fn == NULL || match_fn(p, arg)) {
            skb_unlink(p, q);
            pq->len--;
            return p;
        }
    }
    return NULL;
}
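/*
 * A minimal sketch (hypothetical helper) of the wildcard case the
 * comment above describes: with a NULL match function the matched
 * dequeue degenerates to a plain precedence dequeue.
 */
static struct sk_buff *sketch_pdeq_any(struct pktq *pq, int prec)
{
	return brcmu_pktq_pdeq_match(pq, prec, NULL, NULL);
}
Example #14
0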
static int spi_transmit_data(struct spi_device *spi,
		unsigned char *buf, size_t len)
{
	int ret;

	struct spi_message msg;
	struct spi_transfer t = {
		.len = len,
		.tx_buf = buf,
		.delay_usecs = 10,
	};

	spi_message_init(&msg);
	spi_message_add_tail(&t, &msg);

	ret = spi_sync(spi, &msg);
	if (ret < 0)
		mif_err("spi_sync() fail(%d)\n", ret);

	return ret;
}

static int spi_boot_tx_skb(struct link_device *ld, struct io_device *iod,
			struct sk_buff *skb)
{
	int ret = -EINVAL;

	struct spi_boot_link_device *sbld = to_spi_boot_link_dev(ld);

	unsigned char *buf = skb->data + SIPC5_MIN_HEADER_SIZE;
	int len = skb->len - SIPC5_MIN_HEADER_SIZE;

	skb_queue_tail(&sbld->tx_q, skb);

	ret = spi_transmit_data(sbld->spi, buf, len);
	if (ret < 0) {
		mif_err("spi_transmit_data() failed(%d)\n", ret);
		goto exit;
	}

exit:
	skb_unlink(skb, &sbld->tx_q);

	return ret;
}
Example #15
0
static int bfusb_rx_submit(struct bfusb_data *data, struct urb *urb)
{
	struct bfusb_data_scb *scb;
	struct sk_buff *skb;
	int err, pipe, size = HCI_MAX_FRAME_SIZE + 32;

	BT_DBG("bfusb %p urb %p", data, urb);

	if (!urb) {
		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb)
			return -ENOMEM;
	}

	skb = bt_skb_alloc(size, GFP_ATOMIC);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	skb->dev = (void *) data;

	scb = (struct bfusb_data_scb *) skb->cb;
	scb->urb = urb;

	pipe = usb_rcvbulkpipe(data->udev, data->bulk_in_ep);

	usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, size,
			bfusb_rx_complete, skb);

	skb_queue_tail(&data->pending_q, skb);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err) {
		BT_ERR("%s bulk rx submit failed urb %p err %d",
					data->hdev->name, urb, err);
		skb_unlink(skb, &data->pending_q);
		kfree_skb(skb);
		usb_free_urb(urb);
	}

	return err;
}
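Example #16
0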
static ssize_t
nanopci_read(struct file *filp, char __user *buf, size_t count, loff_t *off)
{
   struct nano_pci_card *card = filp->private_data;
   struct sk_buff *skb;

   size_t n;

   if(down_interruptible(&card->sem))
      return -ERESTARTSYS;

   while((skb = skb_peek(&card->rx_queue)) == NULL) {
      up(&card->sem);
      if(filp->f_flags & O_NONBLOCK)
	 return -EAGAIN;
      if(wait_event_interruptible(card->rx_waitqueue, 
				  !skb_queue_empty(&card->rx_queue)))
	 return -ERESTARTSYS;
      if(down_interruptible(&card->sem))
	 return -ERESTARTSYS;
   }

   n = skb->len;
   if(n > count)
      n = count;
   if(copy_to_user(buf, skb->data, n)) {
      KDEBUG(ERROR, "EFAULT copying out data");
      up(&card->sem);
      return -EFAULT;
   }
   skb_pull(skb, n);
   if(skb->len == 0) {
      skb_unlink(skb, &card->rx_queue);
      dev_kfree_skb(skb);
   }
   card->status.irq_handled = card->status.irq_count - 
     skb_queue_len(&card->rx_queue);

   up(&card->sem);
   return n;
}
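/*
 * A minimal sketch of the partial-consume idiom above (hypothetical
 * helper; the caller is assumed to serialize access the way
 * nanopci_read() does with card->sem): trim consumed bytes from the
 * head of the skb and only unlink and free it once it is empty, so a
 * short read leaves the remainder queued for the next caller.
 */
static ssize_t sketch_consume(struct sk_buff_head *q, char __user *buf,
			      size_t count)
{
	struct sk_buff *skb = skb_peek(q);
	size_t n;

	if (!skb)
		return -EAGAIN;

	n = min_t(size_t, count, skb->len);
	if (copy_to_user(buf, skb->data, n))
		return -EFAULT;

	skb_pull(skb, n);		/* drop the consumed bytes */
	if (skb->len == 0) {
		skb_unlink(skb, q);
		dev_kfree_skb(skb);
	}

	return n;
}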
Example #17
0
/**
 * zd_usb_tx: initiates transfer of a frame to the device
 *
 * @usb: the zd1211rw-private USB structure
 * @skb: a &struct sk_buff pointer
 *
 * This function transmits a frame to the device. It doesn't wait for
 * completion. The frame must contain the control set and have all the
 * control set information available.
 *
 * The function returns 0 if the transfer has been successfully initiated.
 */
int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
{
	int r;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct usb_device *udev = zd_usb_to_usbdev(usb);
	struct urb *urb;
	struct zd_usb_tx *tx = &usb->tx;

	if (!atomic_read(&tx->enabled)) {
		r = -ENOENT;
		goto out;
	}

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		r = -ENOMEM;
		goto out;
	}

	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
		          skb->data, skb->len, tx_urb_complete, skb);

	info->rate_driver_data[1] = (void *)jiffies;
	skb_queue_tail(&tx->submitted_skbs, skb);
	usb_anchor_urb(urb, &tx->submitted);

	r = usb_submit_urb(urb, GFP_ATOMIC);
	if (r) {
		dev_dbg_f(zd_usb_dev(usb), "error submit urb %p %d\n", urb, r);
		usb_unanchor_urb(urb);
		skb_unlink(skb, &tx->submitted_skbs);
		goto error;
	}
	tx_inc_submitted_urbs(usb);
	return 0;
error:
	usb_free_urb(urb);
out:
	return r;
}
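/*
 * A minimal caller sketch (hypothetical): a nonzero return from
 * zd_usb_tx() means the frame never reached the hardware and has
 * already been unlinked from submitted_skbs, so the caller still owns
 * the skb; the drop policy here is illustrative only.
 */
static void sketch_zd_tx(struct zd_usb *usb, struct sk_buff *skb)
{
	if (zd_usb_tx(usb, skb))
		dev_kfree_skb_any(skb);
}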
Example #18
0
static void ab_cleanup(unsigned long h)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&aun_queue_lock, flags);
	skb = skb_peek(&aun_queue);
	while (skb && skb != (struct sk_buff *)&aun_queue)
	{
		struct sk_buff *newskb = skb->next;
		struct ec_cb *eb = (struct ec_cb *)&skb->cb;
		if ((jiffies - eb->start) > eb->timeout)
		{
			tx_result(skb->sk, eb->cookie, 
				  ECTYPE_TRANSMIT_NOT_PRESENT);
			skb_unlink(skb);
		}
		skb = newskb;
	}
	spin_unlock_irqrestore(&aun_queue_lock, flags);

	mod_timer(&ab_cleanup_timer, jiffies + (HZ*2));
}
Example #19
0
static int hfa384x_wait(struct net_device *dev, struct sk_buff *skb)
{
	struct hostap_interface *iface = netdev_priv(dev);
	local_info_t *local = iface->local;
	struct hostap_usb_priv *hw_priv = local->hw_priv;
	int res;
	unsigned long flags;

	res = wait_for_completion_interruptible_timeout(&hfa384x_cb(skb)->comp, 5 * HZ);
	if (res > 0)
		return 0;

	if (res == 0) {
		res = -ETIMEDOUT;
	}

	usb_kill_urb(&hw_priv->tx_urb);
	// FIXME: rethink
	spin_lock_irqsave(&hw_priv->tx_queue.lock, flags);
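	/*
	 * On kernels of this era skb_unlink() resets skb->next to NULL, so
	 * a non-NULL skb->next here means the skb is still linked into
	 * tx_queue; the check avoids unlinking a buffer that the completion
	 * handler already removed.
	 */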
	if (skb->next)
		skb_unlink(skb, &hw_priv->tx_queue);
	spin_unlock_irqrestore(&hw_priv->tx_queue.lock, flags);
	return res;
}
Example #20
0
				/* pads short frames to the 64-byte minimum, adds CRC, etc. */
int 
sab8253xn_write2(struct sk_buff *skb, struct net_device *dev)
{
	size_t cnt;
	unsigned long flags;
	SAB_PORT *priv = (SAB_PORT*) dev->priv;
	struct sk_buff *substitute;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
	if(dev->tbusy != 0)		/* something of an error */
	{
		++(priv->Counters.tx_drops);
		dev_kfree_skb_any(skb);
		return -EBUSY;		/* only during release */
	}
#endif

	if(priv->active2.transmit == NULL)
	{
		return -ENOMEM;
	}
	
	DEBUGPRINT((KERN_ALERT "sab8253x: sending IP packet(bytes):\n"));

	DEBUGPRINT((KERN_ALERT "sab8253x: start address is %p.\n", skb->data));
	
	cnt = skb->tail - skb->data;
	cnt = MIN(cnt, sab8253xn_rbufsize);
	if(cnt < ETH_ZLEN)
	{
		if((skb->end - skb->data) >= ETH_ZLEN)
		{
			skb->tail = (skb->data + ETH_ZLEN);
			cnt = ETH_ZLEN;
		}
		else
		{
			substitute = dev_alloc_skb(ETH_ZLEN);
			if(substitute == NULL)
			{
				dev_kfree_skb_any(skb);
				return 0;
			}
			substitute->tail = (substitute->data + ETH_ZLEN);
			memcpy(substitute->data, skb->data, cnt);
			cnt = ETH_ZLEN;
			dev_kfree_skb_any(skb);
			skb = substitute;
		}
	}
	
	save_flags(flags); cli();
	if((priv->active2.transmit->Count & OWNER) == OWN_SAB)
	{
		++(priv->Counters.tx_drops);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
		dev->tbusy = 1;
#else
		netif_stop_queue (dev);
#endif
		priv->tx_full = 1;
		restore_flags(flags);
		return 1;
	}
	restore_flags(flags);
#ifndef FREEINTERRUPT
	if(priv->active2.transmit->HostVaddr != NULL)
	{
		register RING_DESCRIPTOR *freeme;
		
		freeme = priv->active2.transmit;
		do
		{
			skb_unlink((struct sk_buff*)freeme->HostVaddr);
			dev_kfree_skb_any((struct sk_buff*)freeme->HostVaddr);
			freeme->HostVaddr = NULL;
			freeme = (RING_DESCRIPTOR*) freeme->VNext;
		}
		while(((freeme->Count & OWNER) != OWN_SAB) &&
		      (freeme->HostVaddr != NULL));
	}
#endif
	dev->trans_start = jiffies;
	skb_queue_head(priv->sab8253xbuflist, skb);
	priv->active2.transmit->HostVaddr = skb;
	priv->active2.transmit->sendcrc = 1;
	priv->active2.transmit->crcindex = 0;
	priv->active2.transmit->crc = fn_calc_memory_crc32(skb->data, cnt);
	priv->active2.transmit->Count = (OWN_SAB|cnt); /* must be this order */
	priv->active2.transmit = 
		(RING_DESCRIPTOR*) priv->active2.transmit->VNext;
	priv->Counters.transmitbytes += cnt;
	sab8253x_start_txS(priv);
	return 0;
}
Example #21
0
int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff *skb2, *n, *ack = NULL;
	int wakeup = 0;
	int try_retrans = 0;
	unsigned long reftime = cb->stamp;
	unsigned long pkttime;
	unsigned short xmit_count;
	unsigned short segnum;

	skb_queue_walk_safe(q, skb2, n) {
		struct dn_skb_cb *cb2 = DN_SKB_CB(skb2);

		if (dn_before_or_equal(cb2->segnum, acknum))
			ack = skb2;

		/* printk(KERN_DEBUG "ack: %s %04x %04x\n", ack ? "ACK" : "SKIP", (int)cb2->segnum, (int)acknum); */

		if (ack == NULL)
			continue;

		/* printk(KERN_DEBUG "check_xmit_queue: %04x, %d\n", acknum, cb2->xmit_count); */

		/* Does _last_ packet acked have xmit_count > 1 */
		try_retrans = 0;
		/* Remember to wake up the sending process */
		wakeup = 1;
		/* Keep various statistics */
		pkttime = cb2->stamp;
		xmit_count = cb2->xmit_count;
		segnum = cb2->segnum;
		/* Remove and drop ack'ed packet */
		skb_unlink(ack, q);
		kfree_skb(ack);
		ack = NULL;

		/*
		 * We don't expect to see acknowledgements for packets we
		 * haven't sent yet.
		 */
		WARN_ON(xmit_count == 0);

		/*
		 * If the packet has only been sent once, we can use it
		 * to calculate the RTT and also open the window a little
		 * further.
		 */
		if (xmit_count == 1) {
			if (dn_equal(segnum, acknum))
				dn_nsp_rtt(sk, (long)(pkttime - reftime));

			if (scp->snd_window < scp->max_window)
				scp->snd_window++;
		}

		/*
		 * Packet has been sent more than once. If this is the last
		 * packet to be acknowledged then we want to send the next
		 * packet in the send queue again (assumes the remote host does
		 * go-back-N error control).
		 */
		if (xmit_count > 1)
			try_retrans = 1;
	}

	if (try_retrans)
		dn_nsp_output(sk);

	return wakeup;
}
Example #22
0
/* NOTE:
	This function returns a list of SKBs suitable for aggregation.
	If no suitable SKB is found, pSendList will only contain the input SKB.
*/
u8 AMSDU_GetAggregatibleList(
	struct rtllib_device *	ieee,
	struct sk_buff *		pCurSkb,
	struct sk_buff_head 		*pSendList,
	u8				queue_index,
	bool				is_ap
	)
{
	struct sk_buff 			*pSkb = NULL;
	u16				nMaxAMSDUSize = 0;
	u32				AggrSize = 0;
	u32				nAggrSkbNum = 0;
	u8 				padding = 0;
	struct sta_info			*psta = NULL;	
	u8 				*addr = (u8*)(pCurSkb->data);	
	struct sk_buff_head *header;
	struct sk_buff 	  *punlinkskb = NULL;	

	/* pad the subframe to a 4-byte boundary; the conditional avoids
	   adding 4 bytes of padding when the length is already aligned */
	padding = ((4-pCurSkb->len%4)==4)?0:(4-pCurSkb->len%4);
	AggrSize = AMSDU_SUBHEADER_LEN + pCurSkb->len + padding;
	skb_queue_tail(pSendList, pCurSkb);
	nAggrSkbNum++;

	if (is_ap) {
#ifdef ASL
		if ((ieee->iw_mode == IW_MODE_MASTER ||
		     ieee->iw_mode == IW_MODE_APSTA) &&
		    (ieee->ap_state == RTLLIB_LINKED)) {
			psta = ap_get_stainfo(ieee, addr);
			if (NULL != psta)
				nMaxAMSDUSize = psta->htinfo.AMSDU_MaxSize;
			else
				return 1;
		} else
#endif
			return 1;
	} else {
		if (ieee->iw_mode == IW_MODE_ADHOC) {
			psta = GetStaInfo(ieee, addr);
			if (NULL != psta)
				nMaxAMSDUSize = psta->htinfo.AMSDU_MaxSize;
			else
				return 1;
		} else {
			nMaxAMSDUSize = ieee->pHTInfo->nCurrent_AMSDU_MaxSize;
		}
	}

	if(ieee->pHTInfo->ForcedAMSDUMode == HT_AGG_FORCE_ENABLE)
	{
		nMaxAMSDUSize = ieee->pHTInfo->ForcedAMSDUMaxSize;
	}
	
	if (is_ap) {
#ifdef ASL
		header = (&ieee->skb_apaggQ[queue_index]);
#endif
	} else
		header = (&ieee->skb_aggQ[queue_index]);
	pSkb = header->next;
	while(pSkb != (struct sk_buff*)header)
	{
		if (is_ap) {
#ifdef ASL
			if((ieee->iw_mode == IW_MODE_MASTER) ||(ieee->iw_mode == IW_MODE_APSTA))
		{
			if(memcmp(pCurSkb->data, pSkb->data, ETH_ALEN) != 0) 
			{
				pSkb = pSkb->next;
				continue;				
			}
		}
#endif
		} else {
			if(ieee->iw_mode == IW_MODE_ADHOC)
			{
				if(memcmp(pCurSkb->data, pSkb->data, ETH_ALEN) != 0) 
				{
					pSkb = pSkb->next;
					continue;				
				}
			}
		}
		if((AMSDU_SUBHEADER_LEN + pSkb->len + AggrSize < nMaxAMSDUSize) )
		{
			punlinkskb = pSkb;
			pSkb = pSkb->next;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
			skb_unlink(punlinkskb, header);	
#else
			/*
			 * __skb_unlink before linux2.6.14 does not use spinlock to protect list head.
			 * add spinlock function manually. john,2008/12/03
			 */
			{
				unsigned long flags;
				spin_lock_irqsave(&ieee->lock, flags);
				__skb_unlink(punlinkskb,header);
				spin_unlock_irqrestore(&ieee->lock, flags);
			}
#endif
			
			padding = ((4-punlinkskb->len%4)==4)?0:(4-punlinkskb->len%4);
			AggrSize += AMSDU_SUBHEADER_LEN + punlinkskb->len + padding;
			skb_queue_tail(pSendList, punlinkskb);
			nAggrSkbNum++;
		}
		else
		{
			/* A-MSDU size limit reached: stop here so frames
			   stay first-in first-out */
			break;
		}
	}
	return nAggrSkbNum;
}
Example #23
0
static void poll_rx(struct atm_dev *dev,int mbx)
{
	struct zatm_dev *zatm_dev;
	unsigned long pos;
	u32 x;
	int error;

	EVENT("poll_rx\n",0,0);
	zatm_dev = ZATM_DEV(dev);
	pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx));
	while (x = zin(MWA(mbx)), (pos & 0xffff) != x) {
		u32 *here;
		struct sk_buff *skb;
		struct atm_vcc *vcc;
		int cells,size,chan;

		EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x);
		here = (u32 *) pos;
		if (((pos += 16) & 0xffff) == zatm_dev->mbx_end[mbx])
			pos = zatm_dev->mbx_start[mbx];
		cells = here[0] & uPD98401_AAL5_SIZE;
#if 0
printk("RX IND: 0x%x, 0x%x, 0x%x, 0x%x\n",here[0],here[1],here[2],here[3]);
{
unsigned long *x;
		printk("POOL: 0x%08x, 0x%08x\n",zpeekl(zatm_dev,
		      zatm_dev->pool_base),
		      zpeekl(zatm_dev,zatm_dev->pool_base+1));
		x = (unsigned long *) here[2];
		printk("[0..3] = 0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx\n",
		    x[0],x[1],x[2],x[3]);
}
#endif
		error = 0;
		if (here[3] & uPD98401_AAL5_ERR) {
			error = (here[3] & uPD98401_AAL5_ES) >>
			    uPD98401_AAL5_ES_SHIFT;
			if (error == uPD98401_AAL5_ES_DEACT ||
			    error == uPD98401_AAL5_ES_FREE) continue;
		}
EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >>
  uPD98401_AAL5_ES_SHIFT,error);
		skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb;
		__net_timestamp(skb);
#if 0
printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3],
  ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1],
  ((unsigned *) skb->data)[0]);
#endif
		EVENT("skb 0x%lx, here 0x%lx\n",(unsigned long) skb,
		    (unsigned long) here);
#if 0
printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
#endif
		size = error ? 0 : ntohs(((__be16 *) skb->data)[cells*
		    ATM_CELL_PAYLOAD/sizeof(u16)-3]);
		EVENT("got skb 0x%lx, size %d\n",(unsigned long) skb,size);
		chan = (here[3] & uPD98401_AAL5_CHAN) >>
		    uPD98401_AAL5_CHAN_SHIFT;
		if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) {
			int pos;
			vcc = zatm_dev->rx_map[chan];
			pos = ZATM_VCC(vcc)->pool;
			if (skb == zatm_dev->last_free[pos])
				zatm_dev->last_free[pos] = NULL;
			skb_unlink(skb, zatm_dev->pool + pos);
		}
		else {
			printk(KERN_ERR DEV_LABEL "(itf %d): RX indication "
			    "for non-existing channel\n",dev->number);
			size = 0;
			vcc = NULL;
			event_dump();
		}
		if (error) {
			static unsigned long silence = 0;
			static int last_error = 0;

			if (error != last_error ||
			    time_after(jiffies, silence)  || silence == 0){
				printk(KERN_WARNING DEV_LABEL "(itf %d): "
				    "chan %d error %s\n",dev->number,chan,
				    err_txt[error]);
				last_error = error;
				silence = (jiffies+2*HZ)|1;
			}
			size = 0;
		}
		if (size && (size > cells*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER ||
		    size <= (cells-1)*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER)) {
			printk(KERN_ERR DEV_LABEL "(itf %d): size %d with %d "
			    "cells\n",dev->number,size,cells);
			size = 0;
			event_dump();
		}
		if (size > ATM_MAX_AAL5_PDU) {
			printk(KERN_ERR DEV_LABEL "(itf %d): size too big "
			    "(%d)\n",dev->number,size);
			size = 0;
			event_dump();
		}
		if (!size) {
			dev_kfree_skb_irq(skb);
			if (vcc) atomic_inc(&vcc->stats->rx_err);
			continue;
		}
		if (!atm_charge(vcc,skb->truesize)) {
			dev_kfree_skb_irq(skb);
			continue;
		}
		skb->len = size;
		ATM_SKB(skb)->vcc = vcc;
		vcc->push(vcc,skb);
		atomic_inc(&vcc->stats->rx);
	}
}
Example #24
0
/* NOTE:
	This function returns a list of SKBs suitable for aggregation.
	If no suitable SKB is found, pSendList will only contain the input SKB.
*/
u8 AMSDU_GetAggregatibleList(
	struct rtllib_device *	ieee,
	struct sk_buff *		pCurSkb,
	struct sk_buff_head 		*pSendList,
	u8				queue_index
	)
{
	struct sk_buff 			*pSkb = NULL;
	u16				nMaxAMSDUSize = 0;
	u32				AggrSize = 0;
	u32				nAggrSkbNum = 0;
	u8 				padding = 0;
	struct sta_info			*psta = NULL;	
	u8 				*addr = (u8*)(pCurSkb->data);	
	struct sk_buff_head *header;
	struct sk_buff 	  *punlinkskb = NULL;	

	padding = ((4-pCurSkb->len%4)==4)?0:(4-pCurSkb->len%4);
	AggrSize = AMSDU_SUBHEADER_LEN + pCurSkb->len + padding;
	skb_queue_tail(pSendList, pCurSkb);
	nAggrSkbNum++;

	//
	// Get A-MSDU aggregation threshold.
	//
	if(ieee->iw_mode == IW_MODE_MASTER){	
		psta = GetStaInfo(ieee, addr);
		if(NULL != psta)
			nMaxAMSDUSize = psta->htinfo.AMSDU_MaxSize;
		else
			return 1;
	}else if(ieee->iw_mode == IW_MODE_ADHOC){
		psta = GetStaInfo(ieee, addr);
		if(NULL != psta)
			nMaxAMSDUSize = psta->htinfo.AMSDU_MaxSize;
		else
			return 1;
	}else{
		nMaxAMSDUSize = ieee->pHTInfo->nCurrent_AMSDU_MaxSize;
	}
	nMaxAMSDUSize = ((nMaxAMSDUSize)==0)?HT_AMSDU_SIZE_4K:HT_AMSDU_SIZE_8K;

	if(ieee->pHTInfo->ForcedAMSDUMode == HT_AGG_FORCE_ENABLE)
	{
		nMaxAMSDUSize = ieee->pHTInfo->ForcedAMSDUMaxSize;
	}
	
	// 
	// Build pSendList
	//
	header = (&ieee->skb_aggQ[queue_index]);
	pSkb = header->next;
	while(pSkb != (struct sk_buff*)header)
	{
		//
		// Get Aggregation List. Only those frames with the same RA can be aggregated.
		// For Infrastructure mode, RA is always the AP MAC address so the frame can just be aggregated 
		// without checking RA.
		// For AP mode and IBSS mode, RA is the same as DA. Checking RA is needed before aggregation.
		//
		if((ieee->iw_mode == IW_MODE_MASTER) ||(ieee->iw_mode == IW_MODE_ADHOC))
		{
			if(memcmp(pCurSkb->data, pSkb->data, ETH_ALEN) != 0) //DA
			{
				//printk(""MAC_FMT"-"MAC_FMT"\n",MAC_ARG(pCurSkb->data), MAC_ARG(pSkb->data));
				pSkb = pSkb->next;
				continue;				
			}
		}
		//
		// Limitation shall be checked:
		// (1) A-MSDU size limitation
		//
		if((AMSDU_SUBHEADER_LEN + pSkb->len + AggrSize < nMaxAMSDUSize) )
		{
			// Unlink skb
			punlinkskb = pSkb;
			pSkb = pSkb->next;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
			skb_unlink(punlinkskb, header);	
#else
			/*
			 * __skb_unlink before linux2.6.14 does not use spinlock to protect list head.
			 * add spinlock function manually. john,2008/12/03
			 */
			{
				unsigned long flags;
				spin_lock_irqsave(&ieee->lock, flags);
				__skb_unlink(punlinkskb,header);
				spin_unlock_irqrestore(&ieee->lock, flags);
			}
#endif
			
			//Do aggregation
			padding = ((4-punlinkskb->len%4)==4)?0:(4-punlinkskb->len%4);
			AggrSize += AMSDU_SUBHEADER_LEN + punlinkskb->len + padding;
			//printk(""MAC_FMT": %d\n",MAC_ARG(punlinkskb->data),punlinkskb->len);
			skb_queue_tail(pSendList, punlinkskb);
			nAggrSkbNum++;
		}
		else
		{
			//Do not do aggregation because out of resources
			//printk("\nStop aggregation: ");
			if(!(AMSDU_SUBHEADER_LEN + pSkb->len + AggrSize < nMaxAMSDUSize))
				;//printk("[A-MSDU size limitation]");
		
			break; //break here so frames stay First In First Out
		}
	}
	return nAggrSkbNum;
}
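/*
 * A minimal usage sketch (hypothetical helper): collect aggregatable
 * frames for one TX queue into a fresh list headed by pCurSkb.
 */
static u8 sketch_build_amsdu(struct rtllib_device *ieee,
			     struct sk_buff *pCurSkb, u8 queue_index)
{
	struct sk_buff_head send_list;

	skb_queue_head_init(&send_list);
	return AMSDU_GetAggregatibleList(ieee, pCurSkb, &send_list,
					 queue_index);
}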
Example #25
0
void destroy_sock(struct sock *sk)
{
	struct sk_buff *skb;

  	sk->inuse = 1;			/* just to be safe; lock the sock structure */

	/* Check the dead flag first: only dead=1 means the sock is waiting to be freed */
  	/* In case it's sleeping somewhere. */
  	if (!sk->dead) 
  		sk->write_space(sk);	// if the sock has SO_NOSPACE set, wake up processes waiting on it asynchronously

  	remove_sock(sk);

  	/* Remove the timers used by this connection */
  	/* Now we can no longer get new packets. */
  	delete_timer(sk);				// remove the general-purpose timer
  	/* Nor send them */
	del_timer(&sk->retransmit_timer);	// remove the retransmit timer

	/* Free the partially-filled write buffer hanging off sock::partial */
	while ((skb = tcp_dequeue_partial(sk)) != NULL) {
		IS_SKB(skb);
		kfree_skb(skb, FREE_WRITE);
	}

	/* Cleanup up the write buffer. */
  	while((skb = skb_dequeue(&sk->write_queue)) != NULL) {
		IS_SKB(skb);
		kfree_skb(skb, FREE_WRITE);
  	}
  	
  	/*
  	 *	Don't discard received data until the user side kills its
  	 *	half of the socket.
  	 */

	if (sk->dead) 
	{
  		while((skb=skb_dequeue(&sk->receive_queue))!=NULL) 
  		{
		/*
		 * This will take care of closing sockets that were
		 * listening and didn't accept everything.
		 */
			if (skb->sk != NULL && skb->sk != sk) 
			{
			/* If this sock is a listening socket, connections whose
			 * setup never completed must be closed; those socks may
			 * be in the ESTABLISHED or SYN_RECV state */
				IS_SKB(skb);
				skb->sk->dead = 1;
				skb->sk->prot->close(skb->sk, 0);
			}
			IS_SKB(skb);
			kfree_skb(skb, FREE_READ);
		}
	}	

	/* Now we need to clean up the send head (the retransmit queue). */
	cli();
	for(skb = sk->send_head; skb != NULL; )
	{
		struct sk_buff *skb2;

		/*
		 * We need to remove skb from the transmit queue,
		 * or maybe the arp queue.
		 */
		if (skb->next  && skb->prev) {
/*			printk("destroy_sock: unlinked skb\n");*/
			IS_SKB(skb);
			skb_unlink(skb);
		}
		skb->dev = NULL;
		skb2 = skb->link3;	// link3 chains TCP's retransmit queue
		kfree_skb(skb, FREE_WRITE);
		skb = skb2;
	}
	sk->send_head = NULL;
	sti();

  	/* And now the backlog. */
  	while((skb=skb_dequeue(&sk->back_log))!=NULL) 
  	{
		/* this should never happen. */
/*		printk("cleaning back_log\n");*/
		kfree_skb(skb, FREE_READ);
	}

	/* Now if it has a half accepted/ closed socket. */
	if (sk->pair) 
	{
		sk->pair->dead = 1;
		sk->pair->prot->close(sk->pair, 0);
		sk->pair = NULL;
  	}

	/*
	 * Now if everything is gone we can free the socket
	 * structure, otherwise we need to keep it around until
	 * everything is gone.
	 */

	  if (sk->dead && sk->rmem_alloc == 0 && sk->wmem_alloc == 0) 
	  {
		kfree_s((void *)sk,sizeof(*sk));
	  } 
	  else 
	  {
		/* this should never happen. */
		/* actually it can if an ack has just been sent. */
		sk->destroy = 1;
		sk->ack_backlog = 0;
		sk->inuse = 0;
		reset_timer(sk, TIME_DESTROY, SOCK_DESTROY_TIME);
  	}
}
Example #26
0
static int shaper_qframe(struct shaper *shaper, struct sk_buff *skb)
{
 	struct sk_buff *ptr;
   
 	/*
 	 *	Get ready to work on this shaper. Lock may fail if it's
 	 *	an interrupt and locked.
 	 */
 	 
 	if(!shaper_lock(shaper))
 		return -1;
 	ptr=shaper->sendq.prev;
 	
 	/*
 	 *	Set up our packet details
 	 */
 	 
 	SHAPERCB(skb)->shapelatency=0;
 	SHAPERCB(skb)->shapeclock=shaper->recovery;
 	if(time_before(SHAPERCB(skb)->shapeclock, jiffies))
 		SHAPERCB(skb)->shapeclock=jiffies;
 	skb->priority=0;	/* short term bug fix */
 	SHAPERCB(skb)->shapestamp=jiffies;
 	
 	/*
 	 *	Time slots for this packet.
 	 */
 	 
 	SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb);
 	
#ifdef SHAPER_COMPLEX /* and broken.. */

 	while(ptr && ptr!=(struct sk_buff *)&shaper->sendq)
 	{
 		if(ptr->pri<skb->pri 
 			&& jiffies - SHAPERCB(ptr)->shapeclock < SHAPER_MAXSLIP)
 		{
 			struct sk_buff *tmp=ptr->prev;

 			/*
 			 *	It goes before us therefore we slip the length
 			 *	of the new frame.
 			 */

 			SHAPERCB(ptr)->shapeclock+=SHAPERCB(skb)->shapelen;
 			SHAPERCB(ptr)->shapelatency+=SHAPERCB(skb)->shapelen;

 			/*
 			 *	The packet may have slipped so far back it
 			 *	fell off.
 			 */
 			if(SHAPERCB(ptr)->shapelatency > SHAPER_LATENCY)
 			{
 				skb_unlink(ptr);
 				dev_kfree_skb(ptr);
 			}
 			ptr=tmp;
 		}
 		else
 			break;
 	}
 	if(ptr==NULL || ptr==(struct sk_buff *)&shaper->sendq)
 		skb_queue_head(&shaper->sendq,skb);
 	else
 	{
 		struct sk_buff *tmp;
 		/*
 		 *	Set the packet clock out time according to the
 		 *	frames ahead. I'm sure a bit of thought could drop
 		 *	this loop.
 		 */
 		for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && tmp!=ptr; tmp=tmp->next)
 			SHAPERCB(skb)->shapeclock+=tmp->shapelen;
 		skb_append(ptr,skb);
 	}
#else
	{
		struct sk_buff *tmp;
		/*
		 *	Up our shape clock by the time pending on the queue
		 *	(Should keep this in the shaper as a variable..)
		 */
		for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && 
			tmp!=(struct sk_buff *)&shaper->sendq; tmp=tmp->next)
			SHAPERCB(skb)->shapeclock+=SHAPERCB(tmp)->shapelen;
		/*
		 *	Queue over time. Spill packet.
		 */
		if(SHAPERCB(skb)->shapeclock-jiffies > SHAPER_LATENCY) {
			dev_kfree_skb(skb);
			shaper->stats.tx_dropped++;
		} else
			skb_queue_tail(&shaper->sendq, skb);
	}
#endif 	
	if(sh_debug)
 		printk("Frame queued.\n");
 	if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
 	{
 		ptr=skb_dequeue(&shaper->sendq);
                dev_kfree_skb(ptr);
                shaper->stats.collisions++;
 	}
 	shaper_unlock(shaper);
 	return 0;
}
Example #27
0
int sab8253x_writeS(struct tty_struct * tty, int from_user,
		    const unsigned char *buf, int count)
{
	struct sab_port *port = (struct sab_port *)tty->driver_data;
	struct sk_buff *skb;
	int truelength = 0;
	int do_queue = 1;
	
	if (sab8253x_serial_paranoia_check(port, tty->device, "sab8253x_write"))
	{
		return 0;
	}
	
	if(count == 0)
	{
		return 0;
	}
	
	if(port->active2.transmit == NULL)
	{
		return 0;
	}
	
	if((port->active2.transmit->Count & OWNER) == OWN_SAB)
	{
		sab8253x_start_txS(port);	/* no descriptor slot */
		return 0;
	}
	
#ifndef FREEININTERRUPT
	skb = port->active2.transmit->HostVaddr; /* current slot value */
	
	if(port->buffergreedy == 0)	/* are we avoiding buffer frees? */
	{				/* no */
		if((skb != NULL) || /* not OWN_SAB from above */
		   (port->active2.transmit->crcindex != 0)) 
		{
			register RING_DESCRIPTOR *freeme;
			
			freeme = port->active2.transmit;
			do
			{
				if((freeme->crcindex == 0) && (freeme->HostVaddr == NULL))
				{
					break;
				}
				if(freeme->HostVaddr)
				{
					skb_unlink((struct sk_buff*)freeme->HostVaddr);
					dev_kfree_skb_any((struct sk_buff*)freeme->HostVaddr);
					freeme->HostVaddr = NULL;
				}
				freeme->sendcrc = 0;
				freeme->crcindex = 0;
				freeme = (RING_DESCRIPTOR*) freeme->VNext;
			}
			while((freeme->Count & OWNER) != OWN_SAB);
		}
		skb = NULL;		/* buffer was freed */
	}
	
	if(skb != NULL)		/* potentially useful */
	{
		truelength = (skb->end - skb->head);
		if(truelength >= count)
		{
			skb->data = skb->head; /* this buffer is already queued */
			skb->tail = skb->head;
			do_queue = 0;
		}
		else
		{
			skb_unlink(skb);
			dev_kfree_skb_any(skb);
			skb = NULL;
			port->active2.transmit->HostVaddr = NULL;
		}
	}
	/* in all cases the following is allowed */
	port->active2.transmit->sendcrc = 0;
	port->active2.transmit->crcindex = 0;
#endif
	
	if(skb == NULL)
	{
		if(port->DoingInterrupt)
		{
			skb = alloc_skb(count, GFP_ATOMIC);
		}
		else
		{
			skb = alloc_skb(count, GFP_KERNEL);
		}
	}
	
	if(skb == NULL)
	{
		printk(KERN_ALERT "sab8253xs: no skbuffs available.\n");
		return 0;
	}
	if(from_user)
	{
		copy_from_user(skb->data, buf, count);
	}
	else
	{
		memcpy(skb->data, buf, count);
	}
	skb->tail = (skb->data + count);
	skb->data_len = count;
	skb->len = count;
	
	if(do_queue)
	{
		skb_queue_head(port->sab8253xbuflist, skb);
	}
	
	port->active2.transmit->HostVaddr = skb;
	port->active2.transmit->sendcrc = 0;
	port->active2.transmit->crcindex = 0;
	port->active2.transmit->Count = (OWN_SAB|count);
	port->active2.transmit = port->active2.transmit->VNext;
	
	sab8253x_start_txS(port);
	return count;
}
Example #28
0
	/* stream */
static void sab8253x_receive_charsN(struct sab_port *port,
				    union sab8253x_irq_status *stat)
{
	unsigned char buf[32];
	int free_fifo = 0;
	int reset_fifo = 0;
	int msg_done = 0;
	int msg_bad = 0;
	int count = 0;
	int total_size = 0;
	int rstatus = 0;
	struct sk_buff *skb;
	
	/* Read number of BYTES (Character + Status) available. */
	
	if((stat->images[ISR1_IDX] & SAB82532_ISR1_RDO) || (stat->images[ISR0_IDX] & SAB82532_ISR0_RFO) )
	{
		++msg_bad;
		++free_fifo;
		++reset_fifo;
	}
	else
	{
		if (stat->images[ISR0_IDX] & SAB82532_ISR0_RPF) 
		{
			count = port->recv_fifo_size;
			++free_fifo;
		}
		
		if (stat->images[ISR0_IDX] & SAB82532_ISR0_RME) 
		{
			count = READB(port,rbcl);
			count &= (port->recv_fifo_size - 1);
			++msg_done;
			++free_fifo;
			
			total_size = READB(port, rbch);
			if(total_size & SAB82532_RBCH_OV)
			{
				msg_bad++;
			}
			
			rstatus = READB(port, rsta);
			if((rstatus & SAB82532_RSTA_VFR) == 0)
			{
				msg_bad++;
			}
			if(rstatus & SAB82532_RSTA_RDO)
			{
				msg_bad++;
			}
			if((rstatus & SAB82532_RSTA_CRC) == 0)
			{
				msg_bad++;
			}
			if(rstatus & SAB82532_RSTA_RAB)
			{
				msg_bad++;
			}
		}
	}
	
	/* Read the FIFO. */
	(*port->readfifo)(port, buf, count);
	
	/* Issue Receive Message Complete command. */
	
	if (free_fifo) 
	{
		sab8253x_cec_wait(port);
		WRITEB(port, cmdr, SAB82532_CMDR_RMC);
	}
	
	if(reset_fifo)
	{
		sab8253x_cec_wait(port);
		WRITEB(port, cmdr, SAB82532_CMDR_RHR);
	}
	
	if(port->active2.receive == NULL)
	{
		return;
	}
	
	if(msg_bad)
	{
		++(port->Counters.rx_drops);
		port->active2.receive->HostVaddr->tail = port->active2.receive->HostVaddr->data; /* clear the buffer */
		port->active2.receive->Count = sab8253xn_rbufsize|OWN_SAB;
		return;
	}
	
	memcpy(port->active2.receive->HostVaddr->tail, buf, count);
	port->active2.receive->HostVaddr->tail += count;
	
	if(msg_done)
	{
		port->active2.receive->Count = 
			(port->active2.receive->HostVaddr->tail - port->active2.receive->HostVaddr->data);
		if((port->active2.receive->Count < (ETH_ZLEN+4+3)) || /* 4 bytes of CRC32 plus 3 status bytes from the SAB part */
		   (skb = dev_alloc_skb(sab8253xn_rbufsize), skb == NULL))
		{
			++(port->Counters.rx_drops);
			port->active2.receive->HostVaddr->tail = port->active2.receive->HostVaddr->data; 
				/* clear the buffer */
			port->active2.receive->Count = sab8253xn_rbufsize|OWN_SAB;
		}
		else
		{
			port->active2.receive->Count -= 3;
			port->active2.receive->HostVaddr->len = port->active2.receive->Count;
			port->active2.receive->HostVaddr->pkt_type = PACKET_HOST;
			port->active2.receive->HostVaddr->dev = port->dev;
			port->active2.receive->HostVaddr->protocol = 
				eth_type_trans(port->active2.receive->HostVaddr, port->dev);
			port->active2.receive->HostVaddr->tail -= 3;
			++(port->Counters.receivepacket);
			port->Counters.receivebytes += port->active2.receive->Count;
			skb_unlink(port->active2.receive->HostVaddr);
			
			netif_rx(port->active2.receive->HostVaddr);
			
			skb_queue_head(port->sab8253xbuflist, skb);
			port->active2.receive->HostVaddr = skb;
			port->active2.receive->Count = sab8253xn_rbufsize|OWN_SAB;
		}
	}
}
Example #29
0
static void bfusb_rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *) urb->context;
	struct bfusb_data *data = (struct bfusb_data *) skb->dev;
	unsigned char *buf = urb->transfer_buffer;
	int count = urb->actual_length;
	int err, hdr, len;

	BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len);

	read_lock(&data->lock);

	if (!test_bit(HCI_RUNNING, &data->hdev->flags))
		goto unlock;

	if (urb->status || !count)
		goto resubmit;

	data->hdev->stat.byte_rx += count;

	skb_put(skb, count);

	while (count) {
		hdr = buf[0] | (buf[1] << 8);

		if (hdr & 0x4000) {
			len = 0;
			count -= 2;
			buf   += 2;
		} else {
			len = (buf[2] == 0) ? 256 : buf[2];
			count -= 3;
			buf   += 3;
		}

		if (count < len) {
			BT_ERR("%s block extends over URB buffer ranges",
					data->hdev->name);
		}

		if ((hdr & 0xe1) == 0xc1)
			bfusb_recv_block(data, hdr, buf, len);

		count -= len;
		buf   += len;
	}

	skb_unlink(skb, &data->pending_q);
	kfree_skb(skb);

	bfusb_rx_submit(data, urb);

	read_unlock(&data->lock);

	return;

resubmit:
	urb->dev = data->udev;

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err) {
		BT_ERR("%s bulk resubmit failed urb %p err %d",
					data->hdev->name, urb, err);
	}

unlock:
	read_unlock(&data->lock);
}
Example #30
0
void sab8253x_start_txS(struct sab_port *port)
{
	unsigned long flags;
	register int count;
	register int total;
	register int offset;
	char temporary[32];
	register unsigned int slopspace;
	register int sendsize;
	unsigned int totaltransmit;
	unsigned fifospace;
	unsigned loadedcount;
	struct tty_struct *tty = port->tty; /* a little gross: tty flags whether
					       we were invoked from a tty or the network */
	
	fifospace = port->xmit_fifo_size; /* This code can handle fragmented frames
					     although currently none are generated*/
	loadedcount = 0;
	
	if(port->sabnext2.transmit == NULL)
	{
		return;
	}
	
	save_flags(flags); 
	cli();			
	
	
	if(count = port->sabnext2.transmit->Count, (count & OWNER) == OWN_SAB)
	{
		count &= ~OWN_SAB; /* OWN_SAB is really 0 but cannot guarantee in the future */
		
		if(port->sabnext2.transmit->HostVaddr)
		{
			total = (port->sabnext2.transmit->HostVaddr->tail - 
				 port->sabnext2.transmit->HostVaddr->data); /* packet size */
		}
		else
		{
			total = 0;		/* the data is only the crc/trailer */
		}
		
		if(tty && (tty->stopped || tty->hw_stopped) && (count == total))
		{			/* works for frame that only has a trailer (crc) */
			port->interrupt_mask1 |= SAB82532_IMR1_XPR;
			WRITEB(port, imr1, port->interrupt_mask1);
			restore_flags(flags);	/* can't send */
			return;
		}
		
		offset = (total - count);	/* offset to data still to send */
		
		port->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS);
		WRITEB(port, imr1, port->interrupt_mask1);
		port->all_sent = 0;
		
		if(READB(port,star) & SAB82532_STAR_XFW)
		{
			if(count <= fifospace)
			{
				port->xmit_cnt = count;
				slopspace = 0;
				sendsize = 0;
				if(port->sabnext2.transmit->sendcrc) 
				/* obviously should not happen for async but might be used
				   for priority transmission */
				{
					slopspace = fifospace - count;
				}
				if(slopspace)
				{
					if(count)
					{
						memcpy(temporary, &port->sabnext2.transmit->HostVaddr->data[offset], 
						       count);
					}
					sendsize = MIN(slopspace, (4 - port->sabnext2.transmit->crcindex)); 
				/* how many bytes to send */
					memcpy(&temporary[count], 
					       &((unsigned char*)(&port->sabnext2.transmit->crc))
					       [port->sabnext2.transmit->crcindex], 
					       sendsize);
					port->sabnext2.transmit->crcindex += sendsize;
					if(port->sabnext2.transmit->crcindex >= 4)
					{
						port->sabnext2.transmit->sendcrc = 0;
					}
					port->xmit_buf = temporary;
				}
				else
				{
					port->xmit_buf =	/* set up writefifo variables */
						&port->sabnext2.transmit->HostVaddr->data[offset];
				}
				port->xmit_cnt += sendsize;
				count = 0;
			}
			else
			{
				count -= fifospace;
				port->xmit_cnt = fifospace;
				port->xmit_buf =	/* set up writefifo variables */
					&port->sabnext2.transmit->HostVaddr->data[offset];
				
			}
			port->xmit_tail= 0;
			loadedcount = port->xmit_cnt;
			(*port->writefifo)(port);
			totaltransmit = Sab8253xCountTransmitDescriptors(port);
			if(tty && (totaltransmit < (sab8253xs_listsize/2))) /* only makes sense on a TTY */
			{
				sab8253x_sched_event(port, SAB8253X_EVENT_WRITE_WAKEUP);
			}
			
			if((sab8253xt_listsize - totaltransmit) > (sab8253xt_listsize/2))
			{
				port->buffergreedy = 0;
			}
			else
			{
				port->buffergreedy = 1;
			}
			
			port->xmit_buf = NULL; /* this var is used to indicate whether to call kfree */
			
			/* fifospace -= loadedcount;*/
			/* Here to make mods to handle arbitrarily fragmented frames look to 8253xtty.c for help */
			
			if ((count <= 0) && (port->sabnext2.transmit->sendcrc == 0))
			{
				port->sabnext2.transmit->Count = OWN_DRIVER;
				if(!tty)
				{		/* called by network driver */
					++(port->Counters.transmitpacket);
				}
#ifdef FREEININTERRUPT		/* treat this routine as if taking place in interrupt */
				if(port->sabnext2.transmit->HostVaddr)
				{
					skb_unlink(port->sabnext2.transmit->HostVaddr);
					dev_kfree_skb_any(port->sabnext2.transmit->HostVaddr);
					port->sabnext2.transmit->HostVaddr = 0; /* no skb */
				}
				port->sabnext2.transmit->crcindex = 0; /* no single byte */
#endif
				sab8253x_cec_wait(port);
				WRITEB(port, cmdr, SAB82532_CMDR_XME|SAB82532_CMDR_XTF); /* Terminate the frame */
				
				port->sabnext2.transmit = port->sabnext2.transmit->VNext;
				
				if(!tty && port->tx_full)	/* invoked from the network driver */
				{
					port->tx_full = 0; /* there is a free slot */
					switch(port->open_type)
					{
					case OPEN_SYNC_NET:
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
						port->dev->start = 1;
						port->dev->tbusy = 0;	/* maybe need mark_bh here */
#else
						netif_start_queue(port->dev);
#endif
						break;
						
					case OPEN_SYNC_CHAR:
						wake_up_interruptible(&port->write_wait);
						break;
						
					default:
						break;
					}
				}
				
				if((port->sabnext2.transmit->Count & OWNER) == OWN_SAB)
				{		/* new frame to send */
					port->interrupt_mask1 &= ~(SAB82532_IMR1_XPR);
					WRITEB(port, imr1, port->interrupt_mask1);
				}
				else
				{
					port->interrupt_mask1 |= SAB82532_IMR1_XPR;
					WRITEB(port, imr1, port->interrupt_mask1);
					if((port->open_type == OPEN_SYNC_CHAR) && port->async_queue)
					{		/* if indication of transmission is needed by the */
						/* application on a per-frame basis kill_fasync */
						/* can provide it */
						kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
					}
				}
				restore_flags(flags);
				return;
			}
			/* Issue a Transmit FIFO command. */
			sab8253x_cec_wait(port);
			WRITEB(port, cmdr, SAB82532_CMDR_XTF);	
			port->sabnext2.transmit->Count = (count|OWN_SAB);
		}
		port->interrupt_mask1 &= ~(SAB82532_IMR1_XPR); /* more to send */
		WRITEB(port, imr1, port->interrupt_mask1);
	}
	else
	{				/* nothing to send */
		port->interrupt_mask1 |= SAB82532_IMR1_XPR;
		WRITEB(port, imr1, port->interrupt_mask1);
	}
	restore_flags(flags);
	return;
}