Example #1
File: ifb.c Project: 19Dan01/linux
static void ri_tasklet(unsigned long dev)
{
	struct net_device *_dev = (struct net_device *)dev;
	struct ifb_private *dp = netdev_priv(_dev);
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(_dev, 0);
	if ((skb = skb_peek(&dp->tq)) == NULL) {
		if (__netif_tx_trylock(txq)) {
			skb_queue_splice_tail_init(&dp->rq, &dp->tq);
			__netif_tx_unlock(txq);
		} else {
			/* reschedule */
			goto resched;
		}
	}

	while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
		u32 from = G_TC_FROM(skb->tc_verd);

		skb->tc_verd = 0;
		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);

		u64_stats_update_begin(&dp->tsync);
		dp->tx_packets++;
		dp->tx_bytes += skb->len;
		u64_stats_update_end(&dp->tsync);

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(dev_net(_dev), skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			_dev->stats.tx_dropped++;
			if (skb_queue_len(&dp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = _dev->ifindex;

		if (from & AT_EGRESS) {
			dev_queue_xmit(skb);
		} else if (from & AT_INGRESS) {
			skb_pull(skb, skb->mac_len);
			netif_receive_skb(skb);
		} else
			BUG();
	}

	if (__netif_tx_trylock(txq)) {
		if ((skb = skb_peek(&dp->rq)) == NULL) {
			dp->tasklet_pending = 0;
			if (netif_queue_stopped(_dev))
				netif_wake_queue(_dev);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

}
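The handler above cannot always finish under the tx lock, so it re-arms itself through the tasklet_pending flag and tasklet_schedule(). For context, a minimal sketch of the scheduling side, using the classic (pre-5.9) unsigned-long tasklet API; the dp-> field names follow the example, the placement in the xmit path is an assumption:

	/* at device init: bind handler and argument; nothing runs yet */
	tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);

	/* in the xmit path: queue the skb, then kick the tasklet once */
	__skb_queue_tail(&dp->rq, skb);
	if (!dp->tasklet_pending) {
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}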
Example #2
File: usb.c Project: mhei/linux
static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
                                    struct sk_buff *skb)
{
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    u8 *rxdesc = skb->data;
    struct ieee80211_hdr *hdr;
    bool unicast = false;
    __le16 fc;
    struct ieee80211_rx_status rx_status = {0};
    struct rtl_stats stats = {
        .signal = 0,
        .rate = 0,
    };

    skb_pull(skb, RTL_RX_DESC_SIZE);
    rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
    skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
    hdr = (struct ieee80211_hdr *)(skb->data);
    fc = hdr->frame_control;
    if (!stats.crc) {
        memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

        if (is_broadcast_ether_addr(hdr->addr1)) {
            /*TODO*/;
        } else if (is_multicast_ether_addr(hdr->addr1)) {
            /*TODO*/
        } else {
            unicast = true;
            rtlpriv->stats.rxbytesunicast +=  skb->len;
        }

        if (ieee80211_is_data(fc)) {
            rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

            if (unicast)
                rtlpriv->link_info.num_rx_inperiod++;
        }
        /* static bcn for roaming */
        rtl_beacon_statistic(hw, skb);
    }
}

static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
                                      struct sk_buff *skb)
{
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    u8 *rxdesc = skb->data;
    struct ieee80211_hdr *hdr;
    bool unicast = false;
    __le16 fc;
    struct ieee80211_rx_status rx_status = {0};
    struct rtl_stats stats = {
        .signal = 0,
        .rate = 0,
    };

    skb_pull(skb, RTL_RX_DESC_SIZE);
    rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
    skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
    hdr = (struct ieee80211_hdr *)(skb->data);
    fc = hdr->frame_control;
    if (!stats.crc) {
        memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

        if (is_broadcast_ether_addr(hdr->addr1)) {
            /*TODO*/;
        } else if (is_multicast_ether_addr(hdr->addr1)) {
            /*TODO*/
        } else {
            unicast = true;
            rtlpriv->stats.rxbytesunicast +=  skb->len;
        }

        if (ieee80211_is_data(fc)) {
            rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

            if (unicast)
                rtlpriv->link_info.num_rx_inperiod++;
        }

        /* static bcn for roaming */
        rtl_beacon_statistic(hw, skb);

        if (likely(rtl_action_proc(hw, skb, false)))
            ieee80211_rx(hw, skb);
        else
            dev_kfree_skb_any(skb);
    } else {
        dev_kfree_skb_any(skb);
    }
}

static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
{
    struct sk_buff *_skb;
    struct sk_buff_head rx_queue;
    struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

    skb_queue_head_init(&rx_queue);
    if (rtlusb->usb_rx_segregate_hdl)
        rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
    WARN_ON(skb_queue_empty(&rx_queue));
    while (!skb_queue_empty(&rx_queue)) {
        _skb = skb_dequeue(&rx_queue);
        _rtl_usb_rx_process_agg(hw, _skb);
        ieee80211_rx(hw, _skb);
    }
}

#define __RX_SKB_MAX_QUEUED	64

static void _rtl_rx_work(unsigned long param)
{
    struct rtl_usb *rtlusb = (struct rtl_usb *)param;
    struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
    struct sk_buff *skb;

    while ((skb = skb_dequeue(&rtlusb->rx_queue))) {
        if (unlikely(IS_USB_STOP(rtlusb))) {
            dev_kfree_skb_any(skb);
            continue;
        }

        if (likely(!rtlusb->usb_rx_segregate_hdl)) {
            _rtl_usb_rx_process_noagg(hw, skb);
        } else {
            /* TO DO */
            _rtl_rx_pre_process(hw, skb);
            pr_err("rx agg not supported\n");
        }
    }
}

static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
                                        unsigned int len)
{
#if NET_IP_ALIGN != 0
    unsigned int padding = 0;
#endif

    /* make function no-op when possible */
    if (NET_IP_ALIGN == 0 || len < sizeof(*hdr))
        return 0;

#if NET_IP_ALIGN != 0
    /* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */
    /* TODO: deduplicate common code, define helper function instead? */

    if (ieee80211_is_data_qos(hdr->frame_control)) {
        u8 *qc = ieee80211_get_qos_ctl(hdr);

        padding ^= NET_IP_ALIGN;

        /* Input might be invalid, avoid accessing memory outside
         * the buffer.
         */
        if ((unsigned long)qc - (unsigned long)hdr < len &&
                *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
            padding ^= NET_IP_ALIGN;
    }

    if (ieee80211_has_a4(hdr->frame_control))
        padding ^= NET_IP_ALIGN;

    return padding;
#endif
}
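With NET_IP_ALIGN == 2, only the 802.11 header length modulo 4 matters, and each feature the code tests changes that length by 2 mod 4: the base 3-address header is 24 bytes, a QoS-control field adds 2, a fourth address adds 6, and an A-MSDU's own subframe header restores alignment; hence the XOR toggles. A few worked cases (assuming NET_IP_ALIGN == 2):

	plain 3-address data frame:  24 bytes, 24 % 4 == 0  ->  padding 0
	QoS data, 3 addresses:       26 bytes, 26 % 4 == 2  ->  padding 2
	QoS data, 4 addresses:       32 bytes, 32 % 4 == 0  ->  padding 0 (two toggles)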

#define __RADIO_TAP_SIZE_RSV	32

static void _rtl_rx_completed(struct urb *_urb)
{
    struct rtl_usb *rtlusb = (struct rtl_usb *)_urb->context;
    struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    int err = 0;

    if (unlikely(IS_USB_STOP(rtlusb)))
        goto free;

    if (likely(0 == _urb->status)) {
        unsigned int padding;
        struct sk_buff *skb;
        unsigned int qlen;
        unsigned int size = _urb->actual_length;
        struct ieee80211_hdr *hdr;

        if (size < RTL_RX_DESC_SIZE + sizeof(struct ieee80211_hdr)) {
            RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
                     "Too short packet from bulk IN! (len: %d)\n",
                     size);
            goto resubmit;
        }

        qlen = skb_queue_len(&rtlusb->rx_queue);
        if (qlen >= __RX_SKB_MAX_QUEUED) {
            RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
                     "Pending RX skbuff queue full! (qlen: %d)\n",
                     qlen);
            goto resubmit;
        }

        hdr = (void *)(_urb->transfer_buffer + RTL_RX_DESC_SIZE);
        padding = _rtl_rx_get_padding(hdr, size - RTL_RX_DESC_SIZE);

        skb = dev_alloc_skb(size + __RADIO_TAP_SIZE_RSV + padding);
        if (!skb) {
            RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
                     "Can't allocate skb for bulk IN!\n");
            goto resubmit;
        }

        _rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep);

        /* Make sure the payload data is 4 byte aligned. */
        skb_reserve(skb, padding);

        /* reserve some space for mac80211's radiotap */
        skb_reserve(skb, __RADIO_TAP_SIZE_RSV);

        memcpy(skb_put(skb, size), _urb->transfer_buffer, size);

        skb_queue_tail(&rtlusb->rx_queue, skb);
        tasklet_schedule(&rtlusb->rx_work_tasklet);

        goto resubmit;
    }

    switch (_urb->status) {
    /* disconnect */
    case -ENOENT:
    case -ECONNRESET:
    case -ENODEV:
    case -ESHUTDOWN:
        goto free;
    default:
        break;
    }

resubmit:
    usb_anchor_urb(_urb, &rtlusb->rx_submitted);
    err = usb_submit_urb(_urb, GFP_ATOMIC);
    if (unlikely(err)) {
        usb_unanchor_urb(_urb);
        goto free;
    }
    return;

free:
    /* On some architectures, usb_free_coherent must not be called from
     * hardirq context. Queue urb to cleanup list.
     */
    usb_anchor_urb(_urb, &rtlusb->rx_cleanup_urbs);
}

#undef __RADIO_TAP_SIZE_RSV

static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
{
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
    struct urb *urb;

    usb_kill_anchored_urbs(&rtlusb->rx_submitted);

    tasklet_kill(&rtlusb->rx_work_tasklet);
    cancel_work_sync(&rtlpriv->works.lps_change_work);

    flush_workqueue(rtlpriv->works.rtl_wq);
    destroy_workqueue(rtlpriv->works.rtl_wq);

    skb_queue_purge(&rtlusb->rx_queue);

    while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
        usb_free_coherent(urb->dev, urb->transfer_buffer_length,
                          urb->transfer_buffer, urb->transfer_dma);
        usb_free_urb(urb);
    }
}

static int _rtl_usb_receive(struct ieee80211_hw *hw)
{
    struct urb *urb;
    int err;
    int i;
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

    WARN_ON(0 == rtlusb->rx_urb_num);
    /* 1600 == 1514 + max WLAN header + rtk info */
    WARN_ON(rtlusb->rx_max_size < 1600);

    for (i = 0; i < rtlusb->rx_urb_num; i++) {
        err = -ENOMEM;
        urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!urb)
            goto err_out;

        err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
        if (err < 0) {
            RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
                     "Failed to prep_rx_urb!!\n");
            usb_free_urb(urb);
            goto err_out;
        }

        usb_anchor_urb(urb, &rtlusb->rx_submitted);
        err = usb_submit_urb(urb, GFP_KERNEL);
        if (err)
            goto err_out;
        usb_free_urb(urb);
    }
    return 0;

err_out:
    usb_kill_anchored_urbs(&rtlusb->rx_submitted);
    _rtl_usb_cleanup_rx(hw);
    return err;
}

static int rtl_usb_start(struct ieee80211_hw *hw)
{
    int err;
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
    struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

    err = rtlpriv->cfg->ops->hw_init(hw);
    if (!err) {
        rtl_init_rx_config(hw);

        /* Enable software */
        SET_USB_START(rtlusb);
        /* should be done after adapter start and interrupt enable. */
        set_hal_start(rtlhal);

        /* Start bulk IN */
        err = _rtl_usb_receive(hw);
    }

    return err;
}
static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return -ENOMEM;
	}
	skb_reserve (skb, NET_IP_ALIGN);

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = rx_start;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach (dev->net);
			break;
		case -EHOSTUNREACH:
			retval = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__skb_queue_tail (&dev->rxq, skb);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
	return retval;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void kevent (void *data)
{
	struct usbnet *dev = (struct usbnet *)data;
#else
static void kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
#endif
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {

		unlink_urbs (dev, &dev->txq);
		status = usb_clear_halt (dev->udev, dev->out);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
				deverr (dev, "can't clear tx halt, status %d",
					status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {

		unlink_urbs (dev, &dev->rxq);
		status = usb_clear_halt (dev->udev, dev->in);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
				deverr (dev, "can't clear rx halt, status %d",
					status);
		} else {
			clear_bit (EVENT_RX_HALT, &dev->flags);
			tasklet_schedule (&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
			urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
			rx_submit (dev, urb, GFP_KERNEL);
			tasklet_schedule (&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
			devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
				info->description);
		}
	}

	if (dev->flags)
		devdbg (dev, "kevent done, flags = 0x%lx",
			dev->flags);
}
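The halt and memory paths above pair with a tiny defer helper whose only job is to record which event needs thread context and schedule the work item. A sketch of that helper; the name matches the axusbnet_defer_kevent() calls elsewhere in this file, but treat the body as illustrative:

static void axusbnet_defer_kevent (struct usbnet *dev, int work)
{
	/* remember what kevent() must handle ... */
	set_bit (work, &dev->flags);
	/* ... and get it running in process context */
	if (!schedule_work (&dev->kevent))
		deverr (dev, "kevent %d may have been dropped", work);
	else
		devdbg (dev, "kevent %d scheduled", work);
}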

/*-------------------------------------------------------------------------*/

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
static void tx_complete (struct urb *urb, struct pt_regs *regs)
#else
static void tx_complete (struct urb *urb)
#endif
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += entry->length;
	} else {
		dev->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			axusbnet_defer_kevent (dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		// async unlink
		case -ESHUTDOWN:		// hardware gone
			break;

		// like rx, tx gets controller i/o faults during khubd delays
		// and so it uses the same throttling mechanism.
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			if (!timer_pending (&dev->delay)) {
				mod_timer (&dev->delay,
					jiffies + THROTTLE_JIFFIES);
				if (netif_msg_link (dev))
					devdbg (dev, "tx throttle %d",
							urb->status);
			}
			netif_stop_queue (dev->net);
			break;
		default:
			if (netif_msg_tx_err (dev))
				devdbg (dev, "tx err %d", entry->urb->status);
			break;
		}
	}

	urb->dev = NULL;
	entry->state = tx_done;
	defer_bh(dev, skb, &dev->txq);
}

/*-------------------------------------------------------------------------*/

static
void axusbnet_tx_timeout (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);

	unlink_urbs (dev, &dev->txq);
	tasklet_schedule (&dev->bh);

	// FIXME: device recovery -- reset?
}

/*-------------------------------------------------------------------------*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
static int
#else
static netdev_tx_t
#endif
axusbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err (dev))
				devdbg (dev, "can't tx_fixup skb");
			goto drop;
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "no urb");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}

	spin_lock_irqsave (&dev->txq.lock, flags);

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		axusbnet_defer_kevent (dev, EVENT_TX_HALT);
		break;
	default:
		if (netif_msg_tx_err (dev))
			devdbg (dev, "tx: submit urb err %d", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail (&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "drop, code %d", retval);
drop:
		dev->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else if (netif_msg_tx_queued (dev)) {
		devdbg (dev, "> tx, len %d, type 0x%x",
			length, skb->protocol);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

// tasklet (work deferred from completions, in_irq) or timer

static void axusbnet_bh (unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process (dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb (entry->urb);
			dev_kfree_skb (skb);
			continue;
		default:
			devdbg (dev, "bogus skb state %d", entry->state);
		}
	}

	// waiting for all pending urbs to complete?
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
			wake_up (dev->wait);
		}

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net)
			&& netif_device_present (dev->net)
			&& !timer_pending (&dev->delay)
			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;
		int	qlen = RX_QLEN (dev);

		if (temp < qlen) {
			struct urb	*urb;
			int		i;

			// don't refill the queue all at once
			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
				urb = usb_alloc_urb (0, GFP_ATOMIC);
				if (urb != NULL) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
					urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
					rx_submit (dev, urb, GFP_ATOMIC);
				}
			}
			if (temp != dev->rxq.qlen && netif_msg_link (dev))
				devdbg (dev, "rxqlen %d --> %d",
						temp, dev->rxq.qlen);
			if (dev->rxq.qlen < qlen)
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}


/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

static
void axusbnet_disconnect (struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	xdev = interface_to_usbdev (intf);

	if (netif_msg_probe (dev))
		devinfo (dev, "unregister '%s' usb-%s-%s, %s",
			intf->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description);

	net = dev->net;
	unregister_netdev (net);

	/* we don't hold rtnl here ... */
	flush_scheduled_work ();

	if (dev->driver_info->unbind)
		dev->driver_info->unbind (dev, intf);

	free_netdev(net);
	usb_put_dev (xdev);
}

/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

static int
axusbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	struct driver_info		*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		printk (KERN_ERR "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev (udev);
	interface = udev->cur_altsetting;

	usb_get_dev (xdev);

	status = -ENOMEM;

	// set up our own records
	net = alloc_etherdev(sizeof(*dev));
	if (!net) {
		dbg ("can't kmalloc dev");
		goto out;
	}

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init (&dev->rxq);
	skb_queue_head_init (&dev->txq);
	skb_queue_head_init (&dev->done);
	dev->bh.func = axusbnet_bh;
	dev->bh.data = (unsigned long) dev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	INIT_WORK (&dev->kevent, kevent, dev);
#else
	INIT_WORK (&dev->kevent, kevent);
#endif

	dev->delay.function = axusbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer (&dev->delay);
//	mutex_init (&dev->phy_mutex);

	dev->net = net;

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;

#if 0
// dma_supported() is deeply broken on almost all architectures
	// possible with some EHCI controllers
	if (dma_supported (&udev->dev, DMA_BIT_MASK(64)))
		net->features |= NETIF_F_HIGHDMA;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
	net->open		= axusbnet_open,
	net->stop		= axusbnet_stop,
	net->hard_start_xmit	= axusbnet_start_xmit,
	net->tx_timeout	= axusbnet_tx_timeout,
	net->get_stats = axusbnet_get_stats;
#endif

	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &axusbnet_ethtool_ops;

	// allow device-specific bind/init procedures
	// NOTE net->name still not usable ...
	status = info->bind (dev, udev);
	if (status < 0) {
		deverr(dev, "Binding device failed: %d", status);
		goto out1;
	}

	/* maybe the remote can't receive an Ethernet MTU */
	if (net->mtu > (dev->hard_mtu - net->hard_header_len))
		net->mtu = dev->hard_mtu - net->hard_header_len;

	status = init_status (dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);

	SET_NETDEV_DEV(net, &udev->dev);
	status = register_netdev (net);
	if (status) {
		deverr(dev, "net device registration failed: %d", status);
		goto out3;
	}

	if (netif_msg_probe (dev))
		devinfo (dev, "register '%s' at usb-%s-%s, %s, %pM",
			udev->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description,
			net->dev_addr);

	// ok, it's ready to go.
	usb_set_intfdata (udev, dev);

	// start as if the link is up
	netif_device_attach (net);

	return 0;

out3:
	if (info->unbind)
		info->unbind (dev, udev);
out1:
	free_netdev(net);
out:
	usb_put_dev(xdev);
	return status;
}

/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */

static int axusbnet_suspend (struct usb_interface *intf,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)
pm_message_t message)
#else
u32 message)
#endif
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		/*
		 * accelerate emptying of the rx and tx queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach (dev->net);
		(void) unlink_urbs (dev, &dev->rxq);
		(void) unlink_urbs (dev, &dev->txq);
		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach (dev->net);
	}
	return 0;
}

static int
axusbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!--dev->suspend_count)
		tasklet_schedule (&dev->bh);

	return 0;
}
Example #5
static void omap_kp_timer(unsigned long data)
{
	tasklet_schedule(&kp_tasklet);
}
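Example #5 is the timer-to-tasklet pattern at its smallest: the timer callback does nothing but tasklet_schedule(), so timer processing stays cheap and the real scanning work runs as its own softirq unit. A sketch of the setup this implies, using the classic timer and tasklet APIs (the kp_scan body and the timing constants are assumptions):

static void kp_scan(unsigned long data)
{
	/* scan the keypad matrix, report input events, re-arm the timer */
}

static DECLARE_TASKLET(kp_tasklet, kp_scan, 0);
static struct timer_list kp_timer;

static void kp_init(void)
{
	setup_timer(&kp_timer, omap_kp_timer, 0);
	mod_timer(&kp_timer, jiffies + HZ / 20);	/* first scan in ~50 ms */
}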
Example #6
static void rx_submit (struct usbnet *dev, struct urb *urb, int flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size;

#ifdef CONFIG_USB_NET1080
	if (dev->driver_info->flags & FLAG_FRAMING_NC)
		size = FRAMED_SIZE (dev->net.mtu);
	else
#endif
#ifdef CONFIG_USB_GENESYS
	if (dev->driver_info->flags & FLAG_FRAMING_GL)
		size = GL_RCV_BUF_SIZE;
	else
#endif
		size = (sizeof (struct ethhdr) + dev->net.mtu);

	if ((skb = alloc_skb (size, flags)) == 0) {
		dbg ("no rx skb");
		tasklet_schedule (&dev->bh);
		usb_free_urb (urb);
		return;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = rx_start;
	entry->length = 0;

	FILL_BULK_URB (urb, dev->udev,
		usb_rcvbulkpipe (dev->udev, dev->driver_info->in),
		skb->data, size, rx_complete, skb);
	urb->transfer_flags |= USB_ASYNC_UNLINK;
#ifdef	REALLY_QUEUE
	urb->transfer_flags |= USB_QUEUE_BULK;
#endif
#if 0
	// Idle-but-posted reads with UHCI really chew up
	// PCI bandwidth unless FSBR is disabled
	urb->transfer_flags |= USB_NO_FSBR;
#endif

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (&dev->net)) {
		if ((retval = usb_submit_urb (urb)) != 0) {
			dbg ("%s rx submit, %d", dev->net.name, retval);
			tasklet_schedule (&dev->bh);
		} else {
			__skb_queue_tail (&dev->rxq, skb);
		}
	} else {
		dbg ("rx: stopped");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
}
static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;
	struct driver_info	*info = dev->driver_info;
	u8			align;

#if (AX_FORCE_BUFF_ALIGN)
	align = 0;
#else
	if (!(info->flags & FLAG_HW_IP_ALIGNMENT))
		align = NET_IP_ALIGN;
	else
		align = 0;
#endif

	if ((skb = alloc_skb (size + align, flags)) == NULL) {

		if (netif_msg_rx_err (dev))
			devdbg (dev, "no rx skb");

		if((dev->rx_urb_size > 2048) && dev->rx_size) {
			dev->rx_size--;
			dev->rx_urb_size = AX88772B_BULKIN_SIZE[dev->rx_size].size;

			ax8817x_write_cmd_async (dev, 0x2A,
				AX88772B_BULKIN_SIZE[dev->rx_size].byte_cnt,
				AX88772B_BULKIN_SIZE[dev->rx_size].threshold,
				0, NULL);
		}

		if (!(dev->flags & EVENT_RX_MEMORY))
			axusbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return;
	}

	if (align)
		skb_reserve (skb, NET_IP_ALIGN);

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = rx_start;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net)
			&& netif_device_present (dev->net)
			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			axusbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			axusbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			if (netif_msg_ifdown (dev))
				devdbg (dev, "device gone");
			netif_device_detach (dev->net);
			break;
		default:
			if (netif_msg_rx_err (dev))
				devdbg (dev, "rx submit, %d", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__skb_queue_tail (&dev->rxq, skb);
		}
	} else {
		if (netif_msg_ifdown (dev))
			devdbg (dev, "rx: stopped");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
}
Example #8
static void task_sched(unsigned long data)
{
    unsigned int id = (unsigned int)data;
    tasklet_schedule(&task[id]);
}
Example #9
/* work that cannot be done in interrupt context uses keventd.
 *
 * NOTE:  with 2.5 we could do more of this using completion callbacks,
 * especially now that control transfers can be queued.
 */
static void
kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt (dev->udev, dev->out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
fail_pipe:
				deverr (dev, "can't clear tx halt, status %d",
					status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt (dev->udev, dev->in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
fail_halt:
				deverr (dev, "can't clear rx halt, status %d",
					status);
		} else {
			clear_bit (EVENT_RX_HALT, &dev->flags);
			tasklet_schedule (&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
			status = usb_autopm_get_interface(dev->intf);
			if (status < 0)
				goto fail_lowmem;
			rx_submit (dev, urb, GFP_KERNEL);
			usb_autopm_put_interface(dev->intf);
fail_lowmem:
			tasklet_schedule (&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
				info->description);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (dev->flags)
		devdbg (dev, "kevent done, flags = 0x%lx",
			dev->flags);
}
Example #10
static void mmc_omap_cover_timer(unsigned long arg)
{
	struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg;
	tasklet_schedule(&slot->cover_tasklet);
}
Example #11
File: con3215.c Project: xricson/knoppix
/*
 * Interrupt routine, called from common io layer
 */
static void
raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct raw3215_info *raw;
	struct raw3215_req *req;
	struct tty_struct *tty;
	int cstat, dstat;
	int count, slen;

	raw = cdev->dev.driver_data;
	req = (struct raw3215_req *) intparm;
	cstat = irb->scsw.cstat;
	dstat = irb->scsw.dstat;
	if (cstat != 0) {
		raw->message = KERN_WARNING
			"Got nonzero channel status in raw3215_irq "
			"(dev sts 0x%2x, sch sts 0x%2x)";
		raw->msg_dstat = dstat;
		raw->msg_cstat = cstat;
		tasklet_schedule(&raw->tasklet);
	}
	if (dstat & 0x01) { /* we got a unit exception */
		dstat &= ~0x01;	 /* we can ignore it */
	}
	switch (dstat) {
	case 0x80:
		if (cstat != 0)
			break;
		/* Attention interrupt, someone hit the enter key */
		raw3215_mk_read_req(raw);
		if (MACHINE_IS_P390)
			memset(raw->inbuf, 0, RAW3215_INBUF_SIZE);
		tasklet_schedule(&raw->tasklet);
		break;
	case 0x08:
	case 0x0C:
		/* Channel end interrupt. */
		if ((raw = req->info) == NULL)
			return;		     /* That shouldn't happen ... */
		if (req->type == RAW3215_READ) {
			/* store residual count, then wait for device end */
			req->residual = irb->scsw.count;
		}
		if (dstat == 0x08)
			break;
	case 0x04:
		/* Device end interrupt. */
		if ((raw = req->info) == NULL)
			return;		     /* That shouldn't happen ... */
		if (req->type == RAW3215_READ && raw->tty != NULL) {
			unsigned int cchar;

			tty = raw->tty;
			count = 160 - req->residual;
			if (MACHINE_IS_P390) {
				slen = strnlen(raw->inbuf, RAW3215_INBUF_SIZE);
				if (count > slen)
					count = slen;
			} else
			if (count >= TTY_FLIPBUF_SIZE - tty->flip.count)
				count = TTY_FLIPBUF_SIZE - tty->flip.count - 1;
			EBCASC(raw->inbuf, count);
			cchar = ctrlchar_handle(raw->inbuf, count, tty);
			switch (cchar & CTRLCHAR_MASK) {
			case CTRLCHAR_SYSRQ:
				break;

			case CTRLCHAR_CTRL:
				tty->flip.count++;
				*tty->flip.flag_buf_ptr++ = TTY_NORMAL;
				*tty->flip.char_buf_ptr++ = cchar;
				tty_flip_buffer_push(raw->tty);
				break;

			case CTRLCHAR_NONE:
				memcpy(tty->flip.char_buf_ptr,
				       raw->inbuf, count);
				if (count < 2 ||
				    (strncmp(raw->inbuf+count-2, "^n", 2) &&
				    strncmp(raw->inbuf+count-2, "\252n", 2)) ) {
					/* add the auto \n */
					tty->flip.char_buf_ptr[count] = '\n';
					memset(tty->flip.flag_buf_ptr,
					       TTY_NORMAL, count + 1);
					count++;
				} else
					count-=2;
				tty->flip.char_buf_ptr += count;
				tty->flip.flag_buf_ptr += count;
				tty->flip.count += count;
				tty_flip_buffer_push(raw->tty);
				break;
			}
		} else if (req->type == RAW3215_WRITE) {
			raw->count -= req->len;
			raw->written -= req->len;
		}
		raw->flags &= ~RAW3215_WORKING;
		raw3215_free_req(req);
		/* check for empty wait */
		if (waitqueue_active(&raw->empty_wait) &&
		    raw->queued_write == NULL &&
		    raw->queued_read == NULL) {
			wake_up_interruptible(&raw->empty_wait);
		}
		tasklet_schedule(&raw->tasklet);
		break;
	default:
		/* Strange interrupt, I'll do my best to clean up */
		if (req != NULL && req->type != RAW3215_FREE) {
			if (req->type == RAW3215_WRITE) {
				raw->count -= req->len;
				raw->written -= req->len;
			}
			raw->flags &= ~RAW3215_WORKING;
			raw3215_free_req(req);
		}
		raw->message = KERN_WARNING
			"Spurious interrupt in in raw3215_irq "
			"(dev sts 0x%2x, sch sts 0x%2x)";
		raw->msg_dstat = dstat;
		raw->msg_cstat = cstat;
		tasklet_schedule(&raw->tasklet);
	}
	return;
}
Example #12
static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
{
	struct mmc_omap_slot *slot = NULL;
	struct mmc_host *mmc;
	int r;

	mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev);
	if (mmc == NULL)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->host = host;
	slot->mmc = mmc;
	slot->id = id;
	slot->pdata = &host->pdata->slots[id];

	host->slots[id] = slot;

	mmc->caps = 0;
	if (host->pdata->slots[id].wires >= 4)
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	mmc->ops = &mmc_omap_ops;
	mmc->f_min = 400000;

	if (cpu_class_is_omap2())
		mmc->f_max = 48000000;
	else
		mmc->f_max = 24000000;
	if (host->pdata->max_freq)
		mmc->f_max = min((unsigned int)host->pdata->max_freq,
							mmc->f_max);
	mmc->ocr_avail = slot->pdata->ocr_mask;

	/* Use scatterlist DMA to reduce per-transfer costs.
	 * NOTE max_seg_size assumption that small blocks aren't
	 * normally used (except e.g. for reading SD registers).
	 */
	mmc->max_segs = 32;
	mmc->max_blk_size = 2048;	/* BLEN is 11 bits (+1) */
	mmc->max_blk_count = 2048;	/* NBLK is 11 bits (+1) */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	r = mmc_add_host(mmc);
	if (r < 0)
		goto err_remove_host;

	if (slot->pdata->name != NULL) {
		r = device_create_file(&mmc->class_dev,
					&dev_attr_slot_name);
		if (r < 0)
			goto err_remove_host;
	}

	if (slot->pdata->get_cover_state != NULL) {
		r = device_create_file(&mmc->class_dev,
					&dev_attr_cover_switch);
		if (r < 0)
			goto err_remove_slot_name;

		setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
			    (unsigned long)slot);
		tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
			     (unsigned long)slot);
		tasklet_schedule(&slot->cover_tasklet);
	}

	return 0;

err_remove_slot_name:
	if (slot->pdata->name != NULL)
		device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
err_remove_host:
	mmc_remove_host(mmc);
	mmc_free_host(mmc);
	return r;
}
Example #13
File: jiq.c Project: zxbuaa/ldd3
/*
 * Call jiq_print from a tasklet
 */
static void jiq_print_tasklet(unsigned long ptr)
{
	if (!jiq_print((struct clientdata *)ptr))
		return;
	tasklet_schedule(&jiq_tasklet);
}
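This tasklet re-queues itself: each run prints a batch, and while jiq_print() reports more work the handler schedules jiq_tasklet again. The declaration that pairs with this handler in LDD3's jiq module looks roughly as follows (the clientdata contents are abbreviated here):

static struct clientdata jiq_data;	/* loop counters, output buffer, ... */

static DECLARE_TASKLET(jiq_tasklet, jiq_print_tasklet,
		       (unsigned long)&jiq_data);

/* first kick; afterwards the tasklet re-arms itself until the run ends */
tasklet_schedule(&jiq_tasklet);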
Example #14
static int acm_tty_open(struct tty_struct *tty, struct file *filp)
{
	struct acm *acm;
	int rv = -ENODEV;
	int i;
	dbg("Entering acm_tty_open.");

	mutex_lock(&open_mutex);

	acm = acm_table[tty->index];
	if (!acm || !acm->dev)
		goto err_out;
	else
		rv = 0;

	set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);

	tty->driver_data = acm;
	tty_port_tty_set(&acm->port, tty);

	if (usb_autopm_get_interface(acm->control) < 0)
		goto early_bail;
	else
		acm->control->needs_remote_wakeup = 1;

	mutex_lock(&acm->mutex);
	if (acm->port.count++) {
		usb_autopm_put_interface(acm->control);
		goto done;
	}

	acm->ctrlurb->dev = acm->dev;
	if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
		dbg("usb_submit_urb(ctrl irq) failed");
		goto bail_out;
	}

	if (0 > acm_set_control(acm, acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS) &&
	    (acm->ctrl_caps & USB_CDC_CAP_LINE))
		goto full_bailout;

	usb_autopm_put_interface(acm->control);

	INIT_LIST_HEAD(&acm->spare_read_urbs);
	INIT_LIST_HEAD(&acm->spare_read_bufs);
	INIT_LIST_HEAD(&acm->filled_read_bufs);

	for (i = 0; i < acm->rx_buflimit; i++)
		list_add(&(acm->ru[i].list), &acm->spare_read_urbs);
	for (i = 0; i < acm->rx_buflimit; i++)
		list_add(&(acm->rb[i].list), &acm->spare_read_bufs);

	acm->throttle = 0;

	set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
	rv = tty_port_block_til_ready(&acm->port, tty, filp);
	tasklet_schedule(&acm->urb_task);
done:
	mutex_unlock(&acm->mutex);
err_out:
	mutex_unlock(&open_mutex);
	return rv;

full_bailout:
	usb_kill_urb(acm->ctrlurb);
bail_out:
	usb_autopm_put_interface(acm->control);
	acm->port.count--;
	mutex_unlock(&acm->mutex);
early_bail:
	mutex_unlock(&open_mutex);
	tty_port_tty_set(&acm->port, NULL);
	return -EIO;
}
static irqreturn_t iwl_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	unsigned long flags;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_fh;
#endif
	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 *    back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(bus(trans), CSR_INT_MASK);  /* just for debug */
	iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(bus(trans), CSR_INT);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) {
		inta_fh = iwl_read32(bus(trans), CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
			      "fh 0x%08x\n", inta, inta_mask, inta_fh);
	}
#endif

	trans_pcie->inta |= inta;
	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
			!trans_pcie->inta)
		iwl_enable_interrupts(trans);

 unplugged:
	spin_unlock_irqrestore(&trans->shrd->lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only re-enable if disabled by irq and no tasklet was scheduled. */
	if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
		!trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);
	return IRQ_NONE;
}
/* hci_uart_tty_wakeup()
 *
 *    Callback for transmit wakeup. Called when low level
 *    device driver can accept more send data.
 *    This callback gets called from the isr context so
 *    schedule the send data operation to tasklet.
 *
 * Arguments:        tty    pointer to associated tty instance data
 * Return Value:    None
 */
static void hci_uart_tty_wakeup(struct tty_struct *tty)
{
	struct hci_uart *hu = (void *)tty->disc_data;
	tasklet_schedule(&hu->tty_wakeup_task);
}
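Since this wakeup arrives from ISR context, the callback only queues work; the actual frame transmission happens later in softirq context. A sketch of the setup this implies (the handler body is illustrative; the tty_wakeup_task field name follows the example):

static void hci_uart_tx_wakeup(unsigned long arg)
{
	struct hci_uart *hu = (struct hci_uart *)arg;

	/* pull queued frames off hu and push them through the tty driver */
}

/* at line-discipline open time */
tasklet_init(&hu->tty_wakeup_task, hci_uart_tx_wakeup, (unsigned long)hu);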
/* interrupt handler using ict table, with this interrupt driver will
 * stop using INTA register to get device's interrupt, reading this register
 * is expensive, device will write interrupts in ICT dram table, increment
 * index then will fire interrupt to driver, driver will OR all ICT table
 * entries from current index up to table entry with 0 value. the result is
 * the interrupt we need to service, driver will set the entries back to 0 and
 * set index.
 */
irqreturn_t iwl_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	u32 val = 0;
	unsigned long flags;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (!trans_pcie->use_ict)
		return iwl_isr(irq, data);

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	inta_mask = iwl_read32(bus(trans), CSR_INT_MASK);  /* just for debug */
	iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);


	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!trans_pcie->ict_tbl[trans_pcie->ict_index]) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/* read all entries that not 0 start with ict_index */
	while (trans_pcie->ict_tbl[trans_pcie->ict_index]) {

		val |= le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
				trans_pcie->ict_index,
				le32_to_cpu(
				  trans_pcie->ict_tbl[trans_pcie->ict_index]));
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

	}

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
			inta, inta_mask, val);

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
			!trans_pcie->inta) {
		/* Re-enable interrupts only if they were disabled by this
		 * handler and no tasklet was scheduled; if one was scheduled,
		 * the tasklet will re-enable them itself.
		 */
		iwl_enable_interrupts(trans);
	}

	spin_unlock_irqrestore(&trans->shrd->lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only Re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
		!trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);
	return IRQ_NONE;
}
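The byte reassembly above is easy to verify by hand: each OR-ed ICT entry packs the interrupt bits as two bytes, with the low byte landing in CSR_INT bits 0..7 and the second byte in bits 24..31, which is also why the workaround's bit 15 of val surfaces as bit 31 (the Rx bit) of inta. A worked value (numbers are illustrative):

	val  = 0x0000C083
	low byte:     0x83            ->  inta bits 0..7
	second byte:  0xC000 << 16    ->  inta bits 24..31
	inta = 0x83 | 0xC0000000 = 0xC0000083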
Example #18
/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(unsigned long _port)
{
	struct gs_port		*port = (void *)_port;
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; without low_latency set this is handled by
	 * a workqueue, so we won't get callbacks and can hold port_lock
	 */
	if (tty && do_push)
		tty_flip_buffer_push(tty);


	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the tasklet
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
/* Called in soft-irq context */
static void smd_net_data_handler(unsigned long arg)
{
	struct net_device *dev = (struct net_device *) arg;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb;
	void *ptr = 0;
	int sz;
	u32 opmode = p->operation_mode;
	unsigned long flags;

	for (;;) {
		sz = smd_cur_packet_size(p->ch);
		if (sz == 0) break;
		if (smd_read_avail(p->ch) < sz) break;

		skb = dev_alloc_skb(sz + NET_IP_ALIGN);
		if (skb == NULL) {
			pr_err("[%s] rmnet_recv() cannot allocate skb\n",
			       dev->name);
			/* out of memory, reschedule a later attempt */
			smd_net_data_tasklet.data = (unsigned long)dev;
			tasklet_schedule(&smd_net_data_tasklet);
			break;
		} else {
			skb->dev = dev;
			skb_reserve(skb, NET_IP_ALIGN);
			ptr = skb_put(skb, sz);
			wake_lock_timeout(&p->wake_lock, HZ / 2);
			if (smd_read(p->ch, ptr, sz) != sz) {
				pr_err("[%s] rmnet_recv() smd lied about avail?!",
					dev->name);
				ptr = 0;
				dev_kfree_skb_irq(skb);
			} else {
				/* Handle Rx frame format */
				spin_lock_irqsave(&p->lock, flags);
				opmode = p->operation_mode;
				spin_unlock_irqrestore(&p->lock, flags);

				if (RMNET_IS_MODE_IP(opmode)) {
					/* Driver in IP mode */
					skb->protocol =
					  rmnet_ip_type_trans(skb, dev);
				} else {
					/* Driver in Ethernet mode */
					skb->protocol =
					  eth_type_trans(skb, dev);
				}
				if (RMNET_IS_MODE_IP(opmode) ||
				    count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
					p->wakeups_rcv +=
					rmnet_cause_wakeup(p);
#endif
					p->stats.rx_packets++;
					p->stats.rx_bytes += skb->len;
				}
				DBG1("[%s] Rx packet #%lu len=%d\n",
					dev->name, p->stats.rx_packets,
					skb->len);

				/* Deliver to network stack */
				netif_rx(skb);
			}
			continue;
		}
		if (smd_read(p->ch, ptr, sz) != sz)
			pr_err("[%s] rmnet_recv() smd lied about avail?!",
				dev->name);
	}
}
Example #20
static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_HOST_BUFFER_SIZE:
		hci_cc_host_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_SSP_MODE:
		hci_cc_read_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_WRITE_CA_TIMEOUT:
		hci_cc_write_ca_timeout(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	if (ev->ncmd) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			tasklet_schedule(&hdev->cmd_task);
	}
}
static
int axusbnet_open (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			retval = 0;
	struct driver_info	*info = dev->driver_info;

	// put into "known safe" state
	if (info->reset && (retval = info->reset (dev)) < 0) {
		if (netif_msg_ifup (dev))
			devinfo (dev,
				"open reset fail (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
			info->description);
		goto done;
	}

	// insist peer be connected
	if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
		if (netif_msg_ifup (dev))
			devdbg (dev, "can't open; %d", retval);
		goto done;
	}

	/* start any status interrupt transfer */
	if (dev->interrupt) {
		retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
		if (retval < 0) {
			if (netif_msg_ifup (dev))
				deverr (dev, "intr submit %d", retval);
			goto done;
		}
	}

	netif_start_queue (net);
	if (netif_msg_ifup (dev)) {
		char	*framing;

		if (dev->driver_info->flags & FLAG_FRAMING_NC)
			framing = "NetChip";
		else if (dev->driver_info->flags & FLAG_FRAMING_GL)
			framing = "GeneSys";
		else if (dev->driver_info->flags & FLAG_FRAMING_Z)
			framing = "Zaurus";
		else if (dev->driver_info->flags & FLAG_FRAMING_RN)
			framing = "RNDIS";
		else if (dev->driver_info->flags & FLAG_FRAMING_AX)
			framing = "ASIX";
		else
			framing = "simple";

		devinfo (dev, "open: enable queueing "
				"(rx %d, tx %d) mtu %d %s framing",
			(int)RX_QLEN (dev), (int)TX_QLEN (dev), dev->net->mtu,
			framing);
	}

	// delay posting reads until we're fully open
	tasklet_schedule (&dev->bh);
	return retval;
done:
	return retval;
}
Example #22
static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	if (ev->ncmd) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			tasklet_schedule(&hdev->cmd_task);
	}
}
Example #23
File: ifb.c Project: AlexShiLucky/linux
static void ifb_ri_tasklet(unsigned long _txp)
{
	struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
	skb = skb_peek(&txp->tq);
	if (!skb) {
		if (!__netif_tx_trylock(txq))
			goto resched;
		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
		__netif_tx_unlock(txq);
	}

	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
		skb->tc_redirected = 0;
		skb->tc_skip_classify = 1;

		u64_stats_update_begin(&txp->tsync);
		txp->tx_packets++;
		txp->tx_bytes += skb->len;
		u64_stats_update_end(&txp->tsync);

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			txp->dev->stats.tx_dropped++;
			if (skb_queue_len(&txp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = txp->dev->ifindex;

		if (!skb->tc_from_ingress) {
			dev_queue_xmit(skb);
		} else {
			skb_pull_rcsum(skb, skb->mac_len);
			netif_receive_skb(skb);
		}
	}

	if (__netif_tx_trylock(txq)) {
		skb = skb_peek(&txp->rq);
		if (!skb) {
			txp->tasklet_pending = 0;
			if (netif_tx_queue_stopped(txq))
				netif_tx_wake_queue(txq);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}

}
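This multi-queue variant hangs one tasklet off each ifb_q_private. A sketch of the per-queue setup follows, roughly what an ndo_init hook would do; the accessor and loop shape are inferred from the handler above, not quoted from ifb.c:

static int my_ifb_init(struct net_device *dev)
{
	struct ifb_q_private *txp = my_get_queues(dev);	/* assumed accessor */
	int i;

	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		txp->dev = dev;
		txp->txqnum = i;
		__skb_queue_head_init(&txp->rq);
		__skb_queue_head_init(&txp->tq);
		u64_stats_init(&txp->tsync);
		/* Bind the handler above to this queue's private state. */
		tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
			     (unsigned long)txp);
	}
	return 0;
}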
Example #24
0
unsigned long hp_sdc_put(void) {
	hp_sdc_transaction *curr;
	uint8_t act;
	int idx, curridx;

	int limit = 0;

	write_lock(&hp_sdc.lock);

	/* If i8042 buffers are full, we cannot do anything that
	   requires output, so we skip to the administrativa. */
	if (hp_sdc.ibf) {
		hp_sdc_status_in8();
		if (hp_sdc.ibf) goto finish;
	}

 anew:
	/* See if we are in the middle of a sequence. */
	if (hp_sdc.wcurr < 0) hp_sdc.wcurr = 0;
	read_lock_irq(&hp_sdc.rtq_lock);
	if (hp_sdc.rcurr == hp_sdc.wcurr) hp_sdc.wcurr++;
	read_unlock_irq(&hp_sdc.rtq_lock);
	if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN) hp_sdc.wcurr = 0;
	curridx = hp_sdc.wcurr;

	if (hp_sdc.tq[curridx] != NULL) goto start;

	while (++curridx != hp_sdc.wcurr) {
		if (curridx >= HP_SDC_QUEUE_LEN) {
			curridx = -1; /* Wrap to top */
			continue;
		}
		read_lock_irq(&hp_sdc.rtq_lock);
		if (hp_sdc.rcurr == curridx) {
			read_unlock_irq(&hp_sdc.rtq_lock);
			continue;
		}
		read_unlock_irq(&hp_sdc.rtq_lock);
		if (hp_sdc.tq[curridx] != NULL) break; /* Found one. */
	}
	if (curridx == hp_sdc.wcurr) { /* There's nothing queued to do. */
		curridx = -1;
	}
	hp_sdc.wcurr = curridx;

 start:

	/* Check to see if the interrupt mask needs to be set. */
	if (hp_sdc.set_im) {
		hp_sdc_status_out8(hp_sdc.im | HP_SDC_CMD_SET_IM);
		hp_sdc.set_im = 0;
		goto finish;
	}

	if (hp_sdc.wcurr == -1) goto done;

	curr = hp_sdc.tq[curridx];
	idx = curr->actidx;

	if (curr->actidx >= curr->endidx) {
		hp_sdc.tq[curridx] = NULL;
		/* Interleave outbound data between the transactions. */
		hp_sdc.wcurr++;
		if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN) hp_sdc.wcurr = 0;
		goto finish;	
	}

	act = curr->seq[idx];
	idx++;

	if (curr->idx >= curr->endidx) {
		if (act & HP_SDC_ACT_DEALLOC) kfree(curr);
		hp_sdc.tq[curridx] = NULL;
		/* Interleave outbound data between the transactions. */
		hp_sdc.wcurr++;
		if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN) hp_sdc.wcurr = 0;
		goto finish;	
	}

	while (act & HP_SDC_ACT_PRECMD) {
		if (curr->idx != idx) {
			idx++;
			act &= ~HP_SDC_ACT_PRECMD;
			break;
		}
		hp_sdc_status_out8(curr->seq[idx]);
		curr->idx++;
		/* act finished? */
		if ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_PRECMD)
		  goto actdone;
		/* skip quantity field if data-out sequence follows. */
		if (act & HP_SDC_ACT_DATAOUT) curr->idx++;
		goto finish;
	}
	if (act & HP_SDC_ACT_DATAOUT) {
		int qty;

		qty = curr->seq[idx];
		idx++;
		if (curr->idx - idx < qty) {
			hp_sdc_data_out8(curr->seq[curr->idx]);
			curr->idx++;
			/* act finished? */
			if ((curr->idx - idx >= qty) && 
			    ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_DATAOUT))
				goto actdone;
			goto finish;
		}
		idx += qty;
		act &= ~HP_SDC_ACT_DATAOUT;
	}
	else while (act & HP_SDC_ACT_DATAREG) {
		int mask;
		uint8_t w7[4];

		mask = curr->seq[idx];
		if (idx != curr->idx) {
			idx++;
			idx += !!(mask & 1);
			idx += !!(mask & 2);
			idx += !!(mask & 4);
			idx += !!(mask & 8);
			act &= ~HP_SDC_ACT_DATAREG;
			break;
		}
		
		w7[0] = (mask & 1) ? curr->seq[++idx] : hp_sdc.r7[0];
		w7[1] = (mask & 2) ? curr->seq[++idx] : hp_sdc.r7[1];
		w7[2] = (mask & 4) ? curr->seq[++idx] : hp_sdc.r7[2];
		w7[3] = (mask & 8) ? curr->seq[++idx] : hp_sdc.r7[3];
		
		if (hp_sdc.wi > 0x73 || hp_sdc.wi < 0x70 ||
		        w7[hp_sdc.wi-0x70] == hp_sdc.r7[hp_sdc.wi-0x70]) {
			int i = 0;

			/* Need to point the write index register */	
			while ((i < 4) && w7[i] == hp_sdc.r7[i]) i++;
			if (i < 4) {
				hp_sdc_status_out8(HP_SDC_CMD_SET_D0 + i);
				hp_sdc.wi = 0x70 + i;
				goto finish;
			}
			idx++;
			if ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_DATAREG)
				goto actdone;
			curr->idx = idx;
			act &= ~HP_SDC_ACT_DATAREG;
			break;
		}

		hp_sdc_data_out8(w7[hp_sdc.wi - 0x70]);
		hp_sdc.r7[hp_sdc.wi - 0x70] = w7[hp_sdc.wi - 0x70];
		hp_sdc.wi++; /* write index register autoincrements */
		{
			int i = 0;

			while ((i < 4) && w7[i] == hp_sdc.r7[i]) i++;
			if (i >= 4) {
				curr->idx = idx + 1;
				if ((act & HP_SDC_ACT_DURING) == 
				    HP_SDC_ACT_DATAREG)
				        goto actdone;
			}
		}
		goto finish;
	}
	/* We don't go any further in the command if there is a pending read,
	   because we don't want interleaved results. */
	read_lock_irq(&hp_sdc.rtq_lock);
	if (hp_sdc.rcurr >= 0) {
		read_unlock_irq(&hp_sdc.rtq_lock);
		goto finish;
	}
	read_unlock_irq(&hp_sdc.rtq_lock);


	if (act & HP_SDC_ACT_POSTCMD) {
	  	uint8_t postcmd;

		/* curr->idx should == idx at this point. */
		postcmd = curr->seq[idx];
		curr->idx++;
		if (act & HP_SDC_ACT_DATAIN) {

			/* Start a new read */
	  		hp_sdc.rqty = curr->seq[curr->idx];
			do_gettimeofday(&hp_sdc.rtv);
			curr->idx++;
			/* Still need to lock here in case of spurious irq. */
			write_lock_irq(&hp_sdc.rtq_lock);
			hp_sdc.rcurr = curridx; 
			write_unlock_irq(&hp_sdc.rtq_lock);
			hp_sdc_status_out8(postcmd);
			goto finish;
		}
		hp_sdc_status_out8(postcmd);
		goto actdone;
	}

actdone:
	if (act & HP_SDC_ACT_SEMAPHORE) {
		up(curr->act.semaphore);
	}
	else if (act & HP_SDC_ACT_CALLBACK) {
		curr->act.irqhook(0,NULL,0,0);
	}
	if (curr->idx >= curr->endidx) { /* This transaction is over. */
		if (act & HP_SDC_ACT_DEALLOC) kfree(curr);
		hp_sdc.tq[curridx] = NULL;
	}
	else {
		curr->actidx = idx + 1;
		curr->idx = idx + 2;
	}
	/* Interleave outbound data between the transactions. */
	hp_sdc.wcurr++;
	if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN) hp_sdc.wcurr = 0;

 finish:
	/* If by some quirk IBF has cleared and our ISR has run to 
	   see that that has happened, do it all again. */
	if (!hp_sdc.ibf && limit++ < 20) goto anew;

 done:
	if (hp_sdc.wcurr >= 0) tasklet_schedule(&hp_sdc.task);
	write_unlock(&hp_sdc.lock);
	return 0;
}
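hp_sdc_put() ends by re-arming hp_sdc.task whenever a write slot is still pending, which is the same bounded-work-then-reschedule shape every handler in this collection uses. Stripped of the driver specifics, a self-contained sketch with purely illustrative names:

#include <linux/interrupt.h>

struct my_ctx {
	struct tasklet_struct task;
	/* ... queue and work state ... */
};

/* Illustrative: does one bounded slice, returns nonzero if work remains. */
extern int do_bounded_work(struct my_ctx *ctx);

static void my_tasklet(unsigned long arg)
{
	struct my_ctx *ctx = (struct my_ctx *)arg;

	/* Re-arm instead of looping so one run never hogs softirq time. */
	if (do_bounded_work(ctx))
		tasklet_schedule(&ctx->task);
}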
Example #25
0
static void ri_tasklet(unsigned long dev)
{
	struct net_device *_dev = (struct net_device *)dev;
	struct ifb_private *dp = netdev_priv(_dev);
	struct net_device_stats *stats = &_dev->stats;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(_dev, 0);
	dp->st_task_enter++;
	if ((skb = skb_peek(&dp->tq)) == NULL) {
		dp->st_txq_refl_try++;
		if (__netif_tx_trylock(txq)) {
			dp->st_rxq_enter++;
			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
				skb_queue_tail(&dp->tq, skb);
				dp->st_rx2tx_tran++;
			}
			__netif_tx_unlock(txq);
		} else {
			/* reschedule */
			dp->st_rxq_notenter++;
			goto resched;
		}
	}

	while ((skb = skb_dequeue(&dp->tq)) != NULL) {
		u32 from = G_TC_FROM(skb->tc_verd);

		skb->tc_verd = 0;
		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			stats->tx_dropped++;
			if (skb_queue_len(&dp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = _dev->ifindex;

		if (from & AT_EGRESS) {
			dp->st_rx_frm_egr++;
			dev_queue_xmit(skb);
		} else if (from & AT_INGRESS) {
			dp->st_rx_frm_ing++;
			skb_pull(skb, skb->dev->hard_header_len);
			netif_rx(skb);
		} else
			BUG();
	}

	if (__netif_tx_trylock(txq)) {
		dp->st_rxq_check++;
		if ((skb = skb_peek(&dp->rq)) == NULL) {
			dp->tasklet_pending = 0;
			if (netif_queue_stopped(_dev))
				netif_wake_queue(_dev);
		} else {
			dp->st_rxq_rsch++;
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

}
Example #26
0
static void rx_urb_complete(struct urb *urb)
{
	int r;
	struct zd_usb *usb;
	struct zd_usb_rx *rx;
	const u8 *buffer;
	unsigned int length;

	switch (urb->status) {
	case 0:
		break;
	case -ESHUTDOWN:
	case -EINVAL:
	case -ENODEV:
	case -ENOENT:
	case -ECONNRESET:
	case -EPIPE:
		dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
		return;
	default:
		dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
		goto resubmit;
	}

	buffer = urb->transfer_buffer;
	length = urb->actual_length;
	usb = urb->context;
	rx = &usb->rx;

	tasklet_schedule(&rx->reset_timer_tasklet);

	if (length % rx->usb_packet_size > rx->usb_packet_size - 4) {
		/* If there is an old first fragment, we don't care. */
		dev_dbg_f(urb_dev(urb), "*** first fragment ***\n");
		ZD_ASSERT(length <= ARRAY_SIZE(rx->fragment));
		spin_lock(&rx->lock);
		memcpy(rx->fragment, buffer, length);
		rx->fragment_length = length;
		spin_unlock(&rx->lock);
		goto resubmit;
	}

	spin_lock(&rx->lock);
	if (rx->fragment_length > 0) {
		/* We are on a second fragment, we believe */
		ZD_ASSERT(length + rx->fragment_length <=
			  ARRAY_SIZE(rx->fragment));
		dev_dbg_f(urb_dev(urb), "*** second fragment ***\n");
		memcpy(rx->fragment+rx->fragment_length, buffer, length);
		handle_rx_packet(usb, rx->fragment,
			         rx->fragment_length + length);
		rx->fragment_length = 0;
		spin_unlock(&rx->lock);
	} else {
		spin_unlock(&rx->lock);
		handle_rx_packet(usb, buffer, length);
	}

resubmit:
	r = usb_submit_urb(urb, GFP_ATOMIC);
	if (r)
		dev_dbg_f(urb_dev(urb), "urb %p resubmit error %d\n", urb, r);
}
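rx_urb_complete() is only ever reached for a URB that was primed against it. A hedged sketch of that priming follows; the endpoint constant, buffer size, and helper name are assumptions, not zd1211rw's actual symbols:

static int my_submit_rx_urb(struct zd_usb *usb, struct usb_device *udev)
{
	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
	void *buf;

	if (!urb)
		return -ENOMEM;
	buf = kmalloc(MY_RX_BUF_SIZE, GFP_KERNEL);	/* size assumed */
	if (!buf) {
		usb_free_urb(urb);
		return -ENOMEM;
	}
	/* Completion handler and context match rx_urb_complete() above. */
	usb_fill_bulk_urb(urb, udev,
			  usb_rcvbulkpipe(udev, MY_EP_DATA_IN),	/* ep assumed */
			  buf, MY_RX_BUF_SIZE, rx_urb_complete, usb);
	return usb_submit_urb(urb, GFP_KERNEL);
}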
Example #27
0
static void usbnet_bh (unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process (dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb (entry->urb);
			dev_kfree_skb (skb);
			continue;
		default:
			netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
		}
	}

	// waiting for all pending urbs to complete?
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
			wake_up (dev->wait);
		}

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net) &&
		   netif_device_present (dev->net) &&
		   !timer_pending (&dev->delay) &&
		   !test_bit (EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;
		int	qlen = RX_QLEN (dev);

		if (temp < qlen) {
			struct urb	*urb;
			int		i;

			// don't refill the queue all at once
			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
				urb = usb_alloc_urb (0, GFP_ATOMIC);
				if (urb != NULL) {
					if (rx_submit (dev, urb, GFP_ATOMIC) ==
					    -ENOLINK)
						return;
				}
			}
			if (temp != dev->rxq.qlen)
				netif_dbg(dev, link, dev->net,
					  "rxqlen %d --> %d\n",
					  temp, dev->rxq.qlen);
			if (dev->rxq.qlen < qlen)
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}
Example #28
0
static void au1xmmc_receive_pio(struct au1xmmc_host *host)
{
	struct mmc_data *data;
	int max, count, sg_len = 0;
	unsigned char *sg_ptr = NULL;
	u32 status, val;
	struct scatterlist *sg;

	data = host->mrq->data;

	if (!(host->flags & HOST_F_RECV))
		return;

	max = host->pio.len;

	if (host->pio.index < host->dma.len) {
		sg = &data->sg[host->pio.index];
		sg_ptr = sg_virt(sg) + host->pio.offset;

		/* This is the space left inside the buffer */
		sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;

		/* Check if we need less than the size of the sg_buffer */
		if (sg_len < max)
			max = sg_len;
	}

	if (max > AU1XMMC_MAX_TRANSFER)
		max = AU1XMMC_MAX_TRANSFER;

	for (count = 0; count < max; count++) {
		status = au_readl(HOST_STATUS(host));

		if (!(status & SD_STATUS_NE))
			break;

		if (status & SD_STATUS_RC) {
			DBG("RX CRC Error [%d + %d].\n", host->pdev->id,
					host->pio.len, count);
			break;
		}

		if (status & SD_STATUS_RO) {
			DBG("RX Overrun [%d + %d]\n", host->pdev->id,
					host->pio.len, count);
			break;
		} else if (status & SD_STATUS_RU) {
			DBG("RX Underrun [%d + %d]\n", host->pdev->id,
					host->pio.len,	count);
			break;
		}

		val = au_readl(HOST_RXPORT(host));

		if (sg_ptr)
			*sg_ptr++ = (unsigned char)(val & 0xFF);
	}

	host->pio.len -= count;
	host->pio.offset += count;

	if (sg_len && count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}

	if (host->pio.len == 0) {
		/* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */
		IRQ_OFF(host, SD_CONFIG_NE);

		if (host->flags & HOST_F_STOP)
			SEND_STOP(host);

		tasklet_schedule(&host->data_task);
	}
}
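When the PIO buffer empties, the handler defers completion to host->data_task rather than finishing the request in IRQ context. A hedged sketch of what such a data tasklet typically does; the function name is an assumption, though mmc_request_done() is the standard hand-off to the MMC core:

static void my_data_tasklet(unsigned long arg)
{
	struct au1xmmc_host *host = (struct au1xmmc_host *)arg;
	struct mmc_request *mrq = host->mrq;

	if (mrq && mrq->data) {
		host->mrq = NULL;
		/* Hand the finished request back to the MMC core. */
		mmc_request_done(host->mmc, mrq);
	}
}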
Example #29
0
int usbnet_open (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			retval;
	struct driver_info	*info = dev->driver_info;

	if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "resumption fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done_nopm;
	}

	// put into "known safe" state
	if (info->reset && (retval = info->reset (dev)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "open reset fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done;
	}

	// insist peer be connected
	if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
		netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
		goto done;
	}

	/* start any status interrupt transfer */
	if (dev->interrupt) {
		retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
		if (retval < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", retval);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_start_queue (net);
	netif_info(dev, ifup, dev->net,
		   "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
		   (int)RX_QLEN(dev), (int)TX_QLEN(dev),
		   dev->net->mtu,
		   (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
		   (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
		   (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
		   (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
		   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
		   "simple");

	// delay posting reads until we're fully open
	tasklet_schedule (&dev->bh);
	if (info->manage_power) {
		retval = info->manage_power(dev, 1);
		if (retval < 0)
			goto done;
		usb_autopm_put_interface(dev->intf);
	}
	return retval;

done:
	usb_autopm_put_interface(dev->intf);
done_nopm:
	return retval;
}
Example #30
0
static inline void ace_fsm_yield(struct ace_device *ace)
{
	dev_dbg(ace->dev, "ace_fsm_yield()\n");
	tasklet_schedule(&ace->fsm_tasklet);
	ace->fsm_continue_flag = 0;
}
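The yield only makes sense next to the loop that consumes it: the FSM tasklet keeps stepping states until some state calls ace_fsm_yield(), which clears the flag and re-arms the tasklet for a later softirq pass. A sketch loosely following the xsysace driver; locking is elided and ace_fsm_dostate() is an assumed per-state step function:

static void ace_fsm_tasklet(unsigned long data)
{
	struct ace_device *ace = (struct ace_device *)data;

	/* Step the state machine until a state yields. */
	ace->fsm_continue_flag = 1;
	while (ace->fsm_continue_flag)
		ace_fsm_dostate(ace);	/* may call ace_fsm_yield() */
}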