Example #1
int softing_startstop(struct net_device *dev, int up)
{
	int ret;
	struct softing *card;
	struct softing_priv *priv;
	struct net_device *netdev;
	int bus_bitmask_start;
	int j, error_reporting;
	struct can_frame msg;
	const struct can_bittiming *bt;

	priv = netdev_priv(dev);
	card = priv->card;

	if (!card->fw.up)
		return -EIO;

	ret = mutex_lock_interruptible(&card->fw.lock);
	if (ret)
		return ret;

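	/* bus_bitmask_start collects, one bit per CAN bus, which
	 * channels must be (re)started after the chip reset below */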
	bus_bitmask_start = 0;
	if (dev && up)
		/* prepare to start this bus as well */
		bus_bitmask_start |= (1 << priv->index);
	/* bring netdevs down */
	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
		netdev = card->net[j];
		if (!netdev)
			continue;
		priv = netdev_priv(netdev);

		if (dev != netdev)
			netif_stop_queue(netdev);

		if (netif_running(netdev)) {
			if (dev != netdev)
				bus_bitmask_start |= (1 << j);
			priv->tx.pending = 0;
			priv->tx.echo_put = 0;
			priv->tx.echo_get = 0;
			/*
			 * This bus may just have called open_candev(),
			 * which makes a close_candev() here look redundant.
			 * But we may also get here from bus-off recovery,
			 * in which case the echo_skb _needs_ flushing too.
			 * Just be sure to call open_candev() again later.
			 */
			close_candev(netdev);
		}
		priv->can.state = CAN_STATE_STOPPED;
	}
	card->tx.pending = 0;

	softing_enable_irq(card, 0);
	ret = softing_reset_chip(card);
	if (ret)
		goto failed;
	if (!bus_bitmask_start)
		/* no busses to be brought up */
		goto card_done;

	if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2)
			&& (softing_error_reporting(card->net[0])
				!= softing_error_reporting(card->net[1]))) {
		dev_alert(&card->pdev->dev,
				"err_reporting flag differs for busses\n");
		goto invalid;
	}
	error_reporting = 0;
	if (bus_bitmask_start & 1) {
		netdev = card->net[0];
		priv = netdev_priv(netdev);
		error_reporting += softing_error_reporting(netdev);
		/* init chip 1 */
		bt = &priv->can.bittiming;
		iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
		iowrite16(bt->phase_seg1 + bt->prop_seg,
				&card->dpram[DPRAM_FCT_PARAM + 6]);
		iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
		iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
				&card->dpram[DPRAM_FCT_PARAM + 10]);
		ret = softing_fct_cmd(card, 1, "initialize_chip[0]");
		if (ret < 0)
			goto failed;
		/* set mode */
		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
		ret = softing_fct_cmd(card, 3, "set_mode[0]");
		if (ret < 0)
			goto failed;
		/* set filter */
		/* 11bit id & mask */
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
		/* 29bit id.lo & mask.lo & id.hi & mask.hi */
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
		iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
		iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
		ret = softing_fct_cmd(card, 7, "set_filter[0]");
		if (ret < 0)
			goto failed;
		/* set output control */
		iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
		ret = softing_fct_cmd(card, 5, "set_output[0]");
		if (ret < 0)
			goto failed;
	}
	if (bus_bitmask_start & 2) {
		netdev = card->net[1];
		priv = netdev_priv(netdev);
		error_reporting += softing_error_reporting(netdev);
		/* init chip2 */
		bt = &priv->can.bittiming;
		iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
		iowrite16(bt->phase_seg1 + bt->prop_seg,
				&card->dpram[DPRAM_FCT_PARAM + 6]);
		iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
		iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
				&card->dpram[DPRAM_FCT_PARAM + 10]);
		ret = softing_fct_cmd(card, 2, "initialize_chip[1]");
		if (ret < 0)
			goto failed;
		/* set mode2 */
		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
		ret = softing_fct_cmd(card, 4, "set_mode[1]");
		if (ret < 0)
			goto failed;
		/* set filter2 */
		/* 11bit id & mask */
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
		/* 29bit id.lo & mask.lo & id.hi & mask.hi */
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
		iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
		iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
		ret = softing_fct_cmd(card, 8, "set_filter[1]");
		if (ret < 0)
			goto failed;
		/* set output control2 */
		iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
		ret = softing_fct_cmd(card, 6, "set_output[1]");
		if (ret < 0)
			goto failed;
	}
	/* enable_error_frame */
	/*
	 * Error reporting is switched off at the moment since
	 * the receiving of them is not yet 100% verified
	 * This should be enabled sooner or later
	 *
	if (error_reporting) {
		ret = softing_fct_cmd(card, 51, "enable_error_frame");
		if (ret < 0)
			goto failed;
	}
	*/
	/* initialize interface */
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 18]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]);
	ret = softing_fct_cmd(card, 17, "initialize_interface");
	if (ret < 0)
		goto failed;
	/* enable_fifo */
	ret = softing_fct_cmd(card, 36, "enable_fifo");
	if (ret < 0)
		goto failed;
	/* enable fifo tx ack */
	ret = softing_fct_cmd(card, 13, "fifo_tx_ack[0]");
	if (ret < 0)
		goto failed;
	/* enable fifo tx ack2 */
	ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]");
	if (ret < 0)
		goto failed;
	/* start_chip */
	ret = softing_fct_cmd(card, 11, "start_chip");
	if (ret < 0)
		goto failed;
	iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]);
	iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]);
	if (card->pdat->generation < 2) {
		iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
		/* flush the DPRAM caches */
		wmb();
	}

	softing_initialize_timestamp(card);

	/*
	 * do socketcan notifications/status changes
	 * from here, no errors should occur, or the failed: part
	 * must be reviewed
	 */
	memset(&msg, 0, sizeof(msg));
	msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
	msg.can_dlc = CAN_ERR_DLC;
	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
		if (!(bus_bitmask_start & (1 << j)))
			continue;
		netdev = card->net[j];
		if (!netdev)
			continue;
		priv = netdev_priv(netdev);
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
		open_candev(netdev);
		if (dev != netdev) {
			/* notify other busses on the restart */
			softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
			++priv->can.can_stats.restarts;
		}
		netif_wake_queue(netdev);
	}

	/* enable interrupts */
	ret = softing_enable_irq(card, 1);
	if (ret)
		goto failed;
card_done:
	mutex_unlock(&card->fw.lock);
	return 0;
invalid:
	ret = -EINVAL;
failed:
	softing_enable_irq(card, 0);
	softing_reset_chip(card);
	mutex_unlock(&card->fw.lock);
	/* bring all other interfaces down */
	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
		netdev = card->net[j];
		if (!netdev)
			continue;
		dev_close(netdev);
	}
	return ret;
}
Example #2
/*
 * Handle changes in state of network devices enslaved to a bridge.
 *
 * Note: don't care about up/down if bridge itself is down, because
 *     port state is checked when bridge is brought up.
 */
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_bridge_port *p;
	struct net_bridge *br;
	bool changed_addr;
	int err;

	/* register of bridge completed, add sysfs entries */
	if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
		br_sysfs_addbr(dev);
		return NOTIFY_DONE;
	}

	/* not a port of a bridge */
	p = br_port_get_rtnl(dev);
	if (!p)
		return NOTIFY_DONE;

	br = p->br;

	switch (event) {
	case NETDEV_CHANGEMTU:
		dev_set_mtu(br->dev, br_min_mtu(br));
		break;

	case NETDEV_CHANGEADDR:
		spin_lock_bh(&br->lock);
		br_fdb_changeaddr(p, dev->dev_addr);
		changed_addr = br_stp_recalculate_bridge_id(br);
		spin_unlock_bh(&br->lock);

		if (changed_addr)
			call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

		break;

	case NETDEV_CHANGE:
		br_port_carrier_check(p);
		break;

	case NETDEV_FEAT_CHANGE:
		netdev_update_features(br->dev);
		break;

	case NETDEV_DOWN:
		spin_lock_bh(&br->lock);
		if (br->dev->flags & IFF_UP)
			br_stp_disable_port(p);
		spin_unlock_bh(&br->lock);
		break;

	case NETDEV_UP:
		if (netif_running(br->dev) && netif_oper_up(dev)) {
			spin_lock_bh(&br->lock);
			br_stp_enable_port(p);
			spin_unlock_bh(&br->lock);
		}
		break;

	case NETDEV_UNREGISTER:
		br_del_if(br, dev);
		break;

	case NETDEV_CHANGENAME:
		err = br_sysfs_renameif(p);
		if (err)
			return notifier_from_errno(err);
		break;

	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid underlaying device to change its type. */
		return NOTIFY_BAD;

	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, br->dev);
		break;
	}

	/* Events that may cause spanning tree to refresh */
	if (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
	    event == NETDEV_CHANGE || event == NETDEV_DOWN)
		br_ifinfo_notify(RTM_NEWLINK, NULL, p);

	return NOTIFY_DONE;
}
Example #3
static void usbnet_bh (unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process (dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb (entry->urb);
			dev_kfree_skb (skb);
			continue;
		default:
			netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
		}
	}

	// waiting for all pending urbs to complete?
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
			wake_up (dev->wait);
		}

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net) &&
		   netif_device_present (dev->net) &&
		   !timer_pending (&dev->delay) &&
		   !test_bit (EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;
		int	qlen = RX_QLEN (dev);

		if (temp < qlen) {
			struct urb	*urb;
			int		i;

			// don't refill the queue all at once
			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
				urb = usb_alloc_urb (0, GFP_ATOMIC);
				if (urb != NULL) {
					if (rx_submit (dev, urb, GFP_ATOMIC) ==
					    -ENOLINK)
						return;
				}
			}
			if (temp != dev->rxq.qlen)
				netif_dbg(dev, link, dev->net,
					  "rxqlen %d --> %d\n",
					  temp, dev->rxq.qlen);
			if (dev->rxq.qlen < qlen)
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}
Example #4
int orinoco_hw_trigger_scan(struct orinoco_private *priv,
			    const struct cfg80211_ssid *ssid)
{
	struct net_device *dev = priv->ndev;
	struct hermes *hw = &priv->hw;
	unsigned long flags;
	int err = 0;

	if (orinoco_lock(priv, &flags) != 0)
		return -EBUSY;

	/* Scanning is not possible while the interface is down */
	if (!netif_running(dev)) {
		err = -ENETDOWN;
		goto out;
	}

	/* Scanning is not supported in monitor mode */
	if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (priv->has_hostscan) {
		switch (priv->firmware_type) {
		case FIRMWARE_TYPE_SYMBOL:
			err = hermes_write_wordrec(hw, USER_BAP,
						HERMES_RID_CNFHOSTSCAN_SYMBOL,
						HERMES_HOSTSCAN_SYMBOL_ONCE |
						HERMES_HOSTSCAN_SYMBOL_BCAST);
			break;
		case FIRMWARE_TYPE_INTERSIL: {
			__le16 req[3];

			req[0] = cpu_to_le16(0x3fff);	/* channel mask: all channels */
			req[1] = cpu_to_le16(0x0001);	/* tx rate for probe requests */
			req[2] = 0;			/* broadcast (any) ESSID */
			err = HERMES_WRITE_RECORD(hw, USER_BAP,
						  HERMES_RID_CNFHOSTSCAN, &req);
			break;
		}
		case FIRMWARE_TYPE_AGERE:
			if (ssid->ssid_len > 0) {
				struct hermes_idstring idbuf;
				size_t len = ssid->ssid_len;

				idbuf.len = cpu_to_le16(len);
				memcpy(idbuf.val, ssid->ssid, len);

				err = hw->ops->write_ltv(hw, USER_BAP,
					       HERMES_RID_CNFSCANSSID_AGERE,
					       HERMES_BYTES_TO_RECLEN(len + 2),
					       &idbuf);
			} else
				err = hermes_write_wordrec(hw, USER_BAP,
						   HERMES_RID_CNFSCANSSID_AGERE,
						   0);	/* any ESSID */
			if (err)
				break;

			if (priv->has_ext_scan) {
				err = hermes_write_wordrec(hw, USER_BAP,
						HERMES_RID_CNFSCANCHANNELS2GHZ,
						0x7FFF);
				if (err)
					goto out;

				err = hermes_inquire(hw,
						     HERMES_INQ_CHANNELINFO);
			} else
				err = hermes_inquire(hw, HERMES_INQ_SCAN);

			break;
		}
	} else
		err = hermes_inquire(hw, HERMES_INQ_SCAN);

 out:
	orinoco_unlock(priv, &flags);

	return err;
}
Example #5
static int
bnad_set_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	int i, current_err, err = 0;
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);
	if (ringparam->rx_pending == bnad->rxq_depth &&
	    ringparam->tx_pending == bnad->txq_depth) {
		mutex_unlock(&bnad->conf_mutex);
		return 0;
	}

	if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
	    !BNA_POWER_OF_2(ringparam->rx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}
	if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
	    !BNA_POWER_OF_2(ringparam->tx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}

	if (ringparam->rx_pending != bnad->rxq_depth) {
		bnad->rxq_depth = ringparam->rx_pending;
		if (!netif_running(netdev)) {
			mutex_unlock(&bnad->conf_mutex);
			return 0;
		}

		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			bnad_destroy_rx(bnad, i);
			current_err = bnad_setup_rx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}

		if (!err && bnad->rx_info[0].rx) {
			/* restore rx configuration */
			bnad_restore_vlans(bnad, 0);
			bnad_enable_default_bcast(bnad);
			spin_lock_irqsave(&bnad->bna_lock, flags);
			bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
			spin_unlock_irqrestore(&bnad->bna_lock, flags);
			bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI |
					     BNAD_CF_PROMISC);
			bnad_set_rx_mode(netdev);
		}
	}
	if (ringparam->tx_pending != bnad->txq_depth) {
		bnad->txq_depth = ringparam->tx_pending;
		if (!netif_running(netdev)) {
			mutex_unlock(&bnad->conf_mutex);
			return 0;
		}

		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			bnad_destroy_tx(bnad, i);
			current_err = bnad_setup_tx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}
	}

	mutex_unlock(&bnad->conf_mutex);
	return err;
}
Example #6
/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * The caller should verify the returned net_device pointer using
 * IS_ERR().  If it doesn't indicate an error code (negative errno),
 * ep->driver_data values have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = the_dev;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		link->ioport = dev;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
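		/*
		 * fail1 jumps into the middle of this branch, so a failed
		 * usb_ep_enable(out_ep) only disables in_ep, the endpoint
		 * that was actually enabled.
		 */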
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
Example #7
int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
			    struct wireless_dev *wdev)
{
	struct cfg80211_cached_keys *ck = NULL;
	enum ieee80211_band band;
	int i, err;

	ASSERT_WDEV_LOCK(wdev);

	if (!wdev->wext.ibss.beacon_interval)
		wdev->wext.ibss.beacon_interval = 100;

	/* try to find an IBSS channel if none requested ... */
	if (!wdev->wext.ibss.chandef.chan) {
		wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;

		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
			struct ieee80211_supported_band *sband;
			struct ieee80211_channel *chan;

			sband = rdev->wiphy.bands[band];
			if (!sband)
				continue;

			for (i = 0; i < sband->n_channels; i++) {
				chan = &sband->channels[i];
				if (chan->flags & IEEE80211_CHAN_NO_IBSS)
					continue;
				if (chan->flags & IEEE80211_CHAN_DISABLED)
					continue;
				wdev->wext.ibss.chandef.chan = chan;
				break;
			}

			if (wdev->wext.ibss.chandef.chan)
				break;
		}

		if (!wdev->wext.ibss.chandef.chan)
			return -EINVAL;
	}

	/* don't join -- SSID is not there */
	if (!wdev->wext.ibss.ssid_len)
		return 0;

	if (!netif_running(wdev->netdev))
		return 0;

	if (wdev->wext.keys) {
		wdev->wext.keys->def = wdev->wext.default_key;
		wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key;
	}

	wdev->wext.ibss.privacy = wdev->wext.default_key != -1;

	if (wdev->wext.keys) {
		ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL);
		if (!ck)
			return -ENOMEM;
		for (i = 0; i < 6; i++)
			ck->params[i].key = ck->data[i];
	}
	err = __cfg80211_join_ibss(rdev, wdev->netdev,
				   &wdev->wext.ibss, ck);
	if (err)
		kfree(ck);

	return err;
}
Example #8
static int sm_ioctl(struct net_device *dev, struct ifreq *ifr,
		    struct hdlcdrv_ioctl *hi, int cmd)
{
	struct sm_state *sm;
	struct sm_ioctl bi;
	unsigned long flags;
	unsigned int newdiagmode;
	unsigned int newdiagflags;
	char *cp;
	const struct modem_tx_info **mtp = sm_modem_tx_table;
	const struct modem_rx_info **mrp = sm_modem_rx_table;
	const struct hardware_info **hwp = sm_hardware_table;

	if (!dev || !dev->priv ||
	    ((struct sm_state *)dev->priv)->hdrv.magic != HDLCDRV_MAGIC) {
		printk(KERN_ERR "sm_ioctl: invalid device struct\n");
		return -EINVAL;
	}
	sm = (struct sm_state *)dev->priv;

	if (cmd != SIOCDEVPRIVATE) {
		if (sm->hwdrv && sm->hwdrv->ioctl)
			return sm->hwdrv->ioctl(dev, sm, ifr, hi, cmd);
		return -ENOIOCTLCMD;
	}
	switch (hi->cmd) {
	default:
		if (sm->hwdrv && sm->hwdrv->ioctl)
			return sm->hwdrv->ioctl(dev, sm, ifr, hi, cmd);
		return -ENOIOCTLCMD;

	case HDLCDRVCTL_GETMODE:
		cp = hi->data.modename;
		if (sm->hwdrv && sm->hwdrv->hw_name)
			cp += sprintf(cp, "%s:", sm->hwdrv->hw_name);
		else
			cp += sprintf(cp, "<unspec>:");
		if (sm->mode_tx && sm->mode_tx->name)
			cp += sprintf(cp, "%s", sm->mode_tx->name);
		else
			cp += sprintf(cp, "<unspec>");
		if (!sm->mode_tx || !sm->mode_rx ||
		    strcmp(sm->mode_rx->name, sm->mode_tx->name)) {
			if (sm->mode_rx && sm->mode_rx->name)
				cp += sprintf(cp, ",%s", sm->mode_rx->name);
			else
				cp += sprintf(cp, ",<unspec>");
		}
		if (copy_to_user(ifr->ifr_data, hi, sizeof(*hi)))
			return -EFAULT;
		return 0;

	case HDLCDRVCTL_SETMODE:
		if (netif_running(dev) || !capable(CAP_NET_ADMIN))
			return -EACCES;
		hi->data.modename[sizeof(hi->data.modename)-1] = '\0';
		return sethw(dev, sm, hi->data.modename);

	case HDLCDRVCTL_MODELIST:
		cp = hi->data.modename;
		while (*hwp) {
			if ((*hwp)->hw_name)
				cp += sprintf(cp, "%s:,", (*hwp)->hw_name);
			hwp++;
		}
		while (*mtp) {
			if ((*mtp)->name)
				cp += sprintf(cp, ">%s,", (*mtp)->name);
			mtp++;
		}
		while (*mrp) {
			if ((*mrp)->name)
				cp += sprintf(cp, "<%s,", (*mrp)->name);
			mrp++;
		}
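		/* overwrite the trailing ',' left by the last sprintf() */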
		cp[-1] = '\0';
		if (copy_to_user(ifr->ifr_data, hi, sizeof(*hi)))
			return -EFAULT;
		return 0;

#ifdef SM_DEBUG
	case SMCTL_GETDEBUG:
		if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi)))
			return -EFAULT;
		bi.data.dbg.int_rate = sm->debug_vals.last_intcnt;
		bi.data.dbg.mod_cycles = sm->debug_vals.mod_cyc;
		bi.data.dbg.demod_cycles = sm->debug_vals.demod_cyc;
		bi.data.dbg.dma_residue = sm->debug_vals.dma_residue;
		sm->debug_vals.mod_cyc = sm->debug_vals.demod_cyc =
			sm->debug_vals.dma_residue = 0;
		if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
			return -EFAULT;
		return 0;
#endif /* SM_DEBUG */

	case SMCTL_DIAGNOSE:
		if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi)))
			return -EFAULT;
		newdiagmode = bi.data.diag.mode;
		newdiagflags = bi.data.diag.flags;
		if (newdiagmode > SM_DIAGMODE_CONSTELLATION)
			return -EINVAL;
		bi.data.diag.mode = sm->diag.mode;
		bi.data.diag.flags = sm->diag.flags;
		bi.data.diag.samplesperbit = sm->mode_rx->sperbit;
		if (sm->diag.mode != newdiagmode) {
			save_flags(flags);
			cli();
			sm->diag.ptr = -1;
			sm->diag.flags = newdiagflags & ~SM_DIAGFLAG_VALID;
			sm->diag.mode = newdiagmode;
			restore_flags(flags);
			if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
				return -EFAULT;
			return 0;
		}
		if (sm->diag.ptr < 0 || sm->diag.mode == SM_DIAGMODE_OFF) {
			if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
				return -EFAULT;
			return 0;
		}
		if (bi.data.diag.datalen > DIAGDATALEN)
			bi.data.diag.datalen = DIAGDATALEN;
		if (sm->diag.ptr < bi.data.diag.datalen) {
			if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
				return -EFAULT;
			return 0;
		}
		if (copy_to_user(bi.data.diag.data, sm->diag.data,
				 bi.data.diag.datalen * sizeof(short)))
			return -EFAULT;
		bi.data.diag.flags |= SM_DIAGFLAG_VALID;
		save_flags(flags);
		cli();
		sm->diag.ptr = -1;
		sm->diag.flags = newdiagflags & ~SM_DIAGFLAG_VALID;
		sm->diag.mode = newdiagmode;
		restore_flags(flags);
		if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
			return -EFAULT;
		return 0;
	}
}
Example #9
struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata,
					  struct sta_info *sta,
					  enum ieee80211_key_alg alg,
					  int idx,
					  size_t key_len,
					  const u8 *key_data)
{
	struct ieee80211_key *key;

	BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS);

	key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL);
	if (!key)
		return NULL;

	/*
	 * Default to software encryption; we'll later upload the
	 * key to the hardware if possible.
	 */
	key->conf.flags = 0;
	key->flags = 0;

	key->conf.alg = alg;
	key->conf.keyidx = idx;
	key->conf.keylen = key_len;
	memcpy(key->conf.key, key_data, key_len);

	key->local = sdata->local;
	key->sdata = sdata;
	key->sta = sta;

	if (alg == ALG_CCMP) {
		/*
		 * Initialize AES key state here as an optimization so that
		 * it does not need to be initialized for every packet.
		 */
		key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(key_data);
		if (!key->u.ccmp.tfm) {
			ieee80211_key_free(key);
			return NULL;
		}
	}

	ieee80211_debugfs_key_add(key->local, key);

	/* remove key first */
	if (sta)
		ieee80211_key_free(sta->key);
	else
		ieee80211_key_free(sdata->keys[idx]);

	if (sta) {
		ieee80211_debugfs_key_sta_link(key, sta);

		/*
		 * some hardware cannot handle TKIP with QoS, so
		 * we indicate whether QoS could be in use.
		 */
		if (sta->flags & WLAN_STA_WME)
			key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA;
	} else {
		if (sdata->vif.type == IEEE80211_IF_TYPE_STA) {
			struct sta_info *ap;

			/* same here, the AP could be using QoS */
			ap = sta_info_get(key->local, key->sdata->u.sta.bssid);
			if (ap) {
				if (ap->flags & WLAN_STA_WME)
					key->conf.flags |=
						IEEE80211_KEY_FLAG_WMM_STA;
				sta_info_put(ap);
			}
		}
	}

	/* enable hwaccel if appropriate */
	if (netif_running(key->sdata->dev))
		ieee80211_key_enable_hw_accel(key);

	if (sta)
		rcu_assign_pointer(sta->key, key);
	else
		rcu_assign_pointer(sdata->keys[idx], key);

	list_add(&key->list, &sdata->key_list);

	return key;
}
Example #10
static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
		if (netif_msg_rx_err (dev))
			devdbg (dev, "no rx skb");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return;
	}
	skb_reserve (skb, NET_IP_ALIGN);

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = rx_start;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
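		/* -EPIPE and -ENOMEM defer recovery work to keventd,
		 * -ENODEV means the device is gone; any other error just
		 * reschedules the tasklet. Only a 0 return queues the skb. */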
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			if (netif_msg_ifdown (dev))
				devdbg (dev, "device gone");
			netif_device_detach (dev->net);
			break;
		default:
			if (netif_msg_rx_err (dev))
				devdbg (dev, "rx submit, %d", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__skb_queue_tail (&dev->rxq, skb);
		}
	} else {
		if (netif_msg_ifdown (dev))
			devdbg (dev, "rx: stopped");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
}
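Example #11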
static int atl1e_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct atl1e_hw *hw = &adapter->hw;

	while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
		msleep(1);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u16 adv4, adv9;

		if (ecmd->advertising & ADVERTISE_1000_FULL) {
			if (hw->nic_type == athr_l1e) {
				hw->autoneg_advertised =
					ecmd->advertising & AT_ADV_MASK;
			} else {
				clear_bit(__AT_RESETTING, &adapter->flags);
				return -EINVAL;
			}
		} else if (ecmd->advertising & ADVERTISE_1000_HALF) {
			clear_bit(__AT_RESETTING, &adapter->flags);
			return -EINVAL;
		} else {
			hw->autoneg_advertised =
				ecmd->advertising & AT_ADV_MASK;
		}
		ecmd->advertising = hw->autoneg_advertised |
				    ADVERTISED_TP | ADVERTISED_Autoneg;

		adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL;
		adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
		if (hw->autoneg_advertised & ADVERTISE_10_HALF)
			adv4 |= ADVERTISE_10HALF;
		if (hw->autoneg_advertised & ADVERTISE_10_FULL)
			adv4 |= ADVERTISE_10FULL;
		if (hw->autoneg_advertised & ADVERTISE_100_HALF)
			adv4 |= ADVERTISE_100HALF;
		if (hw->autoneg_advertised & ADVERTISE_100_FULL)
			adv4 |= ADVERTISE_100FULL;
		if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
			adv9 |= ADVERTISE_1000FULL;

		if (adv4 != hw->mii_autoneg_adv_reg ||
				adv9 != hw->mii_1000t_ctrl_reg) {
			hw->mii_autoneg_adv_reg = adv4;
			hw->mii_1000t_ctrl_reg = adv9;
			hw->re_autoneg = true;
		}

	} else {
		clear_bit(__AT_RESETTING, &adapter->flags);
		return -EINVAL;
	}

	/* reset the link */

	if (netif_running(adapter->netdev)) {
		atl1e_down(adapter);
		atl1e_up(adapter);
	} else
		atl1e_reset_hw(&adapter->hw);

	clear_bit(__AT_RESETTING, &adapter->flags);
	return 0;
}
Example #12
static void pg_inject(void)
{
	u32 saddr;
	struct net_device *odev;
	struct sk_buff *skb;
	struct timeval start, stop;
	u32 total, idle;
	int pc, lcount;

	odev = pg_setup_inject(&saddr);
	if (!odev)
		return;

	skb = fill_packet(odev, saddr);
	if (skb == NULL)
		goto out_reldev;

	forced_stop = 0;
	idle_acc_hi = 0;
	idle_acc_lo = 0;
	pc = 0;
	lcount = pg_count;
	do_gettimeofday(&start);

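	/* The same skb is transmitted over and over: each pass takes an
	 * extra reference (skb->users) so the driver's kfree_skb() only
	 * drops a reference, and the wait below spins until the device
	 * has released its last one (skb->users == 1) */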
	for(;;) {
		spin_lock_bh(&odev->xmit_lock);
		atomic_inc(&skb->users);
		if (!netif_queue_stopped(odev)) {
			if (odev->hard_start_xmit(skb, odev)) {
				kfree_skb(skb);
				if (net_ratelimit())
					printk(KERN_INFO "Hard xmit error\n");
			}
			pc++;
		} else {
			kfree_skb(skb);
		}
		spin_unlock_bh(&odev->xmit_lock);

		if (pg_ipg)
			nanospin(pg_ipg);
		if (forced_stop)
			goto out_intr;
		if (signal_pending(current))
			goto out_intr;

		if (--lcount == 0) {
			if (atomic_read(&skb->users) != 1) {
				u32 idle_start, idle;

				idle_start = get_cycles();
				while (atomic_read(&skb->users) != 1) {
					if (signal_pending(current))
						goto out_intr;
					schedule();
				}
				idle = get_cycles() - idle_start;
				idle_acc_lo += idle;
				if (idle_acc_lo < idle)
					idle_acc_hi++;
			}
			break;
		}

		if (netif_queue_stopped(odev) || current->need_resched) {
			u32 idle_start, idle;

			idle_start = get_cycles();
			do {
				if (signal_pending(current))
					goto out_intr;
				if (!netif_running(odev))
					goto out_intr;
				if (current->need_resched)
					schedule();
				else
					do_softirq();
			} while (netif_queue_stopped(odev));
			idle = get_cycles() - idle_start;
			idle_acc_lo += idle;
			if (idle_acc_lo < idle)
				idle_acc_hi++;
		}
	}

	do_gettimeofday(&stop);

	total = (stop.tv_sec - start.tv_sec)*1000000 +
		stop.tv_usec - start.tv_usec;

	idle = (((idle_acc_hi<<20)/pg_cpu_speed)<<12)+idle_acc_lo/pg_cpu_speed;

	if (1) {
		char *p = pg_result;

		p += sprintf(p, "OK: %u(c%u+d%u) usec, %u (%dbyte,%dfrags) %upps %uMB/sec",
			     total, total-idle, idle,
			     pc, skb->len, skb_shinfo(skb)->nr_frags,
			     ((pc*1000)/(total/1000)),
			     (((pc*1000)/(total/1000))*pkt_size)/1024/1024
			     );
	}

out_relskb:
	kfree_skb(skb);
out_reldev:
	dev_put(odev);
	return;

out_intr:
	sprintf(pg_result, "Interrupted");
	goto out_relskb;
}
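Example #13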
void WILC_WFI_monitor_rx(uint8_t *buff, uint32_t size)
{
	uint32_t header, pkt_offset;
	struct sk_buff *skb = NULL;
	struct wilc_wfi_radiotap_hdr *hdr;
	struct wilc_wfi_radiotap_cb_hdr *cb_hdr;

	PRINT_INFO(HOSTAPD_DBG, "In monitor interface receive function\n");

	/* Bug 4601 */
	if (wilc_wfi_mon == NULL)
		return;

	if (!netif_running(wilc_wfi_mon)) {
		PRINT_INFO(HOSTAPD_DBG, "Monitor interface is not running\n");
		return;
	}

	/* Get the WILC header */
	memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);

	/*
	 * The packet offset field contains info about what type of
	 * management frame we are dealing with and the ack status.
	 */
	pkt_offset = GET_PKT_OFFSET(header);

	if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
		/* hostapd callback mgmt frame */
		skb = dev_alloc_skb(size + sizeof(struct wilc_wfi_radiotap_cb_hdr));
		if (skb == NULL) {
			PRINT_INFO(HOSTAPD_DBG, "Monitor if : No memory to allocate skb");
			return;
		}

		memcpy(skb_put(skb, size), buff, size);

		cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *)skb_push(skb, sizeof(*cb_hdr));
		memset(cb_hdr, 0, sizeof(struct wilc_wfi_radiotap_cb_hdr));

		cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION */
		cb_hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_cb_hdr));
		cb_hdr->hdr.it_present = cpu_to_le32(
					(1 << IEEE80211_RADIOTAP_RATE) |
					(1 << IEEE80211_RADIOTAP_TX_FLAGS));
		cb_hdr->rate = 5; /* txrate->bitrate / 5 */

		if (pkt_offset & IS_MGMT_STATUS_SUCCES)
			cb_hdr->tx_flags = IEEE80211_RADIOTAP_F_TX_RTS; /* success */
		else
			cb_hdr->tx_flags = IEEE80211_RADIOTAP_F_TX_FAIL;
	} else {
		skb = dev_alloc_skb(size + sizeof(struct wilc_wfi_radiotap_hdr));
		if (skb == NULL) {
			PRINT_INFO(HOSTAPD_DBG, "Monitor if : No memory to allocate skb");
			return;
		}

		memcpy(skb_put(skb, size), buff, size);
		hdr = (struct wilc_wfi_radiotap_hdr *)skb_push(skb, sizeof(*hdr));
		memset(hdr, 0, sizeof(struct wilc_wfi_radiotap_hdr));
		hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION */
		hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_hdr));
		PRINT_INFO(HOSTAPD_DBG, "Radiotap len %d\n", hdr->hdr.it_len);
		hdr->hdr.it_present = cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		PRINT_INFO(HOSTAPD_DBG, "Presentflags %d\n", hdr->hdr.it_present);
		hdr->rate = 5; /* txrate->bitrate / 5 */
	}

	skb->dev = wilc_wfi_mon;
	skb_set_mac_header(skb, 0);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);
	memset(skb->cb, 0, sizeof(skb->cb));

	netif_rx(skb);
}
Example #14
static irqreturn_t
elmc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	unsigned short stat;
	struct priv *p;

	if (!netif_running(dev)) {
		/* The 3c523 has this habit of generating interrupts during the
		   reset.  I'm not sure if the ni52 has this same problem, but it's
		   really annoying if we haven't finished initializing it.  I was
		   hoping all the elmc_id_* commands would disable this, but I
		   might have missed a few. */

		elmc_id_attn586();	/* ack inter. and disable any more */
		return IRQ_HANDLED;
	} else if (!(ELMC_CTRL_INT & inb(dev->base_addr + ELMC_CTRL))) {
		/* wasn't this device */
		return IRQ_NONE;
	}
	/* reading ELMC_CTRL also clears the INT bit. */

	p = netdev_priv(dev);

	while ((stat = p->scb->status & STAT_MASK))
	{
		p->scb->cmd = stat;
		elmc_attn586();	/* ack inter. */

		if (stat & STAT_CX) {
			/* command with I-bit set complete */
			elmc_xmt_int(dev);
		}
		if (stat & STAT_FR) {
			/* received a frame */
			elmc_rcv_int(dev);
		}
#ifndef NO_NOPCOMMANDS
		if (stat & STAT_CNA) {
			/* CU went 'not ready' */
			if (netif_running(dev)) {
				printk(KERN_WARNING "%s: oops! CU has left active state. stat: %04x/%04x.\n", dev->name, (int) stat, (int) p->scb->status);
			}
		}
#endif

		if (stat & STAT_RNR) {
			/* RU went 'not ready' */

			if (p->scb->status & RU_SUSPEND) {
				/* special case: RU_SUSPEND */

				WAIT_4_SCB_CMD();
				p->scb->cmd = RUC_RESUME;
				elmc_attn586();
			} else {
				printk(KERN_WARNING "%s: Receiver-Unit went 'NOT READY': %04x/%04x.\n", dev->name, (int) stat, (int) p->scb->status);
				elmc_rnr_int(dev);
			}
		}
		WAIT_4_SCB_CMD();	/* wait for ack. (elmc_xmt_int can be faster than ack!!) */
		if (p->scb->cmd) {	/* timed out? */
			break;
		}
	}
	return IRQ_HANDLED;
}
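Example #15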
static void rx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;
	enum skb_state		state;

	skb_put (skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	/* success */
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		break;

	/* stalls need manual reset. this is rare ... except that
	 * when going through USB 2.0 TTs, unplug appears this way.
	 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
	 * storm, recovering as needed.
	 */
	case -EPIPE:
		dev->net->stats.rx_errors++;
		usbnet_defer_kevent (dev, EVENT_RX_HALT);
		// FALLTHROUGH

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* async unlink */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		goto block;

	/* we get controller i/o faults during khubd disconnect() delays.
	 * throttle down resubmits, to avoid log floods; just temporarily,
	 * so we still recover when the fault isn't a khubd delay.
	 */
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		if (!timer_pending (&dev->delay)) {
			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
			netif_dbg(dev, link, dev->net,
				  "rx throttle %d\n", urb_status);
		}
block:
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		// FALLTHROUGH

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running (dev->net) &&
		    !test_bit (EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit (dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb (urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
Example #16
static void rx_submit (struct usbnet *dev, struct urb *urb, int flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size;

#ifdef CONFIG_USB_NET1080
	if (dev->driver_info->flags & FLAG_FRAMING_NC)
		size = FRAMED_SIZE (dev->net.mtu);
	else
#endif
#ifdef CONFIG_USB_GENESYS
	if (dev->driver_info->flags & FLAG_FRAMING_GL)
		size = GL_RCV_BUF_SIZE;
	else
#endif
		size = (sizeof (struct ethhdr) + dev->net.mtu);

	if ((skb = alloc_skb (size, flags)) == 0) {
		dbg ("no rx skb");
		tasklet_schedule (&dev->bh);
		usb_free_urb (urb);
		return;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = rx_start;
	entry->length = 0;

	FILL_BULK_URB (urb, dev->udev,
		usb_rcvbulkpipe (dev->udev, dev->driver_info->in),
		skb->data, size, rx_complete, skb);
	urb->transfer_flags |= USB_ASYNC_UNLINK;
#ifdef	REALLY_QUEUE
	urb->transfer_flags |= USB_QUEUE_BULK;
#endif
#if 0
	// Idle-but-posted reads with UHCI really chew up
	// PCI bandwidth unless FSBR is disabled
	urb->transfer_flags |= USB_NO_FSBR;
#endif

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (&dev->net)) {
		if ((retval = usb_submit_urb (urb)) != 0) {
			dbg ("%s rx submit, %d", dev->net.name, retval);
			tasklet_schedule (&dev->bh);
		} else {
			__skb_queue_tail (&dev->rxq, skb);
		}
	} else {
		dbg ("rx: stopped");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
}
Example #17
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
Example #18
static int
qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct qlcnic_adapter *adapter = netdev_priv(dev);
	int check_sfp_module = 0;
	u16 pcifn = adapter->ahw.pci_func;

	/* read which mode */
	if (adapter->ahw.port_type == QLCNIC_GBE) {
		ecmd->supported = (SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

		ecmd->advertising = (ADVERTISED_100baseT_Half |
				     ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Half |
				     ADVERTISED_1000baseT_Full);

		ecmd->speed = adapter->link_speed;
		ecmd->duplex = adapter->link_duplex;
		ecmd->autoneg = adapter->link_autoneg;

	} else if (adapter->ahw.port_type == QLCNIC_XGBE) {
		u32 val;

		val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
		if (val == QLCNIC_PORT_MODE_802_3_AP) {
			ecmd->supported = SUPPORTED_1000baseT_Full;
			ecmd->advertising = ADVERTISED_1000baseT_Full;
		} else {
			ecmd->supported = SUPPORTED_10000baseT_Full;
			ecmd->advertising = ADVERTISED_10000baseT_Full;
		}

		if (netif_running(dev) && adapter->has_link_events) {
			ecmd->speed = adapter->link_speed;
			ecmd->autoneg = adapter->link_autoneg;
			ecmd->duplex = adapter->link_duplex;
			goto skip;
		}

		val = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
		ecmd->speed = P3P_LINK_SPEED_MHZ *
			P3P_LINK_SPEED_VAL(pcifn, val);
		ecmd->duplex = DUPLEX_FULL;
		ecmd->autoneg = AUTONEG_DISABLE;
	} else
		return -EIO;

skip:
	ecmd->phy_address = adapter->physical_port;
	ecmd->transceiver = XCVR_EXTERNAL;

	switch (adapter->ahw.board_type) {
	case QLCNIC_BRDTYPE_P3P_REF_QG:
	case QLCNIC_BRDTYPE_P3P_4_GB:
	case QLCNIC_BRDTYPE_P3P_4_GB_MM:

		ecmd->supported |= SUPPORTED_Autoneg;
		ecmd->advertising |= ADVERTISED_Autoneg;
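		/* fall through */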
	case QLCNIC_BRDTYPE_P3P_10G_CX4:
	case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
	case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
		ecmd->supported |= SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->port = PORT_TP;
		ecmd->autoneg =  adapter->link_autoneg;
		break;
	case QLCNIC_BRDTYPE_P3P_IMEZ:
	case QLCNIC_BRDTYPE_P3P_XG_LOM:
	case QLCNIC_BRDTYPE_P3P_HMEZ:
		ecmd->supported |= SUPPORTED_MII;
		ecmd->advertising |= ADVERTISED_MII;
		ecmd->port = PORT_MII;
		ecmd->autoneg = AUTONEG_DISABLE;
		break;
	case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
	case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
	case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->supported |= SUPPORTED_TP;
		check_sfp_module = netif_running(dev) &&
			adapter->has_link_events;
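		/* fall through */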
	case QLCNIC_BRDTYPE_P3P_10G_XFP:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_FIBRE;
		ecmd->autoneg = AUTONEG_DISABLE;
		break;
	case QLCNIC_BRDTYPE_P3P_10G_TP:
		if (adapter->ahw.port_type == QLCNIC_XGBE) {
			ecmd->autoneg = AUTONEG_DISABLE;
			ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
			ecmd->advertising |=
				(ADVERTISED_FIBRE | ADVERTISED_TP);
			ecmd->port = PORT_FIBRE;
			check_sfp_module = netif_running(dev) &&
				adapter->has_link_events;
		} else {
			ecmd->autoneg = AUTONEG_ENABLE;
			ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
			ecmd->advertising |=
				(ADVERTISED_TP | ADVERTISED_Autoneg);
			ecmd->port = PORT_TP;
		}
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
			adapter->ahw.board_type);
		return -EIO;
	}

	if (check_sfp_module) {
		switch (adapter->module_type) {
		case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
		case LINKEVENT_MODULE_OPTICAL_SRLR:
		case LINKEVENT_MODULE_OPTICAL_LRM:
		case LINKEVENT_MODULE_OPTICAL_SFP_1G:
			ecmd->port = PORT_FIBRE;
			break;
		case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
		case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
		case LINKEVENT_MODULE_TWINAX:
			ecmd->port = PORT_TP;
			break;
		default:
			ecmd->port = PORT_OTHER;
		}
	}

	return 0;
}
Example #19
void
cfg80211_get_chan_state(struct wireless_dev *wdev,
		        struct ieee80211_channel **chan,
		        enum cfg80211_chan_mode *chanmode)
{
	*chan = NULL;
	*chanmode = CHAN_MODE_UNDEFINED;

	ASSERT_WDEV_LOCK(wdev);

	if (wdev->netdev && !netif_running(wdev->netdev))
		return;

	switch (wdev->iftype) {
	case NL80211_IFTYPE_ADHOC:
		if (wdev->current_bss) {
			*chan = wdev->current_bss->pub.channel;
			*chanmode = wdev->ibss_fixed
				  ? CHAN_MODE_SHARED
				  : CHAN_MODE_EXCLUSIVE;
			return;
		}
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_P2P_CLIENT:
		if (wdev->current_bss) {
			*chan = wdev->current_bss->pub.channel;
			*chanmode = CHAN_MODE_SHARED;
			return;
		}
		break;
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_P2P_GO:
		if (wdev->cac_started) {
			*chan = wdev->channel;
			*chanmode = CHAN_MODE_SHARED;
		} else if (wdev->beacon_interval) {
			*chan = wdev->channel;
			*chanmode = CHAN_MODE_SHARED;
		}
		return;
	case NL80211_IFTYPE_MESH_POINT:
		if (wdev->mesh_id_len) {
			*chan = wdev->channel;
			*chanmode = CHAN_MODE_SHARED;
		}
		return;
	case NL80211_IFTYPE_MONITOR:
	case NL80211_IFTYPE_AP_VLAN:
	case NL80211_IFTYPE_WDS:
		/* these interface types don't really have a channel */
		return;
	case NL80211_IFTYPE_P2P_DEVICE:
		if (wdev->wiphy->features &
				NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL)
			*chanmode = CHAN_MODE_EXCLUSIVE;
		return;
	case NL80211_IFTYPE_UNSPECIFIED:
	case NUM_NL80211_IFTYPES:
		WARN_ON(1);
	}

	return;
}
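Example #20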
static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;
	struct driver_info	*info = dev->driver_info;
	u8			align;

#if (AX_FORCE_BUFF_ALIGN)
	align = 0;
#else
	if (!(info->flags & FLAG_HW_IP_ALIGNMENT))
		align = NET_IP_ALIGN;
	else
		align = 0;
#endif

	if ((skb = alloc_skb (size + align, flags)) == NULL) {

		if (netif_msg_rx_err (dev))
			devdbg (dev, "no rx skb");

		if((dev->rx_urb_size > 2048) && dev->rx_size) {
			dev->rx_size--;
			dev->rx_urb_size = AX88772B_BULKIN_SIZE[dev->rx_size].size;

			ax8817x_write_cmd_async (dev, 0x2A,
				AX88772B_BULKIN_SIZE[dev->rx_size].byte_cnt,
				AX88772B_BULKIN_SIZE[dev->rx_size].threshold,
				0, NULL);
		}

		if (!(dev->flags & EVENT_RX_MEMORY))
			axusbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return;
	}

	if (align)
		skb_reserve (skb, NET_IP_ALIGN);

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = rx_start;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net)
			&& netif_device_present (dev->net)
			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			axusbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			axusbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			if (netif_msg_ifdown (dev))
				devdbg (dev, "device gone");
			netif_device_detach (dev->net);
			break;
		default:
			if (netif_msg_rx_err (dev))
				devdbg (dev, "rx submit, %d", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__skb_queue_tail (&dev->rxq, skb);
		}
	} else {
		if (netif_msg_ifdown (dev))
			devdbg (dev, "rx: stopped");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
}
Example #21
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool running = netif_running(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int ret = -EINVAL;
	int idx, epidx;

	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
		if (new_mtu <= fjes_support_mtu[idx]) {
			new_mtu = fjes_support_mtu[idx];
			if (new_mtu == netdev->mtu)
				return 0;

			ret = 0;
			break;
		}
	}

	if (ret)
		return ret;

	if (running) {
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
				~FJES_RX_MTU_CHANGING_DONE;
		}
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
		cancel_work_sync(&adapter->tx_stall_task);
		napi_disable(&adapter->napi);

		msleep(1000);

		netif_tx_stop_all_queues(netdev);
	}

	netdev->mtu = new_mtu;

	if (running) {
		/* rx_status_lock is taken per endpoint inside this loop;
		 * taking it here as well would deadlock on the second
		 * spin_lock_irqsave() below */
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr,
					    netdev->mtu);

			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_MTU_CHANGING_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}

		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		napi_enable(&adapter->napi);
		napi_schedule(&adapter->napi);
	}

	return ret;
}
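Example #22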
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void kevent (void *data)
{
	struct usbnet *dev = (struct usbnet *)data;
#else
static void kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
#endif
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {

		unlink_urbs (dev, &dev->txq);
		status = usb_clear_halt (dev->udev, dev->out);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
				deverr (dev, "can't clear tx halt, status %d",
					status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {

		unlink_urbs (dev, &dev->rxq);
		status = usb_clear_halt (dev->udev, dev->in);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
				deverr (dev, "can't clear rx halt, status %d",
					status);
		} else {
			clear_bit (EVENT_RX_HALT, &dev->flags);
			tasklet_schedule (&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
			urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
			rx_submit (dev, urb, GFP_KERNEL);
			tasklet_schedule (&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
			devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
				info->description);
		}
	}

	if (dev->flags)
		devdbg (dev, "kevent done, flags = 0x%lx",
			dev->flags);
}

/*-------------------------------------------------------------------------*/

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
static void tx_complete (struct urb *urb, struct pt_regs *regs)
#else
static void tx_complete (struct urb *urb)
#endif
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += entry->length;
	} else {
		dev->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			axusbnet_defer_kevent (dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		// async unlink
		case -ESHUTDOWN:		// hardware gone
			break;

		// like rx, tx gets controller i/o faults during khubd delays
		// and so it uses the same throttling mechanism.
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			if (!timer_pending (&dev->delay)) {
				mod_timer (&dev->delay,
					jiffies + THROTTLE_JIFFIES);
				if (netif_msg_link (dev))
					devdbg (dev, "tx throttle %d",
							urb->status);
			}
			netif_stop_queue (dev->net);
			break;
		default:
			if (netif_msg_tx_err (dev))
				devdbg (dev, "tx err %d", entry->urb->status);
			break;
		}
	}

	urb->dev = NULL;
	entry->state = tx_done;
	defer_bh(dev, skb, &dev->txq);
}

/*-------------------------------------------------------------------------*/

static
void axusbnet_tx_timeout (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);

	unlink_urbs (dev, &dev->txq);
	tasklet_schedule (&dev->bh);

	// FIXME: device recovery -- reset?
}

/*-------------------------------------------------------------------------*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
static int
#else
static netdev_tx_t
#endif
axusbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err (dev))
				devdbg (dev, "can't tx_fixup skb");
			goto drop;
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "no urb");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}

	spin_lock_irqsave (&dev->txq.lock, flags);

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		axusbnet_defer_kevent (dev, EVENT_TX_HALT);
		break;
	default:
		if (netif_msg_tx_err (dev))
			devdbg (dev, "tx: submit urb err %d", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail (&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "drop, code %d", retval);
drop:
		dev->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else if (netif_msg_tx_queued (dev)) {
		devdbg (dev, "> tx, len %d, type 0x%x",
			length, skb->protocol);
	}
	return NETDEV_TX_OK;
}
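
The ZLP handling in axusbnet_start_xmit deserves a closer look: a bulk
transfer whose length is an exact multiple of the endpoint's wMaxPacketSize
needs a zero-length packet as terminator, and hardware that cannot send one
gets a one-byte pad instead. Note that the driver bumps
transfer_buffer_length even when the skb has no tailroom, so in that corner
case the extra byte is whatever happens to follow the buffer. A hedged
sketch of the decision, with illustrative demo_* naming:

#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/usb.h>

static void demo_pad_for_zlp(struct urb *urb, struct sk_buff *skb,
			     unsigned int maxpacket, bool hw_sends_zlp)
{
	if (hw_sends_zlp || (skb->len % maxpacket) != 0)
		return;	/* a short packet already terminates the transfer */

	urb->transfer_buffer_length++;	/* one extra byte instead of a ZLP */
	if (skb_tailroom(skb)) {
		skb->data[skb->len] = 0;	/* make the pad deterministic */
		__skb_put(skb, 1);
	}
}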

/*-------------------------------------------------------------------------*/

// tasklet (work deferred from completions, in_irq) or timer

static void axusbnet_bh (unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process (dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb (entry->urb);
			dev_kfree_skb (skb);
			continue;
		default:
			devdbg (dev, "bogus skb state %d", entry->state);
		}
	}

	// waiting for all pending urbs to complete?
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
			wake_up (dev->wait);
		}

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net)
			&& netif_device_present (dev->net)
			&& !timer_pending (&dev->delay)
			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;
		int	qlen = RX_QLEN (dev);

		if (temp < qlen) {
			struct urb	*urb;
			int		i;

			// don't refill the queue all at once
			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
				urb = usb_alloc_urb (0, GFP_ATOMIC);
				if (urb != NULL) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
					urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
					rx_submit (dev, urb, GFP_ATOMIC);
				}
			}
			if (temp != dev->rxq.qlen && netif_msg_link (dev))
				devdbg (dev, "rxqlen %d --> %d",
						temp, dev->rxq.qlen);
			if (dev->rxq.qlen < qlen)
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}


/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

static
void axusbnet_disconnect (struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	xdev = interface_to_usbdev (intf);

	if (netif_msg_probe (dev))
		devinfo (dev, "unregister '%s' usb-%s-%s, %s",
			intf->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description);

	net = dev->net;
	unregister_netdev (net);

	/* we don't hold rtnl here ... */
	flush_scheduled_work ();

	if (dev->driver_info->unbind)
		dev->driver_info->unbind (dev, intf);

	free_netdev(net);
	usb_put_dev (xdev);
}

/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

static int
axusbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	struct driver_info		*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		printk (KERN_ERR "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev (udev);
	interface = udev->cur_altsetting;

	usb_get_dev (xdev);

	status = -ENOMEM;

	// set up our own records
	net = alloc_etherdev(sizeof(*dev));
	if (!net) {
		dbg ("can't kmalloc dev");
		goto out;
	}

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init (&dev->rxq);
	skb_queue_head_init (&dev->txq);
	skb_queue_head_init (&dev->done);
	dev->bh.func = axusbnet_bh;
	dev->bh.data = (unsigned long) dev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	INIT_WORK (&dev->kevent, kevent, dev);
#else
	INIT_WORK (&dev->kevent, kevent);
#endif

	dev->delay.function = axusbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer (&dev->delay);
//	mutex_init (&dev->phy_mutex);

	dev->net = net;

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;

#if 0
	// dma_supported() is deeply broken on almost all architectures
	// possible with some EHCI controllers
	if (dma_supported (&udev->dev, DMA_BIT_MASK(64)))
		net->features |= NETIF_F_HIGHDMA;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
	net->open		= axusbnet_open;
	net->stop		= axusbnet_stop;
	net->hard_start_xmit	= axusbnet_start_xmit;
	net->tx_timeout		= axusbnet_tx_timeout;
	net->get_stats		= axusbnet_get_stats;
#endif

	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &axusbnet_ethtool_ops;

	// allow device-specific bind/init procedures
	// NOTE net->name still not usable ...
	status = info->bind (dev, udev);
	if (status < 0) {
		deverr(dev, "Binding device failed: %d", status);
		goto out1;
	}

	/* maybe the remote can't receive an Ethernet MTU */
	if (net->mtu > (dev->hard_mtu - net->hard_header_len))
		net->mtu = dev->hard_mtu - net->hard_header_len;

	status = init_status (dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);

	SET_NETDEV_DEV(net, &udev->dev);
	status = register_netdev (net);
	if (status) {
		deverr(dev, "net device registration failed: %d", status);
		goto out3;
	}

	if (netif_msg_probe (dev))
		devinfo (dev, "register '%s' at usb-%s-%s, %s, %pM",
			udev->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description,
			net->dev_addr);

	// ok, it's ready to go.
	usb_set_intfdata (udev, dev);

	// start as if the link is up
	netif_device_attach (net);

	return 0;

out3:
	if (info->unbind)
		info->unbind (dev, udev);
out1:
	free_netdev(net);
out:
	usb_put_dev(xdev);
	return status;
}

/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */

static int axusbnet_suspend (struct usb_interface *intf,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)
pm_message_t message)
#else
u32 message)
#endif
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		/*
		 * accelerate emptying of the rx and tx queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach (dev->net);
		(void) unlink_urbs (dev, &dev->rxq);
		(void) unlink_urbs (dev, &dev->txq);
		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach (dev->net);
	}
	return 0;
}

static int
axusbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!--dev->suspend_count)
		tasklet_schedule (&dev->bh);

	return 0;
}
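
The suspend/resume pair implements the policy from the comment with nothing
more than a counter: only the 0 -> 1 transition quiesces the device and only
the 1 -> 0 transition restarts it. Stripped to the idiom (demo_* names are
illustrative):

struct demo_pm {
	int suspend_count;	/* how many interfaces are currently suspended */
};

static void demo_suspend(struct demo_pm *d)
{
	if (!d->suspend_count++) {
		/* first suspender: detach the netdev, unlink pending URBs */
	}
}

static void demo_resume(struct demo_pm *d)
{
	if (!--d->suspend_count) {
		/* last resumer: kick the bh tasklet so I/O restarts */
	}
}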
Beispiel #23
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int ret, i;
#ifdef IXGBE_FCOE
	struct dcb_app app = {
			      .selector = DCB_APP_IDTYPE_ETHTYPE,
			      .protocol = ETH_P_FCOE,
			     };
	u8 up = dcb_getapp(netdev, &app);
#endif

	/* Fail command if not in CEE mode */
	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return 1;

	ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
				 MAX_TRAFFIC_CLASS);
	if (ret)
		return DCB_NO_HW_CHG;

#ifdef IXGBE_FCOE
	if (up && (up != (1 << adapter->fcoe.up)))
		adapter->dcb_set_bitmap |= BIT_APP_UPCHG;

	/*
	 * Only take down the adapter if an app change occurred. FCoE
	 * may shuffle tx rings in this case and this can not be done
	 * without a reset currently.
	 */
	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
		while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
			usleep_range(1000, 2000);

		adapter->fcoe.up = ffs(up) - 1;

		if (netif_running(netdev))
			netdev->netdev_ops->ndo_stop(netdev);
		ixgbe_clear_interrupt_scheme(adapter);
	}
#endif

	if (adapter->dcb_cfg.pfc_mode_enable) {
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
				adapter->last_lfc_mode =
				                  adapter->hw.fc.current_mode;
			break;
		default:
			break;
		}
		adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
	} else {
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82598EB:
			adapter->hw.fc.requested_mode = ixgbe_fc_none;
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
			break;
		default:
			break;
		}
	}

#ifdef IXGBE_FCOE
	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
		ixgbe_init_interrupt_scheme(adapter);
		if (netif_running(netdev))
			netdev->netdev_ops->ndo_open(netdev);
		ret = DCB_HW_CHG_RST;
	}
#endif

	if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
		u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
		u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
		/* Priority to TC mapping in CEE case default to 1:1 */
		u8 prio_tc[MAX_USER_PRIORITY];
		int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU)
			max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif

		ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
					       max_frame, DCB_TX_CONFIG);
		ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
					       max_frame, DCB_RX_CONFIG);

		ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
					DCB_TX_CONFIG, refill);
		ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
		ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
				       DCB_TX_CONFIG, bwg_id);
		ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
				      DCB_TX_CONFIG, prio_type);
		ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
				     DCB_TX_CONFIG, prio_tc);

		ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
					bwg_id, prio_type, prio_tc);

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
	}

	if (adapter->dcb_set_bitmap & BIT_PFC) {
		u8 pfc_en;
		u8 prio_tc[MAX_USER_PRIORITY];

		ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
				     DCB_TX_CONFIG, prio_tc);
		ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
		ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc);
		ret = DCB_HW_CHG;
	}

	if (adapter->dcb_cfg.pfc_mode_enable)
		adapter->hw.fc.current_mode = ixgbe_fc_pfc;

	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
		clear_bit(__IXGBE_RESETTING, &adapter->state);
	adapter->dcb_set_bitmap = 0x00;
	return ret;
}
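
The while/test_and_set_bit() loop at the top of the reset path above is an
atomic try-lock: whoever sets __IXGBE_RESETTING first owns the
reconfiguration, everyone else sleeps and retries. A hedged sketch of the
latch with an illustrative bit name:

#include <linux/bitops.h>
#include <linux/delay.h>

#define DEMO_RESETTING	0	/* illustrative bit index in a state word */

static void demo_reconfigure(unsigned long *state)
{
	/* test_and_set_bit() returns the old bit: nonzero means busy */
	while (test_and_set_bit(DEMO_RESETTING, state))
		usleep_range(1000, 2000);	/* someone else is resetting */

	/* ... exclusive reconfiguration work goes here ... */

	clear_bit(DEMO_RESETTING, state);	/* release the latch */
}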
Beispiel #24
static int fm10k_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *temp_ring;
	int i, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       FM10K_MIN_TXD, FM10K_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, FM10K_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       FM10K_MIN_RXD, FM10K_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, FM10K_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == interface->tx_ring_count) &&
	    (new_rx_count == interface->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
		usleep_range(1000, 2000);

	if (!netif_running(interface->netdev)) {
		for (i = 0; i < interface->num_tx_queues; i++)
			interface->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < interface->num_rx_queues; i++)
			interface->rx_ring[i]->count = new_rx_count;
		interface->tx_ring_count = new_tx_count;
		interface->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, interface->num_tx_queues, interface->num_rx_queues);
	temp_ring = vmalloc(i * sizeof(struct fm10k_ring));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	fm10k_down(interface);

	/* Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != interface->tx_ring_count) {
		for (i = 0; i < interface->num_tx_queues; i++) {
			memcpy(&temp_ring[i], interface->tx_ring[i],
			       sizeof(struct fm10k_ring));

			temp_ring[i].count = new_tx_count;
			err = fm10k_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					fm10k_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < interface->num_tx_queues; i++) {
			fm10k_free_tx_resources(interface->tx_ring[i]);

			memcpy(interface->tx_ring[i], &temp_ring[i],
			       sizeof(struct fm10k_ring));
		}

		interface->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != interface->rx_ring_count) {
		for (i = 0; i < interface->num_rx_queues; i++) {
			memcpy(&temp_ring[i], interface->rx_ring[i],
			       sizeof(struct fm10k_ring));

			temp_ring[i].count = new_rx_count;
			err = fm10k_setup_rx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					fm10k_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < interface->num_rx_queues; i++) {
			fm10k_free_rx_resources(interface->rx_ring[i]);

			memcpy(interface->rx_ring[i], &temp_ring[i],
			       sizeof(struct fm10k_ring));
		}

		interface->rx_ring_count = new_rx_count;
	}

err_setup:
	fm10k_up(interface);
	vfree(temp_ring);
clear_reset:
	clear_bit(__FM10K_RESETTING, &interface->state);
	return err;
}
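
The block comment inside fm10k_set_ringparam names the key trick: set up the
resized rings in a temporary array first, and only after every allocation
has succeeded free the old resources and memcpy the new state over them, so
a failure leaves the interface with its working rings. A generic, hedged
sketch of that copy/setup/swap pattern (demo_ring and the callbacks are
illustrative stand-ins):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

struct demo_ring { void *desc; u32 count; };

static int demo_resize(struct demo_ring **rings, int n, u32 new_count,
		       int (*setup)(struct demo_ring *),
		       void (*release)(struct demo_ring *))
{
	struct demo_ring *tmp = vmalloc(n * sizeof(*tmp));
	int i, err = 0;

	if (!tmp)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		memcpy(&tmp[i], rings[i], sizeof(*tmp));  /* inherit settings */
		tmp[i].count = new_count;
		err = setup(&tmp[i]);
		if (err) {
			while (i--)
				release(&tmp[i]);  /* unwind partial setup */
			goto out;
		}
	}
	/* every allocation succeeded: now it is safe to retire the old rings */
	for (i = 0; i < n; i++) {
		release(rings[i]);
		memcpy(rings[i], &tmp[i], sizeof(*tmp));
	}
out:
	vfree(tmp);
	return err;
}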
Beispiel #25
static int ixgbevf_set_ringparam(struct net_device *netdev,
                                 struct ethtool_ringparam *ring)
{
    struct ixgbevf_adapter *adapter = netdev_priv(netdev);
    struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
    int i, err = 0;
    u32 new_rx_count, new_tx_count;

    if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
        return -EINVAL;

    new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
    new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
    new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

    new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
    new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
    new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

    if ((new_tx_count == adapter->tx_ring->count) &&
            (new_rx_count == adapter->rx_ring->count)) {
        /* nothing to do */
        return 0;
    }

    while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
        msleep(1);

    /*
     * If the adapter isn't up and running then just set the
     * new parameters and scurry for the exits.
     */
    if (!netif_running(adapter->netdev)) {
        for (i = 0; i < adapter->num_tx_queues; i++)
            adapter->tx_ring[i].count = new_tx_count;
        for (i = 0; i < adapter->num_rx_queues; i++)
            adapter->rx_ring[i].count = new_rx_count;
        adapter->tx_ring_count = new_tx_count;
        adapter->rx_ring_count = new_rx_count;
        goto clear_reset;
    }

    tx_ring = kcalloc(adapter->num_tx_queues,
                      sizeof(struct ixgbevf_ring), GFP_KERNEL);
    if (!tx_ring) {
        err = -ENOMEM;
        goto clear_reset;
    }

    rx_ring = kcalloc(adapter->num_rx_queues,
                      sizeof(struct ixgbevf_ring), GFP_KERNEL);
    if (!rx_ring) {
        err = -ENOMEM;
        goto err_rx_setup;
    }

    ixgbevf_down(adapter);

    memcpy(tx_ring, adapter->tx_ring,
           adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
    for (i = 0; i < adapter->num_tx_queues; i++) {
        tx_ring[i].count = new_tx_count;
        err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
        if (err) {
            while (i) {
                i--;
                ixgbevf_free_tx_resources(adapter,
                                          &tx_ring[i]);
            }
            goto err_tx_ring_setup;
        }
        tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
    }

    memcpy(rx_ring, adapter->rx_ring,
           adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
    for (i = 0; i < adapter->num_rx_queues; i++) {
        rx_ring[i].count = new_rx_count;
        err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
        if (err) {
            while (i) {
                i--;
                ixgbevf_free_rx_resources(adapter,
                                          &rx_ring[i]);
            }
            goto err_rx_ring_setup;
        }
        rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
    }

    /*
     * Only switch to new rings if all the prior allocations
     * and ring setups have succeeded.
     */
    kfree(adapter->tx_ring);
    adapter->tx_ring = tx_ring;
    adapter->tx_ring_count = new_tx_count;

    kfree(adapter->rx_ring);
    adapter->rx_ring = rx_ring;
    adapter->rx_ring_count = new_rx_count;

    /* success! */
    ixgbevf_up(adapter);

    goto clear_reset;

err_rx_ring_setup:
    for (i = 0; i < adapter->num_tx_queues; i++)
        ixgbevf_free_tx_resources(adapter, &tx_ring[i]);

err_tx_ring_setup:
    kfree(rx_ring);

err_rx_setup:
    kfree(tx_ring);

clear_reset:
    clear_bit(__IXGBEVF_RESETTING, &adapter->state);
    return err;
}
Beispiel #26
static void hxge_diag_test(struct net_device *netdev,
		struct ethtool_test *eth_test, uint64_t *data)
{
	struct hxge_adapter *hxgep = netdev_priv(netdev);
	boolean_t if_running = netif_running(netdev);
	int link_up = hxge_peu_get_link_status(hxgep);
	int i;
	loopback_params_t *param;
	loopback_params_t orig_params;
	char *str;
	int num_tests;

	num_tests = sizeof(loopback_params)/sizeof(loopback_params_t);
	for (i = 0, param = loopback_params; i < num_tests; i++)
	{
		str = strstr(hxge_gstrings_test[i], "=");
		if (!str) {
			HXGE_ERR(hxgep, "Error in test strings construct");
			return;
		}
		str += 2; /* skip = and a space */
		strncpy(str, "NOTRUN", strlen("NOTRUN"));
	}

	for (i = 0; i < num_tests; i++)
		data[i] = 0;

	/* These are offline tests */
	if (eth_test->flags == ETH_TEST_FL_OFFLINE)
		HXGE_DBG(hxgep, "hxge_diag_test: Offline test starting");

	set_bit(HXGE_DEVICE_TESTING, &hxgep->state);

	/* The interface must be down before the offline tests can run */
	if (if_running) {
		HXGE_ERR(hxgep, "hxge_diag_test: Cannot run offline tests on a running interface; bring the interface down before attempting them");
		eth_test->flags |= ETH_TEST_FL_FAILED;
		clear_bit(HXGE_DEVICE_TESTING, &hxgep->state);
		return;
	}

	if (link_up) {
		HXGE_ERR(hxgep, "hxge_diag_test: Link must be down for offline tests");
		eth_test->flags |= ETH_TEST_FL_FAILED;
		clear_bit(HXGE_DEVICE_TESTING, &hxgep->state);
		return;
	}


	if ((hxge_get_option("intr_type", &orig_params.intr_type) < 0) ||
	    (hxge_get_option("num_tx_descs",&orig_params.tx_descs) < 0) ||
	    (hxge_get_option("tx_dma_channels", &orig_params.tx_channels) < 0) ||
	    (hxge_get_option("rx_dma_channels", &orig_params.rx_channels) < 0) ||
	    (hxge_get_option("rcr_entries", &orig_params.rcr_entries) < 0) ||
	    (hxge_get_option("rbr_entries", &orig_params.rbr_entries) < 0) ||
	    (hxge_get_option("rcr_threshold", &orig_params.rcr_threshold) < 0) ||
	    (hxge_get_option("rcr_timeout", &orig_params.rcr_timeout) < 0))
	{
		eth_test->flags |= ETH_TEST_FL_FAILED;
		clear_bit(HXGE_DEVICE_TESTING, &hxgep->state);
		return;
	}

	for (i = 0, param = loopback_params; i < num_tests; i++)
	{
		str = strstr(hxge_gstrings_test[i], "=");
		if (!str) {
			HXGE_ERR(hxgep, "Error in test strings construct");
			eth_test->flags |= ETH_TEST_FL_FAILED;
			break;
		}
		str += 2; /* skip = and a space */
		HXGE_DBG(hxgep, "*** LOOPBACK TEST %d", i);
		if (hxge_loopback_test(hxgep, &param[i])) {
			eth_test->flags |= ETH_TEST_FL_FAILED;
			strncpy(str, "FAILED", strlen("FAILED"));
			break;
		}
		/* Replace FAILED with PASSED */
		strncpy(str, "PASSED", strlen("PASSED"));
		data[i] = 1;
	}

	/* restore parameters to original values */
	hxge_set_option("rbr_entries", orig_params.rbr_entries);
	hxge_set_option("rcr_entries", orig_params.rcr_entries);
	hxge_set_option("rx_dma_channels", orig_params.rx_channels);
	hxge_set_option("tx_dma_channels", orig_params.tx_channels);
	hxge_set_option("num_tx_descs", orig_params.tx_descs);
	hxge_set_option("intr_type", orig_params.intr_type);
	hxge_set_option("rcr_threshold", orig_params.rcr_threshold);
	hxge_set_option("rcr_timeout", orig_params.rcr_timeout);

	clear_bit(HXGE_DEVICE_TESTING, &hxgep->state);
}
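
One detail of hxge_diag_test that is easy to misread: each test label ends
in a fixed-width status slot after "= ", and strncpy() with the exact source
length overwrites just that slot without writing a NUL, leaving the rest of
the string intact ("NOTRUN", "PASSED" and "FAILED" are all six characters).
A tiny userspace sketch of the same patching; the label text is
illustrative:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char label[] = "loopback test 0 = NOTRUN";
	char *slot = strstr(label, "=") + 2;	/* skip "= " */

	strncpy(slot, "PASSED", strlen("PASSED"));
	printf("%s\n", label);		/* prints: loopback test 0 = PASSED */
	return 0;
}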
Beispiel #27
static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
				 int type, u32 pid, u32 seq, u32 change, 
				 unsigned int flags)
{
	struct ifinfomsg *r;
	struct nlmsghdr  *nlh;
	unsigned char	 *b = skb->tail;

	nlh = NLMSG_NEW(skb, pid, seq, type, sizeof(*r), flags);
	r = NLMSG_DATA(nlh);
	r->ifi_family = AF_UNSPEC;
	r->__ifi_pad = 0;
	r->ifi_type = dev->type;
	r->ifi_index = dev->ifindex;
	r->ifi_flags = dev_get_flags(dev);
	r->ifi_change = change;

	RTA_PUT(skb, IFLA_IFNAME, strlen(dev->name)+1, dev->name);

	if (1) {
		u32 txqlen = dev->tx_queue_len;
		RTA_PUT(skb, IFLA_TXQLEN, sizeof(txqlen), &txqlen);
	}

	if (1) {
		u32 weight = dev->weight;
		RTA_PUT(skb, IFLA_WEIGHT, sizeof(weight), &weight);
	}

	if (1) {
		u8 operstate = netif_running(dev)?dev->operstate:IF_OPER_DOWN;
		u8 link_mode = dev->link_mode;
		RTA_PUT(skb, IFLA_OPERSTATE, sizeof(operstate), &operstate);
		RTA_PUT(skb, IFLA_LINKMODE, sizeof(link_mode), &link_mode);
	}

	if (1) {
		struct rtnl_link_ifmap map = {
			.mem_start   = dev->mem_start,
			.mem_end     = dev->mem_end,
			.base_addr   = dev->base_addr,
			.irq         = dev->irq,
			.dma         = dev->dma,
			.port        = dev->if_port,
		};
		RTA_PUT(skb, IFLA_MAP, sizeof(map), &map);
	}

	if (dev->addr_len) {
		RTA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
		RTA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
	}

	if (1) {
		u32 mtu = dev->mtu;
		RTA_PUT(skb, IFLA_MTU, sizeof(mtu), &mtu);
	}

	if (dev->ifindex != dev->iflink) {
		u32 iflink = dev->iflink;
		RTA_PUT(skb, IFLA_LINK, sizeof(iflink), &iflink);
	}

	if (dev->qdisc_sleeping)
		RTA_PUT(skb, IFLA_QDISC,
			strlen(dev->qdisc_sleeping->ops->id) + 1,
			dev->qdisc_sleeping->ops->id);
	
	if (dev->master) {
		u32 master = dev->master->ifindex;
		RTA_PUT(skb, IFLA_MASTER, sizeof(master), &master);
	}

	if (dev->get_stats) {
		unsigned long *stats = (unsigned long*)dev->get_stats(dev);
		if (stats) {
			struct rtattr  *a;
			__u32	       *s;
			int		i;
			int		n = sizeof(struct rtnl_link_stats)/4;

			a = __RTA_PUT(skb, IFLA_STATS, n*4);
			s = RTA_DATA(a);
			for (i=0; i<n; i++)
				s[i] = stats[i];
		}
	}
	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int rtnetlink_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;
	int s_idx = cb->args[0];
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
		if (idx < s_idx)
			continue;
		if (rtnetlink_fill_ifinfo(skb, dev, RTM_NEWLINK,
					  NETLINK_CB(cb->skb).pid,
					  cb->nlh->nlmsg_seq, 0,
					  NLM_F_MULTI) <= 0)
			break;
	}
	read_unlock(&dev_base_lock);
	cb->args[0] = idx;

	return skb->len;
}
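
rtnetlink_dump_ifinfo shows the standard netlink dump cursor: cb->args[]
persists across the successive skbs of one dump, so the callback records how
far it got and skips already-sent entries on the next invocation. Reduced to
its skeleton (demo_count and demo_fill_one are illustrative stand-ins):

#include <linux/netlink.h>
#include <linux/skbuff.h>

extern int demo_count;
extern int demo_fill_one(struct sk_buff *skb, int idx);

static int demo_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx, s_idx = cb->args[0];	/* where the previous pass stopped */

	for (idx = s_idx; idx < demo_count; idx++) {
		if (demo_fill_one(skb, idx) <= 0)
			break;		/* skb full: stop, resume here later */
	}
	cb->args[0] = idx;		/* cursor for the next dump call */
	return skb->len;
}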
Beispiel #28
/* work that cannot be done in interrupt context uses keventd.
 *
 * NOTE:  with 2.5 we could do more of this using completion callbacks,
 * especially now that control transfers can be queued.
 */
static void
kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt (dev->udev, dev->out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
fail_pipe:
				netdev_err(dev->net, "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt (dev->udev, dev->in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
fail_halt:
				netdev_err(dev->net, "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_RX_HALT, &dev->flags);
			tasklet_schedule (&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;
		int resched = 1;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
			status = usb_autopm_get_interface(dev->intf);
			if (status < 0) {
				usb_free_urb(urb);
				goto fail_lowmem;
			}
			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
				resched = 0;
			usb_autopm_put_interface(dev->intf);
fail_lowmem:
			if (resched)
				tasklet_schedule (&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (info->link_reset && (retval = info->link_reset(dev)) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
				    retval,
				    dev->udev->bus->bus_name,
				    dev->udev->devpath,
				    info->description);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (dev->flags)
		netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
}
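
kevent() is the process-context half of a two-part pattern; the
interrupt-context half, called axusbnet_defer_kevent()/usbnet_defer_kevent()
elsewhere in this code but not shown in this excerpt, merely records what
needs doing and queues the work. A hedged sketch of that half (demo_* names
are illustrative):

#include <linux/bitops.h>
#include <linux/workqueue.h>

struct demo_usbnet {
	unsigned long	   flags;	/* EVENT_* bits, tested by kevent() */
	struct work_struct kevent;
};

static void demo_defer_kevent(struct demo_usbnet *dev, int bit)
{
	set_bit(bit, &dev->flags);	/* record what kevent() must handle */
	/* schedule_work() returns false if the work is already queued;
	 * the pending run will still observe the freshly set bit. */
	schedule_work(&dev->kevent);
}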
Beispiel #29
static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
	if (!skb) {
		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = rx_start;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach (dev->net);
			break;
		case -EHOSTUNREACH:
			retval = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__skb_queue_tail (&dev->rxq, skb);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
	return retval;
}
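
Both the tx and rx paths stash their per-URB bookkeeping in skb->cb, the
48-byte scratch area every sk_buff carries, so no per-packet allocation is
needed. A hedged sketch of that layout; demo_skb_data mirrors the shape of
the driver's skb_data but the names are illustrative:

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/usb.h>

struct demo_skb_data {			/* must fit in sizeof(skb->cb) */
	struct urb	*urb;
	void		*dev;
	unsigned int	 length;
};

static inline struct demo_skb_data *demo_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct demo_skb_data) > sizeof(skb->cb));
	return (struct demo_skb_data *)skb->cb;
}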
Beispiel #30
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}