#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void kevent (void *data)
{
	struct usbnet *dev = (struct usbnet *)data;
#else
static void kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
#endif
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
printk ("EVENT_TX_HALT\n");
		unlink_urbs (dev, &dev->txq);
		status = usb_clear_halt (dev->udev, dev->out);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
				deverr (dev, "can't clear tx halt, status %d",
					status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
printk ("EVENT_RX_HALT\n");
		unlink_urbs (dev, &dev->rxq);
		status = usb_clear_halt (dev->udev, dev->in);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
				deverr (dev, "can't clear rx halt, status %d",
					status);
		} else {
			clear_bit (EVENT_RX_HALT, &dev->flags);
			tasklet_schedule (&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;
printk ("EVENT_RX_MEMORY\n");
		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
			urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
			rx_submit (dev, urb, GFP_KERNEL);
			tasklet_schedule (&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		if (info->link_reset && (retval = info->link_reset(dev)) < 0) {
			devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
				info->description);
		}
	}

	if (dev->flags)
		devdbg (dev, "kevent done, flags = 0x%lx",
			dev->flags);
}

/*-------------------------------------------------------------------------*/

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
static void tx_complete (struct urb *urb, struct pt_regs *regs)
#else
static void tx_complete (struct urb *urb)
#endif
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += entry->length;
	} else {
		dev->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			axusbnet_defer_kevent (dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		/* async unlink */
		case -ESHUTDOWN:		/* hardware gone */
			break;

		/* like rx, tx gets controller i/o faults during khubd delays
		 * and so it uses the same throttling mechanism.
		 */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			if (!timer_pending (&dev->delay)) {
				mod_timer (&dev->delay,
					jiffies + THROTTLE_JIFFIES);
				if (netif_msg_link (dev))
					devdbg (dev, "tx throttle %d",
							urb->status);
			}
			netif_stop_queue (dev->net);
			break;
		default:
			if (netif_msg_tx_err (dev))
				devdbg (dev, "tx err %d", entry->urb->status);
			break;
		}
	}

	urb->dev = NULL;
	entry->state = tx_done;
	defer_bh(dev, skb, &dev->txq);
}

/*-------------------------------------------------------------------------*/

static
void axusbnet_tx_timeout (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);

	unlink_urbs (dev, &dev->txq);
	tasklet_schedule (&dev->bh);

	/* FIXME: device recovery -- reset? */
}

/*-------------------------------------------------------------------------*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
static int
#else
static netdev_tx_t
#endif
axusbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	/* some devices want funky USB-level framing, for
	 * win32 driver (usually) and/or hardware quirks
	 */
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err (dev))
				devdbg (dev, "can't tx_fixup skb");
			goto drop;
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "no urb");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}

	spin_lock_irqsave (&dev->txq.lock, flags);

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		axusbnet_defer_kevent (dev, EVENT_TX_HALT);
		break;
	default:
		if (netif_msg_tx_err (dev))
			devdbg (dev, "tx: submit urb err %d", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail (&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "drop, code %d", retval);
drop:
		dev->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else if (netif_msg_tx_queued (dev)) {
		devdbg (dev, "> tx, len %d, type 0x%x",
			length, skb->protocol);
	}
	return NETDEV_TX_OK;
}
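
/*
 * Hedged aside on the ZLP handling above: padding a one-byte packet is
 * the fallback when the driver does not assume the controller can send
 * a zero-length packet.  A minimal sketch of the usual alternative on
 * controllers that can (not code from this driver):
 */
static inline void sketch_request_zlp(struct urb *urb, struct sk_buff *skb,
				      unsigned int maxpacket)
{
	/* an exact multiple of wMaxPacketSize needs explicit termination */
	if (skb->len != 0 && (skb->len % maxpacket) == 0)
		urb->transfer_flags |= URB_ZERO_PACKET;
}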

/*-------------------------------------------------------------------------*/

/* tasklet (work deferred from completions, in_irq) or timer */

static void axusbnet_bh (unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process (dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb (entry->urb);
			dev_kfree_skb (skb);
			continue;
		default:
			devdbg (dev, "bogus skb state %d", entry->state);
		}
	}

	/* waiting for all pending urbs to complete? */
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
			wake_up (dev->wait);
		}

	/* or are we maybe short a few urbs? */
	} else if (netif_running (dev->net)
			&& netif_device_present (dev->net)
			&& !timer_pending (&dev->delay)
			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;
		int	qlen = RX_QLEN (dev);

		if (temp < qlen) {
			struct urb	*urb;
			int		i;

			/* don't refill the queue all at once */
			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
				urb = usb_alloc_urb (0, GFP_ATOMIC);
				if (urb != NULL) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
					urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
					rx_submit (dev, urb, GFP_ATOMIC);
				}
			}
			if (temp != dev->rxq.qlen && netif_msg_link (dev))
				devdbg (dev, "rxqlen %d --> %d",
						temp, dev->rxq.qlen);
			if (dev->rxq.qlen < qlen &&
				!test_bit(EVENT_RX_MEMORY, &dev->flags))
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}
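
/*
 * Worked note on the refill throttle above: each pass of the bh submits
 * at most 10 rx URBs; if the ring is still below RX_QLEN(dev) (and no
 * memory-shortage event is pending) the tasklet reschedules itself, so
 * a deep ring is refilled across several short atomic passes rather
 * than one long one.
 */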


/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

/* precondition: never called in_interrupt */

static
void axusbnet_disconnect (struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	xdev = interface_to_usbdev (intf);

	if (netif_msg_probe (dev))
		devinfo (dev, "unregister '%s' usb-%s-%s, %s",
			intf->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description);

	net = dev->net;
	unregister_netdev (net);

	/* we don't hold rtnl here ... */
	flush_scheduled_work ();

	if (dev->driver_info->unbind)
		dev->driver_info->unbind (dev, intf);

	free_netdev(net);
	usb_put_dev (xdev);
}

/*-------------------------------------------------------------------------*/

/* precondition: never called in_interrupt */

static int
axusbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	struct driver_info		*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		printk (KERN_ERR "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev (udev);
	interface = udev->cur_altsetting;

	usb_get_dev (xdev);

	status = -ENOMEM;

	/* set up our own records */
	net = alloc_etherdev(sizeof(*dev));
	if (!net) {
		dbg ("can't kmalloc dev");
		goto out;
	}

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init (&dev->rxq);
	skb_queue_head_init (&dev->txq);
	skb_queue_head_init (&dev->done);
	dev->bh.func = axusbnet_bh;
	dev->bh.data = (unsigned long) dev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	INIT_WORK (&dev->kevent, kevent, dev);
#else
	INIT_WORK (&dev->kevent, kevent);
#endif

	dev->delay.function = axusbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer (&dev->delay);
/*	mutex_init (&dev->phy_mutex); */

	dev->net = net;

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
	net->open		= axusbnet_open;
	net->stop		= axusbnet_stop;
	net->hard_start_xmit	= axusbnet_start_xmit;
	net->tx_timeout		= axusbnet_tx_timeout;
	net->get_stats		= axusbnet_get_stats;
#endif

	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &axusbnet_ethtool_ops;

	/* allow device-specific bind/init procedures
	 * NOTE net->name still not usable ...
	 */
	status = info->bind (dev, udev);
	if (status < 0) {
		deverr(dev, "Binding device failed: %d", status);
		goto out1;
	}
	strcpy(net->name, "usbeth%d");
	/* maybe the remote can't receive an Ethernet MTU */
	if (net->mtu > (dev->hard_mtu - net->hard_header_len))
		net->mtu = dev->hard_mtu - net->hard_header_len;

	status = init_status (dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);

	SET_NETDEV_DEV(net, &udev->dev);
	status = register_netdev (net);
	if (status) {
		deverr(dev, "net device registration failed: %d", status);
		goto out3;
	}

	if (netif_msg_probe (dev))
		devinfo (dev, "register '%s' at usb-%s-%s, %s, %pM",
			udev->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description,
			net->dev_addr);

	/* ok, it's ready to go. */
	usb_set_intfdata (udev, dev);

	/* start as if the link is up */
	netif_device_attach (net);

	return 0;

out3:
	if (info->unbind)
		info->unbind (dev, udev);
out1:
	free_netdev(net);
out:
	usb_put_dev(xdev);
	return status;
}

/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */

static int axusbnet_suspend (struct usb_interface *intf,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)
pm_message_t message)
#else
u32 message)
#endif
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		/*
		 * accelerate emptying of the rx and tx queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach (dev->net);
		(void) unlink_urbs (dev, &dev->rxq);
		(void) unlink_urbs (dev, &dev->txq);
		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach (dev->net);
	}
	return 0;
}

static int
axusbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!--dev->suspend_count)
		tasklet_schedule (&dev->bh);

	return 0;
}
Example #2
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
					const struct ieee80211_ops *ops)
{
	struct ieee80211_local *local;
	int priv_size, i;
	struct wiphy *wiphy;

	/* Ensure 32-byte alignment of our private data and hw private data.
	 * We use the wiphy priv data for both our ieee80211_local and for
	 * the driver's private data
	 *
	 * In memory it'll be like this:
	 *
	 * +-------------------------+
	 * | struct wiphy	    |
	 * +-------------------------+
	 * | struct ieee80211_local  |
	 * +-------------------------+
	 * | driver's private data   |
	 * +-------------------------+
	 *
	 */
	priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len;

	wiphy = wiphy_new(&mac80211_config_ops, priv_size);

	if (!wiphy)
		return NULL;

	wiphy->mgmt_stypes = ieee80211_default_mgmt_stypes;

	wiphy->privid = mac80211_wiphy_privid;

	wiphy->flags |= WIPHY_FLAG_NETNS_OK |
			WIPHY_FLAG_4ADDR_AP |
			WIPHY_FLAG_4ADDR_STATION |
			WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS;

	if (!ops->set_key)
		wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	wiphy->bss_priv_size = sizeof(struct ieee80211_bss);

	local = wiphy_priv(wiphy);

	local->hw.wiphy = wiphy;

	local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);

	BUG_ON(!ops->tx);
	BUG_ON(!ops->start);
	BUG_ON(!ops->stop);
	BUG_ON(!ops->config);
	BUG_ON(!ops->add_interface);
	BUG_ON(!ops->remove_interface);
	BUG_ON(!ops->configure_filter);
	local->ops = ops;

	/* set up some defaults */
	local->hw.queues = 1;
	local->hw.max_rates = 1;
	local->hw.max_report_rates = 0;
	local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
	local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
	local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
	local->user_power_level = -1;
	local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
	local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;

	INIT_LIST_HEAD(&local->interfaces);

	__hw_addr_init(&local->mc_list);

	mutex_init(&local->iflist_mtx);
	mutex_init(&local->mtx);

	mutex_init(&local->key_mtx);
	spin_lock_init(&local->filter_lock);
	spin_lock_init(&local->queue_stop_reason_lock);

	/*
	 * The rx_skb_queue is only accessed from tasklets,
	 * but other SKB queues are used from within IRQ
	 * context. Therefore, this one needs a different
	 * locking class so our direct, non-irq-safe use of
	 * the queue's lock doesn't throw lockdep warnings.
	 */
	skb_queue_head_init_class(&local->rx_skb_queue,
				  &ieee80211_rx_skb_queue_class);

	INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);

	ieee80211_work_init(local);

	INIT_WORK(&local->restart_work, ieee80211_restart_work);

	INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
	INIT_WORK(&local->recalc_smps, ieee80211_recalc_smps_work);
	local->smps_mode = IEEE80211_SMPS_OFF;

	INIT_WORK(&local->dynamic_ps_enable_work,
		  ieee80211_dynamic_ps_enable_work);
	INIT_WORK(&local->dynamic_ps_disable_work,
		  ieee80211_dynamic_ps_disable_work);
	setup_timer(&local->dynamic_ps_timer,
		    ieee80211_dynamic_ps_timer, (unsigned long) local);

	sta_info_init(local);

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
		skb_queue_head_init(&local->pending[i]);
		atomic_set(&local->agg_queue_stop[i], 0);
	}
	tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
		     (unsigned long)local);

	tasklet_init(&local->tasklet,
		     ieee80211_tasklet_handler,
		     (unsigned long) local);

	skb_queue_head_init(&local->skb_queue);
	skb_queue_head_init(&local->skb_queue_unreliable);

	/* init dummy netdev for use w/ NAPI */
	init_dummy_netdev(&local->napi_dev);

	ieee80211_led_names(local);

	ieee80211_hw_roc_setup(local);

	return local_to_hw(local);
}
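
/*
 * A minimal usage sketch for the layout set up above (driver names are
 * hypothetical): the priv_data_len bytes requested here come back as
 * hw->priv, i.e. the tail of the wiphy allocation diagrammed in the
 * comment at the top of ieee80211_alloc_hw().
 */
struct mydrv_priv {			/* hypothetical driver state */
	int irq;
};

static struct ieee80211_hw *mydrv_alloc(const struct ieee80211_ops *ops)
{
	struct ieee80211_hw *hw;
	struct mydrv_priv *priv;

	hw = ieee80211_alloc_hw(sizeof(*priv), ops);
	if (!hw)
		return NULL;
	priv = hw->priv;		/* NETDEV_ALIGN-aligned, zeroed */
	priv->irq = -1;
	return hw;
}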
Example #3
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_agg_stop_reason reason)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.tid = tid,
		.buf_size = 0,
		.amsdu = false,
		.timeout = 0,
		.ssn = 0,
	};
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	switch (reason) {
	case AGG_STOP_DECLINED:
	case AGG_STOP_LOCAL_REQUEST:
	case AGG_STOP_PEER_REQUEST:
		params.action = IEEE80211_AMPDU_TX_STOP_CONT;
		break;
	case AGG_STOP_DESTROY_STA:
		params.action = IEEE80211_AMPDU_TX_STOP_FLUSH;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* free struct pending for start, if present */
	tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
	kfree(tid_tx);
	sta->ampdu_mlme.tid_start_tx[tid] = NULL;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx) {
		spin_unlock_bh(&sta->lock);
		return -ENOENT;
	}

	/*
	 * If we're already stopping, ignore any new requests to stop,
	 * unless we're destroying it, in which case notify the driver.
	 */
	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		spin_unlock_bh(&sta->lock);
		if (reason != AGG_STOP_DESTROY_STA)
			return -EALREADY;
		params.action = IEEE80211_AMPDU_TX_STOP_FLUSH_CONT;
		ret = drv_ampdu_action(local, sta->sdata, &params);
		WARN_ON_ONCE(ret);
		return 0;
	}

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! */
		ieee80211_assign_tid_tx(sta, tid, NULL);
		spin_unlock_bh(&sta->lock);
		kfree_rcu(tid_tx, rcu_head);
		return 0;
	}

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	spin_unlock_bh(&sta->lock);

	ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);

	del_timer_sync(&tid_tx->addba_resp_timer);
	del_timer_sync(&tid_tx->session_timer);

	/*
	 * After this packets are no longer handed right through
	 * to the driver but are put onto tid_tx->pending instead,
	 * with locking to ensure proper access.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	/*
	 * There might be a few packets being processed right now (on
	 * another CPU) that have already gotten past the aggregation
	 * check when it was still OPERATIONAL and consequently have
	 * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
	 * call into the driver at the same time or even before the
	 * TX paths calls into it, which could confuse the driver.
	 *
	 * Wait for all currently running TX paths to finish before
	 * telling the driver. New packets will not go through since
	 * the aggregation session is no longer OPERATIONAL.
	 */
	synchronize_net();

	tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
					WLAN_BACK_RECIPIENT :
					WLAN_BACK_INITIATOR;
	tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;

	ret = drv_ampdu_action(local, sta->sdata, &params);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * We may have pending packets get stuck in this case...
		 * Not bothering with a workaround for now.
		 */
	}

	/*
	 * In the case of AGG_STOP_DESTROY_STA, the driver won't
	 * necessarily call ieee80211_stop_tx_ba_cb(), so this may
	 * seem like we can leave the tid_tx data pending forever.
	 * This is true, in a way, but "forever" is only until the
	 * station struct is actually destroyed. In the meantime,
	 * leaving it around ensures that we don't transmit packets
	 * to the driver on this TID which might confuse it.
	 */

	return 0;
}

/*
 * After sending an ADDBA request we start a timer that runs until the
 * ADDBA response arrives from the recipient.
 * If this timer expires, sta_addba_resp_timer_expired() is executed.
 */
static void sta_addba_resp_timer_expired(struct timer_list *t)
{
	struct tid_ampdu_tx *tid_tx_timer =
		from_timer(tid_tx_timer, t, addba_resp_timer);
	struct sta_info *sta = tid_tx_timer->sta;
	u8 tid = tid_tx_timer->tid;
	struct tid_ampdu_tx *tid_tx;

	/* check if the TID waits for addBA response */
	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx ||
	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
		rcu_read_unlock();
		ht_dbg(sta->sdata,
		       "timer expired on %pM tid %d not expecting addBA response\n",
		       sta->sta.addr, tid);
		return;
	}

	ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n",
	       sta->sta.addr, tid);

	ieee80211_stop_tx_ba_session(&sta->sta, tid);
	rcu_read_unlock();
}

void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.action = IEEE80211_AMPDU_TX_START,
		.tid = tid,
		.buf_size = 0,
		.amsdu = false,
		.timeout = 0,
	};
	int ret;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * Start queuing up packets for this aggregation session.
	 * We're going to release them once the driver is OK with
	 * that.
	 */
	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	ieee80211_agg_stop_txq(sta, tid);

	/*
	 * Make sure no packets are being processed. This ensures that
	 * we have a valid starting sequence number and that in-flight
	 * packets have been flushed out and no packets for this TID
	 * will go into the driver during the ampdu_action call.
	 */
	synchronize_net();

	params.ssn = sta->tid_seq[tid] >> 4;
	ret = drv_ampdu_action(local, sdata, &params);
	if (ret) {
		ht_dbg(sdata,
		       "BA request denied - HW unavailable for %pM tid %d\n",
		       sta->sta.addr, tid);
		spin_lock_bh(&sta->lock);
		ieee80211_agg_splice_packets(sdata, tid_tx, tid);
		ieee80211_assign_tid_tx(sta, tid, NULL);
		ieee80211_agg_splice_finish(sdata, tid);
		spin_unlock_bh(&sta->lock);

		ieee80211_agg_start_txq(sta, tid, false);

		kfree_rcu(tid_tx, rcu_head);
		return;
	}

	/* activate the timer for the recipient's addBA response */
	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
	ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
	       sta->sta.addr, tid);

	spin_lock_bh(&sta->lock);
	sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
	sta->ampdu_mlme.addba_req_num[tid]++;
	spin_unlock_bh(&sta->lock);

	/* send AddBA request */
	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
				     tid_tx->dialog_token, params.ssn,
				     IEEE80211_MAX_AMPDU_BUF,
				     tid_tx->timeout);
}

/*
 * After accepting the ADDBA response we start a timer, resetting it
 * after each frame that we send.
 */
static void sta_tx_agg_session_timer_expired(struct timer_list *t)
{
	struct tid_ampdu_tx *tid_tx_timer =
		from_timer(tid_tx_timer, t, session_timer);
	struct sta_info *sta = tid_tx_timer->sta;
	u8 tid = tid_tx_timer->tid;
	struct tid_ampdu_tx *tid_tx;
	unsigned long timeout;

	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		rcu_read_unlock();
		return;
	}

	timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&tid_tx->session_timer, timeout);
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
	       sta->sta.addr, tid);

	ieee80211_stop_tx_ba_session(&sta->sta, tid);
}

int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
				  u16 timeout)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN(sta->reserved_tid == tid,
		 "Requested to start BA session on reserved tid=%d", tid))
		return -EINVAL;

	if (!pubsta->ht_cap.ht_supported)
		return -EINVAL;

	if (WARN_ON_ONCE(!local->ops->ampdu_action))
		return -EINVAL;

	if ((tid >= IEEE80211_NUM_TIDS) ||
	    !ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) ||
	    ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW))
		return -EINVAL;

	if (WARN_ON(tid >= IEEE80211_FIRST_TSPEC_TSID))
		return -EINVAL;

	ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);

	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
		return -EINVAL;

	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
		ht_dbg(sdata,
		       "BA sessions blocked - Denying BA session request %pM tid %d\n",
		       sta->sta.addr, tid);
		return -EINVAL;
	}

	/*
	 * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
	 * member of an IBSS, and has no other existing Block Ack agreement
	 * with the recipient STA, then the initiating STA shall transmit a
	 * Probe Request frame to the recipient STA and shall not transmit an
	 * ADDBA Request frame unless it receives a Probe Response frame
	 * from the recipient within dot11ADDBAFailureTimeout.
	 *
	 * The probe request mechanism for ADDBA is currently not implemented,
	 * but we only build up Block Ack sessions with HT STAs. This
	 * information is set when we receive the BSS info from a probe
	 * response or a beacon.
	 */
	if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
	    !sta->sta.ht_cap.ht_supported) {
		ht_dbg(sdata,
		       "BA request denied - IBSS STA %pM does not advertise HT support\n",
		       pubsta->addr);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	/*
	 * if we have tried more than HT_AGG_BURST_RETRIES times we
	 * will spread our requests in time to avoid stalling connection
	 * for too long
	 */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
			HT_AGG_RETRIES_PERIOD)) {
		ht_dbg(sdata,
		       "BA request denied - %d failed requests on %pM tid %u\n",
		       sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	/* check if the TID is not in aggregation flow already */
	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
		ht_dbg(sdata,
		       "BA request denied - session is not idle on %pM tid %u\n",
		       sta->sta.addr, tid);
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	tid_tx->timeout = timeout;
	tid_tx->sta = sta;
	tid_tx->tid = tid;

	/* response timer */
	timer_setup(&tid_tx->addba_resp_timer, sta_addba_resp_timer_expired, 0);

	/* tx timer */
	timer_setup(&tid_tx->session_timer,
		    sta_tx_agg_session_timer_expired, TIMER_DEFERRABLE);

	/* assign a dialog token */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/*
	 * Finally, assign it to the start array; the work item will
	 * collect it and move it to the normal array.
	 */
	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* this flow continues off the work */
 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
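
/*
 * Hedged usage sketch: drivers (often from rate-control logic) kick off
 * a session with the exported helper above and tear it down again with
 * ieee80211_stop_tx_ba_session(); both just queue work, and the
 * handshake itself completes asynchronously via the cb/irqsafe
 * callbacks further below.
 */
static void mydrv_try_aggregate(struct ieee80211_sta *pubsta, u16 tid)
{
	/* 0 = no session timeout; errors (-EBUSY, -EAGAIN, ...) just
	 * mean "not now" and are safe to ignore or retry later. */
	if (ieee80211_start_tx_ba_session(pubsta, tid, 0))
		return;
}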

static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
					 struct sta_info *sta, u16 tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.action = IEEE80211_AMPDU_TX_OPERATIONAL,
		.tid = tid,
		.timeout = 0,
		.ssn = 0,
	};

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	params.buf_size = tid_tx->buf_size;
	params.amsdu = tid_tx->amsdu;

	ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
	       sta->sta.addr, tid);

	drv_ampdu_action(local, sta->sdata, &params);

	/*
	 * synchronize with TX path, while splicing the TX path
	 * should block so it won't put more packets onto pending.
	 */
	spin_lock_bh(&sta->lock);

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
	/*
	 * Now mark as operational. This will be visible
	 * in the TX path, and lets it go lock-free in
	 * the common case.
	 */
	set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	ieee80211_agg_splice_finish(sta->sdata, tid);

	spin_unlock_bh(&sta->lock);

	ieee80211_agg_start_txq(sta, tid, true);
}

void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
			      struct tid_ampdu_tx *tid_tx)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;

	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
		return;

	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
		ieee80211_agg_tx_operational(local, sta, tid);
}

static struct tid_ampdu_tx *
ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
			const u8 *ra, u16 tid, struct sta_info **sta)
{
	struct tid_ampdu_tx *tid_tx;

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return NULL;
	}

	*sta = sta_info_get_bss(sdata, ra);
	if (!*sta) {
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		return NULL;
	}

	tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);

	if (WARN_ON(!tid_tx))
		ht_dbg(sdata, "addBA was not requested!\n");

	return tid_tx;
}

void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				      const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_start_tx_ba_cb(sdata, ra, tid);

	rcu_read_lock();
	tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
	if (!tid_tx)
		goto out;

	set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
 out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);

int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				   enum ieee80211_agg_stop_reason reason)
{
	int ret;

	mutex_lock(&sta->ampdu_mlme.mtx);

	ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);

	mutex_unlock(&sta->ampdu_mlme.mtx);

	return ret;
}

int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_stop_tx_ba_session(pubsta, tid);

	if (!local->ops->ampdu_action)
		return -EINVAL;

	if (tid >= IEEE80211_NUM_TIDS)
		return -EINVAL;

	spin_lock_bh(&sta->lock);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (!tid_tx) {
		ret = -ENOENT;
		goto unlock;
	}

	WARN(sta->reserved_tid == tid,
	     "Requested to stop BA session on reserved tid=%d", tid);

	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		/* already in progress stopping it */
		ret = 0;
		goto unlock;
	}

	set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

 unlock:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);

void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
			     struct tid_ampdu_tx *tid_tx)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	bool send_delba = false;

	ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
	       sta->sta.addr, tid);

	spin_lock_bh(&sta->lock);

	if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sdata,
		       "unexpected callback to A-MPDU stop for %pM tid %d\n",
		       sta->sta.addr, tid);
		goto unlock_sta;
	}

	if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR && tid_tx->tx_stop)
		send_delba = true;

	ieee80211_remove_tid_tx(sta, tid);

 unlock_sta:
	spin_unlock_bh(&sta->lock);

	if (send_delba)
		ieee80211_send_delba(sdata, sta->sta.addr, tid,
			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
}

void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				     const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_stop_tx_ba_cb(sdata, ra, tid);

	rcu_read_lock();
	tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
	if (!tid_tx)
		goto out;

	set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
 out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);


void ieee80211_process_addba_resp(struct ieee80211_local *local,
				  struct sta_info *sta,
				  struct ieee80211_mgmt *mgmt,
				  size_t len)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_txq *txq;
	u16 capab, tid;
	u8 buf_size;
	bool amsdu;

	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
	amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
	buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);

	txq = sta->sta.txq[tid];
	if (!amsdu && txq)
		set_bit(IEEE80211_TXQ_NO_AMSDU, &to_txq_info(txq)->flags);

	mutex_lock(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx)
		goto out;

	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
		ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n",
		       sta->sta.addr, tid);
		goto out;
	}

	del_timer_sync(&tid_tx->addba_resp_timer);

	ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n",
	       sta->sta.addr, tid);

	/*
	 * addba_resp_timer may have fired before we got here, and
	 * caused WANT_STOP to be set. If the stop then was already
	 * processed further, STOPPING might be set.
	 */
	if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
	    test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sta->sdata,
		       "got addBA resp for %pM tid %d but we already gave up\n",
		       sta->sta.addr, tid);
		goto out;
	}

	/*
	 * IEEE 802.11-2007 7.3.1.14:
	 * In an ADDBA Response frame, when the Status Code field
	 * is set to 0, the Buffer Size subfield is set to a value
	 * of at least 1.
	 */
	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
			== WLAN_STATUS_SUCCESS && buf_size) {
		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
				     &tid_tx->state)) {
			/* ignore duplicate response */
			goto out;
		}

		tid_tx->buf_size = buf_size;
		tid_tx->amsdu = amsdu;

		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
			ieee80211_agg_tx_operational(local, sta, tid);

		sta->ampdu_mlme.addba_req_num[tid] = 0;

		if (tid_tx->timeout) {
			mod_timer(&tid_tx->session_timer,
				  TU_TO_EXP_TIME(tid_tx->timeout));
			tid_tx->last_tx = jiffies;
		}

	} else {
		___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED);
	}

 out:
	mutex_unlock(&sta->ampdu_mlme.mtx);
}
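
/*
 * Worked decode of the ADDBA capability field parsed at the top of
 * ieee80211_process_addba_resp() above, using the same masks/shifts
 * (values from ieee80211.h): for capab = 0x1009,
 *   amsdu    =  0x1009 & 0x0001        = 1   (A-MSDU permitted)
 *   tid      = (0x1009 & 0x003C) >> 2  = 2
 *   buf_size = (0x1009 & 0xFFC0) >> 6  = 64  (frames)
 */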
Example #4
File: c4.c  Project: NieHao/Tomato-RAF
static int c4_add_card(struct capi_driver *driver,
		       struct capicardparams *p,
		       int nr)
{
	avmctrl_info *cinfo;
	avmcard *card;
	int retval;
	int i;

	MOD_INC_USE_COUNT;

	card = (avmcard *) kmalloc(sizeof(avmcard), GFP_ATOMIC);

	if (!card) {
		printk(KERN_WARNING "%s: no memory.\n", driver->name);
	        MOD_DEC_USE_COUNT;
		return -ENOMEM;
	}
	memset(card, 0, sizeof(avmcard));
	card->dma = (avmcard_dmainfo *) kmalloc(sizeof(avmcard_dmainfo), GFP_ATOMIC);
	if (!card->dma) {
		printk(KERN_WARNING "%s: no memory.\n", driver->name);
		kfree(card);
	        MOD_DEC_USE_COUNT;
		return -ENOMEM;
	}
	memset(card->dma, 0, sizeof(avmcard_dmainfo));
        cinfo = (avmctrl_info *) kmalloc(sizeof(avmctrl_info)*4, GFP_ATOMIC);
	if (!cinfo) {
		printk(KERN_WARNING "%s: no memory.\n", driver->name);
		kfree(card->dma);
		kfree(card);
	        MOD_DEC_USE_COUNT;
		return -ENOMEM;
	}
	memset(cinfo, 0, sizeof(avmctrl_info)*4);
	card->ctrlinfo = cinfo;
	for (i=0; i < 4; i++) {
		cinfo = &card->ctrlinfo[i];
		cinfo->card = card;
	}
	sprintf(card->name, "%s-%x", driver->name, p->port);
	card->port = p->port;
	card->irq = p->irq;
	card->membase = p->membase;
	card->cardtype = nr == 4 ? avm_c4 : avm_c2;

	if (check_region(card->port, AVMB1_PORTLEN)) {
		printk(KERN_WARNING
		       "%s: ports 0x%03x-0x%03x in use.\n",
		       driver->name, card->port, card->port + AVMB1_PORTLEN);
	        kfree(card->ctrlinfo);
		kfree(card->dma);
		kfree(card);
	        MOD_DEC_USE_COUNT;
		return -EBUSY;
	}

	card->mbase = ioremap_nocache(card->membase, 128);
	if (card->mbase == 0) {
		printk(KERN_NOTICE "%s: can't remap memory at 0x%lx\n",
					driver->name, card->membase);
	        kfree(card->ctrlinfo);
		kfree(card->dma);
		kfree(card);
	        MOD_DEC_USE_COUNT;
		return -EIO;
	}

	if ((retval = c4_detect(card)) != 0) {
		printk(KERN_NOTICE "%s: NO card at 0x%x (%d)\n",
					driver->name, card->port, retval);
                iounmap(card->mbase);
	        kfree(card->ctrlinfo);
		kfree(card->dma);
		kfree(card);
	        MOD_DEC_USE_COUNT;
		return -EIO;
	}
	c4_reset(card);

	request_region(p->port, AVMB1_PORTLEN, card->name);

	retval = request_irq(card->irq, c4_interrupt, SA_SHIRQ, card->name, card);
	if (retval) {
		printk(KERN_ERR "%s: unable to get IRQ %d.\n",
				driver->name, card->irq);
                iounmap(card->mbase);
		release_region(card->port, AVMB1_PORTLEN);
	        kfree(card->ctrlinfo);
		kfree(card->dma);
		kfree(card);
	        MOD_DEC_USE_COUNT;
		return -EBUSY;
	}

	for (i=0; i < nr ; i++) {
		cinfo = &card->ctrlinfo[i];
		cinfo->card = card;
		cinfo->capi_ctrl = di->attach_ctr(driver, card->name, cinfo);
		if (!cinfo->capi_ctrl) {
			printk(KERN_ERR "%s: attach controller failed (%d).\n",
					driver->name, i);
			for (i--; i >= 0; i--) {
				cinfo = &card->ctrlinfo[i];
				di->detach_ctr(cinfo->capi_ctrl);
			}
                	iounmap(card->mbase);
			free_irq(card->irq, card);
			release_region(card->port, AVMB1_PORTLEN);
	        	kfree(card->dma);
	        	kfree(card->ctrlinfo);
			kfree(card);
	        	MOD_DEC_USE_COUNT;
			return -EBUSY;
		}
		if (i == 0)
			card->cardnr = cinfo->capi_ctrl->cnr;
	}

	skb_queue_head_init(&card->dma->send_queue);

	printk(KERN_INFO
		"%s: AVM C%d at i/o %#x, irq %d, mem %#lx\n",
		driver->name, nr, card->port, card->irq, card->membase);

	return 0;
}
Example #5
static int btuart_open(btuart_info_t *info)
{
	unsigned long flags;
	unsigned int iobase = info->p_dev->resource[0]->start;
	struct hci_dev *hdev;

	spin_lock_init(&(info->lock));

	skb_queue_head_init(&(info->txq));

	info->rx_state = RECV_WAIT_PACKET_TYPE;
	info->rx_count = 0;
	info->rx_skb = NULL;

	/* Initialize HCI device */
	hdev = hci_alloc_dev();
	if (!hdev) {
		BT_ERR("Can't allocate HCI device");
		return -ENOMEM;
	}

	info->hdev = hdev;

	hdev->bus = HCI_PCCARD;
	hdev->driver_data = info;
	SET_HCIDEV_DEV(hdev, &info->p_dev->dev);

	hdev->open     = btuart_hci_open;
	hdev->close    = btuart_hci_close;
	hdev->flush    = btuart_hci_flush;
	hdev->send     = btuart_hci_send_frame;
	hdev->destruct = btuart_hci_destruct;
	hdev->ioctl    = btuart_hci_ioctl;

	spin_lock_irqsave(&(info->lock), flags);

	/* Reset UART */
	outb(0, iobase + UART_MCR);

	/* Turn off interrupts */
	outb(0, iobase + UART_IER);

	/* Initialize UART */
	outb(UART_LCR_WLEN8, iobase + UART_LCR);	/* Reset DLAB */
	outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase + UART_MCR);

	/* Turn on interrupts */
	// outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);

	spin_unlock_irqrestore(&(info->lock), flags);

	btuart_change_speed(info, DEFAULT_BAUD_RATE);

	/* Timeout before it is safe to send the first HCI packet */
	msleep(1000);

	/* Register HCI device */
	if (hci_register_dev(hdev) < 0) {
		BT_ERR("Can't register HCI device");
		info->hdev = NULL;
		hci_free_dev(hdev);
		return -ENODEV;
	}

	return 0;
}
Example #6
static struct sk_buff_head *msm_ipc_router_build_msg(unsigned int num_sect,
					  struct iovec const *msg_sect,
					  size_t total_len)
{
	struct sk_buff_head *msg_head;
	struct sk_buff *msg;
	int i, copied, first = 1;
	int data_size = 0, request_size, offset;
	void *data;

	for (i = 0; i < num_sect; i++)
		data_size += msg_sect[i].iov_len;

	if (!data_size)
		return NULL;

	msg_head = kmalloc(sizeof(struct sk_buff_head), GFP_KERNEL);
	if (!msg_head) {
		pr_err("%s: cannot allocate skb_head\n", __func__);
		return NULL;
	}
	skb_queue_head_init(msg_head);

	for (copied = 1, i = 0; copied && (i < num_sect); i++) {
		data_size = msg_sect[i].iov_len;
		offset = 0;
		while (offset != msg_sect[i].iov_len) {
			request_size = data_size;
			if (first)
				request_size += IPC_ROUTER_HDR_SIZE;

			msg = alloc_skb(request_size, GFP_KERNEL);
			if (!msg) {
				if (request_size <= (PAGE_SIZE/2)) {
					pr_err("%s: cannot allocated skb\n",
						__func__);
					goto msg_build_failure;
				}
				data_size = data_size / 2;
				continue;
			}

			if (first) {
				skb_reserve(msg, IPC_ROUTER_HDR_SIZE);
				first = 0;
			}

			data = skb_put(msg, data_size);
			copied = !copy_from_user(msg->data,
					msg_sect[i].iov_base + offset,
					data_size);
			if (!copied) {
				pr_err("%s: copy_from_user failed\n",
					__func__);
				kfree_skb(msg);
				goto msg_build_failure;
			}
			skb_queue_tail(msg_head, msg);
			offset += data_size;
			data_size = msg_sect[i].iov_len - offset;
		}
	}
	return msg_head;

msg_build_failure:
	while (!skb_queue_empty(msg_head)) {
		msg = skb_dequeue(msg_head);
		kfree_skb(msg);
	}
	kfree(msg_head);
	return NULL;
}
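
/*
 * A minimal, self-contained sketch of the sk_buff queue pattern the
 * examples on this page share (assumes <linux/skbuff.h>):
 * skb_queue_head_init() sets up the spinlock and the empty list,
 * producers append with skb_queue_tail(), consumers drain with
 * skb_dequeue().
 */
static void skb_queue_demo(void)
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	skb_queue_head_init(&q);		/* lock + empty list */

	skb = alloc_skb(128, GFP_KERNEL);	/* demo payload */
	if (skb)
		skb_queue_tail(&q, skb);	/* producer side, takes q.lock */

	while ((skb = skb_dequeue(&q)))		/* consumer side */
		kfree_skb(skb);
}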
Example #7
File: sw.c  Project: AshishNamdev/linux
static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
{
	int err;
	u8 tid;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	char *fw_name = "rtlwifi/rtl8192defw.bin";

	rtlpriv->dm.dm_initialgain_enable = true;
	rtlpriv->dm.dm_flag = 0;
	rtlpriv->dm.disable_framebursting = false;
	rtlpriv->dm.thermalvalue = 0;
	rtlpriv->dm.useramask = true;

	/* dual mac */
	if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
		rtlpriv->phy.current_channel = 36;
	else
		rtlpriv->phy.current_channel = 1;

	if (rtlpriv->rtlhal.macphymode != SINGLEMAC_SINGLEPHY) {
		rtlpriv->rtlhal.disable_amsdu_8k = true;
		/* No long RX - reduce fragmentation */
		rtlpci->rxbuffersize = 4096;
	}

	rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);

	rtlpci->receive_config = (
			RCR_APPFCS
			| RCR_AMF
			| RCR_ADF
			| RCR_APP_MIC
			| RCR_APP_ICV
			| RCR_AICV
			| RCR_ACRC32
			| RCR_AB
			| RCR_AM
			| RCR_APM
			| RCR_APP_PHYST_RXFF
			| RCR_HTC_LOC_CTRL
	);

	rtlpci->irq_mask[0] = (u32) (
			IMR_ROK
			| IMR_VODOK
			| IMR_VIDOK
			| IMR_BEDOK
			| IMR_BKDOK
			| IMR_MGNTDOK
			| IMR_HIGHDOK
			| IMR_BDOK
			| IMR_RDU
			| IMR_RXFOVW
	);

	rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD);

	/* for LPS & IPS */
	rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
	if (!rtlpriv->psc.inactiveps)
		pr_info("Power Save off (module option)\n");
	if (!rtlpriv->psc.fwctrl_lps)
		pr_info("FW Power Save off (module option)\n");
	rtlpriv->psc.reg_fwctrl_lps = 3;
	rtlpriv->psc.reg_max_lps_awakeintvl = 5;
	/* for ASPM, you can close aspm through
	 * set const_support_pciaspm = 0 */
	rtl92d_init_aspm_vars(hw);

	if (rtlpriv->psc.reg_fwctrl_lps == 1)
		rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
	else if (rtlpriv->psc.reg_fwctrl_lps == 2)
		rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
	else if (rtlpriv->psc.reg_fwctrl_lps == 3)
		rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;

	/* for early mode */
	rtlpriv->rtlhal.earlymode_enable = false;
	for (tid = 0; tid < 8; tid++)
		skb_queue_head_init(&rtlpriv->mac80211.skb_waitq[tid]);

	/* for firmware buf */
	rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
	if (!rtlpriv->rtlhal.pfirmware) {
		pr_err("Can't alloc buffer for fw\n");
		return 1;
	}

	rtlpriv->max_fw_size = 0x8000;
	pr_info("Driver for Realtek RTL8192DE WLAN interface\n");
	pr_info("Loading firmware file %s\n", fw_name);

	/* request fw */
	err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
				      rtlpriv->io.dev, GFP_KERNEL, hw,
				      rtl_fw_cb);
	if (err) {
		pr_err("Failed to request firmware!\n");
		return 1;
	}

	return 0;
}
Example #8
File: usb.c  Project: 513855417/linux
static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
				    struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/;
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast +=  skb->len;
		}

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}
		/* static bcn for roaming */
		rtl_beacon_statistic(hw, skb);
	}
}

static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
				      struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/;
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast +=  skb->len;
		}

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}

		/* static bcn for roaming */
		rtl_beacon_statistic(hw, skb);

		if (likely(rtl_action_proc(hw, skb, false)))
			ieee80211_rx(hw, skb);
		else
			dev_kfree_skb_any(skb);
	} else {
		dev_kfree_skb_any(skb);
	}
}

static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct sk_buff *_skb;
	struct sk_buff_head rx_queue;
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	skb_queue_head_init(&rx_queue);
	if (rtlusb->usb_rx_segregate_hdl)
		rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
	WARN_ON(skb_queue_empty(&rx_queue));
	while (!skb_queue_empty(&rx_queue)) {
		_skb = skb_dequeue(&rx_queue);
		_rtl_usb_rx_process_agg(hw, _skb);
		ieee80211_rx(hw, _skb);
	}
}

#define __RX_SKB_MAX_QUEUED	64

static void _rtl_rx_work(unsigned long param)
{
	struct rtl_usb *rtlusb = (struct rtl_usb *)param;
	struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&rtlusb->rx_queue))) {
		if (unlikely(IS_USB_STOP(rtlusb))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		if (likely(!rtlusb->usb_rx_segregate_hdl)) {
			_rtl_usb_rx_process_noagg(hw, skb);
		} else {
			/* TO DO */
			_rtl_rx_pre_process(hw, skb);
			pr_err("rx agg not supported\n");
		}
	}
}

static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
					unsigned int len)
{
#if NET_IP_ALIGN != 0
	unsigned int padding = 0;
#endif

	/* make function no-op when possible */
	if (NET_IP_ALIGN == 0 || len < sizeof(*hdr))
		return 0;

#if NET_IP_ALIGN != 0
	/* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */
	/* TODO: deduplicate common code, define helper function instead? */

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);

		padding ^= NET_IP_ALIGN;

		/* Input might be invalid, avoid accessing memory outside
		 * the buffer.
		 */
		if ((unsigned long)qc - (unsigned long)hdr < len &&
		    *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			padding ^= NET_IP_ALIGN;
	}

	if (ieee80211_has_a4(hdr->frame_control))
		padding ^= NET_IP_ALIGN;

	return padding;
#endif
}
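
/*
 * Worked example of the XOR toggling above (NET_IP_ALIGN == 2): a plain
 * data frame has a 24-byte 802.11 header (4-byte aligned, padding 0);
 * QoS control adds 2 bytes (header 26, padding 2); a fourth address
 * adds 6 more (header 32, padding back to 0).  Each variable-length
 * field flips the required 2-byte pad, hence padding ^= NET_IP_ALIGN.
 */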

#define __RADIO_TAP_SIZE_RSV	32

static void _rtl_rx_completed(struct urb *_urb)
{
	struct rtl_usb *rtlusb = (struct rtl_usb *)_urb->context;
	struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int err = 0;

	if (unlikely(IS_USB_STOP(rtlusb)))
		goto free;

	if (likely(0 == _urb->status)) {
		unsigned int padding;
		struct sk_buff *skb;
		unsigned int qlen;
		unsigned int size = _urb->actual_length;
		struct ieee80211_hdr *hdr;

		if (size < RTL_RX_DESC_SIZE + sizeof(struct ieee80211_hdr)) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Too short packet from bulk IN! (len: %d)\n",
				 size);
			goto resubmit;
		}

		qlen = skb_queue_len(&rtlusb->rx_queue);
		if (qlen >= __RX_SKB_MAX_QUEUED) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Pending RX skbuff queue full! (qlen: %d)\n",
				 qlen);
			goto resubmit;
		}

		hdr = (void *)(_urb->transfer_buffer + RTL_RX_DESC_SIZE);
		padding = _rtl_rx_get_padding(hdr, size - RTL_RX_DESC_SIZE);

		skb = dev_alloc_skb(size + __RADIO_TAP_SIZE_RSV + padding);
		if (!skb) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Can't allocate skb for bulk IN!\n");
			goto resubmit;
		}

		_rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep);

		/* Make sure the payload data is 4 byte aligned. */
		skb_reserve(skb, padding);

		/* reserve some space for mac80211's radiotap */
		skb_reserve(skb, __RADIO_TAP_SIZE_RSV);

		memcpy(skb_put(skb, size), _urb->transfer_buffer, size);

		skb_queue_tail(&rtlusb->rx_queue, skb);
		tasklet_schedule(&rtlusb->rx_work_tasklet);

		goto resubmit;
	}

	switch (_urb->status) {
	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;
	default:
		break;
	}

resubmit:
	usb_anchor_urb(_urb, &rtlusb->rx_submitted);
	err = usb_submit_urb(_urb, GFP_ATOMIC);
	if (unlikely(err)) {
		usb_unanchor_urb(_urb);
		goto free;
	}
	return;

free:
	/* On some architectures, usb_free_coherent must not be called from
	 * hardirq context. Queue urb to cleanup list.
	 */
	usb_anchor_urb(_urb, &rtlusb->rx_cleanup_urbs);
}

#undef __RADIO_TAP_SIZE_RSV

static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
	struct urb *urb;

	usb_kill_anchored_urbs(&rtlusb->rx_submitted);

	tasklet_kill(&rtlusb->rx_work_tasklet);
	cancel_work_sync(&rtlpriv->works.lps_change_work);

	flush_workqueue(rtlpriv->works.rtl_wq);
	destroy_workqueue(rtlpriv->works.rtl_wq);

	skb_queue_purge(&rtlusb->rx_queue);

	while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
		usb_free_coherent(urb->dev, urb->transfer_buffer_length,
				urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
	}
}

static int _rtl_usb_receive(struct ieee80211_hw *hw)
{
	struct urb *urb;
	int err;
	int i;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	WARN_ON(0 == rtlusb->rx_urb_num);
	/* 1600 == 1514 + max WLAN header + rtk info */
	WARN_ON(rtlusb->rx_max_size < 1600);

	for (i = 0; i < rtlusb->rx_urb_num; i++) {
		err = -ENOMEM;
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Failed to alloc URB!!\n");
			goto err_out;
		}

		err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
		if (err < 0) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Failed to prep_rx_urb!!\n");
			usb_free_urb(urb);
			goto err_out;
		}

		usb_anchor_urb(urb, &rtlusb->rx_submitted);
		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err)
			goto err_out;
		usb_free_urb(urb);
	}
	return 0;

err_out:
	usb_kill_anchored_urbs(&rtlusb->rx_submitted);
	_rtl_usb_cleanup_rx(hw);
	return err;
}

static int rtl_usb_start(struct ieee80211_hw *hw)
{
	int err;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	err = rtlpriv->cfg->ops->hw_init(hw);
	if (!err) {
		rtl_init_rx_config(hw);

		/* Enable software */
		SET_USB_START(rtlusb);
		/* should after adapter start and interrupt enable. */
		set_hal_start(rtlhal);

		/* Start bulk IN */
		err = _rtl_usb_receive(hw);
	}

	return err;
}
Example #9
static int __init bridge_init(void)
{
	struct data_bridge	*dev;
	int			ret;
	int			i = 0;

	ret = ctrl_bridge_init();
	if (ret)
		return ret;

	bridge_wq  = create_singlethread_workqueue("mdm_bridge");
	if (!bridge_wq) {
		pr_err("%s: Unable to create workqueue:bridge\n", __func__);
		ret = -ENOMEM;
		goto free_ctrl;
	}

	for (i = 0; i < MAX_BRIDGE_DEVICES; i++) {

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev) {
			err("%s: unable to allocate dev\n", __func__);
			ret = -ENOMEM;
			goto error;
		}

		dev->wq = bridge_wq;

		
		dev->name = "none";

		init_usb_anchor(&dev->tx_active);
		init_usb_anchor(&dev->rx_active);

		INIT_LIST_HEAD(&dev->rx_idle);

		skb_queue_head_init(&dev->rx_done);

		INIT_WORK(&dev->kevent, defer_kevent);
		INIT_WORK(&dev->process_rx_w, data_bridge_process_rx);

		__dev[i] = dev;
	}

	ret = usb_register(&bridge_driver);
	if (ret) {
		err("%s: unable to register mdm_bridge driver", __func__);
		goto error;
	}

	data_bridge_debugfs_init();

	return 0;

error:
	while (--i >= 0) {
		kfree(__dev[i]);
		__dev[i] = NULL;
	}
	destroy_workqueue(bridge_wq);
free_ctrl:
	ctrl_bridge_exit();
	return ret;
}
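
/*
 * Worked note on the unwind above: if kzalloc() fails on the i == 3
 * iteration, we reach the error label with i == 3, so "while (--i >= 0)"
 * frees __dev[2], __dev[1] and __dev[0] and NULLs those slots; the
 * failed slot itself was never stored.
 */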
Example #10
static int octeon_mgmt_probe(struct platform_device *pdev)
{
    struct net_device *netdev;
    struct octeon_mgmt *p;
    const __be32 *data;
    const u8 *mac;
    struct resource *res_mix;
    struct resource *res_agl;
    struct resource *res_agl_prt_ctl;
    int len;
    int result;

    netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
    if (netdev == NULL)
        return -ENOMEM;

    SET_NETDEV_DEV(netdev, &pdev->dev);

    platform_set_drvdata(pdev, netdev);
    p = netdev_priv(netdev);
    netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
                   OCTEON_MGMT_NAPI_WEIGHT);

    p->netdev = netdev;
    p->dev = &pdev->dev;
    p->has_rx_tstamp = false;

    data = of_get_property(pdev->dev.of_node, "cell-index", &len);
    if (data && len == sizeof(*data)) {
        p->port = be32_to_cpup(data);
    } else {
        dev_err(&pdev->dev, "no 'cell-index' property\n");
        result = -ENXIO;
        goto err;
    }

    snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

    result = platform_get_irq(pdev, 0);
    if (result < 0)
        goto err;

    p->irq = result;

    res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (res_mix == NULL) {
        dev_err(&pdev->dev, "no 'reg' resource\n");
        result = -ENXIO;
        goto err;
    }

    res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
    if (res_agl == NULL) {
        dev_err(&pdev->dev, "no 'reg' resource\n");
        result = -ENXIO;
        goto err;
    }

    res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
    if (res_agl_prt_ctl == NULL) {
        dev_err(&pdev->dev, "no 'reg' resource\n");
        result = -ENXIO;
        goto err;
    }

    p->mix_phys = res_mix->start;
    p->mix_size = resource_size(res_mix);
    p->agl_phys = res_agl->start;
    p->agl_size = resource_size(res_agl);
    p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
    p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);


    if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
                                 res_mix->name)) {
        dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
                res_mix->name);
        result = -ENXIO;
        goto err;
    }

    if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
                                 res_agl->name)) {
        result = -ENXIO;
        dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
                res_agl->name);
        goto err;
    }

    if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
                                 p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
        result = -ENXIO;
        dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
                res_agl_prt_ctl->name);
        goto err;
    }

    p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
    p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
    p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
                                       p->agl_prt_ctl_size);
    spin_lock_init(&p->lock);

    skb_queue_head_init(&p->tx_list);
    skb_queue_head_init(&p->rx_list);
    tasklet_init(&p->tx_clean_tasklet,
                 octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

    netdev->priv_flags |= IFF_UNICAST_FLT;

    netdev->netdev_ops = &octeon_mgmt_ops;
    netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

    mac = of_get_mac_address(pdev->dev.of_node);

    if (mac)
        memcpy(netdev->dev_addr, mac, ETH_ALEN);
    else
        eth_hw_addr_random(netdev);

    p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);

    result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    if (result)
        goto err;

    netif_carrier_off(netdev);
    result = register_netdev(netdev);
    if (result)
        goto err;

    dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
    return 0;

err:
    free_netdev(netdev);
    return result;
}
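
The three devm_request_mem_region()/devm_ioremap() pairs above can, on kernels that provide it, be collapsed into devm_ioremap_resource(), which claims and maps the region in one call and reports failure through an error pointer. A sketch for the MIX block only, assuming devm_ioremap_resource() is available:

    void __iomem *mix;

    mix = devm_ioremap_resource(&pdev->dev, res_mix);
    if (IS_ERR(mix)) {
        result = PTR_ERR(mix);	/* failure is already logged by the core */
        goto err;
    }
    p->mix = (u64)mix;
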
Example #11
static int dtl1_open(dtl1_info_t *info)
{
    unsigned long flags;
    unsigned int iobase = info->p_dev->resource[0]->start;
    struct hci_dev *hdev;

    spin_lock_init(&(info->lock));

    skb_queue_head_init(&(info->txq));

    info->rx_state = RECV_WAIT_NSH;
    info->rx_count = NSHL;
    info->rx_skb = NULL;

    set_bit(XMIT_WAITING, &(info->tx_state));

    /* Initialize HCI device */
    hdev = hci_alloc_dev();
    if (!hdev) {
        BT_ERR("Can't allocate HCI device");
        return -ENOMEM;
    }

    info->hdev = hdev;

    hdev->bus = HCI_PCCARD;
    hdev->driver_data = info;
    SET_HCIDEV_DEV(hdev, &info->p_dev->dev);

    hdev->open     = dtl1_hci_open;
    hdev->close    = dtl1_hci_close;
    hdev->flush    = dtl1_hci_flush;
    hdev->send     = dtl1_hci_send_frame;
    hdev->destruct = dtl1_hci_destruct;
    hdev->ioctl    = dtl1_hci_ioctl;

    hdev->owner = THIS_MODULE;

    spin_lock_irqsave(&(info->lock), flags);

    /* Reset UART */
    outb(0, iobase + UART_MCR);

    /* Turn off interrupts */
    outb(0, iobase + UART_IER);

    /* Initialize UART */
    outb(UART_LCR_WLEN8, iobase + UART_LCR);	/* Reset DLAB */
    outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase + UART_MCR);

    info->ri_latch = inb(info->p_dev->resource[0]->start + UART_MSR)
                     & UART_MSR_RI;

    /* Turn on interrupts */
    outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);

    spin_unlock_irqrestore(&(info->lock), flags);

    /* Timeout before it is safe to send the first HCI packet */
    msleep(2000);

    /* Register HCI device */
    if (hci_register_dev(hdev) < 0) {
        BT_ERR("Can't register HCI device");
        info->hdev = NULL;
        hci_free_dev(hdev);
        return -ENODEV;
    }

    return 0;
}
Example #12
/*
 *	Add a new route to a node, and in the process add the node and the
 *	neighbour if it is new.
 */
static int rose_add_node(struct rose_route_struct *rose_route, struct net_device *dev)
{
	struct rose_node  *rose_node, *rose_tmpn, *rose_tmpp;
	struct rose_neigh *rose_neigh;
	unsigned long flags;
	int i;

	for (rose_node = rose_node_list; rose_node != NULL; rose_node = rose_node->next)
		if ((rose_node->mask == rose_route->mask) && (rosecmpm(&rose_route->address, &rose_node->address, rose_route->mask) == 0))
			break;

	if (rose_node != NULL && rose_node->loopback)
		return -EINVAL;

	for (rose_neigh = rose_neigh_list; rose_neigh != NULL; rose_neigh = rose_neigh->next)
		if (ax25cmp(&rose_route->neighbour, &rose_neigh->callsign) == 0 && rose_neigh->dev == dev)
			break;

	if (rose_neigh == NULL) {
		if ((rose_neigh = kmalloc(sizeof(*rose_neigh), GFP_ATOMIC)) == NULL)
			return -ENOMEM;

		rose_neigh->callsign  = rose_route->neighbour;
		rose_neigh->digipeat  = NULL;
		rose_neigh->ax25      = NULL;
		rose_neigh->dev       = dev;
		rose_neigh->count     = 0;
		rose_neigh->use       = 0;
		rose_neigh->dce_mode  = 0;
		rose_neigh->loopback  = 0;
		rose_neigh->number    = rose_neigh_no++;
		rose_neigh->restarted = 0;

		skb_queue_head_init(&rose_neigh->queue);

		init_timer(&rose_neigh->ftimer);
		init_timer(&rose_neigh->t0timer);

		if (rose_route->ndigis != 0) {
			if ((rose_neigh->digipeat = kmalloc(sizeof(ax25_digi), GFP_KERNEL)) == NULL) {
				kfree(rose_neigh);
				return -ENOMEM;
			}

			rose_neigh->digipeat->ndigi      = rose_route->ndigis;
			rose_neigh->digipeat->lastrepeat = -1;

			for (i = 0; i < rose_route->ndigis; i++) {
				rose_neigh->digipeat->calls[i]    = rose_route->digipeaters[i];
				rose_neigh->digipeat->repeated[i] = 0;
			}
		}

		save_flags(flags); cli();
		rose_neigh->next = rose_neigh_list;
		rose_neigh_list  = rose_neigh;
		restore_flags(flags);
	}

	/*
	 * This is a new node to be inserted into the list. Find where it needs
	 * to be inserted into the list, and insert it. We want to be sure
	 * to order the list in descending order of mask size to ensure that
	 * later when we are searching this list the first match will be the
	 * best match.
	 */
	if (rose_node == NULL) {
		rose_tmpn = rose_node_list;
		rose_tmpp = NULL;

		while (rose_tmpn != NULL) {
			if (rose_tmpn->mask > rose_route->mask) {
				rose_tmpp = rose_tmpn;
				rose_tmpn = rose_tmpn->next;
			} else {
				break;
			}
		}

		/* create new node */
		if ((rose_node = kmalloc(sizeof(*rose_node), GFP_ATOMIC)) == NULL)
			return -ENOMEM;

		rose_node->address      = rose_route->address;
		rose_node->mask         = rose_route->mask;
		rose_node->count        = 1;
		rose_node->loopback     = 0;
		rose_node->neighbour[0] = rose_neigh;

		save_flags(flags); cli();

		if (rose_tmpn == NULL) {
			if (rose_tmpp == NULL) {	/* Empty list */
				rose_node_list  = rose_node;
				rose_node->next = NULL;
			} else {
				rose_tmpp->next = rose_node;
				rose_node->next = NULL;
			}
		} else {
			if (rose_tmpp == NULL) {	/* 1st node */
				rose_node->next = rose_node_list;
				rose_node_list  = rose_node;
			} else {
				rose_tmpp->next = rose_node;
				rose_node->next = rose_tmpn;
			}
		}

		restore_flags(flags);

		rose_neigh->count++;

		return 0;
	}

	/* We have space, slot it in */
	if (rose_node->count < 3) {
		rose_node->neighbour[rose_node->count] = rose_neigh;
		rose_node->count++;
		rose_neigh->count++;
	}

	return 0;
}
Example #13
static void * hci_usb_probe(struct usb_device *udev, unsigned int ifnum, const struct usb_device_id *id)
{
	struct usb_endpoint_descriptor *bulk_out_ep, *intr_in_ep, *bulk_in_ep;
	struct usb_interface_descriptor *uif;
	struct usb_endpoint_descriptor *ep;
	struct hci_usb *husb;
	struct hci_dev *hdev;
	int i, size, pipe;
	__u8 * buf;

	DBG("udev %p ifnum %d", udev, ifnum);

	/* Check device signature */
	if ((udev->descriptor.bDeviceClass    != HCI_DEV_CLASS)   ||
	    (udev->descriptor.bDeviceSubClass != HCI_DEV_SUBCLASS)||
	    (udev->descriptor.bDeviceProtocol != HCI_DEV_PROTOCOL) )
		return NULL;

	MOD_INC_USE_COUNT;

	uif = &udev->actconfig->interface[ifnum].altsetting[0];

	if (uif->bNumEndpoints != 3) {
		DBG("Wrong number of endpoints %d", uif->bNumEndpoints);
		MOD_DEC_USE_COUNT;
		return NULL;
	}

	bulk_out_ep = intr_in_ep = bulk_in_ep = NULL;

	/* Find endpoints that we need */
	for ( i = 0; i < uif->bNumEndpoints; ++i) {
		ep = &uif->endpoint[i];

		switch (ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
			case USB_ENDPOINT_XFER_BULK:
				if (ep->bEndpointAddress & USB_DIR_IN)
					bulk_in_ep  = ep;
				else
					bulk_out_ep = ep;
				break;

			case USB_ENDPOINT_XFER_INT:
				intr_in_ep = ep;
				break;
		}
	}

	if (!bulk_in_ep || !bulk_out_ep || !intr_in_ep) {
		DBG("Endpoints not found: %p %p %p", bulk_in_ep, bulk_out_ep, intr_in_ep);
		MOD_DEC_USE_COUNT;
		return NULL;
	}

	if (!(husb = kmalloc(sizeof(struct hci_usb), GFP_KERNEL))) {
		ERR("Can't allocate: control structure");
		MOD_DEC_USE_COUNT;
		return NULL;
	}

	memset(husb, 0, sizeof(struct hci_usb));

	husb->udev = udev;
	husb->bulk_out_ep_addr = bulk_out_ep->bEndpointAddress;

	if (!(husb->ctrl_urb = usb_alloc_urb(0))) {
		ERR("Can't allocate: control URB");
		goto probe_error;
	}

	if (!(husb->write_urb = usb_alloc_urb(0))) {
		ERR("Can't allocate: write URB");
		goto probe_error;
	}

	if (!(husb->read_urb = usb_alloc_urb(0))) {
		ERR("Can't allocate: read URB");
		goto probe_error;
	}

	ep = bulk_in_ep;
	pipe = usb_rcvbulkpipe(udev, ep->bEndpointAddress);
	size = HCI_MAX_FRAME_SIZE;

	if (!(buf = kmalloc(size, GFP_KERNEL))) {
		ERR("Can't allocate: read buffer");
		goto probe_error;
	}

	FILL_BULK_URB(husb->read_urb, udev, pipe, buf, size, hci_usb_bulk_read, husb);
	husb->read_urb->transfer_flags |= USB_QUEUE_BULK;

	ep = intr_in_ep;
	pipe = usb_rcvintpipe(udev, ep->bEndpointAddress);
	size = usb_maxpacket(udev, pipe, usb_pipeout(pipe));

	if (!(husb->intr_urb = usb_alloc_urb(0))) {
		ERR("Can't allocate: interrupt URB");
		goto probe_error;
	}

	if (!(buf = kmalloc(size, GFP_KERNEL))) {
		ERR("Can't allocate: interrupt buffer");
		goto probe_error;
	}

	FILL_INT_URB(husb->intr_urb, udev, pipe, buf, size, hci_usb_intr, husb, ep->bInterval);

	skb_queue_head_init(&husb->tx_ctrl_q);
	skb_queue_head_init(&husb->tx_write_q);

	/* Initialize and register HCI device */
	hdev = &husb->hdev;

	hdev->type = HCI_USB;
	hdev->driver_data = husb;

	hdev->open  = hci_usb_open;
	hdev->close = hci_usb_close;
	hdev->flush = hci_usb_flush;
	hdev->send	= hci_usb_send_frame;

	if (hci_register_dev(hdev) < 0) {
		ERR("Can't register HCI device %s", hdev->name);
		goto probe_error;
	}

	return husb;

probe_error:
	hci_usb_free_bufs(husb);
	kfree(husb);
	MOD_DEC_USE_COUNT;
	return NULL;
}
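
This probe targets the 2.4-era USB API: usb_alloc_urb() takes no GFP flags and URBs are initialized with the FILL_BULK_URB/FILL_INT_URB macros. On 2.6 and later the same setup is spelled with an allocation flag and the lowercase helpers; a sketch of just the bulk-read URB under that newer API (completion-handler signatures also differ between the two eras):

	husb->read_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!husb->read_urb)
		goto probe_error;

	usb_fill_bulk_urb(husb->read_urb, udev,
			  usb_rcvbulkpipe(udev, bulk_in_ep->bEndpointAddress),
			  buf, HCI_MAX_FRAME_SIZE, hci_usb_bulk_read, husb);
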
Example #14
File: sw.c  Project: elvismt/rtlwifi_new
int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
{
	int err = 0;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 tid;
	char *fw_name;

	rtl8188ee_bt_reg_init(hw);
	rtlpriv->dm.dm_initialgain_enable = 1;
	rtlpriv->dm.dm_flag = 0;
	rtlpriv->dm.disable_framebursting = 0;
	rtlpriv->dm.thermalvalue = 0;
	rtlpci->transmit_config = CFENDFORM | BIT(15);

	/* 2.4G band only (no 5G, unlike the 88ce); single-MAC single-PHY */
	rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
	rtlpriv->rtlhal.bandset = BAND_ON_2_4G;
	rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;

	rtlpci->receive_config = (RCR_APPFCS |
				  RCR_APP_MIC |
				  RCR_APP_ICV |
				  RCR_APP_PHYST_RXFF |
				  RCR_HTC_LOC_CTRL |
				  RCR_AMF |
				  RCR_ACF |
				  RCR_ADF |
				  RCR_AICV |
				  RCR_ACRC32 |
				  RCR_AB |
				  RCR_AM |
				  RCR_APM |
				  0);

	rtlpci->irq_mask[0] =
				(u32)(IMR_PSTIMEOUT	|
				IMR_HSISR_IND_ON_INT	|
				IMR_C2HCMD		|
				IMR_HIGHDOK		|
				IMR_MGNTDOK		|
				IMR_BKDOK		|
				IMR_BEDOK		|
				IMR_VIDOK		|
				IMR_VODOK		|
				IMR_RDU			|
				IMR_ROK			|
				0);
	rtlpci->irq_mask[1] = (u32) (IMR_RXFOVW | 0);
	rtlpci->sys_irq_mask = (u32) (HSIMR_PDN_INT_EN | HSIMR_RON_INT_EN);

	/* for debug level */
	rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
	/* for LPS & IPS */
	rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
	rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
	rtlpriv->cfg->mod_params->sw_crypto =
		rtlpriv->cfg->mod_params->sw_crypto;
	rtlpriv->cfg->mod_params->disable_watchdog =
		rtlpriv->cfg->mod_params->disable_watchdog;
	if (rtlpriv->cfg->mod_params->disable_watchdog)
		pr_info("watchdog disabled\n");
	if (!rtlpriv->psc.inactiveps)
		pr_info("rtl8188ee: Power Save off (module option)\n");
	if (!rtlpriv->psc.fwctrl_lps)
		pr_info("rtl8188ee: FW Power Save off (module option)\n");
	rtlpriv->psc.reg_fwctrl_lps = 3;
	rtlpriv->psc.reg_max_lps_awakeintvl = 5;
	/* for ASPM: it can be disabled by setting
	 * const_support_pciaspm = 0
	 */
	rtl88e_init_aspm_vars(hw);

	if (rtlpriv->psc.reg_fwctrl_lps == 1)
		rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
	else if (rtlpriv->psc.reg_fwctrl_lps == 2)
		rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
	else if (rtlpriv->psc.reg_fwctrl_lps == 3)
		rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;

	/* for firmware buf */
	rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
	if (!rtlpriv->rtlhal.pfirmware) {
		pr_info("Can't alloc buffer for fw.\n");
		return 1;
	}

	fw_name = "rtlwifi/rtl8188efw.bin";
	rtlpriv->max_fw_size = 0x8000;
	pr_info("Using firmware %s\n", fw_name);
	err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
				      rtlpriv->io.dev, GFP_KERNEL, hw,
				      rtl_fw_cb);
	if (err) {
		pr_info("Failed to request firmware!\n");
		return 1;
	}

	/* for early mode */
	rtlpriv->rtlhal.earlymode_enable = false;
	rtlpriv->rtlhal.max_earlymode_num = 10;
	for (tid = 0; tid < 8; tid++)
		skb_queue_head_init(&rtlpriv->mac80211.skb_waitq[tid]);

	/*low power */
	rtlpriv->psc.low_power_enable = false;
	if (rtlpriv->psc.low_power_enable) {
		init_timer(&rtlpriv->works.fw_clockoff_timer);
		setup_timer(&rtlpriv->works.fw_clockoff_timer,
			    rtl88ee_fw_clk_off_timer_callback,
			    (unsigned long)hw);
	}

	init_timer(&rtlpriv->works.fast_antenna_training_timer);
	setup_timer(&rtlpriv->works.fast_antenna_training_timer,
		    rtl88e_dm_fast_antenna_training_callback,
			(unsigned long)hw);
	return err;
}
Example #15
static int __init brf6150_init(void)
{
	struct brf6150_info *info;
	int irq, err;

	info = kmalloc(sizeof(struct brf6150_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	memset(info, 0, sizeof(struct brf6150_info));

	brf6150_device.dev.driver_data = info;
	init_completion(&info->init_completion);
	init_completion(&info->fw_completion);
	info->pm_enabled = 0;
	info->rx_pm_enabled = 0;
	info->tx_pm_enabled = 0;
	info->garbage_bytes = 0;
	tasklet_init(&info->tx_task, brf6150_tx_tasklet, (unsigned long)info);
	spin_lock_init(&info->lock);
	skb_queue_head_init(&info->txq);
	init_timer(&info->pm_timer);
	info->pm_timer.function = brf6150_pm_timer;
	info->pm_timer.data = (unsigned long)info;
	exit_info = NULL;

	info->btinfo = omap_get_config(OMAP_TAG_NOKIA_BT, struct omap_bluetooth_config);
	if (info->btinfo == NULL) {
		kfree(info);
		return -ENODEV;
	}

	NBT_DBG("RESET gpio: %d\n", info->btinfo->reset_gpio);
	NBT_DBG("BTWU gpio: %d\n", info->btinfo->bt_wakeup_gpio);
	NBT_DBG("HOSTWU gpio: %d\n", info->btinfo->host_wakeup_gpio);
	NBT_DBG("Uart: %d\n", info->btinfo->bt_uart);
	NBT_DBG("sysclk: %d\n", info->btinfo->bt_sysclk);

	err = omap_request_gpio(info->btinfo->reset_gpio);
	if (err < 0)
	{
		printk(KERN_WARNING "Cannot get GPIO line %d", 
		       info->btinfo->reset_gpio);
		kfree(info);
		return err;
	}

	err = omap_request_gpio(info->btinfo->bt_wakeup_gpio);
	if (err < 0)
	{
		printk(KERN_WARNING "Cannot get GPIO line 0x%d",
		       info->btinfo->bt_wakeup_gpio);
		omap_free_gpio(info->btinfo->reset_gpio);
		kfree(info);
		return err;
	}

	err = omap_request_gpio(info->btinfo->host_wakeup_gpio);
	if (err < 0)
	{
		printk(KERN_WARNING "Cannot get GPIO line %d",
		       info->btinfo->host_wakeup_gpio);
		omap_free_gpio(info->btinfo->reset_gpio);
		omap_free_gpio(info->btinfo->bt_wakeup_gpio);
		kfree(info);
		return err;
	}

	omap_set_gpio_direction(info->btinfo->reset_gpio, 0);
	omap_set_gpio_direction(info->btinfo->bt_wakeup_gpio, 0);
	omap_set_gpio_direction(info->btinfo->host_wakeup_gpio, 1);
	set_irq_type(OMAP_GPIO_IRQ(info->btinfo->host_wakeup_gpio), IRQT_NOEDGE);

	switch (info->btinfo->bt_uart) {
	case 1:
		irq = INT_UART1;
		info->uart_ck = clk_get(NULL, "uart1_ck");
		info->uart_base = io_p2v((unsigned long)OMAP_UART1_BASE);
		break;
	case 2:
		irq = INT_UART2;
		info->uart_ck = clk_get(NULL, "uart2_ck");
		info->uart_base = io_p2v((unsigned long)OMAP_UART2_BASE);
		break;
	case 3:
		irq = INT_UART3;
		info->uart_ck = clk_get(NULL, "uart3_ck");
		info->uart_base = io_p2v((unsigned long)OMAP_UART3_BASE);
		break;
	default:
		printk(KERN_ERR "No uart defined\n");
		goto cleanup;
	}

	info->irq = irq;
	err = request_irq(irq, brf6150_interrupt, 0, "brf6150", (void *)info);
	if (err < 0) {
		printk(KERN_ERR "brf6150: unable to get IRQ %d\n", irq);
		goto cleanup;
	}

	err = request_irq(OMAP_GPIO_IRQ(info->btinfo->host_wakeup_gpio),
			brf6150_wakeup_interrupt, 0, "brf6150_wkup", (void *)info);
	if (err < 0) {
		printk(KERN_ERR "brf6150: unable to get wakeup IRQ %d\n",
				OMAP_GPIO_IRQ(info->btinfo->host_wakeup_gpio));
		free_irq(irq, (void *)info);
		goto cleanup;
	}

	/* Register with LDM */
	if (platform_device_register(&brf6150_device)) {
		printk(KERN_ERR "failed to register brf6150 device\n");
		err = -ENODEV;
		goto cleanup_irq;
	}
	/* Register the driver with LDM */
	if (driver_register(&brf6150_driver)) {
		printk(KERN_WARNING "failed to register brf6150 driver\n");
		platform_device_unregister(&brf6150_device);
		err = -ENODEV;
		goto cleanup_irq;
	}

	if (brf6150_register_hdev(info) < 0) {
		printk(KERN_WARNING "failed to register brf6150 hci device\n");
		platform_device_unregister(&brf6150_device);
		driver_unregister(&brf6150_driver);
		err = -ENODEV;
		goto cleanup_irq;
	}

	exit_info = info;
	return 0;

cleanup_irq:
	free_irq(irq, (void *)info);
	free_irq(OMAP_GPIO_IRQ(info->btinfo->host_wakeup_gpio), (void *)info);
cleanup:
	omap_free_gpio(info->btinfo->reset_gpio);
	omap_free_gpio(info->btinfo->bt_wakeup_gpio);
	omap_free_gpio(info->btinfo->host_wakeup_gpio);
	kfree(info);

	return err;
}
Example #16
static int __devinit fnic_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct Scsi_Host *host;
	struct fc_lport *lp;
	struct fnic *fnic;
	mempool_t *pool;
	int err;
	int i;
	unsigned long flags;

	
	host = scsi_host_alloc(&fnic_host_template,
			       sizeof(struct fc_lport) + sizeof(struct fnic));
	if (!host) {
		printk(KERN_ERR PFX "Unable to alloc SCSI host\n");
		err = -ENOMEM;
		goto err_out;
	}
	lp = shost_priv(host);
	lp->host = host;
	fnic = lport_priv(lp);
	fnic->lport = lp;

	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
		 host->host_no);

	host->transportt = fnic_fc_transport;

	err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to alloc shared tag map\n");
		goto err_out_free_hba;
	}

	
	pci_set_drvdata(pdev, fnic);

	fnic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI device, aborting.\n");
		goto err_out_free_hba;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI resources, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "No usable DMA configuration "
				     "aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 32-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 40-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	}

	
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "BAR0 not memory-map'able, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
	fnic->bar0.len = pci_resource_len(pdev, 0);

	if (!fnic->bar0.vaddr) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot memory-map BAR0 res hdr, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
	if (!fnic->vdev) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC registration failed, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	err = vnic_dev_init(fnic->vdev, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC get MAC addr failed \n");
		goto err_out_dev_close;
	}

	
	err = fnic_get_vnic_config(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Get vNIC configuration failed, "
			     "aborting.\n");
		goto err_out_dev_close;
	}
	host->max_lun = fnic->config.luns_per_tgt;
	host->max_id = FNIC_MAX_FCP_TARGET;

	fnic_get_res_counts(fnic);

	err = fnic_set_intr_mode(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to set intr mode, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	err = fnic_request_intr(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to request irq.\n");
		goto err_out_clear_intr;
	}

	err = fnic_alloc_vnic_resources(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc vNIC resources, "
			     "aborting.\n");
		goto err_out_free_intr;
	}


	
	spin_lock_init(&fnic->fnic_lock);

	for (i = 0; i < FNIC_WQ_MAX; i++)
		spin_lock_init(&fnic->wq_lock[i]);

	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
		spin_lock_init(&fnic->wq_copy_lock[i]);
		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
		fnic->fw_ack_recd[i] = 0;
		fnic->fw_ack_index[i] = -1;
	}

	for (i = 0; i < FNIC_IO_LOCKS; i++)
		spin_lock_init(&fnic->io_req_lock[i]);

	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
	if (!fnic->io_req_pool)
		goto err_out_free_resources;

	pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
			      fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	if (!pool)
		goto err_out_free_ioreq_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;

	pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
			      fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	if (!pool)
		goto err_out_free_dflt_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;

	
	fnic->vlan_hw_insert = 1;
	fnic->vlan_id = 0;

	fnic->flogi_oxid = FC_XID_UNKNOWN;
	fnic->flogi = NULL;
	fnic->flogi_resp = NULL;
	fnic->state = FNIC_IN_FC_MODE;

	
	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);

	
	err = fnic_notify_set(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_max_pool;
	}

	
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		setup_timer(&fnic->notify_timer,
			    fnic_notify_timer, (unsigned long)fnic);

	
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "fnic_alloc_rq_frame can't alloc "
				     "frame\n");
			goto err_out_free_rq_buf;
		}
	}

	
	err = scsi_add_host(lp->host, &pdev->dev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic: scsi_add_host failed...exiting\n");
		goto err_out_free_rq_buf;
	}

	

	lp->link_up = 0;
	lp->tt = fnic_transport_template;

	lp->max_retry_count = fnic->config.flogi_retries;
	lp->max_rport_retry_count = fnic->config.plogi_retries;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_CONF_COMPL);
	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
		lp->service_params |= FCP_SPPF_RETRY;

	lp->boot_time = jiffies;
	lp->e_d_tov = fnic->config.ed_tov;
	lp->r_a_tov = fnic->config.ra_tov;
	lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
	fc_set_wwnn(lp, fnic->config.node_wwn);
	fc_set_wwpn(lp, fnic->config.port_wwn);

	fc_lport_init(lp);
	fc_exch_init(lp);
	fc_elsct_init(lp);
	fc_rport_init(lp);
	fc_disc_init(lp);

	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
			       FCPIO_HOST_EXCH_RANGE_END, NULL)) {
		err = -ENOMEM;
		goto err_out_remove_scsi_host;
	}

	fc_lport_config(lp);

	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
		       sizeof(struct fc_frame_header))) {
		err = -EINVAL;
		goto err_out_free_exch_mgr;
	}
	fc_host_maxframe_size(lp->host) = lp->mfs;

	sprintf(fc_host_symbolic_name(lp->host),
		DRV_NAME " v" DRV_VERSION " over %s", fnic->name);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_add_tail(&fnic->list, &fnic_list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	INIT_WORK(&fnic->link_work, fnic_handle_link);
	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
	skb_queue_head_init(&fnic->frame_queue);

	
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_enable(&fnic->wq[i]);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_enable(&fnic->rq[i]);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_enable(&fnic->wq_copy[i]);

	fc_fabric_login(lp);

	vnic_dev_enable(fnic->vdev);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_unmask(&fnic->intr[i]);

	fnic_notify_timer_start(fnic);

	return 0;

err_out_free_exch_mgr:
	fc_exch_mgr_free(lp);
err_out_remove_scsi_host:
	fc_remove_host(fnic->lport->host);
	scsi_remove_host(fnic->lport->host);
err_out_free_rq_buf:
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
	mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
	fnic_free_vnic_resources(fnic);
err_out_free_intr:
	fnic_free_intr(fnic);
err_out_clear_intr:
	fnic_clear_intr_mode(fnic);
err_out_dev_close:
	vnic_dev_close(fnic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
	fnic_iounmap(fnic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_hba:
	scsi_host_put(lp->host);
err_out:
	return err;
}
Example #17
static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	const struct firmware *firmware;
	struct usb_device *udev = interface_to_usbdev(intf);
	struct usb_host_endpoint *bulk_out_ep;
	struct usb_host_endpoint *bulk_in_ep;
	struct hci_dev *hdev;
	struct bfusb_data *data;

	BT_DBG("intf %p id %p", intf, id);

	/* Check number of endpoints */
	if (intf->cur_altsetting->desc.bNumEndpoints < 2)
		return -EIO;

	bulk_out_ep = &intf->cur_altsetting->endpoint[0];
	bulk_in_ep  = &intf->cur_altsetting->endpoint[1];

	if (!bulk_out_ep || !bulk_in_ep) {
		BT_ERR("Bulk endpoints not found");
		goto done;
	}

	/* Initialize control structure and load firmware */
	data = kzalloc(sizeof(struct bfusb_data), GFP_KERNEL);
	if (!data) {
		BT_ERR("Can't allocate memory for control structure");
		goto done;
	}

	data->udev = udev;
	data->bulk_in_ep    = bulk_in_ep->desc.bEndpointAddress;
	data->bulk_out_ep   = bulk_out_ep->desc.bEndpointAddress;
	data->bulk_pkt_size = le16_to_cpu(bulk_out_ep->desc.wMaxPacketSize);

	rwlock_init(&data->lock);

	data->reassembly = NULL;

	skb_queue_head_init(&data->transmit_q);
	skb_queue_head_init(&data->pending_q);
	skb_queue_head_init(&data->completed_q);

	if (request_firmware(&firmware, "bfubase.frm", &udev->dev) < 0) {
		BT_ERR("Firmware request failed");
		goto error;
	}

	BT_DBG("firmware data %p size %zu", firmware->data, firmware->size);

	if (bfusb_load_firmware(data, firmware->data, firmware->size) < 0) {
		BT_ERR("Firmware loading failed");
		goto release;
	}

	release_firmware(firmware);

	/* Initialize and register HCI device */
	hdev = hci_alloc_dev();
	if (!hdev) {
		BT_ERR("Can't allocate HCI device");
		goto error;
	}

	data->hdev = hdev;

	hdev->bus = HCI_USB;
	hdev->driver_data = data;
	SET_HCIDEV_DEV(hdev, &intf->dev);

	hdev->open     = bfusb_open;
	hdev->close    = bfusb_close;
	hdev->flush    = bfusb_flush;
	hdev->send     = bfusb_send_frame;
	hdev->destruct = bfusb_destruct;
	hdev->ioctl    = bfusb_ioctl;

	if (hci_register_dev(hdev) < 0) {
		BT_ERR("Can't register HCI device");
		hci_free_dev(hdev);
		goto error;
	}

	usb_set_intfdata(intf, data);

	return 0;

release:
	release_firmware(firmware);

error:
	kfree(data);

done:
	return -EIO;
}
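
The firmware handling above follows the usual request/consume/release shape: request_firmware() either fails, in which case there is nothing to free, or hands back a buffer that must be released on every later path, which is why both the success flow and the release: label call release_firmware(). Reduced to a sketch (load_it is a hypothetical consumer standing in for bfusb_load_firmware):

	const struct firmware *fw;
	int err;

	if (request_firmware(&fw, "bfubase.frm", &udev->dev) < 0)
		return -EIO;			/* request failed: nothing held */

	err = load_it(fw->data, fw->size);	/* use the buffer while held */
	release_firmware(fw);			/* always release, pass or fail */
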
Example #18
File: fnic_main.c  Project: Lyude/linux
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *host;
	struct fc_lport *lp;
	struct fnic *fnic;
	mempool_t *pool;
	int err;
	int i;
	unsigned long flags;

	/*
	 * Allocate SCSI Host and set up association between host,
	 * local port, and fnic
	 */
	lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
	if (!lp) {
		printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
		err = -ENOMEM;
		goto err_out;
	}
	host = lp->host;
	fnic = lport_priv(lp);
	fnic->lport = lp;
	fnic->ctlr.lp = lp;

	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
		 host->host_no);

	host->transportt = fnic_fc_transport;

	err = fnic_stats_debugfs_init(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
				"Failed to initialize debugfs for stats\n");
		fnic_stats_debugfs_remove(fnic);
	}

	/* Setup PCI resources */
	pci_set_drvdata(pdev, fnic);

	fnic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI device, aborting.\n");
		goto err_out_free_hba;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI resources, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 64-bit first, and
	 * fall back to 32-bit.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "No usable DMA configuration "
				     "aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 32-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 64-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "BAR0 not memory-map'able, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
	fnic->bar0.len = pci_resource_len(pdev, 0);

	if (!fnic->bar0.vaddr) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot memory-map BAR0 res hdr, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
	if (!fnic->vdev) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC registration failed, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	err = vnic_dev_init(fnic->vdev, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC get MAC addr failed \n");
		goto err_out_dev_close;
	}
	/* set data_src for point-to-point mode and to keep it non-zero */
	memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);

	/* Get vNIC configuration */
	err = fnic_get_vnic_config(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Get vNIC configuration failed, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	/* Configure Maximum Outstanding IO reqs*/
	if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
		host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
					max_t(u32, FNIC_MIN_IO_REQ,
					fnic->config.io_throttle_count));
	}
	fnic->fnic_max_tag_id = host->can_queue;

	host->max_lun = fnic->config.luns_per_tgt;
	host->max_id = FNIC_MAX_FCP_TARGET;
	host->max_cmd_len = FCOE_MAX_CMD_LEN;

	fnic_get_res_counts(fnic);

	err = fnic_set_intr_mode(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to set intr mode, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	err = fnic_alloc_vnic_resources(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc vNIC resources, "
			     "aborting.\n");
		goto err_out_clear_intr;
	}


	/* initialize all fnic locks */
	spin_lock_init(&fnic->fnic_lock);

	for (i = 0; i < FNIC_WQ_MAX; i++)
		spin_lock_init(&fnic->wq_lock[i]);

	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
		spin_lock_init(&fnic->wq_copy_lock[i]);
		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
		fnic->fw_ack_recd[i] = 0;
		fnic->fw_ack_index[i] = -1;
	}

	for (i = 0; i < FNIC_IO_LOCKS; i++)
		spin_lock_init(&fnic->io_req_lock[i]);

	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
	if (!fnic->io_req_pool)
		goto err_out_free_resources;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	if (!pool)
		goto err_out_free_ioreq_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	if (!pool)
		goto err_out_free_dflt_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;

	/* setup vlan config, hw inserts vlan header */
	fnic->vlan_hw_insert = 1;
	fnic->vlan_id = 0;

	/* Initialize the FIP fcoe_ctrl struct */
	fnic->ctlr.send = fnic_eth_send;
	fnic->ctlr.update_mac = fnic_update_mac;
	fnic->ctlr.get_src_addr = fnic_get_mac;
	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware supports FIP\n");
		/* enable directed and multicast */
		vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
		vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
		fnic->set_vlan = fnic_set_vlan;
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
		timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0);
		spin_lock_init(&fnic->vlans_lock);
		INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
		INIT_WORK(&fnic->event_work, fnic_handle_event);
		skb_queue_head_init(&fnic->fip_frame_queue);
		INIT_LIST_HEAD(&fnic->evlist);
		INIT_LIST_HEAD(&fnic->vlans);
	} else {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware uses non-FIP mode\n");
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
		fnic->ctlr.state = FIP_ST_NON_FIP;
	}
	fnic->state = FNIC_IN_FC_MODE;

	atomic_set(&fnic->in_flight, 0);
	fnic->state_flags = FNIC_FLAGS_NONE;

	/* Enable hardware stripping of vlan header on ingress */
	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);

	/* Setup notification buffer area */
	err = fnic_notify_set(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_max_pool;
	}

	/* Setup notify timer when using MSI interrupts */
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		timer_setup(&fnic->notify_timer, fnic_notify_timer, 0);

	/* allocate RQ buffers and post them to RQ*/
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "fnic_alloc_rq_frame can't alloc "
				     "frame\n");
			goto err_out_free_rq_buf;
		}
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add host to SCSI
	 */
	err = scsi_add_host(lp->host, &pdev->dev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic: scsi_add_host failed...exiting\n");
		goto err_out_free_rq_buf;
	}

	/* Start local port initialization */

	lp->link_up = 0;

	lp->max_retry_count = fnic->config.flogi_retries;
	lp->max_rport_retry_count = fnic->config.plogi_retries;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_CONF_COMPL);
	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
		lp->service_params |= FCP_SPPF_RETRY;

	lp->boot_time = jiffies;
	lp->e_d_tov = fnic->config.ed_tov;
	lp->r_a_tov = fnic->config.ra_tov;
	lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
	fc_set_wwnn(lp, fnic->config.node_wwn);
	fc_set_wwpn(lp, fnic->config.port_wwn);

	fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);

	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
			       FCPIO_HOST_EXCH_RANGE_END, NULL)) {
		err = -ENOMEM;
		goto err_out_remove_scsi_host;
	}

	fc_lport_init_stats(lp);
	fnic->stats_reset_time = jiffies;

	fc_lport_config(lp);

	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
		       sizeof(struct fc_frame_header))) {
		err = -EINVAL;
		goto err_out_free_exch_mgr;
	}
	fc_host_maxframe_size(lp->host) = lp->mfs;
	fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;

	sprintf(fc_host_symbolic_name(lp->host),
		DRV_NAME " v" DRV_VERSION " over %s", fnic->name);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_add_tail(&fnic->list, &fnic_list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	INIT_WORK(&fnic->link_work, fnic_handle_link);
	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
	skb_queue_head_init(&fnic->frame_queue);
	skb_queue_head_init(&fnic->tx_queue);

	/* Enable all queues */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_enable(&fnic->wq[i]);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_enable(&fnic->rq[i]);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_enable(&fnic->wq_copy[i]);

	fc_fabric_login(lp);

	vnic_dev_enable(fnic->vdev);

	err = fnic_request_intr(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to request irq.\n");
		goto err_out_free_exch_mgr;
	}

	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_unmask(&fnic->intr[i]);

	fnic_notify_timer_start(fnic);

	return 0;

err_out_free_exch_mgr:
	fc_exch_mgr_free(lp);
err_out_remove_scsi_host:
	fc_remove_host(lp->host);
	scsi_remove_host(lp->host);
err_out_free_rq_buf:
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
	mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
	fnic_free_vnic_resources(fnic);
err_out_clear_intr:
	fnic_clear_intr_mode(fnic);
err_out_dev_close:
	vnic_dev_close(fnic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
	fnic_iounmap(fnic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_hba:
	fnic_stats_debugfs_remove(fnic);
	scsi_host_put(lp->host);
err_out:
	return err;
}
Example #19
int
usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	struct driver_info		*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;
	struct usb_driver 	*driver = to_usb_driver(udev->dev.driver);

	/* usbnet already took a usb runtime-pm reference, so autosuspend
	 * has to be enabled for the usb interface as well; otherwise
	 * usb_autopm_get_interface may fail when USB_SUSPEND (runtime PM)
	 * is enabled.
	 */
	if (!driver->supports_autosuspend) {
		driver->supports_autosuspend = 1;
		pm_runtime_enable(&udev->dev);
	}

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		dev_dbg (&udev->dev, "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev (udev);
	interface = udev->cur_altsetting;

	usb_get_dev (xdev);

	status = -ENOMEM;

	// set up our own records
	net = alloc_etherdev(sizeof(*dev));
	if (!net) {
		dbg ("can't kmalloc dev");
		goto out;
	}

	/* netdev_printk() needs this so do it as early as possible */
	SET_NETDEV_DEV(net, &udev->dev);

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init (&dev->rxq);
	skb_queue_head_init (&dev->txq);
	skb_queue_head_init (&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	dev->bh.func = usbnet_bh;
	dev->bh.data = (unsigned long) dev;
	INIT_WORK (&dev->kevent, kevent);
	init_usb_anchor(&dev->deferred);
	dev->delay.function = usbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer (&dev->delay);
	mutex_init (&dev->phy_mutex);

	dev->net = net;
	strcpy (net->name, "usb%d");
	memcpy (net->dev_addr, node_id, sizeof node_id);

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;
#if 0
// dma_supported() is deeply broken on almost all architectures
	// possible with some EHCI controllers
	if (dma_supported (&udev->dev, DMA_BIT_MASK(64)))
		net->features |= NETIF_F_HIGHDMA;
#endif

	net->netdev_ops = &usbnet_netdev_ops;
	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &usbnet_ethtool_ops;

	// allow device-specific bind/init procedures
	// NOTE net->name still not usable ...
	if (info->bind) {
		status = info->bind (dev, udev);
		if (status < 0)
			goto out1;

		// heuristic:  "usb%d" for links we know are two-host,
		// else "eth%d" when there's reasonable doubt.  userspace
		// can rename the link if it knows better.
		if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
		    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
		     (net->dev_addr [0] & 0x02) == 0))
			strcpy (net->name, "eth%d");
		/* WLAN devices should always be named "wlan%d" */
		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
			strcpy(net->name, "wlan%d");
		/* WWAN devices should always be named "wwan%d" */
		if ((dev->driver_info->flags & FLAG_WWAN) != 0)
			strcpy(net->name, "wwan%d");
		/* RMNET devices should always be named "rmnet%d" */
		if ((dev->driver_info->flags & FLAG_RMNET) != 0)
			strcpy(net->name, "rmnet%d");

		/* maybe the remote can't receive an Ethernet MTU */
		if (net->mtu > (dev->hard_mtu - net->hard_header_len))
			net->mtu = dev->hard_mtu - net->hard_header_len;
	} else if (!info->in || !info->out)
		status = usbnet_get_endpoints (dev, udev);
	else {
		dev->in = usb_rcvbulkpipe (xdev, info->in);
		dev->out = usb_sndbulkpipe (xdev, info->out);
		if (!(info->flags & FLAG_NO_SETINT))
			status = usb_set_interface (xdev,
				interface->desc.bInterfaceNumber,
				interface->desc.bAlternateSetting);
		else
			status = 0;

	}
	if (status >= 0 && dev->status)
		status = init_status (dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);

	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wlan_type);
	if ((dev->driver_info->flags & FLAG_WWAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wwan_type);

	status = register_netdev (net);
	if (status)
		goto out3;
	netif_info(dev, probe, dev->net,
		   "register '%s' at usb-%s-%s, %s, %pM\n",
		   udev->dev.driver->name,
		   xdev->bus->bus_name, xdev->devpath,
		   dev->driver_info->description,
		   net->dev_addr);

	// ok, it's ready to go.
	usb_set_intfdata (udev, dev);

	netif_device_attach (net);

	if (dev->driver_info->flags & FLAG_LINK_INTR)
		netif_carrier_off(net);

	return 0;

out3:
	if (info->unbind)
		info->unbind (dev, udev);
out1:
	free_netdev(net);
out:
	usb_put_dev(xdev);
	return status;
}
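
The delay timer above is set up the pre-v4.15 way: assign .function and .data, then call init_timer(). Example #18 already uses the replacement API, and the equivalent conversion here would look roughly like this sketch (usbnet_bh_timer is a hypothetical wrapper, needed because timer_setup() callbacks receive a struct timer_list * and recover their container with from_timer()):

	static void usbnet_bh_timer(struct timer_list *t)
	{
		struct usbnet *dev = from_timer(dev, t, delay);

		usbnet_bh((unsigned long)dev);	/* delegate to the old handler */
	}

	/* in usbnet_probe(), replacing the function/data/init_timer trio */
	timer_setup(&dev->delay, usbnet_bh_timer, 0);
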
Example #20
File: net.c  Project: ctos/bpi
static io_return_t
device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
	     dev_mode_t mode, char *name, device_t *devp)
{
  io_return_t err = D_SUCCESS;
  ipc_port_t notify;
  struct ifnet *ifp;
  struct linux_device *dev;
  struct net_data *nd;

  /* Search for the device.  */
  for (dev = dev_base; dev; dev = dev->next)
    if (dev->base_addr
	&& dev->base_addr != 0xffe0
	&& !strcmp (name, dev->name))
      break;
  if (!dev)
    return D_NO_SUCH_DEVICE;

  /* Allocate and initialize device data if this is the first open.  */
  nd = dev->net_data;
  if (!nd)
    {
      dev->net_data = nd = ((struct net_data *)
			    kalloc (sizeof (struct net_data)));
      if (!nd)
	{
	  err = D_NO_MEMORY;
	  goto out;
	}
      nd->dev = dev;
      nd->device.emul_data = nd;
      nd->device.emul_ops = &linux_net_emulation_ops;
      nd->port = ipc_port_alloc_kernel ();
      if (nd->port == IP_NULL)
	{
	  err = KERN_RESOURCE_SHORTAGE;
	  goto out;
	}
      ipc_kobject_set (nd->port, (ipc_kobject_t) & nd->device, IKOT_DEVICE);
      notify = ipc_port_make_sonce (nd->port);
      ip_lock (nd->port);
      ipc_port_nsrequest (nd->port, 1, notify, &notify);
      assert (notify == IP_NULL);

      ifp = &nd->ifnet;
      ifp->if_unit = dev->name[strlen (dev->name) - 1] - '0';
      ifp->if_flags = IFF_UP | IFF_RUNNING;
      ifp->if_mtu = dev->mtu;
      ifp->if_header_size = dev->hard_header_len;
      ifp->if_header_format = dev->type;
      ifp->if_address_size = dev->addr_len;
      ifp->if_address = dev->dev_addr;
      if_init_queues (ifp);

      if (dev->open)
	{
	  linux_intr_pri = SPL6;
	  if ((*dev->open) (dev))
	    err = D_NO_SUCH_DEVICE;
	}

    out:
      if (err)
	{
	  if (nd)
	    {
	      if (nd->port != IP_NULL)
		{
		  ipc_kobject_set (nd->port, IKO_NULL, IKOT_NONE);
		  ipc_port_dealloc_kernel (nd->port);
		}
	      kfree ((vm_offset_t) nd, sizeof (struct net_data));
	      nd = NULL;
	      dev->net_data = NULL;
	    }
	}
      else
	{
	  /* IPv6 heavily relies on multicasting (especially router and
	     neighbor solicits and advertisements), so enable reception of
	     those multicast packets by setting `LINUX_IFF_ALLMULTI'.  */
	  dev->flags |= LINUX_IFF_UP | LINUX_IFF_RUNNING | LINUX_IFF_ALLMULTI;
	  skb_queue_head_init (&dev->buffs[0]);

	  if (dev->set_multicast_list)
	    dev->set_multicast_list (dev);
	}
      if (IP_VALID (reply_port))
	ds_device_open_reply (reply_port, reply_port_type,
			      err, dev_to_port (nd));
      return MIG_NO_REPLY;
    }

  *devp = &nd->device;
  return D_SUCCESS;
}
Example #21
/*
 * State machine for state 3, Connected State.
 * The handling of the timer(s) is in file nr_timer.c
 * Handling of state 0 and connection release is in netrom.c.
 */
static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
	struct sk_buff_head temp_queue;
	struct sk_buff *skbn;
	unsigned short save_vr;
	unsigned short nr, ns;
	int queued = 0;

	nr = skb->data[18];
	ns = skb->data[17];

	switch (frametype) {

		case NR_CONNREQ:
			nr_write_internal(sk, NR_CONNACK);
			break;

		case NR_DISCREQ:
			nr_write_internal(sk, NR_DISCACK);
			nr_disconnect(sk, 0);
			break;

		case NR_CONNACK | NR_CHOKE_FLAG:
		case NR_DISCACK:
			nr_disconnect(sk, ECONNRESET);
			break;

		case NR_INFOACK:
		case NR_INFOACK | NR_CHOKE_FLAG:
		case NR_INFOACK | NR_NAK_FLAG:
		case NR_INFOACK | NR_NAK_FLAG | NR_CHOKE_FLAG:
			if (frametype & NR_CHOKE_FLAG) {
				sk->protinfo.nr->condition |= NR_COND_PEER_RX_BUSY;
				nr_start_t4timer(sk);
			} else {
				sk->protinfo.nr->condition &= ~NR_COND_PEER_RX_BUSY;
				nr_stop_t4timer(sk);
			}
			if (!nr_validate_nr(sk, nr)) {
				break;
			}
			if (frametype & NR_NAK_FLAG) {
				nr_frames_acked(sk, nr);
				nr_send_nak_frame(sk);
			} else {
				if (sk->protinfo.nr->condition & NR_COND_PEER_RX_BUSY) {
					nr_frames_acked(sk, nr);
				} else {
					nr_check_iframes_acked(sk, nr);
				}
			}
			break;

		case NR_INFO:
		case NR_INFO | NR_NAK_FLAG:
		case NR_INFO | NR_CHOKE_FLAG:
		case NR_INFO | NR_MORE_FLAG:
		case NR_INFO | NR_NAK_FLAG | NR_CHOKE_FLAG:
		case NR_INFO | NR_CHOKE_FLAG | NR_MORE_FLAG:
		case NR_INFO | NR_NAK_FLAG | NR_MORE_FLAG:
		case NR_INFO | NR_NAK_FLAG | NR_CHOKE_FLAG | NR_MORE_FLAG:
			if (frametype & NR_CHOKE_FLAG) {
				sk->protinfo.nr->condition |= NR_COND_PEER_RX_BUSY;
				nr_start_t4timer(sk);
			} else {
				sk->protinfo.nr->condition &= ~NR_COND_PEER_RX_BUSY;
				nr_stop_t4timer(sk);
			}
			if (nr_validate_nr(sk, nr)) {
				if (frametype & NR_NAK_FLAG) {
					nr_frames_acked(sk, nr);
					nr_send_nak_frame(sk);
				} else {
					if (sk->protinfo.nr->condition & NR_COND_PEER_RX_BUSY) {
						nr_frames_acked(sk, nr);
					} else {
						nr_check_iframes_acked(sk, nr);
					}
				}
			}
			queued = 1;
			skb_queue_head(&sk->protinfo.nr->reseq_queue, skb);
			if (sk->protinfo.nr->condition & NR_COND_OWN_RX_BUSY)
				break;
			skb_queue_head_init(&temp_queue);
			do {
				save_vr = sk->protinfo.nr->vr;
				while ((skbn = skb_dequeue(&sk->protinfo.nr->reseq_queue)) != NULL) {
					ns = skbn->data[17];
					if (ns == sk->protinfo.nr->vr) {
						if (nr_queue_rx_frame(sk, skbn, frametype & NR_MORE_FLAG) == 0) {
							sk->protinfo.nr->vr = (sk->protinfo.nr->vr + 1) % NR_MODULUS;
						} else {
							sk->protinfo.nr->condition |= NR_COND_OWN_RX_BUSY;
							skb_queue_tail(&temp_queue, skbn);
						}
					} else if (nr_in_rx_window(sk, ns)) {
						skb_queue_tail(&temp_queue, skbn);
					} else {
						kfree_skb(skbn);
					}
				}
				while ((skbn = skb_dequeue(&temp_queue)) != NULL) {
					skb_queue_tail(&sk->protinfo.nr->reseq_queue, skbn);
				}
			} while (save_vr != sk->protinfo.nr->vr);
			/*
			 * Window is full, ack it immediately.
			 */
			if (((sk->protinfo.nr->vl + sk->protinfo.nr->window) % NR_MODULUS) == sk->protinfo.nr->vr) {
				nr_enquiry_response(sk);
			} else {
				if (!(sk->protinfo.nr->condition & NR_COND_ACK_PENDING)) {
					sk->protinfo.nr->condition |= NR_COND_ACK_PENDING;
					nr_start_t2timer(sk);
				}
			}
			break;

		default:
			break;
	}

	return queued;
}
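
The NR_INFO arm above drains the resequencing queue with a fixed-point loop: each pass pulls every queued frame once, delivers those that are now in sequence (advancing vr), parks the rest, requeues them, and repeats until a pass completes with vr unchanged (save_vr == vr). The same pattern stripped to a sketch; seq_of, deliver, expected, reseq and MODULUS are hypothetical stand-ins, and the real code additionally drops frames that fall outside the receive window:

	struct sk_buff_head holding;
	struct sk_buff *skb;
	int progress;

	skb_queue_head_init(&holding);
	do {
		progress = 0;
		while ((skb = skb_dequeue(&reseq)) != NULL) {
			if (seq_of(skb) == expected) {
				deliver(skb);
				expected = (expected + 1) % MODULUS;
				progress = 1;
			} else {
				skb_queue_tail(&holding, skb);
			}
		}
		while ((skb = skb_dequeue(&holding)) != NULL)
			skb_queue_tail(&reseq, skb);	/* retry next pass */
	} while (progress);
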
Example #22
File: net.c  Project: ctos/bpi
/* Do any initialization required for network devices.  */
void
linux_net_emulation_init ()
{
  skb_queue_head_init (&skb_done_list);
}
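
Every example in this collection calls skb_queue_head_init() before its queue is used; the call initializes both the circular list head and the embedded spinlock, so enqueuing on an uninitialized sk_buff_head corrupts memory and takes an uninitialized lock. The shared lifecycle as a sketch (some_skb and consume are hypothetical stand-ins):

	struct sk_buff_head q;
	struct sk_buff *skb;

	skb_queue_head_init(&q);		/* must precede any other queue op */
	skb_queue_tail(&q, some_skb);		/* producer side, takes q.lock */
	while ((skb = skb_dequeue(&q)) != NULL)	/* consumer side */
		consume(skb);
	skb_queue_purge(&q);			/* frees anything left queued */
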
Example #23
static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *newsk;
	struct pep_sock *newpn, *pn = pep_sk(sk);
	struct pnpipehdr *hdr;
	struct sockaddr_pn dst;
	u16 peer_type;
	u8 pipe_handle, enabled, n_sb;

	if (!pskb_pull(skb, sizeof(*hdr) + 4))
		return -EINVAL;

	hdr = pnp_hdr(skb);
	pipe_handle = hdr->pipe_handle;
	switch (hdr->state_after_connect) {
	case PN_PIPE_DISABLE:
		enabled = 0;
		break;
	case PN_PIPE_ENABLE:
		enabled = 1;
		break;
	default:
		pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM);
		return -EINVAL;
	}
	peer_type = hdr->other_pep_type << 8;

	if (unlikely(sk->sk_state != TCP_LISTEN) || sk_acceptq_is_full(sk)) {
		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
		return -ENOBUFS;
	}

	/* Parse sub-blocks (options) */
	n_sb = hdr->data[4];
	while (n_sb > 0) {
		u8 type, buf[1], len = sizeof(buf);
		const u8 *data = pep_get_sb(skb, &type, &len, buf);

		if (data == NULL)
			return -EINVAL;
		switch (type) {
		case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
			if (len < 1)
				return -EINVAL;
			peer_type = (peer_type & 0xff00) | data[0];
			break;
		}
		n_sb--;
	}

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	/* Create a new to-be-accepted sock */
	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_ATOMIC, sk->sk_prot);
	if (!newsk) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	sock_init_data(NULL, newsk);
	newsk->sk_state = TCP_SYN_RECV;
	newsk->sk_backlog_rcv = pipe_do_rcv;
	newsk->sk_protocol = sk->sk_protocol;
	newsk->sk_destruct = pipe_destruct;

	newpn = pep_sk(newsk);
	pn_skb_get_dst_sockaddr(skb, &dst);
	newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
	newpn->pn_sk.resource = pn->pn_sk.resource;
	skb_queue_head_init(&newpn->ctrlreq_queue);
	newpn->pipe_handle = pipe_handle;
	atomic_set(&newpn->tx_credits, 0);
	newpn->peer_type = peer_type;
	newpn->rx_credits = 0;
	newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
	newpn->init_enable = enabled;

	BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
	skb_queue_head(&newsk->sk_receive_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, 0);

	sk_acceptq_added(sk);
	sk_add_node(newsk, &pn->ackq);
	return 0;
}
Example #24
static int __init cvm_oct_init_module(void)
{
    int num_interfaces;
    int interface;
    int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
    int qos;

    octeon_mdiobus_force_mod_depencency();
    pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);

    if (OCTEON_IS_MODEL(OCTEON_CN52XX))
        cvm_oct_mac_addr_offset = 2; /* First two are the mgmt ports. */
    else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
        cvm_oct_mac_addr_offset = 1; /* First one is the mgmt port. */
    else
        cvm_oct_mac_addr_offset = 0;

    cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
    if (cvm_oct_poll_queue == NULL) {
        pr_err("octeon-ethernet: Cannot create workqueue");
        return -ENOMEM;
    }

    cvm_oct_configure_common_hw();

    cvmx_helper_initialize_packet_io_global();

    /* Change the input group for all ports before input is enabled */
    num_interfaces = cvmx_helper_get_number_of_interfaces();
    for (interface = 0; interface < num_interfaces; interface++) {
        int num_ports = cvmx_helper_ports_on_interface(interface);
        int port;

        for (port = cvmx_helper_get_ipd_port(interface, 0);
                port < cvmx_helper_get_ipd_port(interface, num_ports);
                port++) {
            union cvmx_pip_prt_tagx pip_prt_tagx;
            pip_prt_tagx.u64 =
                cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
            pip_prt_tagx.s.grp = pow_receive_group;
            cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
                           pip_prt_tagx.u64);
        }
    }

    cvmx_helper_ipd_and_packet_input_enable();

    memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

    /*
     * Initialize the FAU used for counting packet buffers that
     * need to be freed.
     */
    cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

    /* Initialize the FAU used for counting tx SKBs that need to be freed */
    cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

    if (pow_send_group != -1) {
        struct net_device *dev;
        pr_info("\tConfiguring device for POW only access\n");
        dev = alloc_etherdev(sizeof(struct octeon_ethernet));
        if (dev) {
            /* Initialize the device private structure. */
            struct octeon_ethernet *priv = netdev_priv(dev);

            dev->netdev_ops = &cvm_oct_pow_netdev_ops;
            priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
            priv->port = CVMX_PIP_NUM_INPUT_PORTS;
            priv->queue = -1;
            strcpy(dev->name, "pow%d");
            for (qos = 0; qos < 16; qos++)
                skb_queue_head_init(&priv->tx_free_list[qos]);

            if (register_netdev(dev) < 0) {
                pr_err("Failed to register ethernet device for POW\n");
                free_netdev(dev);
            } else {
                cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
                pr_info("%s: POW send group %d, receive group %d\n",
                        dev->name, pow_send_group,
                        pow_receive_group);
            }
        } else {
            pr_err("Failed to allocate ethernet device for POW\n");
        }
    }

    num_interfaces = cvmx_helper_get_number_of_interfaces();
    for (interface = 0; interface < num_interfaces; interface++) {
        cvmx_helper_interface_mode_t imode =
            cvmx_helper_interface_get_mode(interface);
        int num_ports = cvmx_helper_ports_on_interface(interface);
        int port;

        for (port = cvmx_helper_get_ipd_port(interface, 0);
                port < cvmx_helper_get_ipd_port(interface, num_ports);
                port++) {
            struct octeon_ethernet *priv;
            struct net_device *dev =
                alloc_etherdev(sizeof(struct octeon_ethernet));
            if (!dev) {
                pr_err("Failed to allocate ethernet device for port %d\n", port);
                continue;
            }

            /* Initialize the device private structure. */
            priv = netdev_priv(dev);

            INIT_DELAYED_WORK(&priv->port_periodic_work,
                              cvm_oct_periodic_worker);
            priv->imode = imode;
            priv->port = port;
            priv->queue = cvmx_pko_get_base_queue(priv->port);
            priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
            for (qos = 0; qos < 16; qos++)
                skb_queue_head_init(&priv->tx_free_list[qos]);
            for (qos = 0; qos < cvmx_pko_get_num_queues(port);
                    qos++)
                cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

            switch (priv->imode) {

            /* These types don't support ports to IPD/PKO */
            case CVMX_HELPER_INTERFACE_MODE_DISABLED:
            case CVMX_HELPER_INTERFACE_MODE_PCIE:
            case CVMX_HELPER_INTERFACE_MODE_PICMG:
                break;

            case CVMX_HELPER_INTERFACE_MODE_NPI:
                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                strcpy(dev->name, "npi%d");
                break;

            case CVMX_HELPER_INTERFACE_MODE_XAUI:
                dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
                strcpy(dev->name, "xaui%d");
                break;

            case CVMX_HELPER_INTERFACE_MODE_LOOP:
                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                strcpy(dev->name, "loop%d");
                break;

            case CVMX_HELPER_INTERFACE_MODE_SGMII:
                dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
                strcpy(dev->name, "eth%d");
                break;

            case CVMX_HELPER_INTERFACE_MODE_SPI:
                dev->netdev_ops = &cvm_oct_spi_netdev_ops;
                strcpy(dev->name, "spi%d");
                break;

            case CVMX_HELPER_INTERFACE_MODE_RGMII:
            case CVMX_HELPER_INTERFACE_MODE_GMII:
                dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
                strcpy(dev->name, "eth%d");
                break;
            }

            if (!dev->netdev_ops) {
                free_netdev(dev);
            } else if (register_netdev(dev) < 0) {
                pr_err("Failed to register ethernet device "
                       "for interface %d, port %d\n",
                       interface, priv->port);
                free_netdev(dev);
            } else {
                cvm_oct_device[priv->port] = dev;
                fau -=
                    cvmx_pko_get_num_queues(priv->port) *
                    sizeof(uint32_t);
                queue_delayed_work(cvm_oct_poll_queue,
                                   &priv->port_periodic_work, HZ);
            }
        }
    }

    cvm_oct_tx_initialize();
    cvm_oct_rx_initialize();

    /*
     * 150 us: about 10 1500-byte packets at 1GE.
     */
    cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

    queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

    return 0;
}
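Each port above follows the same allocate/register/free discipline: every alloc_etherdev() must end in either a successful register_netdev() or a free_netdev(), never a bare kfree(). A condensed sketch of that pattern (toy_* names are illustrative, not part of the driver):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static struct net_device *toy_register_port(const struct net_device_ops *ops)
{
	struct net_device *dev = alloc_etherdev(0);

	if (!dev)
		return NULL;

	dev->netdev_ops = ops;
	strcpy(dev->name, "eth%d");	/* "%d" is resolved at register time */

	if (register_netdev(dev) < 0) {
		free_netdev(dev);	/* pairs with alloc_etherdev() */
		return NULL;
	}
	return dev;
}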
Example #25
int	rtl8188eu_init_recv_priv(_adapter *padapter)
{
    struct recv_priv	*precvpriv = &padapter->recvpriv;
    int	i, res = _SUCCESS;
    struct recv_buf *precvbuf;

#ifdef CONFIG_RECV_THREAD_MODE
    _rtw_init_sema(&precvpriv->recv_sema, 0);//will be removed
    _rtw_init_sema(&precvpriv->terminate_recvthread_sema, 0);//will be removed
#endif

#ifdef PLATFORM_LINUX
    tasklet_init(&precvpriv->recv_tasklet,
                 (void(*)(unsigned long))rtl8188eu_recv_tasklet,
                 (unsigned long)padapter);
#endif

#ifdef CONFIG_USB_INTERRUPT_IN_PIPE
#ifdef PLATFORM_LINUX
    precvpriv->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
    if (precvpriv->int_in_urb == NULL) {
        DBG_8192C("alloc_urb for interrupt in endpoint fail !!!!\n");
    }
#endif
    precvpriv->int_in_buf = rtw_zmalloc(sizeof(INTERRUPT_MSG_FORMAT_EX));
    if (precvpriv->int_in_buf == NULL) {
        DBG_8192C("alloc_mem for interrupt in endpoint fail !!!!\n");
    }
#endif

    // init recv_buf
    _rtw_init_queue(&precvpriv->free_recv_buf_queue);

#ifdef CONFIG_USE_USB_BUFFER_ALLOC_RX
    _rtw_init_queue(&precvpriv->recv_buf_pending_queue);
#endif	// CONFIG_USE_USB_BUFFER_ALLOC_RX

    precvpriv->pallocated_recv_buf = rtw_zmalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4);
    if (precvpriv->pallocated_recv_buf == NULL) {
        res = _FAIL;
        RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("alloc recv_buf fail!\n"));
        goto exit;
    }
    _rtw_memset(precvpriv->pallocated_recv_buf, 0, NR_RECVBUFF * sizeof(struct recv_buf) + 4);

    precvpriv->precv_buf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(precvpriv->pallocated_recv_buf), 4);
    //precvpriv->precv_buf = precvpriv->pallocated_recv_buf + 4 -
    //						((uint) (precvpriv->pallocated_recv_buf) &(4-1));


    precvbuf = (struct recv_buf *)precvpriv->precv_buf;

    for (i = 0; i < NR_RECVBUFF; i++) {
        _rtw_init_listhead(&precvbuf->list);

        _rtw_spinlock_init(&precvbuf->recvbuf_lock);

        precvbuf->alloc_sz = MAX_RECVBUF_SZ;

        res = rtw_os_recvbuf_resource_alloc(padapter, precvbuf);
        if (res == _FAIL)
            break;

        precvbuf->ref_cnt = 0;
        precvbuf->adapter = padapter;


        //rtw_list_insert_tail(&precvbuf->list, &(precvpriv->free_recv_buf_queue.queue));

        precvbuf++;

    }

    precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;

#ifdef PLATFORM_LINUX

    skb_queue_head_init(&precvpriv->rx_skb_queue);

#ifdef CONFIG_PREALLOC_RECV_SKB
    {
        int i;
        SIZE_PTR tmpaddr = 0;
        SIZE_PTR alignment = 0;
        struct sk_buff *pskb = NULL;

        skb_queue_head_init(&precvpriv->free_recv_skb_queue);

        for(i=0; i<NR_PREALLOC_RECV_SKB; i++)
        {

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) // http://www.mail-archive.com/[email protected]/msg17214.html
            pskb = dev_alloc_skb(MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
#else
            pskb = netdev_alloc_skb(padapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
#endif

            if (pskb) {
                pskb->dev = padapter->pnetdev;

                tmpaddr = (SIZE_PTR)pskb->data;
                alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
                skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));

                skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
            }

            pskb = NULL;

        }
    }
#endif

#endif

exit:

    return res;

}
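The preallocation loop above over-allocates each skb by RECVBUFF_ALIGN_SZ and then skb_reserve()s the residue so skb->data lands on an aligned boundary. A standalone sketch of the trick, with a hypothetical toy_* name; note it also skips the reserve when the buffer is already aligned, which the loop above does not:

#include <linux/skbuff.h>

/* align must be a power of two */
static struct sk_buff *toy_alloc_aligned_skb(unsigned int size,
					     unsigned int align)
{
	struct sk_buff *skb = dev_alloc_skb(size + align);
	unsigned long rem;

	if (!skb)
		return NULL;

	rem = (unsigned long)skb->data & (align - 1);
	if (rem)
		skb_reserve(skb, align - rem);	/* advance data to boundary */
	return skb;
}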
Example #26
File: ipsec_ocf.c  Project: dkg/libreswan
static void ipsec_ocf_queue_init(void)
{
	skb_queue_head_init(&ipsec_ocf_skbq);
	tasklet_init(&ipsec_ocf_task, ipsec_ocf_skbq_process,
		     (unsigned long) 0);
}
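The init above only sets up the producer/consumer plumbing; the implied consumer drains the queue from tasklet context while producers append and reschedule. A minimal sketch of both halves (toy_* names are illustrative, and the "processing" here is just a free):

#include <linux/interrupt.h>
#include <linux/skbuff.h>

static struct sk_buff_head toy_skbq;
static struct tasklet_struct toy_task;

/* consumer: skb_dequeue() takes the queue lock, so producers may
 * keep appending while we drain */
static void toy_skbq_process(unsigned long unused)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&toy_skbq)) != NULL)
		kfree_skb(skb);		/* real code would process the skb */
}

static void toy_queue_init(void)
{
	skb_queue_head_init(&toy_skbq);
	tasklet_init(&toy_task, toy_skbq_process, 0);
}

/* producer, safe from interrupt context */
static void toy_enqueue(struct sk_buff *skb)
{
	skb_queue_tail(&toy_skbq, skb);
	tasklet_schedule(&toy_task);
}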
Example #27
/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to be associated with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
		const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	if (the_dev)
		return -EBUSY;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return -ENOMEM;

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	SET_ETHTOOL_OPS(net, &ops);

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		the_dev = dev;

		/* two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return status;
}
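A typical caller, sketched with hypothetical toy_* names: a gadget driver's bind routine sets up one link named usb0, usb1, ..., and keeps the host-side MAC that gether_setup_name() records for use in its CDC descriptors:

#include <linux/etherdevice.h>
#include <linux/usb/gadget.h>

static u8 toy_host_mac[ETH_ALEN];

static int toy_bind(struct usb_gadget *gadget)
{
	/* negative errno on failure, 0 once the net_device is registered */
	return gether_setup_name(gadget, toy_host_mac, "usb");
}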
Example #28
void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p parent %p", sk, parent);

	if (parent) {
		sk->sk_type = parent->sk_type;
		sk->sk_rcvbuf = parent->sk_rcvbuf;
		sk->sk_sndbuf = parent->sk_sndbuf;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->conf_state = l2cap_pi(parent)->conf_state;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs  = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
		pi->flushable = l2cap_pi(parent)->flushable;
		pi->force_active = l2cap_pi(parent)->force_active;
		pi->amp_pref = l2cap_pi(parent)->amp_pref;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
			pi->mode = L2CAP_MODE_ERTM;
			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		} else {
			pi->mode = L2CAP_MODE_BASIC;
		}
		pi->reconf_state = L2CAP_RECONF_NONE;
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
		pi->flushable = 0;
		pi->force_active = 1;
		pi->amp_pref = BT_AMP_POLICY_REQUIRE_BR_EDR;
	}

	/* Default config options */
	sk->sk_backlog_rcv = l2cap_data_channel;
	pi->ampcon = NULL;
	pi->ampchan = NULL;
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	pi->scid = 0;
	pi->dcid = 0;
	pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
	pi->ack_win = pi->tx_win;
	pi->extended_control = 0;

	pi->local_conf.fcs = pi->fcs;
	pi->local_conf.flush_to = pi->flush_to;

	set_default_config(&pi->remote_conf);

	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
}
Example #29
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
				  u16 timeout)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	if (WARN(sta->reserved_tid == tid,
		 "Requested to start BA session on reserved tid=%d", tid))
		return -EINVAL;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN_ON_ONCE(!local->ops->ampdu_action))
		return -EINVAL;

	if ((tid >= IEEE80211_NUM_TIDS) ||
	    !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
	    (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
		return -EINVAL;

	ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);

	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
		return -EINVAL;

	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
		ht_dbg(sdata,
		       "BA sessions blocked - Denying BA session request %pM tid %d\n",
		       sta->sta.addr, tid);
		return -EINVAL;
	}

	/*
	 * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
	 * member of an IBSS, and has no other existing Block Ack agreement
	 * with the recipient STA, then the initiating STA shall transmit a
	 * Probe Request frame to the recipient STA and shall not transmit an
	 * ADDBA Request frame unless it receives a Probe Response frame
	 * from the recipient within dot11ADDBAFailureTimeout.
	 *
	 * The probe request mechanism for ADDBA is currently not implemented,
	 * but we only build up Block Ack sessions with HT STAs. This
	 * information is set when we receive bss info from a probe response
	 * or a beacon.
	 */
	if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
	    !sta->sta.ht_cap.ht_supported) {
		ht_dbg(sdata,
		       "BA request denied - IBSS STA %pM does not advertise HT support\n",
		       pubsta->addr);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	/*
	 * if we have tried more than HT_AGG_BURST_RETRIES times we
	 * will spread our requests in time to avoid stalling connection
	 * for too long
	 */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
			HT_AGG_RETRIES_PERIOD)) {
		ht_dbg(sdata,
		       "BA request denied - waiting a grace period after %d failed requests on %pM tid %u\n",
		       sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	/* check if the TID is not in aggregation flow already */
	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
		ht_dbg(sdata,
		       "BA request denied - session is not idle on %pM tid %u\n",
		       sta->sta.addr, tid);
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	tid_tx->timeout = timeout;

	/* response timer */
	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer(&tid_tx->addba_resp_timer);

	/* tx timer */
	tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
	tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer_deferrable(&tid_tx->session_timer);

	/* assign a dialog token */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/*
	 * Finally, assign it to the start array; the work item will
	 * collect it and move it to the normal array.
	 */
	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* this flow continues off the work */
 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
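One detail worth noting above: the timer function and data are assigned by hand before init_timer(). On kernels of this era the response timer could equally be written with the setup_timer() helper; the deferrable session timer keeps the manual form, since a deferrable setup helper arrived much later. A sketch of the equivalent lines:

	/* equivalent to the three addba_resp_timer lines above */
	setup_timer(&tid_tx->addba_resp_timer,
		    sta_addba_resp_timer_expired,
		    (unsigned long)&sta->timer_to_tid[tid]);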
Example #30
File: main.c  Project: krzk/linux
static int nfp_flower_init(struct nfp_app *app)
{
	const struct nfp_pf *pf = app->pf;
	struct nfp_flower_priv *app_priv;
	u64 version, features;
	int err;

	if (!pf->eth_tbl) {
		nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
		return -EINVAL;
	}

	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
		return -EINVAL;
	}

	if (!pf->vf_cfg_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
		return -EINVAL;
	}

	version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
	if (err) {
		nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
		return err;
	}

	/* We need to ensure hardware has enough flower capabilities. */
	if (version != NFP_FLOWER_ALLOWED_VER) {
		nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
		return -EINVAL;
	}

	app_priv = vzalloc(sizeof(struct nfp_flower_priv));
	if (!app_priv)
		return -ENOMEM;

	app->priv = app_priv;
	app_priv->app = app;
	skb_queue_head_init(&app_priv->cmsg_skbs_high);
	skb_queue_head_init(&app_priv->cmsg_skbs_low);
	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
	init_waitqueue_head(&app_priv->reify_wait_queue);

	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
	spin_lock_init(&app_priv->mtu_conf.lock);

	err = nfp_flower_metadata_init(app);
	if (err)
		goto err_free_app_priv;

	/* Extract the extra features supported by the firmware. */
	features = nfp_rtsym_read_le(app->pf->rtbl,
				     "_abi_flower_extra_features", &err);
	if (err)
		app_priv->flower_ext_feats = 0;
	else
		app_priv->flower_ext_feats = features;

	/* Tell the firmware that the driver supports lag. */
	err = nfp_rtsym_write_le(app->pf->rtbl,
				 "_abi_flower_balance_sync_enable", 1);
	if (!err) {
		app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG;
		nfp_flower_lag_init(&app_priv->nfp_lag);
	} else if (err == -ENOENT) {
		nfp_warn(app->cpp, "LAG not supported by FW.\n");
	} else {
		goto err_cleanup_metadata;
	}

	return 0;

err_cleanup_metadata:
	nfp_flower_metadata_cleanup(app);
err_free_app_priv:
	vfree(app->priv);
	return err;
}
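The two queues initialized above exist so the cmsg work item can service urgent control messages first. A sketch of that drain order, with illustrative toy_* names rather than the driver's own handler:

#include <linux/skbuff.h>

static void toy_cmsg_process(struct sk_buff_head *high,
			     struct sk_buff_head *low)
{
	struct sk_buff *skb;

	/* exhaust the high-priority queue before touching the low one */
	while ((skb = skb_dequeue(high)) != NULL)
		consume_skb(skb);	/* real code would parse the cmsg */

	while ((skb = skb_dequeue(low)) != NULL)
		consume_skb(skb);
}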