Example #1
static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

#if defined(CONFIG_RA_HW_NAT_PCI) && (defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE))
	if ((skb = alloc_skb (size + NET_IP_ALIGN + FOE_INFO_LEN, flags)) == NULL) {
#else
	if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
#endif
		if (netif_msg_rx_err (dev))
			devdbg (dev, "no rx skb");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return;
	}
#if defined(CONFIG_RA_HW_NAT_PCI) && (defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE))
	skb_reserve (skb, NET_IP_ALIGN + FOE_INFO_LEN);
#else
	skb_reserve (skb, NET_IP_ALIGN);
#endif
	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net)
			&& netif_device_present (dev->net)
			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			if (netif_msg_ifdown (dev))
				devdbg (dev, "device gone");
			netif_device_detach (dev->net);
			break;
		default:
			if (netif_msg_rx_err (dev))
				devdbg (dev, "rx submit, %d", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
		}
	} else {
		if (netif_msg_ifdown (dev))
			devdbg (dev, "rx: stopped");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
}


/*-------------------------------------------------------------------------*/

static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
{
	if (dev->driver_info->rx_fixup
			&& !dev->driver_info->rx_fixup (dev, skb))
		goto error;
	// else network stack removes extra byte if we forced a short packet

	if (skb->len)
		usbnet_skb_return (dev, skb);
	else {
		if (netif_msg_rx_err (dev))
			devdbg (dev, "drop");
error:
		dev->stats.rx_errors++;
		skb_queue_tail (&dev->done, skb);
	}
}

/*-------------------------------------------------------------------------*/

static void rx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;
	enum skb_state		state;

	skb_put (skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	    // success
	    case 0:
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			if (netif_msg_rx_err (dev))
				devdbg (dev, "rx length %d", skb->len);
		}
		break;

	    // stalls need manual reset. this is rare ... except that
	    // when going through USB 2.0 TTs, unplug appears this way.
	    // we avoid the highspeed version of the ETIMEDOUT/EILSEQ
	    // storm, recovering as needed.
	    case -EPIPE:
		dev->stats.rx_errors++;
		usbnet_defer_kevent (dev, EVENT_RX_HALT);
		// FALLTHROUGH

	    // software-driven interface shutdown
	    case -ECONNRESET:		// async unlink
	    case -ESHUTDOWN:		// hardware gone
		if (netif_msg_ifdown (dev))
			devdbg (dev, "rx shutdown, code %d", urb_status);
		goto block;

	    // we get controller i/o faults during khubd disconnect() delays.
	    // throttle down resubmits, to avoid log floods; just temporarily,
	    // so we still recover when the fault isn't a khubd delay.
	    case -EPROTO:
	    case -ETIME:
	    case -EILSEQ:
		dev->stats.rx_errors++;
		if (!timer_pending (&dev->delay)) {
			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
			if (netif_msg_link (dev))
				devdbg (dev, "rx throttle %d", urb_status);
		}
block:
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	    // data overrun ... flush fifo?
	    case -EOVERFLOW:
		dev->stats.rx_over_errors++;
		// FALLTHROUGH

	    default:
		state = rx_cleanup;
		dev->stats.rx_errors++;
		if (netif_msg_rx_err (dev))
			devdbg (dev, "rx status %d", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running (dev->net)
		    && !test_bit (EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit (dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb (urb);
	}
	if (netif_msg_rx_err (dev))
		devdbg (dev, "no read resubmitted");
}
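The -EPROTO/-ETIME/-EILSEQ arm above throttles resubmits with a one-shot timer so a burst of faults schedules a single delayed retry. A minimal sketch of that idea, assuming a timer field and THROTTLE_JIFFIES as in usbnet:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Arm a single delayed retry only if none is pending, so a storm of
 * faults collapses into one throttled resubmit (sketch; `delay` and
 * THROTTLE_JIFFIES are assumed from the surrounding driver). */
static void throttle_retry(struct timer_list *delay)
{
	if (!timer_pending(delay))
		mod_timer(delay, jiffies + THROTTLE_JIFFIES);
}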

static void intr_complete (struct urb *urb)
{
	struct usbnet	*dev = urb->context;
	int		status = urb->status;

	switch (status) {
	    /* success */
	    case 0:
		dev->driver_info->status(dev, urb);
		break;

	    /* software-driven interface shutdown */
	    case -ENOENT:		// urb killed
	    case -ESHUTDOWN:		// hardware gone
		if (netif_msg_ifdown (dev))
			devdbg (dev, "intr shutdown, code %d", status);
		return;

	    /* NOTE:  not throttling like RX/TX, since this endpoint
	     * already polls infrequently
	     */
	    default:
		devdbg (dev, "intr status %d", status);
		break;
	}

	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb (urb, GFP_ATOMIC);
	if (status != 0 && netif_msg_timer (dev))
		deverr(dev, "intr resubmit --> %d", status);
}

/*-------------------------------------------------------------------------*/

// unlink pending rx/tx; completion handlers do all other cleanup

static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
	unsigned long		flags;
	struct sk_buff		*skb;
	int			count = 0;

	spin_lock_irqsave (&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data		*entry;
		struct urb		*urb;
		int			retval;

		skb_queue_walk(q, skb) {
			entry = (struct skb_data *) skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/*
		 * Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		// during some PM-driven resume scenarios,
		// these (async) unlinks complete immediately
		retval = usb_unlink_urb (urb);
		if (retval != -EINPROGRESS && retval != 0)
			devdbg (dev, "unlink urb err, %d", retval);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
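unlink_urbs() shows the standard drain-under-lock dance: find an entry while holding the queue spinlock, take a reference, then drop the lock across the call that can race with the completion path. A stripped-down sketch of the same shape; the my_cancel() hook is hypothetical:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void my_cancel(struct sk_buff *skb);	/* hypothetical stand-in for usb_unlink_urb() */

static int drain_queue(struct sk_buff_head *q)
{
	unsigned long flags;
	struct sk_buff *skb;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while ((skb = __skb_dequeue(q)) != NULL) {
		/* Drop the lock around the call that may race with the
		 * completion handlers, as unlink_urbs() does above. */
		spin_unlock_irqrestore(&q->lock, flags);
		my_cancel(skb);
		count++;
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}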
Example #2
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
	struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_cork *cork;
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen;
	int dst_exthdrlen;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;
	__u8 tx_flags = 0;

	if (flags&MSG_PROBE)
		return 0;
	cork = &inet->cork.base;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if (opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above miyazawa*/
		}
		dst_hold(&rt->dst);
		cork->dst = &rt->dst;
		inet->cork.fl.u.ip6 = *fl6;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		if (rt->dst.flags & DST_XFRM_TUNNEL)
			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
		else
			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		cork->fragsize = mtu;
		if (dst_allfrag(rt->dst.path))
			cork->flags |= IPCORK_ALLFRAG;
		cork->length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	} else {
		rt = (struct rt6_info *)cork->dst;
		fl6 = &inet->cork.fl.u.ip6;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		dst_exthdrlen = 0;
		mtu = cork->fragsize;
	}
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/* For UDP, check if TX timestamp is enabled */
	if (sk->sk_type == SOCK_DGRAM) {
		err = sock_tx_timestamp(sk, &tx_flags);
		if (err)
			goto error;
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
					   sk->sk_protocol == IPPROTO_RAW)) {
		ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	cork->length += length;
	if (((length > mtu) ||
	     (skb && skb_has_frags(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) &&
	    (sk->sk_type == SOCK_DGRAM)) {
		err = ip6_ufo_append_data(sk, getfrag, from, length,
					  hh_len, fragheaderlen,
					  transhdrlen, mtu, flags, rt);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (skb == NULL || skb_prev == NULL)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else {
					/* Only the initial fragment
					 * is time stamped.
					 */
					tx_flags = 0;
				}
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			if (sk->sk_type == SOCK_DGRAM)
				skb_shinfo(skb)->tx_flags = tx_flags;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if(i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
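The fragment sizing above reduces to one expression: round the room left after the headers down to a multiple of 8 (IPv6 fragment offsets are expressed in 8-octet units), then reserve the 8-byte fragment header. A self-contained sketch of that arithmetic, assuming a 1500-byte MTU and no extension headers:

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;		/* assumed link MTU */
	unsigned int fragheaderlen = 40;	/* sizeof(struct ipv6hdr), no ext headers */
	unsigned int frag_hdr = 8;		/* sizeof(struct frag_hdr) */

	/* Same expression as in ip6_append_data() above. */
	unsigned int maxfraglen =
		((mtu - fragheaderlen) & ~7u) + fragheaderlen - frag_hdr;

	printf("maxfraglen = %u\n", maxfraglen);	/* prints 1488 here */
	return 0;
}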
Example #3
File: wcmd.c Project: IDM350/linux
void vRunCommand(struct work_struct *work)
{
	struct vnt_private *pDevice =
		container_of(work, struct vnt_private, run_command_work.work);
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	PWLAN_IE_SSID pItemSSID;
	PWLAN_IE_SSID pItemSSIDCurr;
	CMD_STATUS Status;
	struct sk_buff  *skb;
	union iwreq_data wrqu;
	int ii;
	u8 byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
	u8 byData;

	if (pDevice->Flags & fMP_DISCONNECTED)
		return;

	if (pDevice->bCmdRunning != true)
		return;

	spin_lock_irq(&pDevice->lock);

	switch (pDevice->eCommandState) {

	case WLAN_CMD_SCAN_START:

		pDevice->byReAssocCount = 0;
		if (pDevice->bRadioOff == true) {
			s_bCommandComplete(pDevice);
			spin_unlock_irq(&pDevice->lock);
			return;
		}

		if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
			s_bCommandComplete(pDevice);
			spin_unlock_irq(&pDevice->lock);
			return;
		}

		pItemSSID = (PWLAN_IE_SSID)pMgmt->abyScanSSID;

		if (pMgmt->uScanChannel == 0)
			pMgmt->uScanChannel = pDevice->byMinChannel;
		if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
			pDevice->eCommandState = WLAN_CMD_SCAN_END;
			s_bCommandComplete(pDevice);
			spin_unlock_irq(&pDevice->lock);
			return;
		} else {
			if (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel)) {
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Invalid channel pMgmt->uScanChannel = %d\n", pMgmt->uScanChannel);
				pMgmt->uScanChannel++;
				s_bCommandComplete(pDevice);
				spin_unlock_irq(&pDevice->lock);
				return;
			}
			if (pMgmt->uScanChannel == pDevice->byMinChannel) {
				// pMgmt->eScanType = WMAC_SCAN_ACTIVE;          //mike mark
				pMgmt->abyScanBSSID[0] = 0xFF;
				pMgmt->abyScanBSSID[1] = 0xFF;
				pMgmt->abyScanBSSID[2] = 0xFF;
				pMgmt->abyScanBSSID[3] = 0xFF;
				pMgmt->abyScanBSSID[4] = 0xFF;
				pMgmt->abyScanBSSID[5] = 0xFF;
				pItemSSID->byElementID = WLAN_EID_SSID;
				// clear bssid list
				/* BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass); */
				pMgmt->eScanState = WMAC_IS_SCANNING;
				pDevice->byScanBBType = pDevice->byBBType;  //lucas
				pDevice->bStopDataPkt = true;
				// Turn off RCR_BSSID filter every time
				MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_BSSID);
				pDevice->byRxMode &= ~RCR_BSSID;
			}
			//lucas
			vAdHocBeaconStop(pDevice);
			if ((pDevice->byBBType != BB_TYPE_11A) &&
			    (pMgmt->uScanChannel > CB_MAX_CHANNEL_24G)) {
				pDevice->byBBType = BB_TYPE_11A;
				CARDvSetBSSMode(pDevice);
			} else if ((pDevice->byBBType == BB_TYPE_11A) &&
				   (pMgmt->uScanChannel <= CB_MAX_CHANNEL_24G)) {
				pDevice->byBBType = BB_TYPE_11G;
				CARDvSetBSSMode(pDevice);
			}
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning....  channel: [%d]\n", pMgmt->uScanChannel);
			// Set channel
			CARDbSetMediaChannel(pDevice, pMgmt->uScanChannel);
			// Set Baseband to be more sensitive.

			if (pDevice->bUpdateBBVGA) {
				BBvSetShortSlotTime(pDevice);
				BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
				BBvUpdatePreEDThreshold(pDevice, true);
			}
			pMgmt->uScanChannel++;

			while (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel) &&
				pMgmt->uScanChannel <= pDevice->byMaxChannel){
				pMgmt->uScanChannel++;
			}

			if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
				// Set Baseband to be not sensitive and rescan
				pDevice->eCommandState = WLAN_CMD_SCAN_END;
			}
			if ((pMgmt->b11hEnable == false) ||
			    (pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) {
				s_vProbeChannel(pDevice);
				spin_unlock_irq(&pDevice->lock);
				vCommandTimerWait((void *) pDevice, 100);
				return;
			} else {
				spin_unlock_irq(&pDevice->lock);
				vCommandTimerWait((void *) pDevice, WCMD_PASSIVE_SCAN_TIME);
				return;
			}
		}

		break;

	case WLAN_CMD_SCAN_END:

		// Set Baseband's sensitivity back.
		if (pDevice->byBBType != pDevice->byScanBBType) {
			pDevice->byBBType = pDevice->byScanBBType;
			CARDvSetBSSMode(pDevice);
		}

		if (pDevice->bUpdateBBVGA) {
			BBvSetShortSlotTime(pDevice);
			BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
			BBvUpdatePreEDThreshold(pDevice, false);
		}

		// Set channel back
		vAdHocBeaconRestart(pDevice);
		// Set channel back
		CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel);
		// Set Filter
		if (pMgmt->bCurrBSSIDFilterOn) {
			MACvRegBitsOn(pDevice, MAC_REG_RCR, RCR_BSSID);
			pDevice->byRxMode |= RCR_BSSID;
		}
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel);
		pMgmt->uScanChannel = 0;
		pMgmt->eScanState = WMAC_NO_SCANNING;
		pDevice->bStopDataPkt = false;

		/*send scan event to wpa_Supplicant*/
		PRINT_K("wireless_send_event--->SIOCGIWSCAN(scan done)\n");
		memset(&wrqu, 0, sizeof(wrqu));
		wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL);

		s_bCommandComplete(pDevice);
		break;

	case WLAN_CMD_DISASSOCIATE_START:
		pDevice->byReAssocCount = 0;
		if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
		    (pMgmt->eCurrState != WMAC_STATE_ASSOC)) {
			s_bCommandComplete(pDevice);
			spin_unlock_irq(&pDevice->lock);
			return;
		} else {
			pDevice->bwextstep0 = false;
			pDevice->bwextstep1 = false;
			pDevice->bwextstep2 = false;
			pDevice->bwextstep3 = false;
			pDevice->bWPASuppWextEnabled = false;
			pDevice->fWPA_Authened = false;

			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send Disassociation Packet..\n");
			// reason = 8 : disassoc because sta has left
			vMgrDisassocBeginSta((void *) pDevice,
					     pMgmt,
					     pMgmt->abyCurrBSSID,
					     (8),
					     &Status);
			pDevice->bLinkPass = false;
			ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
			// unlock command busy
			pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
			pItemSSID->len = 0;
			memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
			pMgmt->eCurrState = WMAC_STATE_IDLE;
			pMgmt->sNodeDBTable[0].bActive = false;
//			pDevice->bBeaconBufReady = false;
		}
		netif_stop_queue(pDevice->dev);
		if (pDevice->bNeedRadioOFF == true)
			CARDbRadioPowerOff(pDevice);
		s_bCommandComplete(pDevice);
		break;

	case WLAN_CMD_SSID_START:

		pDevice->byReAssocCount = 0;
		if (pDevice->bRadioOff == true) {
			s_bCommandComplete(pDevice);
			spin_unlock_irq(&pDevice->lock);
			return;
		}

		memcpy(pMgmt->abyAdHocSSID, pMgmt->abyDesireSSID,
		       ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len + WLAN_IEHDR_LEN);

		pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
		pItemSSIDCurr = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: desire ssid = %s\n", pItemSSID->abySSID);
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: curr ssid = %s\n", pItemSSIDCurr->abySSID);

		if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Cmd pMgmt->eCurrState == WMAC_STATE_ASSOC\n");
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSID->len =%d\n", pItemSSID->len);
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSIDCurr->len = %d\n", pItemSSIDCurr->len);
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" desire ssid = %s\n", pItemSSID->abySSID);
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" curr ssid = %s\n", pItemSSIDCurr->abySSID);
		}

		if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
		    ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
			if (pItemSSID->len == pItemSSIDCurr->len) {
				if (memcmp(pItemSSID->abySSID, pItemSSIDCurr->abySSID, pItemSSID->len) == 0) {
					s_bCommandComplete(pDevice);
					spin_unlock_irq(&pDevice->lock);
					return;
				}
			}
			netif_stop_queue(pDevice->dev);
			pDevice->bLinkPass = false;
			ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
		}
		// set initial state
		pMgmt->eCurrState = WMAC_STATE_IDLE;
		pMgmt->eCurrMode = WMAC_MODE_STANDBY;
		PSvDisablePowerSaving((void *) pDevice);
		BSSvClearNodeDBTable(pDevice, 0);
		vMgrJoinBSSBegin((void *) pDevice, &Status);
		// if Infra mode
		if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED)) {
			// Call mgr to begin the deauthentication
			// reason = (3) because sta has left ESS
			if (pMgmt->eCurrState >= WMAC_STATE_AUTH) {
				vMgrDeAuthenBeginSta((void *)pDevice,
						     pMgmt,
						     pMgmt->abyCurrBSSID,
						     (3),
						     &Status);
			}
			// Call mgr to begin the authentication
			vMgrAuthenBeginSta((void *) pDevice, pMgmt, &Status);
			if (Status == CMD_STATUS_SUCCESS) {
				pDevice->byLinkWaitCount = 0;
				pDevice->eCommandState = WLAN_AUTHENTICATE_WAIT;
				vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT);
				spin_unlock_irq(&pDevice->lock);
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Set eCommandState = WLAN_AUTHENTICATE_WAIT\n");
				return;
			}
		}
		// if Adhoc mode
		else if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
			if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
				if (netif_queue_stopped(pDevice->dev))
					netif_wake_queue(pDevice->dev);
				pDevice->bLinkPass = true;
				ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_INTER);
				pMgmt->sNodeDBTable[0].bActive = true;
				pMgmt->sNodeDBTable[0].uInActiveCount = 0;
			} else {
				// start own IBSS
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "CreateOwn IBSS by CurrMode = IBSS_STA\n");
				vMgrCreateOwnIBSS((void *) pDevice, &Status);
				if (Status != CMD_STATUS_SUCCESS) {
					DBG_PRT(MSG_LEVEL_DEBUG,
						KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n");
				}
				BSSvAddMulticastNode(pDevice);
			}
			s_bClearBSSID_SCAN(pDevice);
		}
		// if SSID not found
		else if (pMgmt->eCurrMode == WMAC_MODE_STANDBY) {
			if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA ||
			    pMgmt->eConfigMode == WMAC_CONFIG_AUTO) {
				// start own IBSS
				DBG_PRT(MSG_LEVEL_DEBUG,
					KERN_INFO "CreateOwn IBSS by CurrMode = STANDBY\n");
				vMgrCreateOwnIBSS((void *) pDevice, &Status);
				if (Status != CMD_STATUS_SUCCESS) {
					DBG_PRT(MSG_LEVEL_DEBUG,
						KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n");
				}
				BSSvAddMulticastNode(pDevice);
				s_bClearBSSID_SCAN(pDevice);
/*
				pDevice->bLinkPass = true;
				ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
				if (netif_queue_stopped(pDevice->dev)){
					netif_wake_queue(pDevice->dev);
				}
				s_bClearBSSID_SCAN(pDevice);
*/
			} else {
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disconnect SSID none\n");
				// if(pDevice->bWPASuppWextEnabled == true)
				{
					union iwreq_data  wrqu;
					memset(&wrqu, 0, sizeof(wrqu));
					wrqu.ap_addr.sa_family = ARPHRD_ETHER;
					PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated:vMgrJoinBSSBegin Fail !!)\n");
					wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
				}
			}
		}
		s_bCommandComplete(pDevice);
		break;

	case WLAN_AUTHENTICATE_WAIT:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_AUTHENTICATE_WAIT\n");
		if (pMgmt->eCurrState == WMAC_STATE_AUTH) {
			pDevice->byLinkWaitCount = 0;
			// Call mgr to begin the association
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_AUTH\n");
			vMgrAssocBeginSta((void *) pDevice, pMgmt, &Status);
			if (Status == CMD_STATUS_SUCCESS) {
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState = WLAN_ASSOCIATE_WAIT\n");
				pDevice->byLinkWaitCount = 0;
				pDevice->eCommandState = WLAN_ASSOCIATE_WAIT;
				vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT);
				spin_unlock_irq(&pDevice->lock);
				return;
			}
		} else if (pMgmt->eCurrState < WMAC_STATE_AUTHPENDING) {
			printk("WLAN_AUTHENTICATE_WAIT:Authen Fail???\n");
		} else if (pDevice->byLinkWaitCount <= 4) {
			//mike add:wait another 2 sec if authenticated_frame delay!
			pDevice->byLinkWaitCount++;
			printk("WLAN_AUTHENTICATE_WAIT:wait %d times!!\n", pDevice->byLinkWaitCount);
			spin_unlock_irq(&pDevice->lock);
			vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT/2);
			return;
		}
		pDevice->byLinkWaitCount = 0;

		s_bCommandComplete(pDevice);
		break;

	case WLAN_ASSOCIATE_WAIT:
		if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_ASSOC\n");
			if (pDevice->ePSMode != WMAC_POWER_CAM) {
				PSvEnablePowerSaving((void *) pDevice,
						pMgmt->wListenInterval);
			}
/*
			if (pMgmt->eAuthenMode >= WMAC_AUTH_WPA) {
				KeybRemoveAllKey(pDevice, &(pDevice->sKey), pDevice->abyBSSID);
			}
*/
			pDevice->byLinkWaitCount = 0;
			pDevice->byReAssocCount = 0;
			pDevice->bLinkPass = true;
			ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_INTER);
			s_bClearBSSID_SCAN(pDevice);

			if (netif_queue_stopped(pDevice->dev))
				netif_wake_queue(pDevice->dev);

		} else if (pMgmt->eCurrState < WMAC_STATE_ASSOCPENDING) {
			printk("WLAN_ASSOCIATE_WAIT:Association Fail???\n");
		} else if (pDevice->byLinkWaitCount <= 4) {
			//mike add:wait another 2 sec if associated_frame delay!
			pDevice->byLinkWaitCount++;
			printk("WLAN_ASSOCIATE_WAIT:wait %d times!!\n", pDevice->byLinkWaitCount);
			spin_unlock_irq(&pDevice->lock);
			vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT/2);
			return;
		}

		s_bCommandComplete(pDevice);
		break;

	case WLAN_CMD_AP_MODE_START:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_AP_MODE_START\n");

		if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
			cancel_delayed_work_sync(&pDevice->second_callback_work);
			pMgmt->eCurrState = WMAC_STATE_IDLE;
			pMgmt->eCurrMode = WMAC_MODE_STANDBY;
			pDevice->bLinkPass = false;
			ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
			if (pDevice->bEnableHostWEP == true)
				BSSvClearNodeDBTable(pDevice, 1);
			else
				BSSvClearNodeDBTable(pDevice, 0);
			pDevice->uAssocCount = 0;
			pMgmt->eCurrState = WMAC_STATE_IDLE;
			pDevice->bFixRate = false;

			vMgrCreateOwnIBSS((void *) pDevice, &Status);
			if (Status != CMD_STATUS_SUCCESS) {
				DBG_PRT(MSG_LEVEL_DEBUG,
					KERN_INFO "vMgrCreateOwnIBSS fail!\n");
			}
			// always turn off unicast bit
			MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_UNICAST);
			pDevice->byRxMode &= ~RCR_UNICAST;
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wcmd: rx_mode = %x\n", pDevice->byRxMode);
			BSSvAddMulticastNode(pDevice);
			if (netif_queue_stopped(pDevice->dev))
				netif_wake_queue(pDevice->dev);
			pDevice->bLinkPass = true;
			ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_INTER);
			schedule_delayed_work(&pDevice->second_callback_work, HZ);
		}
		s_bCommandComplete(pDevice);
		break;

	case WLAN_CMD_TX_PSPACKET_START:
		// DTIM Multicast tx
		if (pMgmt->sNodeDBTable[0].bRxPSPoll) {
			while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[0].sTxPSQueue)) != NULL) {
				if (skb_queue_empty(&pMgmt->sNodeDBTable[0].sTxPSQueue)) {
					pMgmt->abyPSTxMap[0] &= ~byMask[0];
					pDevice->bMoreData = false;
				} else {
					pDevice->bMoreData = true;
				}

				if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0)
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Multicast ps tx fail\n");

				pMgmt->sNodeDBTable[0].wEnQueueCnt--;
			}
		}

		// PS nodes tx
		for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
			if (pMgmt->sNodeDBTable[ii].bActive &&
			    pMgmt->sNodeDBTable[ii].bRxPSPoll) {
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d Enqueu Cnt= %d\n",
						ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt);
				while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) {
					if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
						// clear tx map
						pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
									~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
						pDevice->bMoreData = false;
					} else {
						pDevice->bMoreData = true;
					}

					if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0)
						DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "sta ps tx fail\n");

					pMgmt->sNodeDBTable[ii].wEnQueueCnt--;
					// check if sta ps enable, wait next pspoll
					// if sta ps disable, send all pending buffers.
					if (pMgmt->sNodeDBTable[ii].bPSEnable)
						break;
				}
				if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
					// clear tx map
					pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
							~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d PS queue clear\n", ii);
				}
Example #4
/**
 * connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
		   int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	struct sk_buff *buf;
	struct tipc_msg *msg;
	unsigned int timeout;
	int res;

	lock_sock(sk);

	/* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
	if (sock->state == SS_READY) {
		res = -EOPNOTSUPP;
		goto exit;
	}

	/* For now, TIPC does not support the non-blocking form of connect() */
	if (flags & O_NONBLOCK) {
		res = -EOPNOTSUPP;
		goto exit;
	}

	/* Issue Posix-compliant error code if socket is in the wrong state */
	if (sock->state == SS_LISTENING) {
		res = -EOPNOTSUPP;
		goto exit;
	}
	if (sock->state == SS_CONNECTING) {
		res = -EALREADY;
		goto exit;
	}
	if (sock->state != SS_UNCONNECTED) {
		res = -EISCONN;
		goto exit;
	}

	/*
	 * Reject connection attempt using multicast address
	 *
	 * Note: send_msg() validates the rest of the address fields,
	 *       so there's no need to do it here
	 */
	if (dst->addrtype == TIPC_ADDR_MCAST) {
		res = -EINVAL;
		goto exit;
	}

	/* Reject any messages already in receive queue (very unlikely) */
	reject_rx_queue(sk);

	/* Send a 'SYN-' to destination */
	m.msg_name = dest;
	m.msg_namelen = destlen;
	res = send_msg(NULL, sock, &m, 0);
	if (res < 0)
		goto exit;

	/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
	timeout = tipc_sk(sk)->conn_timeout;
	release_sock(sk);
	res = wait_event_interruptible_timeout(*sk_sleep(sk),
			(!skb_queue_empty(&sk->sk_receive_queue) ||
			(sock->state != SS_CONNECTING)),
			timeout ? (long)msecs_to_jiffies(timeout)
				: MAX_SCHEDULE_TIMEOUT);
	lock_sock(sk);

	if (res > 0) {
		buf = skb_peek(&sk->sk_receive_queue);
		if (buf != NULL) {
			msg = buf_msg(buf);
			res = auto_connect(sock, msg);
			if (!res) {
				if (!msg_data_sz(msg))
					advance_rx_queue(sk);
			}
		} else {
			if (sock->state == SS_CONNECTED)
				res = -EISCONN;
			else
				res = -ECONNREFUSED;
		}
	} else {
		if (res == 0)
			res = -ETIMEDOUT;
		else
			; /* leave "res" unchanged */
		sock->state = SS_DISCONNECTING;
	}

exit:
	release_sock(sk);
	return res;
}
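Note the lock choreography around the wait: connect() releases the socket lock before sleeping and re-acquires it before touching state, with skb_queue_empty() re-checked as part of the wakeup condition. A minimal sketch of that shape; the helper name is hypothetical:

#include <net/sock.h>

/* Sleep until a reply lands in the receive queue or the timeout
 * expires; returns >0 on wakeup, 0 on timeout, <0 on signal. */
static long wait_for_reply(struct sock *sk, long timeout)
{
	long res;

	release_sock(sk);	/* never sleep holding the socket lock */
	res = wait_event_interruptible_timeout(*sk_sleep(sk),
			!skb_queue_empty(&sk->sk_receive_queue),
			timeout);
	lock_sock(sk);		/* re-take it before touching sock->state */
	return res;
}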
Example #5
/**
 * recv_msg - receive packet-oriented message
 * @iocb: (unused)
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int recv_msg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeout;
	unsigned int sz;
	u32 err;
	int res;

	/* Catch invalid receive requests */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sock->state == SS_UNCONNECTED)) {
		res = -ENOTCONN;
		goto exit;
	}

	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:

	/* Look for a message in receive queue; wait if necessary */
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (sock->state == SS_DISCONNECTING) {
			res = -ENOTCONN;
			goto exit;
		}
		if (timeout <= 0L) {
			res = timeout ? timeout : -EWOULDBLOCK;
			goto exit;
		}
		release_sock(sk);
		timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
							   tipc_rx_ready(sock),
							   timeout);
		lock_sock(sk);
	}

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Complete connection setup for an implied connect */
	if (unlikely(sock->state == SS_CONNECTING)) {
		res = auto_connect(sock, msg);
		if (res)
			goto exit;
	}

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		advance_rx_queue(sk);
		goto restart;
	}

	/* Capture sender's address (optional) */
	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */
	res = anc_data_recv(m, msg, tport);
	if (res)
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
		res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg),
					      m->msg_iov, sz);
		if (res)
			goto exit;
		res = sz;
	} else {
		if ((sock->state == SS_READY) ||
		    ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
		if ((sock->state != SS_READY) &&
		    (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tport->ref, tport->conn_unacked);
		advance_rx_queue(sk);
	}
exit:
	release_sock(sk);
	return res;
}
Example #6
static void __devexit fnic_remove(struct pci_dev *pdev)
{
	struct fnic *fnic = pci_get_drvdata(pdev);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;

	/*
	 * Mark state so that the workqueue thread stops forwarding
	 * received frames and link events to the local port. ISR and
	 * other threads that can queue work items will also stop
	 * creating work items on the fnic workqueue
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->stop_rx_link_events = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		del_timer_sync(&fnic->notify_timer);

	/*
	 * Flush the fnic event queue. After this call, there should
	 * be no event queued for this fnic device in the workqueue
	 */
	flush_workqueue(fnic_event_queue);
	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		del_timer_sync(&fnic->fip_timer);
		skb_queue_purge(&fnic->fip_frame_queue);
		fnic_fcoe_reset_vlans(fnic);
		fnic_fcoe_evlist_free(fnic);
	}

	/*
	 * Log off the fabric. This stops all remote ports, dns port,
	 * logs off the fabric. This flushes all rport, disc, lport work
	 * before returning
	 */
	fc_fabric_logoff(fnic->lport);

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->in_remove = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fcoe_ctlr_destroy(&fnic->ctlr);
	fc_lport_destroy(lp);
	/*
	 * This stops the fnic device, masks all interrupts. Completed
	 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
	 * cleaned up
	 */
	fnic_cleanup(fnic);

	BUG_ON(!skb_queue_empty(&fnic->frame_queue));
	BUG_ON(!skb_queue_empty(&fnic->tx_queue));

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_del(&fnic->list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

#if defined(__VMKLNX__)
	vmklnx_iodm_disable_events(fnic->lport->host);
#endif

	fc_remove_host(fnic->lport->host);
	scsi_remove_host(fnic->lport->host);
	fc_exch_mgr_free(fnic->lport);
	vnic_dev_notify_unset(fnic->vdev);
	fnic_free_intr(fnic);
	fnic_free_vnic_resources(fnic);
	fnic_clear_intr_mode(fnic);
	vnic_dev_close(fnic->vdev);
	vnic_dev_unregister(fnic->vdev);
	fnic_iounmap(fnic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	scsi_host_put(lp->host);
}
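fnic_remove() follows the usual teardown order: stop producers first, flush deferred work, purge the queues, and only then assert emptiness. Reduced to a sketch around a hypothetical driver struct:

#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/bug.h>

struct my_fnic {			/* hypothetical driver state */
	int stop_rx;
	struct workqueue_struct *wq;
	struct sk_buff_head frame_queue;
};

static void my_remove(struct my_fnic *f)
{
	f->stop_rx = 1;				/* 1. no new frames get queued */
	flush_workqueue(f->wq);			/* 2. drain deferred work */
	skb_queue_purge(&f->frame_queue);	/* 3. free whatever is left */
	BUG_ON(!skb_queue_empty(&f->frame_queue)); /* 4. must hold by now */
}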
Example #7
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb),
		    void *from, int length, int transhdrlen,
		    int hlimit, struct ipv6_txoptions *opt, struct flowi *fl, struct rt6_info *rt,
		    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa*/
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + (opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks 
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji 
	 */

	inet->cork.length += length;

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message 
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len+sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				skb_trim(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if(i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
Example #8
void
ctc_tty_netif_rx(struct sk_buff *skb)
{
	int i;
	ctc_tty_info *info = NULL;

	DBF_TEXT(trace, 5, __FUNCTION__);
	if (!skb)
		return;
	if ((!skb->dev) || (!driver) || ctc_tty_shuttingdown) {
		dev_kfree_skb(skb);
		return;
	}
	for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
		if (driver->info[i].netdev == skb->dev) {
			info = &driver->info[i];
			break;
		}
	if (!info) {
		dev_kfree_skb(skb);
		return;
	}
	if (skb->len < 6) {
		dev_kfree_skb(skb);
		return;
	}
	if (memcmp(skb->data, &ctc_tty_magic, sizeof(__u32))) {
		dev_kfree_skb(skb);
		return;
	}
	skb_pull(skb, sizeof(__u32));

	i = *((int *)skb->data);
	skb_pull(skb, sizeof(info->mcr));
	if (i & UART_MCR_RTS) {
		info->msr |= UART_MSR_CTS;
		if (info->flags & CTC_ASYNC_CTS_FLOW)
			info->tty->hw_stopped = 0;
	} else {
		info->msr &= ~UART_MSR_CTS;
		if (info->flags & CTC_ASYNC_CTS_FLOW)
			info->tty->hw_stopped = 1;
	}
	if (i & UART_MCR_DTR)
		info->msr |= UART_MSR_DSR;
	else
		info->msr &= ~UART_MSR_DSR;
	if (skb->len <= 0) {
		kfree_skb(skb);
		return;
	}
	/* Try to deliver directly via tty-flip-buf if queue is empty */
	if (skb_queue_empty(&info->rx_queue))
		if (ctc_tty_try_read(info, skb))
			return;
	/* Direct deliver failed or queue wasn't empty.
	 * Queue up for later dequeueing via timer-irq.
	 */
	skb_queue_tail(&info->rx_queue, skb);
	/* Schedule dequeuing */
	tasklet_schedule(&info->tasklet);
}
Example #9
static int
ctc_tty_tint(ctc_tty_info * info)
{
	struct sk_buff *skb = skb_dequeue(&info->tx_queue);
	int stopped = (info->tty->hw_stopped || info->tty->stopped);
	int wake = 1;
	int rc;

	DBF_TEXT(trace, 4, __FUNCTION__);
	if (!info->netdev) {
		if (skb)
			kfree_skb(skb);
		return 0;
	}
	if (info->flags & CTC_ASYNC_TX_LINESTAT) {
		int skb_res = info->netdev->hard_header_len +
			sizeof(info->mcr) + sizeof(__u32);
		/* If we must update line status,
		 * create an empty dummy skb and insert it.
		 */
		if (skb)
			skb_queue_head(&info->tx_queue, skb);

		skb = dev_alloc_skb(skb_res);
		if (!skb) {
			printk(KERN_WARNING
			       "ctc_tty: Out of memory in %s%d tint\n",
			       CTC_TTY_NAME, info->line);
			return 1;
		}
		skb_reserve(skb, skb_res);
		stopped = 0;
		wake = 0;
	}
	if (!skb)
		return 0;
	if (stopped) {
		skb_queue_head(&info->tx_queue, skb);
		return 1;
	}
#if 0
	if (skb->len > 0)
		printk(KERN_DEBUG "tint: %d %02x\n", skb->len, *(skb->data));
	else
		printk(KERN_DEBUG "tint: %d STAT\n", skb->len);
#endif
	memcpy(skb_push(skb, sizeof(info->mcr)), &info->mcr, sizeof(info->mcr));
	memcpy(skb_push(skb, sizeof(__u32)), &ctc_tty_magic, sizeof(__u32));
	rc = info->netdev->hard_start_xmit(skb, info->netdev);
	if (rc) {
		skb_pull(skb, sizeof(info->mcr) + sizeof(__u32));
		if (skb->len > 0)
			skb_queue_head(&info->tx_queue, skb);
		else
			kfree_skb(skb);
	} else {
		struct tty_struct *tty = info->tty;

		info->flags &= ~CTC_ASYNC_TX_LINESTAT;
		if (tty) {
			if (wake && (tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
			    tty->ldisc.write_wakeup)
				(tty->ldisc.write_wakeup)(tty);
			wake_up_interruptible(&tty->write_wait);
		}
	}
	return (skb_queue_empty(&info->tx_queue) ? 0 : 1);
}
Example #10
static int clip_mkip(struct atm_vcc *vcc, int timeout)
{
	struct clip_vcc *clip_vcc;
	struct sk_buff *skb;
	struct sk_buff_head *rq;
	unsigned long flags;

	if (!vcc->push)
		return -EBADFD;
	clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL);
	if (!clip_vcc)
		return -ENOMEM;
	DPRINTK("mkip clip_vcc %p vcc %p\n", clip_vcc, vcc);
	clip_vcc->vcc = vcc;
	vcc->user_back = clip_vcc;
	set_bit(ATM_VF_IS_CLIP, &vcc->flags);
	clip_vcc->entry = NULL;
	clip_vcc->xoff = 0;
	clip_vcc->encap = 1;
	clip_vcc->last_use = jiffies;
	clip_vcc->idle_timeout = timeout * HZ;
	clip_vcc->old_push = vcc->push;
	clip_vcc->old_pop = vcc->pop;
	vcc->push = clip_push;
	vcc->pop = clip_pop;

	rq = &sk_atm(vcc)->sk_receive_queue;

	spin_lock_irqsave(&rq->lock, flags);
	if (skb_queue_empty(rq)) {
		skb = NULL;
	} else {
		/* NULL terminate the list.  */
		rq->prev->next = NULL;
		skb = rq->next;
	}
	rq->prev = rq->next = (struct sk_buff *)rq;
	rq->qlen = 0;
	spin_unlock_irqrestore(&rq->lock, flags);

	/* re-process everything received between connection setup and MKIP */
	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = skb->prev = NULL;
		if (!clip_devs) {
			atm_return(vcc, skb->truesize);
			kfree_skb(skb);
		} else {
			unsigned int len = skb->len;

			skb_get(skb);
			clip_push(vcc, skb);
			PRIV(skb->dev)->stats.rx_packets--;
			PRIV(skb->dev)->stats.rx_bytes -= len;
			kfree_skb(skb);
		}

		skb = next;
	}
	return 0;
}
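clip_mkip() detaches everything pending on the receive queue by editing the list pointers by hand under rq->lock. On kernels that provide skb_queue_splice_init(), the same move can be written without touching prev/next directly; a sketch:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void drain_to_private(struct sk_buff_head *rq)
{
	struct sk_buff_head tmp;
	struct sk_buff *skb;
	unsigned long flags;

	__skb_queue_head_init(&tmp);
	spin_lock_irqsave(&rq->lock, flags);
	skb_queue_splice_init(rq, &tmp);	/* rq is empty afterwards */
	spin_unlock_irqrestore(&rq->lock, flags);

	while ((skb = __skb_dequeue(&tmp)) != NULL)
		kfree_skb(skb);			/* or re-process, as clip_mkip() does */
}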
Example #11
/*
 * segment the img and use the ptr and length to remember info on each segment
 *
 */
static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
			     u32 buffer_len)
{
	struct r8192_priv   *priv = ieee80211_priv(dev);
	bool		    rt_status = true;
	u16		    frag_threshold;
	u16		    frag_length, frag_offset = 0;
	//u16		    total_size;
	int		    i;

	rt_firmware	    *pfirmware = priv->pFirmware;
	struct sk_buff	    *skb;
	unsigned char	    *seg_ptr;
	cb_desc		    *tcb_desc;
	u8                  bLastIniPkt;

	firmware_init_param(dev);
	//Fragmentation might be required
	frag_threshold = pfirmware->cmdpacket_frag_thresold;
	do {
		if ((buffer_len - frag_offset) > frag_threshold) {
			frag_length = frag_threshold;
			bLastIniPkt = 0;

		} else {
			frag_length = buffer_len - frag_offset;
			bLastIniPkt = 1;

		}

		/* Allocate skb buffer to contain firmware info and tx
		 * descriptor info; add 4 to avoid packet appending overflow.
		 */
		skb  = dev_alloc_skb(USB_HWDESC_HEADER_LEN + frag_length + 4);
		if (!skb)
			return false;
		memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
		tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->queue_index = TXCMD_QUEUE;
		tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT;
		tcb_desc->bLastIniPkt = bLastIniPkt;

		skb_reserve(skb, USB_HWDESC_HEADER_LEN);
		seg_ptr = skb->data;
		/*
		 * Transform from little endian to big endian
		 * and pad with zeros
		 */
		for (i = 0; i < frag_length; i += 4) {
			*seg_ptr++ = ((i + 0) < frag_length) ? code_virtual_address[i + 3] : 0;
			*seg_ptr++ = ((i + 1) < frag_length) ? code_virtual_address[i + 2] : 0;
			*seg_ptr++ = ((i + 2) < frag_length) ? code_virtual_address[i + 1] : 0;
			*seg_ptr++ = ((i + 3) < frag_length) ? code_virtual_address[i + 0] : 0;
		}
		tcb_desc->txbuf_size = (u16)i;
		skb_put(skb, i);

		if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
		    !skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]) ||
		    priv->ieee80211->queue_stop) {
			RT_TRACE(COMP_FIRMWARE, "=====================================================> tx full!\n");
			skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
		} else {
			priv->ieee80211->softmac_hard_start_xmit(skb, dev);
		}

		code_virtual_address += frag_length;
		frag_offset += frag_length;

	} while (frag_offset < buffer_len);

	return rt_status;

}
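The byte-reversal loop above emits each 4-byte group in reverse order, but its guards test the write position while the reads come from the mirrored offset, so a tail that is not a multiple of 4 reads past the image (covered by the 4 spare bytes the skb allocates). A bounds-safe userspace sketch of the intended reverse-and-pad transform:

#include <stdio.h>

/* Reverse each 4-byte group, zero-padding past the end of src. */
static void swap4_pad(unsigned char *dst, const unsigned char *src,
		      unsigned int len)
{
	unsigned int i, k;

	for (i = 0; i < len; i += 4)
		for (k = 0; k < 4; k++) {
			unsigned int s = i + 3 - k;	/* reversed byte index */
			*dst++ = (s < len) ? src[s] : 0;
		}
}

int main(void)
{
	const unsigned char fw[5] = { 1, 2, 3, 4, 5 };
	unsigned char out[8];
	unsigned int i;

	swap4_pad(out, fw, sizeof(fw));
	for (i = 0; i < sizeof(out); i++)
		printf("%u ", out[i]);		/* prints: 4 3 2 1 0 0 0 5 */
	printf("\n");
	return 0;
}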
Example #12
static void yam_tx_byte(struct net_device *dev, struct yam_port *yp)
{
	struct sk_buff *skb;
	unsigned char b, temp;

	switch (yp->tx_state) {
	case TX_OFF:
		break;
	case TX_HEAD:
		if (--yp->tx_count <= 0) {
			if (!(skb = skb_dequeue(&yp->send_queue))) {
				ptt_off(dev);
				yp->tx_state = TX_OFF;
				break;
			}
			yp->tx_state = TX_DATA;
			if (skb->data[0] != 0) {
/*                              do_kiss_params(s, skb->data, skb->len); */
				dev_kfree_skb_any(skb);
				break;
			}
			yp->tx_len = skb->len - 1;	/* strip KISS byte */
			if (yp->tx_len >= YAM_MAX_FRAME || yp->tx_len < 2) {
				dev_kfree_skb_any(skb);
				break;
			}
			skb_copy_from_linear_data_offset(skb, 1,
							 yp->tx_buf,
							 yp->tx_len);
			dev_kfree_skb_any(skb);
			yp->tx_count = 0;
			yp->tx_crcl = 0x21;
			yp->tx_crch = 0xf3;
			yp->tx_state = TX_DATA;
		}
		break;
	case TX_DATA:
		b = yp->tx_buf[yp->tx_count++];
		outb(b, THR(dev->base_addr));
		temp = yp->tx_crcl;
		yp->tx_crcl = chktabl[temp] ^ yp->tx_crch;
		yp->tx_crch = chktabh[temp] ^ b;
		if (yp->tx_count >= yp->tx_len) {
			yp->tx_state = TX_CRC1;
		}
		break;
	case TX_CRC1:
		yp->tx_crch = chktabl[yp->tx_crcl] ^ yp->tx_crch;
		yp->tx_crcl = chktabh[yp->tx_crcl] ^ chktabl[yp->tx_crch] ^ 0xff;
		outb(yp->tx_crcl, THR(dev->base_addr));
		yp->tx_state = TX_CRC2;
		break;
	case TX_CRC2:
		outb(chktabh[yp->tx_crch] ^ 0xFF, THR(dev->base_addr));
		if (skb_queue_empty(&yp->send_queue)) {
			yp->tx_count = (yp->bitrate * yp->txtail) / 8000;
			if (yp->dupmode == 2)
				yp->tx_count += (yp->bitrate * yp->holdd) / 8;
			if (yp->tx_count == 0)
				yp->tx_count = 1;
			yp->tx_state = TX_TAIL;
		} else {
			yp->tx_count = 1;
			yp->tx_state = TX_HEAD;
		}
		++dev->stats.tx_packets;
		break;
	case TX_TAIL:
		if (--yp->tx_count <= 0) {
			yp->tx_state = TX_OFF;
			ptt_off(dev);
		}
		break;
	}
}
Example #13
/*
 * This function handles the event generated by firmware, rx data
 * received from firmware, and tx data sent from kernel.
 */
static int btmrvl_service_main_thread(void *data)
{
	struct btmrvl_thread *thread = data;
	struct btmrvl_private *priv = thread->priv;
	struct btmrvl_adapter *adapter = priv->adapter;
	wait_queue_t wait;
	struct sk_buff *skb;
	ulong flags;

	init_waitqueue_entry(&wait, current);

	for (;;) {
		add_wait_queue(&thread->wait_q, &wait);

		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			BT_DBG("main_thread: break from main thread");
			break;
		}

		if (adapter->wakeup_tries ||
				((!adapter->int_count) &&
				(!priv->btmrvl_dev.tx_dnld_rdy ||
				skb_queue_empty(&adapter->tx_queue)))) {
			BT_DBG("main_thread is sleeping...");
			schedule();
		}

		set_current_state(TASK_RUNNING);

		remove_wait_queue(&thread->wait_q, &wait);

		BT_DBG("main_thread woke up");

		spin_lock_irqsave(&priv->driver_lock, flags);
		if (adapter->int_count) {
			adapter->int_count = 0;
			spin_unlock_irqrestore(&priv->driver_lock, flags);
			priv->hw_process_int_status(priv);
		} else if (adapter->ps_state == PS_SLEEP &&
					!skb_queue_empty(&adapter->tx_queue)) {
			spin_unlock_irqrestore(&priv->driver_lock, flags);
			adapter->wakeup_tries++;
			priv->hw_wakeup_firmware(priv);
			continue;
		} else {
			spin_unlock_irqrestore(&priv->driver_lock, flags);
		}

		if (adapter->ps_state == PS_SLEEP)
			continue;

		if (!priv->btmrvl_dev.tx_dnld_rdy)
			continue;

		skb = skb_dequeue(&adapter->tx_queue);
		if (skb) {
			if (btmrvl_tx_pkt(priv, skb))
				priv->btmrvl_dev.hcidev->stat.err_tx++;
			else
				priv->btmrvl_dev.hcidev->stat.byte_tx += skb->len;

			kfree_skb(skb);
		}
	}

	return 0;
}
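The sleep/wake dance at the top of the loop is the classic manual wait-queue idiom: the thread registers on the wait queue and marks itself TASK_INTERRUPTIBLE before testing the wake-up condition, so a wake_up() racing with the test cannot be lost. A minimal skeleton of just that idiom; struct my_ctx, its wait_q field, and work_pending_now() are placeholders of mine:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct my_ctx {				/* placeholder context */
	wait_queue_head_t wait_q;
};

static bool work_pending_now(struct my_ctx *ctx);	/* placeholder predicate */

static int example_thread(void *data)
{
	struct my_ctx *ctx = data;
	DECLARE_WAITQUEUE(wait, current);

	while (!kthread_should_stop()) {
		add_wait_queue(&ctx->wait_q, &wait);
		set_current_state(TASK_INTERRUPTIBLE);

		/* Test only after queueing: a concurrent wake_up() either
		 * sees us on the queue or happens before this re-check. */
		if (!work_pending_now(ctx) && !kthread_should_stop())
			schedule();

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ctx->wait_q, &wait);

		/* ... drain one batch of work here ... */
	}
	return 0;
}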
Example no. 14
void vRunCommand(void *hDeviceContext)
{
    PSDevice        pDevice = (PSDevice)hDeviceContext;
    PSMgmtObject    pMgmt = &(pDevice->sMgmtObj);
    PWLAN_IE_SSID   pItemSSID;
    PWLAN_IE_SSID   pItemSSIDCurr;
    CMD_STATUS      Status;
    unsigned int            ii;
    BYTE            byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
    struct sk_buff  *skb;
    BYTE            byData;


    if (pDevice->dwDiagRefCount != 0)
        return;
    if (pDevice->bCmdRunning != TRUE)
        return;

    spin_lock_irq(&pDevice->lock);

    switch ( pDevice->eCommandState ) {

        case WLAN_CMD_SCAN_START:

            pDevice->byReAssocCount = 0;
            if (pDevice->bRadioOff == TRUE) {
                s_bCommandComplete(pDevice);
                spin_unlock_irq(&pDevice->lock);
                return;
            }

            if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
                s_bCommandComplete(pDevice);
                spin_unlock_irq(&pDevice->lock);
                return;
            }

            pItemSSID = (PWLAN_IE_SSID)pMgmt->abyScanSSID;

            if (pMgmt->uScanChannel == 0 ) {
                pMgmt->uScanChannel = pDevice->byMinChannel;
            }
            if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
                pMgmt->eScanState = WMAC_NO_SCANNING;

                if (pDevice->byBBType != pDevice->byScanBBType) {
                    pDevice->byBBType = pDevice->byScanBBType;
                    CARDvSetBSSMode(pDevice);
                }

                if (pDevice->bUpdateBBVGA) {
                    BBvSetShortSlotTime(pDevice);
                    BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
                    BBvUpdatePreEDThreshold(pDevice, FALSE);
                }
                
                vAdHocBeaconRestart(pDevice);
                
                CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel);
                
                if (pMgmt->bCurrBSSIDFilterOn) {
                    MACvRegBitsOn(pDevice, MAC_REG_RCR, RCR_BSSID);
                    pDevice->byRxMode |= RCR_BSSID;
                }
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel);
                pDevice->bStopDataPkt = FALSE;
                s_bCommandComplete(pDevice);
                spin_unlock_irq(&pDevice->lock);
                return;

            } else {
                if (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel)) {
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Invalid channel pMgmt->uScanChannel = %d \n",pMgmt->uScanChannel);
                    s_bCommandComplete(pDevice);
                    spin_unlock_irq(&pDevice->lock);
                    return;
                }
                if (pMgmt->uScanChannel == pDevice->byMinChannel) {
                   
                    pMgmt->abyScanBSSID[0] = 0xFF;
                    pMgmt->abyScanBSSID[1] = 0xFF;
                    pMgmt->abyScanBSSID[2] = 0xFF;
                    pMgmt->abyScanBSSID[3] = 0xFF;
                    pMgmt->abyScanBSSID[4] = 0xFF;
                    pMgmt->abyScanBSSID[5] = 0xFF;
                    pItemSSID->byElementID = WLAN_EID_SSID;
                    
                    pMgmt->eScanState = WMAC_IS_SCANNING;
                    pDevice->byScanBBType = pDevice->byBBType;  
                    pDevice->bStopDataPkt = TRUE;
                    
                    MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_BSSID);
                    pDevice->byRxMode &= ~RCR_BSSID;

                }
                
                vAdHocBeaconStop(pDevice);
                if ((pDevice->byBBType != BB_TYPE_11A) && (pMgmt->uScanChannel > CB_MAX_CHANNEL_24G)) {
                    pDevice->byBBType = BB_TYPE_11A;
                    CARDvSetBSSMode(pDevice);
                }
                else if ((pDevice->byBBType == BB_TYPE_11A) && (pMgmt->uScanChannel <= CB_MAX_CHANNEL_24G)) {
                    pDevice->byBBType = BB_TYPE_11G;
                    CARDvSetBSSMode(pDevice);
                }
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning....  channel: [%d]\n", pMgmt->uScanChannel);
                
                CARDbSetMediaChannel(pDevice, pMgmt->uScanChannel);
                

                if (pDevice->bUpdateBBVGA) {
                    BBvSetShortSlotTime(pDevice);
                    BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
                    BBvUpdatePreEDThreshold(pDevice, TRUE);
                }
                pMgmt->uScanChannel++;

                while (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel) &&
                        pMgmt->uScanChannel <= pDevice->byMaxChannel ){
                    pMgmt->uScanChannel++;
                }

                if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
                    
                    pDevice->eCommandState = WLAN_CMD_SCAN_END;

                }
                if ((pMgmt->b11hEnable == FALSE) ||
                    (pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) {
                    s_vProbeChannel(pDevice);
                    spin_unlock_irq(&pDevice->lock);
                    vCommandTimerWait((void *) pDevice, 100);
                    return;
                } else {
                    spin_unlock_irq(&pDevice->lock);
                    vCommandTimerWait((void *) pDevice, WCMD_PASSIVE_SCAN_TIME);
                    return;
                }

            }

            break;

        case WLAN_CMD_SCAN_END:

            
            if (pDevice->byBBType != pDevice->byScanBBType) {
                pDevice->byBBType = pDevice->byScanBBType;
                CARDvSetBSSMode(pDevice);
            }

            if (pDevice->bUpdateBBVGA) {
                BBvSetShortSlotTime(pDevice);
                BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
                BBvUpdatePreEDThreshold(pDevice, FALSE);
            }

            
            vAdHocBeaconRestart(pDevice);
            
            CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel);
            
            if (pMgmt->bCurrBSSIDFilterOn) {
                MACvRegBitsOn(pDevice, MAC_REG_RCR, RCR_BSSID);
                pDevice->byRxMode |= RCR_BSSID;
            }
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel);
            pMgmt->eScanState = WMAC_NO_SCANNING;
            pDevice->bStopDataPkt = FALSE;

#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
            if (pMgmt->eScanType == WMAC_SCAN_PASSIVE) {
                union iwreq_data wrqu;

                PRINT_K("wireless_send_event--->SIOCGIWSCAN(scan done)\n");
                memset(&wrqu, 0, sizeof(wrqu));
                wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL);
            }
#endif
            s_bCommandComplete(pDevice);
            break;

        case WLAN_CMD_DISASSOCIATE_START:
            pDevice->byReAssocCount = 0;
            if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
                (pMgmt->eCurrState != WMAC_STATE_ASSOC)) {
                s_bCommandComplete(pDevice);
                spin_unlock_irq(&pDevice->lock);
                return;
            } else {

#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
                pDevice->bwextstep0 = FALSE;
                pDevice->bwextstep1 = FALSE;
                pDevice->bwextstep2 = FALSE;
                pDevice->bwextstep3 = FALSE;
                pDevice->bWPASuppWextEnabled = FALSE;
#endif
                pDevice->fWPA_Authened = FALSE;

                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send Disassociation Packet..\n");
                
                vMgrDisassocBeginSta((void *) pDevice,
                                     pMgmt,
                                     pMgmt->abyCurrBSSID,
                                     (8),
                                     &Status);
                pDevice->bLinkPass = FALSE;
                ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
                
                pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
                pItemSSID->len = 0;
                memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
                pMgmt->eCurrState = WMAC_STATE_IDLE;
                pMgmt->sNodeDBTable[0].bActive = FALSE;
            }
            netif_stop_queue(pDevice->dev);
            if (pDevice->bNeedRadioOFF == TRUE)
                CARDbRadioPowerOff(pDevice);
            s_bCommandComplete(pDevice);
            break;


        case WLAN_CMD_SSID_START:

            pDevice->byReAssocCount = 0;
            if (pDevice->bRadioOff == TRUE) {
                s_bCommandComplete(pDevice);
                spin_unlock_irq(&pDevice->lock);
                return;
            }

            memcpy(pMgmt->abyAdHocSSID,pMgmt->abyDesireSSID,
                              ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len + WLAN_IEHDR_LEN);

            pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
            pItemSSIDCurr = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: desire ssid = %s\n", pItemSSID->abySSID);
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: curr ssid = %s\n", pItemSSIDCurr->abySSID);

            if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Cmd pMgmt->eCurrState == WMAC_STATE_ASSOC\n");
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSID->len =%d\n",pItemSSID->len);
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSIDCurr->len = %d\n",pItemSSIDCurr->len);
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" desire ssid = %s\n", pItemSSID->abySSID);
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" curr ssid = %s\n", pItemSSIDCurr->abySSID);
            }

            if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
                ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)&& (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {

                if (pItemSSID->len == pItemSSIDCurr->len) {
                    if (memcmp(pItemSSID->abySSID, pItemSSIDCurr->abySSID, pItemSSID->len) == 0) {
                        s_bCommandComplete(pDevice);
                        spin_unlock_irq(&pDevice->lock);
                        return;
                    }
                }
                netif_stop_queue(pDevice->dev);
                pDevice->bLinkPass = FALSE;
                ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
            }
            
            pMgmt->eCurrState = WMAC_STATE_IDLE;
            pMgmt->eCurrMode = WMAC_MODE_STANDBY;
            PSvDisablePowerSaving((void *) pDevice);
            BSSvClearNodeDBTable(pDevice, 0);
            vMgrJoinBSSBegin((void *) pDevice, &Status);
            
            if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED)) {
                
                
                if (pMgmt->eCurrState >= WMAC_STATE_AUTH) {
                    vMgrDeAuthenBeginSta((void *) pDevice,
                                         pMgmt,
                                         pMgmt->abyCurrBSSID,
                                         (3),
                                         &Status);
                }

                vMgrAuthenBeginSta((void *) pDevice, pMgmt, &Status);
                if (Status == CMD_STATUS_SUCCESS) {
                    pDevice->byLinkWaitCount = 0;
                    pDevice->eCommandState = WLAN_AUTHENTICATE_WAIT;
                    vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT);
                    spin_unlock_irq(&pDevice->lock);
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Set eCommandState = WLAN_AUTHENTICATE_WAIT\n");
                    return;
                }
            }
            
            else if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
                if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
                    if (netif_queue_stopped(pDevice->dev)){
                        netif_wake_queue(pDevice->dev);
                    }
                    pDevice->bLinkPass = TRUE;
                    ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
                    pMgmt->sNodeDBTable[0].bActive = TRUE;
                    pMgmt->sNodeDBTable[0].uInActiveCount = 0;
                }
                else {
                    
		    DBG_PRT(MSG_LEVEL_DEBUG,
			    KERN_INFO "CreateOwn IBSS by CurrMode = IBSS_STA\n");
		    vMgrCreateOwnIBSS((void *) pDevice, &Status);
                    if (Status != CMD_STATUS_SUCCESS){
			DBG_PRT(MSG_LEVEL_DEBUG,
				KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n");
                    }
                    BSSvAddMulticastNode(pDevice);
                }
                s_bClearBSSID_SCAN(pDevice);
            }
            
            else if (pMgmt->eCurrMode == WMAC_MODE_STANDBY) {
                if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA ||
                    pMgmt->eConfigMode == WMAC_CONFIG_AUTO) {
                    
			DBG_PRT(MSG_LEVEL_DEBUG,
				KERN_INFO "CreateOwn IBSS by CurrMode = STANDBY\n");
		    vMgrCreateOwnIBSS((void *) pDevice, &Status);
                    if (Status != CMD_STATUS_SUCCESS){
			DBG_PRT(MSG_LEVEL_DEBUG,
				KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n");
                    }
                    BSSvAddMulticastNode(pDevice);
                    s_bClearBSSID_SCAN(pDevice);
                }
                else {
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disconnect SSID none\n");
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
                    {
                        union iwreq_data wrqu;

                        memset(&wrqu, 0, sizeof(wrqu));
                        wrqu.ap_addr.sa_family = ARPHRD_ETHER;
                        PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated:vMgrJoinBSSBegin Fail !!)\n");
                        wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
                    }
#endif
                }
            }
            s_bCommandComplete(pDevice);
            break;

        case WLAN_AUTHENTICATE_WAIT:
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_AUTHENTICATE_WAIT\n");
            if (pMgmt->eCurrState == WMAC_STATE_AUTH) {
                pDevice->byLinkWaitCount = 0;

                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_AUTH\n");
                vMgrAssocBeginSta((void *) pDevice, pMgmt, &Status);
                if (Status == CMD_STATUS_SUCCESS) {
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState = WLAN_ASSOCIATE_WAIT\n");
                    pDevice->byLinkWaitCount = 0;
                    pDevice->eCommandState = WLAN_ASSOCIATE_WAIT;
                    vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT);
                    spin_unlock_irq(&pDevice->lock);
                    return;
                }
            }
            else if (pMgmt->eCurrState < WMAC_STATE_AUTHPENDING) {
                printk("WLAN_AUTHENTICATE_WAIT:Authen Fail???\n");
            }
            else if (pDevice->byLinkWaitCount <= 4) {
                pDevice->byLinkWaitCount++;
                printk("WLAN_AUTHENTICATE_WAIT:wait %d times!!\n", pDevice->byLinkWaitCount);
                spin_unlock_irq(&pDevice->lock);
                vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT / 2);
                return;
            }
            pDevice->byLinkWaitCount = 0;

            s_bCommandComplete(pDevice);
            break;

        case WLAN_ASSOCIATE_WAIT:
            if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_ASSOC\n");
                if (pDevice->ePSMode != WMAC_POWER_CAM) {
                    PSvEnablePowerSaving((void *) pDevice,
                                         pMgmt->wListenInterval);
                }
                pDevice->byLinkWaitCount = 0;
                pDevice->byReAssocCount = 0;
                pDevice->bLinkPass = TRUE;
                ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
                s_bClearBSSID_SCAN(pDevice);

                if (netif_queue_stopped(pDevice->dev)){
                    netif_wake_queue(pDevice->dev);
                }

                if (pDevice->IsTxDataTrigger != FALSE) {
                    del_timer(&pDevice->sTimerTxData);
                    init_timer(&pDevice->sTimerTxData);
                    pDevice->sTimerTxData.data = (unsigned long) pDevice;
                    pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData;
                    pDevice->sTimerTxData.expires = RUN_AT(10 * HZ);
                    pDevice->fTxDataInSleep = FALSE;
                    pDevice->nTxDataTimeCout = 0;
                }
                pDevice->IsTxDataTrigger = TRUE;
                add_timer(&pDevice->sTimerTxData);

            }
            else if (pMgmt->eCurrState < WMAC_STATE_ASSOCPENDING) {
                printk("WLAN_ASSOCIATE_WAIT:Association Fail???\n");
            }
            else if (pDevice->byLinkWaitCount <= 4) {
                pDevice->byLinkWaitCount++;
                printk("WLAN_ASSOCIATE_WAIT:wait %d times!!\n", pDevice->byLinkWaitCount);
                spin_unlock_irq(&pDevice->lock);
                vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT / 2);
                return;
            }
            pDevice->byLinkWaitCount = 0;

            s_bCommandComplete(pDevice);
            break;

        case WLAN_CMD_AP_MODE_START:
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_AP_MODE_START\n");

            if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
                del_timer(&pMgmt->sTimerSecondCallback);
                pMgmt->eCurrState = WMAC_STATE_IDLE;
                pMgmt->eCurrMode = WMAC_MODE_STANDBY;
                pDevice->bLinkPass = FALSE;
                ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
                if (pDevice->bEnableHostWEP == TRUE)
                    BSSvClearNodeDBTable(pDevice, 1);
                else
                    BSSvClearNodeDBTable(pDevice, 0);
                pDevice->uAssocCount = 0;
                pMgmt->eCurrState = WMAC_STATE_IDLE;
                pDevice->bFixRate = FALSE;

		vMgrCreateOwnIBSS((void *) pDevice, &Status);
		if (Status != CMD_STATUS_SUCCESS) {
			DBG_PRT(MSG_LEVEL_DEBUG,
				KERN_INFO "vMgrCreateOwnIBSS fail!\n");
                }
                
                MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_UNICAST);
                pDevice->byRxMode &= ~RCR_UNICAST;
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wcmd: rx_mode = %x\n", pDevice->byRxMode );
                BSSvAddMulticastNode(pDevice);
                if (netif_queue_stopped(pDevice->dev)){
                    netif_wake_queue(pDevice->dev);
                }
                pDevice->bLinkPass = TRUE;
                ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
                add_timer(&pMgmt->sTimerSecondCallback);
            }
            s_bCommandComplete(pDevice);
            break;

        case WLAN_CMD_TX_PSPACKET_START:

            if (pMgmt->sNodeDBTable[0].bRxPSPoll) {
                while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[0].sTxPSQueue)) != NULL) {
                    if (skb_queue_empty(&pMgmt->sNodeDBTable[0].sTxPSQueue)) {
                        pMgmt->abyPSTxMap[0] &= ~byMask[0];
                        pDevice->bMoreData = FALSE;
                    }
                    else {
                        pDevice->bMoreData = TRUE;
                    }

                    if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0) {
                        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Multicast ps tx fail \n");
                    }

                    pMgmt->sNodeDBTable[0].wEnQueueCnt--;
                }
            }

            
            for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
                if (pMgmt->sNodeDBTable[ii].bActive &&
                    pMgmt->sNodeDBTable[ii].bRxPSPoll) {
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d Enqueu Cnt= %d\n",
                               ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt);
                    while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) {
                        if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
                            
                            pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
                                    ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
                            pDevice->bMoreData = FALSE;
                        }
                        else {
                            pDevice->bMoreData = TRUE;
                        }

                        if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0) {
                            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "sta ps tx fail \n");
                        }

                        pMgmt->sNodeDBTable[ii].wEnQueueCnt--;
                        
                        
                        if (pMgmt->sNodeDBTable[ii].bPSEnable)
                            break;
                    }
                    if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
                        
                        pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
                                    ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
                        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d PS queue clear \n", ii);
                    }
Example no. 15
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct sk_buff *skb2;
	struct softnet_data *sd;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!xo)
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	x = skb->sp->xvec[skb->sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
		return skb;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb)) {
		struct net_device *dev = skb->dev;

		if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
			struct sk_buff *segs;

			/* Packet got rerouted, fixup features and segment it. */
			esp_features = esp_features & ~(NETIF_F_HW_ESP
							| NETIF_F_GSO_ESP);

			segs = skb_gso_segment(skb, esp_features);
			if (IS_ERR(segs)) {
				kfree_skb(skb);
				atomic_long_inc(&dev->tx_dropped);
				return NULL;
			} else {
				consume_skb(skb);
				skb = segs;
			}
		}
	}

	if (!skb->next) {
		x->outer_mode->xmit(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

	skb2 = skb;

	do {
		struct sk_buff *nskb = skb2->next;
		skb2->next = NULL;

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		x->outer_mode->xmit(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;

			if (!skb)
				return NULL;

			goto skip_push;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));

skip_push:
		skb2 = nskb;
	} while (skb2);

	return skb;
}
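The second half of the function walks the segment chain produced by skb_gso_segment(), detaching each skb from ->next before handing it on, so a failing segment can free the remainder with kfree_skb_list(). The bare traversal pattern, reduced to a sketch (process_one() is a placeholder of mine):

#include <linux/skbuff.h>

static void process_one(struct sk_buff *skb);	/* placeholder */

static void process_segments(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		segs->next = NULL;	/* detach before processing */
		process_one(segs);
		segs = next;
	}
}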
Example no. 16
void iwm_tx_worker(struct work_struct *work)
{
	struct iwm_priv *iwm;
	struct iwm_tx_info *tx_info = NULL;
	struct sk_buff *skb;
	struct iwm_tx_queue *txq;
	struct iwm_sta_info *sta_info;
	struct iwm_tid_info *tid_info;
	int cmdlen, ret, pool_id;

	txq = container_of(work, struct iwm_tx_queue, worker);
	iwm = container_of(txq, struct iwm_priv, txq[txq->id]);

	pool_id = queue_to_pool_id(txq->id);

	while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	       !skb_queue_empty(&txq->queue)) {

		spin_lock_bh(&txq->lock);
		skb = skb_dequeue(&txq->queue);
		spin_unlock_bh(&txq->lock);

		tx_info = skb_to_tx_info(skb);
		sta_info = &iwm->sta_table[tx_info->sta];
		if (!sta_info->valid) {
			IWM_ERR(iwm, "Trying to send a frame to unknown STA\n");
			kfree_skb(skb);
			continue;
		}

		tid_info = &sta_info->tid_info[tx_info->tid];

		mutex_lock(&tid_info->mutex);

		/*
		 * If the RAxTID is stopped, we queue the skb to the stopped
		 * queue.
		 * Whenever we'll get a UMAC notification to resume the tx flow
		 * for this RAxTID, we'll merge back the stopped queue into the
		 * regular queue. See iwm_ntf_stop_resume_tx() from rx.c.
		 */
		if (tid_info->stopped) {
			IWM_DBG_TX(iwm, DBG, "%dx%d stopped\n",
				   tx_info->sta, tx_info->tid);
			spin_lock_bh(&txq->lock);
			skb_queue_tail(&txq->stopped_queue, skb);
			spin_unlock_bh(&txq->lock);

			mutex_unlock(&tid_info->mutex);
			continue;
		}

		cmdlen = IWM_UDMA_HDR_LEN + skb->len;

		IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
			   "%d, color: %d\n", txq->id, skb, tx_info->sta,
			   tx_info->color);

		if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
			iwm_tx_send_concat_packets(iwm, txq);

		ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen);
		if (ret) {
			IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
				   "%d, Tx worker stopped\n", txq->id);
			spin_lock_bh(&txq->lock);
			skb_queue_head(&txq->queue, skb);
			spin_unlock_bh(&txq->lock);

			mutex_unlock(&tid_info->mutex);
			break;
		}

		txq->concat_ptr = txq->concat_buf + txq->concat_count;
		tid_info->last_seq_num =
			iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
		txq->concat_count += ALIGN(cmdlen, 16);

		mutex_unlock(&tid_info->mutex);

		kfree_skb(skb);
	}

	iwm_tx_send_concat_packets(iwm, txq);

	if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) &&
	    !test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	    (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) {
		IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]", txq->id);
		netif_wake_subqueue(iwm_to_ndev(iwm), txq->id);
	}
}
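Note the failure path above: when credit allocation fails, the skb just dequeued goes back with skb_queue_head(), not skb_queue_tail(), so frame ordering survives the stall. The pattern in isolation; credits_available() and send_one() are placeholders, and skb_dequeue()/skb_queue_head() take the queue's internal lock themselves:

#include <linux/skbuff.h>

static bool credits_available(void);		/* placeholder */
static void send_one(struct sk_buff *skb);	/* placeholder */

static void tx_drain(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(q)) != NULL) {
		if (!credits_available()) {
			skb_queue_head(q, skb);	/* put back at the head */
			break;
		}
		send_one(skb);
	}
}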
Example no. 17
static unsigned int vsock_poll(struct file *file, struct socket *sock,
			       poll_table *wait)
{
	struct sock *sk;
	unsigned int mask;
	struct vsock_sock *vsk;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (sk->sk_err)
		/* Signify that there has been an error on this socket. */
		mask |= POLLERR;

	/* INET sockets treat local write shutdown and peer write shutdown as a
	 * case of POLLHUP set.
	 */
	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
	    ((sk->sk_shutdown & SEND_SHUTDOWN) &&
	     (vsk->peer_shutdown & SEND_SHUTDOWN))) {
		mask |= POLLHUP;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN ||
	    vsk->peer_shutdown & SEND_SHUTDOWN) {
		mask |= POLLRDHUP;
	}

	if (sock->type == SOCK_DGRAM) {
		/* For datagram sockets we can read if there is something in
		 * the queue and write as long as the socket isn't shutdown for
		 * sending.
		 */
		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    (sk->sk_shutdown & RCV_SHUTDOWN)) {
			mask |= POLLIN | POLLRDNORM;
		}

		if (!(sk->sk_shutdown & SEND_SHUTDOWN))
			mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	} else if (sock->type == SOCK_STREAM) {
		lock_sock(sk);

		/* Listening sockets that have connections in their accept
		 * queue can be read.
		 */
		if (sk->sk_state == SS_LISTEN
		    && !vsock_is_accept_queue_empty(sk))
			mask |= POLLIN | POLLRDNORM;

		/* If there is something in the queue then we can read. */
		if (transport->stream_is_active(vsk) &&
		    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
			bool data_ready_now = false;
			int ret = transport->notify_poll_in(
					vsk, 1, &data_ready_now);
			if (ret < 0) {
				mask |= POLLERR;
			} else {
				if (data_ready_now)
					mask |= POLLIN | POLLRDNORM;

			}
		}

		/* Sockets whose connections have been closed, reset, or
		 * terminated should also be considered read, and we check the
		 * shutdown flag for that.
		 */
		if (sk->sk_shutdown & RCV_SHUTDOWN ||
		    vsk->peer_shutdown & SEND_SHUTDOWN) {
			mask |= POLLIN | POLLRDNORM;
		}

		/* Connected sockets that can produce data can be written. */
		if (sk->sk_state == SS_CONNECTED) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
				bool space_avail_now = false;
				int ret = transport->notify_poll_out(
						vsk, 1, &space_avail_now);
				if (ret < 0) {
					mask |= POLLERR;
				} else {
					if (space_avail_now)
						/* Remove POLLWRBAND since INET
						 * sockets are not setting it.
						 */
						mask |= POLLOUT | POLLWRNORM;

				}
			}
		}

		/* Simulate INET socket poll behaviors, which sets
		 * POLLOUT|POLLWRNORM when peer is closed and nothing to read,
		 * but local send is not shutdown.
		 */
		if (sk->sk_state == SS_UNCONNECTED) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
				mask |= POLLOUT | POLLWRNORM;

		}

		release_sock(sk);
	}

	return mask;
}
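The datagram branch reduces to two independent tests, worth seeing without the stream machinery around it. A trimmed sketch with the same flag semantics as the code above:

#include <net/sock.h>
#include <linux/poll.h>

static unsigned int dgram_poll_mask(struct sock *sk)
{
	unsigned int mask = 0;

	/* Readable: data queued, or recv side shut down (recv() returns 0). */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/* Writable: send side not shut down. */
	if (!(sk->sk_shutdown & SEND_SHUTDOWN))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}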
Example no. 18
static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_HOST_BUFFER_SIZE:
		hci_cc_host_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_SSP_MODE:
		hci_cc_read_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	if (ev->ncmd) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			tasklet_schedule(&hdev->cmd_task);
	}
}
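Every handler here shares one signature, so the switch could equally be a lookup table. That is not how hci_event.c is written, but a hypothetical table-driven equivalent makes the opcode-to-handler mapping pure data:

#include <linux/kernel.h>

struct hci_cc_entry {				/* hypothetical */
	__u16 opcode;
	void (*func)(struct hci_dev *hdev, struct sk_buff *skb);
};

static const struct hci_cc_entry hci_cc_table[] = {
	{ HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel },
	{ HCI_OP_RESET,          hci_cc_reset          },
	/* ... one entry per HCI_OP_* handled above ... */
};

static void hci_cc_dispatch(struct hci_dev *hdev, __u16 opcode,
			    struct sk_buff *skb)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
		if (hci_cc_table[i].opcode == opcode) {
			hci_cc_table[i].func(hdev, skb);
			return;
		}
	}
	BT_DBG("%s opcode 0x%x", hdev->name, opcode);
}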
Example no. 19
/*
 * receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
		  struct msghdr *msg, size_t len, int flags)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_call *call = NULL, *continue_call = NULL;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct sk_buff *skb;
	long timeo;
	int copy, ret, ullen, offset, copied = 0;
	u32 abort_code;

	DEFINE_WAIT(wait);

	_enter(",,,%zu,%d", len, flags);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
	msg->msg_flags |= MSG_MORE;

	lock_sock(&rx->sk);

	for (;;) {
		/* return immediately if a client socket has no outstanding
		 * calls */
		if (RB_EMPTY_ROOT(&rx->calls)) {
			if (copied)
				goto out;
			if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
				release_sock(&rx->sk);
				if (continue_call)
					rxrpc_put_call(continue_call);
				return -ENODATA;
			}
		}

		/* get the next message on the Rx queue */
		skb = skb_peek(&rx->sk.sk_receive_queue);
		if (!skb) {
			/* nothing remains on the queue */
			if (copied &&
			    (msg->msg_flags & MSG_PEEK || timeo == 0))
				goto out;

			/* wait for a message to turn up */
			release_sock(&rx->sk);
			prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
						  TASK_INTERRUPTIBLE);
			ret = sock_error(&rx->sk);
			if (ret)
				goto wait_error;

			if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
				if (signal_pending(current))
					goto wait_interrupted;
				timeo = schedule_timeout(timeo);
			}
			finish_wait(sk_sleep(&rx->sk), &wait);
			lock_sock(&rx->sk);
			continue;
		}

	peek_next_packet:
		sp = rxrpc_skb(skb);
		call = sp->call;
		ASSERT(call != NULL);

		_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);

		/* make sure we wait for the state to be updated in this call */
		spin_lock_bh(&call->lock);
		spin_unlock_bh(&call->lock);

		if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
			_debug("packet from released call");
			if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
				BUG();
			rxrpc_free_skb(skb);
			continue;
		}

		/* determine whether to continue last data receive */
		if (continue_call) {
			_debug("maybe cont");
			if (call != continue_call ||
			    skb->mark != RXRPC_SKB_MARK_DATA) {
				release_sock(&rx->sk);
				rxrpc_put_call(continue_call);
				_leave(" = %d [noncont]", copied);
				return copied;
			}
		}

		rxrpc_get_call(call);

		/* copy the peer address and timestamp */
		if (!continue_call) {
			if (msg->msg_name && msg->msg_namelen > 0)
				memcpy(msg->msg_name,
				       &call->conn->trans->peer->srx,
				       sizeof(call->conn->trans->peer->srx));
			sock_recv_ts_and_drops(msg, &rx->sk, skb);
		}

		/* receive the message */
		if (skb->mark != RXRPC_SKB_MARK_DATA)
			goto receive_non_data_message;

		_debug("recvmsg DATA #%u { %d, %d }",
		       ntohl(sp->hdr.seq), skb->len, sp->offset);

		if (!continue_call) {
			/* only set the control data once per recvmsg() */
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       ullen, &call->user_call_ID);
			if (ret < 0)
				goto copy_error;
			ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		}

		ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
		ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
		call->rx_data_recv = ntohl(sp->hdr.seq);

		ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);

		offset = sp->offset;
		copy = skb->len - offset;
		if (copy > len - copied)
			copy = len - copied;

		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			ret = skb_copy_datagram_iovec(skb, offset,
						      msg->msg_iov, copy);
		} else {
			ret = skb_copy_and_csum_datagram_iovec(skb, offset,
							       msg->msg_iov);
			if (ret == -EINVAL)
				goto csum_copy_error;
		}

		if (ret < 0)
			goto copy_error;

		/* handle piecemeal consumption of data packets */
		_debug("copied %d+%d", copy, copied);

		offset += copy;
		copied += copy;

		if (!(flags & MSG_PEEK))
			sp->offset = offset;

		if (sp->offset < skb->len) {
			_debug("buffer full");
			ASSERTCMP(copied, ==, len);
			break;
		}

		/* we transferred the whole data packet */
		if (sp->hdr.flags & RXRPC_LAST_PACKET) {
			_debug("last");
			if (call->conn->out_clientflag) {
				 /* last byte of reply received */
				ret = copied;
				goto terminal_message;
			}

			/* last bit of request received */
			if (!(flags & MSG_PEEK)) {
				_debug("eat packet");
				if (skb_dequeue(&rx->sk.sk_receive_queue) !=
				    skb)
					BUG();
				rxrpc_free_skb(skb);
			}
			msg->msg_flags &= ~MSG_MORE;
			break;
		}

		/* move on to the next data message */
		_debug("next");
		if (!continue_call)
			continue_call = sp->call;
		else
			rxrpc_put_call(call);
		call = NULL;

		if (flags & MSG_PEEK) {
			_debug("peek next");
			skb = skb->next;
			if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
				break;
			goto peek_next_packet;
		}

		_debug("eat packet");
		if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
			BUG();
		rxrpc_free_skb(skb);
	}
Example no. 20
static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	if (ev->ncmd) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			tasklet_schedule(&hdev->cmd_task);
	}
}
Example no. 21
/**
 * recv_stream - receive stream-oriented data
 * @iocb: (unused)
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only.  If not enough data is available
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int recv_stream(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeout;
	unsigned int sz;
	int sz_to_copy, target, needed;
	int sz_copied = 0;
	u32 err;
	int res = 0;

	/* Catch invalid receive attempts */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely((sock->state == SS_UNCONNECTED) ||
		     (sock->state == SS_CONNECTING))) {
		res = -ENOTCONN;
		goto exit;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

restart:
	/* Look for a message in receive queue; wait if necessary */
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (sock->state == SS_DISCONNECTING) {
			res = -ENOTCONN;
			goto exit;
		}
		if (timeout <= 0L) {
			res = timeout ? timeout : -EWOULDBLOCK;
			goto exit;
		}
		release_sock(sk);
		timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
							   tipc_rx_ready(sock),
							   timeout);
		lock_sock(sk);
	}

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		advance_rx_queue(sk);
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */
	if (sz_copied == 0) {
		set_orig_addr(m, msg);
		res = anc_data_recv(m, msg, tport);
		if (res)
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);

		sz -= offset;
		needed = (buf_len - sz_copied);
		sz_to_copy = (sz <= needed) ? sz : needed;

		res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset,
					      m->msg_iov, sz_to_copy);
		if (res)
			goto exit;

		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			if (!(flags & MSG_PEEK))
				TIPC_SKB_CB(buf)->handle =
				(void *)(unsigned long)(offset + sz_to_copy);
			goto exit;
		}
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
		if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tport->ref, tport->conn_unacked);
		advance_rx_queue(sk);
	}

	/* Loop around if more data is required */
	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
	    (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sz_copied < target)) &&	/* and more is ready or required */
	    (!(flags & MSG_PEEK)) &&	/* and aren't just peeking at data */
	    (!err))			/* and haven't reached a FIN */
		goto restart;

exit:
	release_sock(sk);
	return sz_copied ? sz_copied : res;
}
Example no. 22
File: dev.c Project: 0xffea/gnumach
void net_bh(void)
{
	struct packet_type *ptype;
	struct packet_type *pt_prev;
	unsigned short type;

	/*
	 *	Can we send anything now? We want to clear the
	 *	decks for any more sends that get done as we
	 *	process the input. This also minimises the
	 *	latency on a transmit interrupt bh.
	 */

	dev_transmit();
  
	/*
	 *	Any data left to process. This may occur because a
	 *	mark_bh() is done after we empty the queue including
	 *	that from the device which does a mark_bh() just after
	 */

	/*
	 *	While the queue is not empty..
	 *
	 *	Note that the queue never shrinks due to
	 *	an interrupt, so we can do this test without
	 *	disabling interrupts.
	 */

	while (!skb_queue_empty(&backlog)) {
		struct sk_buff * skb = backlog.next;

		/*
		 *	We have a packet. Therefore the queue has shrunk
		 */
		cli();
		__skb_unlink(skb, &backlog);
  		backlog_size--;
		sti();
		

#ifdef CONFIG_BRIDGE

		/*
		 *	If we are bridging then pass the frame up to the
		 *	bridging code. If it is bridged then move on
		 */
		 
		if (br_stats.flags & BR_UP)
		{
			/*
			 *	We pass the bridge a complete frame. This means
			 *	recovering the MAC header first.
			 */
			 
			int offset=skb->data-skb->mac.raw;
			cli();
			skb_push(skb,offset);	/* Put header back on for bridge */
			if(br_receive_frame(skb))
			{
				sti();
				continue;
			}
			/*
			 *	Pull the MAC header off for the copy going to
			 *	the upper layers.
			 */
			skb_pull(skb,offset);
			sti();
		}
#endif
		
		/*
	 	 *	Bump the pointer to the next structure.
		 * 
		 *	On entry to the protocol layer. skb->data and
		 *	skb->h.raw point to the MAC and encapsulated data
		 */

		skb->h.raw = skb->data;

		/*
		 * 	Fetch the packet protocol ID. 
		 */
		
		type = skb->protocol;

		/*
		 *	We got a packet ID.  Now loop over the "known protocols"
		 * 	list. There are two lists. The ptype_all list of taps (normally empty)
		 *	and the main protocol list which is hashed perfectly for normal protocols.
		 */
		
		pt_prev = NULL;
		for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
		{
			if(!ptype->dev || ptype->dev == skb->dev) {
				if(pt_prev) {
					struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
					if(skb2)
						pt_prev->func(skb2,skb->dev, pt_prev);
				}
				pt_prev=ptype;
			}
		}
		
		for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next) 
		{
			if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
			{
				/*
				 *	We already have a match queued. Deliver
				 *	to it and then remember the new match
				 */
				if(pt_prev)
				{
					struct sk_buff *skb2;

					skb2=skb_clone(skb, GFP_ATOMIC);

					/*
					 *	Kick the protocol handler. This should be fast
					 *	and efficient code.
					 */

					if(skb2)
						pt_prev->func(skb2, skb->dev, pt_prev);
				}
				/* Remember the current last to do */
				pt_prev=ptype;
			}
		} /* End of protocol list loop */
		
		/*
		 *	Is there a last item to send to ?
		 */

		if(pt_prev)
			pt_prev->func(skb, skb->dev, pt_prev);
		/*
		 * 	Has an unknown packet has been received ?
		 */
	 
		else
			kfree_skb(skb, FREE_WRITE);
		/*
		 *	Again, see if we can transmit anything now. 
		 *	[Ought to take this out judging by tests it slows
		 *	 us down not speeds us up]
		 */
#ifdef XMIT_EVERY
		dev_transmit();
#endif		
  	}	/* End of queue loop */
  	
  	/*
  	 *	We have emptied the queue
  	 */
	
	/*
	 *	One last output flush.
	 */

#ifdef XMIT_AFTER	 
	dev_transmit();
#endif
}
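The pt_prev shuffle in both loops is deliberate: delivery is deferred by one match, so every handler except the last receives a clone and the final handler consumes the original skb, saving one skb_clone() per packet. The idiom on its own (the handler signature follows the historical code above; modern kernels pass an extra orig_dev argument and use single-argument kfree_skb()):

static void deliver_all(struct sk_buff *skb, struct packet_type *list)
{
	struct packet_type *ptype, *pt_prev = NULL;

	for (ptype = list; ptype != NULL; ptype = ptype->next) {
		if (pt_prev) {
			/* Not the last match yet: hand out a clone. */
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (skb2)
				pt_prev->func(skb2, skb->dev, pt_prev);
		}
		pt_prev = ptype;
	}

	if (pt_prev)
		pt_prev->func(skb, skb->dev, pt_prev);	/* last match: original */
	else
		kfree_skb(skb);				/* nobody wanted it */
}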
Example no. 23
/**
 * accept - wait for connection request
 * @sock: listening socket
 * @newsock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int accept(struct socket *sock, struct socket *new_sock, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *buf;
	int res;

	lock_sock(sk);

	if (sock->state != SS_LISTENING) {
		res = -EINVAL;
		goto exit;
	}

	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (flags & O_NONBLOCK) {
			res = -EWOULDBLOCK;
			goto exit;
		}
		release_sock(sk);
		res = wait_event_interruptible(*sk_sleep(sk),
				(!skb_queue_empty(&sk->sk_receive_queue)));
		lock_sock(sk);
		if (res)
			goto exit;
	}

	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_create(sock_net(sock->sk), new_sock, 0, 0);
	if (!res) {
		struct sock *new_sk = new_sock->sk;
		struct tipc_sock *new_tsock = tipc_sk(new_sk);
		struct tipc_port *new_tport = new_tsock->p;
		u32 new_ref = new_tport->ref;
		struct tipc_msg *msg = buf_msg(buf);

		lock_sock(new_sk);

		/*
		 * Reject any stray messages received by new socket
		 * before the socket lock was taken (very, very unlikely)
		 */
		reject_rx_queue(new_sk);

		/* Connect new socket to its peer */
		new_tsock->peer_name.ref = msg_origport(msg);
		new_tsock->peer_name.node = msg_orignode(msg);
		tipc_connect2port(new_ref, &new_tsock->peer_name);
		new_sock->state = SS_CONNECTED;

		tipc_set_portimportance(new_ref, msg_importance(msg));
		if (msg_named(msg)) {
			new_tport->conn_type = msg_nametype(msg);
			new_tport->conn_instance = msg_nameinst(msg);
		}

		/*
		 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
		 * Respond to 'SYN+' by queuing it on new socket.
		 */
		if (!msg_data_sz(msg)) {
			struct msghdr m = {NULL,};

			advance_rx_queue(sk);
			send_packet(NULL, new_sock, &m, 0);
		} else {
			__skb_dequeue(&sk->sk_receive_queue);
			__skb_queue_head(&new_sk->sk_receive_queue, buf);
		}
		release_sock(new_sk);
	}
exit:
	release_sock(sk);
	return res;
}
Example no. 24
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (struct hci_event_hdr *) skb->data;
	struct hci_ev_cmd_complete *ec;
	struct hci_ev_cmd_status *cs;
	u16 opcode, ocf, ogf;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	BT_DBG("%s evt 0x%x", hdev->name, hdr->evt);

	switch (hdr->evt) {
	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_CONN_LINK_KEY_COMPLETE:
		hci_change_conn_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		cs = (struct hci_ev_cmd_status *) skb->data;
		skb_pull(skb, sizeof(*cs));

		opcode = __le16_to_cpu(cs->opcode);
		ogf = hci_opcode_ogf(opcode);
		ocf = hci_opcode_ocf(opcode);

		switch (ogf) {
		case OGF_INFO_PARAM:
			hci_cs_info_param(hdev, ocf, cs->status);
			break;

		case OGF_HOST_CTL:
			hci_cs_host_ctl(hdev, ocf, cs->status);
			break;

		case OGF_LINK_CTL:
			hci_cs_link_ctl(hdev, ocf, cs->status);
			break;

		case OGF_LINK_POLICY:
			hci_cs_link_policy(hdev, ocf, cs->status);
			break;

		default:
			BT_DBG("%s Command Status OGF %x", hdev->name, ogf);
			break;
		}

		if (cs->ncmd) {
			atomic_set(&hdev->cmd_cnt, 1);
			if (!skb_queue_empty(&hdev->cmd_q))
				hci_sched_cmd(hdev);
		}
		break;

	case HCI_EV_CMD_COMPLETE:
		ec = (struct hci_ev_cmd_complete *) skb->data;
		skb_pull(skb, sizeof(*ec));

		opcode = __le16_to_cpu(ec->opcode);
		ogf = hci_opcode_ogf(opcode);
		ocf = hci_opcode_ocf(opcode);

		switch (ogf) {
		case OGF_INFO_PARAM:
			hci_cc_info_param(hdev, ocf, skb);
			break;

		case OGF_HOST_CTL:
			hci_cc_host_ctl(hdev, ocf, skb);
			break;

		case OGF_LINK_CTL:
			hci_cc_link_ctl(hdev, ocf, skb);
			break;

		case OGF_LINK_POLICY:
			hci_cc_link_policy(hdev, ocf, skb);
			break;

		default:
			BT_DBG("%s Command Completed OGF %x", hdev->name, ogf);
			break;
		}

		if (ec->ncmd) {
			atomic_set(&hdev->cmd_cnt, 1);
			if (!skb_queue_empty(&hdev->cmd_q))
				hci_sched_cmd(hdev);
		}
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
Example no. 25
static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
				    struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.noise = -98,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/;
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast +=  skb->len;
		}

		rtl_is_special_data(hw, skb, false);

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}
	}
}

static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
				      struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.noise = -98,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/;
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast +=  skb->len;
		}

		rtl_is_special_data(hw, skb, false);

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}
		if (likely(rtl_action_proc(hw, skb, false))) {
			struct sk_buff *uskb = NULL;
			u8 *pdata;

			uskb = dev_alloc_skb(skb->len + 128);
			if (uskb) {	/* drop packet on allocation failure */
				memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
				       sizeof(rx_status));
				pdata = (u8 *)skb_put(uskb, skb->len);
				memcpy(pdata, skb->data, skb->len);
				ieee80211_rx_irqsafe(hw, uskb);
			}
			dev_kfree_skb_any(skb);
		} else {
			dev_kfree_skb_any(skb);
		}
	}
}

static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct sk_buff *_skb;
	struct sk_buff_head rx_queue;
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	skb_queue_head_init(&rx_queue);
	if (rtlusb->usb_rx_segregate_hdl)
		rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
	WARN_ON(skb_queue_empty(&rx_queue));
	while (!skb_queue_empty(&rx_queue)) {
		_skb = skb_dequeue(&rx_queue);
		_rtl_usb_rx_process_agg(hw, _skb);
		ieee80211_rx_irqsafe(hw, _skb);
	}
}

static void _rtl_rx_completed(struct urb *_urb)
{
	struct sk_buff *skb = (struct sk_buff *)_urb->context;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
	struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int err = 0;

	if (unlikely(IS_USB_STOP(rtlusb)))
		goto free;

	if (likely(0 == _urb->status)) {
		/* If this code were moved to work queue, would CPU
		 * utilization be improved?  NOTE: We shall allocate another skb
		 * and reuse the original one.
		 */
		skb_put(skb, _urb->actual_length);

		if (likely(!rtlusb->usb_rx_segregate_hdl)) {
			struct sk_buff *_skb;
			_rtl_usb_rx_process_noagg(hw, skb);
			_skb = _rtl_prep_rx_urb(hw, rtlusb, _urb, GFP_ATOMIC);
			if (IS_ERR(_skb)) {
				err = PTR_ERR(_skb);
				RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
					 "Can't allocate skb for bulk IN!\n");
				return;
			}
			skb = _skb;
		} else {
			/* TO DO */
			_rtl_rx_pre_process(hw, skb);
			pr_err("rx agg not supported\n");
		}
		goto resubmit;
	}

	switch (_urb->status) {
	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;
	default:
		break;
	}

resubmit:
	skb_reset_tail_pointer(skb);
	skb_trim(skb, 0);

	usb_anchor_urb(_urb, &rtlusb->rx_submitted);
	err = usb_submit_urb(_urb, GFP_ATOMIC);
	if (unlikely(err)) {
		usb_unanchor_urb(_urb);
		goto free;
	}
	return;

free:
	dev_kfree_skb_irq(skb);
}
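/*
 * Sketch of the completion/resubmit cycle implemented above (reduced;
 * the skb handling and rtlusb bookkeeping are omitted): terminal URB
 * statuses mean the device is gone and the buffer must be freed, while
 * anything else is recycled by re-anchoring and resubmitting the same
 * URB with GFP_ATOMIC.
 */
static void rx_resubmit_sketch(struct urb *urb, struct usb_anchor *anchor)
{
	switch (urb->status) {
	case 0:			/* success: payload was consumed above */
	default:		/* transient error: try again */
		break;
	case -ENOENT:		/* urb was killed */
	case -ECONNRESET:	/* async unlink */
	case -ENODEV:		/* device disconnected */
	case -ESHUTDOWN:	/* host controller going away */
		return;		/* caller frees the buffer */
	}

	usb_anchor_urb(urb, anchor);
	if (usb_submit_urb(urb, GFP_ATOMIC))
		usb_unanchor_urb(urb);	/* submit failed; undo the anchor */
}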

static int _rtl_usb_receive(struct ieee80211_hw *hw)
{
	struct urb *urb;
	struct sk_buff *skb;
	int err;
	int i;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	WARN_ON(0 == rtlusb->rx_urb_num);
	/* 1600 == 1514 + max WLAN header + rtk info */
	WARN_ON(rtlusb->rx_max_size < 1600);

	for (i = 0; i < rtlusb->rx_urb_num; i++) {
		err = -ENOMEM;
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Failed to alloc URB!!\n");
			goto err_out;
		}

		skb = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
		if (IS_ERR(skb)) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Failed to prep_rx_urb!!\n");
			err = PTR_ERR(skb);
			goto err_out;
		}

		usb_anchor_urb(urb, &rtlusb->rx_submitted);
		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err)
			goto err_out;
		usb_free_urb(urb);
	}
	return 0;

err_out:
	usb_kill_anchored_urbs(&rtlusb->rx_submitted);
	return err;
}

static int rtl_usb_start(struct ieee80211_hw *hw)
{
	int err;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	err = rtlpriv->cfg->ops->hw_init(hw);
	if (!err) {
		rtl_init_rx_config(hw);

		/* Enable software */
		SET_USB_START(rtlusb);
		/* should after adapter start and interrupt enable. */
		set_hal_start(rtlhal);

		/* Start bulk IN */
		_rtl_usb_receive(hw);
	}

	return err;
}
/**
 *
 *
 */

/*=======================  tx =========================================*/
static void rtl_usb_cleanup(struct ieee80211_hw *hw)
{
	u32 i;
	struct sk_buff *_skb;
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
	struct ieee80211_tx_info *txinfo;

	SET_USB_STOP(rtlusb);

	/* clean up rx stuff. */
	usb_kill_anchored_urbs(&rtlusb->rx_submitted);

	/* clean up tx stuff */
	for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
		while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) {
			rtlusb->usb_tx_cleanup(hw, _skb);
			txinfo = IEEE80211_SKB_CB(_skb);
			ieee80211_tx_info_clear_status(txinfo);
			txinfo->flags |= IEEE80211_TX_STAT_ACK;
			ieee80211_tx_status_irqsafe(hw, _skb);
		}
		usb_kill_anchored_urbs(&rtlusb->tx_pending[i]);
	}
	usb_kill_anchored_urbs(&rtlusb->tx_submitted);
}
/*-----------------------------------------------------------------------------
 * Function:	cmpk_message_handle_tx()
 *
 * Overview:	Driver internal module can call the API to send message to
 *				firmware side. For example, you can send a debug command packet.
 *				Or you can send a request for FW to modify RLX4181 LBUS HW bank.
 *				Otherwise, you can change MAC/PHT/RF register by firmware at
 *				run time. We do not support message more than one segment now.
 *
 * Input:		NONE
 *
 * Output:		NONE
 *
 * Return:		NONE
 *
 * Revised History:
 *	When		Who		Remark
 *	05/06/2008	amy		porting from windows code.
 *
 *---------------------------------------------------------------------------*/
 extern	rt_status	cmpk_message_handle_tx(
	struct net_device *dev,
	u8*	codevirtualaddress,
	u32	packettype,
	u32	buffer_len)
{

	bool	    rt_status = true;
#ifdef RTL8192U
	return rt_status;
#else
	struct r8192_priv   *priv = ieee80211_priv(dev);
	u16		    frag_threshold;
	u16		    frag_length, frag_offset = 0;
	//u16		    total_size;
	//int		    i;

	rt_firmware	    *pfirmware = priv->pFirmware;
	struct sk_buff	    *skb;
	unsigned char	    *seg_ptr;
	cb_desc		    *tcb_desc;
	u8                  bLastIniPkt;

	firmware_init_param(dev);
	//Fragmentation might be required
	frag_threshold = pfirmware->cmdpacket_frag_thresold;
	do {
		if((buffer_len - frag_offset) > frag_threshold) {
			frag_length = frag_threshold ;
			bLastIniPkt = 0;

		} else {
			frag_length = buffer_len - frag_offset;
			bLastIniPkt = 1;

		}

		/* Allocate skb buffer to contain firmware info and tx descriptor info
		 * add 4 to avoid packet appending overflow.
		 * */
		#ifdef RTL8192U
		skb  = dev_alloc_skb(USB_HWDESC_HEADER_LEN + frag_length + 4);
		#else
		skb  = dev_alloc_skb(frag_length + 4);
		#endif
		memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
		tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->queue_index = TXCMD_QUEUE;
		tcb_desc->bCmdOrInit = packettype;
		tcb_desc->bLastIniPkt = bLastIniPkt;

		#ifdef RTL8192U
		skb_reserve(skb, USB_HWDESC_HEADER_LEN);
		#endif

		seg_ptr = skb_put(skb, buffer_len);
		/*
		 * Transform from little endian to big endian
		 * and pending zero
		 */
		memcpy(seg_ptr,codevirtualaddress,buffer_len);
		tcb_desc->txbuf_size= (u16)buffer_len;


		if(!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)||
			(!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\
			(priv->ieee80211->queue_stop) ) {
			RT_TRACE(COMP_FIRMWARE,"=====================================================> tx full!\n");
			skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
		} else {
			priv->ieee80211->softmac_hard_start_xmit(skb,dev);
		}

		codevirtualaddress += frag_length;
		frag_offset += frag_length;

	}while(frag_offset < buffer_len);

	return rt_status;


#endif
}	/* CMPK_Message_Handle_Tx */
Esempio n. 27
0
static struct sk_buff_head *msm_ipc_router_build_msg(unsigned int num_sect,
					  struct iovec const *msg_sect,
					  size_t total_len)
{
	struct sk_buff_head *msg_head;
	struct sk_buff *msg;
	int i, copied, first = 1;
	int data_size = 0, request_size, offset;
	void *data;

	for (i = 0; i < num_sect; i++)
		data_size += msg_sect[i].iov_len;

	if (!data_size)
		return NULL;

	msg_head = kmalloc(sizeof(struct sk_buff_head), GFP_KERNEL);
	if (!msg_head) {
		pr_err("%s: cannot allocate skb_head\n", __func__);
		return NULL;
	}
	skb_queue_head_init(msg_head);

	for (copied = 1, i = 0; copied && (i < num_sect); i++) {
		data_size = msg_sect[i].iov_len;
		offset = 0;
		while (offset != msg_sect[i].iov_len) {
			request_size = data_size;
			if (first)
				request_size += IPC_ROUTER_HDR_SIZE;

			msg = alloc_skb(request_size, GFP_KERNEL);
			if (!msg) {
				if (request_size <= (PAGE_SIZE/2)) {
					pr_err("%s: cannot allocated skb\n",
						__func__);
					goto msg_build_failure;
				}
				data_size = data_size / 2;
				continue;
			}

			if (first) {
				skb_reserve(msg, IPC_ROUTER_HDR_SIZE);
				first = 0;
			}

			data = skb_put(msg, data_size);
			copied = !copy_from_user(msg->data,
					msg_sect[i].iov_base + offset,
					data_size);
			if (!copied) {
				pr_err("%s: copy_from_user failed\n",
					__func__);
				kfree_skb(msg);
				goto msg_build_failure;
			}
			skb_queue_tail(msg_head, msg);
			offset += data_size;
			data_size = msg_sect[i].iov_len - offset;
		}
	}
	return msg_head;

msg_build_failure:
	while (!skb_queue_empty(msg_head)) {
		msg = skb_dequeue(msg_head);
		kfree_skb(msg);
	}
	kfree(msg_head);
	return NULL;
}
Esempio n. 28
0
int usbnet_stop (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	struct driver_info	*info = dev->driver_info;
#if !defined(CONFIG_ERICSSON_F3307_ENABLE)
	int			temp;
#endif
	int			retval;
#if !defined(CONFIG_ERICSSON_F3307_ENABLE)
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK (unlink_wakeup);
	DECLARE_WAITQUEUE (wait, current);
#endif
	netif_stop_queue (net);

	if (netif_msg_ifdown (dev))
		devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
			net->stats.rx_packets, net->stats.tx_packets,
			net->stats.rx_errors, net->stats.tx_errors
			);

	/* allow minidriver to stop correctly (wireless devices to turn off
	 * radio etc) */
	if (info->stop) {
		retval = info->stop(dev);
		if (retval < 0 && netif_msg_ifdown(dev))
			devinfo(dev,
				"stop fail (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
				info->description);
	}
#if !defined(CONFIG_ERICSSON_F3307_ENABLE)
	if (!(info->flags & FLAG_AVOID_UNLINK_URBS)) {
		/* ensure there are no more active urbs */
		add_wait_queue(&unlink_wakeup, &wait);
		dev->wait = &unlink_wakeup;
		temp = unlink_urbs(dev, &dev->txq) +
			unlink_urbs(dev, &dev->rxq);

		/* maybe wait for deletions to finish. */
		while (!skb_queue_empty(&dev->rxq)
				&& !skb_queue_empty(&dev->txq)
				&& !skb_queue_empty(&dev->done)) {
			msleep(UNLINK_TIMEOUT_MS);
			if (netif_msg_ifdown(dev))
				devdbg(dev, "waited for %d urb completions",
					temp);
		}
		dev->wait = NULL;
		remove_wait_queue(&unlink_wakeup, &wait);
	}
#else
	if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
 		usbnet_terminate_urbs(dev);
#endif

	usb_kill_urb(dev->interrupt);

	usbnet_purge_paused_rxq(dev);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	del_timer_sync (&dev->delay);
	tasklet_kill (&dev->bh);
#if !defined(CONFIG_ERICSSON_F3307_ENABLE)
	usb_autopm_put_interface(dev->intf);
#else
	if (info->manage_power)
		info->manage_power(dev, 0);
	else
		usb_autopm_put_interface(dev->intf);
#endif

	return 0;
}
Esempio n. 29
0
File: net.c Progetto: 020gzh/linux
/* Expects to be always run from workqueue - which acts as
 * read-size critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err;
	size_t hdr_size;
	struct socket *sock;
	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
	bool zcopy, zcopy_used;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	if (!sock)
		goto out;

	vhost_disable_notify(&net->dev, vq);

	hdr_size = nvq->vhost_hlen;
	zcopy = nvq->ubufs;

	for (;;) {
		/* Release DMAs done buffers first */
		if (zcopy)
			vhost_zerocopy_signal_used(net, vq);

		/* If more outstanding DMAs, queue the work.
		 * Handle upend_idx wrap around
		 */
		if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND)
			      % UIO_MAXIOV == nvq->done_idx))
			break;

		head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
						ARRAY_SIZE(vq->iov),
						&out, &in);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, int %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		len = iov_length(vq->iov, out);
		iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
		iov_iter_advance(&msg.msg_iter, hdr_size);
		/* Sanity check */
		if (!msg_data_left(&msg)) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       len, hdr_size);
			break;
		}
		len = msg_data_left(&msg);

		zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
				   && (nvq->upend_idx + 1) % UIO_MAXIOV !=
				      nvq->done_idx
				   && vhost_net_tx_select_zcopy(net);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			struct ubuf_info *ubuf;
			ubuf = nvq->ubuf_info + nvq->upend_idx;

			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
			ubuf->callback = vhost_zerocopy_callback;
			ubuf->ctx = nvq->ubufs;
			ubuf->desc = nvq->upend_idx;
			msg.msg_control = ubuf;
			msg.msg_controllen = sizeof(ubuf);
			ubufs = nvq->ubufs;
			atomic_inc(&ubufs->refcount);
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else {
			msg.msg_control = NULL;
			ubufs = NULL;
		}
		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy_used) {
				vhost_net_ubuf_put(ubufs);
				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
					% UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		total_len += len;
		vhost_net_tx_packet(net);
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}
out:
	mutex_unlock(&vq->mutex);
}

static int peek_head_len(struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (skb_vlan_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned long uninitialized_var(endtime);
	int len = peek_head_len(sk);

	if (!len && vq->busyloop_timeout) {
		/* Both tx vq and rx socket were polled here */
		mutex_lock(&vq->mutex);
		vhost_disable_notify(&net->dev, vq);

		preempt_disable();
		endtime = busy_clock() + vq->busyloop_timeout;

		while (vhost_can_busy_poll(&net->dev, endtime) &&
		       skb_queue_empty(&sk->sk_receive_queue) &&
		       vhost_vq_avail_empty(&net->dev, vq))
			cpu_relax_lowlatency();

		preempt_enable();

		if (vhost_enable_notify(&net->dev, vq))
			vhost_poll_queue(&vq->poll);
		mutex_unlock(&vq->mutex);

		len = peek_head_len(sk);
	}

	return len;
}

/* This is a multi-buffer version of vhost_get_desc, that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota       - headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;
	/* len is always initialized before use since we are always called with
	 * datalen > 0.
	 */
	u32 uninitialized_var(len);

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		r = vhost_get_vq_desc(vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (unlikely(r < 0))
			goto err;

		d = r;
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = cpu_to_vhost32(vq, d);
		len = iov_length(vq->iov + seg, in);
		heads[headcount].len = cpu_to_vhost32(vq, len);
		datalen -= len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;

	/* Detect overrun */
	if (unlikely(datalen > 0)) {
		r = UIO_MAXIOV + 1;
		goto err;
	}
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}
Esempio n. 30
0
static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
			     u32 buffer_len)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	bool		    rt_status = true;
	u16		    frag_threshold;
	u16		    frag_length, frag_offset = 0;
	int		    i;

	struct rt_firmware *pfirmware = priv->pFirmware;
	struct sk_buff	    *skb;
	unsigned char	    *seg_ptr;
	struct cb_desc *tcb_desc;
	u8                  bLastIniPkt;

	firmware_init_param(dev);
	frag_threshold = pfirmware->cmdpacket_frag_thresold;
	do {
		if ((buffer_len - frag_offset) > frag_threshold) {
			frag_length = frag_threshold ;
			bLastIniPkt = 0;

		} else {
			frag_length = buffer_len - frag_offset;
			bLastIniPkt = 1;

		}

		skb  = dev_alloc_skb(frag_length + 4);
		memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
		tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->queue_index = TXCMD_QUEUE;
		tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT;
		tcb_desc->bLastIniPkt = bLastIniPkt;

		seg_ptr = skb->data;
		for (i = 0; i < frag_length; i += 4) {
			*seg_ptr++ = ((i+0) < frag_length) ?
				     code_virtual_address[i+3] : 0;
			*seg_ptr++ = ((i+1) < frag_length) ?
				     code_virtual_address[i+2] : 0;
			*seg_ptr++ = ((i+2) < frag_length) ?
				     code_virtual_address[i+1] : 0;
			*seg_ptr++ = ((i+3) < frag_length) ?
				     code_virtual_address[i+0] : 0;
		}
		tcb_desc->txbuf_size = (u16)i;
		skb_put(skb, i);

		if (!priv->rtllib->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
		    (!skb_queue_empty(&priv->rtllib->skb_waitQ[tcb_desc->queue_index])) ||
		    (priv->rtllib->queue_stop)) {
			RT_TRACE(COMP_FIRMWARE, "===================> tx "
				 "full!\n");
			skb_queue_tail(&priv->rtllib->skb_waitQ
					[tcb_desc->queue_index], skb);
		} else {
		priv->rtllib->softmac_hard_start_xmit(skb, dev);
		}

		code_virtual_address += frag_length;
		frag_offset += frag_length;

	} while (frag_offset < buffer_len);

	write_nic_byte(dev, TPPoll, TPPoll_CQ);

	return rt_status;
}