struct sk_buff *
cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
{
	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
	struct usb_cdc_ncm_nth16 *nth16;
	struct usb_cdc_ncm_ndp16 *ndp16;
	struct sk_buff *skb_out;
	u16 n = 0, index, ndplen;
	u8 ready2send = 0;

	/* if there is a remaining skb, it gets priority */
	if (skb != NULL) {
		swap(skb, ctx->tx_rem_skb);
		swap(sign, ctx->tx_rem_sign);
	} else {
		ready2send = 1;
	}

	/* check if we are resuming an OUT skb */
	skb_out = ctx->tx_curr_skb;

	/* allocate a new OUT skb */
	if (!skb_out) {
		skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
		if (skb_out == NULL) {
			if (skb != NULL) {
				dev_kfree_skb_any(skb);
				dev->net->stats.tx_dropped++;
			}
			goto exit_no_skb;
		}
		/* fill out the initial 16-bit NTB header */
		nth16 = (struct usb_cdc_ncm_nth16 *)memset(skb_put(skb_out, sizeof(struct usb_cdc_ncm_nth16)),
							   0, sizeof(struct usb_cdc_ncm_nth16));
		nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
		nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
		nth16->wSequence = cpu_to_le16(ctx->tx_seq++);

		/* count total number of frames in this NTB */
		ctx->tx_curr_frame_num = 0;
	}

	for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) {
		/* send any remaining skb first */
		if (skb == NULL) {
			skb = ctx->tx_rem_skb;
			sign = ctx->tx_rem_sign;
			ctx->tx_rem_skb = NULL;

			/* check for end of skb */
			if (skb == NULL)
				break;
		}

		/* get the appropriate NDP for this skb */
		ndp16 = cdc_ncm_ndp(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);

		/* align beginning of next frame */
		cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max);

		/* check if we had enough room left for both NDP and frame */
		if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) {
			if (n == 0) {
				/* won't fit, MTU problem? */
				dev_kfree_skb_any(skb);
				skb = NULL;
				dev->net->stats.tx_dropped++;
			} else {
				/* no room for skb - store for later */
				if (ctx->tx_rem_skb != NULL) {
					dev_kfree_skb_any(ctx->tx_rem_skb);
					dev->net->stats.tx_dropped++;
				}
				ctx->tx_rem_skb = skb;
				ctx->tx_rem_sign = sign;
				skb = NULL;
				ready2send = 1;
			}
			break;
		}

		/* calculate frame number within this NDP */
		ndplen = le16_to_cpu(ndp16->wLength);
		index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1;
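		/* Illustrative arithmetic, assuming cdc_ncm_ndp() initializes
		 * wLength to one NDP16 header plus one terminating DPE16
		 * (8 + 4 bytes): the first datagram sees ndplen = 12, so
		 * index = 0; after the wLength update below, the next
		 * datagram gets index = 1. */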

		/* OK, add this skb */
		ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
		ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
		ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
		memcpy(skb_put(skb_out, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = NULL;

		/* send now if this NDP is full */
		if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) {
			ready2send = 1;
			break;
		}
	}

	/* free up any dangling skb */
	if (skb != NULL) {
		dev_kfree_skb_any(skb);
		skb = NULL;
		dev->net->stats.tx_dropped++;
	}

	ctx->tx_curr_frame_num = n;

	if (n == 0) {
		/* wait for more frames */
		/* push variables */
		ctx->tx_curr_skb = skb_out;
		goto exit_no_skb;

	} else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
		/* wait for more frames */
		/* push variables */
		ctx->tx_curr_skb = skb_out;
		/* set the pending count */
		if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT)
			ctx->tx_timer_pending = CDC_NCM_TIMER_PENDING_CNT;
		goto exit_no_skb;

	} else {
		/* frame goes out */
		/* variables will be reset at next call */
	}

	/* If the collected data size is less than or equal to
	 * CDC_NCM_MIN_TX_PKT bytes, we send the buffer as is. If we get
	 * more data, it is more efficient for a USB HS mobile device
	 * with a DMA engine to receive a full-size NTB than to cancel
	 * the DMA transfer and receive a short packet.
	 *
	 * This optimization support is pointless if we end up sending
	 * a ZLP after full sized NTBs.
	 */
	if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
	    skb_out->len > CDC_NCM_MIN_TX_PKT)
		memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
		       ctx->tx_max - skb_out->len);
	else if ((skb_out->len % dev->maxpacket) == 0)
		*skb_put(skb_out, 1) = 0;	/* force short packet */

	/* set final frame length */
	nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
	nth16->wBlockLength = cpu_to_le16(skb_out->len);

	/* return skb */
	ctx->tx_curr_skb = NULL;
	dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
	return skb_out;

exit_no_skb:
	/* Start timer, if there is a remaining skb */
	if (ctx->tx_curr_skb != NULL)
		cdc_ncm_tx_timeout_start(ctx);
	return NULL;
}
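
A hedged caller sketch for the function above (not part of the original example): a usbnet tx_fixup-style hook that feeds datagrams into cdc_ncm_fill_tx_frame(). The lock field, NDP signature constant and wrapper name are assumptions for illustration only.

static struct sk_buff *
example_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
	struct sk_buff *skb_out;

	spin_lock_bh(&ctx->mtx);	/* serialize against the delayed-tx timer (assumed lock) */
	skb_out = cdc_ncm_fill_tx_frame(dev, skb,
					cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
	spin_unlock_bh(&ctx->mtx);

	/* NULL means the datagram was held back; wait for more data or the timer */
	return skb_out;
}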
Example #2
/* replenish the buffers for a pool.  note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
						adapter->netdev->mtu +
						IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
		                 pool->dma_addr[index], pool->buff_size,
		                 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}
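
A hedged sketch (not from the ibmveth example above) of the replenish task that would call ibmveth_replenish_buffer_pool() for each active pool that has dropped below its threshold; the pool-array field, pool count and threshold member are assumed names.

static void example_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {	/* assumed constant */
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    atomic_read(&pool->available) < pool->threshold)
			ibmveth_replenish_buffer_pool(adapter, pool);
	}
}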
/* transmit packet */
static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	unsigned long flags;
	u32 length;
	u32 chain_prev, chain_next;
	struct dma_desc *td;

	spin_lock_irqsave(&lp->lock, flags);

	td = &lp->td_ring[lp->tx_chain_tail];

	/* stop queue when full, drop pkts if queue already full */
	if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
		lp->tx_full = 1;

		if (lp->tx_count == (KORINA_NUM_TDS - 2))
			netif_stop_queue(dev);
		else {
			dev->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&lp->lock, flags);

			return NETDEV_TX_BUSY;
		}
	}

	lp->tx_count++;

	lp->tx_skb[lp->tx_chain_tail] = skb;

	length = skb->len;
	dma_cache_wback((u32)skb->data, skb->len);

	/* Setup the transmit descriptor. */
	dma_cache_inv((u32) td, sizeof(*td));
	td->ca = CPHYSADDR(skb->data);
	chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
	chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;
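	/* Ring-index sketch (illustrative, assuming KORINA_TDS_MASK ==
	 * KORINA_NUM_TDS - 1 with a power-of-two ring size, e.g. 64):
	 * a tail of 0 wraps to chain_prev = 63 and chain_next = 1. */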

	if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&lp->tx_dma_regs->dmandptr);
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Link to prev */
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			/* Link to prev */
			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&(lp->tx_dma_regs->dmandptr));
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
			lp->tx_chain_status = desc_empty;
		}
	} else {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			lp->tx_chain_status = desc_filled;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
			lp->tx_chain_tail = chain_next;
		}
	}
	dma_cache_wback((u32) td, sizeof(*td));

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;
	bool		queue = 0;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
				if (status == -EINVAL)
					dev->net->stats.rx_errors++;
				else if (status == -EOVERFLOW)
					dev->net->stats.rx_over_errors++;
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}

		if (!status)
			queue = 1;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		queue = 1;
		dev_kfree_skb_any(skb);
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

clean:
	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->rx_reqs);
	spin_unlock(&dev->req_lock);

	if (queue)
		queue_work(uether_wq, &dev->rx_work);
}
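
For context, a hedged sketch of the deferred rx worker that the queue_work() call above schedules; it drains dev->rx_frames and hands the packets to the network stack. The worker name and exact bookkeeping are assumptions.

static void example_process_rx_work(struct work_struct *work)
{
	struct eth_dev *dev = container_of(work, struct eth_dev, rx_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&dev->rx_frames)) != NULL) {
		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;
		netif_rx(skb);
	}
}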
Example #5
int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
{
	struct rtllib_device *ieee = (struct rtllib_device *)netdev_priv_rsl(dev);
	struct rtllib_txb *txb = NULL;
	struct rtllib_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;
	struct rtllib_crypt_data* crypt;
	cb_desc *tcb_desc;
	u8 bIsMulticast = false;
#if defined(RTL8192U) || defined(RTL8192SU) || defined(RTL8192SE)
	struct sta_info *p_sta = NULL;
#endif	
	u8 IsAmsdu = false;
#ifdef ENABLE_AMSDU	
	u8 queue_index = WME_AC_BE;
	cb_desc *tcb_desc_skb;
	u8 bIsSptAmsdu = false;
#endif	

	bool	bdhcp =false;
#ifndef _RTL8192_EXT_PATCH_
	//PRT_POWER_SAVE_CONTROL pPSC = (PRT_POWER_SAVE_CONTROL)(&(ieee->PowerSaveControl));//added by amy for Leisure PS 090402
#endif
	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it... */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
	   ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}
	

	if(likely(ieee->raw_tx == 0)){
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			ieee->dev->name, skb->len);
			goto success;
		}

		/* Save source and destination addresses */
		memcpy(dest, skb->data, ETH_ALEN);
		memcpy(src, skb->data+ETH_ALEN, ETH_ALEN);
	
#ifdef ENABLE_AMSDU	
		if(ieee->iw_mode == IW_MODE_ADHOC)
		{
			p_sta = GetStaInfo(ieee, dest);
			if(p_sta)	{
				if(p_sta->htinfo.bEnableHT)
					bIsSptAmsdu = true;
			}
		}else if(ieee->iw_mode == IW_MODE_INFRA) {
			bIsSptAmsdu = true;
		}else
			bIsSptAmsdu = true;
		bIsSptAmsdu = (bIsSptAmsdu && ieee->pHTInfo->bCurrent_AMSDU_Support && qos_actived);
			
		//u8 *a = skb->data;
		//u8 *b = (u8*)skb->data + ETH_ALEN;
		//printk("\n&&&&&&&skb=%p len=%d dst:"MAC_FMT" src:"MAC_FMT"\n",skb,skb->len,MAC_ARG(a),MAC_ARG(b));
		tcb_desc_skb = (pcb_desc)(skb->cb + MAX_DEV_ADDR_SIZE);  //YJ,move,081104
		if(bIsSptAmsdu) {
			if(!tcb_desc_skb->bFromAggrQ)  //Normal MSDU
			{
				if(qos_actived)
				{
					queue_index = UP2AC(skb->priority);
				} else {
					queue_index = WME_AC_BE;
				}

				//printk("Normal MSDU,queue_idx=%d nic_enough=%d queue_len=%d\n", queue_index, ieee->check_nic_enough_desc(ieee->dev,queue_index), skb_queue_len(&ieee->skb_aggQ[queue_index]));
				if ((skb_queue_len(&ieee->skb_aggQ[queue_index]) != 0)||
#if defined RTL8192SE || defined RTL8192CE
				   (ieee->get_nic_desc_num(ieee->dev,queue_index)) > 1||
#else
				   (!ieee->check_nic_enough_desc(ieee->dev,queue_index))||
#endif
				   (ieee->queue_stop) ||
				   (ieee->amsdu_in_process)) //YJ,add,090409 
				{
					/* insert the skb packet to the Aggregation queue */
					//printk("!!!!!!!!!!%s(): intert to aggr queue\n", __FUNCTION__);
					skb_queue_tail(&ieee->skb_aggQ[queue_index], skb);
					spin_unlock_irqrestore(&ieee->lock, flags);
					return 0;
				}
			}
			else  //AMSDU
			{
				//printk("AMSDU!!!!!!!!!!!!!\n");
				if(tcb_desc_skb->bAMSDU)
					IsAmsdu = true;
				
				//YJ,add,090409
				ieee->amsdu_in_process = false;
			}
		}
#endif	
		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		/* The following is for DHCP and ARP packets: we use CCK 1M to
		 * transmit them and keep LPS awake for a while so the DHCP
		 * exchange does not fail.
		 */
		if (skb->len > 282) { /* MINIMUM_DHCP_PACKET_SIZE */
			if (ETH_P_IP == ether_type) {// IP header
				const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
				if (IPPROTO_UDP == ip->protocol) {//FIXME windows is 11 but here UDP in linux kernel is 17.
					struct udphdr *udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
					//if(((ntohs(udp->source) == 68) && (ntohs(udp->dest) == 67)) ||
					 ///   ((ntohs(udp->source) == 67) && (ntohs(udp->dest) == 68))) {
					if(((((u8 *)udp)[1] == 68) && (((u8 *)udp)[3] == 67)) ||
					    ((((u8 *)udp)[1] == 67) && (((u8 *)udp)[3] == 68))) {
						// 68 : UDP BOOTP client
						// 67 : UDP BOOTP server
						printk("===>DHCP Protocol start tx DHCP pkt src port:%d, dest port:%d!!\n", ((u8 *)udp)[1],((u8 *)udp)[3]);
						// Use low rate to send DHCP packet.
						//if(pMgntInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom)	
						//{
						//	tcb_desc->DataRate = MgntQuery_TxRateExcludeCCKRates(ieee);//0xc;//ofdm 6m
						//	tcb_desc->bTxDisableRateFallBack = false;
						//}
						//else
						//pTcb->DataRate = Adapter->MgntInfo.LowestBasicRate; 
						//RTPRINT(FDM, WA_IOT, ("DHCP TranslateHeader(), pTcb->DataRate = 0x%x\n", pTcb->DataRate));

						bdhcp = true;
#ifdef _RTL8192_EXT_PATCH_
						ieee->LPSDelayCnt = 100;//pPSC->LPSAwakeIntvl*2; //AMY,090701
#else
						ieee->LPSDelayCnt = 100;//pPSC->LPSAwakeIntvl*2;
#endif	
					}
				}
			}else if(ETH_P_ARP == ether_type){// IP ARP packet
				printk("=================>DHCP Protocol start tx ARP pkt!!\n");
				bdhcp = true;
				ieee->LPSDelayCnt = ieee->current_network.tim.tim_count;

				//if(pMgntInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom)	
				//{
				//	tcb_desc->DataRate = MgntQuery_TxRateExcludeCCKRates(Adapter->MgntInfo.mBrates);//0xc;//ofdm 6m
				//	tcb_desc->bTxDisableRateFallBack = FALSE;
				//}
				//else
				//	tcb_desc->DataRate = Adapter->MgntInfo.LowestBasicRate; 
				//RTPRINT(FDM, WA_IOT, ("ARP TranslateHeader(), pTcb->DataRate = 0x%x\n", pTcb->DataRate));

			}
		}
		
		skb->priority = rtllib_classify(skb, IsAmsdu);
	
#ifdef _RTL8192_EXT_PATCH_
		crypt = ieee->sta_crypt[ieee->tx_keyidx];
#else
		crypt = ieee->crypt[ieee->tx_keyidx];
#endif	
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;
	
		if (!encrypt && ieee->ieee802_1x &&
		ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
	#ifdef CONFIG_RTLLIB_DEBUG
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
			RTLLIB_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
				eap_get_type(eap->type));
		}
	#endif
	
		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
#ifdef ENABLE_AMSDU	
		if(!IsAmsdu)
			bytes = skb->len + SNAP_SIZE + sizeof(u16);
		else
			bytes = skb->len;
#else
		bytes = skb->len + SNAP_SIZE + sizeof(u16);
#endif	

		if (encrypt)
			fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
		else 
			fc = RTLLIB_FTYPE_DATA; 
		
		//if(ieee->current_network.QoS_Enable) 
		if(qos_actived)
			fc |= RTLLIB_STYPE_QOS_DATA; 
		else
			fc |= RTLLIB_STYPE_DATA;
	
#ifdef _RTL8192_EXT_PATCH_
		if ((ieee->iw_mode == IW_MODE_INFRA) 
			//|| ((ieee->iw_mode == IW_MODE_MESH) && (ieee->only_mesh == 0)))  //YJ,test,090610
			|| (ieee->iw_mode == IW_MODE_MESH) ) 
#else
		if (ieee->iw_mode == IW_MODE_INFRA) 
#endif
		{
			fc |= RTLLIB_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			Addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			if(IsAmsdu)
				memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
			else
				memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			Addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
		}

		bIsMulticast = is_broadcast_ether_addr(header.addr1) ||is_multicast_ether_addr(header.addr1);

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		* and broadcast are not fragmented) */
		if (bIsMulticast) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		}
		else {
#ifdef ENABLE_AMSDU	
			if(bIsSptAmsdu) {
				if(ieee->iw_mode == IW_MODE_ADHOC) {
					if(p_sta)
						frag_size = p_sta->htinfo.AMSDU_MaxSize;
					else
						frag_size = ieee->pHTInfo->nAMSDU_MaxSize;
				}
				else
					frag_size = ieee->pHTInfo->nAMSDU_MaxSize;
				qos_ctl = 0;
			}
			else
#endif	
			{
				frag_size = ieee->fts;//default:392
				qos_ctl = 0;
			}
		}
	
		if(qos_actived)
		{
			hdr_len = RTLLIB_3ADDR_LEN + 2;

			/* in case we are a client, verify ACM is not set for this AC */
			while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
				printk("skb->priority = %x\n", skb->priority);
				if (wme_downgrade_ac(skb))
					break;
				printk("converted skb->priority = %x\n", skb->priority);
			}
			qos_ctl |= skb->priority; /* set in rtllib_classify */
#ifdef ENABLE_AMSDU
			if (IsAmsdu)
				qos_ctl |= QOS_CTL_AMSDU_PRESENT;
			header.qos_ctl = cpu_to_le16(qos_ctl);
#else
			header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);
#endif
		} else {
			hdr_len = RTLLIB_3ADDR_LEN;		
		}
		/* Determine amount of payload per fragment.  Regardless of
		* whether this stack is providing the full 802.11 header, one
		* will eventually be affixed to this fragment -- so we must
		* account for it when determining the amount of payload space. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		(CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
			bytes_per_frag -= RTLLIB_FCS_LEN;
	
		/* Each fragment may need room for the encryption prefix/postfix */
		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_prefix_len +
				crypt->ops->extra_postfix_len;
	
		/* Number of fragments is the total number of bytes divided by
		* the payload per fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;
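		/*
		 * Worked example (illustrative numbers only): with
		 * frag_size = 392 and hdr_len = 26 (QoS, no FCS or
		 * encryption overhead), bytes_per_frag = 366.  A
		 * 1000-byte payload gives 1000 / 366 = 2 full fragments
		 * plus bytes_last_frag = 268, so nr_frags ends up as 3.
		 */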
	
		/* When we allocate the TXB we allocate enough space for the reserve
		* and full fragment bytes (bytes_per_frag doesn't include prefix,
		* postfix, header, FCS, etc.) */
		txb = rtllib_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		//if (ieee->current_network.QoS_Enable) 
		if(qos_actived)
		{
			txb->queue_index = UP2AC(skb->priority);
		} else {
			txb->queue_index = WME_AC_BE;
		}

		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE);
#ifdef _RTL8192_EXT_PATCH_
			tcb_desc->mesh_pkt = 0;//AMY added 090226
			if(ieee->iw_mode == IW_MODE_ADHOC)
				tcb_desc->badhoc = 1;
			else
				tcb_desc->badhoc = 0;
#endif
			if(qos_actived){
				skb_frag->priority = skb->priority;//UP2AC(skb->priority);	
				tcb_desc->queue_index =  UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BE;
				tcb_desc->queue_index = WME_AC_BE;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt){
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
			}
			else
			{
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct rtllib_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);
	
			/* If this is not the last fragment, then add the MOREFRAGS
			* bit to the frame control */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | RTLLIB_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;
		
			} else {
				/* The last fragment takes the remaining length */
				bytes = bytes_last_frag;
			}
			//if(ieee->current_network.QoS_Enable) 
			if((qos_actived) && (!bIsMulticast))
			{	
				// add 1 only indicate to corresponding seq number control 2006/7/12
				//frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
				frag_hdr->seq_ctl = rtllib_query_seqnum(ieee, skb_frag, header.addr1); 
				frag_hdr->seq_ctl = cpu_to_le16(frag_hdr->seq_ctl<<4 | i);
			} else {
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}
			/* Put a SNAP header on the first fragment */
#ifdef ENABLE_AMSDU	
			if ((i == 0) && (!IsAmsdu)) 
#else
			if (i == 0) 
#endif	
			{
				rtllib_put_snap(
					skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
					ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}
	
			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);
	
			/* Advance the SKB... */
			skb_pull(skb, bytes);
	
			/* Encryption routine will move the header forward in order
			* to insert the IV between the header and the payload */
			if (encrypt)
#ifdef _RTL8192_EXT_PATCH_
				rtllib_encrypt_fragment(ieee, skb_frag, hdr_len, 0);
#else
				rtllib_encrypt_fragment(ieee, skb_frag, hdr_len);
#endif
			if (ieee->config &
			(CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		if((qos_actived) && (!bIsMulticast))
		{
		  if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
			ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
		  else
			ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
  		  if (ieee->seq_ctrl[0] == 0xFFF)
			ieee->seq_ctrl[0] = 0;
		  else
			ieee->seq_ctrl[0]++;
		}
	}else{
		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			ieee->dev->name, skb->len);
			goto success;
		}
	
		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
		if(!txb){
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			ieee->dev->name);
			goto failed;
		}
		
		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len);
	}	

 success:
/* WB: fill the data tcb_desc here. Only the first fragment is considered;
 * this needs to change and may be moved elsewhere. */
	if (txb)
	{
#if 1	
		cb_desc *tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		tcb_desc->priority = skb->priority;

		if (ether_type == ETH_P_PAE) {
			if (ieee->pHTInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom) {
				tcb_desc->data_rate = MgntQuery_TxRateExcludeCCKRates(ieee); /* 0xc: OFDM 6M */
				tcb_desc->bTxDisableRateFallBack = false;
			} else {
				tcb_desc->data_rate = ieee->basic_rate;
				tcb_desc->bTxDisableRateFallBack = 1;
			}

			printk("EAPOL TranslateHeader(), pTcb->DataRate = 0x%x\n", tcb_desc->data_rate);

			tcb_desc->RATRIndex = 7;
			tcb_desc->bTxUseDriverAssingedRate = 1;
		} else {
		if (is_multicast_ether_addr(header.addr1))
			tcb_desc->bMulticast = 1;
		if (is_broadcast_ether_addr(header.addr1))
			tcb_desc->bBroadcast = 1;
#if defined(RTL8192U) || defined(RTL8192SU) || defined(RTL8192SE)
		if ( tcb_desc->bMulticast ||  tcb_desc->bBroadcast){
			rtllib_txrate_selectmode(ieee, tcb_desc, 7);  
			tcb_desc->data_rate = ieee->basic_rate;
		}
		else
		{
			if(ieee->iw_mode == IW_MODE_ADHOC)
			{
				u8 is_peer_shortGI_40M = 0;
				u8 is_peer_shortGI_20M = 0;
				u8 is_peer_BW_40M = 0;
				p_sta = GetStaInfo(ieee, header.addr1);
				if(NULL == p_sta)
				{
					rtllib_txrate_selectmode(ieee, tcb_desc, 7);
					tcb_desc->data_rate = ieee->rate;
				}
				else
				{
					rtllib_txrate_selectmode(ieee, tcb_desc, p_sta->ratr_index);
					tcb_desc->data_rate = CURRENT_RATE(p_sta->wireless_mode, p_sta->CurDataRate, p_sta->htinfo.HTHighestOperaRate);
					is_peer_shortGI_40M = p_sta->htinfo.bCurShortGI40MHz;
					is_peer_shortGI_20M = p_sta->htinfo.bCurShortGI20MHz;
					is_peer_BW_40M = p_sta->htinfo.bCurTxBW40MHz;
				}
				rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
				rtllib_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
				rtllib_ibss_query_HTCapShortGI(ieee, tcb_desc,is_peer_shortGI_40M,is_peer_shortGI_20M); 
				rtllib_ibss_query_BandwidthMode(ieee, tcb_desc,is_peer_BW_40M);
				rtllib_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
				//CB_DESC_DUMP(tcb_desc, __FUNCTION__);
			}
			else {
				rtllib_txrate_selectmode(ieee, tcb_desc, 0); 
				tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);
				if(bdhcp == true){
					// Use low rate to send DHCP packet.
					if(ieee->pHTInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom) {
						tcb_desc->data_rate = MGN_1M;//MgntQuery_TxRateExcludeCCKRates(ieee);//0xc;//ofdm 6m
						tcb_desc->bTxDisableRateFallBack = false;
					}else{
						tcb_desc->data_rate = MGN_1M;
						tcb_desc->bTxDisableRateFallBack = 1;
					}

					tcb_desc->RATRIndex = 7;
					tcb_desc->bTxUseDriverAssingedRate = 1;
					tcb_desc->bdhcp = 1;
				}
				rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
				rtllib_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
				rtllib_query_HTCapShortGI(ieee, tcb_desc); 
				rtllib_query_BandwidthMode(ieee, tcb_desc);
				rtllib_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
				
			}
		}
#else
		rtllib_txrate_selectmode(ieee, tcb_desc);
		if ( tcb_desc->bMulticast ||  tcb_desc->bBroadcast)
			tcb_desc->data_rate = ieee->basic_rate;
		else
			//tcb_desc->data_rate = CURRENT_RATE(ieee->current_network.mode, ieee->rate, ieee->HTCurrentOperaRate);
			tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);

		if(bdhcp == true){
			// Use low rate to send DHCP packet.
			if(ieee->pHTInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom)	
			{
				tcb_desc->data_rate = MgntQuery_TxRateExcludeCCKRates(ieee);//0xc;//ofdm 6m
				tcb_desc->bTxDisableRateFallBack = false;
			} else {
				tcb_desc->data_rate = MGN_1M;
				tcb_desc->bTxDisableRateFallBack = 1;
			}

			//printk("DHCP TranslateHeader(), pTcb->DataRate = 0x%x\n", tcb_desc->data_rate);

			tcb_desc->RATRIndex = 7;
			tcb_desc->bTxUseDriverAssingedRate = 1;
			//tcb_desc->bTxEnableFwCalcDur = 1;
		}
		
		rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
		rtllib_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
		rtllib_query_HTCapShortGI(ieee, tcb_desc); 
		rtllib_query_BandwidthMode(ieee, tcb_desc);
		rtllib_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
#endif
                } 		
//		rtllib_query_seqnum(ieee, txb->fragments[0], header.addr1);
//		RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA, txb->fragments[0]->data, txb->fragments[0]->len);
		//RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA, tcb_desc, sizeof(cb_desc));
#endif
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
			rtllib_softmac_xmit(txb, ieee);
		}else{
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return 0;
			}
			rtllib_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;

}
int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
	memset(skb->cb, 0, sizeof(skb->cb));
	return rtllib_xmit_inter(skb, dev);
}
Example #6
static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
				struct data_queue *queue,
				struct sk_buff *frag_skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb);
	struct ieee80211_tx_info *rts_info;
	struct sk_buff *skb;
	unsigned int data_length;
	int retval = 0;

	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		data_length = sizeof(struct ieee80211_cts);
	else
		data_length = sizeof(struct ieee80211_rts);

	skb = dev_alloc_skb(data_length + rt2x00dev->hw->extra_tx_headroom);
	if (unlikely(!skb)) {
		rt2x00_warn(rt2x00dev, "Failed to create RTS/CTS frame\n");
		return -ENOMEM;
	}

	skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom);
	skb_put(skb, data_length);

	/*
	 * Copy TX information over from original frame to
	 * RTS/CTS frame. Note that we set the no encryption flag
	 * since we don't want this frame to be encrypted.
	 * RTS frames should be acked, while CTS-to-self frames
	 * should not. The ready for TX flag is cleared to prevent
	 * it from being automatically sent when the descriptor is
	 * written to the hardware.
	 */
	memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
	rts_info = IEEE80211_SKB_CB(skb);
	rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
	rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_CTS_PROTECT;

	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		rts_info->flags |= IEEE80211_TX_CTL_NO_ACK;
	else
		rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK;

	/* Disable hardware encryption */
	rts_info->control.hw_key = NULL;

	/*
	 * RTS/CTS frame should use the length of the frame plus any
	 * encryption overhead that will be added by the hardware.
	 */
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif,
					frag_skb->data, data_length, tx_info,
					(struct ieee80211_cts *)(skb->data));
	else
		ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif,
				  frag_skb->data, data_length, tx_info,
				  (struct ieee80211_rts *)(skb->data));

	retval = rt2x00queue_write_tx_frame(queue, skb, true);
	if (retval) {
		dev_kfree_skb_any(skb);
		rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n");
	}

	return retval;
}
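
A hedged sketch of how a TX path might call the helper above when mac80211 requests RTS or CTS-to-self protection before queueing the data frame itself; the wrapper shape and error value are assumptions (compare the rt2x00mac_tx() excerpt at the end of this listing).

static int example_tx_with_protection(struct rt2x00_dev *rt2x00dev,
				      struct data_queue *queue,
				      struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.rates[0].flags &
	    (IEEE80211_TX_RC_USE_RTS_CTS | IEEE80211_TX_RC_USE_CTS_PROTECT)) {
		if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
			return -EIO;	/* assumed error handling */
	}

	return rt2x00queue_write_tx_frame(queue, skb, false);
}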
Example #7
static void mwifiex_usb_rx_complete(struct urb *urb)
{
	struct urb_context *context = (struct urb_context *)urb->context;
	struct mwifiex_adapter *adapter = context->adapter;
	struct sk_buff *skb = context->skb;
	struct usb_card_rec *card;
	int recv_length = urb->actual_length;
	int size, status;

	if (!adapter || !adapter->card) {
		pr_err("mwifiex adapter or card structure is not valid\n");
		return;
	}

	card = (struct usb_card_rec *)adapter->card;
	if (card->rx_cmd_ep == context->ep)
		atomic_dec(&card->rx_cmd_urb_pending);
	else
		atomic_dec(&card->rx_data_urb_pending);

	if (recv_length) {
		if (urb->status || (adapter->surprise_removed)) {
			dev_err(adapter->dev,
				"URB status is failed: %d\n", urb->status);
			/* Do not free skb in case of command ep */
			if (card->rx_cmd_ep != context->ep)
				dev_kfree_skb_any(skb);
			goto setup_for_next;
		}
		if (skb->len > recv_length)
			skb_trim(skb, recv_length);
		else
			skb_put(skb, recv_length - skb->len);

		atomic_inc(&adapter->rx_pending);
		status = mwifiex_usb_recv(adapter, skb, context->ep);

		dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
			recv_length, status);
		if (status == -EINPROGRESS) {
			queue_work(adapter->workqueue, &adapter->main_work);

			/* urb for data_ep is re-submitted now;
			 * urb for cmd_ep will be re-submitted in callback
			 * mwifiex_usb_recv_complete
			 */
			if (card->rx_cmd_ep == context->ep)
				return;
		} else {
			atomic_dec(&adapter->rx_pending);
			if (status == -1)
				dev_err(adapter->dev,
					"received data processing failed!\n");

			/* Do not free skb in case of command ep */
			if (card->rx_cmd_ep != context->ep)
				dev_kfree_skb_any(skb);
		}
	} else if (urb->status) {
		if (!adapter->is_suspended) {
			dev_warn(adapter->dev,
				 "Card is removed: %d\n", urb->status);
			adapter->surprise_removed = true;
		}
		dev_kfree_skb_any(skb);
		return;
	} else {
		/* Do not free skb in case of command ep */
		if (card->rx_cmd_ep != context->ep)
			dev_kfree_skb_any(skb);

		/* fall through setup_for_next */
	}

setup_for_next:
	if (card->rx_cmd_ep == context->ep)
		size = MWIFIEX_RX_CMD_BUF_SIZE;
	else
		size = MWIFIEX_RX_DATA_BUF_SIZE;

	mwifiex_usb_submit_rx_urb(context, size);

	return;
}
Example #8
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
Example #9
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
Example #10
static int sipc4_hdlc_rx(struct sipc4_rx_data *data)
{
	int rest = data->size;
	char *buf = page_address(data->page);
	int len;
	int err = -ERANGE;

	if (rest <= 0)
		goto end;

	if (data->format == SIPC4_FMT)
		printk(KERN_DEBUG "IPC:RX size=%d\n", data->size);

next_frame:
	err = len = sipc4_check_header(data, buf, rest);
	if (err < 0)
		goto end;
	buf += len;
	rest -= len;
	if (rest <= 0)
		goto end;

	err = len = sipc4_check_data(data, buf, rest);
	if (err < 0)
		goto end;
	buf += len;
	rest -= len;
	if (rest <= 0)
		goto end;

	err = len = sipc4_check_hdlc_end(data, buf);
	if (err < 0)
		goto end;
	buf += len;
	rest -= len;
	if (rest < 0)
		goto end;

	err = sipc4_hdlc_format_rx(data);
	if (err < 0)
		goto end;
	memset(data->rx_hdr, 0x00, sizeof(struct sipc_rx_hdr));

	data->skb = NULL;

	if (rest)
		goto next_frame;

end:
	netdev_free_page(data->dev, data->page);

	if (rest < 0)
		err = -ERANGE;

	if (err < 0 && data->skb) {
		dev_kfree_skb_any(data->skb);
		data->skb = NULL;
	}

	return err;
}
Example #11
u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
{
	__u16 appl_id;
	int _len, _len2;
	__u8 msghead[64];
	hycapictrl_info *cinfo = ctrl->driverdata;
	u16 retval = CAPI_NOERROR;

	appl_id = CAPIMSG_APPID(skb->data);
	switch(_hycapi_appCheck(appl_id, ctrl->cnr))
	{
		case 0:
/*			printk(KERN_INFO "Need to register\n"); */
			hycapi_register_internal(ctrl, 
						 appl_id,
						 &(hycapi_applications[appl_id-1].rp));
			break;
		case 1:
			break;
		default:
			printk(KERN_ERR "HYCAPI: Controller mixup!\n");
			retval = CAPI_ILLAPPNR;
			goto out;
	}
	switch(CAPIMSG_CMD(skb->data)) {		
		case CAPI_DISCONNECT_B3_RESP:
			capilib_free_ncci(&cinfo->ncci_head, appl_id, 
					  CAPIMSG_NCCI(skb->data));
			break;
		case CAPI_DATA_B3_REQ:
			_len = CAPIMSG_LEN(skb->data);
			if (_len > 22) {
				_len2 = _len - 22;
				memcpy(msghead, skb->data, 22);
				memcpy(skb->data + _len2, msghead, 22);
				skb_pull(skb, _len2);
				CAPIMSG_SETLEN(skb->data, 22);
				retval = capilib_data_b3_req(&cinfo->ncci_head,
							     CAPIMSG_APPID(skb->data),
							     CAPIMSG_NCCI(skb->data),
							     CAPIMSG_MSGID(skb->data));
			}
			break;
		case CAPI_LISTEN_REQ:
			if(hycapi_applications[appl_id-1].listen_req[ctrl->cnr-1])
			{
				kfree_skb(hycapi_applications[appl_id-1].listen_req[ctrl->cnr-1]);
				hycapi_applications[appl_id-1].listen_req[ctrl->cnr-1] = NULL;
			}
			if (!(hycapi_applications[appl_id-1].listen_req[ctrl->cnr-1] = skb_copy(skb, GFP_ATOMIC))) 
			{
				printk(KERN_ERR "HYSDN: memory squeeze in private_listen\n");
			} 
			break;
		default:
			break;
	}
 out:
	if (retval == CAPI_NOERROR)
		hycapi_sendmsg_internal(ctrl, skb);
	else 
		dev_kfree_skb_any(skb);

	return retval;
}
Example #12
int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:

	return err;
}
Example #13
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped, and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = efx->txq_entries - 1 - fill_level;

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				netif_tx_stop_queue(tx_queue->core_txq);
				/* This memory barrier protects the
				 * change of queue state from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					ACCESS_ONCE(tx_queue->read_count);
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = efx->txq_entries - 1 - fill_level;
				if (unlikely(q_space-- <= 0)) {
					rc = NETDEV_TX_BUSY;
					goto unwind;
				}
				smp_mb();
				if (likely(!efx->loopback_selftest))
					netif_tx_start_queue(
						tx_queue->core_txq);
			}

			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(fragment);
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}
Example #14
/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Since the stack does not limit the number of segments per
	 * skb, we must do so.  Otherwise an attacker may be able to
	 * make the TCP produce skbs that will never fit in our TX
	 * queue, causing repeated resets.
	 */
	if (unlikely(skb_shinfo(skb)->gso_segs > EFX_TSO_MAX_SEGS)) {
		unsigned int excess =
			(skb_shinfo(skb)->gso_segs - EFX_TSO_MAX_SEGS) *
			skb_shinfo(skb)->gso_size;
		if (__pskb_trim(skb, skb->len - excess)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
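	/* Illustrative arithmetic (values assumed): with gso_size = 1448
	 * and EFX_TSO_MAX_SEGS = 100, a 120-segment skb is trimmed by
	 * excess = 20 * 1448 = 28960 bytes so it fits the TX queue. */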

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc)) {
			rc2 = NETDEV_TX_BUSY;
			goto unwind;
		}

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any(skb);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}
Example #15
void zfLnxUsbDataIn_callback(urb_t *urb)
{
    zdev_t* dev = urb->context;
    struct usbdrv_private *macp = dev->ml_priv;
    zbuf_t *buf;
    zbuf_t *new_buf;
    int status;

#if ZM_USB_STREAM_MODE == 1
    static int remain_len = 0, check_pad = 0, check_len = 0;
    int index = 0;
    int chk_idx;
    u16_t pkt_len;
    u16_t pkt_tag;
    u16_t ii;
    zbuf_t *rxBufPool[8];
    u16_t rxBufPoolIndex = 0;
#endif

    /* Check status for URB */
    if (urb->status != 0){
        printk("zfLnxUsbDataIn_callback() : status=0x%x\n", urb->status);
        if ((urb->status != -ENOENT) && (urb->status != -ECONNRESET)
            && (urb->status != -ESHUTDOWN))
        {
                if (urb->status == -EPIPE){
                    //printk(KERN_ERR "nonzero read bulk status received: -EPIPE");
                    status = -1;
                }

                if (urb->status == -EPROTO){
                    //printk(KERN_ERR "nonzero read bulk status received: -EPROTO");
                    status = -1;
                }
        }

        //printk(KERN_ERR "urb->status: 0x%08x\n", urb->status);

        /* Dequeue skb buffer */
        buf = zfLnxGetUsbRxBuffer(dev);
        dev_kfree_skb_any(buf);
        #if 0
        /* Enqueue skb buffer */
        zfLnxPutUsbRxBuffer(dev, buf);

        /* Submit a Rx urb */
        zfLnxUsbIn(dev, urb, buf);
        #endif
        return;
    }

    if (urb->actual_length == 0)
    {
        printk(KERN_ERR "Get an URB whose length is zero");
        status = -1;
    }

    /* Dequeue skb buffer */
    buf = zfLnxGetUsbRxBuffer(dev);

    //zfwBufSetSize(dev, buf, urb->actual_length);
#ifdef NET_SKBUFF_DATA_USES_OFFSET
    buf->tail = 0;
    buf->len = 0;
#else
    buf->tail = buf->data;
    buf->len = 0;
#endif

    BUG_ON((buf->tail + urb->actual_length) > buf->end);

    skb_put(buf, urb->actual_length);

#if ZM_USB_STREAM_MODE == 1
    if (remain_len != 0)
    {
        zbuf_t *remain_buf = macp->reamin_buf;

        index = remain_len;
        remain_len -= check_pad;

        /*  Copy data */
        memcpy(&(remain_buf->data[check_len]), buf->data, remain_len);
        check_len += remain_len;
        remain_len = 0;

        rxBufPool[rxBufPoolIndex++] = remain_buf;
    }

    while(index < urb->actual_length)
    {
        pkt_len = buf->data[index] + (buf->data[index+1] << 8);
        pkt_tag = buf->data[index+2] + (buf->data[index+3] << 8);

        if (pkt_tag == 0x4e00)
        {
            int pad_len;

            //printk("Get a packet, index: %d, pkt_len: 0x%04x\n", index, pkt_len);
            #if 0
            /* Dump data */
            for (ii = index; ii < pkt_len+4;)
            {
                printk("%02x ", (buf->data[ii] & 0xff));

                if ((++ii % 16) == 0)
                    printk("\n");
            }

            printk("\n");
            #endif

            pad_len = 4 - (pkt_len & 0x3);

            if(pad_len == 4)
                pad_len = 0;

            chk_idx = index;
            index = index + 4 + pkt_len + pad_len;
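            /* Illustrative numbers: a pkt_len of 1494 (0x5d6) gives
             * pad_len = 4 - (1494 & 3) = 2, so index advances by
             * 4 + 1494 + 2 = 1500 to the next 4-byte-aligned frame. */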

            if (index > ZM_MAX_RX_BUFFER_SIZE)
            {
                remain_len = index - ZM_MAX_RX_BUFFER_SIZE; // - pad_len;
                check_len = ZM_MAX_RX_BUFFER_SIZE - chk_idx - 4;
                check_pad = pad_len;

                /* Allocate a skb buffer */
                //new_buf = zfwBufAllocate(dev, ZM_MAX_RX_BUFFER_SIZE);
                new_buf = dev_alloc_skb(ZM_MAX_RX_BUFFER_SIZE);

                /* Set skb buffer length */
            #ifdef NET_SKBUFF_DATA_USES_OFFSET
                new_buf->tail = 0;
                new_buf->len = 0;
            #else
                new_buf->tail = new_buf->data;
                new_buf->len = 0;
            #endif

                skb_put(new_buf, pkt_len);

                /* Copy the buffer */
                memcpy(new_buf->data, &(buf->data[chk_idx+4]), check_len);

                /* Record the buffer pointer */
                macp->reamin_buf = new_buf;
            }
            else
            {
        #ifdef ZM_DONT_COPY_RX_BUFFER
                if (rxBufPoolIndex == 0)
                {
                    new_buf = skb_clone(buf, GFP_ATOMIC);

                    new_buf->data = &(buf->data[chk_idx+4]);
                    new_buf->len = pkt_len;
                }
                else
                {
        #endif
                /* Allocate a skb buffer */
                new_buf = dev_alloc_skb(ZM_MAX_RX_BUFFER_SIZE);

                /* Set skb buffer length */
            #ifdef NET_SKBUFF_DATA_USES_OFFSET
                new_buf->tail = 0;
                new_buf->len = 0;
            #else
                new_buf->tail = new_buf->data;
                new_buf->len = 0;
            #endif

                skb_put(new_buf, pkt_len);

                /* Copy the buffer */
                memcpy(new_buf->data, &(buf->data[chk_idx+4]), pkt_len);

        #ifdef ZM_DONT_COPY_RX_BUFFER
                }
        #endif
                rxBufPool[rxBufPoolIndex++] = new_buf;
            }
        }
        else
        {
            printk(KERN_ERR "Can't find tag, pkt_len: 0x%04x, tag: 0x%04x\n", pkt_len, pkt_tag);

            /* Free buffer */
            dev_kfree_skb_any(buf);

            /* Allocate a skb buffer */
            new_buf = dev_alloc_skb(ZM_MAX_RX_BUFFER_SIZE);

            /* Enqueue skb buffer */
            zfLnxPutUsbRxBuffer(dev, new_buf);

            /* Submit a Rx urb */
            zfLnxUsbIn(dev, urb, new_buf);

            return;
        }
    }

    /* Free buffer */
    dev_kfree_skb_any(buf);
#endif

    /* Allocate a skb buffer */
    new_buf = dev_alloc_skb(ZM_MAX_RX_BUFFER_SIZE);

    /* Enqueue skb buffer */
    zfLnxPutUsbRxBuffer(dev, new_buf);

    /* Submit a Rx urb */
    zfLnxUsbIn(dev, urb, new_buf);

#if ZM_USB_STREAM_MODE == 1
    for(ii = 0; ii < rxBufPoolIndex; ii++)
    {
        macp->usbCbFunctions.zfcbUsbRecv(dev, rxBufPool[ii]);
    }
#else
    /* pass data to upper layer */
    macp->usbCbFunctions.zfcbUsbRecv(dev, buf);
#endif
}
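
/*
 * A minimal, self-contained sketch (plain C, no kernel types) of the 4-byte
 * alignment arithmetic the stream-mode RX parser above relies on: each
 * packet is preceded by a 4-byte length/tag word and padded so that the next
 * header starts on a 4-byte boundary.  The helper names are hypothetical.
 */
#include <assert.h>

/* Pad length needed to round pkt_len up to a 4-byte boundary (0..3). */
static unsigned int stream_pad_len(unsigned int pkt_len)
{
	unsigned int pad = 4 - (pkt_len & 0x3);

	return (pad == 4) ? 0 : pad;
}

/* Offset of the next length/tag word after the packet starting at 'index'. */
static unsigned int stream_next_index(unsigned int index, unsigned int pkt_len)
{
	return index + 4 + pkt_len + stream_pad_len(pkt_len);
}

int main(void)
{
	assert(stream_pad_len(60) == 0);		/* already aligned */
	assert(stream_pad_len(61) == 3);		/* 61 + 3 = 64     */
	assert(stream_next_index(0, 61) == 68);		/* 4 + 61 + 3      */
	return 0;
}
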
Example #16
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct device *dev = htt->ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = skb_cb->vdev_id;
	int len = 0;
	int msdu_id = -1;
	int res;


	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	txdesc = ath10k_htc_alloc_skb(len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_free_txdesc;

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	cmd->hdr.msg_type         = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id    = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id    = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	skb_cb->htt.txbuf = NULL;

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}
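
/*
 * The function above uses the common "unwind in reverse order" goto idiom:
 * each acquired resource gets a matching error label, and a failure jumps to
 * the label that releases everything acquired so far.  A reduced, standalone
 * illustration of the same idiom, with malloc() standing in for the real
 * resources (names hypothetical):
 */
#include <stdlib.h>

static int setup_three_resources(void)
{
	void *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto err;

	b = malloc(16);
	if (!b)
		goto err_free_a;

	c = malloc(16);
	if (!c)
		goto err_free_b;

	/* ... use a, b and c here ... */

	free(c);
	free(b);
	free(a);
	return 0;

err_free_b:
	free(b);
err_free_a:
	free(a);
err:
	return -1;
}

int main(void)
{
	return setup_three_resources() ? EXIT_FAILURE : EXIT_SUCCESS;
}
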
int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	enum data_queue_qid qid = skb_get_queue_mapping(skb);
	struct data_queue *queue;

	/*
	 * Mac80211 might be calling this function while we are trying
	 * to remove the device or perhaps suspending it.
	 * Note that we can only stop the TX queues inside the TX path
	 * due to possible race conditions in mac80211.
	 */
	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		goto exit_fail;

	/*
	 * Determine which queue to put packet on.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
	    test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
		queue = rt2x00queue_get_queue(rt2x00dev, QID_ATIM);
	else
		queue = rt2x00queue_get_queue(rt2x00dev, qid);
	if (unlikely(!queue)) {
		ERROR(rt2x00dev,
		      "Attempt to send packet over invalid queue %d.\n"
		      "Please file bug report to %s.\n", qid, DRV_PROJECT);
		goto exit_fail;
	}

	/*
	 * If CTS/RTS is required, create and queue that frame first.
	 * Make sure we have at least enough entries available to send
	 * this CTS/RTS frame as well as the data frame.
	 * Note that when the driver has set the set_rts_threshold()
	 * callback function it doesn't need software generation of
	 * either RTS or CTS-to-self frame and handles everything
	 * inside the hardware.
	 */
	if ((tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
						IEEE80211_TX_RC_USE_CTS_PROTECT)) &&
	    !rt2x00dev->ops->hw->set_rts_threshold) {
		if (rt2x00queue_available(queue) <= 1)
			goto exit_fail;

		if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
			goto exit_fail;
	}

	if (rt2x00queue_write_tx_frame(queue, skb, false))
		goto exit_fail;

	if (rt2x00queue_threshold(queue))
		ieee80211_stop_queue(rt2x00dev->hw, qid);

	return NETDEV_TX_OK;

 exit_fail:
	ieee80211_stop_queue(rt2x00dev->hw, qid);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
Example #18
static struct sk_buff
*
hfc_empty_fifo(struct BCState *bcs, int count)
{
	u_char *ptr;
	struct sk_buff *skb;
	struct IsdnCardState *cs = bcs->cs;
	int idx;
	int chksum;
	u_char stat, cip;

	if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
		debugl1(cs, "hfc_empty_fifo");
	idx = 0;
	if (count > HSCX_BUFMAX + 3) {
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "hfc_empty_fifo: incoming packet too large");
		cip = HFC_CIP | HFC_FIFO_OUT | HFC_REC | HFC_CHANNEL(bcs->channel);
		while ((idx++ < count) && WaitNoBusy(cs))
			cs->BC_Read_Reg(cs, HFC_DATA_NODEB, cip);
		WaitNoBusy(cs);
		stat = cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F2_INC | HFC_REC |
				       HFC_CHANNEL(bcs->channel));
		WaitForBusy(cs);
		return (NULL);
	}
	if ((count < 4) && (bcs->mode != L1_MODE_TRANS)) {
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "hfc_empty_fifo: incoming packet too small");
		cip = HFC_CIP | HFC_FIFO_OUT | HFC_REC | HFC_CHANNEL(bcs->channel);
		while ((idx++ < count) && WaitNoBusy(cs))
			cs->BC_Read_Reg(cs, HFC_DATA_NODEB, cip);
		WaitNoBusy(cs);
		stat = cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F2_INC | HFC_REC |
				       HFC_CHANNEL(bcs->channel));
		WaitForBusy(cs);
#ifdef ERROR_STATISTIC
		bcs->err_inv++;
#endif
		return (NULL);
	}
	if (bcs->mode == L1_MODE_TRANS)
	  count -= 1;
	else
	  count -= 3;
	if (!(skb = dev_alloc_skb(count)))
		printk(KERN_WARNING "HFC: receive out of memory\n");
	else {
		ptr = skb_put(skb, count);
		idx = 0;
		cip = HFC_CIP | HFC_FIFO_OUT | HFC_REC | HFC_CHANNEL(bcs->channel);
		while ((idx < count) && WaitNoBusy(cs)) {
			*ptr++ = cs->BC_Read_Reg(cs, HFC_DATA_NODEB, cip);
			idx++;
		}
		if (idx != count) {
			debugl1(cs, "RFIFO BUSY error");
			printk(KERN_WARNING "HFC FIFO channel %d BUSY Error\n", bcs->channel);
			dev_kfree_skb_any(skb);
			if (bcs->mode != L1_MODE_TRANS) {
			  WaitNoBusy(cs);
			  stat = cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F2_INC | HFC_REC |
						 HFC_CHANNEL(bcs->channel));
			  WaitForBusy(cs);
			}
			return (NULL);
		}
		if (bcs->mode != L1_MODE_TRANS) {
		  WaitNoBusy(cs);
		  chksum = (cs->BC_Read_Reg(cs, HFC_DATA, cip) << 8);
		  WaitNoBusy(cs);
		  chksum += cs->BC_Read_Reg(cs, HFC_DATA, cip);
		  WaitNoBusy(cs);
		  stat = cs->BC_Read_Reg(cs, HFC_DATA, cip);
		  if (cs->debug & L1_DEB_HSCX)
		    debugl1(cs, "hfc_empty_fifo %d chksum %x stat %x",
			    bcs->channel, chksum, stat);
		  if (stat) {
		    debugl1(cs, "FIFO CRC error");
		    dev_kfree_skb_any(skb);
		    skb = NULL;
#ifdef ERROR_STATISTIC
		    bcs->err_crc++;
#endif
		  }
		  WaitNoBusy(cs);
		  stat = cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F2_INC | HFC_REC |
					 HFC_CHANNEL(bcs->channel));
		  WaitForBusy(cs);
		}
	}
	return (skb);
}
Example #19
static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return -ENOMEM;
	}
	//skb_reserve (skb, NET_IP_ALIGN);

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach (dev->net);
			break;
		case -EHOSTUNREACH:
			retval = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
	return retval;
}
Example #20
static void
hfc_fill_fifo(struct BCState *bcs)
{
	struct IsdnCardState *cs = bcs->cs;
	long flags;
	int idx, fcnt;
	int count;
	int z1, z2;
	u_char cip;

	if (!bcs->tx_skb)
		return;
	if (bcs->tx_skb->len <= 0)
		return;

	save_flags(flags);
	cli();
	cip = HFC_CIP | HFC_F1 | HFC_SEND | HFC_CHANNEL(bcs->channel);
	if ((cip & 0xc3) != (cs->hw.hfc.cip & 0xc3)) {
	  cs->BC_Write_Reg(cs, HFC_STATUS, cip, cip);
	  WaitForBusy(cs);
	}
	WaitNoBusy(cs);
	if (bcs->mode != L1_MODE_TRANS) {
	  bcs->hw.hfc.f1 = cs->BC_Read_Reg(cs, HFC_DATA, cip);
	  cip = HFC_CIP | HFC_F2 | HFC_SEND | HFC_CHANNEL(bcs->channel);
	  WaitNoBusy(cs);
	  bcs->hw.hfc.f2 = cs->BC_Read_Reg(cs, HFC_DATA, cip);
	  bcs->hw.hfc.send[bcs->hw.hfc.f1] = ReadZReg(bcs, HFC_Z1 | HFC_SEND | HFC_CHANNEL(bcs->channel));
	  if (cs->debug & L1_DEB_HSCX)
	    debugl1(cs, "hfc_fill_fifo %d f1(%d) f2(%d) z1(%x)",
		    bcs->channel, bcs->hw.hfc.f1, bcs->hw.hfc.f2,
		    bcs->hw.hfc.send[bcs->hw.hfc.f1]);
	  fcnt = bcs->hw.hfc.f1 - bcs->hw.hfc.f2;
	  if (fcnt < 0)
	    fcnt += 32;
	  if (fcnt > 30) {
	    if (cs->debug & L1_DEB_HSCX)
	      debugl1(cs, "hfc_fill_fifo more as 30 frames");
	    restore_flags(flags);
	    return;
	  }
	  count = GetFreeFifoBytes(bcs);
	} 
	else {
	  WaitForBusy(cs);
	  z1 = ReadZReg(bcs, HFC_Z1 | HFC_REC | HFC_CHANNEL(bcs->channel));
	  z2 = ReadZReg(bcs, HFC_Z2 | HFC_REC | HFC_CHANNEL(bcs->channel));
	  count = z1 - z2;
	  if (count < 0)
	    count += cs->hw.hfc.fifosize; 
	} /* L1_MODE_TRANS */
	if (cs->debug & L1_DEB_HSCX)
		debugl1(cs, "hfc_fill_fifo %d count(%ld/%d)",
			bcs->channel, bcs->tx_skb->len,
			count);
	if (count < bcs->tx_skb->len) {
		if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "hfc_fill_fifo no fifo mem");
		restore_flags(flags);
		return;
	}
	cip = HFC_CIP | HFC_FIFO_IN | HFC_SEND | HFC_CHANNEL(bcs->channel);
	idx = 0;
	while ((idx < bcs->tx_skb->len) && WaitNoBusy(cs))
		cs->BC_Write_Reg(cs, HFC_DATA_NODEB, cip, bcs->tx_skb->data[idx++]);
	if (idx != bcs->tx_skb->len) {
		debugl1(cs, "FIFO Send BUSY error");
		printk(KERN_WARNING "HFC S FIFO channel %d BUSY Error\n", bcs->channel);
	} else {
		count =  bcs->tx_skb->len;
		bcs->tx_cnt -= count;
		if (PACKET_NOACK == bcs->tx_skb->pkt_type)
			count = -1;
		dev_kfree_skb_any(bcs->tx_skb);
		bcs->tx_skb = NULL;
		if (bcs->mode != L1_MODE_TRANS) {
		  WaitForBusy(cs);
		  WaitNoBusy(cs);
		  cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F1_INC | HFC_SEND | HFC_CHANNEL(bcs->channel));
		}
		if (bcs->st->lli.l1writewakeup && (count >= 0))
			bcs->st->lli.l1writewakeup(bcs->st, count);
		test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
	}
	restore_flags(flags);
	return;
}
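
/*
 * Both branches above compute a wrap-around difference between two hardware
 * counters: the F-counters wrap at 32, the Z-counters at the FIFO size.  A
 * standalone sketch of that arithmetic, with a hypothetical helper name:
 */
#include <assert.h>

static int ring_diff(int newer, int older, int modulus)
{
	int d = newer - older;

	if (d < 0)
		d += modulus;
	return d;
}

int main(void)
{
	assert(ring_diff(5, 3, 32) == 2);		/* no wrap           */
	assert(ring_diff(1, 30, 32) == 3);		/* F-counter wrapped */
	assert(ring_diff(10, 2000, 2048) == 58);	/* Z-counter wrapped */
	return 0;
}
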
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;


	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);

	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
	}
	return retval;
}
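
/*
 * The sizing logic above rounds the receive buffer up to a whole number of
 * USB packets ("size += out->maxpacket - 1; size -= size % out->maxpacket").
 * A standalone sketch of just that rounding step, with hypothetical names
 * and example numbers:
 */
#include <assert.h>
#include <stddef.h>

static size_t round_up_to_maxpacket(size_t size, size_t maxpacket)
{
	size += maxpacket - 1;
	size -= size % maxpacket;
	return size;
}

int main(void)
{
	/* 14-byte Ethernet header + 1500-byte MTU + 2 bytes of slack,
	 * rounded to a 512-byte bulk endpoint */
	assert(round_up_to_maxpacket(14 + 1500 + 2, 512) == 1536);
	assert(round_up_to_maxpacket(512, 512) == 512);	/* already aligned */
	return 0;
}
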
Example #22
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = 0;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;
	static int workqueue_pinned;

	if (!workqueue_pinned) {
		struct cpumask cpus;

		cpumask_clear(&cpus);
		cpumask_set_cpu(0, &cpus);

		if (sched_setaffinity(current->pid, &cpus))
			pr_err("%s: sdio_dmux set CPU affinity failed\n",
					__func__);
		workqueue_pinned = 1;
	}

	DBG("%s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("%s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("%s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_mux_lock);
		return;
	}

	/* NET_IP_ALIGN is probably not required */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;

	/* If allocation fails attempt to get a smaller chunk of mem */
	do {
		skb_mux = __dev_alloc_skb(sz + NET_IP_ALIGN + len, GFP_KERNEL);
		if (skb_mux)
			break;

		pr_err("%s: cannot allocate skb of size:%d + "
			"%d (NET_SKB_PAD)\n", __func__,
			sz + NET_IP_ALIGN + len, NET_SKB_PAD);
		/* the skb structure adds NET_SKB_PAD bytes to the memory
		 * request, which may push the actual request above PAGE_SIZE;
		 * in that case, we need to iterate one more time to make sure
		 * we get the memory request under PAGE_SIZE
		 */
		if (sz + NET_IP_ALIGN + len + NET_SKB_PAD <= PAGE_SIZE) {
			pr_err("%s: allocation failed\n", __func__);
			mutex_unlock(&sdio_mux_lock);
			return;
		}
		sz /= 2;
	} while (1);

	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("%s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("%s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_mux_lock);
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
		return;
	}
	mutex_unlock(&sdio_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);
	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("%s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("%s: read done\n", __func__);
	queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}
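
/*
 * The allocation loop above halves the requested payload on failure, but
 * gives up once payload plus fixed overhead already fits in one page, since
 * shrinking further would not change the underlying allocation order.  A
 * standalone sketch of that strategy using malloc() as a stand-in for
 * __dev_alloc_skb(); the constants are hypothetical:
 */
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_PAGE_SIZE	4096u
#define SKETCH_OVERHEAD		64u	/* stands in for NET_SKB_PAD + NET_IP_ALIGN */

static void *alloc_with_shrink(unsigned int *sz)
{
	void *p;

	for (;;) {
		p = malloc(*sz + SKETCH_OVERHEAD);	/* stand-in for __dev_alloc_skb() */
		if (p)
			return p;

		if (*sz + SKETCH_OVERHEAD <= SKETCH_PAGE_SIZE)
			return NULL;		/* already a single-page request, give up */

		*sz /= 2;			/* retry with a smaller payload */
	}
}

int main(void)
{
	unsigned int sz = 16384;
	void *buf = alloc_with_shrink(&sz);

	printf("allocated %p with payload size %u\n", buf, sz);
	free(buf);
	return 0;
}
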
Example #23
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	struct rndis_per_packet_info *ppi;
	u32 hash;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
	struct hv_page_buffer *pb = page_buf;

	/* We will need at most two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If the skb is scattered across
	 * more pages, we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
					IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						ppi->ppi_offset);
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
				    TCP_LARGESEND_PKTINFO);

		lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
							ppi->ppi_offset);

		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
					    TCPIP_CHKSUM_PKTINFO);

			csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
									 ppi->ppi_offset);

			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, &pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);
	ret = netvsc_send(net_device_ctx->device_ctx, packet,
			  rndis_msg, &pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}
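
/*
 * netvsc_get_slots() is not shown above, but the page-buffer budget it feeds
 * ultimately depends on how many pages a linear buffer touches.  A standalone
 * sketch of that generic page-span arithmetic (constants and names are
 * hypothetical, not the driver's own helper):
 */
#include <assert.h>
#include <stddef.h>

#define SKETCH_PAGE_SIZE 4096u

static unsigned int pages_spanned(size_t offset, size_t len)
{
	size_t start = offset & (SKETCH_PAGE_SIZE - 1);	/* offset within the first page */

	return (unsigned int)((start + len + SKETCH_PAGE_SIZE - 1) / SKETCH_PAGE_SIZE);
}

int main(void)
{
	assert(pages_spanned(0, 4096) == 1);
	assert(pages_spanned(100, 4096) == 2);	/* crosses one page boundary */
	assert(pages_spanned(4090, 10) == 2);
	return 0;
}
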
Example #24
static void sdio_mux_send_open_cmd(uint32_t id)
{
	struct sdio_mux_hdr hdr = {
		.magic_num = SDIO_MUX_HDR_MAGIC_NO,
		.cmd = SDIO_MUX_HDR_CMD_OPEN,
		.reserved = 0,
		.ch_id = id,
		.pkt_len = 0,
		.pad_len = 0
	};

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));
}

static void sdio_mux_write_data(struct work_struct *work)
{
	int rc, reschedule = 0;
	int notify = 0;
	struct sk_buff *skb;
	unsigned long flags;
	int avail;
	int ch_id;

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
		ch_id = ((struct sdio_mux_hdr *)skb->data)->ch_id;

		avail = sdio_write_avail(sdio_mux_ch);
		if (avail < skb->len) {
			/* we may have to wait for write avail
			 * notification from sdio al
			 */
			DBG("%s: sdio_write_avail(%d) < skb->len(%d)\n",
					__func__, avail, skb->len);

			reschedule = 1;
			break;
		}
		spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
		rc = sdio_mux_write(skb);
		spin_lock_irqsave(&sdio_mux_write_lock, flags);
		if (rc == 0) {

			spin_lock(&sdio_ch[ch_id].lock);
			sdio_ch[ch_id].num_tx_pkts--;
			spin_unlock(&sdio_ch[ch_id].lock);

			if (sdio_ch[ch_id].write_done)
				sdio_ch[ch_id].write_done(
						sdio_ch[ch_id].priv, skb);
			else
				dev_kfree_skb_any(skb);
		} else if (rc == -EAGAIN || rc == -ENOMEM) {
			/* recoverable error - retry again later */
			reschedule = 1;
			break;
		} else if (rc == -ENODEV) {
			/*
			 * sdio_al suffered some kind of fatal error
			 * prevent future writes and clean up pending ones
			 */
			fatal_error = 1;
			do {
				ch_id = ((struct sdio_mux_hdr *)
						skb->data)->ch_id;
				spin_lock(&sdio_ch[ch_id].lock);
				sdio_ch[ch_id].num_tx_pkts--;
				spin_unlock(&sdio_ch[ch_id].lock);
				dev_kfree_skb_any(skb);
			} while ((skb = __skb_dequeue(&sdio_mux_write_pool)));
			spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
			return;
		} else {
			/* unknown error condition - drop the
			 * skb and reschedule for the
			 * other skbs
			 */
			pr_err("%s: sdio_mux_write error %d"
				   " for ch %d, skb=%p\n",
				__func__, rc, ch_id, skb);
			notify = 1;
			break;
		}
	}

	if (reschedule) {
		if (sdio_ch_is_in_reset(ch_id)) {
			notify = 1;
		} else {
			__skb_queue_head(&sdio_mux_write_pool, skb);
			queue_delayed_work(sdio_mux_workqueue,
					&delayed_work_sdio_mux_write,
					msecs_to_jiffies(250)
					);
		}
	}

	if (notify) {
		spin_lock(&sdio_ch[ch_id].lock);
		sdio_ch[ch_id].num_tx_pkts--;
		spin_unlock(&sdio_ch[ch_id].lock);

		if (sdio_ch[ch_id].write_done)
			sdio_ch[ch_id].write_done(
				sdio_ch[ch_id].priv, skb);
		else
			dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
}
Example #25
struct sk_buff *AMSDU_Aggregation(
	struct rtllib_device 	*ieee,
	struct sk_buff_head 		*pSendList
	)
{
	struct sk_buff *	pSkb;
	struct sk_buff * 	pAggrSkb;
	u8		i;
	u32		total_length = 0;
	u32		skb_len, num_skb;
	pcb_desc 	pcb;
	u8 		amsdu_shdr[AMSDU_SUBHEADER_LEN];
	u8		padding = 0;
	u8		*p = NULL, *q=NULL;
	u16		ether_type;

	//
	// Calculate the total length
	//
	num_skb = skb_queue_len(pSendList);
	if(num_skb == 0)
		return NULL;
	if(num_skb == 1)
	{
		pSkb = (struct sk_buff *)skb_dequeue(pSendList);
		memset(pSkb->cb, 0, sizeof(pSkb->cb));
		pcb = (pcb_desc)(pSkb->cb + MAX_DEV_ADDR_SIZE);
		pcb->bFromAggrQ = true;
		return pSkb;
	}

	total_length += sizeof(struct ethhdr);
	for(i=0; i<num_skb; i++)	
	{
		pSkb= (struct sk_buff *)skb_dequeue(pSendList);
		if(pSkb->len <= (ETH_ALEN*2))
		{
			dev_kfree_skb_any(pSkb);
			continue;
		}
		skb_len = pSkb->len - ETH_ALEN*2 + SNAP_SIZE + AMSDU_SUBHEADER_LEN;
		if(i < (num_skb-1))
		{
			skb_len += ((4-skb_len%4)==4)?0:(4-skb_len%4);
		}
		total_length += skb_len;
		skb_queue_tail(pSendList, pSkb);
	}
	
	//
	// Create A-MSDU
	//
	pAggrSkb = dev_alloc_skb(total_length);
	if(NULL == pAggrSkb)
	{
		skb_queue_purge(pSendList);
		printk("%s: Can not alloc skb!\n", __FUNCTION__);
		return NULL;
	}
	skb_put(pAggrSkb,total_length);
	pAggrSkb->priority = pSkb->priority;

	//
	// Fill AMSDU attributes within cb
	//
	memset(pAggrSkb->cb, 0, sizeof(pAggrSkb->cb));
	pcb = (pcb_desc)(pAggrSkb->cb + MAX_DEV_ADDR_SIZE);
	pcb->bFromAggrQ = true;
	pcb->bAMSDU = true;

	//printk("======== In %s: num_skb=%d total_length=%d\n", __FUNCTION__,num_skb, total_length);
	//
	// Make A-MSDU
	//
	memset(amsdu_shdr, 0, AMSDU_SUBHEADER_LEN);
	p = pAggrSkb->data;
	for(i=0; i<num_skb; i++)	
	{
		q = p;
		pSkb= (struct sk_buff *)skb_dequeue(pSendList);
		ether_type = ntohs(((struct ethhdr *)pSkb->data)->h_proto);

		skb_len = pSkb->len - sizeof(struct ethhdr) + AMSDU_SUBHEADER_LEN + SNAP_SIZE + sizeof(u16);
		if(i < (num_skb-1))
		{
			padding = ((4-skb_len%4)==4)?0:(4-skb_len%4);
			skb_len += padding;
		}
		if(i == 0)
		{
			memcpy(p, pSkb->data, sizeof(struct ethhdr));
			p += sizeof(struct ethhdr);
		}
		//if(memcmp(pSkb->data, pAggrSkb->data, sizeof(struct ethhdr)))
		//	printk(""MAC_FMT"-"MAC_FMT"\n",MAC_ARG(pSkb->data), MAC_ARG(pAggrSkb->data));
		memcpy(amsdu_shdr, pSkb->data, (ETH_ALEN*2));
		skb_pull(pSkb, sizeof(struct ethhdr));
		*(u16*)(amsdu_shdr+ETH_ALEN*2) = ntohs(pSkb->len + SNAP_SIZE + sizeof(u16));
		memcpy(p, amsdu_shdr, AMSDU_SUBHEADER_LEN);
		p += AMSDU_SUBHEADER_LEN;

		rtllib_put_snap(p, ether_type);
		p += SNAP_SIZE + sizeof(u16);

		memcpy(p, pSkb->data, pSkb->len);
		p += pSkb->len;
		if(padding > 0)
		{
			memset(p, 0, padding);
			p += padding;
			padding = 0;
		}
		dev_kfree_skb_any(pSkb);
	}
	
	//printk("-------%d\n",pAggrSkb->len);
	return pAggrSkb;
}
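
/*
 * A standalone sketch of the per-subframe length computation used above: the
 * Ethernet header is replaced by an A-MSDU subheader plus a SNAP header, and
 * every subframe except the last is padded to a 4-byte boundary.  The
 * constants below are hypothetical stand-ins for the driver's ethhdr,
 * AMSDU_SUBHEADER_LEN and SNAP_SIZE values.
 */
#include <assert.h>

#define SKETCH_ETH_HDR_LEN	14u	/* sizeof(struct ethhdr)          */
#define SKETCH_AMSDU_SUBHDR_LEN	14u	/* DA + SA + 2-byte length        */
#define SKETCH_SNAP_HDR_LEN	8u	/* SNAP header + 2-byte ethertype */

static unsigned int amsdu_subframe_len(unsigned int eth_len, int is_last)
{
	unsigned int len = eth_len - SKETCH_ETH_HDR_LEN +
			   SKETCH_AMSDU_SUBHDR_LEN + SKETCH_SNAP_HDR_LEN;

	if (!is_last)
		len += (4u - (len & 3u)) & 3u;	/* pad to a 4-byte boundary */
	return len;
}

int main(void)
{
	assert(amsdu_subframe_len(100, 0) == 108);	/* 100 - 14 + 14 + 8, already aligned */
	assert(amsdu_subframe_len(101, 0) == 112);	/* 109 padded up when not last        */
	assert(amsdu_subframe_len(101, 1) == 109);	/* last subframe is never padded      */
	return 0;
}
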
Example #26
int msm_sdio_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct sdio_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!sdio_mux_initialized)
		return -ENODEV;
	if (fatal_error)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_in_reset(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port is in reset: %d\n", __func__,
				sdio_ch[id].status);
		return -ENETRESET;
	}
	if (!sdio_ch_is_local_open(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
		return -ENODEV;
	}
	if (sdio_ch[id].use_wm &&
			(sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	/* if the skb does not have any tailroom for padding,
	   copy it into a new, expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit, dev_alloc_skb and memcpy is probably more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			rc = -ENOMEM;
			goto write_done;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct sdio_mux_hdr *)skb_push(skb, sizeof(struct sdio_mux_hdr));

	/* caller should allocate room for hdr and padding;
	   hdr is fine, padding is tricky */
	hdr->magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr->cmd = SDIO_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct sdio_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct sdio_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);
	__skb_queue_tail(&sdio_mux_write_pool, skb);

	spin_lock(&sdio_ch[id].lock);
	sdio_ch[id].num_tx_pkts++;
	spin_unlock(&sdio_ch[id].lock);

	queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

write_done:
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
	return rc;
}
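
/*
 * A standalone sketch of the header bookkeeping above: pkt_len is the payload
 * excluding the mux header, the whole frame is then padded to a 4-byte
 * multiple, and pad_len records how many padding bytes were appended.  The
 * constant and struct below are hypothetical stand-ins for the driver's
 * sdio_mux_hdr fields.
 */
#include <assert.h>
#include <stdint.h>

#define SKETCH_MUX_HDR_LEN 8u

struct sketch_mux_lens {
	uint16_t pkt_len;	/* payload bytes, excluding header and padding */
	uint8_t  pad_len;	/* trailing bytes added for 4-byte alignment   */
};

static struct sketch_mux_lens mux_compute_lens(unsigned int len_with_hdr)
{
	struct sketch_mux_lens l;
	unsigned int padded = len_with_hdr;

	l.pkt_len = (uint16_t)(len_with_hdr - SKETCH_MUX_HDR_LEN);
	if (padded & 3u)
		padded += 4u - (padded & 3u);
	l.pad_len = (uint8_t)(padded - len_with_hdr);
	return l;
}

int main(void)
{
	struct sketch_mux_lens l = mux_compute_lens(SKETCH_MUX_HDR_LEN + 61);

	assert(l.pkt_len == 61);
	assert(l.pad_len == 3);		/* 8 + 61 = 69, padded to 72 */
	return 0;
}
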
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	int awake;
	int ret = 0;

	if (netif_queue_stopped(dev)) {
		pr_err("[%s]fatal: rmnet_xmit called when "
			"netif_queue is stopped", dev->name);
		return 0;
	}

	spin_lock_irqsave(&p->lock, flags);
	awake = msm_bam_dmux_ul_power_vote();
	if (!awake) {
		/* send SKB once wakeup is complete */
		netif_stop_queue(dev);
		p->waiting_for_ul_skb = skb;
		spin_unlock_irqrestore(&p->lock, flags);
		ret = 0;
		goto exit;
	}
	spin_unlock_irqrestore(&p->lock, flags);

	ret = _rmnet_xmit(skb, dev);
	if (ret == -EPERM) {
		ret = NETDEV_TX_BUSY;
		goto exit;
	}

	/*
	 * Detected SSR a bit early.  Shut some things down now, and leave
	 * the rest to the main SSR handling code when that happens later.
	 */
	if (ret == -EFAULT) {
		netif_carrier_off(dev);
		dev_kfree_skb_any(skb);
		ret = 0;
		goto exit;
	}

	if (ret == -EAGAIN) {
		/*
		 * This should not happen.
		 * EAGAIN means we attempted to overflow the high watermark.
		 * Clearly the queue is not stopped like it should be, so
		 * stop it and return BUSY to the TCP/IP framework.  It will
		 * retry this packet when the queue is restarted, which happens
		 * in the write_done callback when the low watermark is hit.
		 */
		netif_stop_queue(dev);
		ret = NETDEV_TX_BUSY;
		goto exit;
	}

	spin_lock_irqsave(&p->tx_queue_lock, flags);
	if (msm_bam_dmux_is_ch_full(p->ch_id)) {
		netif_stop_queue(dev);
		DBG0("%s: High WM hit, stopping queue=%p\n",    __func__, skb);
	}
	spin_unlock_irqrestore(&p->tx_queue_lock, flags);

exit:
	msm_bam_dmux_ul_power_unvote();
	return ret;
}
Example #28
static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
	struct sk_buff *skb_out;
	struct cdc_mbim_state *info = (void *)&dev->data;
	struct cdc_ncm_ctx *ctx = info->ctx;
	__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
	u16 tci = 0;
	bool is_ip;
	u8 *c;

	if (!ctx)
		goto error;

	if (skb) {
		if (skb->len <= ETH_HLEN)
			goto error;

		/* Some applications using e.g. packet sockets will
		 * bypass the VLAN acceleration and create tagged
		 * ethernet frames directly.  We primarily look for
		 * the accelerated out-of-band tag, but fall back if
		 * required
		 */
		skb_reset_mac_header(skb);
		if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
		    __vlan_get_tag(skb, &tci) == 0) {
			is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
			skb_pull(skb, VLAN_ETH_HLEN);
		} else {
			is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
			skb_pull(skb, ETH_HLEN);
		}

		/* mapping VLANs to MBIM sessions:
		 *   no tag     => IPS session <0>
		 *   1 - 255    => IPS session <vlanid>
		 *   256 - 511  => DSS session <vlanid - 256>
		 *   512 - 4095 => unsupported, drop
		 */
		switch (tci & 0x0f00) {
		case 0x0000: /* VLAN ID 0 - 255 */
			if (!is_ip)
				goto error;
			c = (u8 *)&sign;
			c[3] = tci;
			break;
		case 0x0100: /* VLAN ID 256 - 511 */
			sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
			c = (u8 *)&sign;
			c[3] = tci;
			break;
		default:
			netif_err(dev, tx_err, dev->net,
				  "unsupported tci=0x%04x\n", tci);
			goto error;
		}
	}

	spin_lock_bh(&ctx->mtx);
	skb_out = cdc_ncm_fill_tx_frame(ctx, skb, sign);
	spin_unlock_bh(&ctx->mtx);
	return skb_out;

error:
	if (skb)
		dev_kfree_skb_any(skb);

	return NULL;
}
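
/*
 * A standalone sketch of the VLAN-to-session mapping described in the comment
 * above (tag 0 and 1-255 map to IPS, 256-511 to DSS, anything higher is
 * unsupported).  The enum and struct names are hypothetical; the driver
 * itself encodes the session id in the last byte of the NDP16 signature.
 */
#include <assert.h>
#include <stdint.h>

enum sketch_session_type { SESSION_IPS, SESSION_DSS, SESSION_INVALID };

struct sketch_session {
	enum sketch_session_type type;
	uint8_t id;
};

static struct sketch_session map_vlan_to_session(uint16_t vlanid)
{
	struct sketch_session s = { SESSION_INVALID, 0 };

	if (vlanid <= 255) {
		s.type = SESSION_IPS;
		s.id = (uint8_t)vlanid;
	} else if (vlanid <= 511) {
		s.type = SESSION_DSS;
		s.id = (uint8_t)(vlanid - 256);
	}
	return s;
}

int main(void)
{
	assert(map_vlan_to_session(0).type == SESSION_IPS);	/* untagged -> IPS 0 */
	assert(map_vlan_to_session(42).id == 42);
	assert(map_vlan_to_session(300).type == SESSION_DSS);
	assert(map_vlan_to_session(300).id == 44);
	assert(map_vlan_to_session(1000).type == SESSION_INVALID);
	return 0;
}
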
Example #29
static void ctcmpc_send_sweep_req(struct channel *rch)
{
	struct net_device *dev = rch->netdev;
	struct ctcm_priv *priv;
	struct mpc_group *grp;
	struct th_sweep *header;
	struct sk_buff *sweep_skb;
	struct channel *ch;
	/* int rc = 0; */

	priv = dev->ml_priv;
	grp = priv->mpcg;
	ch = priv->channel[CTCM_WRITE];

	/* sweep processing is not complete until response and request */
	/* has completed for all read channels in group		       */
	if (grp->in_sweep == 0) {
		grp->in_sweep = 1;
		grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
		grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
	}

	sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);

	if (sweep_skb == NULL)	{
		/* rc = -ENOMEM; */
				goto nomem;
	}

	header = kmalloc(TH_SWEEP_LENGTH, gfp_type());

	if (!header) {
		dev_kfree_skb_any(sweep_skb);
		/* rc = -ENOMEM; */
				goto nomem;
	}

	header->th.th_seg	= 0x00 ;
	header->th.th_ch_flag	= TH_SWEEP_REQ;  /* 0x0f */
	header->th.th_blk_flag	= 0x00;
	header->th.th_is_xid	= 0x00;
	header->th.th_seq_num	= 0x00;
	header->sw.th_last_seq	= ch->th_seq_num;

	memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);

	kfree(header);

	dev->trans_start = jiffies;
	skb_queue_tail(&ch->sweep_queue, sweep_skb);

	fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);

	return;

nomem:
	grp->in_sweep = 0;
	ctcm_clear_busy(dev);
	fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);

	return;
}
static void
tx_iso_complete(struct urb *urb, struct pt_regs *regs)
{
	iso_urb_struct *context_iso_urb = (iso_urb_struct *) urb->context;
	usb_fifo *fifo = context_iso_urb->owner_fifo;
	hfcusb_data *hfc = fifo->hfc;
	int k, tx_offset, num_isoc_packets, sink, len, current_len,
	    errcode;
	int frame_complete, transp_mode, fifon, status;
	__u8 threshbit;
	__u8 threshtable[8] = { 1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80 };

	fifon = fifo->fifonum;
	status = urb->status;

	tx_offset = 0;

	if (fifo->active && !status) {
		transp_mode = 0;
		if (fifon < 4 && hfc->b_mode[fifon / 2] == L1_MODE_TRANS)
			transp_mode = TRUE;

		/* is FifoFull-threshold set for our channel? */
		threshbit = threshtable[fifon] & hfc->threshold_mask;
		num_isoc_packets = iso_packets[fifon];

		/* predict dataflow to avoid fifo overflow */
		if (fifon >= HFCUSB_D_TX) {
			sink = (threshbit) ? SINK_DMIN : SINK_DMAX;
		} else {
			sink = (threshbit) ? SINK_MIN : SINK_MAX;
		}
		fill_isoc_urb(urb, fifo->hfc->dev, fifo->pipe,
			      context_iso_urb->buffer, num_isoc_packets,
			      fifo->usb_packet_maxlen, fifo->intervall,
			      tx_iso_complete, urb->context);
		memset(context_iso_urb->buffer, 0,
		       sizeof(context_iso_urb->buffer));
		frame_complete = FALSE;
		/* Generate next Iso Packets */
		for (k = 0; k < num_isoc_packets; ++k) {
			if (fifo->skbuff) {
				len = fifo->skbuff->len;
				/* we lower data margin every msec */
				fifo->bit_line -= sink;
				current_len = (0 - fifo->bit_line) / 8;
				/* a maximum of 15 bytes per ISO packet makes our life easier */
				if (current_len > 14)
					current_len = 14;
				current_len =
				    (len <=
				     current_len) ? len : current_len;
				/* how much bit do we put on the line? */
				fifo->bit_line += current_len * 8;

				context_iso_urb->buffer[tx_offset] = 0;
				if (current_len == len) {
					if (!transp_mode) {
						/* here frame completion */
						context_iso_urb->
						    buffer[tx_offset] = 1;
						/* add 2 flag bytes and a 16-bit CRC at the end of the ISDN frame */
						fifo->bit_line += 32;
					}
					frame_complete = TRUE;
				}

				memcpy(context_iso_urb->buffer +
				       tx_offset + 1, fifo->skbuff->data,
				       current_len);
				skb_pull(fifo->skbuff, current_len);

				/* define packet delimiters within the URB buffer */
				urb->iso_frame_desc[k].offset = tx_offset;
				urb->iso_frame_desc[k].length =
				    current_len + 1;

				tx_offset += (current_len + 1);
			} else {
				urb->iso_frame_desc[k].offset =
				    tx_offset++;

				urb->iso_frame_desc[k].length = 1;
				fifo->bit_line -= sink;	/* we lower data margin every msec */

				if (fifo->bit_line < BITLINE_INF) {
					fifo->bit_line = BITLINE_INF;
				}
			}

			if (frame_complete) {
				fifo->delete_flg = TRUE;
				fifo->hif->l1l2(fifo->hif,
						PH_DATA | CONFIRM,
						(void *) fifo->skbuff->
						truesize);
				if (fifo->skbuff && fifo->delete_flg) {
					dev_kfree_skb_any(fifo->skbuff);
					fifo->skbuff = NULL;
					fifo->delete_flg = FALSE;
				}
				frame_complete = FALSE;
			}
		}
		errcode = usb_submit_urb(urb, GFP_ATOMIC);
		if (errcode < 0) {
			printk(KERN_INFO
			       "HFC-S USB: error submitting ISO URB: %d \n",
			       errcode);
		}
	} else {
		if (status && !hfc->disc_flag) {
			printk(KERN_INFO
			       "HFC-S USB: tx_iso_complete : urb->status %s (%i), fifonum=%d\n",
			       symbolic(urb_errlist, status), status,
			       fifon);
		}
	}
}				/* tx_iso_complete */
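
/*
 * A standalone sketch of the credit-style byte budget used above: every
 * millisecond the line drains 'sink' bits, and at most the drained amount
 * (capped at 14 payload bytes per ISO packet) may be refilled from the
 * pending skb.  The names and the cap mirror the logic above but are
 * otherwise hypothetical.
 */
#include <assert.h>

#define SKETCH_MAX_ISO_PAYLOAD 14

static int iso_budget_bytes(int *bit_line, int sink, int pending_bytes)
{
	int len;

	*bit_line -= sink;			/* the line drained this interval */
	len = (0 - *bit_line) / 8;		/* bytes we may put back          */
	if (len > SKETCH_MAX_ISO_PAYLOAD)
		len = SKETCH_MAX_ISO_PAYLOAD;
	if (len > pending_bytes)
		len = pending_bytes;
	*bit_line += len * 8;			/* account for what we sent       */
	return len;
}

int main(void)
{
	int bit_line = 0;

	/* 64 bits drained allows up to 8 bytes; only 5 are pending */
	assert(iso_budget_bytes(&bit_line, 64, 5) == 5);
	assert(bit_line == -24);		/* still 24 bits of headroom left */
	return 0;
}
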