Example #1
void ccci_free_skb(struct sk_buff *skb, DATA_POLICY policy)
{
	CCCI_DBG_MSG(-1, BM, "%ps free skb %p, policy=%d, len=%d\n", __builtin_return_address(0),
		     skb, policy, skb_size(skb));
	switch (policy) {
	case RECYCLE:
		/* 1. reset sk_buff (take __alloc_skb as ref.) */
		skb->data = skb->head;
		skb->len = 0;
		skb_reset_tail_pointer(skb);
		/* 2. enqueue */
		if (skb_size(skb) < SKB_1_5K)
			ccci_skb_enqueue(&skb_pool_16, skb);
		else if (skb_size(skb) < SKB_4K)
			ccci_skb_enqueue(&skb_pool_1_5K, skb);
		else
			ccci_skb_enqueue(&skb_pool_4K, skb);
		break;
	case FREE:
#ifdef CCCI_MEM_BM_DEBUG
		if (ccci_skb_addr_checker(skb)) {
			CCCI_INF_MSG(-1, BM, "ccci_skb_addr_checker failed\n");
			dump_stack();
		}
#endif
		dev_kfree_skb_any(skb);
		break;
	case NOOP:
	default:
		break;
	};
}
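
All of the recycle paths in these examples share one idiom: rewind skb->data back to skb->head, zero skb->len, and call skb_reset_tail_pointer() so the tail pointer tracks data again, leaving the buffer as if it had just come from __alloc_skb(). A minimal sketch of that idiom, assuming a linear, unshared skb (the helper name is ours, not taken from any example):

#include <linux/skbuff.h>

/* rewind an skb so its entire buffer can be reused */
static void skb_rewind_for_reuse(struct sk_buff *skb)
{
	skb->data = skb->head;		/* payload starts at the buffer head again */
	skb->len = 0;			/* nothing stored in the buffer */
	skb_reset_tail_pointer(skb);	/* tail follows data */
}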
Example #2
static int rmnet_usb_data_dmux(struct sk_buff *skb, struct urb *rx_urb)
{
	struct mux_hdr	*hdr;
	size_t		pad_len;
	size_t		total_len;
	unsigned int	mux_id;

	hdr = (struct mux_hdr *)skb->data;
	mux_id = hdr->mux_id;
	if (!mux_id || mux_id > no_rmnet_insts_per_dev) {
		pr_err_ratelimited("%s: Invalid data channel id %u.\n",
				__func__, mux_id);
		return -EINVAL;
	}

	pad_len = hdr->padding_info >> MUX_PAD_SHIFT;
	if (pad_len > MAX_PAD_BYTES(4)) {
		pr_err_ratelimited("%s: Invalid pad len %zu\n",
			__func__, pad_len);
		return -EINVAL;
	}

	total_len = le16_to_cpu(hdr->pkt_len_w_padding);
	if (!total_len || !(total_len - pad_len)) {
		pr_err_ratelimited("%s: Invalid pkt length %zu\n", __func__,
				total_len);
		return -EINVAL;
	}

	/* strip the mux header and drop the trailing pad bytes */
	skb->data = (unsigned char *)(hdr + 1);
	skb_reset_tail_pointer(skb);
	rx_urb->actual_length = total_len - pad_len;

	return mux_id - 1;
}
void ccci_free_skb(struct sk_buff *skb, DATA_POLICY policy)
{
	switch (policy) {
	case RECYCLE:
		/* 1. reset sk_buff (take __alloc_skb as ref.) */
		skb->data = skb->head;
		skb->len = 0;
		skb_reset_tail_pointer(skb);
		/* 2. enqueue */
		if (skb_size(skb) < SKB_1_5K)
			skb_queue_tail(&skb_pool_16, skb);
		else if (skb_size(skb) < SKB_4K)
			skb_queue_tail(&skb_pool_1_5K, skb);
		else
			skb_queue_tail(&skb_pool_4K, skb);
		/* TODO: add a short queue for skb dump */
		break;
	case FREE:
		dev_kfree_skb(skb);
		break;
	case NOOP:
	default:
		break;
	}
}
Example #4
int genl_exec(genl_exec_func_t func, void *data)
{
	int ret;

	mutex_lock(&genl_exec_lock);

	init_completion(&done);
	skb_get(genlmsg_skb);
	genlmsg_put(genlmsg_skb, 0, 0, &genl_exec_family,
		    NLM_F_REQUEST, GENL_EXEC_RUN);

	genl_exec_function = func;
	genl_exec_data = data;

	/* There is no need to send msg to current namespace. */
	ret = genlmsg_unicast(&init_net, genlmsg_skb, 0);

	if (!ret) {
		wait_for_completion(&done);
		ret = genl_exec_function_ret;
	} else {
		pr_err("genl_exec send error %d\n", ret);
	}

	/* Wait for genetlink to kfree skb. */
	while (skb_shared(genlmsg_skb))
		cpu_relax();

	/* rewind the reusable skb so the next genl_exec() starts clean */
	genlmsg_skb->data = genlmsg_skb->head;
	skb_reset_tail_pointer(genlmsg_skb);

	mutex_unlock(&genl_exec_lock);

	return ret;
}
Example #5
void rtl8812au_recv_tasklet(void *priv)
{
	struct sk_buff 		*pskb;
	struct rtl_priv		*rtlpriv = (struct rtl_priv *)priv;
	struct recv_priv	*precvpriv = &rtlpriv->recvpriv;

	while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
		if ((rtlpriv->bDriverStopped == _TRUE)
		|| (rtlpriv->bSurpriseRemoved == _TRUE)) {
			DBG_8192C("recv_tasklet => bDriverStopped or bSurpriseRemoved \n");
			dev_kfree_skb_any(pskb);
			break;
		}

		recvbuf2recvframe(rtlpriv, pskb);

#ifdef CONFIG_PREALLOC_RECV_SKB

		/* reset the skb and return it to the preallocated pool for reuse */
		skb_reset_tail_pointer(pskb);
		pskb->len = 0;
		skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
#else
		dev_kfree_skb_any(pskb);
#endif

	}

}
void usb_recv_tasklet(void *priv)
{
	_pkt			*pskb;
	_adapter		*padapter = (_adapter*)priv;
	struct recv_priv	*precvpriv = &padapter->recvpriv;
	struct recv_buf	*precvbuf = NULL;

	while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {

		if ((padapter->bDriverStopped == _TRUE) || (padapter->bSurpriseRemoved == _TRUE)) {
			DBG_8192C("recv_tasklet => bDriverStopped or bSurpriseRemoved \n");
#ifdef CONFIG_PREALLOC_RX_SKB_BUFFER
			if (rtw_free_skb_premem(pskb) != 0)
#endif //CONFIG_PREALLOC_RX_SKB_BUFFER
				rtw_skb_free(pskb);
			break;
		}

		recvbuf2recvframe(padapter, pskb);

		skb_reset_tail_pointer(pskb);
		pskb->len = 0;

		skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);

		if (NULL != (precvbuf = rtw_dequeue_recvbuf(&precvpriv->recv_buf_pending_queue))) {
			precvbuf->pskb = NULL;
			rtw_read_port(padapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
		}
	}
}
Example #7
void ccci_free_skb(struct sk_buff *skb, DATA_POLICY policy)
{
	CCCI_DBG_MSG(-1, BM, "%ps free skb %p, policy=%d\n", __builtin_return_address(0), skb, policy);
	switch (policy) {
	case RECYCLE:
		/* 1. reset sk_buff (take __alloc_skb as ref.) */
		skb->data = skb->head;
		skb->len = 0;
		skb_reset_tail_pointer(skb);
		/* 2. enqueue */
		if (skb_size(skb) < SKB_1_5K)
			ccci_skb_enqueue(&skb_pool_16, skb);
		else if (skb_size(skb) < SKB_4K)
			ccci_skb_enqueue(&skb_pool_1_5K, skb);
		else
			ccci_skb_enqueue(&skb_pool_4K, skb);
		break;
	case FREE:
		dev_kfree_skb_any(skb);
		break;
	case NOOP:
	default:
		break;
	}
}
Example #8
File: skbuff.c Project: foxwolf/yjd
/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and its head portion at least as large as
 *	skb_size so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	struct skb_shared_info *shinfo;

	//	if (irqs_disabled())
	//		return false;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return false;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_pointer(skb) - skb->head < skb_size)
		return false;

	if (skb_shared(skb) || skb_cloned(skb))
		return false;

	skb_release_head_state(skb);

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);

	return true;
}
EXPORT_SYMBOL(skb_recycle_check);
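
When the check passes, a driver can hand the same buffer straight back to its receive ring instead of allocating a fresh skb. A hedged usage sketch (the priv structure and refill helper are hypothetical, not part of this API):

/* hypothetical rx-completion path built on skb_recycle_check() */
static void my_rx_done(struct my_priv *priv, struct sk_buff *skb)
{
	if (skb_recycle_check(skb, priv->rx_buf_size))
		my_post_rx_buffer(priv, skb);	/* reuse the cleaned buffer */
	else
		dev_kfree_skb_any(skb);		/* fall back to freeing it */
}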
Example #9
void usb_recv_tasklet(void *priv)
{
	_pkt			*pskb;
	_adapter		*padapter = (_adapter *)priv;
	struct recv_priv	*precvpriv = &padapter->recvpriv;
	struct recv_buf	*precvbuf = NULL;

	while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {

		if (RTW_CANNOT_RUN(padapter)) {
			RTW_INFO("recv_tasklet => bDriverStopped(%s) OR bSurpriseRemoved(%s)\n"
				, rtw_is_drv_stopped(padapter) ? "True" : "False"
				, rtw_is_surprise_removed(padapter) ? "True" : "False");
			#ifdef CONFIG_PREALLOC_RX_SKB_BUFFER
			if (rtw_free_skb_premem(pskb) != 0)
			#endif /* CONFIG_PREALLOC_RX_SKB_BUFFER */
				rtw_skb_free(pskb);
			break;
		}

		recvbuf2recvframe(padapter, pskb);

		skb_reset_tail_pointer(pskb);
		pskb->len = 0;

		skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);

		precvbuf = rtw_dequeue_recvbuf(&precvpriv->recv_buf_pending_queue);
		if (NULL != precvbuf) {
			precvbuf->pskb = NULL;
			rtw_read_port(padapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
		}
	}
}
Example #10
static void p54u_rx_cb(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *) urb->context;
	struct p54u_rx_info *info = (struct p54u_rx_info *)skb->cb;
	struct ieee80211_hw *dev = info->dev;
	struct p54u_priv *priv = dev->priv;

	if (unlikely(urb->status)) {
		info->urb = NULL;
		usb_free_urb(urb);
		return;
	}

	skb_unlink(skb, &priv->rx_queue);
	skb_put(skb, urb->actual_length);
	if (!priv->hw_type)
		skb_pull(skb, sizeof(struct net2280_tx_hdr));

	if (p54_rx(dev, skb)) {
		skb = dev_alloc_skb(MAX_RX_SIZE);
		if (unlikely(!skb)) {
			usb_free_urb(urb);
			/* TODO check rx queue length and refill *somewhere* */
			return;
		}

		info = (struct p54u_rx_info *) skb->cb;
		info->urb = urb;
		info->dev = dev;
		urb->transfer_buffer = skb_tail_pointer(skb);
		urb->context = skb;
		skb_queue_tail(&priv->rx_queue, skb);
	} else {
		if (!priv->hw_type)
			skb_push(skb, sizeof(struct net2280_tx_hdr));

		/* rewind the skb so the whole buffer is available for the next rx */
		skb_reset_tail_pointer(skb);
		skb_trim(skb, 0);
		if (urb->transfer_buffer != skb_tail_pointer(skb)) {
			/* this should not happen */
			WARN_ON(1);
			urb->transfer_buffer = skb_tail_pointer(skb);
		}

		skb_queue_tail(&priv->rx_queue, skb);
	}

	usb_submit_urb(urb, GFP_ATOMIC);
}
Example #11
static void ar9170_usb_rx_completed(struct urb *urb)
{
	struct sk_buff *skb = urb->context;
	struct ar9170_usb *aru = (struct ar9170_usb *)
		usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
	int err;

	if (!aru)
		goto free;

	switch (urb->status) {
	/* everything is fine */
	case 0:
		break;

	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;

	default:
		goto resubmit;
	}

	skb_put(skb, urb->actual_length);
	ar9170_rx(&aru->common, skb);

resubmit:
	/* reclaim the full buffer before handing the URB back to the hardware */
	skb_reset_tail_pointer(skb);
	skb_trim(skb, 0);

	usb_anchor_urb(urb, &aru->rx_submitted);
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err)) {
		usb_unanchor_urb(urb);
		goto free;
	}

	return;

free:
	dev_kfree_skb_irq(skb);
}
Example #12
static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
{
	struct c2_rx_desc *rx_desc = elem->ht_desc;
	struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;

	if (rxp_hdr->status != RXP_HRXD_OK ||
	    rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
		pr_debug("BAD RXP_HRXD\n");
		pr_debug("  rx_desc : %p\n", rx_desc);
		pr_debug("    index : %Zu\n",
			elem - c2_port->rx_ring.start);
		pr_debug("    len   : %u\n", rx_desc->len);
		pr_debug("  rxp_hdr : %p [PA %p]\n", rxp_hdr,
			(void *) __pa((unsigned long) rxp_hdr));
		pr_debug("    flags : 0x%x\n", rxp_hdr->flags);
		pr_debug("    status: 0x%x\n", rxp_hdr->status);
		pr_debug("    len   : %u\n", rxp_hdr->len);
		pr_debug("    rsvd  : 0x%x\n", rxp_hdr->rsvd);
	}

	/* set the skb up for reuse since this packet is being dropped */
	elem->skb->data = elem->skb->head;
	skb_reset_tail_pointer(elem->skb);

	/* zero out the rxp header in the skb data */
	memset(elem->skb->data, 0, sizeof(*rxp_hdr));

	/* hand the descriptor back to the adapter's rx ring */
	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
	__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
	__raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
		     elem->hw_desc + C2_RXP_LEN);
	__raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
		     elem->hw_desc + C2_RXP_ADDR);
	__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
		     elem->hw_desc + C2_RXP_FLAGS);

	pr_debug("packet dropped\n");
	c2_port->netdev->stats.rx_dropped++;
}
Example #13
static void bam_data_free_skb_to_pool(
	struct bam_data_port *port,
	struct sk_buff *skb)
{
	struct bam_data_ch_info *d;
	unsigned long flags;

	if (!port) {
		dev_kfree_skb_any(skb);
		return;
	}
	d = &port->data_ch;
	if (!d) {
		dev_kfree_skb_any(skb);
		return;
	}

	spin_lock_irqsave(&port->port_lock_ul, flags);
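	/* reset the skb so it can be reused for the next UL transfer */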
	skb->len = 0;
	skb_reset_tail_pointer(skb);
	__skb_queue_tail(&d->rx_skb_idle, skb);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
}
Example #14
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	
	/* we need to acquire the lock for testing the state
	 * otherwise we can have an IRQ changing the state to
	 * TXIDLE after the test but before acquiring the lock.
	 */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		int l = skb->len + LL_HEADER_LENGTH;

		if (ch->collect_len + l > ch->max_bufsize - 2) {
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	
	/*
	 * Protect skb against being freed by upper
	 * layers.
	 */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;

	
	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skbs above 2G ourselves.
	 */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		
		/*
		 * idal allocation failed, try via copying to
		 * trans_skb; trans_skb usually has a pre-allocated idal.
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			
			/*
			 * Remove our header. It gets added
			 * again on retransmit.
			 */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
					(unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		
		/*
		 * Remove our header. It gets added
		 * again on retransmit.
		 */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}
Example #15
/**
 * Transmit a packet.
 * This is a helper function for ctcm_tx().
 *
 *  ch		Channel to be used for sending.
 *  skb		Pointer to struct sk_buff of packet to send.
 *            The linklevel header has already been set up
 *            by ctcm_tx().
 *
 * returns 0 on success, -ERRNO on failure. (Never fails.)
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	/* we need to acquire the lock for testing the state
	 * otherwise we can have an IRQ changing the state to
	 * TXIDLE after the test but before acquiring the lock.
	 */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		int l = skb->len + LL_HEADER_LENGTH;

		if (ch->collect_len + l > ch->max_bufsize - 2) {
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	/*
	 * Protect skb against being freed by upper
	 * layers.
	 */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;

	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skbs above 2G ourselves.
	 */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed, try via copying to
		 * trans_skb. trans_skb usually has a pre-allocated
		 * idal.
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/*
			 * Remove our header. It gets added
			 * again on retransmit.
			 */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
					(unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		/*
		 * Remove our header. It gets added
		 * again on retransmit.
		 */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}
Example #16
static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
				   struct tipc_nl_compat_msg *msg,
				   struct sk_buff *arg)
{
	int len = 0;
	int err;
	struct sk_buff *buf;
	struct nlmsghdr *nlmsg;
	struct netlink_callback cb;

	memset(&cb, 0, sizeof(cb));
	cb.nlh = (struct nlmsghdr *)arg->data;
	cb.skb = arg;

	buf = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->sk = msg->dst_sk;

	do {
		int rem;

		len = (*cmd->dumpit)(buf, &cb);

		nlmsg_for_each_msg(nlmsg, nlmsg_hdr(buf), len, rem) {
			struct nlattr **attrs;

			err = tipc_nlmsg_parse(nlmsg, &attrs);
			if (err)
				goto err_out;

			err = (*cmd->format)(msg, attrs);
			if (err)
				goto err_out;

			if (tipc_skb_tailroom(msg->rep) <= 1) {
				err = -EMSGSIZE;
				goto err_out;
			}
		}

		/* reset the scratch buffer so the next dumpit pass starts clean */
		skb_reset_tail_pointer(buf);
		buf->len = 0;

	} while (len);

	err = 0;

err_out:
	kfree_skb(buf);

	if (err == -EMSGSIZE) {
		/* The legacy API only considered messages filling
		 * "ULTRA_STRING_MAX_LEN" to be truncated.
		 */
		if ((TIPC_SKB_MAX - msg->rep->len) <= 1) {
			char *tail = skb_tail_pointer(msg->rep);

			if (*tail != '\0')
				sprintf(tail - sizeof(REPLY_TRUNCATED) - 1,
					REPLY_TRUNCATED);
		}

		return 0;
	}

	return err;
}
Example #17
static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
				    struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.noise = -98,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/;
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast +=  skb->len;
		}

		rtl_is_special_data(hw, skb, false);

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}
	}
}

static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
				      struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.noise = -98,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/;
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast +=  skb->len;
		}

		rtl_is_special_data(hw, skb, false);

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}
		if (likely(rtl_action_proc(hw, skb, false))) {
			struct sk_buff *uskb = NULL;
			u8 *pdata;

			uskb = dev_alloc_skb(skb->len + 128);
			if (uskb) {	/* drop packet on allocation failure */
				memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
				       sizeof(rx_status));
				pdata = (u8 *)skb_put(uskb, skb->len);
				memcpy(pdata, skb->data, skb->len);
				ieee80211_rx_irqsafe(hw, uskb);
			}
			dev_kfree_skb_any(skb);
		} else {
			dev_kfree_skb_any(skb);
		}
	}
}

static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct sk_buff *_skb;
	struct sk_buff_head rx_queue;
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	skb_queue_head_init(&rx_queue);
	if (rtlusb->usb_rx_segregate_hdl)
		rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
	WARN_ON(skb_queue_empty(&rx_queue));
	while (!skb_queue_empty(&rx_queue)) {
		_skb = skb_dequeue(&rx_queue);
		_rtl_usb_rx_process_agg(hw, _skb);
		ieee80211_rx_irqsafe(hw, _skb);
	}
}

static void _rtl_rx_completed(struct urb *_urb)
{
	struct sk_buff *skb = (struct sk_buff *)_urb->context;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
	struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int err = 0;

	if (unlikely(IS_USB_STOP(rtlusb)))
		goto free;

	if (likely(0 == _urb->status)) {
		/* If this code were moved to work queue, would CPU
		 * utilization be improved?  NOTE: We shall allocate another skb
		 * and reuse the original one.
		 */
		skb_put(skb, _urb->actual_length);

		if (likely(!rtlusb->usb_rx_segregate_hdl)) {
			struct sk_buff *_skb;
			_rtl_usb_rx_process_noagg(hw, skb);
			_skb = _rtl_prep_rx_urb(hw, rtlusb, _urb, GFP_ATOMIC);
			if (IS_ERR(_skb)) {
				err = PTR_ERR(_skb);
				RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
					 "Can't allocate skb for bulk IN!\n");
				return;
			}
			skb = _skb;
		} else {
			/* TO DO */
			_rtl_rx_pre_process(hw, skb);
			pr_err("rx agg not supported\n");
		}
		goto resubmit;
	}

	switch (_urb->status) {
	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;
	default:
		break;
	}

resubmit:
	skb_reset_tail_pointer(skb);
	skb_trim(skb, 0);

	usb_anchor_urb(_urb, &rtlusb->rx_submitted);
	err = usb_submit_urb(_urb, GFP_ATOMIC);
	if (unlikely(err)) {
		usb_unanchor_urb(_urb);
		goto free;
	}
	return;

free:
	dev_kfree_skb_irq(skb);
}

static int _rtl_usb_receive(struct ieee80211_hw *hw)
{
	struct urb *urb;
	struct sk_buff *skb;
	int err;
	int i;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	WARN_ON(0 == rtlusb->rx_urb_num);
	/* 1600 == 1514 + max WLAN header + rtk info */
	WARN_ON(rtlusb->rx_max_size < 1600);

	for (i = 0; i < rtlusb->rx_urb_num; i++) {
		err = -ENOMEM;
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Failed to alloc URB!!\n");
			goto err_out;
		}

		skb = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
		if (IS_ERR(skb)) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Failed to prep_rx_urb!!\n");
			err = PTR_ERR(skb);
			goto err_out;
		}

		usb_anchor_urb(urb, &rtlusb->rx_submitted);
		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err)
			goto err_out;
		usb_free_urb(urb);
	}
	return 0;

err_out:
	usb_kill_anchored_urbs(&rtlusb->rx_submitted);
	return err;
}

static int rtl_usb_start(struct ieee80211_hw *hw)
{
	int err;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	err = rtlpriv->cfg->ops->hw_init(hw);
	if (!err) {
		rtl_init_rx_config(hw);

		/* Enable software */
		SET_USB_START(rtlusb);
		/* should after adapter start and interrupt enable. */
		set_hal_start(rtlhal);

		/* Start bulk IN */
		_rtl_usb_receive(hw);
	}

	return err;
}

/*=======================  tx =========================================*/
static void rtl_usb_cleanup(struct ieee80211_hw *hw)
{
	u32 i;
	struct sk_buff *_skb;
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
	struct ieee80211_tx_info *txinfo;

	SET_USB_STOP(rtlusb);

	/* clean up rx stuff. */
	usb_kill_anchored_urbs(&rtlusb->rx_submitted);

	/* clean up tx stuff */
	for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
		while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) {
			rtlusb->usb_tx_cleanup(hw, _skb);
			txinfo = IEEE80211_SKB_CB(_skb);
			ieee80211_tx_info_clear_status(txinfo);
			txinfo->flags |= IEEE80211_TX_STAT_ACK;
			ieee80211_tx_status_irqsafe(hw, _skb);
		}
		usb_kill_anchored_urbs(&rtlusb->tx_pending[i]);
	}
	usb_kill_anchored_urbs(&rtlusb->tx_submitted);
}
static int CVE_2010_3848_linux2_6_23_econet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
	struct net_device *dev;
	struct ec_addr addr;
	int err;
	unsigned char port, cb;
#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
	struct sk_buff *skb;
	struct ec_cb *eb;
#endif
#ifdef CONFIG_ECONET_AUNUDP
	struct msghdr udpmsg;
	struct iovec iov[msg->msg_iovlen+1];
	struct aunhdr ah;
	struct sockaddr_in udpdest;
	__kernel_size_t size;
	int i;
	mm_segment_t oldfs;
#endif

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
		return -EINVAL;

	/*
	 *	Get and verify the address.
	 */

	mutex_lock(&econet_mutex);

	if (saddr == NULL) {
		struct econet_sock *eo = ec_sk(sk);

		addr.station = eo->station;
		addr.net     = eo->net;
		port	     = eo->port;
		cb	     = eo->cb;
	} else {
		if (msg->msg_namelen < sizeof(struct sockaddr_ec)) {
			mutex_unlock(&econet_mutex);
			return -EINVAL;
		}
		addr.station = saddr->addr.station;
		addr.net = saddr->addr.net;
		port = saddr->port;
		cb = saddr->cb;
	}

	/* Look for a device with the right network number. */
	dev = net2dev_map[addr.net];

	/* If not directly reachable, use some default */
	if (dev == NULL) {
		dev = net2dev_map[0];
		/* No interfaces at all? */
		if (dev == NULL) {
			mutex_unlock(&econet_mutex);
			return -ENETDOWN;
		}
	}

	if (len + 15 > dev->mtu) {
		mutex_unlock(&econet_mutex);
		return -EMSGSIZE;
	}

	if (dev->type == ARPHRD_ECONET) {
		/* Real hardware Econet.  We're not worthy etc. */
#ifdef CONFIG_ECONET_NATIVE
		unsigned short proto = 0;

		dev_hold(dev);

		skb = sock_alloc_send_skb(sk, len+LL_RESERVED_SPACE(dev),
					  msg->msg_flags & MSG_DONTWAIT, &err);
		if (skb == NULL)
			goto out_unlock;

		skb_reserve(skb, LL_RESERVED_SPACE(dev));
		skb_reset_network_header(skb);

		eb = (struct ec_cb *)&skb->cb;

		/* BUG: saddr may be NULL */
		eb->cookie = saddr->cookie;
		eb->sec = *saddr;
		eb->sent = ec_tx_done;

		if (dev->hard_header) {
			int res;
			struct ec_framehdr *fh;
			err = -EINVAL;
			res = dev->hard_header(skb, dev, ntohs(proto),
					       &addr, NULL, len);
			/* Poke in our control byte and
			   port number.  Hack, hack.  */
			fh = (struct ec_framehdr *)(skb->data);
			fh->cb = cb;
			fh->port = port;
			if (sock->type != SOCK_DGRAM) {
				skb_reset_tail_pointer(skb);
				skb->len = 0;
			} else if (res < 0)
				goto out_free;
		}

		/* Copy the data. Returns -EFAULT on error */
		err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
		skb->protocol = proto;
		skb->dev = dev;
		skb->priority = sk->sk_priority;
		if (err)
			goto out_free;

		err = -ENETDOWN;
		if (!(dev->flags & IFF_UP))
			goto out_free;

		/*
		 *	Now send it
		 */

		dev_queue_xmit(skb);
		dev_put(dev);
		mutex_unlock(&econet_mutex);
		return len;

	out_free:
		kfree_skb(skb);
	out_unlock:
		if (dev)
			dev_put(dev);
#else
		err = -EPROTOTYPE;
#endif
		mutex_unlock(&econet_mutex);

		return err;
	}

#ifdef CONFIG_ECONET_AUNUDP
	/* AUN virtual Econet. */

	if (udpsock == NULL) {
		mutex_unlock(&econet_mutex);
		return -ENETDOWN;		/* No socket - can't send */
	}

	/* Make up a UDP datagram and hand it off to some higher intellect. */

	memset(&udpdest, 0, sizeof(udpdest));
	udpdest.sin_family = AF_INET;
	udpdest.sin_port = htons(AUN_PORT);

	/* At the moment we use the stupid Acorn scheme of Econet address
	   y.x maps to IP a.b.c.x.  This should be replaced with something
	   more flexible and more aware of subnet masks.  */
	{
		struct in_device *idev;
		unsigned long network = 0;

		rcu_read_lock();
		idev = __in_dev_get_rcu(dev);
		if (idev) {
			if (idev->ifa_list)
				network = ntohl(idev->ifa_list->ifa_address) &
					0xffffff00;		/* !!! */
		}
		rcu_read_unlock();
		udpdest.sin_addr.s_addr = htonl(network | addr.station);
	}

	ah.port = port;
	ah.cb = cb & 0x7f;
	ah.code = 2;		/* magic */
	ah.pad = 0;

	/* tack our header on the front of the iovec */
	size = sizeof(struct aunhdr);
	/*
	 * XXX: that is b0rken.  We can't mix userland and kernel pointers
	 * in iovec, since on a lot of platforms copy_from_user() will
	 * *not* work with the kernel and userland ones at the same time,
	 * regardless of what we do with set_fs().  And we are talking about
	 * econet-over-ethernet here, so "it's only ARM anyway" doesn't
	 * apply.  Any suggestions on fixing that code?		-- AV
	 */
	iov[0].iov_base = (void *)&ah;
	iov[0].iov_len = size;
	for (i = 0; i < msg->msg_iovlen; i++) {
		void __user *base = msg->msg_iov[i].iov_base;
		size_t len = msg->msg_iov[i].iov_len;
		/* Check it now since we switch to KERNEL_DS later. */
		if (!access_ok(VERIFY_READ, base, len)) {
			mutex_unlock(&econet_mutex);
			return -EFAULT;
		}
		iov[i+1].iov_base = base;
		iov[i+1].iov_len = len;
		size += len;
	}

	/* Get a skbuff (no data, just holds our cb information) */
	if ((skb = sock_alloc_send_skb(sk, 0,
				       msg->msg_flags & MSG_DONTWAIT,
				       &err)) == NULL) {
		mutex_unlock(&econet_mutex);
		return err;
	}

	eb = (struct ec_cb *)&skb->cb;

	eb->cookie = saddr->cookie;
	eb->timeout = (5*HZ);
	eb->start = jiffies;
	ah.handle = aun_seq;
	eb->seq = (aun_seq++);
	eb->sec = *saddr;

	skb_queue_tail(&aun_queue, skb);

	udpmsg.msg_name = (void *)&udpdest;
	udpmsg.msg_namelen = sizeof(udpdest);
	udpmsg.msg_iov = &iov[0];
	udpmsg.msg_iovlen = msg->msg_iovlen + 1;
	udpmsg.msg_control = NULL;
	udpmsg.msg_controllen = 0;
	udpmsg.msg_flags = 0;

	oldfs = get_fs(); set_fs(KERNEL_DS);	/* More privs :-) */
	err = sock_sendmsg(udpsock, &udpmsg, size);
	set_fs(oldfs);
#else
	err = -EPROTOTYPE;
#endif
	mutex_unlock(&econet_mutex);

	return err;
}
static void rtl8723as_recv_tasklet(void *priv)
{
	PADAPTER			padapter;
	PHAL_DATA_TYPE		pHalData;
	struct recv_priv		*precvpriv;
	struct recv_buf		*precvbuf;
	union recv_frame		*precvframe;
	struct recv_frame_hdr	*phdr;
	struct rx_pkt_attrib	*pattrib;
	_irqL	irql;
	u8		*ptr;
	u32		pkt_len, pkt_offset, skb_len, alloc_sz;
	_pkt		*pkt_copy = NULL;
	u8		shift_sz = 0, rx_report_sz = 0;


	padapter = (PADAPTER)priv;
	pHalData = GET_HAL_DATA(padapter);
	precvpriv = &padapter->recvpriv;

	do {
		precvbuf = rtw_dequeue_recvbuf(&precvpriv->recv_buf_pending_queue);
		if (NULL == precvbuf) break;

		ptr = precvbuf->pdata;

		while (ptr < precvbuf->ptail)
		{
			precvframe = rtw_alloc_recvframe(&precvpriv->free_recv_queue);
			if (precvframe == NULL) {
				RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("%s: not enough recv frames!\n", __FUNCTION__));
				rtw_enqueue_recvbuf_to_head(precvbuf, &precvpriv->recv_buf_pending_queue);

				// Failure to allocate a recvframe should be temporary, so
				// schedule again and hope one is available next time.
#ifdef PLATFORM_LINUX
				tasklet_schedule(&precvpriv->recv_tasklet);
#endif
				return;
			}

			//rx desc parsing
			update_recvframe_attrib(precvframe, (struct recv_stat*)ptr);

			pattrib = &precvframe->u.hdr.attrib;

			// fix Hardware RX data error, drop whole recv_buffer
			if ((!(pHalData->ReceiveConfig & RCR_ACRC32)) && pattrib->crc_err)
			{
				#if !(MP_DRIVER==1)
				DBG_8192C("%s()-%d: RX Warning! rx CRC ERROR !!\n", __FUNCTION__, __LINE__);
				#endif
				rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
				break;
			}

			if (pHalData->ReceiveConfig & RCR_APP_BA_SSN)
				rx_report_sz = RXDESC_SIZE + 4 + pattrib->drvinfo_sz;
			else
				rx_report_sz = RXDESC_SIZE + pattrib->drvinfo_sz;

			pkt_offset = rx_report_sz + pattrib->pkt_len;

			if ((ptr + pkt_offset) > precvbuf->ptail) {
				DBG_8192C("%s()-%d: next pkt len(%p,%d) exceeds ptail(%p)!\n", __FUNCTION__, __LINE__, ptr, pkt_offset, precvbuf->ptail);
				rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
				break;
			}

			if ((pattrib->crc_err) || (pattrib->icv_err))
			{
				DBG_8192C("%s: crc_err=%d icv_err=%d, skip!\n", __FUNCTION__, pattrib->crc_err, pattrib->icv_err);
				rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
			}
			else
			{
				//	Modified by Albert 20101213
				//	For 8 bytes IP header alignment.
				if (pattrib->qos)	//	Qos data, wireless lan header length is 26
				{
					shift_sz = 6;
				}
				else
				{
					shift_sz = 0;
				}

				skb_len = pattrib->pkt_len;

				// for a first-fragment packet, the driver needs to allocate 1536+drvinfo_sz+RXDESC_SIZE to defrag the packet.
				// modify alloc_sz for receiving crc error packets by thomas 2011-06-02
				if((pattrib->mfrag == 1)&&(pattrib->frag_num == 0)){
					//alloc_sz = 1664;	//1664 is 128 alignment.
					if(skb_len <= 1650)
						alloc_sz = 1664;
					else
						alloc_sz = skb_len + 14;
				}
				else {
					alloc_sz = skb_len;
					//	6 is for IP header 8 bytes alignment in QoS packet case.
					//	8 is for skb->data 4 bytes alignment.
					alloc_sz += 14;
				}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) // http://www.mail-archive.com/[email protected]/msg17214.html
				pkt_copy = dev_alloc_skb(alloc_sz);
#else
				pkt_copy = netdev_alloc_skb(padapter->pnetdev, alloc_sz);
#endif
				if(pkt_copy)
				{
					pkt_copy->dev = padapter->pnetdev;
					precvframe->u.hdr.pkt = pkt_copy;
					skb_reserve( pkt_copy, 8 - ((SIZE_PTR)( pkt_copy->data ) & 7 ));//force pkt_copy->data at 8-byte alignment address
					skb_reserve( pkt_copy, shift_sz );//force ip_hdr at 8-byte alignment address according to shift_sz.
					_rtw_memcpy(pkt_copy->data, (ptr + rx_report_sz), skb_len);
					precvframe->u.hdr.rx_head = pkt_copy->head;
					precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail = pkt_copy->data;
					precvframe->u.hdr.rx_end = skb_end_pointer(pkt_copy);
				}
				else
				{
					if((pattrib->mfrag == 1)&&(pattrib->frag_num == 0))
					{
						DBG_8192C("rtl8723as_recv_tasklet: alloc_skb fail, drop frag frame\n");
						rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
						break;
					}

					precvframe->u.hdr.pkt = skb_clone(precvbuf->pskb, GFP_ATOMIC);
					if(precvframe->u.hdr.pkt)
					{
						_pkt	*pkt_clone = precvframe->u.hdr.pkt;

						pkt_clone->data = ptr + rx_report_sz;
						skb_reset_tail_pointer(pkt_clone);
						precvframe->u.hdr.rx_head = precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail
							= pkt_clone->data;
						precvframe->u.hdr.rx_end =  pkt_clone->data + skb_len;
					}
					else
					{
						DBG_8192C("rtl8723as_recv_tasklet: skb_clone fail\n");
						rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
						break;
					}
				}

				recvframe_put(precvframe, skb_len);
				//recvframe_pull(precvframe, drvinfo_sz + RXDESC_SIZE);

				if (pHalData->ReceiveConfig & RCR_APPFCS)
					recvframe_pull_tail(precvframe, IEEE80211_FCS_LEN);

				// move to drv info position
				ptr += RXDESC_SIZE;

				// update drv info
				if (pHalData->ReceiveConfig & RCR_APP_BA_SSN) {
					//rtl8723s_update_bassn(padapter, pdrvinfo);
					ptr += 4;
				}

#ifdef CONFIG_CONCURRENT_MODE
				if(rtw_buddy_adapter_up(padapter))
				{
					if(pre_recv_entry(precvframe, precvbuf, (struct phy_stat*)ptr) != _SUCCESS)
					{
						RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,
							("recvbuf2recvframe: recv_entry(precvframe) != _SUCCESS\n"));
					}
				}
				else
#endif
				{
					if (pattrib->physt)
						update_recvframe_phyinfo(precvframe, (struct phy_stat*)ptr);

					if (rtw_recv_entry(precvframe) != _SUCCESS)
					{
						RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("%s: rtw_recv_entry(precvframe) != _SUCCESS\n",__FUNCTION__));
					}
				}
			}

			// Page size of receive package is 128 bytes alignment =>DMA AGG
			// refer to _InitTransferPageSize()
			pkt_offset = _RND128(pkt_offset);
			precvbuf->pdata += pkt_offset;
			ptr = precvbuf->pdata;
			precvframe = NULL;
			pkt_copy = NULL;
		}

		rtw_enqueue_recvbuf(precvbuf, &precvpriv->free_recv_buf_queue);
	} while (1);

}
Example #20
static void carl9170_rx_stream(struct ar9170 *ar, void *buf, unsigned int len)
{
	unsigned int tlen, wlen = 0, clen = 0;
	struct ar9170_stream *rx_stream;
	u8 *tbuf;

	tbuf = buf;
	tlen = len;

	while (tlen >= 4) {
		rx_stream = (void *) tbuf;
		clen = le16_to_cpu(rx_stream->length);
		wlen = ALIGN(clen, 4);

		/* check if this stream has a valid tag. */
		if (rx_stream->tag != cpu_to_le16(AR9170_RX_STREAM_TAG)) {
			/*
			 * TODO: handle the highly unlikely event that the
			 * corrupted stream has the TAG at the right position.
			 */

			/* check if the frame can be repaired. */
			if (!ar->rx_failover_missing) {

				/* this is not a "short read". */
				if (net_ratelimit()) {
					wiphy_err(ar->hw->wiphy,
						"missing tag!\n");
				}

				__carl9170_rx(ar, tbuf, tlen);
				return;
			}

			if (ar->rx_failover_missing > tlen) {
				if (net_ratelimit()) {
					wiphy_err(ar->hw->wiphy,
						"possible multi "
						"stream corruption!\n");
					goto err_telluser;
				} else {
					goto err_silent;
				}
			}

			memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
			ar->rx_failover_missing -= tlen;

			if (ar->rx_failover_missing <= 0) {
				/*
				 * nested carl9170_rx_stream call!
				 *
				 * termination is guaranteed, even when the
				 * combined frame also has an element with
				 * a bad tag.
				 */

				ar->rx_failover_missing = 0;
				carl9170_rx_stream(ar, ar->rx_failover->data,
						   ar->rx_failover->len);

				skb_reset_tail_pointer(ar->rx_failover);
				skb_trim(ar->rx_failover, 0);
			}

			return;
		}

		/* check if stream is clipped */
		if (wlen > tlen - 4) {
			if (ar->rx_failover_missing) {
				/* TODO: handle double stream corruption. */
				if (net_ratelimit()) {
					wiphy_err(ar->hw->wiphy, "double rx "
						"stream corruption!\n");
					goto err_telluser;
				} else {
					goto err_silent;
				}
			}

			/*
			 * save incomplete data set.
			 * the firmware will resend the missing bits when
			 * the rx descriptor comes round again.
			 */

			memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
			ar->rx_failover_missing = clen - tlen;
			return;
		}
		__carl9170_rx(ar, rx_stream->payload, clen);

		tbuf += wlen + 4;
		tlen -= wlen + 4;
	}

	if (tlen) {
		if (net_ratelimit()) {
			wiphy_err(ar->hw->wiphy, "%d bytes of unprocessed "
				"data left in rx stream!\n", tlen);
		}

		goto err_telluser;
	}

	return;

err_telluser:
	wiphy_err(ar->hw->wiphy, "damaged RX stream data [want:%d, "
		"data:%d, rx:%d, pending:%d ]\n", clen, wlen, tlen,
		ar->rx_failover_missing);

	if (ar->rx_failover_missing)
		print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET,
				     ar->rx_failover->data,
				     ar->rx_failover->len);

	print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET,
			     buf, len);

	wiphy_err(ar->hw->wiphy, "please check your hardware and cables, if "
		"you see this message frequently.\n");

err_silent:
	if (ar->rx_failover_missing) {
		skb_reset_tail_pointer(ar->rx_failover);
		skb_trim(ar->rx_failover, 0);
		ar->rx_failover_missing = 0;
	}
}
Example #21
/* Transmit routine used by generic_netmap_txsync(). Returns 0 on success
   and -1 on error (which may be packet drops or other errors). */
int
nm_os_generic_xmit_frame(struct nm_os_gen_arg *a)
{
	struct mbuf *m = a->m;
	struct ifnet *ifp = a->ifp;
	u_int len = a->len;
	netdev_tx_t ret;

	/* We know that the driver needs to prepend ifp->needed_headroom bytes
	 * to each packet to be transmitted. We then reset the mbuf pointers
	 * to the correct initial state:
	 *    ___________________________________________
	 *    ^           ^                             ^
	 *    |           |                             |
	 *   head        data                          end
	 *               tail
	 *
	 * which correspond to an empty buffer with exactly
	 * ifp->needed_headroom bytes between head and data.
	 */
	m->len = 0;
	m->data = m->head + ifp->needed_headroom;
	skb_reset_tail_pointer(m);
	skb_reset_mac_header(m);
	skb_reset_network_header(m);

	/* Copy a netmap buffer into the mbuf.
	 * TODO Support the slot flags (NS_MOREFRAG, NS_INDIRECT). */
	skb_copy_to_linear_data(m, a->addr, len); // skb_store_bits(m, 0, addr, len);
	skb_put(m, len);

	/* Hold a reference on this, we are going to recycle mbufs as
	 * much as possible. */
	NM_ATOMIC_INC(&m->users);

	/* On linux m->dev is not reliable, since it can be changed by the
	 * ndo_start_xmit() callback. This happens, for instance, with veth
	 * and bridge drivers. For this reason, the nm_os_generic_xmit_frame()
	 * implementation for linux stores a copy of m->dev into the
	 * destructor_arg field. */
	m->dev = ifp;
	skb_shinfo(m)->destructor_arg = m->dev;

	/* Tell generic_ndo_start_xmit() to pass this mbuf to the driver. */
	skb_set_queue_mapping(m, a->ring_nr);
	m->priority = a->qevent ? NM_MAGIC_PRIORITY_TXQE : NM_MAGIC_PRIORITY_TX;

	ret = dev_queue_xmit(m);

	if (unlikely(ret != NET_XMIT_SUCCESS)) {
		/* Reset priority, so that generic_netmap_tx_clean() can
		 * reclaim this mbuf. */
		m->priority = 0;

		/* Qdisc queue is full (this cannot happen with
		 * the netmap-aware qdisc, see explanation in
		 * netmap_generic_txsync), or qdisc is being
		 * deactivated. In the latter case dev_queue_xmit()
		 * does not call the enqueue method and returns
		 * NET_XMIT_DROP.
		 * If there is no carrier, the generic qdisc is
		 * not yet active (is pending in the qdisc_sleeping
		 * field), and so the temporary noop qdisc enqueue
		 * method will drop the packet and return NET_XMIT_CN.
		 */
		RD(3, "Warning: dev_queue_xmit() is dropping [%d]", ret);
		return -1;
	}

	return 0;
}
static void rtl8723bs_recv_tasklet(void *priv)
{
	PADAPTER			padapter;
	PHAL_DATA_TYPE		pHalData;
	struct recv_priv		*precvpriv;
	struct recv_buf 	*precvbuf;
	union recv_frame		*precvframe;
	struct recv_frame_hdr	*phdr;
	struct rx_pkt_attrib	*pattrib;
	_irqL	irql;
	u8		*ptr;
	u32 	pkt_len, pkt_offset, skb_len, alloc_sz;
	_pkt		*pkt_copy = NULL;
	u8		shift_sz = 0, rx_report_sz = 0;


	padapter = (PADAPTER)priv;
	pHalData = GET_HAL_DATA(padapter);
	precvpriv = &padapter->recvpriv;

	do {
		precvbuf = rtw_dequeue_recvbuf(&precvpriv->recv_buf_pending_queue);
		if (NULL == precvbuf) break;

		ptr = precvbuf->pdata;

		while (ptr < precvbuf->ptail)
		{
			precvframe = rtw_alloc_recvframe(&precvpriv->free_recv_queue);
			if (precvframe == NULL)
			{
				DBG_8192C("%s: not enough recv frames!\n", __FUNCTION__);
				rtw_enqueue_recvbuf_to_head(precvbuf, &precvpriv->recv_buf_pending_queue);

				// Failure to allocate a recvframe should be temporary, so
				// schedule again and hope one is available next time.
#ifdef PLATFORM_LINUX
				tasklet_schedule(&precvpriv->recv_tasklet);
#endif
				return;
			}

			//rx desc parsing
			rtl8723b_query_rx_desc_status(precvframe, ptr);

			pattrib = &precvframe->u.hdr.attrib;

			// fix Hardware RX data error, drop whole recv_buffer
			if ((!(pHalData->ReceiveConfig & RCR_ACRC32)) && pattrib->crc_err)
			{
#if !(MP_DRIVER==1)
				DBG_8192C("%s()-%d: RX Warning! rx CRC ERROR !!\n", __FUNCTION__, __LINE__);
#endif
				rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
				break;
			}

			rx_report_sz = RXDESC_SIZE + pattrib->drvinfo_sz;
			pkt_offset = rx_report_sz + pattrib->shift_sz + pattrib->pkt_len;

			if ((ptr + pkt_offset) > precvbuf->ptail) {
				DBG_8192C("%s()-%d: next pkt len(%p,%d) exceeds ptail(%p)!\n", __FUNCTION__, __LINE__, ptr, pkt_offset, precvbuf->ptail);
				rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
				break;
			}

			if ((pattrib->crc_err) || (pattrib->icv_err))
			{
#ifdef CONFIG_MP_INCLUDED
				if (padapter->registrypriv.mp_mode == 1)
				{
					if ((check_fwstate(&padapter->mlmepriv, WIFI_MP_STATE) == _TRUE))//&&(padapter->mppriv.check_mp_pkt == 0))
					{
						if (pattrib->crc_err == 1)
							padapter->mppriv.rx_crcerrpktcount++;
					}
				}
				else
#endif
				{
					DBG_8192C("%s: crc_err=%d icv_err=%d, skip!\n", __FUNCTION__, pattrib->crc_err, pattrib->icv_err);
				}
				rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
			}
			else
			{
				//	Modified by Albert 20101213
				//	For 8 bytes IP header alignment.
				if (pattrib->qos)	//	Qos data, wireless lan header length is 26
				{
					shift_sz = 6;
				}
				else
				{
					shift_sz = 0;
				}

				skb_len = pattrib->pkt_len;

				// for a first-fragment packet, the driver needs to allocate 1536+drvinfo_sz+RXDESC_SIZE to defrag the packet.
				// modify alloc_sz for receiving crc error packets by thomas 2011-06-02
				if((pattrib->mfrag == 1)&&(pattrib->frag_num == 0)){
					//alloc_sz = 1664;	//1664 is 128 alignment.
					if(skb_len <= 1650)
						alloc_sz = 1664;
					else
						alloc_sz = skb_len + 14;
				}
				else {
					alloc_sz = skb_len;
					//	6 is for IP header 8 bytes alignment in QoS packet case.
					//	8 is for skb->data 4 bytes alignment.
					alloc_sz += 14;
				}

				pkt_copy = rtw_skb_alloc(alloc_sz);

				if (pkt_copy)
				{
					pkt_copy->dev = padapter->pnetdev;
					precvframe->u.hdr.pkt = pkt_copy;
					skb_reserve( pkt_copy, 8 - ((SIZE_PTR)( pkt_copy->data ) & 7 ));//force pkt_copy->data at 8-byte alignment address
					skb_reserve( pkt_copy, shift_sz );//force ip_hdr at 8-byte alignment address according to shift_sz.
					_rtw_memcpy(pkt_copy->data, (ptr + rx_report_sz + pattrib->shift_sz), skb_len);
					precvframe->u.hdr.rx_head = pkt_copy->head;
					precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail = pkt_copy->data;
					precvframe->u.hdr.rx_end = skb_end_pointer(pkt_copy);
				}
				else
				{
					if((pattrib->mfrag == 1)&&(pattrib->frag_num == 0))
					{
						DBG_8192C("%s: alloc_skb fail, drop frag frame\n", __FUNCTION__);
						rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
						break;
					}

					precvframe->u.hdr.pkt = rtw_skb_clone(precvbuf->pskb);
					if(precvframe->u.hdr.pkt)
					{
						_pkt	*pkt_clone = precvframe->u.hdr.pkt;

						pkt_clone->data = ptr + rx_report_sz + pattrib->shift_sz;
						skb_reset_tail_pointer(pkt_clone);
						precvframe->u.hdr.rx_head = precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail
							= pkt_clone->data;
						precvframe->u.hdr.rx_end =	pkt_clone->data + skb_len;
					}
					else
					{
						DBG_8192C("%s: rtw_skb_clone fail\n", __FUNCTION__);
						rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
						break;
					}
				}

				recvframe_put(precvframe, skb_len);
				//recvframe_pull(precvframe, drvinfo_sz + RXDESC_SIZE);

				if (pHalData->ReceiveConfig & RCR_APPFCS)
					recvframe_pull_tail(precvframe, IEEE80211_FCS_LEN);

				// move to drv info position
				ptr += RXDESC_SIZE;

				// update drv info
				if (pHalData->ReceiveConfig & RCR_APP_BA_SSN) {
					//rtl8723s_update_bassn(padapter, pdrvinfo);
					ptr += 4;
				}

				if (pattrib->pkt_rpt_type == NORMAL_RX) {
					// skip the rx packet with abnormal length
					if (pattrib->pkt_len < 14 || pattrib->pkt_len > 8192) {	
						DBG_8192C("skip abnormal rx packet(%d)\n", pattrib->pkt_len);
						rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
						break;
					}

#ifdef CONFIG_CONCURRENT_MODE
					if (rtw_buddy_adapter_up(padapter))
					{
						if (pre_recv_entry(precvframe, precvbuf, ptr) != _SUCCESS) {
							RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,
								("recvbuf2recvframe: recv_entry(precvframe) != _SUCCESS\n"));
						}
					}
					else
#endif
					{
						if (pattrib->physt)
							rx_query_phy_status(precvframe, ptr);

						if (rtw_recv_entry(precvframe) != _SUCCESS) {
							RT_TRACE(_module_rtl871x_recv_c_, _drv_dump_, ("%s: rtw_recv_entry(precvframe) != _SUCCESS\n",__FUNCTION__));
						}
					}
				}
				else {
#ifdef CONFIG_C2H_PACKET_EN
					if (pattrib->pkt_rpt_type == C2H_PACKET) {
						rtl8723b_c2h_packet_handler(padapter, precvframe->u.hdr.rx_data, pattrib->pkt_len);
					}
					else {
						DBG_8192C("%s: [WARNING] RX type(%d) not handled!\n",
							__FUNCTION__, pattrib->pkt_rpt_type);
					}
#endif
					rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
				}
			}

			pkt_offset = _RND8(pkt_offset);
			precvbuf->pdata += pkt_offset;
			ptr = precvbuf->pdata;
			precvframe = NULL;
			pkt_copy = NULL;
		}

		rtw_enqueue_recvbuf(precvbuf, &precvpriv->free_recv_buf_queue);
	} while (1);

}
Example #23
static void rtl8723bs_recv_tasklet(void *priv)
{
	PADAPTER			padapter;
	PHAL_DATA_TYPE		pHalData;
	struct recv_priv		*precvpriv;
	struct recv_buf		*precvbuf;
	union recv_frame		*precvframe;
	struct rx_pkt_attrib	*pattrib;
	u8		*ptr;
	u32	pkt_offset, skb_len, alloc_sz;
	_pkt		*pkt_copy = NULL;
	u8		shift_sz = 0, rx_report_sz = 0;


	padapter = (PADAPTER)priv;
	pHalData = GET_HAL_DATA(padapter);
	precvpriv = &padapter->recvpriv;

	do {
		precvbuf = rtw_dequeue_recvbuf(&precvpriv->recv_buf_pending_queue);
		if (NULL == precvbuf) break;

		ptr = precvbuf->pdata;

		while (ptr < precvbuf->ptail)
		{
			precvframe = rtw_alloc_recvframe(&precvpriv->free_recv_queue);
			if (precvframe == NULL)
			{
				DBG_8192C("%s: not enough recv frames!\n", __FUNCTION__);
				rtw_enqueue_recvbuf_to_head(precvbuf, &precvpriv->recv_buf_pending_queue);

				/*  Failure to allocate a recvframe should be temporary, so */
				/*  schedule again and hope one is available next time. */
				tasklet_schedule(&precvpriv->recv_tasklet);
				return;
			}

			/* rx desc parsing */
			update_recvframe_attrib(padapter, precvframe, (struct recv_stat*)ptr);

			pattrib = &precvframe->u.hdr.attrib;

			/*  fix Hardware RX data error, drop whole recv_buffer */
			if ((!(pHalData->ReceiveConfig & RCR_ACRC32)) && pattrib->crc_err)
			{
				DBG_8192C("%s()-%d: RX Warning! rx CRC ERROR !!\n", __FUNCTION__, __LINE__);
				rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
				break;
			}

			rx_report_sz = RXDESC_SIZE + pattrib->drvinfo_sz;
			pkt_offset = rx_report_sz + pattrib->shift_sz + pattrib->pkt_len;

			if ((ptr + pkt_offset) > precvbuf->ptail) {
				DBG_8192C("%s()-%d: next pkt len(%p,%d) exceeds ptail(%p)!\n", __FUNCTION__, __LINE__, ptr, pkt_offset, precvbuf->ptail);
				rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
				break;
			}

			if ((pattrib->crc_err) || (pattrib->icv_err))
			{
				DBG_8192C("%s: crc_err=%d icv_err=%d, skip!\n", __FUNCTION__, pattrib->crc_err, pattrib->icv_err);
				rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
			}
			else
			{
				/* 	Modified by Albert 20101213 */
				/* 	For 8 bytes IP header alignment. */
				if (pattrib->qos)	/* 	Qos data, wireless lan header length is 26 */
				{
					shift_sz = 6;
				}
				else
				{
					shift_sz = 0;
				}

				skb_len = pattrib->pkt_len;

				/*  for a first-fragment packet, the driver needs to allocate 1536+drvinfo_sz+RXDESC_SIZE to defrag the packet. */
				/*  modify alloc_sz for receiving crc error packets by thomas 2011-06-02 */
				if ((pattrib->mfrag == 1)&&(pattrib->frag_num == 0)){
					if (skb_len <= 1650)
						alloc_sz = 1664;
					else
						alloc_sz = skb_len + 14;
				}
				else {
					alloc_sz = skb_len;
					/* 	6 is for IP header 8 bytes alignment in QoS packet case. */
					/* 	8 is for skb->data 4 bytes alignment. */
					alloc_sz += 14;
				}

				pkt_copy = rtw_skb_alloc(alloc_sz);

				if (pkt_copy)
				{
					pkt_copy->dev = padapter->pnetdev;
					precvframe->u.hdr.pkt = pkt_copy;
					skb_reserve(pkt_copy, 8 - ((SIZE_PTR)(pkt_copy->data) & 7));/* force pkt_copy->data at 8-byte alignment address */
					skb_reserve(pkt_copy, shift_sz);/* force ip_hdr at 8-byte alignment address according to shift_sz. */
					memcpy(pkt_copy->data, (ptr + rx_report_sz + pattrib->shift_sz), skb_len);
					precvframe->u.hdr.rx_head = pkt_copy->head;
					precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail = pkt_copy->data;
					precvframe->u.hdr.rx_end = skb_end_pointer(pkt_copy);
				}
				else
				{
					if ((pattrib->mfrag == 1)&&(pattrib->frag_num == 0))
					{
						DBG_8192C("%s: alloc_skb fail, drop frag frame\n", __FUNCTION__);
						rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
						break;
					}

					precvframe->u.hdr.pkt = rtw_skb_clone(precvbuf->pskb);
					if (precvframe->u.hdr.pkt)
					{
						_pkt	*pkt_clone = precvframe->u.hdr.pkt;

						pkt_clone->data = ptr + rx_report_sz + pattrib->shift_sz;
						skb_reset_tail_pointer(pkt_clone);
						precvframe->u.hdr.rx_head = precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail
							= pkt_clone->data;
						precvframe->u.hdr.rx_end =	pkt_clone->data + skb_len;
					}
					else
					{
						DBG_8192C("%s: rtw_skb_clone fail\n", __FUNCTION__);
						rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
						break;
					}
				}

				recvframe_put(precvframe, skb_len);
				/* recvframe_pull(precvframe, drvinfo_sz + RXDESC_SIZE); */

				if (pHalData->ReceiveConfig & RCR_APPFCS)
					recvframe_pull_tail(precvframe, IEEE80211_FCS_LEN);

				/*  move to drv info position */
				ptr += RXDESC_SIZE;

				/*  update drv info */
				if (pHalData->ReceiveConfig & RCR_APP_BA_SSN) {
					/* rtl8723s_update_bassn(padapter, pdrvinfo); */
					ptr += 4;
				}

				if (pattrib->pkt_rpt_type == NORMAL_RX)/* Normal rx packet */
				{
					if (pattrib->physt)
						update_recvframe_phyinfo(precvframe, (struct phy_stat*)ptr);

					if (rtw_recv_entry(precvframe) != _SUCCESS)
					{
						RT_TRACE(_module_rtl871x_recv_c_, _drv_dump_, ("%s: rtw_recv_entry(precvframe) != _SUCCESS\n", __FUNCTION__));
					}
				}
				else if (pattrib->pkt_rpt_type == C2H_PACKET)
				{
					C2H_EVT_HDR	C2hEvent;

					u16 len_c2h = pattrib->pkt_len;
					u8 *pbuf_c2h = precvframe->u.hdr.rx_data;
					u8 *pdata_c2h;

					C2hEvent.CmdID = pbuf_c2h[0];
					C2hEvent.CmdSeq = pbuf_c2h[1];
					C2hEvent.CmdLen = (len_c2h - 2);
					pdata_c2h = pbuf_c2h + 2;

					if (C2hEvent.CmdID == C2H_CCX_TX_RPT)
					{
						CCX_FwC2HTxRpt_8723b(padapter, pdata_c2h, C2hEvent.CmdLen);
					}
					else
					{
						rtl8723bs_c2h_packet_handler(padapter, precvframe->u.hdr.rx_data, pattrib->pkt_len);
					}

					rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);

				}
			}

			pkt_offset = _RND8(pkt_offset);
			precvbuf->pdata += pkt_offset;
			ptr = precvbuf->pdata;
			precvframe = NULL;
			pkt_copy = NULL;
		}

		rtw_enqueue_recvbuf(precvbuf, &precvpriv->free_recv_buf_queue);
	} while (1);

}
Example #24
inline struct sk_buff *skb_recycler_alloc(struct net_device *dev,
					  unsigned int length)
{
	unsigned long flags;
	struct sk_buff_head *h;
	struct sk_buff *skb = NULL;

	if (unlikely(length > SKB_RECYCLE_SIZE))
		return NULL;

	h = &get_cpu_var(recycle_list);
	local_irq_save(flags);
	skb = __skb_dequeue(h);
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
	if (unlikely(!skb)) {
		uint8_t head;
		spin_lock(&glob_recycler.lock);
		/* If global recycle list is not empty, use global buffers */
		head = glob_recycler.head;
		if (likely(head != glob_recycler.tail)) {
			/* Move SKBs from global list to CPU pool */
			skb_queue_splice_init(&glob_recycler.pool[head], h);
			head = (head + 1) & SKB_RECYCLE_MAX_SHARED_POOLS_MASK;
			glob_recycler.head = head;
			spin_unlock(&glob_recycler.lock);
			/* We have refilled the CPU pool - dequeue */
			skb = __skb_dequeue(h);
		} else {
			spin_unlock(&glob_recycler.lock);
		}
	}
#endif
	local_irq_restore(flags);
	put_cpu_var(recycle_list);

	if (likely(skb)) {
		struct skb_shared_info *shinfo;

		/*
		 * We're about to write a large amount to the skb to
		 * zero most of the structure so prefetch the start
		 * of the shinfo region now so it's in the D-cache
		 * before we start to write that.
		 */
		shinfo = skb_shinfo(skb);
		prefetchw(shinfo);

		zero_struct(skb, offsetof(struct sk_buff, tail));
		skb->data = skb->head;
		skb_reset_tail_pointer(skb);
#ifdef NET_SKBUFF_DATA_USES_OFFSET
		skb->mac_header = ~0U;
#endif
		zero_struct(shinfo, offsetof(struct skb_shared_info, dataref));
		atomic_set(&shinfo->dataref, 1);

		skb_reserve(skb, NET_SKB_PAD);

	if (dev)
		skb->dev = dev;
	}

	return skb;
}
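
A recycler like this only pays off if the driver's refill path consults it before the generic allocator. A hedged sketch of such a call site, reusing skb_recycler_alloc() and SKB_RECYCLE_SIZE from above (rx_refill_one is a hypothetical helper name, not part of this code):

static struct sk_buff *rx_refill_one(struct net_device *dev)
{
	struct sk_buff *skb;

	/* fast path: per-CPU recycle list, no page allocator involved */
	skb = skb_recycler_alloc(dev, SKB_RECYCLE_SIZE);
	if (skb)
		return skb;

	/* slow path: regular allocation; __netdev_alloc_skb() also leaves
	 * NET_SKB_PAD headroom, matching what the recycler reserves */
	return __netdev_alloc_skb(dev, SKB_RECYCLE_SIZE, GFP_ATOMIC);
}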
Example #25
File: skbuff.c Project: foxwolf/yjd
struct sk_buff *__alloc_skb(const char *name, unsigned int size, gfp_t gfp_mask, int fclone, int node)
{
	uint32_t ret_size;
	//	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
#if USE_MEM_DEBUG
	skb = mem_calloc_ex(name, sizeof(struct sk_buff), 1);
#else
	skb = mem_calloc(sizeof(struct sk_buff), 1);
#endif
	if (!skb)
		goto out;

	g_skb_alloc_size += get_mem_size(skb);
	
	size = SKB_DATA_ALIGN(size);
	data = mem_malloc(size + sizeof(struct skb_shared_info));

	if (!data)
		goto nodata;
	//	prefetchw(data + size);

	g_skb_alloc_size += get_mem_size(data);
	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif
	//SET_MONITOR_ITEM_VALUE(_g_skb_alloc_size, g_skb_alloc_size);
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, /*offsetof(struct skb_shared_info, dataref)*/sizeof(struct skb_shared_info));
	atomic_set(&shinfo->dataref, 1);
	//	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (fclone)
		p_err("fclone\n");
out:
	return skb;

nodata:
	/* 'cache' is never allocated in this port, so kmem_cache_free() cannot
	 * be used here; mem_free() is assumed as the counterpart of the
	 * mem_calloc()/mem_malloc() helpers used above */
	ret_size = get_mem_size(skb);
	g_skb_alloc_size -= ret_size;
	mem_free(skb);
	skb = NULL;
	goto out;
}
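
On success this port leaves the buffer in the canonical post-construction state: head == data == tail, end == tail + size, len == 0. Callers then carve the buffer with skb_reserve()/skb_put(), which only move pointers within [head, end]. A short usage sketch against this port's signature (the "rx" tag and the 64/128 sizes are illustrative, not values from the project):

	struct sk_buff *skb = __alloc_skb("rx", 2048, 0, 0, -1);

	if (skb) {
		skb_reserve(skb, 64);			/* headroom: data and tail advance together */
		memset(skb_put(skb, 128), 0, 128);	/* payload: tail and len grow by 128 */
		/* invariant now: skb->data - skb->head == 64 and skb->len == 128 */
	}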
Example #26
static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
				      const u8 *bssid, const int beacon_int,
				      struct ieee80211_channel *chan,
				      const u32 basic_rates,
				      const u16 capability, u64 tsf)
{
	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
	struct ieee80211_local *local = sdata->local;
	int rates, i;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u8 *pos;
	struct ieee80211_supported_band *sband;
	struct cfg80211_bss *bss;
	u32 bss_change;
	u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
	enum nl80211_channel_type channel_type;

	lockdep_assert_held(&ifibss->mtx);

	/* Reset own TSF to allow time synchronization work. */
	drv_reset_tsf(local, sdata);

	skb = ifibss->skb;
	RCU_INIT_POINTER(ifibss->presp, NULL);
	synchronize_rcu();
	skb->data = skb->head;
	skb->len = 0;
	skb_reset_tail_pointer(skb);
	skb_reserve(skb, sdata->local->hw.extra_tx_headroom);

	if (memcmp(ifibss->bssid, bssid, ETH_ALEN))
		sta_info_flush(sdata->local, sdata);

	/* if merging, indicate to driver that we leave the old IBSS */
	if (sdata->vif.bss_conf.ibss_joined) {
		sdata->vif.bss_conf.ibss_joined = false;
		netif_carrier_off(sdata->dev);
		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS);
	}

	memcpy(ifibss->bssid, bssid, ETH_ALEN);

	sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;

	channel_type = ifibss->channel_type;
	if (channel_type > NL80211_CHAN_HT20 &&
	    !cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
		channel_type = NL80211_CHAN_HT20;
	if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
		/* can only fail due to HT40+/- mismatch */
		channel_type = NL80211_CHAN_HT20;
		WARN_ON(!ieee80211_set_channel_type(local, sdata,
						    NL80211_CHAN_HT20));
	}
	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);

	sband = local->hw.wiphy->bands[chan->band];

	/* build supported rates array */
	pos = supp_rates;
	for (i = 0; i < sband->n_bitrates; i++) {
		int rate = sband->bitrates[i].bitrate;
		u8 basic = 0;
		if (basic_rates & BIT(i))
			basic = 0x80;
		*pos++ = basic | (u8) (rate / 5);
	}

	/* Build IBSS probe response */
	mgmt = (void *) skb_put(skb, 24 + sizeof(mgmt->u.beacon));
	memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_PROBE_RESP);
	memset(mgmt->da, 0xff, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
	mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int);
	mgmt->u.beacon.timestamp = cpu_to_le64(tsf);
	mgmt->u.beacon.capab_info = cpu_to_le16(capability);

	pos = skb_put(skb, 2 + ifibss->ssid_len);
	*pos++ = WLAN_EID_SSID;
	*pos++ = ifibss->ssid_len;
	memcpy(pos, ifibss->ssid, ifibss->ssid_len);

	rates = sband->n_bitrates;
	if (rates > 8)
		rates = 8;
	pos = skb_put(skb, 2 + rates);
	*pos++ = WLAN_EID_SUPP_RATES;
	*pos++ = rates;
	memcpy(pos, supp_rates, rates);

	if (sband->band == IEEE80211_BAND_2GHZ) {
		pos = skb_put(skb, 2 + 1);
		*pos++ = WLAN_EID_DS_PARAMS;
		*pos++ = 1;
		*pos++ = ieee80211_frequency_to_channel(chan->center_freq);
	}

	pos = skb_put(skb, 2 + 2);
	*pos++ = WLAN_EID_IBSS_PARAMS;
	*pos++ = 2;
	/* FIX: set ATIM window based on scan results */
	*pos++ = 0;
	*pos++ = 0;

	if (sband->n_bitrates > 8) {
		rates = sband->n_bitrates - 8;
		pos = skb_put(skb, 2 + rates);
		*pos++ = WLAN_EID_EXT_SUPP_RATES;
		*pos++ = rates;
		memcpy(pos, &supp_rates[8], rates);
	}

	if (ifibss->ie_len)
		memcpy(skb_put(skb, ifibss->ie_len),
		       ifibss->ie, ifibss->ie_len);

	/* add HT capability and information IEs */
	if (channel_type && sband->ht_cap.ht_supported) {
		pos = skb_put(skb, 4 +
				   sizeof(struct ieee80211_ht_cap) +
				   sizeof(struct ieee80211_ht_info));
		pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
						sband->ht_cap.cap);
		pos = ieee80211_ie_build_ht_info(pos,
						 &sband->ht_cap,
						 chan,
						 channel_type);
	}

	if (local->hw.queues >= 4) {
		pos = skb_put(skb, 9);
		*pos++ = WLAN_EID_VENDOR_SPECIFIC;
		*pos++ = 7; /* len */
		*pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
		*pos++ = 0x50;
		*pos++ = 0xf2;
		*pos++ = 2; /* WME */
		*pos++ = 0; /* WME info */
		*pos++ = 1; /* WME ver */
		*pos++ = 0; /* U-APSD not in use */
	}

	RCU_INIT_POINTER(ifibss->presp, skb);

	sdata->vif.bss_conf.beacon_int = beacon_int;
	sdata->vif.bss_conf.basic_rates = basic_rates;
	bss_change = BSS_CHANGED_BEACON_INT;
	bss_change |= ieee80211_reset_erp_info(sdata);
	bss_change |= BSS_CHANGED_BSSID;
	bss_change |= BSS_CHANGED_BEACON;
	bss_change |= BSS_CHANGED_BEACON_ENABLED;
	bss_change |= BSS_CHANGED_BASIC_RATES;
	bss_change |= BSS_CHANGED_HT;
	bss_change |= BSS_CHANGED_IBSS;
	sdata->vif.bss_conf.ibss_joined = true;
	ieee80211_bss_info_change_notify(sdata, bss_change);

	ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates);

	ifibss->state = IEEE80211_IBSS_MLME_JOINED;
	mod_timer(&ifibss->timer,
		  round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));

	bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
					mgmt, skb->len, 0, GFP_KERNEL);
	cfg80211_put_bss(bss);
	netif_carrier_on(sdata->dev);
	cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
}
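
Every element in the probe response above is built with the same (tag, length, value) idiom around skb_put(). Reduced to a helper for clarity (ibss_put_ie is a hypothetical name; mac80211 itself open-codes the stores as shown):

/* append one 802.11 information element: EID, length, then payload */
static u8 *ibss_put_ie(struct sk_buff *skb, u8 eid, const u8 *data, u8 len)
{
	u8 *pos = skb_put(skb, 2 + len);

	*pos++ = eid;
	*pos++ = len;
	memcpy(pos, data, len);
	return pos + len;
}

With it, the SSID element above collapses to a single call: ibss_put_ie(skb, WLAN_EID_SSID, ifibss->ssid, ifibss->ssid_len).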