static int ifb_open(struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);

	tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
	__skb_queue_head_init(&dp->rq);
	__skb_queue_head_init(&dp->tq);
	netif_start_queue(dev);

	return 0;
}
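A note on the helper these examples share: __skb_queue_head_init() resets only the list pointers and qlen; unlike skb_queue_head_init() it does not initialize the embedded spinlock, so it is meant for queues that are never shared or that are protected by an outer lock, as in the snippets below. A minimal sketch of the usual pattern follows (kernel context assumed; the demo_* name is hypothetical, not from any driver):

/* Sketch: stage skbs on a private, lockless list by splicing them
 * out of a shared queue under its lock, then drain without holding
 * the lock. All demo_* names are made up for illustration.
 */
static void demo_drain(struct sk_buff_head *shared)
{
	struct sk_buff_head tmp;
	struct sk_buff *skb;

	__skb_queue_head_init(&tmp);	/* pointers + qlen only, no lock init */

	spin_lock(&shared->lock);
	skb_queue_splice_init(shared, &tmp);
	spin_unlock(&shared->lock);

	/* tmp is private now; the lockless helpers are safe here */
	while ((skb = __skb_dequeue(&tmp)) != NULL)
		kfree_skb(skb);
}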
Example #2
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
	int err = -ENOBUFS;
	struct net_device *dev;

	if (!dev_queue) {
		NL_SET_ERR_MSG(extack, "No device queue given");
		err = -EINVAL;
		goto errout;
	}

	dev = dev_queue->dev;
	p = kzalloc_node(size, GFP_KERNEL,
			 netdev_queue_numa_node_read(dev_queue));

	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	/* if we got non-aligned memory, ask for more and do the alignment ourselves */
	if (sch != p) {
		kfree(p);
		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
				 netdev_queue_numa_node_read(dev_queue));
		if (!p)
			goto errout;
		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
		sch->padded = (char *) sch - (char *) p;
	}
	__skb_queue_head_init(&sch->gso_skb);
	__skb_queue_head_init(&sch->skb_bad_txq);
	qdisc_skb_head_init(&sch->q);
	spin_lock_init(&sch->q.lock);

	if (ops->static_flags & TCQ_F_CPUSTATS) {
		sch->cpu_bstats =
			netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!sch->cpu_bstats)
			goto errout1;

		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!sch->cpu_qstats) {
			free_percpu(sch->cpu_bstats);
			goto errout1;
		}
	}
Example #3
File: dma.c Project: Lyude/linux
static void mt76x0_tx_tasklet(unsigned long data)
{
	struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
	struct sk_buff_head skbs;
	unsigned long flags;

	__skb_queue_head_init(&skbs);

	spin_lock_irqsave(&dev->tx_lock, flags);

	set_bit(MT76_MORE_STATS, &dev->mt76.state);
	if (!test_and_set_bit(MT76_READING_STATS, &dev->mt76.state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));

	skb_queue_splice_init(&dev->tx_skb_done, &skbs);

	spin_unlock_irqrestore(&dev->tx_lock, flags);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);

		mt76x0_tx_status(dev, skb);
	}
}
Example #4
File: aoedev.c Project: 08opt/linux
static void
skbpoolfree(struct aoedev *d)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&d->skbpool, skb, tmp)
		skbfree(skb);

	__skb_queue_head_init(&d->skbpool);
}
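Here skbfree() is aoe's own free helper, so the walk leaves stale links behind and the head is reinitialized afterwards. When a plain kfree_skb() is all that is needed, the same cleanup collapses to __skb_queue_purge(), which dequeues and frees every entry and leaves the list empty. A hedged equivalent (demo_* name hypothetical):

/* Sketch (assumption: plain kfree_skb() would suffice for these skbs):
 * __skb_queue_purge() repeatedly __skb_dequeue()s and kfree_skb()s,
 * so no separate re-initialization of the head is required.
 */
static void demo_pool_free(struct sk_buff_head *pool)
{
	__skb_queue_purge(pool);
}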
Example #5
static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
			       struct mv88e6xxx_port_hwtstamp *ps,
			       struct sk_buff *skb, u16 reg,
			       struct sk_buff_head *rxq)
{
	u16 buf[4] = { 0 }, status, seq_id;
	struct skb_shared_hwtstamps *shwt;
	struct sk_buff_head received;
	u64 ns, timelo, timehi;
	unsigned long flags;
	int err;

	/* The latched timestamp belongs to one of the received frames. */
	__skb_queue_head_init(&received);
	spin_lock_irqsave(&rxq->lock, flags);
	skb_queue_splice_tail_init(rxq, &received);
	spin_unlock_irqrestore(&rxq->lock, flags);

	mutex_lock(&chip->reg_lock);
	err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
				      reg, buf, ARRAY_SIZE(buf));
	mutex_unlock(&chip->reg_lock);
	if (err)
		pr_err("failed to get the receive time stamp\n");

	status = buf[0];
	timelo = buf[1];
	timehi = buf[2];
	seq_id = buf[3];

	if (status & MV88E6XXX_PTP_TS_VALID) {
		mutex_lock(&chip->reg_lock);
		err = mv88e6xxx_port_ptp_write(chip, ps->port_id, reg, 0);
		mutex_unlock(&chip->reg_lock);
		if (err)
			pr_err("failed to clear the receive status\n");
	}
	/* Since the device can only handle one time stamp at a time,
	 * we purge any extra frames from the queue.
	 */
	for ( ; skb; skb = __skb_dequeue(&received)) {
		if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) {
			ns = timehi << 16 | timelo;

			mutex_lock(&chip->reg_lock);
			ns = timecounter_cyc2time(&chip->tstamp_tc, ns);
			mutex_unlock(&chip->reg_lock);
			shwt = skb_hwtstamps(skb);
			memset(shwt, 0, sizeof(*shwt));
			shwt->hwtstamp = ns_to_ktime(ns);
			status &= ~MV88E6XXX_PTP_TS_VALID;
		}
		netif_rx_ni(skb);
	}
}
Example #6
static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__skb_queue_head_init(band2list(priv, prio));

	/* Can by-pass the queue discipline */
	qdisc->flags |= TCQ_F_CAN_BYPASS;
	return 0;
}
Example #7
File: ifb.c Project: AlexShiLucky/linux
static int ifb_dev_init(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp;
	int i;

	txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
	if (!txp)
		return -ENOMEM;
	dp->tx_private = txp;
	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		txp->txqnum = i;
		txp->dev = dev;
		__skb_queue_head_init(&txp->rq);
		__skb_queue_head_init(&txp->tq);
		u64_stats_init(&txp->rsync);
		u64_stats_init(&txp->tsync);
		tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
			     (unsigned long)txp);
		netif_tx_start_queue(netdev_get_tx_queue(dev, i));
	}
	return 0;
}
Example #8
File: mac80211.c Project: openwrt/mt76
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	__skb_queue_head_init(&frames);

	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
		mt76_check_sta(dev, skb);
		mt76_rx_aggr_reorder(skb, &frames);
	}

	mt76_rx_complete(dev, &frames, napi);
}
Example #9
File: ip6_output.c Project: guanhe0/kernel
struct sk_buff *ip6_make_skb(struct sock *sk,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     int hlimit, int tclass,
			     struct ipv6_txoptions *opt, struct flowi6 *fl6,
			     struct rt6_info *rt, unsigned int flags,
			     int dontfrag)
{
	struct inet_cork_full cork;
	struct inet6_cork v6_cork;
	struct sk_buff_head queue;
	int exthdrlen = (opt ? opt->opt_flen : 0);
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork.base.flags = 0;
	cork.base.addr = 0;
	cork.base.opt = NULL;
	v6_cork.opt = NULL;
	err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6);
	if (err)
		return ERR_PTR(err);

	if (dontfrag < 0)
		dontfrag = inet6_sk(sk)->dontfrag;

	err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
				&current->task_frag, getfrag, from,
				length + exthdrlen, transhdrlen + exthdrlen,
				flags, dontfrag);
	if (err) {
		__ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
		return ERR_PTR(err);
	}

	return __ip6_make_skb(sk, &queue, &cork, &v6_cork);
}
Example #10
static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long		flags;


	spin_lock_irqsave(&list->lock, flags);
	//HTC: ensure next and prev pointers are both valid before calling __skb_unlink()
	if (skb->next != NULL && skb->prev != NULL)
		__skb_unlink(skb, list);
	else {
		pr_info("%s(%d) skb next:%p prev:%p !!!\n", __func__, __LINE__, skb->next, skb->prev);
		list->qlen--;
	}

	//HTC: if qlen is already 0 but list->next != list, the list is corrupted;
	//     call __skb_queue_head_init() to recover the list to its initial state
	if ((list->qlen == 0) && !skb_queue_empty(list)) {
		pr_info("%s(%d) __skb_queue_head_init list:%p next:%p prev:%p !!!\n", __func__, __LINE__, list, list->next, list->prev);
		__skb_queue_head_init(list);
	}
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);
	__skb_queue_tail(&dev->done, skb);

	//HTC+++
	if (!test_bit(EVENT_DEV_ASLEEP, &dev->flags) && (dev->done.qlen > USBNET_DONE_QUEUE_HIGH_WATERMARK))
		pr_info("%s(%d) [USBNET] dev->done.qlen:%d\n", __func__, __LINE__, dev->done.qlen);
	//HTC---

	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);
	//HTC+++
	else if (dev->done.qlen > USBNET_DONE_QUEUE_HIGH_WATERMARK) {
		// HALT HSIC RX
		if (!test_bit (EVENT_RX_HALT, &dev->flags) && !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
			netdev_err(dev->net, "!!! [USBNET] dev->done.qlen %d > USBNET_DONE_QUEUE_HIGH_WATERMARK, set EVENT_RX_HALT !!!\n", dev->done.qlen);
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
		}
	}
	//HTC---
	spin_unlock_irqrestore(&dev->done.lock, flags);
}
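The recovery above works because __skb_queue_head_init() simply points the head back at itself and zeroes the count. For reference, its definition in include/linux/skbuff.h is essentially:

static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	/* empty list: head's next and prev both point at the head itself */
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}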
Example #11
File: xfrm_device.c Project: krzk/linux
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
Example #12
/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We don't use sfq_dequeue()/sfq_enqueue() because we don't want to change
 * counters.
 */
static void sfq_rehash(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	int i;
	struct sfq_slot *slot;
	struct sk_buff_head list;
	int dropped = 0;

	__skb_queue_head_init(&list);

	for (i = 0; i < q->maxflows; i++) {
		slot = &q->slots[i];
		if (!slot->qlen)
			continue;
		while (slot->qlen) {
			skb = slot_dequeue_head(slot);
			sfq_dec(q, i);
			__skb_queue_tail(&list, skb);
		}
		slot->backlog = 0;
		red_set_vars(&slot->vars);
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
	}
	q->tail = NULL;

	while ((skb = __skb_dequeue(&list)) != NULL) {
		unsigned int hash = sfq_hash(q, skb);
		sfq_index x = q->ht[hash];

		slot = &q->slots[x];
		if (x == SFQ_EMPTY_SLOT) {
			x = q->dep[0].next; /* get a free slot */
			if (x >= SFQ_MAX_FLOWS) {
drop:				sch->qstats.backlog -= qdisc_pkt_len(skb);
				kfree_skb(skb);
				dropped++;
				continue;
			}
			q->ht[hash] = x;
			slot = &q->slots[x];
			slot->hash = hash;
		}
		if (slot->qlen >= q->maxdepth)
			goto drop;
		slot_queue_add(slot, skb);
		if (q->red_parms)
			slot->vars.qavg = red_calc_qavg(q->red_parms,
							&slot->vars,
							slot->backlog);
		slot->backlog += qdisc_pkt_len(skb);
		sfq_inc(q, x);
		if (slot->qlen == 1) {		/* The flow is new */
			if (q->tail == NULL) {	/* It is the first flow */
				slot->next = x;
			} else {
				slot->next = q->tail->next;
				q->tail->next = x;
			}
			q->tail = slot;
			slot->allot = q->scaled_quantum;
		}
	}
	sch->q.qlen -= dropped;
	qdisc_tree_decrease_qlen(sch, dropped);
}
Example #13
static int nci_queue_tx_data_frags(struct nci_dev *ndev,
				   __u8 conn_id,
				   struct sk_buff *skb)
{
	int total_len = skb->len;
	unsigned char *data = skb->data;
	unsigned long flags;
	struct sk_buff_head frags_q;
	struct sk_buff *skb_frag;
	int frag_len;
	int rc = 0;

	nfc_dbg("entry, conn_id 0x%x, total_len %d", conn_id, total_len);

	__skb_queue_head_init(&frags_q);

	while (total_len) {
		frag_len =
			min_t(int, total_len, ndev->max_data_pkt_payload_size);

		skb_frag = nci_skb_alloc(ndev,
					(NCI_DATA_HDR_SIZE + frag_len),
					GFP_KERNEL);
		if (skb_frag == NULL) {
			rc = -ENOMEM;
			goto free_exit;
		}
		skb_reserve(skb_frag, NCI_DATA_HDR_SIZE);

		/* first, copy the data */
		memcpy(skb_put(skb_frag, frag_len), data, frag_len);

		/* second, set the header */
		nci_push_data_hdr(ndev, conn_id, skb_frag,
		((total_len == frag_len) ? (NCI_PBF_LAST) : (NCI_PBF_CONT)));

		__skb_queue_tail(&frags_q, skb_frag);

		data += frag_len;
		total_len -= frag_len;

		nfc_dbg("frag_len %d, remaining total_len %d",
			frag_len, total_len);
	}

	/* queue all fragments atomically */
	spin_lock_irqsave(&ndev->tx_q.lock, flags);

	while ((skb_frag = __skb_dequeue(&frags_q)) != NULL)
		__skb_queue_tail(&ndev->tx_q, skb_frag);

	spin_unlock_irqrestore(&ndev->tx_q.lock, flags);

	/* free the original skb */
	kfree_skb(skb);

	goto exit;

free_exit:
	while ((skb_frag = __skb_dequeue(&frags_q)) != NULL)
		kfree_skb(skb_frag);

exit:
	return rc;
}
Example #14
/*
 * This function processes the received buffer.
 *
 * The function looks into the RxPD and performs sanity tests on the
 * received buffer to ensure it's a valid packet before processing it
 * further. If the packet is determined to be aggregated, it is
 * de-aggregated accordingly. Non-unicast packets are sent directly to
 * the kernel/upper layers. Unicast packets are handed over to the
 * Rx reordering routine if 11n is enabled.
 *
 * The completion callback is called after processing is complete.
 */
int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
				  struct sk_buff *skb)
{
	int ret = 0;
	struct rxpd *local_rx_pd;
	struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
	struct rx_packet_hdr *rx_pkt_hdr;
	u8 ta[ETH_ALEN];
	u16 rx_pkt_type;
	struct mwifiex_private *priv =
			mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
					       rx_info->bss_type);

	if (!priv)
		return -1;

	local_rx_pd = (struct rxpd *) (skb->data);
	rx_pkt_type = local_rx_pd->rx_pkt_type;

	rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd +
					local_rx_pd->rx_pkt_offset);

	if ((local_rx_pd->rx_pkt_offset + local_rx_pd->rx_pkt_length) >
	    (u16) skb->len) {
		dev_err(adapter->dev, "wrong rx packet: len=%d,"
			" rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len,
		       local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length);
		priv->stats.rx_dropped++;
		dev_kfree_skb_any(skb);
		return ret;
	}

	if (local_rx_pd->rx_pkt_type == PKT_TYPE_AMSDU) {
		struct sk_buff_head list;
		struct sk_buff *rx_skb;

		__skb_queue_head_init(&list);

		skb_pull(skb, local_rx_pd->rx_pkt_offset);
		skb_trim(skb, local_rx_pd->rx_pkt_length);

		ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
					 priv->wdev->iftype, 0, false);

		while (!skb_queue_empty(&list)) {
			rx_skb = __skb_dequeue(&list);
			ret = mwifiex_recv_packet(adapter, rx_skb);
			if (ret == -1)
				dev_err(adapter->dev, "Rx of A-MSDU failed");
		}
		return 0;
	}

	/*
	 * If the packet is not a unicast packet, send it directly
	 * to the OS without passing through Rx reordering.
	 */
	if (!IS_11N_ENABLED(priv) ||
	    memcmp(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN)) {
		mwifiex_process_rx_packet(adapter, skb);
		return ret;
	}

	if (mwifiex_queuing_ra_based(priv)) {
		memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
	} else {
		if (rx_pkt_type != PKT_TYPE_BAR)
			priv->rx_seq[local_rx_pd->priority] =
						local_rx_pd->seq_num;
		memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
		       ETH_ALEN);
	}

	/* Reorder and send to OS */
	ret = mwifiex_11n_rx_reorder_pkt(priv, local_rx_pd->seq_num,
					     local_rx_pd->priority, ta,
					     (u8) local_rx_pd->rx_pkt_type,
					     skb);

	if (ret || (rx_pkt_type == PKT_TYPE_BAR))
		dev_kfree_skb_any(skb);

	if (ret)
		priv->stats.rx_dropped++;

	return ret;
}
Example #15
File: bcmsdh.c Project: Anjali05/linux
/**
 * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @func: SDIO function
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: skb buffer head pointer
 *
 * This function takes the responsibility as the interface function to MMC
 * stack for block data access. It assumes that the skb passed down by the
 * caller has already been padded and aligned.
 */
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
				 struct sdio_func *func,
				 bool write, u32 addr,
				 struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, src_offset, dst_offset;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff_head local_list, *target_list;
	struct sk_buff *pkt_next = NULL, *src;
	unsigned short max_seg_cnt;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, func->cur_blksize);
		while (req_sz > PAGE_SIZE) {
			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
			if (pkt_next == NULL) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
		if (pkt_next == NULL) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = func->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = sdiodev->sgtable.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag  */
	mmc_cmd.arg |= (func->num & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1 << 27;			/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (func->num == 1) ? 1 << 26 : 0;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

	req_sz = 0;
	sg_cnt = 0;
	sgl = sdiodev->sgtable.sgl;
	skb_queue_walk(target_list, pkt_next) {
		pkt_offset = 0;
		while (pkt_offset < pkt_next->len) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			sg_set_buf(sgl, pkt_data, sg_data_sz);
			sg_cnt++;

			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt) {
				ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
						     sg_cnt, req_sz, func_blk_sz,
						     &addr, sdiodev, func, write);
				if (ret)
					goto exit_queue_walk;
				req_sz = 0;
				sg_cnt = 0;
				sgl = sdiodev->sgtable.sgl;
			}
		}
	}
Example #16
File: bcmsdh.c Project: Lyude/linux
/**
 * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @func: SDIO function
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: skb buffer head pointer
 *
 * This function takes the responsibility as the interface function to MMC
 * stack for block data access. It assumes that the skb passed down by the
 * caller has already been padded and aligned.
 */
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
				 struct sdio_func *func,
				 bool write, u32 addr,
				 struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, orig_offset, dst_offset;
	unsigned short max_seg_cnt, seg_sz;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff *pkt_next = NULL, *local_pkt_next;
	struct sk_buff_head local_list, *target_list;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, func->cur_blksize);
		while (req_sz > PAGE_SIZE) {
			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
			if (pkt_next == NULL) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
		if (pkt_next == NULL) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = func->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);
	seg_sz = target_list->qlen;
	pkt_offset = 0;
	pkt_next = target_list->next;

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = sdiodev->sgtable.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag  */
	mmc_cmd.arg |= (func->num & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1 << 27;			/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (func->num == 1) ? 1 << 26 : 0;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

	while (seg_sz) {
		req_sz = 0;
		sg_cnt = 0;
		sgl = sdiodev->sgtable.sgl;
		/* prep sg table */
		while (pkt_next != (struct sk_buff *)target_list) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			sg_set_buf(sgl, pkt_data, sg_data_sz);

			sg_cnt++;
			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			if (pkt_offset == pkt_next->len) {
				pkt_offset = 0;
				pkt_next = pkt_next->next;
			}

			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
				break;
		}
		seg_sz -= sg_cnt;

		if (req_sz % func_blk_sz != 0) {
			brcmf_err("sg request length %u is not %u aligned\n",
				  req_sz, func_blk_sz);
			ret = -ENOTBLK;
			goto exit;
		}

		mmc_dat.sg_len = sg_cnt;
		mmc_dat.blocks = req_sz / func_blk_sz;
		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;	/* address */
		mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;	/* block count */
		/* incrementing addr for function 1 */
		if (func->num == 1)
			addr += req_sz;

		mmc_set_data_timeout(&mmc_dat, func->card);
		mmc_wait_for_req(func->card->host, &mmc_req);

		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
		if (ret == -ENOMEDIUM) {
			brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
			break;
		} else if (ret != 0) {
			brcmf_err("CMD53 sg block %s failed %d\n",
				  write ? "write" : "read", ret);
			ret = -EIO;
			break;
		}
	}

	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		local_pkt_next = local_list.next;
		orig_offset = 0;
		skb_queue_walk(pktlist, pkt_next) {
			dst_offset = 0;
			do {
				req_sz = local_pkt_next->len - orig_offset;
				req_sz = min_t(uint, pkt_next->len - dst_offset,
					       req_sz);
				orig_data = local_pkt_next->data + orig_offset;
				dst_data = pkt_next->data + dst_offset;
				memcpy(dst_data, orig_data, req_sz);
				orig_offset += req_sz;
				dst_offset += req_sz;
				if (orig_offset == local_pkt_next->len) {
					orig_offset = 0;
					local_pkt_next = local_pkt_next->next;
				}
				if (dst_offset == pkt_next->len)
					break;
			} while (!skb_queue_empty(&local_list));
		}
Example #17
int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
			       struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
	struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	struct ieee80211_hdr *hdr;
	u32 status = le16_to_cpu(tx_resp->status.status);
	u16 ssn = iwlagn_get_scd_ssn(tx_resp);
	int tid;
	int sta_id;
	int freed;
	struct ieee80211_tx_info *info;
	unsigned long flags;
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	struct iwl_rxon_context *ctx;
	bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);

	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);

	if (is_agg)
		iwl_rx_reply_tx_agg(priv, tx_resp);

	if (tx_resp->frame_count == 1) {
		u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
		next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);

		if (is_agg) {
			/* If this is an aggregation queue, we can rely on the
			 * ssn since the wifi sequence number corresponds to
			 * the index in the TFD ring (%256).
			 * The seq_ctl is the sequence control of the packet
			 * to which this Tx response relates. But if there is a
			 * hole in the bitmap of the BA we received, this Tx
			 * response may allow to reclaim the hole and all the
			 * subsequent packets that were already acked.
			 * In that case, seq_ctl != ssn, and the next packet
			 * to be reclaimed will be ssn and not seq_ctl.
			 */
			next_reclaimed = ssn;
		}

		__skb_queue_head_init(&skbs);
		priv->tid_data[sta_id][tid].next_reclaimed = next_reclaimed;

		IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d",
					  next_reclaimed);

		/* we can free until ssn % q.n_bd, not inclusive */
		WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
				  ssn, status, &skbs));
		iwlagn_check_ratid_empty(priv, sta_id, tid);
		freed = 0;
		while (!skb_queue_empty(&skbs)) {
			skb = __skb_dequeue(&skbs);
			hdr = (struct ieee80211_hdr *)skb->data;

			if (!ieee80211_is_data_qos(hdr->frame_control))
				priv->last_seq_ctl = tx_resp->seq_ctl;

			info = IEEE80211_SKB_CB(skb);
			ctx = info->driver_data[0];
			kmem_cache_free(priv->tx_cmd_pool,
					(info->driver_data[1]));

			memset(&info->status, 0, sizeof(info->status));

			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
			    iwl_is_associated_ctx(ctx) && ctx->vif &&
			    ctx->vif->type == NL80211_IFTYPE_STATION) {
				ctx->last_tx_rejected = true;
				iwl_trans_stop_queue(trans(priv), txq_id,
					"Tx on passive channel");

				IWL_DEBUG_TX_REPLY(priv,
					   "TXQ %d status %s (0x%08x) "
					   "rate_n_flags 0x%x retries %d\n",
					   txq_id,
					   iwl_get_tx_fail_reason(status),
					   status,
					   le32_to_cpu(tx_resp->rate_n_flags),
					   tx_resp->failure_frame);

				IWL_DEBUG_TX_REPLY(priv,
					   "FrameCnt = %d, idx=%d\n",
					   tx_resp->frame_count, cmd_index);
			}

			/* check if BAR is needed */
			if (is_agg && !iwl_is_tx_success(status))
				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
				     tx_resp, is_agg);
			if (!is_agg)
				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

			ieee80211_tx_status_irqsafe(priv->hw, skb);

			freed++;
		}

		WARN_ON(!is_agg && freed != 1);
	}

	iwl_check_abort_status(priv, tx_resp->frame_count, status);
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
	return 0;
}
Example #18
static int
aoeblk_make_request(struct request_queue *q, struct bio *bio)
{
	struct sk_buff_head queue;
	struct aoedev *d;
	struct buf *buf;
	ulong flags;

	blk_queue_bounce(q, &bio);

	if (bio == NULL) {
		printk(KERN_ERR "aoe: bio is NULL\n");
		BUG();
		return 0;
	}
	d = bio->bi_bdev->bd_disk->private_data;
	if (d == NULL) {
		printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	} else if (bio->bi_io_vec == NULL) {
		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	}
	buf = mempool_alloc(d->bufpool, GFP_NOIO);
	if (buf == NULL) {
		printk(KERN_INFO "aoe: buf allocation failure\n");
		bio_endio(bio, -ENOMEM);
		return 0;
	}
	memset(buf, 0, sizeof(*buf));
	INIT_LIST_HEAD(&buf->bufs);
	buf->stime = jiffies;
	buf->bio = bio;
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	buf->bv = &bio->bi_io_vec[bio->bi_idx];
	buf->bv_resid = buf->bv->bv_len;
	WARN_ON(buf->bv_resid == 0);
	buf->bv_off = buf->bv->bv_offset;

	spin_lock_irqsave(&d->lock, flags);

	if ((d->flags & DEVFL_UP) == 0) {
		pr_info_ratelimited("aoe: device %ld.%d is not up\n",
			d->aoemajor, d->aoeminor);
		spin_unlock_irqrestore(&d->lock, flags);
		mempool_free(buf, d->bufpool);
		bio_endio(bio, -ENXIO);
		return 0;
	}

	list_add_tail(&buf->bufs, &d->bufq);

	aoecmd_work(d);
	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&d->sendq, &queue);

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(&queue);

	return 0;
}
Example #19
int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
			       struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
	struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	struct ieee80211_hdr *hdr;
	u32 status = le16_to_cpu(tx_resp->status.status);
	u32 ssn = iwlagn_get_scd_ssn(tx_resp);
	int tid;
	int sta_id;
	int freed;
	struct ieee80211_tx_info *info;
	unsigned long flags;
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	struct iwl_rxon_context *ctx;
	bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);

	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);

	if (is_agg)
		iwl_rx_reply_tx_agg(priv, tx_resp);

	if (tx_resp->frame_count == 1) {
		IWL_DEBUG_TX_REPLY(priv, "Q %d, ssn %d", txq_id, ssn);
		__skb_queue_head_init(&skbs);
		/* we can free until ssn % q.n_bd, not inclusive */
		iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
				  ssn, status, &skbs);
		freed = 0;
		while (!skb_queue_empty(&skbs)) {
			skb = __skb_dequeue(&skbs);
			hdr = (struct ieee80211_hdr *)skb->data;

			if (!ieee80211_is_data_qos(hdr->frame_control))
				priv->last_seq_ctl = tx_resp->seq_ctl;

			info = IEEE80211_SKB_CB(skb);
			ctx = info->driver_data[0];
			kmem_cache_free(priv->tx_cmd_pool,
					(info->driver_data[1]));

			memset(&info->status, 0, sizeof(info->status));

			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
			    iwl_is_associated_ctx(ctx) && ctx->vif &&
			    ctx->vif->type == NL80211_IFTYPE_STATION) {
				ctx->last_tx_rejected = true;
				iwl_trans_stop_queue(trans(priv), txq_id,
					"Tx on passive channel");

				IWL_DEBUG_TX_REPLY(priv,
					   "TXQ %d status %s (0x%08x) "
					   "rate_n_flags 0x%x retries %d\n",
					   txq_id,
					   iwl_get_tx_fail_reason(status),
					   status,
					   le32_to_cpu(tx_resp->rate_n_flags),
					   tx_resp->failure_frame);

				IWL_DEBUG_TX_REPLY(priv,
					   "FrameCnt = %d, idx=%d\n",
					   tx_resp->frame_count, cmd_index);
			}

			/* check if BAR is needed */
			if (is_agg && !iwl_is_tx_success(status))
				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
				     tx_resp, is_agg);
			if (!is_agg)
				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

			ieee80211_tx_status_irqsafe(priv->hw, skb);

			freed++;
		}

		WARN_ON(!is_agg && freed != 1);
	}

	iwl_check_abort_status(priv, tx_resp->frame_count, status);
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
	return 0;
}
Example #20
File: msg.c Project: 383530895/linux
/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: Position in iov to start copying from
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	int msz = mhsz + dsz;
	int pktno = 1;
	int pktsz;
	int pktrem = pktmax;
	int drem = dsz;
	struct tipc_msg pkthdr;
	struct sk_buff *skb;
	char *pktpos;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz);
		if (unlikely(!skb))
			return -ENOMEM;
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset,
						 dsz))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax);
	if (!skb)
		return -ENOMEM;
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, pktrem)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;
		offset += pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}
Example #21
/*
 * This function processes the packet received on AP interface.
 *
 * The function looks into the RxPD and performs sanity tests on the
 * received buffer to ensure it's a valid packet before processing it
 * further. If the packet is determined to be aggregated, it is
 * de-aggregated accordingly. Then skb is passed to AP packet forwarding logic.
 *
 * The completion callback is called after processing is complete.
 */
int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
				  struct sk_buff *skb)
{
	int ret;
	struct uap_rxpd *uap_rx_pd;
	struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
	struct rx_packet_hdr *rx_pkt_hdr;
	u16 rx_pkt_type;
	u8 ta[ETH_ALEN], pkt_type;
	struct mwifiex_sta_node *node;

	struct mwifiex_private *priv =
			mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
					       rx_info->bss_type);

	if (!priv)
		return -1;

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
	     le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) {
		dev_err(adapter->dev,
			"wrong rx packet: len=%d, offset=%d, length=%d\n",
			skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
			le16_to_cpu(uap_rx_pd->rx_pkt_length));
		priv->stats.rx_dropped++;

		if (adapter->if_ops.data_complete)
			adapter->if_ops.data_complete(adapter, skb);
		else
			dev_kfree_skb_any(skb);

		return 0;
	}

	if (le16_to_cpu(uap_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
		struct sk_buff_head list;
		struct sk_buff *rx_skb;

		__skb_queue_head_init(&list);
		skb_pull(skb, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
		skb_trim(skb, le16_to_cpu(uap_rx_pd->rx_pkt_length));

		ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
					 priv->wdev->iftype, 0, false);

		while (!skb_queue_empty(&list)) {
			rx_skb = __skb_dequeue(&list);
			ret = mwifiex_recv_packet(adapter, rx_skb);
			if (ret)
				dev_err(adapter->dev,
					"AP:Rx A-MSDU failed");
		}

		return 0;
	}

	memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);

	if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
		node = mwifiex_get_sta_entry(priv, ta);
		if (node)
			node->rx_seq[uap_rx_pd->priority] =
						le16_to_cpu(uap_rx_pd->seq_num);
	}

	if (!priv->ap_11n_enabled ||
	    (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
	    (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
		ret = mwifiex_handle_uap_rx_forward(priv, skb);
		return ret;
	}

	/* Reorder and send to kernel */
	pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
	ret = mwifiex_11n_rx_reorder_pkt(priv, le16_to_cpu(uap_rx_pd->seq_num),
					 uap_rx_pd->priority, ta, pkt_type,
					 skb);

	if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
		if (adapter->if_ops.data_complete)
			adapter->if_ops.data_complete(adapter, skb);
		else
			dev_kfree_skb_any(skb);
	}

	if (ret)
		priv->stats.rx_dropped++;

	return ret;
}
Example #22
/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb,
				   struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_ht_agg *agg;
	struct sk_buff_head reclaimed_skbs;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	unsigned long flags;
	int sta_id;
	int tid;
	int freed;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= hw_params(priv).max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return 0;
	}

	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->shrd->tid_data[sta_id][tid].agg;

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);

	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * FIXME: this is a uCode bug which needs to be addressed;
		 * log the information and return for now.
		 * Since it can happen very often, and in order not to fill
		 * the syslog, don't enable the logging by default.
		 */
		IWL_DEBUG_TX_REPLY(priv,
			"BA scd_flow %d does not match txq_id %d\n",
			scd_flow, agg->txq_id);
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
			   "scd_flow = %d, scd_ssn = %d\n",
			   ba_resp->tid, ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   scd_flow, ba_resp_scd_ssn);

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = false;

	/* Sanity check values reported by uCode */
	if (ba_resp->txed_2_done > ba_resp->txed) {
		IWL_DEBUG_TX_REPLY(priv,
			"bogus sent(%d) and ack(%d) count\n",
			ba_resp->txed, ba_resp->txed_2_done);
		/*
		 * set txed_2_done = txed,
		 * so it won't impact rate scale
		 */
		ba_resp->txed = ba_resp->txed_2_done;
	}
	IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
			ba_resp->txed, ba_resp->txed_2_done);

	__skb_queue_head_init(&reclaimed_skbs);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, ba_resp_scd_ssn,
			  0, &reclaimed_skbs);
	freed = 0;
	while (!skb_queue_empty(&reclaimed_skbs)) {

		skb = __skb_dequeue(&reclaimed_skbs);
		hdr = (struct ieee80211_hdr *)skb->data;

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(1);

		info = IEEE80211_SKB_CB(skb);
		kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1]));

		if (freed == 1) {
			/* this is the first skb we deliver in this batch */
			/* put the rate scaling data there */
			info = IEEE80211_SKB_CB(skb);
			memset(&info->status, 0, sizeof(info->status));
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			info->status.ampdu_ack_len = ba_resp->txed_2_done;
			info->status.ampdu_len = ba_resp->txed;
			iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
						    info);
		}

		ieee80211_tx_status_irqsafe(priv->hw, skb);
	}

	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
	return 0;
}
Example #23
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head seg_queue;
	int err;
	u8 amp_id;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(sk, skb);
			err = len;
		}
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EMSGSIZE;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		l2cap_do_send(sk, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:

		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EMSGSIZE;
			goto done;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		amp_id = pi->amp_id;
		err = l2cap_segment_sdu(sk, &seg_queue, msg, len, 0);

		/* The socket lock is released while segmenting, so check
		 * that the socket is still connected
		 */
		if (sk->sk_state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err) {
			BT_DBG("Error %d, sk_sndbuf %d, sk_wmem_alloc %d",
				err, sk->sk_sndbuf,
				atomic_read(&sk->sk_wmem_alloc));
			break;
		}

		if (pi->amp_id != amp_id) {
			/* Channel moved while unlocked. Resegment. */
			err = l2cap_resegment_queue(sk, &seg_queue);

			if (err)
				break;
		}

		if (pi->mode != L2CAP_MODE_STREAMING)
			err = l2cap_ertm_tx(sk, 0, &seg_queue,
				L2CAP_ERTM_EVENT_DATA_REQUEST);
		else
			err = l2cap_strm_tx(sk, &seg_queue);
		if (!err)
			err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EBADFD;
	}

done:
	release_sock(sk);
	return err;
}