Example #1
File: dma.c Project: Lyude/linux
static void mt76x0_tx_tasklet(unsigned long data)
{
	struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
	struct sk_buff_head skbs;
	unsigned long flags;

	__skb_queue_head_init(&skbs);

	spin_lock_irqsave(&dev->tx_lock, flags);

	set_bit(MT76_MORE_STATS, &dev->mt76.state);
	if (!test_and_set_bit(MT76_READING_STATS, &dev->mt76.state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));

	skb_queue_splice_init(&dev->tx_skb_done, &skbs);

	spin_unlock_irqrestore(&dev->tx_lock, flags);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);

		mt76x0_tx_status(dev, skb);
	}
}
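
Note: the pattern here — splice the whole pending list out under the lock, then walk the detached batch with the lock released — keeps the spinlock hold time short. A minimal userspace sketch of the same shape (illustrative only: a pthread mutex stands in for the spinlock, and all names are invented):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;			/* shared, lock-protected */

static void add_pending(int id)			/* producer side */
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return;
	n->id = id;
	pthread_mutex_lock(&list_lock);
	n->next = pending;
	pending = n;
	pthread_mutex_unlock(&list_lock);
}

static void process_batch(void)			/* consumer side */
{
	struct node *batch, *n;

	pthread_mutex_lock(&list_lock);
	batch = pending;			/* "splice": detach everything */
	pending = NULL;
	pthread_mutex_unlock(&list_lock);

	while ((n = batch)) {			/* batch is private now */
		batch = n->next;
		printf("completed %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	add_pending(1);
	add_pending(2);
	process_batch();
	return 0;
}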
Example #2
void gether_disconnect(struct gether *link)
{
    struct eth_dev		*dev = link->ioport;
    struct usb_request	*req;
    struct sk_buff		*skb;

    if (!dev)
        return;

    DBG(dev, "%s\n", __func__);

    netif_stop_queue(dev->net);
    netif_carrier_off(dev->net);

    usb_ep_disable(link->in_ep);
    spin_lock(&dev->req_lock);
    while (!list_empty(&dev->tx_reqs)) {
        req = container_of(dev->tx_reqs.next,
                           struct usb_request, list);
        list_del(&req->list);

        spin_unlock(&dev->req_lock);
        usb_ep_free_request(link->in_ep, req);
        spin_lock(&dev->req_lock);
    }
    spin_unlock(&dev->req_lock);
    link->in_ep->driver_data = NULL;
    link->in_ep->desc = NULL;

    usb_ep_disable(link->out_ep);
    spin_lock(&dev->req_lock);
    while (!list_empty(&dev->rx_reqs)) {
        req = container_of(dev->rx_reqs.next,
                           struct usb_request, list);
        list_del(&req->list);

        spin_unlock(&dev->req_lock);
        usb_ep_free_request(link->out_ep, req);
        spin_lock(&dev->req_lock);
    }
    spin_unlock(&dev->req_lock);

    spin_lock(&dev->rx_frames.lock);
    while ((skb = __skb_dequeue(&dev->rx_frames)))
        dev_kfree_skb_any(skb);
    spin_unlock(&dev->rx_frames.lock);

    link->out_ep->driver_data = NULL;
    link->out_ep->desc = NULL;


    dev->header_len = 0;
    dev->unwrap = NULL;
    dev->wrap = NULL;

    spin_lock(&dev->lock);
    dev->port_usb = NULL;
    link->ioport = NULL;
    spin_unlock(&dev->lock);
}
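
Note: the drain loops above drop and re-take req_lock around each usb_ep_free_request() call, presumably because the free can block on some controllers; in any case the call is made with the lock released. A hedged userspace sketch of that unlock-around-the-call discipline (invented names; a pthread mutex plays the spinlock):

#include <pthread.h>
#include <stdlib.h>

struct req { struct req *next; };

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *req_list;

static void free_request(struct req *r)	/* stand-in: may block */
{
	free(r);
}

static void drain_requests(void)
{
	pthread_mutex_lock(&req_lock);
	while (req_list) {
		struct req *r = req_list;

		req_list = r->next;		/* unlink while locked */
		pthread_mutex_unlock(&req_lock);
		free_request(r);		/* never call this locked */
		pthread_mutex_lock(&req_lock);
	}
	pthread_mutex_unlock(&req_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct req *r = malloc(sizeof(*r));

		if (!r)
			break;
		r->next = req_list;
		req_list = r;
	}
	drain_requests();
	return 0;
}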
Example #3
/* This function should be called with port_lock_ul lock taken */
static struct sk_buff *bam_data_alloc_skb_from_pool(
	struct bam_data_port *port)
{
	struct bam_data_ch_info *d;
	struct sk_buff *skb;

	if (!port)
		return NULL;
	d = &port->data_ch;
	if (!d)
		return NULL;

	if (d->rx_skb_idle.qlen == 0) {
		/*
		 * If the skb idle pool is empty, allocate more skbs,
		 * dynamically enlarging the pool as needed. In steady state
		 * this dynamic allocation stops once the pool reaches its
		 * optimal size.
		 */
		pr_debug("%s: allocate skb\n", __func__);
		skb = alloc_skb(d->rx_buffer_size + BAM_MUX_HDR, GFP_ATOMIC);
		if (!skb) {
			pr_err("%s: alloc skb failed\n", __func__);
			goto alloc_exit;
		}
	} else {
		pr_debug("%s: pull skb from pool\n", __func__);
		skb = __skb_dequeue(&d->rx_skb_idle);
	}

alloc_exit:
	return skb;
}
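
Note: the comment describes a grow-on-empty pool: allocate only when the idle list is empty, so allocation stops once the pool reaches its working-set size. An illustrative sketch of that policy, with invented names and no locking (the real function relies on port_lock_ul being held):

#include <stdlib.h>

struct buf { struct buf *next; };

static struct buf *idle_pool;			/* recycled buffers */

static struct buf *pool_get(void)
{
	struct buf *b = idle_pool;

	if (b) {				/* reuse when possible */
		idle_pool = b->next;
		return b;
	}
	return calloc(1, sizeof(*b));		/* enlarge pool on demand */
}

static void pool_put(struct buf *b)		/* returning never shrinks: */
{						/* the pool keeps its size */
	b->next = idle_pool;
	idle_pool = b;
}

int main(void)
{
	struct buf *b = pool_get();

	if (!b)
		return 1;
	pool_put(b);				/* next pool_get() reuses b */
	return pool_get() == b ? 0 : 1;
}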
Example #4
bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw,
			     struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl8192_tx_ring *ring;
	struct rtl_tx_desc *pdesc;
	struct sk_buff *pskb = NULL;
	u8 own;
	unsigned long flags;

	ring = &rtlpci->tx_ring[BEACON_QUEUE];

	pskb = __skb_dequeue(&ring->queue);
	kfree_skb(pskb);
	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);

	pdesc = &ring->desc[0];
	own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc, true, HW_DESC_OWN);

	rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);

	__skb_queue_tail(&ring->queue, skb);

	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);

	rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);

	return true;
}
Example #5
struct sk_buff *dev_alloc_skb(uint32_t length)
{
    struct sk_buff *skb;

    if (length > SKB_ALLOC_LEN - sizeof(struct sk_buff))
    {
        printf("%s: attempt to allocate skb longer than %u\n",
               __FUNCTION__, (uint32_t)(SKB_ALLOC_LEN - sizeof(struct sk_buff)));
        return NULL;
    }
    bdmf_fastlock_lock(&skb_lock);
    skb = __skb_dequeue(&free_skb_list);
    if (!skb)
    {
            bdmf_fastlock_unlock(&skb_lock);
            skb = bdmf_mem_alloc(NULL, BDMF_MEM_CACHE, SKB_ALLOC_LEN, SKB_ALLOC_ALIGN);
            if (!skb)
                    return NULL;
            bdmf_fastlock_lock(&skb_lock);
    }
    else
            --skb_free_count;
    ++skb_alloc_count;
    bdmf_fastlock_unlock(&skb_lock);

    memset(skb, 0, sizeof(struct sk_buff));
    skb->magic = SKB_MAGIC;
    skb->data = (uint8_t *)skb + sizeof(struct sk_buff) + SKB_RESERVE;
    skb->tail = skb->data;
    skb->len = 0;
    skb->end = (uint8_t *)skb + SKB_ALLOC_LEN;
    return skb;
}
Example #6
static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
	const struct netdev_queue *txq = q->dev_queue;
	spinlock_t *lock = NULL;
	struct sk_buff *skb;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	skb = skb_peek(&q->skb_bad_txq);
	if (skb) {
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->skb_bad_txq);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
		}
	}

	if (lock)
		spin_unlock(lock);

	return skb;
}
Example #7
/* Requeue received messages for a kcm socket to other kcm sockets. This is
 * called when a kcm socket is receive disabled.
 * RX mux lock held.
 */
static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
{
	struct sk_buff *skb;
	struct kcm_sock *kcm;

	while ((skb = __skb_dequeue(head))) {
		/* Reset destructor to avoid calling kcm_rcv_ready */
		skb->destructor = sock_rfree;
		skb_orphan(skb);
try_again:
		if (list_empty(&mux->kcm_rx_waiters)) {
			skb_queue_tail(&mux->rx_hold_queue, skb);
			continue;
		}

		kcm = list_first_entry(&mux->kcm_rx_waiters,
				       struct kcm_sock, wait_rx_list);

		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Should mean socket buffer full */
			list_del(&kcm->wait_rx_list);
			kcm->rx_wait = false;

			/* Commit rx_wait to read in kcm_free */
			smp_wmb();

			goto try_again;
		}
	}
}
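
Note: the try_again loop delivers to the first waiting socket and, when its receive buffer is full, removes that waiter and retries, parking the message on a hold queue once no waiters remain. A hedged sketch of that retry shape with invented types (no sockets involved):

#include <stdbool.h>
#include <stddef.h>

struct waiter {
	struct waiter *next;
	bool (*deliver)(int msg);		/* false: receive buffer full */
};

static void route_msg(struct waiter **waiters, int msg, void (*hold)(int))
{
	for (;;) {
		struct waiter *w = *waiters;

		if (!w) {
			hold(msg);		/* no receiver ready: park it */
			return;
		}
		if (w->deliver(msg))
			return;			/* accepted; waiter stays */
		*waiters = w->next;		/* full: drop waiter, retry */
	}
}

static bool always_full(int msg) { (void)msg; return false; }
static int parked;
static void park(int msg) { parked = msg; }

int main(void)
{
	struct waiter w = { NULL, always_full };
	struct waiter *list = &w;

	route_msg(&list, 42, park);		/* drops w, parks 42 */
	return parked == 42 ? 0 : 1;
}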
Example #8
static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (skb->nh.iph->version == 0) {
			int err;
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb->tail - (u8*)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr*)NLMSG_DATA(nlh))->error = -EMSGSIZE;
			}
			err = netlink_unicast(rtnl, skb, NETLINK_CB(skb).dst_pid, MSG_DONTWAIT);
		} else {
			DBPRINT("ipmr_cache_resolve - IP packet from %u.%u.%u.%u -> %u.%u.%u.%u with NextProtocol= %d\n",
				NIPQUAD(skb->nh.iph->saddr), NIPQUAD(skb->nh.iph->daddr),
				skb->nh.iph->protocol);
			ip_mr_forward(skb, c, 0);
		}
	}
}
Example #9
static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (skb->nh.iph->version == 0) {
			int err;
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb->tail - (u8*)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = NLMSG_DATA(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}
			err = netlink_unicast(rtnl, skb, NETLINK_CB(skb).dst_pid, MSG_DONTWAIT);
		} else
			ip_mr_forward(skb, c, 0);
	}
}
Example #10
void data_bridge_close(unsigned int id)
{
	struct data_bridge	*dev;
	struct sk_buff		*skb;
	unsigned long		flags;

	if (id >= MAX_BRIDGE_DEVICES)
		return;

	dev  = __dev[id];
	if (!dev || !dev->brdg)
		return;

	dev_dbg(&dev->udev->dev, "%s:\n", __func__);

	usb_unlink_anchored_urbs(&dev->tx_active);
	usb_unlink_anchored_urbs(&dev->rx_active);
	usb_unlink_anchored_urbs(&dev->delayed);

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while ((skb = __skb_dequeue(&dev->rx_done)))
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);

	dev->brdg = NULL;
}
Example #11
static struct sk_buff *
sfq_q_dequeue(struct sfq_sched_data *q)
{
	struct sk_buff *skb;
	sfq_index a, old_a;

	/* No active slots */
	if (q->tail == q->depth)
		return NULL;

	a = old_a = q->next[q->tail];

	/* Grab packet */
	skb = __skb_dequeue(&q->qs[a]);
	sfq_dec(q, a);

	/* Is the slot empty? */
	if (q->qs[a].qlen == 0) {
		q->ht[q->hash[a]] = q->depth;
		a = q->next[a];
		if (a == old_a) {
			q->tail = q->depth;
			return skb;
		}
		q->next[q->tail] = a;
		q->allot[a] += q->quantum;
	} else if ((q->allot[a] -= skb->len) <= 0) {
		q->tail = a;
		a = q->next[a];
		q->allot[a] += q->quantum;
	}

	return skb;
}
Example #12
/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
{
	struct sk_buff *skb = __skb_dequeue(&sch->q);

	prefetch(&skb->end); /* we'll need skb_shinfo() */
	return skb;
}
Example #13
static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
{
	struct rtl8180_priv *priv = dev->priv;
	struct rtl8180_tx_ring *ring = &priv->tx_ring[prio];

	while (skb_queue_len(&ring->queue)) {
		struct rtl8180_tx_desc *entry = &ring->desc[ring->idx];
		struct sk_buff *skb;
		struct ieee80211_tx_info *info;
		u32 flags = le32_to_cpu(entry->flags);

		if (flags & RTL818X_TX_DESC_FLAG_OWN)
			return;

		ring->idx = (ring->idx + 1) % ring->entries;
		skb = __skb_dequeue(&ring->queue);
		pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
				 skb->len, PCI_DMA_TODEVICE);

		info = IEEE80211_SKB_CB(skb);
		ieee80211_tx_info_clear_status(info);

		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
		    (flags & RTL818X_TX_DESC_FLAG_TX_OK))
			info->flags |= IEEE80211_TX_STAT_ACK;

		info->status.rates[0].count = (flags & 0xFF) + 1;

		ieee80211_tx_status_irqsafe(dev, skb);
		if (ring->entries - skb_queue_len(&ring->queue) == 2)
			ieee80211_wake_queue(dev, prio);
	}
}
Example #14
static void data_bridge_close_asus(unsigned int id)
{
	struct data_bridge	*dev;
	struct sk_buff		*skb;
	unsigned long		flags;

	if (id >= MAX_BRIDGE_DEVICES)
		return;

	dev  = __dev[id];
	if (!dev)
		return;

	dev_dbg(&dev->intf->dev, "%s:\n", __func__);

	cancel_work_sync(&dev->kevent);
	cancel_work_sync(&dev->process_rx_w);

	usb_kill_anchored_urbs(&dev->tx_active);
	usb_kill_anchored_urbs(&dev->rx_active);

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while ((skb = __skb_dequeue(&dev->rx_done)))
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);

	is_open_asus = false;
}
Example #15
File: ip6mr.c Project: 274914765/C
static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
{
    struct sk_buff *skb;

    /*
     *    Play the pending entries through our router
     */

    while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
        if (ipv6_hdr(skb)->version == 0) {
            int err;
            struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

            if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
                nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
            } else {
                nlh->nlmsg_type = NLMSG_ERROR;
                nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
                skb_trim(skb, nlh->nlmsg_len);
                ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
            }
            err = rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
        } else
            ip6_mr_forward(skb, c);
    }
}
Example #16
struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *ret;

	pthread_mutex_lock(&list->lock);
	ret = __skb_dequeue(list);
	pthread_mutex_unlock(&list->lock);
	return ret;
}
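
Note: this wrapper shows the convention behind the double-underscore name used throughout these examples: __skb_dequeue() is the unlocked primitive for callers that already hold list->lock, and skb_dequeue() takes the lock itself. A self-contained sketch of such a pair (a minimal singly linked shim with invented names, not the kernel's doubly linked sk_buff list):

#include <pthread.h>
#include <stddef.h>

struct pkt { struct pkt *next; };
struct pkt_head {
	struct pkt *first;
	pthread_mutex_t lock;
};

static struct pkt *__pkt_dequeue(struct pkt_head *list)	/* lock held */
{
	struct pkt *p = list->first;

	if (p)
		list->first = p->next;
	return p;
}

static struct pkt *pkt_dequeue(struct pkt_head *list)	/* locks itself */
{
	struct pkt *p;

	pthread_mutex_lock(&list->lock);
	p = __pkt_dequeue(list);
	pthread_mutex_unlock(&list->lock);
	return p;
}

int main(void)
{
	struct pkt a = { NULL };
	struct pkt_head q = { &a, PTHREAD_MUTEX_INITIALIZER };

	return pkt_dequeue(&q) == &a ? 0 : 1;
}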
Example #17
static void discard_rx_queue(struct sock *sk)
{
	struct sk_buff *buf;

	while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
		atomic_dec(&tipc_queue_size);
		kfree_skb(buf);
	}
}
Example #18
static void reject_rx_queue(struct sock *sk)
{
	struct sk_buff *buf;

	while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
		tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
		atomic_dec(&tipc_queue_size);
	}
}
Example #19
/**
 * release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport;
	struct sk_buff *buf;
	int res;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tport = tipc_sk_port(sk);
	lock_sock(sk);

	/*
	 * Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer)
	 */
	while (sock->state != SS_DISCONNECTING) {
		buf = __skb_dequeue(&sk->sk_receive_queue);
		if (buf == NULL)
			break;
		atomic_dec(&tipc_queue_size);
		if (TIPC_SKB_CB(buf)->handle != 0)
			kfree_skb(buf);
		else {
			if ((sock->state == SS_CONNECTING) ||
			    (sock->state == SS_CONNECTED)) {
				sock->state = SS_DISCONNECTING;
				tipc_disconnect(tport->ref);
			}
			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
		}
	}

	/*
	 * Delete TIPC port; this ensures no more messages are queued
	 * (also disconnects an active connection & sends a 'FIN-' to peer)
	 */
	res = tipc_deleteport(tport->ref);

	/* Discard any remaining (connection-based) messages in receive queue */
	discard_rx_queue(sk);

	/* Reject any messages that accumulated in backlog queue */
	sock->state = SS_DISCONNECTING;
	release_sock(sk);

	sock_put(sk);
	sock->sk = NULL;

	return res;
}
Example #20
static struct sk_buff *
bfifo_dequeue(struct Qdisc* sch)
{
	struct sk_buff *skb;

	skb = __skb_dequeue(&sch->q);
	if (skb)
		sch->stats.backlog -= skb->len;
	return skb;
}
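
Note: bfifo is byte-limited, so each dequeue must shrink a byte backlog rather than a packet count. A minimal sketch of that accounting with an invented fifo type:

#include <stddef.h>

struct pkt { struct pkt *next; size_t len; };
struct fifo { struct pkt *head; size_t backlog; /* bytes queued */ };

static struct pkt *fifo_dequeue(struct fifo *q)
{
	struct pkt *p = q->head;

	if (!p)
		return NULL;
	q->head = p->next;
	q->backlog -= p->len;			/* bytes, not packets */
	return p;
}

int main(void)
{
	struct pkt p = { NULL, 1500 };
	struct fifo q = { &p, 1500 };

	fifo_dequeue(&q);
	return q.backlog == 0 ? 0 : 1;
}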
Example #21
static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
			       struct mv88e6xxx_port_hwtstamp *ps,
			       struct sk_buff *skb, u16 reg,
			       struct sk_buff_head *rxq)
{
	u16 buf[4] = { 0 }, status, seq_id;
	struct skb_shared_hwtstamps *shwt;
	struct sk_buff_head received;
	u64 ns, timelo, timehi;
	unsigned long flags;
	int err;

	/* The latched timestamp belongs to one of the received frames. */
	__skb_queue_head_init(&received);
	spin_lock_irqsave(&rxq->lock, flags);
	skb_queue_splice_tail_init(rxq, &received);
	spin_unlock_irqrestore(&rxq->lock, flags);

	mutex_lock(&chip->reg_lock);
	err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
				      reg, buf, ARRAY_SIZE(buf));
	mutex_unlock(&chip->reg_lock);
	if (err)
		pr_err("failed to get the receive time stamp\n");

	status = buf[0];
	timelo = buf[1];
	timehi = buf[2];
	seq_id = buf[3];

	if (status & MV88E6XXX_PTP_TS_VALID) {
		mutex_lock(&chip->reg_lock);
		err = mv88e6xxx_port_ptp_write(chip, ps->port_id, reg, 0);
		mutex_unlock(&chip->reg_lock);
		if (err)
			pr_err("failed to clear the receive status\n");
	}
	/* Since the device can only handle one time stamp at a time,
	 * we purge any extra frames from the queue.
	 */
	for ( ; skb; skb = __skb_dequeue(&received)) {
		if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) {
			ns = timehi << 16 | timelo;

			mutex_lock(&chip->reg_lock);
			ns = timecounter_cyc2time(&chip->tstamp_tc, ns);
			mutex_unlock(&chip->reg_lock);
			shwt = skb_hwtstamps(skb);
			memset(shwt, 0, sizeof(*shwt));
			shwt->hwtstamp = ns_to_ktime(ns);
			status &= ~MV88E6XXX_PTP_TS_VALID;
		}
		netif_rx_ni(skb);
	}
}
Example #22
/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct sk_buff *skb = __skb_dequeue(&sch->q);

	if (skb)
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	prefetch(&skb->end); /* we'll need skb_shinfo() */
	return skb;
}
Example #23
static void bam_data_write_toipa(struct work_struct *w)
{
	struct bam_data_port	*port;
	struct bam_data_ch_info	*d;
	struct sk_buff		*skb;
	int			ret;
	int			qlen;
	unsigned long		flags;

	d = container_of(w, struct bam_data_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	while (d->pending_with_bam < BAM_PENDING_LIMIT &&
	       usb_bam_get_prod_granted(d->dst_connection_idx)) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb)
			break;

		d->pending_with_bam++;

		pr_debug("%s: port:%p d:%p pbam:%u pno:%d\n", __func__,
				port, d, d->pending_with_bam, port->port_num);

		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = ipa_tx_dp(IPA_CLIENT_USB_PROD, skb, NULL);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			pr_debug("%s: write error:%d\n", __func__, ret);
			d->pending_with_bam--;
			spin_unlock_irqrestore(&port->port_lock_ul, flags);
			bam_data_free_skb_to_pool(port, skb);
			spin_lock_irqsave(&port->port_lock_ul, flags);
			break;
		}
	}

	qlen = d->rx_skb_q.qlen;

	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	if (qlen < bam_ipa_rx_fctrl_dis_thld) {
		if (d->rx_flow_control_triggered) {
			d->rx_flow_control_disable++;
			d->rx_flow_control_triggered = 0;
		}
		bam_data_start_rx(port);
	}
}
Example #24
static int codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);

		q->params.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);

	if (tb[TCA_CODEL_ECN])
		q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);

	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __skb_dequeue(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}
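
Note: the while loop at the end is the usual trim-to-limit idiom: pop from the head until the queue fits the new limit, accumulating the dropped bytes so the backlog reduction can be reported upward. A hedged sketch with an invented queue type (drop() stands in for qdisc_drop()):

#include <stddef.h>
#include <stdlib.h>

struct pkt { struct pkt *next; size_t len; };
struct queue { struct pkt *head; unsigned int qlen; size_t backlog; };

static size_t trim_to_limit(struct queue *q, unsigned int limit,
			    void (*drop)(struct pkt *))
{
	size_t dropped = 0;			/* bytes, for stats */

	while (q->qlen > limit) {
		struct pkt *p = q->head;

		q->head = p->next;
		q->qlen--;
		q->backlog -= p->len;
		dropped += p->len;
		drop(p);
	}
	return dropped;				/* report to the parent */
}

static void drop_pkt(struct pkt *p) { free(p); }

int main(void)
{
	struct pkt *p = calloc(1, sizeof(*p));
	struct queue q = { p, 1, 100 };

	if (!p)
		return 1;
	p->len = 100;
	return trim_to_limit(&q, 0, drop_pkt) == 100 ? 0 : 1;
}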
Example #25
static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct sk_buff *skb;

	napi_disable(&vi->napi);

	/* networking core has neutered skb_xmit_done/skb_recv_done, so don't
	 * worry about races vs. get(). */
	vi->rvq->vq_ops->shutdown(vi->rvq);
	while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
		kfree_skb(skb);
		vi->num--;
	}
	vi->svq->vq_ops->shutdown(vi->svq);
	while ((skb = __skb_dequeue(&vi->send)) != NULL)
		kfree_skb(skb);

	BUG_ON(vi->num != 0);
	return 0;
}
Example #26
static struct sk_buff *ccci_skb_dequeue(struct ccci_skb_queue *queue)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&queue->skb_list.lock, flags);
	result = __skb_dequeue(&queue->skb_list);
	if (queue->skb_list.qlen < queue->max_len / RELOAD_TH)
		queue_work(pool_reload_work_queue, &queue->reload_work);
	spin_unlock_irqrestore(&queue->skb_list.lock, flags);
	return result;
}
Example #27
static int shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	switch (sock->state) {
	case SS_CONNECTING:
	case SS_CONNECTED:

		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
restart:
		buf = __skb_dequeue(&sk->sk_receive_queue);
		if (buf) {
			atomic_dec(&tipc_queue_size);
			if (TIPC_SKB_CB(buf)->handle != 0) {
				kfree_skb(buf);
				goto restart;
			}
			tipc_disconnect(tport->ref);
			tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
		} else {
			tipc_shutdown(tport->ref);
		}

		sock->state = SS_DISCONNECTING;

		/* fall through */

	case SS_DISCONNECTING:

		/* Discard any unreceived messages; wake up sleeping tasks */

		discard_rx_queue(sk);
		if (waitqueue_active(sk_sleep(sk)))
			wake_up_interruptible(sk_sleep(sk));
		res = 0;
		break;

	default:
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}
Example #28
/*
 * iwl_pcie_gen2_txq_unmap -  Unmap any remaining DMA mappings and free skb's
 */
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans_pcie->cmd_queue) {
			int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
			struct sk_buff *skb = txq->entries[idx].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_pcie_free_tso_page(trans_pcie, skb);
		}
		iwl_pcie_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id != trans_pcie->cmd_queue) {
				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
					      txq->id);
				iwl_trans_unref(trans);
			} else if (trans_pcie->ref_cmd_in_flight) {
				trans_pcie->ref_cmd_in_flight = false;
				IWL_DEBUG_RPM(trans,
					      "clear ref_cmd_in_flight\n");
				iwl_trans_unref(trans);
			}
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}
Example #29
/**
 * shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */
static int shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	switch (sock->state) {
	case SS_CONNECTING:
	case SS_CONNECTED:

restart:
		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
		buf = __skb_dequeue(&sk->sk_receive_queue);
		if (buf) {
			if (TIPC_SKB_CB(buf)->handle != 0) {
				kfree_skb(buf);
				goto restart;
			}
			tipc_disconnect(tport->ref);
			tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
		} else {
			tipc_shutdown(tport->ref);
		}

		sock->state = SS_DISCONNECTING;

		/* fall through */

	case SS_DISCONNECTING:

		/* Discard any unreceived messages */
		__skb_queue_purge(&sk->sk_receive_queue);

		/* Wake up anyone sleeping in poll */
		sk->sk_state_change(sk);
		res = 0;
		break;

	default:
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}
Example #30
void ath9k_wmi_event_tasklet(unsigned long data)
{
	struct wmi *wmi = (struct wmi *)data;
	struct ath9k_htc_priv *priv = wmi->drv_priv;
	struct wmi_cmd_hdr *hdr;
	void *wmi_event;
	struct wmi_event_swba *swba;
	struct sk_buff *skb = NULL;
	unsigned long flags;
	u16 cmd_id;

	do {
		spin_lock_irqsave(&wmi->wmi_lock, flags);
		skb = __skb_dequeue(&wmi->wmi_event_queue);
		if (!skb) {
			spin_unlock_irqrestore(&wmi->wmi_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&wmi->wmi_lock, flags);

		hdr = (struct wmi_cmd_hdr *) skb->data;
		cmd_id = be16_to_cpu(hdr->command_id);
		wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));

		switch (cmd_id) {
		case WMI_SWBA_EVENTID:
			swba = (struct wmi_event_swba *) wmi_event;
			ath9k_htc_swba(priv, swba);
			break;
		case WMI_FATAL_EVENTID:
			ieee80211_queue_work(wmi->drv_priv->hw,
					     &wmi->drv_priv->fatal_work);
			break;
		case WMI_TXSTATUS_EVENTID:
			spin_lock_bh(&priv->tx.tx_lock);
			if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
				spin_unlock_bh(&priv->tx.tx_lock);
				break;
			}
			spin_unlock_bh(&priv->tx.tx_lock);

			ath9k_htc_txstatus(priv, wmi_event);
			break;
		default:
			break;
		}

		kfree_skb(skb);
	} while (1);
}
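
Note: unlike Example #1, this tasklet does not splice the queue; it takes the lock once per packet, so new events can still be queued between iterations. An illustrative userspace analogue of that pop-one, process-unlocked loop (invented names):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct evt { struct evt *next; int id; };

static pthread_mutex_t evq_lock = PTHREAD_MUTEX_INITIALIZER;
static struct evt *evq;

static void event_loop(void)
{
	for (;;) {
		struct evt *e;

		pthread_mutex_lock(&evq_lock);
		e = evq;
		if (!e) {
			pthread_mutex_unlock(&evq_lock);
			return;			/* queue drained */
		}
		evq = e->next;
		pthread_mutex_unlock(&evq_lock);

		printf("event %d\n", e->id);	/* dispatch unlocked */
		free(e);
	}
}

int main(void)
{
	struct evt *e = calloc(1, sizeof(*e));

	if (!e)
		return 1;
	e->id = 7;
	evq = e;
	event_loop();
	return 0;
}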