Example #1
/*
 *	All outgoing AX.25 I frames pass through this routine. Therefore
 *	this is where fragmentation of frames takes place: any frame
 *	longer than paclen is split before being queued for transmission.
 */
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char *p;
	int frontlen, len, fragno, ka9qfrag, first = 1;

	if (paclen < 16) {
		WARN_ON_ONCE(1);
		kfree_skb(skb);
		return;
	}

	if ((skb->len - 1) > paclen) {
		if (*skb->data == AX25_P_TEXT) {
			skb_pull(skb, 1); /* skip PID */
			ka9qfrag = 0;
		} else {
			paclen -= 2;	/* Allow for fragment control info */
			ka9qfrag = 1;
		}

		fragno = skb->len / paclen;
		if (skb->len % paclen == 0) fragno--;

		frontlen = skb_headroom(skb);	/* Address space + CTRL */

		while (skb->len > 0) {
			spin_lock_bh(&ax25_frag_lock);
			if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
				spin_unlock_bh(&ax25_frag_lock);
				printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
				return;
			}

			if (skb->sk != NULL)
				skb_set_owner_w(skbn, skb->sk);

			spin_unlock_bh(&ax25_frag_lock);

			len = (paclen > skb->len) ? skb->len : paclen;

			if (ka9qfrag == 1) {
				skb_reserve(skbn, frontlen + 2);
				skb_set_network_header(skbn,
						      skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 2);

				*p++ = AX25_P_SEGMENT;

				*p = fragno--;
				if (first) {
					*p |= AX25_SEG_FIRST;
					first = 0;
				}
			} else {
				skb_reserve(skbn, frontlen + 1);
				skb_set_network_header(skbn,
						      skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 1);
				*p = AX25_P_TEXT;
			}

			skb_pull(skb, len);
			skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&ax25->write_queue, skb);	  /* Throw it on the queue */
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_kick(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	/*
	 * A DAMA slave is _required_ to work as normal AX.25L2V2
	 * if no DAMA master is available.
	 */
	case AX25_PROTO_DAMA_SLAVE:
		if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
		break;
#endif
	}
}
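
Every example on this page funnels into the same primitive, so a minimal lifecycle sketch may help before reading on. This is an illustration, not code from any driver, and the demo_* names are hypothetical: skb_queue_tail() takes the queue's internal spinlock, so producers may append from process, softirq, or hard-IRQ context, and skb_dequeue() is the matching locked consumer.

#include <linux/skbuff.h>

static struct sk_buff_head demo_queue;

static void demo_init(void)
{
	skb_queue_head_init(&demo_queue);	/* list head + internal lock */
}

static void demo_enqueue(struct sk_buff *skb)
{
	skb_queue_tail(&demo_queue, skb);	/* locked append at the tail */
}

static void demo_drain(void)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&demo_queue)) != NULL)
		kfree_skb(skb);			/* consume; here we just free */
}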
Example #2
static int handle_recv_skb(struct capiminor *mp, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	unsigned int datalen;
	u16 errcode, datahandle;

	datalen = skb->len - CAPIMSG_LEN(skb->data);
	if (mp->tty) {
		if (mp->tty->ldisc.receive_buf == 0) {
			printk(KERN_ERR "capi: ldisc has no receive_buf function\n");
			return -1;
		}
		if (mp->ttyinstop) {
#if defined(_DEBUG_DATAFLOW) || defined(_DEBUG_TTYFUNCS)
			printk(KERN_DEBUG "capi: recv tty throttled\n");
#endif
			return -1;
		}
		if (mp->tty->ldisc.receive_room &&
		    mp->tty->ldisc.receive_room(mp->tty) < datalen) {
#if defined(_DEBUG_DATAFLOW) || defined(_DEBUG_TTYFUNCS)
			printk(KERN_DEBUG "capi: no room in tty\n");
#endif
			return -1;
		}
		if ((nskb = gen_data_b3_resp_for(mp, skb)) == 0) {
			printk(KERN_ERR "capi: gen_data_b3_resp failed\n");
			return -1;
		}
		datahandle = CAPIMSG_U16(skb->data,CAPIMSG_BASELEN+4);
		errcode = (*capifuncs->capi_put_message)(mp->applid, nskb);
		if (errcode != CAPI_NOERROR) {
			printk(KERN_ERR "capi: send DATA_B3_RESP failed=%x\n",
					errcode);
			kfree_skb(nskb);
			return -1;
		}
		(void)skb_pull(skb, CAPIMSG_LEN(skb->data));
#ifdef _DEBUG_DATAFLOW
		printk(KERN_DEBUG "capi: DATA_B3_RESP %u len=%d => ldisc\n",
					datahandle, skb->len);
#endif
		mp->tty->ldisc.receive_buf(mp->tty, skb->data, 0, skb->len);
		kfree_skb(skb);
		return 0;

	} else if (mp->file) {
		if (skb_queue_len(&mp->recvqueue) > CAPINC_MAX_RECVQUEUE) {
#if defined(_DEBUG_DATAFLOW) || defined(_DEBUG_TTYFUNCS)
			printk(KERN_DEBUG "capi: no room in raw queue\n");
#endif
			return -1;
		}
		if ((nskb = gen_data_b3_resp_for(mp, skb)) == 0) {
			printk(KERN_ERR "capi: gen_data_b3_resp failed\n");
			return -1;
		}
		datahandle = CAPIMSG_U16(skb->data,CAPIMSG_BASELEN+4);
		errcode = (*capifuncs->capi_put_message)(mp->applid, nskb);
		if (errcode != CAPI_NOERROR) {
			printk(KERN_ERR "capi: send DATA_B3_RESP failed=%x\n",
					errcode);
			kfree_skb(nskb);
			return -1;
		}
		(void)skb_pull(skb, CAPIMSG_LEN(skb->data));
#ifdef _DEBUG_DATAFLOW
		printk(KERN_DEBUG "capi: DATA_B3_RESP %u len=%d => raw\n",
					datahandle, skb->len);
#endif
		skb_queue_tail(&mp->recvqueue, skb);
		wake_up_interruptible(&mp->recvwait);
		return 0;
	}
#ifdef _DEBUG_DATAFLOW
	printk(KERN_DEBUG "capi: currently no receiver\n");
#endif
	return -1;
}
Example #3
static void
hscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx)
{
    u_char r;
    struct BCState *bcs = cs->bcs + hscx;
    struct sk_buff *skb;
    int fifo_size = test_bit(HW_IPAC, &cs->HW_Flags)? 64: 32;
    int count;

    if (!test_bit(BC_FLG_INIT, &bcs->Flag))
        return;

    if (val & 0x80) {	/* RME */
        r = READHSCX(cs, hscx, HSCX_RSTA);
        if ((r & 0xf0) != 0xa0) {
            if (!(r & 0x80)) {
                if (cs->debug & L1_DEB_WARN)
                    debugl1(cs, "HSCX invalid frame");
#ifdef ERROR_STATISTIC
                bcs->err_inv++;
#endif
            }
            if ((r & 0x40) && bcs->mode) {
                if (cs->debug & L1_DEB_WARN)
                    debugl1(cs, "HSCX RDO mode=%d",
                            bcs->mode);
#ifdef ERROR_STATISTIC
                bcs->err_rdo++;
#endif
            }
            if (!(r & 0x20)) {
                if (cs->debug & L1_DEB_WARN)
                    debugl1(cs, "HSCX CRC error");
#ifdef ERROR_STATISTIC
                bcs->err_crc++;
#endif
            }
            WriteHSCXCMDR(cs, hscx, 0x80);
        } else {
            count = READHSCX(cs, hscx, HSCX_RBCL) & (
                        test_bit(HW_IPAC, &cs->HW_Flags)? 0x3f: 0x1f);
            if (count == 0)
                count = fifo_size;
            hscx_empty_fifo(bcs, count);
            if ((count = bcs->hw.hscx.rcvidx - 1) > 0) {
                if (cs->debug & L1_DEB_HSCX_FIFO)
                    debugl1(cs, "HX Frame %d", count);
                if (!(skb = dev_alloc_skb(count)))
                    printk(KERN_WARNING "HSCX: receive out of memory\n");
                else {
                    memcpy(skb_put(skb, count), bcs->hw.hscx.rcvbuf, count);
                    skb_queue_tail(&bcs->rqueue, skb);
                }
            }
        }
        bcs->hw.hscx.rcvidx = 0;
        schedule_event(bcs, B_RCVBUFREADY);
    }
    if (val & 0x40) {	/* RPF */
        hscx_empty_fifo(bcs, fifo_size);
        if (bcs->mode == L1_MODE_TRANS) {
            /* receive audio data */
            if (!(skb = dev_alloc_skb(fifo_size)))
                printk(KERN_WARNING "HiSax: receive out of memory\n");
            else {
                memcpy(skb_put(skb, fifo_size), bcs->hw.hscx.rcvbuf, fifo_size);
                skb_queue_tail(&bcs->rqueue, skb);
            }
            bcs->hw.hscx.rcvidx = 0;
            schedule_event(bcs, B_RCVBUFREADY);
        }
    }
    if (val & 0x10) {	/* XPR */
        if (bcs->tx_skb) {
            if (bcs->tx_skb->len) {
                hscx_fill_fifo(bcs);
                return;
            } else {
                if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
                        (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
                    u_long	flags;
                    spin_lock_irqsave(&bcs->aclock, flags);
                    bcs->ackcnt += bcs->hw.hscx.count;
                    spin_unlock_irqrestore(&bcs->aclock, flags);
                    schedule_event(bcs, B_ACKPENDING);
                }
                dev_kfree_skb_irq(bcs->tx_skb);
                bcs->hw.hscx.count = 0;
                bcs->tx_skb = NULL;
            }
        }
        if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
            bcs->hw.hscx.count = 0;
            test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
            hscx_fill_fifo(bcs);
        } else {
            test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
            schedule_event(bcs, B_XMTBUFREADY);
        }
    }
}
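
The RME and RPF branches above share one receive idiom: copy a completed frame out of the controller FIFO staging buffer into a freshly allocated skb and append it to the B-channel receive queue. A hedged reduction of that idiom, with rqueue and rcvbuf standing in for the driver's real fields:

#include <linux/skbuff.h>

static void demo_rx_frame(struct sk_buff_head *rqueue,
			  const unsigned char *rcvbuf, int count)
{
	struct sk_buff *skb = dev_alloc_skb(count);

	if (!skb) {
		printk(KERN_WARNING "demo: receive out of memory\n");
		return;				/* the frame is dropped */
	}
	memcpy(skb_put(skb, count), rcvbuf, count);
	skb_queue_tail(rqueue, skb);		/* hand off to the consumer */
}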
Example #4
/* Queue an skb to a connected sock.
 * Socket lock must be held. */
static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr = pnp_hdr(skb);
	struct sk_buff_head *queue;
	int err = 0;

	BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);

	switch (hdr->message_id) {
	case PNS_PEP_CONNECT_REQ:
		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
		break;

	case PNS_PEP_DISCONNECT_REQ:
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		sk->sk_state = TCP_CLOSE_WAIT;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		break;

	case PNS_PEP_ENABLE_REQ:
		/* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_RESET_REQ:
		switch (hdr->state_after_reset) {
		case PN_PIPE_DISABLE:
			pn->init_enable = 0;
			break;
		case PN_PIPE_ENABLE:
			pn->init_enable = 1;
			break;
		default: /* not allowed to send an error here!? */
			err = -EINVAL;
			goto out;
		}
		/* fall through */
	case PNS_PEP_DISABLE_REQ:
		atomic_set(&pn->tx_credits, 0);
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_CTRL_REQ:
		if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
			atomic_inc(&sk->sk_drops);
			break;
		}
		__skb_pull(skb, 4);
		queue = &pn->ctrlreq_queue;
		goto queue;

	case PNS_PIPE_DATA:
		__skb_pull(skb, 3); /* Pipe data header */
		if (!pn_flow_safe(pn->rx_fc)) {
			err = sock_queue_rcv_skb(sk, skb);
			if (!err)
				return 0;
			if (err == -ENOMEM)
				atomic_inc(&sk->sk_drops);
			break;
		}

		if (pn->rx_credits == 0) {
			atomic_inc(&sk->sk_drops);
			err = -ENOBUFS;
			break;
		}
		pn->rx_credits--;
		queue = &sk->sk_receive_queue;
		goto queue;

	case PNS_PEP_STATUS_IND:
		pipe_rcv_status(sk, skb);
		break;

	case PNS_PIPE_REDIRECTED_IND:
		err = pipe_rcv_created(sk, skb);
		break;

	case PNS_PIPE_CREATED_IND:
		err = pipe_rcv_created(sk, skb);
		if (err)
			break;
		/* fall through */
	case PNS_PIPE_RESET_IND:
		if (!pn->init_enable)
			break;
		/* fall through */
	case PNS_PIPE_ENABLED_IND:
		if (!pn_flow_safe(pn->tx_fc)) {
			atomic_set(&pn->tx_credits, 1);
			sk->sk_write_space(sk);
		}
		if (sk->sk_state == TCP_ESTABLISHED)
			break; /* Nothing to do */
		sk->sk_state = TCP_ESTABLISHED;
		pipe_grant_credits(sk);
		break;

	case PNS_PIPE_DISABLED_IND:
		sk->sk_state = TCP_SYN_RECV;
		pn->rx_credits = 0;
		break;

	default:
		LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP message: %u\n",
				hdr->message_id);
		err = -EINVAL;
	}
out:
	kfree_skb(skb);
	return err;

queue:
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	err = skb->len;
	skb_queue_tail(queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, err);
	return 0;
}
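
Both `goto queue` paths converge on a pattern worth isolating: charge the skb's memory to the socket with skb_set_owner_r(), append it, and wake the reader only if the socket is still alive. A hedged sketch (sk_data_ready takes two arguments in this generation of the kernel, as in the code above):

#include <net/sock.h>

static int demo_sock_queue(struct sock *sk, struct sk_buff_head *queue,
			   struct sk_buff *skb)
{
	int len = skb->len;

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);		/* charge rmem to this socket */
	skb_queue_tail(queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, len);	/* wake any blocked reader */
	return 0;
}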
Example #5
static int btusb_probe(struct usb_interface *intf,
				const struct usb_device_id *id)
{
	struct usb_endpoint_descriptor *ep_desc;
	struct btusb_data *data;
	struct hci_dev *hdev;
	int i, err;

	BT_DBG("intf %p id %p", intf, id);

	/* interface numbers are hardcoded in the spec */
	if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
		return -ENODEV;

	if (!id->driver_info) {
		const struct usb_device_id *match;
		match = usb_match_id(intf, blacklist_table);
		if (match)
			id = match;
	}

	if (id->driver_info == BTUSB_IGNORE)
		return -ENODEV;

	if (ignore_dga && id->driver_info & BTUSB_DIGIANSWER)
		return -ENODEV;

	if (ignore_csr && id->driver_info & BTUSB_CSR)
		return -ENODEV;

	if (ignore_sniffer && id->driver_info & BTUSB_SNIFFER)
		return -ENODEV;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
		ep_desc = &intf->cur_altsetting->endpoint[i].desc;

		if (!data->intr_ep && usb_endpoint_is_int_in(ep_desc)) {
			data->intr_ep = ep_desc;
			continue;
		}

		if (!data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) {
			data->bulk_tx_ep = ep_desc;
			continue;
		}

		if (!data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) {
			data->bulk_rx_ep = ep_desc;
			continue;
		}
	}

	if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) {
		kfree(data);
		return -ENODEV;
	}

	data->cmdreq_type = USB_TYPE_CLASS;

	data->udev = interface_to_usbdev(intf);
	data->intf = intf;

	spin_lock_init(&data->lock);

	INIT_WORK(&data->work, btusb_work);
	INIT_WORK(&data->waker, btusb_waker);
	spin_lock_init(&data->txlock);

	init_usb_anchor(&data->tx_anchor);
	init_usb_anchor(&data->intr_anchor);
	init_usb_anchor(&data->bulk_anchor);
	init_usb_anchor(&data->isoc_anchor);
	init_usb_anchor(&data->deferred);

	hdev = hci_alloc_dev();
	if (!hdev) {
		kfree(data);
		return -ENOMEM;
	}

	hdev->bus = HCI_USB;
	hdev->driver_data = data;

	data->hdev = hdev;

	SET_HCIDEV_DEV(hdev, &intf->dev);

	hdev->open     = btusb_open;
	hdev->close    = btusb_close;
	hdev->flush    = btusb_flush;
	hdev->send     = btusb_send_frame;
	hdev->destruct = btusb_destruct;
	hdev->notify   = btusb_notify;

	hdev->owner = THIS_MODULE;

	/* Interface numbers are hardcoded in the specification */
	data->isoc = usb_ifnum_to_if(data->udev, 1);

	if (!reset)
		set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);

	if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) {
		if (!disable_scofix)
			set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
	}

	if (id->driver_info & BTUSB_BROKEN_ISOC)
		data->isoc = NULL;

	if (id->driver_info & BTUSB_DIGIANSWER) {
		data->cmdreq_type = USB_TYPE_VENDOR;
		set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
	}

	if (id->driver_info & BTUSB_CSR) {
		struct usb_device *udev = data->udev;

		/* Old firmware would otherwise execute USB reset */
		if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117)
			set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
	}

	if (id->driver_info & BTUSB_SNIFFER) {
		struct usb_device *udev = data->udev;

		/* New sniffer firmware has crippled HCI interface */
		if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
			set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);

		data->isoc = NULL;
	}

	if (id->driver_info & BTUSB_BCM92035) {
		unsigned char cmd[] = { 0x3b, 0xfc, 0x01, 0x00 };
		struct sk_buff *skb;

		skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
		if (skb) {
			memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
			skb_queue_tail(&hdev->driver_init, skb);
		}
	}

	if (data->isoc) {
		err = usb_driver_claim_interface(&btusb_driver,
							data->isoc, data);
		if (err < 0) {
			hci_free_dev(hdev);
			kfree(data);
			return err;
		}
	}

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		kfree(data);
		return err;
	}

	usb_set_intfdata(intf, data);

	return 0;
}
Example #6
static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct clip_priv *clip_priv = PRIV(dev);
	struct atmarp_entry *entry;
	struct atm_vcc *vcc;
	int old;
	unsigned long flags;

	pr_debug("clip_start_xmit (skb %p)\n", skb);
	if (!skb->dst) {
		printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n");
		dev_kfree_skb(skb);
		clip_priv->stats.tx_dropped++;
		return 0;
	}
	if (!skb->dst->neighbour) {
#if 0
		skb->dst->neighbour = clip_find_neighbour(skb->dst, 1);
		if (!skb->dst->neighbour) {
			dev_kfree_skb(skb);	/* lost that one */
			clip_priv->stats.tx_dropped++;
			return 0;
		}
#endif
		printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n");
		dev_kfree_skb(skb);
		clip_priv->stats.tx_dropped++;
		return 0;
	}
	entry = NEIGH2ENTRY(skb->dst->neighbour);
	if (!entry->vccs) {
		if (time_after(jiffies, entry->expires)) {
			/* should be resolved */
			entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ;
			to_atmarpd(act_need, PRIV(dev)->number, entry->ip);
		}
		if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)
			skb_queue_tail(&entry->neigh->arp_queue, skb);
		else {
			dev_kfree_skb(skb);
			clip_priv->stats.tx_dropped++;
		}
		return 0;
	}
	pr_debug("neigh %p, vccs %p\n", entry, entry->vccs);
	ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
	pr_debug("using neighbour %p, vcc %p\n", skb->dst->neighbour, vcc);
	if (entry->vccs->encap) {
		void *here;

		here = skb_push(skb, RFC1483LLC_LEN);
		memcpy(here, llc_oui, sizeof(llc_oui));
		((__be16 *) here)[3] = skb->protocol;
	}
	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
	ATM_SKB(skb)->atm_options = vcc->atm_options;
	entry->vccs->last_use = jiffies;
	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
	old = xchg(&entry->vccs->xoff, 1);	/* assume XOFF ... */
	if (old) {
		printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n");
		return 0;
	}
	clip_priv->stats.tx_packets++;
	clip_priv->stats.tx_bytes += skb->len;
	vcc->send(vcc, skb);
	if (atm_may_send(vcc, 0)) {
		entry->vccs->xoff = 0;
		return 0;
	}
	spin_lock_irqsave(&clip_priv->xoff_lock, flags);
	netif_stop_queue(dev);	/* XOFF -> throttle immediately */
	barrier();
	if (!entry->vccs->xoff)
		netif_start_queue(dev);
	/* Oh, we just raced with clip_pop. netif_start_queue should be
	   good enough, because nothing should really be asleep because
	   of the brief netif_stop_queue. If this isn't true or if it
	   changes, use netif_wake_queue instead. */
	spin_unlock_irqrestore(&clip_priv->xoff_lock, flags);
	return 0;
}
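
The closing sequence is a compact stop-then-recheck idiom: after a send that filled the device, stop the queue, then re-read the xoff flag in case the completion handler (clip_pop here) already cleared it, undoing the stop so the interface never stalls. A hedged sketch of just that step:

#include <linux/netdevice.h>

static void demo_throttle(struct net_device *dev, const int *xoff)
{
	netif_stop_queue(dev);	/* assume the device is still full ... */
	barrier();		/* order the stop before the re-check */
	if (!*xoff)		/* ... unless completion already ran */
		netif_start_queue(dev);
}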
Example #7
File: usb.c Project: Lyude/linux
/* This function handles a received packet. The necessary action is taken
 * based on cmd/event/data.
 */
static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
			    struct sk_buff *skb, u8 ep)
{
	u32 recv_type;
	__le32 tmp;
	int ret;

	if (adapter->hs_activated)
		mwifiex_process_hs_config(adapter);

	if (skb->len < INTF_HEADER_LEN) {
		mwifiex_dbg(adapter, ERROR,
			    "%s: invalid skb->len\n", __func__);
		return -1;
	}

	switch (ep) {
	case MWIFIEX_USB_EP_CMD_EVENT:
		mwifiex_dbg(adapter, EVENT,
			    "%s: EP_CMD_EVENT\n", __func__);
		skb_copy_from_linear_data(skb, &tmp, INTF_HEADER_LEN);
		recv_type = le32_to_cpu(tmp);
		skb_pull(skb, INTF_HEADER_LEN);

		switch (recv_type) {
		case MWIFIEX_USB_TYPE_CMD:
			if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
				mwifiex_dbg(adapter, ERROR,
					    "CMD: skb->len too large\n");
				ret = -1;
				goto exit_restore_skb;
			} else if (!adapter->curr_cmd) {
				mwifiex_dbg(adapter, WARN, "CMD: no curr_cmd\n");
				if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
					mwifiex_process_sleep_confirm_resp(
							adapter, skb->data,
							skb->len);
					ret = 0;
					goto exit_restore_skb;
				}
				ret = -1;
				goto exit_restore_skb;
			}

			adapter->curr_cmd->resp_skb = skb;
			adapter->cmd_resp_received = true;
			break;
		case MWIFIEX_USB_TYPE_EVENT:
			if (skb->len < sizeof(u32)) {
				mwifiex_dbg(adapter, ERROR,
					    "EVENT: skb->len too small\n");
				ret = -1;
				goto exit_restore_skb;
			}
			skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
			adapter->event_cause = le32_to_cpu(tmp);
			mwifiex_dbg(adapter, EVENT,
				    "event_cause %#x\n", adapter->event_cause);

			if (skb->len > MAX_EVENT_SIZE) {
				mwifiex_dbg(adapter, ERROR,
					    "EVENT: event body too large\n");
				ret = -1;
				goto exit_restore_skb;
			}

			memcpy(adapter->event_body, skb->data +
			       MWIFIEX_EVENT_HEADER_LEN, skb->len);

			adapter->event_received = true;
			adapter->event_skb = skb;
			break;
		default:
			mwifiex_dbg(adapter, ERROR,
				    "unknown recv_type %#x\n", recv_type);
			return -1;
		}
		break;
	case MWIFIEX_USB_EP_DATA:
		mwifiex_dbg(adapter, DATA, "%s: EP_DATA\n", __func__);
		if (skb->len > MWIFIEX_RX_DATA_BUF_SIZE) {
			mwifiex_dbg(adapter, ERROR,
				    "DATA: skb->len too large\n");
			return -1;
		}

		skb_queue_tail(&adapter->rx_data_q, skb);
		adapter->data_received = true;
		atomic_inc(&adapter->rx_pending);
		break;
	default:
		mwifiex_dbg(adapter, ERROR,
			    "%s: unknown endport %#x\n", __func__, ep);
		return -1;
	}

	return -EINPROGRESS;

exit_restore_skb:
	/* The buffer will be reused for further cmds/events */
	skb_push(skb, INTF_HEADER_LEN);

	return ret;
}
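
The return-value contract here is easy to miss: -EINPROGRESS tells the caller the skb was handed off (queued, or parked as a command response), while the exit_restore_skb paths push the interface header back so the same buffer can be resubmitted. A hedged distillation of that contract:

#include <linux/errno.h>
#include <linux/skbuff.h>

static int demo_consume(struct sk_buff_head *q, struct sk_buff *skb,
			unsigned int hdr_len)
{
	skb_pull(skb, hdr_len);			/* strip interface header */
	if (skb->len == 0) {
		skb_push(skb, hdr_len);		/* restore it for reuse */
		return -1;			/* caller keeps the skb */
	}
	skb_queue_tail(q, skb);
	return -EINPROGRESS;			/* queue now owns the skb */
}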
Example #8
static struct sk_buff_head *msm_ipc_router_build_msg(unsigned int num_sect,
					  struct iovec const *msg_sect,
					  size_t total_len)
{
	struct sk_buff_head *msg_head;
	struct sk_buff *msg;
	int i, copied, first = 1;
	int data_size = 0, request_size, offset;
	void *data;

	for (i = 0; i < num_sect; i++)
		data_size += msg_sect[i].iov_len;

	if (!data_size)
		return NULL;

	msg_head = kmalloc(sizeof(struct sk_buff_head), GFP_KERNEL);
	if (!msg_head) {
		pr_err("%s: cannot allocate skb_head\n", __func__);
		return NULL;
	}
	skb_queue_head_init(msg_head);

	for (copied = 1, i = 0; copied && (i < num_sect); i++) {
		data_size = msg_sect[i].iov_len;
		offset = 0;
		while (offset != msg_sect[i].iov_len) {
			request_size = data_size;
			if (first)
				request_size += IPC_ROUTER_HDR_SIZE;

			msg = alloc_skb(request_size, GFP_KERNEL);
			if (!msg) {
				if (request_size <= (PAGE_SIZE/2)) {
					pr_err("%s: cannot allocated skb\n",
						__func__);
					goto msg_build_failure;
				}
				data_size = data_size / 2;
				continue;
			}

			if (first) {
				skb_reserve(msg, IPC_ROUTER_HDR_SIZE);
				first = 0;
			}

			data = skb_put(msg, data_size);
			copied = !copy_from_user(msg->data,
					msg_sect[i].iov_base + offset,
					data_size);
			if (!copied) {
				pr_err("%s: copy_from_user failed\n",
					__func__);
				kfree_skb(msg);
				goto msg_build_failure;
			}
			skb_queue_tail(msg_head, msg);
			offset += data_size;
			data_size = msg_sect[i].iov_len - offset;
		}
	}
	return msg_head;

msg_build_failure:
	while (!skb_queue_empty(msg_head)) {
		msg = skb_dequeue(msg_head);
		kfree_skb(msg);
	}
	kfree(msg_head);
	return NULL;
}
Example #9
void
isac_interrupt(struct IsdnCardState *cs, u_char val)
{
	u_char exval, v1;
	struct sk_buff *skb;
	unsigned int count;

	if (cs->debug & L1_DEB_ISAC)
		debugl1(cs, "ISAC interrupt %x", val);
	if (val & 0x80) {	/* RME */
		exval = cs->readisac(cs, ISAC_RSTA);
		if ((exval & 0x70) != 0x20) {
			if (exval & 0x40) {
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "ISAC RDO");
#ifdef ERROR_STATISTIC
				cs->err_rx++;
#endif
			}
			if (!(exval & 0x20)) {
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "ISAC CRC error");
#ifdef ERROR_STATISTIC
				cs->err_crc++;
#endif
			}
			cs->writeisac(cs, ISAC_CMDR, 0x80);
		} else {
			count = cs->readisac(cs, ISAC_RBCL) & 0x1f;
			if (count == 0)
				count = 32;
			isac_empty_fifo(cs, count);
			if ((count = cs->rcvidx) > 0) {
				cs->rcvidx = 0;
				if (!(skb = alloc_skb(count, GFP_ATOMIC)))
					printk(KERN_WARNING "HiSax: D receive out of memory\n");
				else {
					memcpy(skb_put(skb, count), cs->rcvbuf, count);
					skb_queue_tail(&cs->rq, skb);
				}
			}
		}
		cs->rcvidx = 0;
		schedule_event(cs, D_RCVBUFREADY);
	}
	if (val & 0x40) {	/* RPF */
		isac_empty_fifo(cs, 32);
	}
	if (val & 0x20) {	/* RSC */
		/* never */
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "ISAC RSC interrupt");
	}
	if (val & 0x10) {	/* XPR */
		if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
			del_timer(&cs->dbusytimer);
		if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
			schedule_event(cs, D_CLEARBUSY);
		if (cs->tx_skb) {
			if (cs->tx_skb->len) {
				isac_fill_fifo(cs);
				goto afterXPR;
			} else {
				dev_kfree_skb_irq(cs->tx_skb);
				cs->tx_cnt = 0;
				cs->tx_skb = NULL;
			}
		}
		if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
			cs->tx_cnt = 0;
			isac_fill_fifo(cs);
		} else
			schedule_event(cs, D_XMTBUFREADY);
	}
      afterXPR:
	if (val & 0x04) {	/* CISQ */
		exval = cs->readisac(cs, ISAC_CIR0);
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "ISAC CIR0 %02X", exval );
		if (exval & 2) {
			cs->dc.isac.ph_state = (exval >> 2) & 0xf;
			if (cs->debug & L1_DEB_ISAC)
				debugl1(cs, "ph_state change %x", cs->dc.isac.ph_state);
			schedule_event(cs, D_L1STATECHANGE);
		}
	}
}
Example #10
static int bfusb_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	struct bfusb_data *data;
	struct sk_buff *nskb;
	unsigned char buf[3];
	int sent = 0, size, count;

	BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);

	if (!hdev) {
		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
		return -ENODEV;
	}

	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -EBUSY;

	data = hdev->driver_data;

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		hdev->stat.cmd_tx++;
		break;
	case HCI_ACLDATA_PKT:
		hdev->stat.acl_tx++;
		break;
	case HCI_SCODATA_PKT:
		hdev->stat.sco_tx++;
		break;
	}

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);

	count = skb->len;

	/* Max HCI frame size seems to be 1511 + 1 */
	nskb = bt_skb_alloc(count + 32, GFP_ATOMIC);
	if (!nskb) {
		BT_ERR("Can't allocate memory for new packet");
		return -ENOMEM;
	}

	nskb->dev = (void *) data;

	while (count) {
		size = min_t(uint, count, BFUSB_MAX_BLOCK_SIZE);

		buf[0] = 0xc1 | ((sent == 0) ? 0x04 : 0) | ((count == size) ? 0x08 : 0);
		buf[1] = 0x00;
		buf[2] = (size == BFUSB_MAX_BLOCK_SIZE) ? 0 : size;

		memcpy(skb_put(nskb, 3), buf, 3);
		skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size);

		sent  += size;
		count -= size;
	}

	/* Don't send a frame whose length is an exact multiple of the
	 * bulk max packet size */
	if ((nskb->len % data->bulk_pkt_size) == 0) {
		buf[0] = 0xdd;
		buf[1] = 0x00;
		memcpy(skb_put(nskb, 2), buf, 2);
	}

	read_lock(&data->lock);

	skb_queue_tail(&data->transmit_q, nskb);
	bfusb_tx_wakeup(data);

	read_unlock(&data->lock);

	kfree_skb(skb);

	return 0;
}
Example #11
static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct net_device *dev = (struct net_device *)vcc->proto_data;
	struct sk_buff *new_skb;
	eg_cache_entry *eg;
	struct mpoa_client *mpc;
	uint32_t tag;
	char *tmp;
	
	ddprintk("mpoa: (%s) mpc_push:\n", dev->name);
	if (skb == NULL) {
		dprintk("mpoa: (%s) mpc_push: null skb, closing VCC\n", dev->name);
		mpc_vcc_close(vcc, dev);
		return;
	}
	
	skb->dev = dev;
	if (memcmp(skb->data, &llc_snap_mpoa_ctrl, sizeof(struct llc_snap_hdr)) == 0) {
		dprintk("mpoa: (%s) mpc_push: control packet arrived\n", dev->name);
		skb_queue_tail(&vcc->sk->receive_queue, skb);           /* Pass control packets to daemon */
		wake_up(&vcc->sleep);
		return;
	}

	/* data coming over the shortcut */
	atm_return(vcc, skb->truesize);

	mpc = find_mpc_by_lec(dev);
	if (mpc == NULL) {
		printk("mpoa: (%s) mpc_push: unknown MPC\n", dev->name);
		return;
	}

	if (memcmp(skb->data, &llc_snap_mpoa_data_tagged, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */
		ddprintk("mpoa: (%s) mpc_push: tagged data packet arrived\n", dev->name);

	} else if (memcmp(skb->data, &llc_snap_mpoa_data, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */
		printk("mpoa: (%s) mpc_push: non-tagged data packet arrived\n", dev->name);
		printk("           mpc_push: non-tagged data unsupported, purging\n");
		dev_kfree_skb_any(skb);
		return;
	} else {
		printk("mpoa: (%s) mpc_push: garbage arrived, purging\n", dev->name);
		dev_kfree_skb_any(skb);
		return;
	}

	tmp = skb->data + sizeof(struct llc_snap_hdr);
	tag = *(uint32_t *)tmp;

	eg = mpc->eg_ops->get_by_tag(tag, mpc);
	if (eg == NULL) {
		printk("mpoa: (%s) mpc_push: Didn't find egress cache entry, tag = %u\n",
		       dev->name,tag);
		purge_egress_shortcut(vcc, NULL);
		dev_kfree_skb_any(skb);
		return;
	}
	
	/*
	 * See if ingress MPC is using shortcut we opened as a return channel.
	 * This means we have a bi-directional vcc opened by us.
	 */ 
	if (eg->shortcut == NULL) {
		eg->shortcut = vcc;
		printk("mpoa: (%s) mpc_push: egress SVC in use\n", dev->name);
	}

	skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag)); /* get rid of LLC/SNAP header */
	new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length); /* LLC/SNAP is shorter than MAC header :( */
	dev_kfree_skb_any(skb);
	if (new_skb == NULL){
		mpc->eg_ops->put(eg);
		return;
	}
	skb_push(new_skb, eg->ctrl_info.DH_length);     /* add MAC header */
	memcpy(new_skb->data, eg->ctrl_info.DLL_header, eg->ctrl_info.DH_length);
	new_skb->protocol = eth_type_trans(new_skb, dev);
	new_skb->nh.raw = new_skb->data;

	eg->latest_ip_addr = new_skb->nh.iph->saddr;
	eg->packets_rcvd++;
	mpc->eg_ops->put(eg);

	memset(ATM_SKB(new_skb), 0, sizeof(struct atm_skb_data));
	netif_rx(new_skb);

	return;
}
Example #12
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;

	} else {
		/* wrong CQ type*/
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	if (eth_hdrs_stripped)
		fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof);
	else if (fnic_import_rq_eth_pkt(skb, bytes_written))
		goto drop;

	fp = (struct fc_frame *)skb;

	/*
	 * If the frame is an ELS response that matches the cached FLOGI
	 * OX_ID, and is an accept, issue a flogi_reg_request copy WQ
	 * request to the firmware to register the S_ID and determine
	 * whether to use FC_OUI mode or GW mode.
	 */
	if (is_matching_flogi_resp_frame(fnic, fp)) {
		if (!eth_hdrs_stripped) {
			if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
				fnic_handle_flogi_resp(fnic, fp);
				return;
			}
			/*
			 * Recd. Flogi reject. No point registering
			 * with fw, but forward to libFC
			 */
			goto forward;
		}
		goto drop;
	}
	if (!eth_hdrs_stripped)
		goto drop;

forward:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	/* Use fr_flags to indicate whether this was a successful FLOGI response */
	fr_flags(fp) = 0;
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}
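
One detail of the drop path deserves a note: this completion runs in hard-IRQ context, so the skb is released with dev_kfree_skb_irq() rather than kfree_skb(), which is only safe in process or softirq context. A hedged sketch of the rule:

#include <linux/netdevice.h>

static void demo_drop(struct sk_buff *skb, bool in_hard_irq)
{
	if (in_hard_irq)
		dev_kfree_skb_irq(skb);	/* defers the real free to softirq */
	else
		dev_kfree_skb_any(skb);	/* picks the safe variant itself */
}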
Example #13
void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
{
	struct recv_buf	*precvbuf = (struct recv_buf *)purb->context;	
	_adapter 			*padapter =(_adapter *)precvbuf->adapter;
	struct recv_priv	*precvpriv = &padapter->recvpriv;	
	
	RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_read_port_complete!!!\n"));
		
	ATOMIC_DEC(&(precvpriv->rx_pending_cnt));
	
	if(RTW_CANNOT_RX(padapter))
	{
		RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_read_port_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n", padapter->bDriverStopped, padapter->bSurpriseRemoved));
		DBG_8192C("%s() RX Warning! bDriverStopped(%d) OR bSurpriseRemoved(%d) \n", 
			__FUNCTION__,padapter->bDriverStopped, padapter->bSurpriseRemoved);
		goto exit;
	}

	if(purb->status==0)//SUCCESS
	{
		if ((purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE))
		{
			RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_read_port_complete: (purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)\n"));
			rtw_read_port(padapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
			DBG_8192C("%s()-%d: RX Warning!\n", __FUNCTION__, __LINE__);	
		}
		else 
		{	
			rtw_reset_continual_io_error(adapter_to_dvobj(padapter));
			
			precvbuf->transfer_len = purb->actual_length;			
			skb_put(precvbuf->pskb, purb->actual_length);	
			skb_queue_tail(&precvpriv->rx_skb_queue, precvbuf->pskb);

			#ifndef CONFIG_FIX_NR_BULKIN_BUFFER
			if (skb_queue_len(&precvpriv->rx_skb_queue)<=1)
			#endif
				tasklet_schedule(&precvpriv->recv_tasklet);

			precvbuf->pskb = NULL;
			rtw_read_port(padapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);			
		}		
	}
	else
	{
		RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_read_port_complete : purb->status(%d) != 0 \n", purb->status));
	
		DBG_8192C("###=> usb_read_port_complete => urb status(%d)\n", purb->status);

		if(rtw_inc_and_chk_continual_io_error(adapter_to_dvobj(padapter)) == _TRUE ){
			padapter->bSurpriseRemoved = _TRUE;
		}

		switch(purb->status) {
			case -EINVAL:
			case -EPIPE:			
			case -ENODEV:
			case -ESHUTDOWN:
				//padapter->bSurpriseRemoved=_TRUE;
				//RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_read_port_complete:bSurpriseRemoved=TRUE\n"));
			case -ENOENT:
				padapter->bDriverStopped=_TRUE;			
				RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_read_port_complete:bDriverStopped=TRUE\n"));
				break;
			case -EPROTO:
			case -EILSEQ:
			case -ETIME:
			case -ECOMM:
			case -EOVERFLOW:
				#ifdef DBG_CONFIG_ERROR_DETECT	
				{	
					HAL_DATA_TYPE	*pHalData = GET_HAL_DATA(padapter);
					pHalData->srestpriv.Wifi_Error_Status = USB_READ_PORT_FAIL;			
				}
				#endif
				rtw_read_port(padapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);			
				break;
			case -EINPROGRESS:
				DBG_8192C("ERROR: URB IS IN PROGRESS!/n");
				break;
			default:
				break;				
		}
		
	}	

exit:	
	
_func_exit_;
	
}
Example #14
void ax25_kick(ax25_cb *ax25)
{
	struct sk_buff *skb, *skbn;
	int last = 1;
	unsigned short start, end, next;

	if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
		return;

	if (ax25->condition & AX25_COND_PEER_RX_BUSY)
		return;

	if (skb_peek(&ax25->write_queue) == NULL)
		return;

	start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
	end   = (ax25->va + ax25->window) % ax25->modulus;

	if (start == end)
		return;

	/*
	 * Transmit data until either we're out of data to send or
	 * the window is full. Send a poll on the final I frame if
	 * the window is filled.
	 */

	/*
	 * Dequeue the frame and copy it.
	 * Check for race with ax25_clear_queues().
	 */
	skb  = skb_dequeue(&ax25->write_queue);
	if (!skb)
		return;

	ax25->vs = start;

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&ax25->write_queue, skb);
			break;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		next = (ax25->vs + 1) % ax25->modulus;
		last = (next == end);

		/*
		 * Transmit the frame copy.
		 * bke 960114: do not set the Poll bit on the last frame
		 * in DAMA mode.
		 */
		switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
		case AX25_PROTO_STD_SIMPLEX:
		case AX25_PROTO_STD_DUPLEX:
			ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
			break;

#ifdef CONFIG_AX25_DAMA_SLAVE
		case AX25_PROTO_DAMA_SLAVE:
			ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
			break;
#endif
		}

		ax25->vs = next;

		/*
		 * Requeue the original data frame.
		 */
		skb_queue_tail(&ax25->ack_queue, skb);

	} while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);

	ax25->condition &= ~AX25_COND_ACK_PENDING;

	if (!ax25_t1timer_running(ax25)) {
		ax25_stop_t3timer(ax25);
		ax25_calculate_t1(ax25);
		ax25_start_t1timer(ax25);
	}
}
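
The dequeue/clone/requeue dance above is the heart of AX.25 retransmission: a clone goes to the transmitter while the original is parked on ack_queue until the peer acknowledges it (or it must be resent). A hedged reduction, with xmit standing in for ax25_send_iframe():

#include <linux/errno.h>
#include <linux/skbuff.h>

static int demo_send_keep_copy(struct sk_buff *skb,
			       struct sk_buff_head *ack_queue,
			       void (*xmit)(struct sk_buff *))
{
	struct sk_buff *skbn = skb_clone(skb, GFP_ATOMIC);

	if (!skbn)
		return -ENOMEM;		/* caller requeues the original */
	xmit(skbn);			/* the clone is consumed by tx */
	skb_queue_tail(ack_queue, skb);	/* original waits for the ACK */
	return 0;
}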
Example #15
/**
 * Transmit a packet.
 * This is a helper function for ctcm_tx().
 *
 *  ch		Channel to be used for sending.
 *  skb		Pointer to struct sk_buff of packet to send.
 *            The linklevel header has already been set up
 *            by ctcm_tx().
 *
 * returns 0 on success, -ERRNO on failure.
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	/* we need to acquire the lock for testing the state
	 * otherwise we can have an IRQ changing the state to
	 * TXIDLE after the test but before acquiring the lock.
	 */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		int l = skb->len + LL_HEADER_LENGTH;

		if (ch->collect_len + l > ch->max_bufsize - 2) {
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
				goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	/*
	 * Protect skb against being freed by upper
	 * layers.
	 */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;

	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skb's above 2G ourselves.
	 */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed, try via copying to
		 * trans_skb. trans_skb usually has a pre-allocated
		 * idal.
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/*
			 * Remove our header. It gets added
			 * again on retransmit.
			 */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;
	}
	if (do_debug_ccw)
		ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
					sizeof(struct ccw1) * 3);
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
					(unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		/*
		 * Remove our header. It gets added
		 * again on retransmit.
		 */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}
Example #16
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;
	bool		queue = 0;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
				if (status == -EINVAL)
					dev->net->stats.rx_errors++;
				else if (status == -EOVERFLOW)
					dev->net->stats.rx_over_errors++;
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}

		if (!status)
			queue = 1;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		queue = 1;
		dev_kfree_skb_any(skb);
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

clean:
	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->rx_reqs);
	spin_unlock(&dev->req_lock);

	if (queue)
		queue_work(uether_wq, &dev->rx_work);
}
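
Two habits in this completion generalize well: the request always goes back on the free list under the lock, whatever happened, and any heavier processing is deferred to a workqueue rather than done in the callback. A hedged sketch with hypothetical demo_* parameters:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static void demo_recycle_and_kick(spinlock_t *lock,
				  struct list_head *free_list,
				  struct list_head *req_node,
				  struct workqueue_struct *wq,
				  struct work_struct *work,
				  bool have_data)
{
	spin_lock(lock);
	list_add(req_node, free_list);	/* recycle unconditionally */
	spin_unlock(lock);

	if (have_data)
		queue_work(wq, work);	/* finish in process context */
}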
Example #17
static void ctcmpc_send_sweep_req(struct channel *rch)
{
	struct net_device *dev = rch->netdev;
	struct ctcm_priv *priv;
	struct mpc_group *grp;
	struct th_sweep *header;
	struct sk_buff *sweep_skb;
	struct channel *ch;
	/* int rc = 0; */

	priv = dev->ml_priv;
	grp = priv->mpcg;
	ch = priv->channel[CTCM_WRITE];

	/* sweep processing is not complete until response and request */
	/* have completed for all read channels in the group	       */
	if (grp->in_sweep == 0) {
		grp->in_sweep = 1;
		grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
		grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
	}

	sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);

	if (sweep_skb == NULL)	{
		/* rc = -ENOMEM; */
				goto nomem;
	}

	header = kmalloc(TH_SWEEP_LENGTH, gfp_type());

	if (!header) {
		dev_kfree_skb_any(sweep_skb);
		/* rc = -ENOMEM; */
				goto nomem;
	}

	header->th.th_seg	= 0x00 ;
	header->th.th_ch_flag	= TH_SWEEP_REQ;  /* 0x0f */
	header->th.th_blk_flag	= 0x00;
	header->th.th_is_xid	= 0x00;
	header->th.th_seq_num	= 0x00;
	header->sw.th_last_seq	= ch->th_seq_num;

	memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);

	kfree(header);

	dev->trans_start = jiffies;
	skb_queue_tail(&ch->sweep_queue, sweep_skb);

	fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);

	return;

nomem:
	grp->in_sweep = 0;
	ctcm_clear_busy(dev);
	fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);

	return;
}
Example #18
static void qpolicy_simple_push(struct sock *sk, struct sk_buff *skb)
{
	skb_queue_tail(&sk->sk_write_queue, skb);
}
Example #19
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, int len,
                            struct scm_cookie *scm)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4)
		return -EINVAL;
	
	lock_sock(sk);

	if (!(hdev = hci_pi(sk)->hdev)) {
		err = -EBADFD;
		goto done;
	}

	if (!(skb = bluez_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err)))
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	skb->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (skb->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = __le16_to_cpu(get_unaligned((u16 *)skb->data));
		u16 ogf = cmd_opcode_ogf(opcode);
		u16 ocf = cmd_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) || 
				!hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
		    			!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == OGF_VENDOR_CMD)) {
			skb_queue_tail(&hdev->raw_q, skb);
			hci_sched_tx(hdev);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		hci_sched_tx(hdev);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
Example #20
/*
 * State machine for state 3, Connected State.
 * The handling of the timer(s) is in file nr_timer.c
 * Handling of state 0 and connection release is in netrom.c.
 */
static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
	struct sk_buff_head temp_queue;
	struct sk_buff *skbn;
	unsigned short save_vr;
	unsigned short nr, ns;
	int queued = 0;

	nr = skb->data[18];
	ns = skb->data[17];

	switch (frametype) {

		case NR_CONNREQ:
			nr_write_internal(sk, NR_CONNACK);
			break;

		case NR_DISCREQ:
			nr_write_internal(sk, NR_DISCACK);
			nr_disconnect(sk, 0);
			break;

		case NR_CONNACK | NR_CHOKE_FLAG:
		case NR_DISCACK:
			nr_disconnect(sk, ECONNRESET);
			break;

		case NR_INFOACK:
		case NR_INFOACK | NR_CHOKE_FLAG:
		case NR_INFOACK | NR_NAK_FLAG:
		case NR_INFOACK | NR_NAK_FLAG | NR_CHOKE_FLAG:
			if (frametype & NR_CHOKE_FLAG) {
				sk->protinfo.nr->condition |= NR_COND_PEER_RX_BUSY;
				nr_start_t4timer(sk);
			} else {
				sk->protinfo.nr->condition &= ~NR_COND_PEER_RX_BUSY;
				nr_stop_t4timer(sk);
			}
			if (!nr_validate_nr(sk, nr)) {
				break;
			}
			if (frametype & NR_NAK_FLAG) {
				nr_frames_acked(sk, nr);
				nr_send_nak_frame(sk);
			} else {
				if (sk->protinfo.nr->condition & NR_COND_PEER_RX_BUSY) {
					nr_frames_acked(sk, nr);
				} else {
					nr_check_iframes_acked(sk, nr);
				}
			}
			break;

		case NR_INFO:
		case NR_INFO | NR_NAK_FLAG:
		case NR_INFO | NR_CHOKE_FLAG:
		case NR_INFO | NR_MORE_FLAG:
		case NR_INFO | NR_NAK_FLAG | NR_CHOKE_FLAG:
		case NR_INFO | NR_CHOKE_FLAG | NR_MORE_FLAG:
		case NR_INFO | NR_NAK_FLAG | NR_MORE_FLAG:
		case NR_INFO | NR_NAK_FLAG | NR_CHOKE_FLAG | NR_MORE_FLAG:
			if (frametype & NR_CHOKE_FLAG) {
				sk->protinfo.nr->condition |= NR_COND_PEER_RX_BUSY;
				nr_start_t4timer(sk);
			} else {
				sk->protinfo.nr->condition &= ~NR_COND_PEER_RX_BUSY;
				nr_stop_t4timer(sk);
			}
			if (nr_validate_nr(sk, nr)) {
				if (frametype & NR_NAK_FLAG) {
					nr_frames_acked(sk, nr);
					nr_send_nak_frame(sk);
				} else {
					if (sk->protinfo.nr->condition & NR_COND_PEER_RX_BUSY) {
						nr_frames_acked(sk, nr);
					} else {
						nr_check_iframes_acked(sk, nr);
					}
				}
			}
			queued = 1;
			skb_queue_head(&sk->protinfo.nr->reseq_queue, skb);
			if (sk->protinfo.nr->condition & NR_COND_OWN_RX_BUSY)
				break;
			skb_queue_head_init(&temp_queue);
			do {
				save_vr = sk->protinfo.nr->vr;
				while ((skbn = skb_dequeue(&sk->protinfo.nr->reseq_queue)) != NULL) {
					ns = skbn->data[17];
					if (ns == sk->protinfo.nr->vr) {
						if (nr_queue_rx_frame(sk, skbn, frametype & NR_MORE_FLAG) == 0) {
							sk->protinfo.nr->vr = (sk->protinfo.nr->vr + 1) % NR_MODULUS;
						} else {
							sk->protinfo.nr->condition |= NR_COND_OWN_RX_BUSY;
							skb_queue_tail(&temp_queue, skbn);
						}
					} else if (nr_in_rx_window(sk, ns)) {
						skb_queue_tail(&temp_queue, skbn);
					} else {
						kfree_skb(skbn);
					}
				}
				while ((skbn = skb_dequeue(&temp_queue)) != NULL) {
					skb_queue_tail(&sk->protinfo.nr->reseq_queue, skbn);
				}
			} while (save_vr != sk->protinfo.nr->vr);
			/*
			 * Window is full, ack it immediately.
			 */
			if (((sk->protinfo.nr->vl + sk->protinfo.nr->window) % NR_MODULUS) == sk->protinfo.nr->vr) {
				nr_enquiry_response(sk);
			} else {
				if (!(sk->protinfo.nr->condition & NR_COND_ACK_PENDING)) {
					sk->protinfo.nr->condition |= NR_COND_ACK_PENDING;
					nr_start_t2timer(sk);
				}
			}
			break;

		default:
			break;
	}

	return queued;
}
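
The NR_INFO branch ends with a classic resequencing sweep: pull every frame off the reassembly queue, deliver the one whose sequence number (byte 17 of a NET/ROM frame) equals V(R), park the rest, and repeat until a sweep makes no progress. A hedged sketch that leaves sequence extraction abstract and omits the dropping of out-of-window frames:

#include <linux/skbuff.h>

static void demo_resequence(struct sk_buff_head *reseq, unsigned short *vr,
			    unsigned short modulus,
			    unsigned short (*seq_of)(struct sk_buff *),
			    int (*deliver)(struct sk_buff *))
{
	struct sk_buff_head tmp;
	struct sk_buff *skb;
	unsigned short save_vr;

	skb_queue_head_init(&tmp);
	do {
		save_vr = *vr;
		while ((skb = skb_dequeue(reseq)) != NULL) {
			/* deliver() consumes the skb only on success */
			if (seq_of(skb) == *vr && deliver(skb) == 0)
				*vr = (*vr + 1) % modulus; /* in sequence */
			else
				skb_queue_tail(&tmp, skb); /* hold for later */
		}
		while ((skb = skb_dequeue(&tmp)) != NULL)
			skb_queue_tail(reseq, skb);
	} while (save_vr != *vr);	/* stop once nothing advanced */
}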
Example #21
File: usb.c Project: Lyude/linux
/* This function prepares a data packet to be sent under the usb tx
 * aggregation protocol. It checks the current usb aggregation status and
 * links the packet to the aggregation list if possible. Work flow:
 * (1) if only 1 packet is available, add the usb tx aggregation header
 *     and send.
 * (2) if the packet can be aggregated, link it to the current
 *     aggregation list.
 * (3) if the packet cannot be aggregated, aggregate and send the packets
 *     already in the aggregation list. Then link the packet into the list
 *     if there are more packets in the transmit queue, otherwise try to
 *     transmit it as a single packet.
 */
static int mwifiex_usb_aggr_tx_data(struct mwifiex_adapter *adapter, u8 ep,
				    struct sk_buff *skb,
				    struct mwifiex_tx_param *tx_param,
				    struct usb_tx_data_port *port)
{
	u8 *payload, pad;
	u16 align = adapter->bus_aggr.tx_aggr_align;
	struct sk_buff *skb_send = NULL;
	struct urb_context *context = NULL;
	struct txpd *local_tx_pd =
		(struct txpd *)((u8 *)skb->data + adapter->intf_hdr_len);
	u8 f_send_aggr_buf = 0;
	u8 f_send_cur_buf = 0;
	u8 f_precopy_cur_buf = 0;
	u8 f_postcopy_cur_buf = 0;
	u32 timeout;
	int ret;

	/* padding to ensure each packet's alignment */
	pad = (align - (skb->len & (align - 1))) % align;

	if (tx_param && tx_param->next_pkt_len) {
		/* next packet available in tx queue*/
		if (port->tx_aggr.aggr_len + skb->len + pad >
		    adapter->bus_aggr.tx_aggr_max_size) {
			f_send_aggr_buf = 1;
			f_postcopy_cur_buf = 1;
		} else {
			/* current packet could be aggregated*/
			f_precopy_cur_buf = 1;

			if (port->tx_aggr.aggr_len + skb->len + pad +
			    tx_param->next_pkt_len >
			    adapter->bus_aggr.tx_aggr_max_size ||
			    port->tx_aggr.aggr_num + 2 >
			    adapter->bus_aggr.tx_aggr_max_num) {
			    /* next packet could not be aggregated
			     * send current aggregation buffer
			     */
				f_send_aggr_buf = 1;
			}
		}
	} else {
		/* last packet in tx queue */
		if (port->tx_aggr.aggr_num > 0) {
			/* pending packets in aggregation buffer*/
			if (port->tx_aggr.aggr_len + skb->len + pad >
			    adapter->bus_aggr.tx_aggr_max_size) {
				/* current packet cannot be aggregated;
				 * send aggr buffer first, then send packet.
				 */
				f_send_cur_buf = 1;
			} else {
				/* last packet, Aggregation and send */
				f_precopy_cur_buf = 1;
			}

			f_send_aggr_buf = 1;
		} else {
			/* no pending packets in aggregation buffer,
			 * send current packet immediately
			 */
			f_send_cur_buf = 1;
		}
	}

	if (local_tx_pd->flags & MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET) {
		/* Send NULL packet immediately*/
		if (f_precopy_cur_buf) {
			if (skb_queue_empty(&port->tx_aggr.aggr_list)) {
				f_precopy_cur_buf = 0;
				f_send_aggr_buf = 0;
				f_send_cur_buf = 1;
			} else {
				f_send_aggr_buf = 1;
			}
		} else if (f_postcopy_cur_buf) {
			f_send_cur_buf = 1;
			f_postcopy_cur_buf = 0;
		}
	}

	if (f_precopy_cur_buf) {
		skb_queue_tail(&port->tx_aggr.aggr_list, skb);
		port->tx_aggr.aggr_len += (skb->len + pad);
		port->tx_aggr.aggr_num++;
		if (f_send_aggr_buf)
			goto send_aggr_buf;

		/* The packet will not be sent immediately;
		 * set a timer to make sure it is sent within
		 * a strict time limit. Fit the timeout value
		 * dynamically to the number of packets in
		 * aggr_list.
		 */
		if (!port->tx_aggr.timer_cnxt.is_hold_timer_set) {
			port->tx_aggr.timer_cnxt.hold_tmo_msecs =
					MWIFIEX_USB_TX_AGGR_TMO_MIN;
			timeout =
				port->tx_aggr.timer_cnxt.hold_tmo_msecs;
			mod_timer(&port->tx_aggr.timer_cnxt.hold_timer,
				  jiffies + msecs_to_jiffies(timeout));
			port->tx_aggr.timer_cnxt.is_hold_timer_set = true;
		} else {
			if (port->tx_aggr.timer_cnxt.hold_tmo_msecs <
			    MWIFIEX_USB_TX_AGGR_TMO_MAX) {
				/* Dynamically fit the timeout */
				timeout =
				++port->tx_aggr.timer_cnxt.hold_tmo_msecs;
				mod_timer(&port->tx_aggr.timer_cnxt.hold_timer,
					  jiffies + msecs_to_jiffies(timeout));
			}
		}
	}

send_aggr_buf:
	if (f_send_aggr_buf) {
		ret = mwifiex_usb_prepare_tx_aggr_skb(adapter, port, &skb_send);
		if (!ret) {
			context = &port->tx_data_list[port->tx_data_ix++];
			ret = mwifiex_usb_construct_send_urb(adapter, port, ep,
							     context, skb_send);
			if (ret == -1)
				mwifiex_write_data_complete(adapter, skb_send,
							    0, -1);
		}
	}

	if (f_send_cur_buf) {
		if (f_send_aggr_buf) {
			if (atomic_read(&port->tx_data_urb_pending) >=
			    MWIFIEX_TX_DATA_URB) {
				port->block_status = true;
				adapter->data_sent =
					mwifiex_usb_data_sent(adapter);
				/* no available urb, postcopy packet*/
				f_postcopy_cur_buf = 1;
				goto postcopy_cur_buf;
			}

			if (port->tx_data_ix >= MWIFIEX_TX_DATA_URB)
				port->tx_data_ix = 0;
		}

		payload = skb->data;
		*(u16 *)&payload[2] =
			cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2 | 0x80);
		*(u16 *)payload = cpu_to_le16(skb->len);
		skb_send = skb;
		context = &port->tx_data_list[port->tx_data_ix++];
		return mwifiex_usb_construct_send_urb(adapter, port, ep,
						      context, skb_send);
	}

postcopy_cur_buf:
	if (f_postcopy_cur_buf) {
		skb_queue_tail(&port->tx_aggr.aggr_list, skb);
		port->tx_aggr.aggr_len += (skb->len + pad);
		port->tx_aggr.aggr_num++;
		/* New aggregation begin, start timer */
		if (!port->tx_aggr.timer_cnxt.is_hold_timer_set) {
			port->tx_aggr.timer_cnxt.hold_tmo_msecs =
					MWIFIEX_USB_TX_AGGR_TMO_MIN;
			timeout = port->tx_aggr.timer_cnxt.hold_tmo_msecs;
			mod_timer(&port->tx_aggr.timer_cnxt.hold_timer,
				  jiffies + msecs_to_jiffies(timeout));
			port->tx_aggr.timer_cnxt.is_hold_timer_set = true;
		}
	}

	return -EINPROGRESS;
}
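
The hold-timer logic at both tails of this function enforces one guarantee: a packet parked on the aggregation list is flushed within a bounded delay even if no further traffic arrives, and the timeout is stretched gradually as the batch grows. A hedged sketch of the arming step with hypothetical parameters:

#include <linux/jiffies.h>
#include <linux/timer.h>

static void demo_arm_hold_timer(struct timer_list *t, bool *armed,
				unsigned int *tmo_msecs,
				unsigned int tmo_min, unsigned int tmo_max)
{
	if (!*armed) {
		*tmo_msecs = tmo_min;	/* new batch: shortest hold time */
		mod_timer(t, jiffies + msecs_to_jiffies(*tmo_msecs));
		*armed = true;
	} else if (*tmo_msecs < tmo_max) {
		++*tmo_msecs;		/* growing batch: stretch timeout */
		mod_timer(t, jiffies + msecs_to_jiffies(*tmo_msecs));
	}
}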
Example #22
void
main_irq_hfc(struct BCState *bcs)
{
	struct IsdnCardState *cs = bcs->cs;
	int z1, z2, rcnt;
	u_char f1, f2, cip;
	int receive, transmit, count = 5;
	struct sk_buff *skb;

Begin:
	count--;
	cip = HFC_CIP | HFC_F1 | HFC_REC | HFC_CHANNEL(bcs->channel);
	if ((cip & 0xc3) != (cs->hw.hfc.cip & 0xc3)) {
		cs->BC_Write_Reg(cs, HFC_STATUS, cip, cip);
		WaitForBusy(cs);
	}
	WaitNoBusy(cs);
	receive = 0;
	if (bcs->mode == L1_MODE_HDLC) {
		f1 = cs->BC_Read_Reg(cs, HFC_DATA, cip);
		cip = HFC_CIP | HFC_F2 | HFC_REC | HFC_CHANNEL(bcs->channel);
		WaitNoBusy(cs);
		f2 = cs->BC_Read_Reg(cs, HFC_DATA, cip);
		if (f1 != f2) {
			if (cs->debug & L1_DEB_HSCX)
				debugl1(cs, "hfc rec %d f1(%d) f2(%d)",
					bcs->channel, f1, f2);
			receive = 1;
		}
	}
	if (receive || (bcs->mode == L1_MODE_TRANS)) {
		WaitForBusy(cs);
		z1 = ReadZReg(bcs, HFC_Z1 | HFC_REC | HFC_CHANNEL(bcs->channel));
		z2 = ReadZReg(bcs, HFC_Z2 | HFC_REC | HFC_CHANNEL(bcs->channel));
		rcnt = z1 - z2;
		if (rcnt < 0)
			rcnt += cs->hw.hfc.fifosize;
		if ((bcs->mode == L1_MODE_HDLC) || (rcnt)) {
			rcnt++;
			if (cs->debug & L1_DEB_HSCX)
				debugl1(cs, "hfc rec %d z1(%x) z2(%x) cnt(%d)",
					bcs->channel, z1, z2, rcnt);
			/*              sti(); */
			if ((skb = hfc_empty_fifo(bcs, rcnt))) {
				skb_queue_tail(&bcs->rqueue, skb);
				schedule_event(bcs, B_RCVBUFREADY);
			}
		}
		receive = 1;
	}
	if (bcs->tx_skb) {
		transmit = 1;
		test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
		hfc_fill_fifo(bcs);
		if (test_bit(BC_FLG_BUSY, &bcs->Flag))
			transmit = 0;
	} else {
		if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
			transmit = 1;
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			hfc_fill_fifo(bcs);
			if (test_bit(BC_FLG_BUSY, &bcs->Flag))
				transmit = 0;
		} else {
			transmit = 0;
			schedule_event(bcs, B_XMTBUFREADY);
		}
	}
	if ((receive || transmit) && count)
		goto Begin;
	return;
}
Example #23
int x25_output(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char header[X25_EXT_MIN_LEN];
	int err, frontlen, len;
	int sent=0, noblock = X25_SKB_CB(skb)->flags & MSG_DONTWAIT;
	struct x25_sock *x25 = x25_sk(sk);
	int header_len = x25->neighbour->extended ? X25_EXT_MIN_LEN :
						    X25_STD_MIN_LEN;
	int max_len = x25_pacsize_to_bytes(x25->facilities.pacsize_out);

	if (skb->len - header_len > max_len) {
		
		/* Save the header; each fragment will get a copy of it */
		skb_copy_from_linear_data(skb, header, header_len);
		skb_pull(skb, header_len);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			release_sock(sk);
			skbn = sock_alloc_send_skb(sk, frontlen + max_len,
						   noblock, &err);
			lock_sock(sk);
			if (!skbn) {
				if (err == -EWOULDBLOCK && noblock){
					kfree_skb(skb);
					return sent;
				}
				SOCK_DEBUG(sk, "x25_output: fragment alloc"
					       " failed, err=%d, %d bytes "
					       "sent\n", err, sent);
				return err;
			}

			skb_reserve(skbn, frontlen);

			len = max_len > skb->len ? skb->len : max_len;

			
			/* Copy the next chunk of payload into the fragment */
			skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
			skb_pull(skb, len);

			
			/* Duplicate the saved header in front of the chunk */
			skb_push(skbn, header_len);
			skb_copy_to_linear_data(skbn, header, header_len);

			if (skb->len > 0) {
				if (x25->neighbour->extended)
					skbn->data[3] |= X25_EXT_M_BIT;
				else
					skbn->data[2] |= X25_STD_M_BIT;
			}

			skb_queue_tail(&sk->sk_write_queue, skbn);
			sent += len;
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb);
		sent = skb->len - header_len;
	}
	return sent;
}
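x25_output strips the layer-3 header once, copies it in front of every fragment, and sets the M ('more data') bit on every fragment except the last: byte 2 of a standard-modulo header, byte 3 of an extended one. A simplified sketch of that header handling over plain byte arrays; the header byte values are made up, and the 0x10/0x01 M-bit masks follow the values the kernel's X.25 code uses:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STD_MIN_LEN	3	/* standard-modulo X.25 header */
#define EXT_MIN_LEN	4	/* extended-modulo X.25 header */
#define STD_M_BIT	0x10
#define EXT_M_BIT	0x01

/* Build one fragment: copy the saved header, append the payload slice,
 * and set the M bit unless this is the last fragment. */
static size_t build_fragment(uint8_t *out, const uint8_t *header, int extended,
			     const uint8_t *payload, size_t len, int more)
{
	size_t hlen = extended ? EXT_MIN_LEN : STD_MIN_LEN;

	memcpy(out, header, hlen);
	memcpy(out + hlen, payload, len);
	if (more) {
		if (extended)
			out[3] |= EXT_M_BIT;
		else
			out[2] |= STD_M_BIT;
	}
	return hlen + len;
}

int main(void)
{
	uint8_t header[EXT_MIN_LEN] = { 0x10, 0x01, 0x00, 0x00 };
	uint8_t data[300], frag[256];
	size_t max_len = 128, off = 0, n;

	memset(data, 0xab, sizeof(data));
	while (off < sizeof(data)) {
		n = sizeof(data) - off < max_len ? sizeof(data) - off : max_len;
		build_fragment(frag, header, 1, data + off, n,
			       off + n < sizeof(data));
		off += n;
		printf("fragment of %zu bytes, M=%d\n", n, frag[3] & EXT_M_BIT);
	}
	return 0;
}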
Example no. 24
/*
 * Segment the firmware image and use a pointer and length to track each
 * segment.
 */
bool fw_download_code(struct net_device *dev, u8 *code_virtual_address, u32 buffer_len)
{
	struct r8192_priv   *priv = ieee80211_priv(dev);
	bool		    rt_status = true;
	u16		    frag_threshold;
	u16		    frag_length, frag_offset = 0;
	//u16		    total_size;
	int		    i;

	rt_firmware	    *pfirmware = priv->pFirmware;
	struct sk_buff	    *skb;
	unsigned char	    *seg_ptr;
	cb_desc		    *tcb_desc;
	u8                  bLastIniPkt;

	firmware_init_param(dev);
	//Fragmentation might be required
	frag_threshold = pfirmware->cmdpacket_frag_thresold;
	do {
		if (buffer_len - frag_offset > frag_threshold) {
			frag_length = frag_threshold;
			bLastIniPkt = 0;
		} else {
			frag_length = buffer_len - frag_offset;
			bLastIniPkt = 1;
		}

		/*
		 * Allocate an skb to hold the firmware fragment and the tx
		 * descriptor info; add 4 bytes to avoid overflow when data is
		 * appended to the packet.
		 */
		#ifdef RTL8192U
		skb  = dev_alloc_skb(USB_HWDESC_HEADER_LEN + frag_length + 4);
		#else
		skb  = dev_alloc_skb(frag_length + 4);
		#endif
		if (!skb)
			return false;
		memcpy(skb->cb, &dev, sizeof(dev));
		tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->queue_index = TXCMD_QUEUE;
		tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT;
		tcb_desc->bLastIniPkt = bLastIniPkt;

		#ifdef RTL8192U
		skb_reserve(skb, USB_HWDESC_HEADER_LEN);
		#endif
		seg_ptr = skb->data;
		/*
		 * Byte-swap each 32-bit word from little endian to big
		 * endian, zero-padding the tail of the final word.
		 */
		for (i = 0; i < frag_length; i += 4) {
			*seg_ptr++ = ((i + 0) < frag_length) ? code_virtual_address[i + 3] : 0;
			*seg_ptr++ = ((i + 1) < frag_length) ? code_virtual_address[i + 2] : 0;
			*seg_ptr++ = ((i + 2) < frag_length) ? code_virtual_address[i + 1] : 0;
			*seg_ptr++ = ((i + 3) < frag_length) ? code_virtual_address[i + 0] : 0;
		}
		tcb_desc->txbuf_size = (u16)i;
		skb_put(skb, i);

		if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
		    !skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]) ||
		    priv->ieee80211->queue_stop) {
			RT_TRACE(COMP_FIRMWARE, "=====================================================> tx full!\n");
			skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
		} else {
			priv->ieee80211->softmac_hard_start_xmit(skb, dev);
		}

		code_virtual_address += frag_length;
		frag_offset += frag_length;

	} while (frag_offset < buffer_len);

	return rt_status;
}
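The inner loop of fw_download_code reverses each 32-bit word of the image and zero-pads the tail to a multiple of four. Below is a standalone version; note one deliberate difference, flagged in the comment: each guard tests the index actually read, while the driver's guards test the loop index and so can pull trailing bytes from beyond the fragment when its length is not a multiple of four.

#include <stdint.h>
#include <stdio.h>

/* Copy src[0..len) to dst, reversing every 4-byte group and padding the
 * final group with zeros; returns the padded length (multiple of 4). */
static unsigned int copy_swab32_pad(uint8_t *dst, const uint8_t *src,
				    unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i += 4) {
		/* Guard on the byte being read, so we never index past
		 * src[len - 1] (the driver guards on the loop index). */
		*dst++ = (i + 3 < len) ? src[i + 3] : 0;
		*dst++ = (i + 2 < len) ? src[i + 2] : 0;
		*dst++ = (i + 1 < len) ? src[i + 1] : 0;
		*dst++ = src[i];	/* i < len always holds here */
	}
	return i;
}

int main(void)
{
	uint8_t in[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t out[8];
	unsigned int i, n = copy_swab32_pad(out, in, sizeof(in));

	for (i = 0; i < n; i++)
		printf("%02x ", out[i]);	/* 44 33 22 11 00 00 00 55 */
	printf("\n");
	return 0;
}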
Example no. 25
int	rtl8188eu_init_recv_priv(_adapter *padapter)
{
    struct recv_priv	*precvpriv = &padapter->recvpriv;
    int	i, res = _SUCCESS;
    struct recv_buf *precvbuf;

#ifdef CONFIG_RECV_THREAD_MODE
    _rtw_init_sema(&precvpriv->recv_sema, 0);//will be removed
    _rtw_init_sema(&precvpriv->terminate_recvthread_sema, 0);//will be removed
#endif

#ifdef PLATFORM_LINUX
    tasklet_init(&precvpriv->recv_tasklet,
                 (void(*)(unsigned long))rtl8188eu_recv_tasklet,
                 (unsigned long)padapter);
#endif

#ifdef CONFIG_USB_INTERRUPT_IN_PIPE
#ifdef PLATFORM_LINUX
    precvpriv->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
    if(precvpriv->int_in_urb == NULL) {
        DBG_8192C("alloc_urb for interrupt in endpoint fail !!!!\n");
    }
#endif
    precvpriv->int_in_buf = rtw_zmalloc(sizeof(INTERRUPT_MSG_FORMAT_EX));
    if(precvpriv->int_in_buf == NULL) {
        DBG_8192C("alloc_mem for interrupt in endpoint fail !!!!\n");
    }
#endif

    //init recv_buf
    _rtw_init_queue(&precvpriv->free_recv_buf_queue);

#ifdef CONFIG_USE_USB_BUFFER_ALLOC_RX
    _rtw_init_queue(&precvpriv->recv_buf_pending_queue);
#endif	// CONFIG_USE_USB_BUFFER_ALLOC_RX

    precvpriv->pallocated_recv_buf = rtw_zmalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4);
    if (precvpriv->pallocated_recv_buf == NULL) {
        res = _FAIL;
        RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("alloc recv_buf fail!\n"));
        goto exit;
    }
    _rtw_memset(precvpriv->pallocated_recv_buf, 0, NR_RECVBUFF * sizeof(struct recv_buf) + 4);

    precvpriv->precv_buf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(precvpriv->pallocated_recv_buf), 4);
    precvbuf = (struct recv_buf *)precvpriv->precv_buf;

    for (i = 0; i < NR_RECVBUFF; i++) {
        _rtw_init_listhead(&precvbuf->list);

        _rtw_spinlock_init(&precvbuf->recvbuf_lock);

        precvbuf->alloc_sz = MAX_RECVBUF_SZ;

        res = rtw_os_recvbuf_resource_alloc(padapter, precvbuf);
        if (res == _FAIL)
            break;

        precvbuf->ref_cnt = 0;
        precvbuf->adapter = padapter;

        //rtw_list_insert_tail(&precvbuf->list, &(precvpriv->free_recv_buf_queue.queue));

        precvbuf++;
    }

    precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;

#ifdef PLATFORM_LINUX

    skb_queue_head_init(&precvpriv->rx_skb_queue);

#ifdef CONFIG_PREALLOC_RECV_SKB
    {
        int i;
        SIZE_PTR tmpaddr=0;
        SIZE_PTR alignment=0;
        struct sk_buff *pskb=NULL;

        skb_queue_head_init(&precvpriv->free_recv_skb_queue);

        for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) // http://www.mail-archive.com/[email protected]/msg17214.html
            pskb = dev_alloc_skb(MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
#else
            pskb = netdev_alloc_skb(padapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
#endif

            if (pskb) {
                pskb->dev = padapter->pnetdev;

                tmpaddr = (SIZE_PTR)pskb->data;
                alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
                skb_reserve(pskb, RECVBUFF_ALIGN_SZ - alignment);

                skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
            }

            pskb = NULL;
        }
    }
#endif

#endif

exit:
    return res;
}
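N_BYTE_ALIGMENT (the vendor's own spelling) rounds the buffer address up to the next 4-byte boundary, which is why the allocation above is padded with 4 extra bytes. A generic user-space equivalent of the over-allocate-and-mask trick:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Round ptr up to the next 'align' boundary; align must be a power
 * of two, as in the driver's N_BYTE_ALIGMENT((SIZE_PTR)ptr, 4). */
static void *align_up(void *ptr, uintptr_t align)
{
	return (void *)(((uintptr_t)ptr + align - 1) & ~(align - 1));
}

int main(void)
{
	size_t payload = 100;
	/* Over-allocate by the alignment so the aligned pointer still
	 * has 'payload' usable bytes, like the "+ 4" above. */
	unsigned char *raw = malloc(payload + 4);
	unsigned char *buf;

	if (!raw)
		return 1;
	buf = align_up(raw, 4);
	printf("raw=%p aligned=%p\n", (void *)raw, (void *)buf);
	free(raw);
	return 0;
}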
Example no. 26
void iwm_tx_worker(struct work_struct *work)
{
	struct iwm_priv *iwm;
	struct iwm_tx_info *tx_info = NULL;
	struct sk_buff *skb;
	struct iwm_tx_queue *txq;
	struct iwm_sta_info *sta_info;
	struct iwm_tid_info *tid_info;
	int cmdlen, ret, pool_id;

	txq = container_of(work, struct iwm_tx_queue, worker);
	iwm = container_of(txq, struct iwm_priv, txq[txq->id]);

	pool_id = queue_to_pool_id(txq->id);

	while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	       !skb_queue_empty(&txq->queue)) {

		spin_lock_bh(&txq->lock);
		skb = skb_dequeue(&txq->queue);
		spin_unlock_bh(&txq->lock);

		tx_info = skb_to_tx_info(skb);
		sta_info = &iwm->sta_table[tx_info->sta];
		if (!sta_info->valid) {
			IWM_ERR(iwm, "Trying to send a frame to unknown STA\n");
			kfree_skb(skb);
			continue;
		}

		tid_info = &sta_info->tid_info[tx_info->tid];

		mutex_lock(&tid_info->mutex);

		/*
		 * If the RAxTID is stopped, we queue the skb on the stopped
		 * queue.
		 * When we get a UMAC notification to resume the Tx flow for
		 * this RAxTID, we merge the stopped queue back into the
		 * regular queue. See iwm_ntf_stop_resume_tx() in rx.c.
		 */
		if (tid_info->stopped) {
			IWM_DBG_TX(iwm, DBG, "%dx%d stopped\n",
				   tx_info->sta, tx_info->tid);
			spin_lock_bh(&txq->lock);
			skb_queue_tail(&txq->stopped_queue, skb);
			spin_unlock_bh(&txq->lock);

			mutex_unlock(&tid_info->mutex);
			continue;
		}

		cmdlen = IWM_UDMA_HDR_LEN + skb->len;

		IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
			   "%d, color: %d\n", txq->id, skb, tx_info->sta,
			   tx_info->color);

		if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
			iwm_tx_send_concat_packets(iwm, txq);

		ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen);
		if (ret) {
			IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
				   "%d, Tx worker stopped\n", txq->id);
			spin_lock_bh(&txq->lock);
			skb_queue_head(&txq->queue, skb);
			spin_unlock_bh(&txq->lock);

			mutex_unlock(&tid_info->mutex);
			break;
		}

		txq->concat_ptr = txq->concat_buf + txq->concat_count;
		tid_info->last_seq_num =
			iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
		txq->concat_count += ALIGN(cmdlen, 16);

		mutex_unlock(&tid_info->mutex);

		kfree_skb(skb);
	}

	iwm_tx_send_concat_packets(iwm, txq);

	if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) &&
	    !test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	    (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) {
		IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]", txq->id);
		netif_wake_subqueue(iwm_to_ndev(iwm), txq->id);
	}
}
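Two details of the worker are worth noting: commands are packed back to back into a concatenation buffer with each command padded to a 16-byte boundary (the ALIGN(cmdlen, 16) above), and the buffer is flushed before any command that would overflow it. A toy model of that accounting, with a placeholder buffer size and a print standing in for iwm_tx_send_concat_packets():

#include <stdio.h>

#define CONCAT_BUF_SIZE	8192	/* placeholder for IWM_HAL_CONCATENATE_BUF_SIZE */
#define ALIGN16(x)	(((x) + 15) & ~15)

static int concat_count;

static void flush_concat(void)	/* stands in for iwm_tx_send_concat_packets() */
{
	printf("flush %d bytes\n", concat_count);
	concat_count = 0;
}

/* Append one command of cmdlen bytes: flush first if it would not
 * fit, then advance the running count by the 16-byte-padded size. */
static void concat_append(int cmdlen)
{
	if (concat_count + cmdlen > CONCAT_BUF_SIZE)
		flush_concat();
	concat_count += ALIGN16(cmdlen);
}

int main(void)
{
	concat_append(100);	/* occupies 112 bytes after padding */
	concat_append(8100);	/* would overflow: flush 112, then append */
	flush_concat();		/* flush 8112 */
	return 0;
}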
Example no. 27
static void capi_signal(u16 applid, void *param)
{
	struct capidev *cdev = (struct capidev *)param;
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
	struct capiminor *mp;
	u16 datahandle;
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
	struct capincci *np;
	struct sk_buff *skb = NULL;
	u32 ncci;

	(void) (*capifuncs->capi_get_message) (applid, &skb);
	if (!skb) {
		printk(KERN_ERR "BUG: capi_signal: no skb\n");
		return;
	}

	if (CAPIMSG_COMMAND(skb->data) != CAPI_DATA_B3) {
		skb_queue_tail(&cdev->recvqueue, skb);
		wake_up_interruptible(&cdev->recvwait);
		return;
	}
	ncci = CAPIMSG_CONTROL(skb->data);
	for (np = cdev->nccis; np && np->ncci != ncci; np = np->next)
		;
	if (!np) {
		printk(KERN_ERR "BUG: capi_signal: ncci not found\n");
		skb_queue_tail(&cdev->recvqueue, skb);
		wake_up_interruptible(&cdev->recvwait);
		return;
	}
#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
	skb_queue_tail(&cdev->recvqueue, skb);
	wake_up_interruptible(&cdev->recvwait);
#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
	mp = np->minorp;
	if (!mp) {
		skb_queue_tail(&cdev->recvqueue, skb);
		wake_up_interruptible(&cdev->recvwait);
		return;
	}


	if (CAPIMSG_SUBCOMMAND(skb->data) == CAPI_IND) {
		datahandle = CAPIMSG_U16(skb->data, CAPIMSG_BASELEN+4+4+2);
#ifdef _DEBUG_DATAFLOW
		printk(KERN_DEBUG "capi_signal: DATA_B3_IND %u len=%d\n",
				datahandle, skb->len-CAPIMSG_LEN(skb->data));
#endif
		skb_queue_tail(&mp->inqueue, skb);
		mp->inbytes += skb->len;
		handle_minor_recv(mp);

	} else if (CAPIMSG_SUBCOMMAND(skb->data) == CAPI_CONF) {

		datahandle = CAPIMSG_U16(skb->data, CAPIMSG_BASELEN+4);
#ifdef _DEBUG_DATAFLOW
		printk(KERN_DEBUG "capi_signal: DATA_B3_CONF %u 0x%x\n",
				datahandle,
				CAPIMSG_U16(skb->data, CAPIMSG_BASELEN+4+2));
#endif
		kfree_skb(skb);
		(void)capiminor_del_ack(mp, datahandle);
		if (mp->tty) {
			if (mp->tty->ldisc.write_wakeup)
				mp->tty->ldisc.write_wakeup(mp->tty);
		} else {
			wake_up_interruptible(&mp->sendwait);
		}
		(void)handle_minor_send(mp);

	} else {
		/* oops, let the CAPI application handle it :-) */
		skb_queue_tail(&cdev->recvqueue, skb);
		wake_up_interruptible(&cdev->recvwait);
	}
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
}
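The CAPIMSG_U16 reads above pull a little-endian 16-bit field (the data handle) out of the raw message at a fixed byte offset. A one-function user-space equivalent, assuming only that CAPI messages are little-endian on the wire:

#include <stdint.h>
#include <stdio.h>

/* Little-endian u16 at byte offset off, as CAPIMSG_U16() does. */
static uint16_t msg_u16(const uint8_t *msg, unsigned int off)
{
	return (uint16_t)(msg[off] | (msg[off + 1] << 8));
}

int main(void)
{
	const uint8_t msg[10] = { [8] = 0x34, [9] = 0x12 };

	printf("0x%04x\n", msg_u16(msg, 8));	/* 0x1234 */
	return 0;
}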
Example no. 28
int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct iwm_priv *iwm = ndev_to_iwm(netdev);
	struct net_device *ndev = iwm_to_ndev(iwm);
	struct wireless_dev *wdev = iwm_to_wdev(iwm);
	struct iwm_tx_info *tx_info;
	struct iwm_tx_queue *txq;
	struct iwm_sta_info *sta_info;
	u8 *dst_addr, sta_id;
	u16 queue;
	int ret;


	if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
		IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: "
			   "not associated\n");
		netif_tx_stop_all_queues(netdev);
		goto drop;
	}

	queue = skb_get_queue_mapping(skb);
	BUG_ON(queue >= IWM_TX_DATA_QUEUES); /* no iPAN yet */

	txq = &iwm->txq[queue];

	/* No free space for Tx, tx_worker is too slow */
	if ((skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) ||
	    (skb_queue_len(&txq->stopped_queue) > IWM_TX_LIST_SIZE)) {
		IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue);
		netif_stop_subqueue(netdev, queue);
		return NETDEV_TX_BUSY;
	}

	ret = ieee80211_data_from_8023(skb, netdev->dev_addr, wdev->iftype,
				       iwm->bssid, 0);
	if (ret) {
		IWM_ERR(iwm, "build wifi header failed\n");
		goto drop;
	}

	dst_addr = ((struct ieee80211_hdr *)(skb->data))->addr1;

	for (sta_id = 0; sta_id < IWM_STA_TABLE_NUM; sta_id++) {
		sta_info = &iwm->sta_table[sta_id];
		if (sta_info->valid &&
		    !memcmp(dst_addr, sta_info->addr, ETH_ALEN))
			break;
	}

	if (sta_id == IWM_STA_TABLE_NUM) {
		IWM_ERR(iwm, "STA %pM not found in sta_table, Tx ignored\n",
			dst_addr);
		goto drop;
	}

	tx_info = skb_to_tx_info(skb);
	tx_info->sta = sta_id;
	tx_info->color = sta_info->color;
	/* UMAC uses TID 8 (vs. 0) for non QoS packets */
	if (sta_info->qos)
		tx_info->tid = skb->priority;
	else
		tx_info->tid = IWM_UMAC_MGMT_TID;

	spin_lock_bh(&iwm->txq[queue].lock);
	skb_queue_tail(&iwm->txq[queue].queue, skb);
	spin_unlock_bh(&iwm->txq[queue].lock);

	queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;
	return NETDEV_TX_OK;

 drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
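Together with iwm_tx_worker above, this xmit path implements queue backpressure with hysteresis: the subqueue is stopped once the backlog exceeds IWM_TX_LIST_SIZE (returning NETDEV_TX_BUSY) and is woken only after the worker drains it below half that. A sketch of the two thresholds; the list size is a placeholder value:

#include <stdbool.h>
#include <stdio.h>

#define TX_LIST_SIZE	64	/* placeholder for IWM_TX_LIST_SIZE */

/* Producer side: stop queueing once the backlog is too deep. */
static bool should_stop(int queued)
{
	return queued > TX_LIST_SIZE;
}

/* Consumer side: wake the queue only once half the backlog is gone.
 * The gap between the two thresholds is deliberate hysteresis; it
 * keeps the queue from flapping between stopped and started. */
static bool may_wake(int queued)
{
	return queued < TX_LIST_SIZE / 2;
}

int main(void)
{
	printf("stop=%d wake=%d\n", should_stop(65), may_wake(31));
	return 0;
}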
Example no. 29
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > VLAN_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
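The dequeue loop above treats anything shorter than an Ethernet header or longer than a VLAN-tagged frame as a length error. Isolated into a predicate, with ETH_HLEN and VLAN_ETH_FRAME_LEN carrying their usual kernel values of 14 and 1518:

#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN		14	/* Ethernet header length */
#define VLAN_ETH_FRAME_LEN	1518	/* max frame incl. VLAN tag */

/* The same range check rx_complete() applies to each frame. */
static bool rx_len_ok(unsigned int len)
{
	return len >= ETH_HLEN && len <= VLAN_ETH_FRAME_LEN;
}

int main(void)
{
	printf("%d %d %d\n", rx_len_ok(13), rx_len_ok(60), rx_len_ok(1600));
	/* prints: 0 1 0 */
	return 0;
}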
Example no. 30
File: tx.c Project: 03199618/linux
static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int id = tx_stat_byte & WL18XX_TX_STATUS_DESC_ID_MASK;
	bool tx_success;

	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("illegal id in tx completion: %d", id);
		return;
	}

	/* a zero bit indicates Tx success */
	tx_success = !(tx_stat_byte & BIT(WL18XX_TX_STATUS_STAT_BIT_IDX));

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* update the TX status info */
	if (tx_success && !(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;
	/*
	 * first pass info->control.vif while it's valid, and then fill out
	 * the info->status structures
	 */
	wl18xx_get_last_tx_rate(wl, info->control.vif, &info->status.rates[0]);

	info->status.rates[0].count = 1; /* no data about retries */
	info->status.ack_signal = -1;

	if (!tx_success)
		wl->stats.retry_count++;

	/*
	 * TODO: update sequence number for encryption? seems to be
	 * unsupported for now. needed for recovery with encryption.
	 */

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data, hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p success %d",
		     id, skb, tx_success);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, id);
}
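The tx_stat_byte unpacked at the top of this function carries a descriptor id in its low bits and a status flag in the remaining bit, with a zero bit meaning success. A sketch of the decoding; the 0x7f mask and bit index 7 are assumptions about the driver's WL18XX_TX_STATUS_* constants, chosen so the two fields exactly fill one byte:

#include <stdbool.h>
#include <stdio.h>

/* Assumed layout: low 7 bits = descriptor id, bit 7 = status. */
#define DESC_ID_MASK	0x7f
#define STAT_BIT_IDX	7

static void decode_tx_status(unsigned char tx_stat_byte)
{
	int id = tx_stat_byte & DESC_ID_MASK;
	/* A zero status bit means the firmware reports success. */
	bool success = !(tx_stat_byte & (1u << STAT_BIT_IDX));

	printf("desc %d: %s\n", id, success ? "acked" : "failed");
}

int main(void)
{
	decode_tx_status(0x05);	/* desc 5, acked */
	decode_tx_status(0x85);	/* desc 5, failed */
	return 0;
}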