Example #1
0
/*
 * dn_nsp_queue_xmit - queue an NSP segment and, flow permitting, send it
 * @sk:  DECnet socket owning the segment
 * @skb: segment to queue; ownership passes to the chosen xmit queue
 * @gfp: allocation flags used when cloning for transmission
 * @oth: non-zero selects the "other data" queue, zero the normal data queue
 */
void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb,
			gfp_t gfp, int oth)
{
	struct dn_scp *scp = DN_SK(sk);
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	/* idle threshold in jiffies: (srtt/4 + rttvar) / 2 */
	unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1;

	cb->xmit_count = 0;
	dn_nsp_mk_data_header(sk, skb, oth);

	/*
	 * Slow start: If we have been idle for more than
	 * one RTT, then reset window to min size.
	 */
	if ((jiffies - scp->stamp) > t)
		scp->snd_window = NSP_MIN_WINDOW;

	if (oth)
		skb_queue_tail(&scp->other_xmit_queue, skb);
	else
		skb_queue_tail(&scp->data_xmit_queue, skb);

	/* remote flow control says stop: leave the segment queued for later */
	if (scp->flowrem_sw != DN_SEND)
		return;

	dn_nsp_clone_and_send(skb, gfp);
}
Example #2
0
/*
 * handles intercepted messages that were arriving in the socket's Rx queue
 * - called with the socket receive queue lock held to ensure message ordering
 * - called with softirqs disabled
 * - user_call_ID carries the afs_call pointer registered with rxrpc; zero
 *   means the skb belongs to a not-yet-accepted incoming call
 */
static void afs_rx_interceptor(struct sock *sk, unsigned long user_call_ID,
			       struct sk_buff *skb)
{
	struct afs_call *call = (struct afs_call *) user_call_ID;

	_enter("%p,,%u", call, skb->mark);

	_debug("ICPT %p{%u} [%d]",
	       skb, skb->mark, atomic_read(&afs_outstanding_skbs));

	ASSERTCMP(sk, ==, afs_socket->sk);
	/* debug accounting of skbs held by the AFS layer */
	atomic_inc(&afs_outstanding_skbs);

	if (!call) {
		/* its an incoming call for our callback service */
		skb_queue_tail(&afs_incoming_calls, skb);
		queue_work(afs_wq, &afs_collect_incoming_call_work);
	} else {
		/* route the messages directly to the appropriate call */
		skb_queue_tail(&call->rx_queue, skb);
		call->wait_mode->rx_wakeup(call);
	}

	_leave("");
}
Example #3
0
static void r8712_usb_read_port_complete(struct urb *purb)
{
	uint isevt, *pbuf;
	struct recv_buf	*precvbuf = (struct recv_buf *)purb->context;
	struct _adapter *padapter = (struct _adapter *)precvbuf->adapter;
	struct recv_priv *precvpriv = &padapter->recvpriv;

	if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
		return;
	if (purb->status == 0) { /* SUCCESS */
		if ((purb->actual_length > (MAX_RECVBUF_SZ)) ||
		    (purb->actual_length < RXDESC_SIZE)) {
			r8712_read_port(padapter, precvpriv->ff_hwaddr, 0,
				  (unsigned char *)precvbuf);
		} else {
			_pkt *pskb = precvbuf->pskb;

			precvbuf->transfer_len = purb->actual_length;
			pbuf = (uint *)precvbuf->pbuf;
			isevt = le32_to_cpu(*(pbuf + 1)) & 0x1ff;
			if ((isevt & 0x1ff) == 0x1ff) {
				r8712_rxcmd_event_hdl(padapter, pbuf);
				skb_queue_tail(&precvpriv->rx_skb_queue, pskb);
				r8712_read_port(padapter, precvpriv->ff_hwaddr,
						0, (unsigned char *)precvbuf);
			} else {
				skb_put(pskb, purb->actual_length);
				skb_queue_tail(&precvpriv->rx_skb_queue, pskb);
				tasklet_hi_schedule(&precvpriv->recv_tasklet);
				r8712_read_port(padapter, precvpriv->ff_hwaddr,
						0, (unsigned char *)precvbuf);
			}
		}
	} else {
		switch (purb->status) {
		case -EINVAL:
		case -EPIPE:
		case -ENODEV:
		case -ESHUTDOWN:
			padapter->bDriverStopped = true;
			break;
		case -ENOENT:
			if (!padapter->bSuspended) {
				padapter->bDriverStopped = true;
				break;
			}
			/* Fall through. */
		case -EPROTO:
			r8712_read_port(padapter, precvpriv->ff_hwaddr, 0,
				  (unsigned char *)precvbuf);
			break;
		case -EINPROGRESS:
			netdev_err(padapter->pnetdev, "ERROR: URB IS IN PROGRESS!\n");
			break;
		default:
			break;
		}
	}
}
Example #4
0
/* may be called from two simultaneous tasklets */
static int brcm_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	struct brcm_struct *brcm = hu->priv;

	BT_DBG("hu %p skb %p", hu, skb);

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);

	skb_queue_tail(&brcm->txq, skb);

	/* NOTE(review): hcibrcm_state is read and written here without
	 * taking hcibrcm_lock (the locked state machine was dead code,
	 * now removed) — confirm callers serialise access. */
	brcm->is_there_activity = 1;
	if (brcm->hcibrcm_state == HCIBRCM_ASLEEP) {
		BT_DBG("Asserting wake signal, moves to AWAKE");
		/* assert BT_WAKE signal */
		assert_bt_wake();
		brcm->hcibrcm_state = HCIBRCM_AWAKE;
	}

	return 0;
}
/* may be called from two simultaneous tasklets */
static int ibs_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	unsigned long flags = 0;
	struct ibs_struct *ibs = hu->priv;

	BT_DBG("hu %p skb %p", hu, skb);

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);

	/* lock hci_ibs state */
	spin_lock_irqsave(&ibs->hci_ibs_lock, flags);

	/* act according to current state */
	switch (ibs->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		BT_DBG("device awake, sending normally");
		skb_queue_tail(&ibs->txq, skb);
		/* push back the TX inactivity deadline */
		mod_timer(&ibs->tx_idle_timer, jiffies + tx_idle_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
		BT_DBG("device asleep, waking up and queueing packet");
		ibs_msm_serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
		/* save packet for later */
		skb_queue_tail(&ibs->tx_wait_q, skb);
		/* awake device */
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
			BT_ERR("cannot send WAKE to device");
			break;
		}
		ibs->ibs_sent_wakes++; /* debug */

		/* start retransmit timer */
		mod_timer(&ibs->wake_retrans_timer, jiffies + wake_retrans);

		ibs->tx_ibs_state = HCI_IBS_TX_WAKING;
		break;

	case HCI_IBS_TX_WAKING:
		BT_DBG("device waking up, queueing packet");
		/* transient state; just keep packet for later */
		skb_queue_tail(&ibs->tx_wait_q, skb);
		break;

	default:
		BT_ERR("illegal tx state: %ld (losing packet)",
			ibs->tx_ibs_state);
		kfree_skb(skb);
		break;
	}

	spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);

	return 0;
}
/* may be called from two simultaneous tasklets */
/* NOTE(review): this is a second definition of ibs_enqueue in this file
 * (workqueue-based wake variant) — it will collide with the one above if
 * both are compiled into the same translation unit; confirm intent. */
static int ibs_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	unsigned long flags = 0;
	struct ibs_struct *ibs = hu->priv;

	BT_DBG("hu %p skb %p", hu, skb);

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);

	/* lock hci_ibs state */
	spin_lock_irqsave(&ibs->hci_ibs_lock, flags);

	/* act according to current state */
	switch (ibs->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		BT_DBG("device awake, sending normally");
		skb_queue_tail(&ibs->txq, skb);
		/* push back the TX inactivity deadline */
		mod_timer(&ibs->tx_idle_timer, jiffies + tx_idle_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
		/* QualComm baseLine update 1030 to 1040 begin */
		BT_DBG("device asleep, waking up and queueing packet");
		/* save packet for later */
		skb_queue_tail(&ibs->tx_wait_q, skb);
		ibs->tx_ibs_state = HCI_IBS_TX_WAKING;
		/* schedule a work queue to wake up device */
		ibs->ibs_wq_state = HCI_IBS_WQ_AWAKE_DEVICE;
		queue_work(ibs->workqueue, &ibs->ws_ibs);
		/* QualComm baseLine update 1030 to 1040 end */
		break;

	case HCI_IBS_TX_WAKING:
		BT_DBG("device waking up, queueing packet");
		/* transient state; just keep packet for later */
		skb_queue_tail(&ibs->tx_wait_q, skb);
		break;

	default:
		BT_ERR("illegal tx state: %ld (losing packet)",
			ibs->tx_ibs_state);
		kfree_skb(skb);
		break;
	}

	spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);

	return 0;
}
Example #7
0
/*
 * btuart_hci_send_frame - queue an HCI frame for UART transmission
 * @skb: frame to send; skb->dev carries the owning hci_dev
 *
 * Returns 0 on success, -ENODEV when the frame has no HCI device.
 */
static int btuart_hci_send_frame(struct sk_buff *skb)
{
	btuart_info_t *info;
	struct hci_dev *hdev = (struct hci_dev *)(skb->dev);

	if (!hdev) {
		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
		return -ENODEV;
	}

	info = (btuart_info_t *)(hdev->driver_data);

	/* Update per-type TX statistics. */
	switch (skb->pkt_type) {
	case HCI_COMMAND_PKT:
		hdev->stat.cmd_tx++;
		break;
	case HCI_ACLDATA_PKT:
		hdev->stat.acl_tx++;
		break;
	case HCI_SCODATA_PKT:
		hdev->stat.sco_tx++;
		break;
	default:
		/* unknown type: no counter to bump */
		break;
	} /* fix: removed stray ';' that followed the switch brace */

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &(skb->pkt_type), 1);
	skb_queue_tail(&(info->txq), skb);

	btuart_write_wakeup(info);

	return 0;
}
Example #8
0
/*
 * c4_release_appl - notify the card that a CAPI application was released
 * @ctrl: controller being released
 * @appl: CAPI application id
 *
 * Only the controller whose number matches the card builds a SEND_RELEASE
 * message, queues it on the DMA send queue, and kicks transmission.
 */
void c4_release_appl(struct capi_ctr *ctrl, __u16 appl)
{
	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
	avmcard *card = cinfo->card;
	struct sk_buff *skb;
	void *p;

	if (ctrl->cnr == card->cardnr) {
		/* 2 zero bytes + 1 command byte + 2-byte appl = 5 of 7 bytes */
		skb = alloc_skb(7, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_CRIT "%s: no memory, lost release appl.\n",
						card->name);
			return;
		}
		p = skb->data;
		_put_byte(&p, 0);
		_put_byte(&p, 0);
		_put_byte(&p, SEND_RELEASE);
		_put_word(&p, appl);

		/* commit exactly the bytes advanced by the cursor */
		skb_put(skb, (__u8 *)p - (__u8 *)skb->data);
		skb_queue_tail(&card->dma->send_queue, skb);
		c4_dispatch_tx(card);
	}
}
Example #9
0
/*
 * l3_msg - dispatch data-link primitives into the layer-3 state machine
 * @st:  protocol stack instance
 * @pr:  primitive (DL_* | REQUEST/CONFIRM/INDICATION)
 * @arg: primitive argument; an sk_buff for DL_DATA requests
 */
void
l3_msg(struct PStack *st, int pr, void *arg)
{
	switch (pr) {
	case (DL_DATA | REQUEST):
		if (st->l3.l3m.state == ST_L3_LC_ESTAB) {
			/* link established: pass straight down to L2 */
			st->l3.l3l2(st, pr, arg);
		} else {
			struct sk_buff *skb = arg;

			/* hold the frame and ask for link establishment */
			skb_queue_tail(&st->l3.squeue, skb);
			FsmEvent(&st->l3.l3m, EV_ESTABLISH_REQ, NULL);
		}
		break;
	case (DL_ESTABLISH | REQUEST):
		FsmEvent(&st->l3.l3m, EV_ESTABLISH_REQ, NULL);
		break;
	case (DL_ESTABLISH | CONFIRM):
		FsmEvent(&st->l3.l3m, EV_ESTABLISH_CNF, NULL);
		break;
	case (DL_ESTABLISH | INDICATION):
		FsmEvent(&st->l3.l3m, EV_ESTABLISH_IND, NULL);
		break;
	case (DL_RELEASE | INDICATION):
		FsmEvent(&st->l3.l3m, EV_RELEASE_IND, NULL);
		break;
	case (DL_RELEASE | CONFIRM):
		FsmEvent(&st->l3.l3m, EV_RELEASE_CNF, NULL);
		break;
	case (DL_RELEASE | REQUEST):
		FsmEvent(&st->l3.l3m, EV_RELEASE_REQ, NULL);
		break;
	}
}
/*
 * rose_transmit_link - send (or queue) a ROSE frame on a neighbour link
 * @skb:   frame to transmit; a PID byte is prepended here
 * @neigh: neighbour to send through
 */
void rose_transmit_link(struct sk_buff *skb, struct rose_neigh *neigh)
{
	unsigned char *pid;

	/* Loopback neighbours bypass the AX.25 link entirely. */
	if (neigh->loopback) {
		rose_loopback_queue(skb, neigh);
		return;
	}

	if (!rose_link_up(neigh))
		neigh->restarted = 0;

	pid = skb_push(skb, 1);
	*pid = AX25_P_ROSE;

	if (!neigh->restarted) {
		/* Link not restarted yet: queue the frame and make sure a
		 * restart request is in flight. */
		skb_queue_tail(&neigh->queue, skb);

		if (!rose_t0timer_running(neigh)) {
			rose_transmit_restart_request(neigh);
			neigh->dce_mode = 0;
			rose_start_t0timer(neigh);
		}
		return;
	}

	if (!rose_send_frame(skb, neigh))
		kfree_skb(skb);
}
Example #11
0
/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	/* RX is paused (e.g. during link bring-up): park the skb */
	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	skb->protocol = eth_type_trans (skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof (struct ethhdr), skb->protocol);
	/* scrub driver-private state before the stack sees the skb */
	memset (skb->cb, 0, sizeof (struct skb_data));

	/* timestamping may consume the skb; if so we are done */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
Example #12
0
/*
 * nci_send_data - queue an NCI data packet (fragmenting if needed) and
 * kick the TX worker.
 * @ndev:    NCI device
 * @conn_id: logical connection id
 * @skb:     payload; consumed on both success and failure
 *
 * Returns 0 on success or a negative error from fragmentation.
 */
int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
{
	int err;

	nfc_dbg("entry, conn_id 0x%x, plen %d", conn_id, skb->len);

	if (skb->len <= ndev->max_data_pkt_payload_size) {
		/* fits in a single packet: add header and queue as-is */
		nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST);
		skb_queue_tail(&ndev->tx_q, skb);
	} else {
		/* too large: split into fragments and queue those */
		err = nci_queue_tx_data_frags(ndev, conn_id, skb);
		if (err) {
			nfc_err("failed to fragment tx data packet");
			kfree_skb(skb);
			return err;
		}
	}

	queue_work(ndev->tx_wq, &ndev->tx_work);
	return 0;
}
Example #13
0
/*
 * hisax_b_l2l1 - forward L2 primitives for a B-channel to the L1 driver
 * @st:  protocol stack instance
 * @pr:  primitive code
 * @arg: primitive argument (an sk_buff for data primitives)
 */
static void hisax_b_l2l1(struct PStack *st, int pr, void *arg)
{
	struct BCState *bcs = st->l1.bcs;
	struct hisax_b_if *b_if = bcs->hw.b_if;

	switch (pr) {
	case PH_ACTIVATE | REQUEST:
		B_L2L1(b_if, pr, (void *)(unsigned long)st->l1.mode);
		break;
	case PH_DATA | REQUEST:
	case PH_PULL | INDICATION:
		// FIXME lock?
		if (!test_and_set_bit(BC_FLG_BUSY, &bcs->Flag)) {
			B_L2L1(b_if, PH_DATA | REQUEST, arg);
		} else {
			/* transmitter busy: park the frame for later */
			skb_queue_tail(&bcs->squeue, arg);
		}
		break;
	case PH_PULL | REQUEST:
		if (!test_bit(BC_FLG_BUSY, &bcs->Flag))
			st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
		else
			set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
		break;
	case PH_DEACTIVATE | REQUEST:
		test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
		skb_queue_purge(&bcs->squeue);
		/* fall through: the primitive is also forwarded below */
	default:
		B_L2L1(b_if, pr, arg);
		break;
	}
}
Example #14
0
/*
 * b1dma_send_message - queue a CAPI message on the card's DMA send
 * queue and kick transmission.
 */
void b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
{
	avmctrl_info *info = (avmctrl_info *)(ctrl->driverdata);
	avmcard *c = info->card;

	skb_queue_tail(&c->dma->send_queue, skb);
	b1dma_dispatch_tx(c);
}
Example #15
0
/*
 * nci_send_cmd - build an NCI control packet and hand it to the command
 * worker.
 * @ndev:    NCI device
 * @opcode:  command opcode (gid/oid packed)
 * @plen:    payload length in bytes
 * @payload: payload bytes, may be NULL when @plen is 0
 *
 * Returns 0 on success, -ENOMEM when the skb cannot be allocated.
 */
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
{
	struct sk_buff *cmd;
	struct nci_ctrl_hdr *hdr;

	pr_debug("opcode 0x%x, plen %d\n", opcode, plen);

	cmd = nci_skb_alloc(ndev, NCI_CTRL_HDR_SIZE + plen, GFP_KERNEL);
	if (!cmd) {
		pr_err("no memory for command\n");
		return -ENOMEM;
	}

	/* Control header: gid/oid from the opcode, then MT/PBF bits. */
	hdr = (struct nci_ctrl_hdr *)skb_put(cmd, NCI_CTRL_HDR_SIZE);
	hdr->gid = nci_opcode_gid(opcode);
	hdr->oid = nci_opcode_oid(opcode);
	hdr->plen = plen;
	nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
	nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);

	if (plen)
		memcpy(skb_put(cmd, plen), payload, plen);

	cmd->dev = (void *)ndev;

	skb_queue_tail(&ndev->cmd_q, cmd);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);

	return 0;
}
Example #16
0
File: tun.c Project: VTun/tun
/* Net device start xmit
 *
 * Queues the frame for the character-device reader and wakes any
 * waiters.  Returns 1 (stack should retry) while the device is busy,
 * 0 once the frame is queued.
 */
static int tun_net_xmit(struct sk_buff *skb, struct device *dev)
{
   struct tun_struct *tun = (struct tun_struct *)dev->priv;

   DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->name, skb->len);

   /* already marked busy: tell the stack to requeue */
   if( dev->tbusy )
      return 1;

   /* Queue frame */ 
   skb_queue_tail(&tun->txq, skb);
   if( skb_queue_len(&tun->txq) >= TUN_TXQ_SIZE )
      dev->tbusy = 1;

   /* notify async readers; two kill_fasync() signatures exist */
   if( tun->flags & TUN_FASYNC )
#ifdef NEW_FASYNC 
      kill_fasync(&tun->fasync, SIGIO, POLL_IN);
#else     
      kill_fasync(&tun->fasync, SIGIO);
#endif

   /* Wake up process */ 
   wake_up_interruptible(&tun->read_wait);

   return 0;
}
Example #17
0
/*
 * Append a packet to the transmit buffer-queue.
 *
 * Parameter:
 *   channel = Number of B-channel
 *   skb     = packet to send.
 *   card    = pointer to card-struct
 * Return:
 *   Number of bytes transferred, -E??? on error
 */
static int
isdnloop_sendbuf(int channel, struct sk_buff *skb, isdnloop_card * card)
{
	int len = skb->len;
	unsigned long flags;
	struct sk_buff *nskb;

	if (len > 4000) {
		printk(KERN_WARNING
		       "isdnloop: Send packet too large\n");
		return -EINVAL;
	}
	if (len) {
		/*
		 * Fix: '&' binds tighter than '?:', so the original
		 * "card->flags & (channel) ? A : B" evaluated
		 * (card->flags & channel) first and then yielded a
		 * non-zero flag constant either way, so !(...) could
		 * never be true.  Select the per-channel flag first,
		 * then test it against card->flags.
		 */
		if (!(card->flags & ((channel) ?
				     ISDNLOOP_FLAGS_B2ACTIVE :
				     ISDNLOOP_FLAGS_B1ACTIVE)))
			return 0;
		if (card->sndcount[channel] > ISDNLOOP_MAX_SQUEUE)
			return 0;
		spin_lock_irqsave(&card->isdnloop_lock, flags);
		nskb = dev_alloc_skb(skb->len);
		if (nskb) {
			skb_copy_from_linear_data(skb,
						  skb_put(nskb, len), len);
			skb_queue_tail(&card->bqueue[channel], nskb);
			dev_kfree_skb(skb);
		} else
			len = 0;	/* copy failed: report 0 bytes */
		card->sndcount[channel] += len;
		spin_unlock_irqrestore(&card->isdnloop_lock, flags);
	}
	return len;
}
Example #18
0
/*
 * Append a packet to the transmit buffer-queue.
 *
 * Parameter:
 *   channel = Number of B-channel
 *   skb     = packet to send.
 *   card    = pointer to card-struct
 * Return:
 *   Number of bytes transferred, -E??? on error
 */
static int
isdnloop_sendbuf(int channel, struct sk_buff *skb, isdnloop_card * card)
{
	int len = skb->len;
	unsigned long flags;
	struct sk_buff *nskb;

	if (len > 4000) {
		printk(KERN_WARNING
		       "isdnloop: Send packet too large\n");
		return -EINVAL;
	}
	if (len) {
		/*
		 * Fix: '&' binds tighter than '?:', so the original
		 * expression tested (card->flags & channel) and then
		 * produced a non-zero flag constant either way, making
		 * the !(...) guard always false.  Pick the per-channel
		 * flag first, then test it.
		 */
		if (!(card->flags & ((channel) ?
				     ISDNLOOP_FLAGS_B2ACTIVE :
				     ISDNLOOP_FLAGS_B1ACTIVE)))
			return 0;
		if (card->sndcount[channel] > ISDNLOOP_MAX_SQUEUE)
			return 0;
		save_flags(flags);
		cli();
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb) {
			skb_queue_tail(&card->bqueue[channel], nskb);
			dev_kfree_skb(skb);
		} else
			len = 0;	/* clone failed: report 0 bytes */
		card->sndcount[channel] += len;
		restore_flags(flags);
	}
	return len;
}
Example #19
0
/*
 * generic_restore_queues - rebuild a socket's receive queue from a
 * checkpoint image.
 * @sk:  socket being restored
 * @si:  checkpoint image describing the queued skbs
 * @pos: file offset of the image
 * @ctx: checkpoint/restore context
 *
 * Walks the image records between cpt_hdrlen and cpt_next, re-queueing
 * CPT_SKB_RQ entries on sk_receive_queue.  Returns 0 on success or a
 * negative error from skb restoration.
 */
static int generic_restore_queues(struct sock *sk, struct cpt_sock_image *si,
                                  loff_t pos, struct cpt_context *ctx)
{
    loff_t endpos;

    pos = pos + si->cpt_hdrlen;
    endpos = pos + si->cpt_next;
    while (pos < endpos) {
        struct sk_buff *skb;
        __u32 type;

        skb = rst_skb(sk, &pos, NULL, &type, ctx);
        if (IS_ERR(skb)) {
            /* -EINVAL marks a non-skb record: try it as a sock attr */
            if (PTR_ERR(skb) == -EINVAL) {
                int err;

                err = rst_sock_attr(&pos, sk, ctx);
                if (err)
                    return err;
            }
            return PTR_ERR(skb);
        }

        if (type == CPT_SKB_RQ) {
            /* charge the skb to the socket's rcv memory accounting */
            skb_set_owner_r(skb, sk);
            skb_queue_tail(&sk->sk_receive_queue, skb);
        } else {
            wprintk_ctx("strange socket queue type %u\n", type);
            kfree_skb(skb);
        }
    }
    return 0;
}
Example #20
0
/*
 * vhci_send_frame - push an HCI frame onto the virtual device's read
 * queue and wake readers.
 *
 * Returns 0 on success, -ENODEV for an unknown device, -EBUSY when the
 * device is not running.
 */
static int vhci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	struct vhci_data *vhci;

	if (!hdev) {
		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
		return -ENODEV;
	}

	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -EBUSY;

	vhci = hdev->driver_data;

	/* Prepend the frame-type byte, then hand off to the reader side. */
	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
	skb_queue_tail(&vhci->readq, skb);

	if (vhci->flags & VHCI_FASYNC)
		kill_fasync(&vhci->fasync, SIGIO, POLL_IN);
	wake_up_interruptible(&vhci->read_wait);

	return 0;
}
Example #21
0
/*
 * nci_send_data - queue an NCI data packet (fragmenting when needed)
 * and kick the TX worker.  Consumes @skb on both success and failure.
 */
int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
{
	int rc = 0;

	pr_debug("conn_id 0x%x, plen %d\n", conn_id, skb->len);

	/* check whether the packet needs to be fragmented */
	if (skb->len <= ndev->max_data_pkt_payload_size) {
		/* fits in one packet: add header and queue as-is */
		nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST);

		skb_queue_tail(&ndev->tx_q, skb);
	} else {
		/* fragment the packet and queue the fragments */
		rc = nci_queue_tx_data_frags(ndev, conn_id, skb);
		if (rc) {
			pr_err("failed to fragment tx data packet\n");
			goto free_exit;
		}
	}

	queue_work(ndev->tx_wq, &ndev->tx_work);

	goto exit;

free_exit:
	kfree_skb(skb);

exit:
	return rc;
}
Example #22
0
/*
 * Builds and sends an HCI_IBS command packet.
 * These are very simple packets with only 1 cmd byte
 *
 * Returns 0 on success, -ENOMEM when the skb cannot be allocated.
 */
static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
{
	int err = 0;
	struct sk_buff *skb = NULL;
	struct ibs_struct *ibs = hu->priv;
	struct hci_ibs_cmd *hci_ibs_packet;

	BT_DBG("hu %p cmd 0x%x", hu, cmd);

	/* allocate packet */
	skb = bt_skb_alloc(1, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("cannot allocate memory for HCI_IBS packet");
		err = -ENOMEM;
		goto out;
	}

	/* prepare packet */
	hci_ibs_packet = (struct hci_ibs_cmd *) skb_put(skb, 1);
	hci_ibs_packet->cmd = cmd;
	skb->dev = (void *) hu->hdev;

	/* send packet
	 * NOTE(review): only queues on txq — presumably the caller
	 * triggers actual transmission; confirm against callers. */
	skb_queue_tail(&ibs->txq, skb);
out:
	return err;
}
Example #23
0
/*
 * modem_l2l1 - L2->L1 primitive handler for the ELSA modem B-channel
 * @st:  protocol stack instance
 * @pr:  primitive code
 * @arg: sk_buff for PH_DATA requests, unused otherwise
 */
static void
modem_l2l1(struct PStack *st, int pr, void *arg)
{
	struct BCState *bcs = st->l1.bcs;
	struct sk_buff *skb = arg;
	u_long flags;

	if (pr == (PH_DATA | REQUEST)) {
		spin_lock_irqsave(&bcs->cs->lock, flags);
		if (bcs->tx_skb) {
			/* a transmission is in progress: queue behind it */
			skb_queue_tail(&bcs->squeue, skb);
		} else {
			bcs->tx_skb = skb;
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			bcs->hw.hscx.count = 0;
			write_modem(bcs);
		}
		spin_unlock_irqrestore(&bcs->cs->lock, flags);
	} else if (pr == (PH_ACTIVATE | REQUEST)) {
		test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
		st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
		set_arcofi(bcs->cs, st->l1.bc);
		mstartup(bcs->cs);
		modem_set_dial(bcs->cs, test_bit(FLG_ORIG, &st->l2.flag));
		bcs->cs->hw.elsa.MFlag=2;
	} else if (pr == (PH_DEACTIVATE | REQUEST)) {
		test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
		bcs->cs->dc.isac.arcofi_bc = st->l1.bc;
		arcofi_fsm(bcs->cs, ARCOFI_START, &ARCOFI_XOP_0);
		interruptible_sleep_on(&bcs->cs->dc.isac.arcofi_wait);
		bcs->cs->hw.elsa.MFlag=1;
	} else {
		/* unhandled primitive; debug statement was stripped here */
;
	}
}
Example #24
0
/*
 * purge_egress_shortcut - tell the MPOA daemon to purge an egress
 * shortcut by queueing a DATA_PLANE_PURGE k_message on the VCC's socket.
 */
static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
{
    struct k_message *msg;
    struct sk_buff *skb;
    struct sock *sk;

    dprintk("mpoa: purge_egress_shortcut: entering\n");

    if (!vcc) {
        printk("mpoa: purge_egress_shortcut: vcc == NULL\n");
        return;
    }

    skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC);
    if (!skb) {
        printk("mpoa: purge_egress_shortcut: out of memory\n");
        return;
    }

    /* Build a zeroed purge message in the skb. */
    skb_put(skb, sizeof(struct k_message));
    memset(skb->data, 0, sizeof(struct k_message));
    msg = (struct k_message *)skb->data;
    msg->type = DATA_PLANE_PURGE;
    if (entry)
        msg->content.eg_info = entry->ctrl_info;

    /* Charge the skb to the VCC, then hand it to the daemon's socket. */
    atm_force_charge(vcc, skb->truesize);
    sk = sk_atm(vcc);
    skb_queue_tail(&sk->sk_receive_queue, skb);
    sk->sk_data_ready(sk, skb->len);

    dprintk("mpoa: purge_egress_shortcut: exiting:\n");
}
Example #25
0
/*
 * hisax_d_l2l1 - forward L2 primitives for the D-channel to the L1 driver
 * @st:  protocol stack instance
 * @pr:  primitive code
 * @arg: sk_buff for data primitives
 */
static void hisax_d_l2l1(struct PStack *st, int pr, void *arg)
{
	struct IsdnCardState *cs = st->l1.hardware;
	struct hisax_d_if *hisax_d_if = cs->hw.hisax_d_if;
	struct sk_buff *skb = arg;

	switch (pr) {
	case PH_DATA | REQUEST:
	case PH_PULL | INDICATION:
		/* optional debug dumps of the outgoing frame */
		if (cs->debug & DEB_DLOG_HEX)
			LogFrame(cs, skb->data, skb->len);
		if (cs->debug & DEB_DLOG_VERBOSE)
			dlogframe(cs, skb, 0);
		Logl2Frame(cs, skb, "PH_DATA_REQ", 0);
		// FIXME lock?
		if (!test_and_set_bit(FLG_L1_DBUSY, &cs->HW_Flags))
			D_L2L1(hisax_d_if, PH_DATA | REQUEST, skb);
		else
			/* L1 busy: queue the frame for later */
			skb_queue_tail(&cs->sq, skb);
		break;
	case PH_PULL | REQUEST:
		if (!test_bit(FLG_L1_DBUSY, &cs->HW_Flags))
			st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
		else
			set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
		break;
	default:
		D_L2L1(hisax_d_if, pr, arg);
		break;
	}
}
Example #26
0
/**
 *	llc_station_send_pdu - queues PDU to send
 *	@skb: Address of the PDU
 *
 *	Queues a PDU to send to the MAC layer, then drains the queue,
 *	stopping at the first PDU that dev_queue_xmit() rejects.
 */
static void llc_station_send_pdu(struct sk_buff *skb)
{
    skb_queue_tail(&llc_main_station.mac_pdu_q, skb);
    while ((skb = skb_dequeue(&llc_main_station.mac_pdu_q)) != NULL)
        if (dev_queue_xmit(skb))
            break;
}
Example #27
0
/*
 * Copy of sock_queue_rcv_skb (from sock.h) without
 * bh_lock_sock() (its already held when this is called) which
 * also allows data and other data to be queued to a socket.
 *
 * Returns 0 on success, -ENOMEM when the receive buffer is full, or a
 * negative error from the socket filter.
 */
static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
{
	int err;

	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned int)sk->sk_rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_filter(sk, skb);
	if (err)
		goto out;

	/* charge the skb to the socket and queue it */
	skb_set_owner_r(skb, sk);
	skb_queue_tail(queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
out:
	return err;
}
Example #28
0
/**
 *	llc_station_state_process: queue event and try to process queue.
 *	@skb: Address of the event
 *
 *	Queues an event (on the station event queue) for handling by the
 *	station state machine and attempts to process any queued-up events.
 */
static void llc_station_state_process(struct sk_buff *skb)
{
    /* ev_q.lock serialises enqueue with event servicing */
    spin_lock_bh(&llc_main_station.ev_q.lock);
    skb_queue_tail(&llc_main_station.ev_q.list, skb);
    llc_station_service_events();
    spin_unlock_bh(&llc_main_station.ev_q.lock);
}
Example #29
0
/*
 * rx_process - run the link driver's rx_fixup on a received skb and
 * either pass it up the stack or recycle it onto the done queue.
 */
static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
{
	if (dev->driver_info->rx_fixup &&
	    !dev->driver_info->rx_fixup (dev, skb)) {
		/* With RX_ASSEMBLE, rx_fixup() must update counters */
		if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
			dev->net->stats.rx_errors++;
		goto done;
	}
	// else network stack removes extra byte if we forced a short packet

	if (skb->len) {
		/* all data was already cloned from skb inside the driver */
		if (dev->driver_info->flags & FLAG_MULTI_PACKET)
			dev_kfree_skb_any(skb);
		else
			usbnet_skb_return(dev, skb);
		return;
	}

	/* zero-length frame: count it as an rx error */
	netif_dbg(dev, rx_err, dev->net, "drop\n");
	dev->net->stats.rx_errors++;
done:
	skb_queue_tail(&dev->done, skb);
}
Example #30
0
/**
 * rsi_rx_done_handler() - This function is called when a packet is received
 *			   from USB stack. This is callback to recieve done.
 * @urb: Received URB.
 *
 * Return: None.
 */
static void rsi_rx_done_handler(struct urb *urb)
{
	struct rx_usb_ctrl_block *rx_cb = urb->context;
	struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)rx_cb->data;
	int status = -EINVAL;

	if (urb->status)
		goto out;

	/* NOTE(review): actual_length is presumably unsigned, so '<= 0'
	 * behaves as '== 0' here — confirm against struct urb. */
	if (urb->actual_length <= 0) {
		rsi_dbg(INFO_ZONE, "%s: Zero length packet\n", __func__);
		goto out;
	}
	if (skb_queue_len(&dev->rx_q) >= RSI_MAX_RX_PKTS) {
		rsi_dbg(INFO_ZONE, "Max RX packets reached\n");
		goto out;
	}
	skb_put(rx_cb->rx_skb, urb->actual_length);
	skb_queue_tail(&dev->rx_q, rx_cb->rx_skb);

	/* wake the RX thread to drain the queue */
	rsi_set_event(&dev->rx_thread.event);
	status = 0;

out:
	/* always re-arm the RX URB, even on the error paths above */
	if (rsi_rx_urb_submit(dev->priv, rx_cb->ep_num))
		rsi_dbg(ERR_ZONE, "%s: Failed in urb submission", __func__);

	/* skb was not queued: release it */
	if (status)
		dev_kfree_skb(rx_cb->rx_skb);
}