Example #1
File: hdlc_ppp.c Project: mdamt/linux
static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
		      u8 id, unsigned int len, const void *data)
{
	struct sk_buff *skb;
	struct cp_header *cp;
	unsigned int magic_len = 0;
	static u32 magic;

#if DEBUG_CP
	int i;
	char *ptr;
#endif

	if (pid == PID_LCP && (code == LCP_ECHO_REQ || code == LCP_ECHO_REPLY))
		magic_len = sizeof(magic);

	skb = dev_alloc_skb(sizeof(struct hdlc_header) +
			    sizeof(struct cp_header) + magic_len + len);
	if (!skb) {
		netdev_warn(dev, "out of memory in ppp_tx_cp()\n");
		return;
	}
	skb_reserve(skb, sizeof(struct hdlc_header));

	cp = skb_put(skb, sizeof(struct cp_header));
	cp->code = code;
	cp->id = id;
	cp->len = htons(sizeof(struct cp_header) + magic_len + len);

	if (magic_len)
		skb_put_data(skb, &magic, magic_len);
	if (len)
		skb_put_data(skb, data, len);

#if DEBUG_CP
	BUG_ON(code >= CP_CODES);
	ptr = debug_buffer;
	*ptr = '\x0';
	for (i = 0; i < min_t(unsigned int, magic_len + len, DEBUG_CP); i++) {
		sprintf(ptr, " %02X", skb->data[sizeof(struct cp_header) + i]);
		ptr += strlen(ptr);
	}
	printk(KERN_DEBUG "%s: TX %s [%s id 0x%X]%s\n", dev->name,
	       proto_name(pid), code_names[code], id, debug_buffer);
#endif

	ppp_hard_header(skb, dev, pid, NULL, NULL, 0);

	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb_reset_network_header(skb);
	skb_queue_tail(&tx_queue, skb);
}
Example #2
/* Allocate a SKB and copy packet data to it */
static struct sk_buff *cfv_alloc_and_copy_skb(int *err,
					      struct cfv_info *cfv,
					      u8 *frm, u32 frm_len)
{
	struct sk_buff *skb;
	u32 cfpkt_len, pad_len;

	*err = 0;
	/* Verify the frame length against the MRU and the down-link header/trailer sizes */
	if (frm_len > cfv->mru || frm_len <= cfv->rx_hr + cfv->rx_tr) {
		netdev_err(cfv->ndev,
			   "Invalid frmlen:%u  mtu:%u hr:%d tr:%d\n",
			   frm_len, cfv->mru,  cfv->rx_hr,
			   cfv->rx_tr);
		*err = -EPROTO;
		return NULL;
	}

	cfpkt_len = frm_len - (cfv->rx_hr + cfv->rx_tr);
	pad_len = (unsigned long)(frm + cfv->rx_hr) & (IP_HDR_ALIGN - 1);

	skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len);
	if (!skb) {
		*err = -ENOMEM;
		return NULL;
	}

	skb_reserve(skb, cfv->rx_hr + pad_len);

	skb_put_data(skb, frm + cfv->rx_hr, cfpkt_len);
	return skb;
}
Example #3
File: x25_asy.c Project: Lyude/linux
static void x25_asy_bump(struct x25_asy *sl)
{
	struct net_device *dev = sl->dev;
	struct sk_buff *skb;
	int count;
	int err;

	count = sl->rcount;
	dev->stats.rx_bytes += count;

	skb = dev_alloc_skb(count+1);
	if (skb == NULL) {
		netdev_warn(sl->dev, "memory squeeze, dropping packet\n");
		dev->stats.rx_dropped++;
		return;
	}
	skb_push(skb, 1);	/* LAPB internal control */
	skb_put_data(skb, sl->rbuff, count);
	skb->protocol = x25_type_trans(skb, sl->dev);
	err = lapb_data_received(skb->dev, skb);
	if (err != LAPB_OK) {
		kfree_skb(skb);
		printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
	} else {
		netif_rx(skb);
		dev->stats.rx_packets++;
	}
}
Example #4
File: dn_nsp_out.c Project: avagin/linux
void dn_send_conn_conf(struct sock *sk, gfp_t gfp)
{
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff *skb = NULL;
	struct nsp_conn_init_msg *msg;
	__u8 len = (__u8)le16_to_cpu(scp->conndata_out.opt_optl);

	if ((skb = dn_alloc_skb(sk, 50 + len, gfp)) == NULL)
		return;

	msg = skb_put(skb, sizeof(*msg));
	msg->msgflg = 0x28;
	msg->dstaddr = scp->addrrem;
	msg->srcaddr = scp->addrloc;
	msg->services = scp->services_loc;
	msg->info = scp->info_loc;
	msg->segsize = cpu_to_le16(scp->segsize_loc);

	skb_put_u8(skb, len);

	if (len > 0)
		skb_put_data(skb, scp->conndata_out.opt_data, len);


	dn_nsp_send(skb);

	scp->persist = dn_nsp_persist(sk);
	scp->persist_fxn = dn_nsp_retrans_conn_conf;
}
Example #5
static void
at86rf230_rx_read_frame_complete(void *context)
{
	struct at86rf230_state_change *ctx = context;
	struct at86rf230_local *lp = ctx->lp;
	const u8 *buf = ctx->buf;
	struct sk_buff *skb;
	u8 len, lqi;

	len = buf[1];
	if (!ieee802154_is_valid_psdu_len(len)) {
		dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
		len = IEEE802154_MTU;
	}
	lqi = buf[2 + len];

	skb = dev_alloc_skb(IEEE802154_MTU);
	if (!skb) {
		dev_vdbg(&lp->spi->dev, "failed to allocate sk_buff\n");
		kfree(ctx);
		return;
	}

	skb_put_data(skb, buf + 2, len);
	ieee802154_rx_irqsafe(lp->hw, skb, lqi);
	kfree(ctx);
}
Example #6
struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
{
	struct sk_buff *skb2;
	struct sk_buff *skb = pkt_to_skb(pkt);
	struct cfpkt *tmppkt;
	u8 *split = skb->data + pos;
	u16 len2nd = skb_tail_pointer(skb) - split;

	if (unlikely(is_erronous(pkt)))
		return NULL;

	if (skb->data + pos > skb_tail_pointer(skb)) {
		PKT_ERROR(pkt, "trying to split beyond end of packet\n");
		return NULL;
	}

	/* Create a new packet for the second part of the data */
	tmppkt = cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX,
				  PKT_PREFIX);
	if (tmppkt == NULL)
		return NULL;
	skb2 = pkt_to_skb(tmppkt);


	if (skb2 == NULL)
		return NULL;

	skb_put_data(skb2, split, len2nd);

	/* Reduce the length of the original packet */
	skb_trim(skb, pos);

	skb2->priority = skb->priority;
	return skb_to_pkt(skb2);
}
Example #7
rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
{
	struct r8192_priv   *priv = ieee80211_priv(dev);
	struct sk_buff	    *skb;
	struct cb_desc	    *tcb_desc;

	/* Get TCB and local buffer from common pool.
	 * (It is shared by CmdQ, MgntQ, and USB coalesce DataQ)
	 */
	skb  = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
	if (!skb)
		return RT_STATUS_FAILURE;
	memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
	tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
	tcb_desc->queue_index = TXCMD_QUEUE;
	tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_NORMAL;
	tcb_desc->bLastIniPkt = 0;
	skb_reserve(skb, USB_HWDESC_HEADER_LEN);
	skb_put_data(skb, pData, DataLen);
	tcb_desc->txbuf_size = (u16)DataLen;

	if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
	    (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index])) ||
	    (priv->ieee80211->queue_stop)) {
		RT_TRACE(COMP_FIRMWARE, "=== NULL packet ======> tx full!\n");
		skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
	} else {
		priv->ieee80211->softmac_hard_start_xmit(skb, dev);
	}

	return RT_STATUS_SUCCESS;
}
Example #8
File: ipheth.c Project: Lyude/linux
static void ipheth_rcvbulk_callback(struct urb *urb)
{
	struct ipheth_device *dev;
	struct sk_buff *skb;
	int status;
	char *buf;
	int len;

	dev = urb->context;
	if (dev == NULL)
		return;

	status = urb->status;
	switch (status) {
	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
		return;
	case 0:
		break;
	default:
		dev_err(&dev->intf->dev, "%s: urb status: %d\n",
			__func__, status);
		return;
	}

	if (urb->actual_length <= IPHETH_IP_ALIGN) {
		dev->net->stats.rx_length_errors++;
		return;
	}
	len = urb->actual_length - IPHETH_IP_ALIGN;
	buf = urb->transfer_buffer + IPHETH_IP_ALIGN;

	skb = dev_alloc_skb(len);
	if (!skb) {
		dev_err(&dev->intf->dev, "%s: dev_alloc_skb: -ENOMEM\n",
			__func__);
		dev->net->stats.rx_dropped++;
		return;
	}

	skb_put_data(skb, buf, len);
	skb->dev = dev->net;
	skb->protocol = eth_type_trans(skb, dev->net);

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += len;
	dev->confirmed_pairing = true;
	netif_rx(skb);
	ipheth_rx_submit(dev, GFP_ATOMIC);
}
Example #9
File: rx.c Project: mdamt/linux
/*
 * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211
 *
 * Adds the rxb to a new skb and give it to mac80211
 */
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
					    struct ieee80211_sta *sta,
					    struct napi_struct *napi,
					    struct sk_buff *skb,
					    struct ieee80211_hdr *hdr, u16 len,
					    u8 crypt_len,
					    struct iwl_rx_cmd_buffer *rxb)
{
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	unsigned int fraglen;

	/*
	 * The 'hdrlen' (plus the 8 bytes for the SNAP and the crypt_len,
	 * but those are all multiples of 4 long) all goes away, but we
	 * want the *end* of it, which is going to be the start of the IP
	 * header, to be aligned when it gets pulled in.
	 * The beginning of the skb->data is aligned on at least a 4-byte
	 * boundary after allocation. Everything here is aligned at least
	 * on a 2-byte boundary so we can just take hdrlen & 3 and pad by
	 * the result.
	 */
	skb_reserve(skb, hdrlen & 3);

	/* If frame is small enough to fit in skb->head, pull it completely.
	 * If not, only pull ieee80211_hdr (including crypto if present, and
	 * an additional 8 bytes for SNAP/ethertype, see below) so that
	 * splice() or TCP coalesce are more efficient.
	 *
	 * Since, in addition, ieee80211_data_to_8023() always pulls in at
	 * least 8 bytes (possibly more for mesh) we can do the same here
	 * to save the cost of doing it later. That still doesn't pull in
	 * the actual IP header since the typical case has a SNAP header.
	 * If the latter changes (there are efforts in the standards group
	 * to do so) we should revisit this and ieee80211_data_to_8023().
	 */
	hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;

	skb_put_data(skb, hdr, hdrlen);
	fraglen = len - hdrlen;

	if (fraglen) {
		int offset = (void *)hdr + hdrlen -
			     rxb_addr(rxb) + rxb_offset(rxb);

		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
				fraglen, rxb->truesize);
	}

	ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}
Example #10
static struct sk_buff *
mt76x02u_mcu_msg_alloc(const void *data, int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, MT_CMD_HDR_LEN);
	skb_put_data(skb, data, len);

	return skb;
}
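
The MT_CMD_HDR_LEN headroom reserved above is what lets the MCU layer prepend its command header later with skb_push() instead of reallocating. A minimal, hypothetical illustration of that follow-up step (the 32-bit header and the example_* name are assumptions for this sketch, not the actual mt76 header format):

/* Hypothetical illustration: prepend a command header into the headroom
 * reserved by mt76x02u_mcu_msg_alloc(); the header layout here is made up. */
static void example_mcu_push_hdr(struct sk_buff *skb, u32 info)
{
	__le32 *txd;

	txd = skb_push(skb, sizeof(*txd));	/* consumes the reserved headroom */
	*txd = cpu_to_le32(info);
}
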
Example #11
struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
			   struct cfpkt *addpkt,
			   u16 expectlen)
{
	struct sk_buff *dst = pkt_to_skb(dstpkt);
	struct sk_buff *add = pkt_to_skb(addpkt);
	u16 addlen = skb_headlen(add);
	u16 neededtailspace;
	struct sk_buff *tmp;
	u16 dstlen;
	u16 createlen;
	if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) {
		return dstpkt;
	}
	if (expectlen > addlen)
		neededtailspace = expectlen;
	else
		neededtailspace = addlen;

	if (dst->tail + neededtailspace > dst->end) {
		/* Create a duplicate of 'dst' with more tail space */
		struct cfpkt *tmppkt;
		dstlen = skb_headlen(dst);
		createlen = dstlen + neededtailspace;
		tmppkt = cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX);
		if (tmppkt == NULL)
			return NULL;
		tmp = pkt_to_skb(tmppkt);
		skb_put_data(tmp, dst->data, dstlen);
		cfpkt_destroy(dstpkt);
		dst = tmp;
	}
	skb_put_data(dst, add->data, skb_headlen(add));
	cfpkt_destroy(addpkt);
	return skb_to_pkt(dst);
}
Example #12
File: isdnloop.c Project: mdamt/linux
/*
 * Simulate a card's response by appending it to the card's
 * message queue.
 *
 * Parameter:
 *   card = pointer to card struct.
 *   s    = pointer to message-string.
 *   ch   = channel: 0 = generic messages, 1 and 2 = D-channel messages.
 * Return:
 *   0 on success, 1 on memory squeeze.
 */
static int
isdnloop_fake(isdnloop_card *card, char *s, int ch)
{
	struct sk_buff *skb;
	int len = strlen(s) + ((ch >= 0) ? 3 : 0);
	skb = dev_alloc_skb(len);
	if (!skb) {
		printk(KERN_WARNING "isdnloop: Out of memory in isdnloop_fake\n");
		return 1;
	}
	if (ch >= 0)
		sprintf(skb_put(skb, 3), "%02d;", ch);
	skb_put_data(skb, s, strlen(s));
	skb_queue_tail(&card->dqueue, skb);
	return 0;
}
Example #13
static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
				const void *param, u8 len)
{
	struct sk_buff *skb;
	struct hci_command_hdr *hdr;

	if (priv->surprise_removed) {
		BT_ERR("Card is removed");
		return -EFAULT;
	}

	skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_KERNEL);
	if (!skb) {
		BT_ERR("No free skb");
		return -ENOMEM;
	}

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = len;

	if (len)
		skb_put_data(skb, param, len);

	hci_skb_pkt_type(skb) = MRVL_VENDOR_PKT;

	skb_queue_head(&priv->adapter->tx_queue, skb);

	priv->btmrvl_dev.sendcmdflag = true;

	priv->adapter->cmd_complete = false;

	wake_up_interruptible(&priv->main_thread.wait_q);

	if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
					      priv->adapter->cmd_complete ||
					      priv->surprise_removed,
					      WAIT_UNTIL_CMD_RESP))
		return -ETIMEDOUT;

	if (priv->surprise_removed)
		return -EFAULT;

	return 0;
}
Example #14
File: hci_qca.c Project: Anjali05/linux
static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct qca_serdev *qcadev;
	struct sk_buff *skb;
	u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };

	if (baudrate > QCA_BAUDRATE_3200000)
		return -EINVAL;

	cmd[4] = baudrate;

	skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
	if (!skb) {
		bt_dev_err(hdev, "Failed to allocate baudrate packet");
		return -ENOMEM;
	}

	/* Assign commands to change baudrate and packet type. */
	skb_put_data(skb, cmd, sizeof(cmd));
	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;

	skb_queue_tail(&qca->txq, skb);
	hci_uart_tx_wakeup(hu);

	qcadev = serdev_device_get_drvdata(hu->serdev);

	/* Wait for the baudrate change request to be sent */

	while (!skb_queue_empty(&qca->txq))
		usleep_range(100, 200);

	serdev_device_wait_until_sent(hu->serdev,
		      msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));

	/* Give the controller time to process the request */
	if (qcadev->btsoc_type == QCA_WCN3990)
		msleep(10);
	else
		msleep(300);

	return 0;
}
Example #15
File: i2c.c Project: AlexShiLucky/linux
static int nfcmrvl_i2c_read(struct nfcmrvl_i2c_drv_data *drv_data,
			    struct sk_buff **skb)
{
	int ret;
	struct nci_ctrl_hdr nci_hdr;

	/* Read NCI header to know the payload size */
	ret = i2c_master_recv(drv_data->i2c, (u8 *)&nci_hdr, NCI_CTRL_HDR_SIZE);
	if (ret != NCI_CTRL_HDR_SIZE) {
		nfc_err(&drv_data->i2c->dev, "cannot read NCI header\n");
		return -EBADMSG;
	}

	if (nci_hdr.plen > NCI_MAX_PAYLOAD_SIZE) {
		nfc_err(&drv_data->i2c->dev, "invalid packet payload size\n");
		return -EBADMSG;
	}

	*skb = nci_skb_alloc(drv_data->priv->ndev,
			     nci_hdr.plen + NCI_CTRL_HDR_SIZE, GFP_KERNEL);
	if (!*skb)
		return -ENOMEM;

	/* Copy NCI header into the SKB */
	skb_put_data(*skb, &nci_hdr, NCI_CTRL_HDR_SIZE);

	if (nci_hdr.plen) {
		/* Read the NCI payload */
		ret = i2c_master_recv(drv_data->i2c,
				      skb_put(*skb, nci_hdr.plen),
				      nci_hdr.plen);

		if (ret != nci_hdr.plen) {
			nfc_err(&drv_data->i2c->dev,
				"Invalid frame payload length: %u (expected %u)\n",
				ret, nci_hdr.plen);
			kfree_skb(*skb);
			return -EBADMSG;
		}
	}

	return 0;
}
Example #16
File: netvsc_drv.c Project: mdamt/linux
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct napi_struct *napi,
					     const struct ndis_tcp_ip_checksum_info *csum_info,
					     const struct ndis_pkt_8021q_info *vlan,
					     void *data, u32 buflen)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed to
	 * by hv_netvsc_packet cannot be deallocated.
	 */
	skb_put_data(skb, data, buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/*
	 * In Linux, the IP checksum is always checked.
	 * Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}
Example #17
File: i2c.c Project: mdamt/linux
static int s3fwrn5_i2c_read(struct s3fwrn5_i2c_phy *phy)
{
	struct sk_buff *skb;
	size_t hdr_size;
	size_t data_len;
	char hdr[4];
	int ret;

	hdr_size = (phy->mode == S3FWRN5_MODE_NCI) ?
		NCI_CTRL_HDR_SIZE : S3FWRN5_FW_HDR_SIZE;
	ret = i2c_master_recv(phy->i2c_dev, hdr, hdr_size);
	if (ret < 0)
		return ret;

	if (ret < hdr_size)
		return -EBADMSG;

	data_len = (phy->mode == S3FWRN5_MODE_NCI) ?
		((struct nci_ctrl_hdr *)hdr)->plen :
		((struct s3fwrn5_fw_header *)hdr)->len;

	skb = alloc_skb(hdr_size + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, hdr, hdr_size);

	if (data_len == 0)
		goto out;

	ret = i2c_master_recv(phy->i2c_dev, skb_put(skb, data_len), data_len);
	if (ret != data_len) {
		kfree_skb(skb);
		return -EBADMSG;
	}

out:
	return s3fwrn5_recv_frame(phy->ndev, skb, phy->mode);
}
Example #18
File: pcie.c Project: Lyude/linux
static void qtnf_pcie_control_rx_callback(void *arg, const u8 *buf, size_t len)
{
	struct qtnf_pcie_bus_priv *priv = arg;
	struct qtnf_bus *bus = pci_get_drvdata(priv->pdev);
	struct sk_buff *skb;

	if (unlikely(len == 0)) {
		pr_warn("zero length packet received\n");
		return;
	}

	skb = __dev_alloc_skb(len, GFP_KERNEL);

	if (unlikely(!skb)) {
		pr_err("failed to allocate skb\n");
		return;
	}

	skb_put_data(skb, buf, len);

	qtnf_trans_handle_rx_ctl_packet(bus, skb);
}
Example #19
File: net.c Project: avagin/linux
static int comp_rx_data(struct mbo *mbo)
{
	const u32 zero = 0;
	struct net_dev_context *nd;
	char *buf = mbo->virt_address;
	u32 len = mbo->processed_length;
	struct sk_buff *skb;
	struct net_device *dev;
	unsigned int skb_len;
	int ret = 0;

	nd = get_net_dev_hold(mbo->ifp);
	if (!nd)
		return -EIO;

	if (nd->rx.ch_id != mbo->hdm_channel_id) {
		ret = -EIO;
		goto put_nd;
	}

	dev = nd->dev;

	if (nd->is_mamac) {
		if (!pms_is_mamac(buf, len)) {
			ret = -EIO;
			goto put_nd;
		}

		skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
	} else {
		if (!PMS_IS_MEP(buf, len)) {
			ret = -EIO;
			goto put_nd;
		}

		skb = dev_alloc_skb(len - MEP_HDR_LEN);
	}

	if (!skb) {
		dev->stats.rx_dropped++;
		pr_err_once("drop packet: no memory for skb\n");
		goto out;
	}

	skb->dev = dev;

	if (nd->is_mamac) {
		/* dest */
		ether_addr_copy(skb_put(skb, ETH_ALEN), dev->dev_addr);

		/* src */
		skb_put_data(skb, &zero, 4);
		skb_put_data(skb, buf + 5, 2);

		/* eth type */
		skb_put_data(skb, buf + 10, 2);

		buf += MDP_HDR_LEN;
		len -= MDP_HDR_LEN;
	} else {
		buf += MEP_HDR_LEN;
		len -= MEP_HDR_LEN;
	}

	skb_put_data(skb, buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	skb_len = skb->len;
	if (netif_rx(skb) == NET_RX_SUCCESS) {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb_len;
	} else {
		dev->stats.rx_dropped++;
	}

out:
	most_put_mbo(mbo);

put_nd:
	dev_put(nd->dev);
	return ret;
}
Example #20
/* Incoming data */
static void zd1201_usbrx(struct urb *urb)
{
	struct zd1201 *zd = urb->context;
	int free = 0;
	unsigned char *data = urb->transfer_buffer;
	struct sk_buff *skb;
	unsigned char type;

	if (!zd)
		return;

	switch(urb->status) {
		case -EILSEQ:
		case -ENODEV:
		case -ETIME:
		case -ENOENT:
		case -EPIPE:
		case -EOVERFLOW:
		case -ESHUTDOWN:
			dev_warn(&zd->usb->dev, "%s: rx urb failed: %d\n",
			    zd->dev->name, urb->status);
			free = 1;
			goto exit;
	}
	
	if (urb->status != 0 || urb->actual_length == 0)
		goto resubmit;

	type = data[0];
	if (type == ZD1201_PACKET_EVENTSTAT || type == ZD1201_PACKET_RESOURCE) {
		memcpy(zd->rxdata, data, urb->actual_length);
		zd->rxlen = urb->actual_length;
		zd->rxdatas = 1;
		wake_up(&zd->rxdataq);
	}
	/* Info frame */
	if (type == ZD1201_PACKET_INQUIRE) {
		int i = 0;
		unsigned short infotype, copylen;
		infotype = le16_to_cpu(*(__le16*)&data[6]);

		if (infotype == ZD1201_INF_LINKSTATUS) {
			short linkstatus;

			linkstatus = le16_to_cpu(*(__le16*)&data[8]);
			switch(linkstatus) {
				case 1:
					netif_carrier_on(zd->dev);
					break;
				case 2:
					netif_carrier_off(zd->dev);
					break;
				case 3:
					netif_carrier_off(zd->dev);
					break;
				case 4:
					netif_carrier_on(zd->dev);
					break;
				default:
					netif_carrier_off(zd->dev);
			}
			goto resubmit;
		}
		if (infotype == ZD1201_INF_ASSOCSTATUS) {
			short status = le16_to_cpu(*(__le16*)(data+8));
			int event;
			union iwreq_data wrqu;

			switch (status) {
				case ZD1201_ASSOCSTATUS_STAASSOC:
				case ZD1201_ASSOCSTATUS_REASSOC:
					event = IWEVREGISTERED;
					break;
				case ZD1201_ASSOCSTATUS_DISASSOC:
				case ZD1201_ASSOCSTATUS_ASSOCFAIL:
				case ZD1201_ASSOCSTATUS_AUTHFAIL:
				default:
					event = IWEVEXPIRED;
			}
			memcpy(wrqu.addr.sa_data, data+10, ETH_ALEN);
			wrqu.addr.sa_family = ARPHRD_ETHER;

			/* Send event to user space */
			wireless_send_event(zd->dev, event, &wrqu, NULL);

			goto resubmit;
		}
		if (infotype == ZD1201_INF_AUTHREQ) {
			union iwreq_data wrqu;

			memcpy(wrqu.addr.sa_data, data+8, ETH_ALEN);
			wrqu.addr.sa_family = ARPHRD_ETHER;
			/* There isn't an event that truly fits this request.
			   We assume that userspace will be smart enough to
			   see a new station being expired and send back an
			   authstation ioctl to authorize it. */
			wireless_send_event(zd->dev, IWEVEXPIRED, &wrqu, NULL);
			goto resubmit;
		}
		/* Other infotypes are handled outside this handler */
		zd->rxlen = 0;
		while (i < urb->actual_length) {
			copylen = le16_to_cpu(*(__le16*)&data[i+2]);
			/* Sanity check, sometimes we get junk */
			if (copylen+zd->rxlen > sizeof(zd->rxdata))
				break;
			memcpy(zd->rxdata+zd->rxlen, data+i+4, copylen);
			zd->rxlen += copylen;
			i += 64;
		}
		if (i >= urb->actual_length) {
			zd->rxdatas = 1;
			wake_up(&zd->rxdataq);
		}
		goto resubmit;
	}
	/* Actual data */
	if (data[urb->actual_length-1] == ZD1201_PACKET_RXDATA) {
		int datalen = urb->actual_length-1;
		unsigned short len, fc, seq;

		len = ntohs(*(__be16 *)&data[datalen-2]);
		if (len>datalen)
			len=datalen;
		fc = le16_to_cpu(*(__le16 *)&data[datalen-16]);
		seq = le16_to_cpu(*(__le16 *)&data[datalen-24]);

		if (zd->monitor) {
			if (datalen < 24)
				goto resubmit;
			if (!(skb = dev_alloc_skb(datalen+24)))
				goto resubmit;
			
			skb_put_data(skb, &data[datalen - 16], 2);
			skb_put_data(skb, &data[datalen - 2], 2);
			skb_put_data(skb, &data[datalen - 14], 6);
			skb_put_data(skb, &data[datalen - 22], 6);
			skb_put_data(skb, &data[datalen - 8], 6);
			skb_put_data(skb, &data[datalen - 24], 2);
			skb_put_data(skb, data, len);
			skb->protocol = eth_type_trans(skb, zd->dev);
			zd->dev->stats.rx_packets++;
			zd->dev->stats.rx_bytes += skb->len;
			netif_rx(skb);
			goto resubmit;
		}
			
		if ((seq & IEEE80211_SCTL_FRAG) ||
		    (fc & IEEE80211_FCTL_MOREFRAGS)) {
			struct zd1201_frag *frag = NULL;
			char *ptr;

			if (datalen<14)
				goto resubmit;
			if ((seq & IEEE80211_SCTL_FRAG) == 0) {
				frag = kmalloc(sizeof(*frag), GFP_ATOMIC);
				if (!frag)
					goto resubmit;
				skb = dev_alloc_skb(IEEE80211_MAX_DATA_LEN +14+2);
				if (!skb) {
					kfree(frag);
					goto resubmit;
				}
				frag->skb = skb;
				frag->seq = seq & IEEE80211_SCTL_SEQ;
				skb_reserve(skb, 2);
				skb_put_data(skb, &data[datalen - 14], 12);
				skb_put_data(skb, &data[6], 2);
				skb_put_data(skb, data + 8, len);
				hlist_add_head(&frag->fnode, &zd->fraglist);
				goto resubmit;
			}
			hlist_for_each_entry(frag, &zd->fraglist, fnode)
				if (frag->seq == (seq&IEEE80211_SCTL_SEQ))
					break;
			if (!frag)
				goto resubmit;
			skb = frag->skb;
			ptr = skb_put(skb, len);
			if (ptr)
				memcpy(ptr, data+8, len);
			if (fc & IEEE80211_FCTL_MOREFRAGS)
				goto resubmit;
			hlist_del_init(&frag->fnode);
			kfree(frag);
		} else {
			if (datalen<14)
Example #21
static netdev_tx_t wilc_wfi_mon_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	u32 rtap_len, ret = 0;
	struct wilc_wfi_mon_priv  *mon_priv;
	struct sk_buff *skb2;
	struct wilc_wfi_radiotap_cb_hdr *cb_hdr;

	if (!wilc_wfi_mon)
		return -EFAULT;

	mon_priv = netdev_priv(wilc_wfi_mon);
	if (!mon_priv)
		return -EFAULT;
	rtap_len = ieee80211_get_radiotap_len(skb->data);
	if (skb->len < rtap_len)
		return -1;

	skb_pull(skb, rtap_len);

	if (skb->data[0] == 0xc0 && is_broadcast_ether_addr(&skb->data[4])) {
		skb2 = dev_alloc_skb(skb->len + sizeof(*cb_hdr));
		if (!skb2)
			return -ENOMEM;

		skb_put_data(skb2, skb->data, skb->len);

		cb_hdr = skb_push(skb2, sizeof(*cb_hdr));
		memset(cb_hdr, 0, sizeof(struct wilc_wfi_radiotap_cb_hdr));

		cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */

		cb_hdr->hdr.it_len = cpu_to_le16(sizeof(*cb_hdr));

		cb_hdr->hdr.it_present = cpu_to_le32(TX_RADIOTAP_PRESENT);

		cb_hdr->rate = 5;
		cb_hdr->tx_flags = 0x0004;

		skb2->dev = wilc_wfi_mon;
		skb_reset_mac_header(skb2);
		skb2->ip_summed = CHECKSUM_UNNECESSARY;
		skb2->pkt_type = PACKET_OTHERHOST;
		skb2->protocol = htons(ETH_P_802_2);
		memset(skb2->cb, 0, sizeof(skb2->cb));

		netif_rx(skb2);

		return 0;
	}
	skb->dev = mon_priv->real_ndev;

	memcpy(srcadd, &skb->data[10], 6);
	memcpy(bssid, &skb->data[16], 6);
	/*
	 * Identify whether this is a data or mgmt packet; if the source address
	 * and bssid fields are equal, send it to the mgmt frames handler.
	 */
	if (!(memcmp(srcadd, bssid, 6))) {
		ret = mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len);
		if (ret)
			netdev_err(dev, "fail to mgmt tx\n");
		dev_kfree_skb(skb);
	} else {
		ret = wilc_mac_xmit(skb, mon_priv->real_ndev);
	}

	return ret;
}
Example #22
void wilc_wfi_monitor_rx(u8 *buff, u32 size)
{
	u32 header, pkt_offset;
	struct sk_buff *skb = NULL;
	struct wilc_wfi_radiotap_hdr *hdr;
	struct wilc_wfi_radiotap_cb_hdr *cb_hdr;

	if (!wilc_wfi_mon)
		return;

	if (!netif_running(wilc_wfi_mon))
		return;

	/* Get WILC header */
	memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
	le32_to_cpus(&header);
	/*
	 * The packet offset field contains info about what type of management
	 * frame we are dealing with, and the ack status.
	 */
	pkt_offset = GET_PKT_OFFSET(header);

	if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
		/* hostapd callback mgmt frame */

		skb = dev_alloc_skb(size + sizeof(*cb_hdr));
		if (!skb)
			return;

		skb_put_data(skb, buff, size);

		cb_hdr = skb_push(skb, sizeof(*cb_hdr));
		memset(cb_hdr, 0, sizeof(*cb_hdr));

		cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */

		cb_hdr->hdr.it_len = cpu_to_le16(sizeof(*cb_hdr));

		cb_hdr->hdr.it_present = cpu_to_le32(TX_RADIOTAP_PRESENT);

		cb_hdr->rate = 5;

		if (pkt_offset & IS_MGMT_STATUS_SUCCES)	{
			/* success */
			cb_hdr->tx_flags = IEEE80211_RADIOTAP_F_TX_RTS;
		} else {
			cb_hdr->tx_flags = IEEE80211_RADIOTAP_F_TX_FAIL;
		}

	} else {
		skb = dev_alloc_skb(size + sizeof(*hdr));

		if (!skb)
			return;

		skb_put_data(skb, buff, size);
		hdr = skb_push(skb, sizeof(*hdr));
		memset(hdr, 0, sizeof(struct wilc_wfi_radiotap_hdr));
		hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
		hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
		hdr->hdr.it_present = cpu_to_le32
				(1 << IEEE80211_RADIOTAP_RATE);
		hdr->rate = 5;
	}

	skb->dev = wilc_wfi_mon;
	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);
	memset(skb->cb, 0, sizeof(skb->cb));

	netif_rx(skb);
}
Example #23
File: dn_nsp_out.c Project: avagin/linux
void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
{
	struct dn_scp *scp = DN_SK(sk);
	struct nsp_conn_init_msg *msg;
	unsigned char aux;
	unsigned char menuver;
	struct dn_skb_cb *cb;
	unsigned char type = 1;
	gfp_t allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC;
	struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation);

	if (!skb)
		return;

	cb  = DN_SKB_CB(skb);
	msg = skb_put(skb, sizeof(*msg));

	msg->msgflg	= msgflg;
	msg->dstaddr	= 0x0000;		/* Remote Node will assign it*/

	msg->srcaddr	= scp->addrloc;
	msg->services	= scp->services_loc;	/* Requested flow control    */
	msg->info	= scp->info_loc;	/* Version Number            */
	msg->segsize	= cpu_to_le16(scp->segsize_loc);	/* Max segment size  */

	if (scp->peer.sdn_objnum)
		type = 0;

	skb_put(skb, dn_sockaddr2username(&scp->peer,
					  skb_tail_pointer(skb), type));
	skb_put(skb, dn_sockaddr2username(&scp->addr,
					  skb_tail_pointer(skb), 2));

	menuver = DN_MENUVER_ACC | DN_MENUVER_USR;
	if (scp->peer.sdn_flags & SDF_PROXY)
		menuver |= DN_MENUVER_PRX;
	if (scp->peer.sdn_flags & SDF_UICPROXY)
		menuver |= DN_MENUVER_UIC;

	skb_put_u8(skb, menuver);	/* Menu Version		*/

	aux = scp->accessdata.acc_userl;
	skb_put_u8(skb, aux);
	if (aux > 0)
		skb_put_data(skb, scp->accessdata.acc_user, aux);

	aux = scp->accessdata.acc_passl;
	skb_put_u8(skb, aux);
	if (aux > 0)
		skb_put_data(skb, scp->accessdata.acc_pass, aux);

	aux = scp->accessdata.acc_accl;
	skb_put_u8(skb, aux);
	if (aux > 0)
		skb_put_data(skb, scp->accessdata.acc_acc, aux);

	aux = (__u8)le16_to_cpu(scp->conndata_out.opt_optl);
	skb_put_u8(skb, aux);
	if (aux > 0)
		skb_put_data(skb, scp->conndata_out.opt_data, aux);

	scp->persist = dn_nsp_persist(sk);
	scp->persist_fxn = dn_nsp_retrans_conninit;

	cb->rt_flags = DN_RT_F_RQR;

	dn_nsp_send(skb);
}
Example #24
static void
jade_interrupt(struct IsdnCardState *cs, u_char val, u_char jade)
{
	u_char r;
	struct BCState *bcs = cs->bcs + jade;
	struct sk_buff *skb;
	int fifo_size = 32;
	int count;
	int i_jade = (int) jade; /* To satisfy the compiler */

	if (!test_bit(BC_FLG_INIT, &bcs->Flag))
		return;

	if (val & 0x80) {	/* RME */
		r = READJADE(cs, i_jade, jade_HDLC_RSTA);
		if ((r & 0xf0) != 0xa0) {
			if (!(r & 0x80))
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "JADE %s invalid frame", (jade ? "B" : "A"));
			if ((r & 0x40) && bcs->mode)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "JADE %c RDO mode=%d", 'A' + jade, bcs->mode);
			if (!(r & 0x20))
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "JADE %c CRC error", 'A' + jade);
			WriteJADECMDR(cs, jade, jade_HDLC_RCMD, jadeRCMD_RMC);
		} else {
			count = READJADE(cs, i_jade, jade_HDLC_RBCL) & 0x1F;
			if (count == 0)
				count = fifo_size;
			jade_empty_fifo(bcs, count);
			if ((count = bcs->hw.hscx.rcvidx - 1) > 0) {
				if (cs->debug & L1_DEB_HSCX_FIFO)
					debugl1(cs, "HX Frame %d", count);
				if (!(skb = dev_alloc_skb(count)))
					printk(KERN_WARNING "JADE %s receive out of memory\n", (jade ? "B" : "A"));
				else {
					skb_put_data(skb, bcs->hw.hscx.rcvbuf,
						     count);
					skb_queue_tail(&bcs->rqueue, skb);
				}
			}
		}
		bcs->hw.hscx.rcvidx = 0;
		schedule_event(bcs, B_RCVBUFREADY);
	}
	if (val & 0x40) {	/* RPF */
		jade_empty_fifo(bcs, fifo_size);
		if (bcs->mode == L1_MODE_TRANS) {
			/* receive audio data */
			if (!(skb = dev_alloc_skb(fifo_size)))
				printk(KERN_WARNING "HiSax: receive out of memory\n");
			else {
				skb_put_data(skb, bcs->hw.hscx.rcvbuf,
					     fifo_size);
				skb_queue_tail(&bcs->rqueue, skb);
			}
			bcs->hw.hscx.rcvidx = 0;
			schedule_event(bcs, B_RCVBUFREADY);
		}
	}
	if (val & 0x10) {	/* XPR */
		if (bcs->tx_skb) {
			if (bcs->tx_skb->len) {
				jade_fill_fifo(bcs);
				return;
			} else {
				if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
				    (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
					u_long	flags;
					spin_lock_irqsave(&bcs->aclock, flags);
					bcs->ackcnt += bcs->hw.hscx.count;
					spin_unlock_irqrestore(&bcs->aclock, flags);
					schedule_event(bcs, B_ACKPENDING);
				}
				dev_kfree_skb_irq(bcs->tx_skb);
				bcs->hw.hscx.count = 0;
				bcs->tx_skb = NULL;
			}
		}
		if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
			bcs->hw.hscx.count = 0;
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			jade_fill_fifo(bcs);
		} else {
			test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
			schedule_event(bcs, B_XMTBUFREADY);
		}
	}
}
Example #25
static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
{
	struct rtllib_device *ieee = (struct rtllib_device *)
				     netdev_priv_rsl(dev);
	struct rtllib_txb *txb = NULL;
	struct rtllib_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	int qos_actived = ieee->current_network.qos_data.active;
	u8 dest[ETH_ALEN];
	u8 src[ETH_ALEN];
	struct lib80211_crypt_data *crypt = NULL;
	struct cb_desc *tcb_desc;
	u8 bIsMulticast = false;
	u8 IsAmsdu = false;
	bool	bdhcp = false;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it...
	 */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features &
	   IEEE_SOFTMAC_TX_QUEUE)) ||
	   ((!ieee->softmac_data_hard_start_xmit &&
	   (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		netdev_warn(ieee->dev, "No xmit handler.\n");
		goto success;
	}


	if (likely(ieee->raw_tx == 0)) {
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			netdev_warn(ieee->dev, "skb too small (%d).\n",
				    skb->len);
			goto success;
		}
		/* Save source and destination addresses */
		ether_addr_copy(dest, skb->data);
		ether_addr_copy(src, skb->data + ETH_ALEN);

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		if (ieee->iw_mode == IW_MODE_MONITOR) {
			txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
			if (unlikely(!txb)) {
				netdev_warn(ieee->dev,
					    "Could not allocate TXB\n");
				goto failed;
			}

			txb->encrypted = 0;
			txb->payload_size = cpu_to_le16(skb->len);
			skb_put_data(txb->fragments[0], skb->data, skb->len);

			goto success;
		}

		if (skb->len > 282) {
			if (ether_type == ETH_P_IP) {
				const struct iphdr *ip = (struct iphdr *)
					((u8 *)skb->data+14);
				if (ip->protocol == IPPROTO_UDP) {
					struct udphdr *udp;

					udp = (struct udphdr *)((u8 *)ip +
					      (ip->ihl << 2));
					if (((((u8 *)udp)[1] == 68) &&
					   (((u8 *)udp)[3] == 67)) ||
					   ((((u8 *)udp)[1] == 67) &&
					   (((u8 *)udp)[3] == 68))) {
						bdhcp = true;
						ieee->LPSDelayCnt = 200;
					}
				}
			} else if (ether_type == ETH_P_ARP) {
				netdev_info(ieee->dev,
					    "=================>DHCP Protocol start tx ARP pkt!!\n");
				bdhcp = true;
				ieee->LPSDelayCnt =
					 ieee->current_network.tim.tim_count;
			}
		}

		skb->priority = rtllib_classify(skb, IsAmsdu);
		crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;
		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE -
				sizeof(u16));
			netdev_dbg(ieee->dev,
				   "TX: IEEE 802.11 EAPOL frame: %s\n",
				   eap_get_type(eap->type));
		}

		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
		else
			fc = RTLLIB_FTYPE_DATA;

		if (qos_actived)
			fc |= RTLLIB_STYPE_QOS_DATA;
		else
			fc |= RTLLIB_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= RTLLIB_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			 * Addr3 = DA
			 */
			ether_addr_copy(header.addr1,
					ieee->current_network.bssid);
			ether_addr_copy(header.addr2, src);
			if (IsAmsdu)
				ether_addr_copy(header.addr3,
						ieee->current_network.bssid);
			else
				ether_addr_copy(header.addr3, dest);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			 * Addr3 = BSSID
			 */
			ether_addr_copy(header.addr1, dest);
			ether_addr_copy(header.addr2, src);
			ether_addr_copy(header.addr3,
					ieee->current_network.bssid);
		}

		bIsMulticast = is_multicast_ether_addr(header.addr1);

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		 * and broadcast are not fragmented)
		 */
		if (bIsMulticast) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		} else {
			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		if (qos_actived) {
			hdr_len = RTLLIB_3ADDR_LEN + 2;

			/* In case we are a client, verify that ACM is not set for this AC */
			while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
				netdev_info(ieee->dev, "skb->priority = %x\n",
						skb->priority);
				if (wme_downgrade_ac(skb))
					break;
				netdev_info(ieee->dev, "converted skb->priority = %x\n",
					   skb->priority);
			}

			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);

		} else {
			hdr_len = RTLLIB_3ADDR_LEN;
		}
		/* Determine amount of payload per fragment.  Regardless of whether
		 * this stack is providing the full 802.11 header, one will
		 * eventually be affixed to this fragment -- so we must account
		 * for it when determining the amount of payload space.
		 */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
			bytes_per_frag -= RTLLIB_FCS_LEN;

		/* Each fragment may need to have room for encrypting
		 * pre/postfix
		 */
		if (encrypt) {
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
				crypt->ops->extra_mpdu_postfix_len +
				crypt->ops->extra_msdu_prefix_len +
				crypt->ops->extra_msdu_postfix_len;
		}
		/* Number of fragments is the total bytes_per_frag /
		 * payload_per_fragment
		 */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for the
		 * reserve and full fragment bytes (bytes_per_frag doesn't
		 * include prefix, postfix, header, FCS, etc.)
		 */
		txb = rtllib_alloc_txb(nr_frags, frag_size +
				       ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			netdev_warn(ieee->dev, "Could not allocate TXB\n");
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = cpu_to_le16(bytes);

		if (qos_actived)
			txb->queue_index = UP2AC(skb->priority);
		else
			txb->queue_index = WME_AC_BE;

		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (struct cb_desc *)(skb_frag->cb +
				    MAX_DEV_ADDR_SIZE);
			if (qos_actived) {
				skb_frag->priority = skb->priority;
				tcb_desc->queue_index =  UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BE;
				tcb_desc->queue_index = WME_AC_BE;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt) {
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag,
					    crypt->ops->extra_mpdu_prefix_len +
					    crypt->ops->extra_msdu_prefix_len);
			} else {
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = skb_put_data(skb_frag, &header, hdr_len);

			/* If this is not the last fragment, then add the
			 * MOREFRAGS bit to the frame control
			 */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | RTLLIB_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				/* The last fragment has the remaining length */
				bytes = bytes_last_frag;
			}
			if ((qos_actived) && (!bIsMulticast)) {
				frag_hdr->seq_ctl =
					 cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
							     header.addr1));
				frag_hdr->seq_ctl =
					 cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl)<<4 | i);
			} else {
				frag_hdr->seq_ctl =
					 cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}
			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				rtllib_put_snap(
					skb_put(skb_frag, SNAP_SIZE +
					sizeof(u16)), ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			skb_put_data(skb_frag, skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in
			 * order to insert the IV between the header and the
			 * payload
			 */
			if (encrypt)
				rtllib_encrypt_fragment(ieee, skb_frag,
							hdr_len);
			if (ieee->config &
			   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		if ((qos_actived) && (!bIsMulticast)) {
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	} else {
		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
			netdev_warn(ieee->dev, "skb too small (%d).\n",
				    skb->len);
			goto success;
		}

		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
		if (!txb) {
			netdev_warn(ieee->dev, "Could not allocate TXB\n");
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = cpu_to_le16(skb->len);
		skb_put_data(txb->fragments[0], skb->data, skb->len);
	}

 success:
	if (txb) {
		struct cb_desc *tcb_desc = (struct cb_desc *)
				(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		tcb_desc->priority = skb->priority;

		if (ether_type == ETH_P_PAE) {
			if (ieee->pHTInfo->IOTAction &
			    HT_IOT_ACT_WA_IOT_Broadcom) {
				tcb_desc->data_rate =
					 MgntQuery_TxRateExcludeCCKRates(ieee);
				tcb_desc->bTxDisableRateFallBack = false;
			} else {
				tcb_desc->data_rate = ieee->basic_rate;
				tcb_desc->bTxDisableRateFallBack = 1;
			}


			tcb_desc->RATRIndex = 7;
			tcb_desc->bTxUseDriverAssingedRate = 1;
		} else {
			if (is_multicast_ether_addr(header.addr1))
				tcb_desc->bMulticast = 1;
			if (is_broadcast_ether_addr(header.addr1))
				tcb_desc->bBroadcast = 1;
			rtllib_txrate_selectmode(ieee, tcb_desc);
			if (tcb_desc->bMulticast ||  tcb_desc->bBroadcast)
				tcb_desc->data_rate = ieee->basic_rate;
			else
				tcb_desc->data_rate = rtllib_current_rate(ieee);

			if (bdhcp) {
				if (ieee->pHTInfo->IOTAction &
				    HT_IOT_ACT_WA_IOT_Broadcom) {
					tcb_desc->data_rate =
					   MgntQuery_TxRateExcludeCCKRates(ieee);
					tcb_desc->bTxDisableRateFallBack = false;
				} else {
					tcb_desc->data_rate = MGN_1M;
					tcb_desc->bTxDisableRateFallBack = 1;
				}


				tcb_desc->RATRIndex = 7;
				tcb_desc->bTxUseDriverAssingedRate = 1;
				tcb_desc->bdhcp = 1;
			}

			rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
			rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
						tcb_desc);
			rtllib_query_HTCapShortGI(ieee, tcb_desc);
			rtllib_query_BandwidthMode(ieee, tcb_desc);
			rtllib_query_protectionmode(ieee, tcb_desc,
						    txb->fragments[0]);
		}
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += le16_to_cpu(txb->payload_size);
			rtllib_softmac_xmit(txb, ieee);
		} else {
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += le16_to_cpu(txb->payload_size);
				return 0;
			}
			rtllib_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;

}

int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
	memset(skb->cb, 0, sizeof(skb->cb));
	return rtllib_xmit_inter(skb, dev);
}
Example #26
/*
 * Decode frames received on the B/D channel.
 * Note that this function will be called continuously
 * with 64Kbit/s / 16Kbit/s of data and hence it will be
 * called 50 times per second with 20 ISOC descriptors.
 * Called at interrupt.
 */
static void usb_in_complete(struct urb *urb)
{
	struct st5481_in *in = urb->context;
	unsigned char *ptr;
	struct sk_buff *skb;
	int len, count, status;

	if (unlikely(urb->status < 0)) {
		switch (urb->status) {
		case -ENOENT:
		case -ESHUTDOWN:
		case -ECONNRESET:
			DBG(1, "urb killed status %d", urb->status);
			return; // Give up
		default:
			WARNING("urb status %d", urb->status);
			break;
		}
	}

	DBG_ISO_PACKET(0x80, urb);

	len = st5481_isoc_flatten(urb);
	ptr = urb->transfer_buffer;
	while (len > 0) {
		if (in->mode == L1_MODE_TRANS) {
			memcpy(in->rcvbuf, ptr, len);
			status = len;
			len = 0;
		} else {
			status = isdnhdlc_decode(&in->hdlc_state, ptr, len, &count,
						 in->rcvbuf, in->bufsize);
			ptr += count;
			len -= count;
		}

		if (status > 0) {
			// Good frame received
			DBG(4, "count=%d", status);
			DBG_PACKET(0x400, in->rcvbuf, status);
			if (!(skb = dev_alloc_skb(status))) {
				WARNING("receive out of memory\n");
				break;
			}
			skb_put_data(skb, in->rcvbuf, status);
			in->hisax_if->l1l2(in->hisax_if, PH_DATA | INDICATION, skb);
		} else if (status == -HDLC_CRC_ERROR) {
			INFO("CRC error");
		} else if (status == -HDLC_FRAMING_ERROR) {
			INFO("framing error");
		} else if (status == -HDLC_LENGTH_ERROR) {
			INFO("length error");
		}
	}

	// Prepare URB for next transfer
	urb->dev = in->adapter->usb_dev;
	urb->actual_length = 0;

	SUBMIT_URB(urb, GFP_ATOMIC);
}
Example #27
/**
 * kim_int_recv - receive function called during firmware download.
 *	Firmware download responses on different UART drivers have been
 *	observed to arrive in bursts across multiple tty_receive calls,
 *	hence the state-machine logic below.
 */
static void kim_int_recv(struct kim_data_s *kim_gdata,
	const unsigned char *data, long count)
{
	const unsigned char *ptr;
	int len = 0;
	unsigned char *plen;

	pr_debug("%s", __func__);
	/* Decode received bytes here */
	ptr = data;
	if (unlikely(ptr == NULL)) {
		pr_err(" received null from TTY ");
		return;
	}

	while (count) {
		if (kim_gdata->rx_count) {
			len = min_t(unsigned int, kim_gdata->rx_count, count);
			skb_put_data(kim_gdata->rx_skb, ptr, len);
			kim_gdata->rx_count -= len;
			count -= len;
			ptr += len;

			if (kim_gdata->rx_count)
				continue;

			/* Check ST RX state machine , where are we? */
			switch (kim_gdata->rx_state) {
				/* Waiting for complete packet ? */
			case ST_W4_DATA:
				pr_debug("Complete pkt received");
				validate_firmware_response(kim_gdata);
				kim_gdata->rx_state = ST_W4_PACKET_TYPE;
				kim_gdata->rx_skb = NULL;
				continue;
				/* Waiting for Bluetooth event header ? */
			case ST_W4_HEADER:
				plen =
				(unsigned char *)&kim_gdata->rx_skb->data[1];
				pr_debug("event hdr: plen 0x%02x\n", *plen);
				kim_check_data_len(kim_gdata, *plen);
				continue;
			}	/* end of switch */
		}		/* end of if rx_state */
		switch (*ptr) {
			/* Bluetooth event packet? */
		case 0x04:
			kim_gdata->rx_state = ST_W4_HEADER;
			kim_gdata->rx_count = 2;
			break;
		default:
			pr_info("unknown packet");
			ptr++;
			count--;
			continue;
		}
		ptr++;
		count--;
		kim_gdata->rx_skb =
			alloc_skb(1024+8, GFP_ATOMIC);
		if (!kim_gdata->rx_skb) {
			pr_err("can't allocate mem for new packet");
			kim_gdata->rx_state = ST_W4_PACKET_TYPE;
			kim_gdata->rx_count = 0;
			return;
		}
		skb_reserve(kim_gdata->rx_skb, 8);
		kim_gdata->rx_skb->cb[0] = 4;
		kim_gdata->rx_skb->cb[1] = 0;

	}
}
Example #28
File: lpc_eth.c Project: Lyude/linux
static int __lpc_handle_recv(struct net_device *ndev, int budget)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rxconsidx, len, ethst;
	struct rx_status_t *prxstat;
	int rx_done = 0;

	/* Get the current RX buffer indexes */
	rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
	while (rx_done < budget && rxconsidx !=
			readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
		/* Get pointer to receive status */
		prxstat = &pldat->rx_stat_v[rxconsidx];
		len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;

		/* Status error? */
		ethst = prxstat->statusinfo;
		if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
		    (RXSTATUS_ERROR | RXSTATUS_RANGE))
			ethst &= ~RXSTATUS_ERROR;

		if (ethst & RXSTATUS_ERROR) {
			int si = prxstat->statusinfo;
			/* Check statuses */
			if (si & RXSTATUS_OVERRUN) {
				/* Overrun error */
				ndev->stats.rx_fifo_errors++;
			} else if (si & RXSTATUS_CRC) {
				/* CRC error */
				ndev->stats.rx_crc_errors++;
			} else if (si & RXSTATUS_LENGTH) {
				/* Length error */
				ndev->stats.rx_length_errors++;
			} else if (si & RXSTATUS_ERROR) {
				/* Other error */
				ndev->stats.rx_length_errors++;
			}
			ndev->stats.rx_errors++;
		} else {
			/* Packet is good */
			skb = dev_alloc_skb(len);
			if (!skb) {
				ndev->stats.rx_dropped++;
			} else {
				/* Copy packet from buffer */
				skb_put_data(skb,
					     pldat->rx_buff_v + rxconsidx * ENET_MAXF_SIZE,
					     len);

				/* Pass to upper layer */
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
				ndev->stats.rx_packets++;
				ndev->stats.rx_bytes += len;
			}
		}

		/* Increment consume index */
		rxconsidx = rxconsidx + 1;
		if (rxconsidx >= ENET_RX_DESC)
			rxconsidx = 0;
		writel(rxconsidx,
		       LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
		rx_done++;
	}

	return rx_done;
}
Example #29
struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
			    const unsigned char *buffer, int count,
			    const struct h4_recv_pkt *pkts, int pkts_count)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	u8 alignment = hu->alignment ? hu->alignment : 1;

	while (count) {
		int i, len;

		/* remove padding bytes from buffer */
		for (; hu->padding && count > 0; hu->padding--) {
			count--;
			buffer++;
		}
		if (!count)
			break;

		if (!skb) {
			for (i = 0; i < pkts_count; i++) {
				if (buffer[0] != (&pkts[i])->type)
					continue;

				skb = bt_skb_alloc((&pkts[i])->maxlen,
						   GFP_ATOMIC);
				if (!skb)
					return ERR_PTR(-ENOMEM);

				hci_skb_pkt_type(skb) = (&pkts[i])->type;
				hci_skb_expect(skb) = (&pkts[i])->hlen;
				break;
			}

			/* Check for invalid packet type */
			if (!skb)
				return ERR_PTR(-EILSEQ);

			count -= 1;
			buffer += 1;
		}

		len = min_t(uint, hci_skb_expect(skb) - skb->len, count);
		skb_put_data(skb, buffer, len);

		count -= len;
		buffer += len;

		/* Check for partial packet */
		if (skb->len < hci_skb_expect(skb))
			continue;

		for (i = 0; i < pkts_count; i++) {
			if (hci_skb_pkt_type(skb) == (&pkts[i])->type)
				break;
		}

		if (i >= pkts_count) {
			kfree_skb(skb);
			return ERR_PTR(-EILSEQ);
		}

		if (skb->len == (&pkts[i])->hlen) {
			u16 dlen;

			switch ((&pkts[i])->lsize) {
			case 0:
				/* No variable data length */
				dlen = 0;
				break;
			case 1:
				/* Single octet variable length */
				dlen = skb->data[(&pkts[i])->loff];
				hci_skb_expect(skb) += dlen;

				if (skb_tailroom(skb) < dlen) {
					kfree_skb(skb);
					return ERR_PTR(-EMSGSIZE);
				}
				break;
			case 2:
				/* Double octet variable length */
				dlen = get_unaligned_le16(skb->data +
							  (&pkts[i])->loff);
				hci_skb_expect(skb) += dlen;

				if (skb_tailroom(skb) < dlen) {
					kfree_skb(skb);
					return ERR_PTR(-EMSGSIZE);
				}
				break;
			default:
				/* Unsupported variable length */
				kfree_skb(skb);
				return ERR_PTR(-EILSEQ);
			}

			if (!dlen) {
				hu->padding = (skb->len - 1) % alignment;
				hu->padding = (alignment - hu->padding) % alignment;

				/* No more data, complete frame */
				(&pkts[i])->recv(hdev, skb);
				skb = NULL;
			}
		} else {
			hu->padding = (skb->len - 1) % alignment;
			hu->padding = (alignment - hu->padding) % alignment;

			/* Complete frame */
			(&pkts[i])->recv(hdev, skb);
			skb = NULL;
		}
	}

	return skb;
}
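
For context, here is a minimal sketch of how a UART driver's receive hook typically feeds h4_recv_buf(), carrying the partially reassembled skb over between calls. The single-entry packet table and every example_* identifier are assumptions made for this sketch; real drivers register tables covering ACL, SCO and event packet types.

/* Illustrative caller only: the table recognizes just HCI events, and all
 * example_* identifiers are invented for this sketch. */
static const struct h4_recv_pkt example_recv_pkts[] = {
	{ .type = HCI_EVENT_PKT, .hlen = HCI_EVENT_HDR_SIZE, .loff = 1,
	  .lsize = 1, .maxlen = HCI_MAX_EVENT_SIZE, .recv = hci_recv_frame },
};

static int example_uart_receive(struct hci_uart *hu, const u8 *data, int count)
{
	struct example_uart_state *st = hu->priv;	/* driver-private state */

	/* h4_recv_buf() returns the (possibly partial) skb to carry over to
	 * the next chunk, or an ERR_PTR() on framing/size errors.
	 */
	st->rx_skb = h4_recv_buf(hu->hdev, st->rx_skb, data, count,
				 example_recv_pkts, ARRAY_SIZE(example_recv_pkts));
	if (IS_ERR(st->rx_skb)) {
		int err = PTR_ERR(st->rx_skb);

		st->rx_skb = NULL;
		return err;
	}

	return count;
}
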
Example #30
File: scan.c Project: Anjali05/linux
int cw1200_hw_scan(struct ieee80211_hw *hw,
		   struct ieee80211_vif *vif,
		   struct ieee80211_scan_request *hw_req)
{
	struct cw1200_common *priv = hw->priv;
	struct cfg80211_scan_request *req = &hw_req->req;
	struct wsm_template_frame frame = {
		.frame_type = WSM_FRAME_TYPE_PROBE_REQUEST,
	};
	int i, ret;

	if (!priv->vif)
		return -EINVAL;

	/* Scanning while acting as P2P_GO corrupts the firmware's MiniAP mode */
	if (priv->join_status == CW1200_JOIN_STATUS_AP)
		return -EOPNOTSUPP;

	if (req->n_ssids == 1 && !req->ssids[0].ssid_len)
		req->n_ssids = 0;

	wiphy_dbg(hw->wiphy, "[SCAN] Scan request for %d SSIDs.\n",
		  req->n_ssids);

	if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
		return -EINVAL;

	/* will be unlocked in cw1200_scan_work() */
	down(&priv->scan.lock);
	mutex_lock(&priv->conf_mutex);

	frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
		req->ie_len);
	if (!frame.skb) {
		mutex_unlock(&priv->conf_mutex);
		up(&priv->scan.lock);
		return -ENOMEM;
	}

	if (req->ie_len)
		skb_put_data(frame.skb, req->ie, req->ie_len);

	ret = wsm_set_template_frame(priv, &frame);
	if (!ret) {
		/* Host wants to be the probe responder. */
		ret = wsm_set_probe_responder(priv, true);
	}
	if (ret) {
		dev_kfree_skb(frame.skb);
		mutex_unlock(&priv->conf_mutex);
		up(&priv->scan.lock);
		return ret;
	}

	wsm_lock_tx(priv);

	BUG_ON(priv->scan.req);
	priv->scan.req = req;
	priv->scan.n_ssids = 0;
	priv->scan.status = 0;
	priv->scan.begin = &req->channels[0];
	priv->scan.curr = priv->scan.begin;
	priv->scan.end = &req->channels[req->n_channels];
	priv->scan.output_power = priv->output_power;

	for (i = 0; i < req->n_ssids; ++i) {
		struct wsm_ssid *dst = &priv->scan.ssids[priv->scan.n_ssids];
		memcpy(&dst->ssid[0], req->ssids[i].ssid, sizeof(dst->ssid));
		dst->length = req->ssids[i].ssid_len;
		++priv->scan.n_ssids;
	}

	if (frame.skb)
		dev_kfree_skb(frame.skb);
	mutex_unlock(&priv->conf_mutex);
	queue_work(priv->workqueue, &priv->scan.work);
	return 0;
}
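
Taken together, the examples above share one pattern: size the allocation for the headers plus the payload, reserve room up front for headers or alignment, append the payload with skb_put_data(), and hand the skb to the stack or a transmit queue. Below is a condensed, self-contained sketch of the receive-side version of that pattern; it is generic, and the example_rx_copy name and the netif_rx() hand-off are assumptions rather than code from any driver above.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Generic RX-copy pattern distilled from the examples above: allocate,
 * reserve alignment room, copy the payload with skb_put_data(), then
 * pass the buffer up the stack.
 */
static int example_rx_copy(struct net_device *dev, const void *buf,
			   unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	if (!skb) {
		dev->stats.rx_dropped++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_IP_ALIGN);	/* keep the IP header aligned */
	skb_put_data(skb, buf, len);	/* copy payload, advance skb->tail */
	skb->protocol = eth_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;

	return netif_rx(skb);
}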