Example no. 1
/*
 * Decode frames received on the B/D channel.
 * Note that this function will be called continuously
 * with 64Kbit/s / 16Kbit/s of data and hence it will be 
 * called 50 times per second with 20 ISOC descriptors. 
 * Called at interrupt.
 */
static void usb_in_complete(struct urb *urb)
{
	struct st5481_in *in = urb->context;
	unsigned char *ptr;
	struct sk_buff *skb;
	int len, count, status;

	if (unlikely(urb->status < 0)) {
		switch (urb->status) {
			case -ENOENT:
			case -ESHUTDOWN:
			case -ECONNRESET:
				DBG(1,"urb killed status %d", urb->status);
				return; // Give up
			default: 
				WARNING("urb status %d",urb->status);
				break;
		}
	}

	DBG_ISO_PACKET(0x80,urb);

	len = st5481_isoc_flatten(urb);
	ptr = urb->transfer_buffer;
	while (len > 0) {
		if (in->mode == L1_MODE_TRANS) {
			memcpy(in->rcvbuf, ptr, len);
			status = len;
			len = 0;
		} else {
			status = isdnhdlc_decode(&in->hdlc_state, ptr, len, &count,
				in->rcvbuf, in->bufsize);
			ptr += count;
			len -= count;
		}
		
		if (status > 0) {
			// Good frame received
			DBG(4,"count=%d",status);
			DBG_PACKET(0x400, in->rcvbuf, status);
			if (!(skb = dev_alloc_skb(status))) {
				WARNING("receive out of memory\n");
				break;
			}
			memcpy(skb_put(skb, status), in->rcvbuf, status);
			in->hisax_if->l1l2(in->hisax_if, PH_DATA | INDICATION, skb);
		} else if (status == -HDLC_CRC_ERROR) {
			INFO("CRC error");
		} else if (status == -HDLC_FRAMING_ERROR) {
			INFO("framing error");
		} else if (status == -HDLC_LENGTH_ERROR) {
			INFO("length error");
		}
	}

	// Prepare URB for next transfer
	urb->dev = in->adapter->usb_dev;
	urb->actual_length = 0;

	SUBMIT_URB(urb, GFP_ATOMIC);
}
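A quick check of the rates quoted in the header comment, assuming the usual 1 ms isochronous interval per descriptor (an assumption, not stated in the snippet): 20 descriptors per URB means one completion every 20 ms, i.e. 50 calls per second; at 64 kbit/s the B channel delivers 8000 bytes/s, so each completion carries about 160 bytes (and about 40 bytes for the 16 kbit/s D channel).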
Example no. 2
/**
 * mesh_path_error_tx - Sends a PERR mesh management frame
 *
 * @ttl: allowed remaining hops
 * @target: broken destination
 * @target_sn: SN of the broken destination
 * @target_rcode: reason code for this PERR
 * @ra: node this frame is addressed to
 * @sdata: local mesh subif
 *
 * Note: This function may be called with driver locks taken that the driver
 * also acquires in the TX path.  To avoid a deadlock we don't transmit the
 * frame directly but add it to the pending queue instead.
 */
int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
		       u8 ttl, const u8 *target, u32 target_sn,
		       u16 target_rcode, const u8 *ra)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;
	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
		      sizeof(mgmt->u.action.u.mesh_action);

	if (time_before(jiffies, ifmsh->next_perr))
		return -EAGAIN;

	skb = dev_alloc_skb(local->tx_headroom +
			    sdata->encrypt_headroom +
			    IEEE80211_ENCRYPT_TAILROOM +
			    hdr_len +
			    2 + 15 /* PERR IE */);
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom + sdata->encrypt_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
	memset(mgmt, 0, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, ra, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.u.mesh_action.action_code =
					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
	ie_len = 15;
	pos = skb_put(skb, 2 + ie_len);
	*pos++ = WLAN_EID_PERR;
	*pos++ = ie_len;
	/* ttl */
	*pos++ = ttl;
	/* number of destinations */
	*pos++ = 1;
	/*
	 * flags bit, bit 1 is unset if we know the sequence number and
	 * bit 2 is set if we have a reason code
	 */
	*pos = 0;
	if (!target_sn)
		*pos |= MP_F_USN;
	if (target_rcode)
		*pos |= MP_F_RCODE;
	pos++;
	memcpy(pos, target, ETH_ALEN);
	pos += ETH_ALEN;
	put_unaligned_le32(target_sn, pos);
	pos += 4;
	put_unaligned_le16(target_rcode, pos);

	/* see note in function header */
	prepare_frame_for_deferred_tx(sdata, skb);
	ifmsh->next_perr = TU_TO_EXP_TIME(
				   ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
	ieee80211_add_pending_skb(local, skb);
	return 0;
}
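The skb above is sized for "2 + 15" bytes of PERR element; the bytes written one by one correspond to the following layout (my own illustrative sketch, not a struct used by the driver):

/*
 * Single-destination PERR element as built above:
 *
 *   EID (WLAN_EID_PERR)         1 byte   \ 2-byte element header
 *   length = 15                 1 byte   /
 *   ttl                         1 byte
 *   number of destinations      1 byte   (always 1 here)
 *   flags (MP_F_USN/MP_F_RCODE) 1 byte
 *   target address              6 bytes  (ETH_ALEN)
 *   target sequence number      4 bytes  (little endian)
 *   reason code                 2 bytes  (little endian)
 *
 * 1 + 1 + 1 + 6 + 4 + 2 = 15 information bytes, hence ie_len = 15.
 */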
Example no. 3
bool cmpk_message_handle_tx(
	struct net_device *dev,
	u8	*code_virtual_address,
	u32	packettype,
	u32	buffer_len)
{

	bool				rt_status = true;
	struct r8192_priv *priv = rtllib_priv(dev);
	u16				frag_threshold;
	u16				frag_length = 0, frag_offset = 0;
	struct rt_firmware *pfirmware = priv->pFirmware;
	struct sk_buff		*skb;
	unsigned char		*seg_ptr;
	struct cb_desc *tcb_desc;
	u8				bLastIniPkt;

	struct tx_fwinfo_8190pci *pTxFwInfo = NULL;

	RT_TRACE(COMP_CMDPKT, "%s(),buffer_len is %d\n", __func__, buffer_len);
	firmware_init_param(dev);
	frag_threshold = pfirmware->cmdpacket_frag_thresold;

	do {
		if ((buffer_len - frag_offset) > frag_threshold) {
			frag_length = frag_threshold;
			bLastIniPkt = 0;

		} else {
			frag_length = (u16)(buffer_len - frag_offset);
			bLastIniPkt = 1;
		}

		skb  = dev_alloc_skb(frag_length +
				     priv->rtllib->tx_headroom + 4);

		if (skb == NULL) {
			rt_status = false;
			goto Failed;
		}

		memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
		tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->queue_index = TXCMD_QUEUE;
		tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_NORMAL;
		tcb_desc->bLastIniPkt = bLastIniPkt;
		tcb_desc->pkt_size = frag_length;

		seg_ptr = skb_put(skb, priv->rtllib->tx_headroom);
		pTxFwInfo = (struct tx_fwinfo_8190pci *)seg_ptr;
		memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci));
		memset(pTxFwInfo, 0x12, 8);

		seg_ptr = skb_put(skb, frag_length);
		memcpy(seg_ptr, code_virtual_address, (u32)frag_length);

		priv->rtllib->softmac_hard_start_xmit(skb, dev);

		code_virtual_address += frag_length;
		frag_offset += frag_length;

	} while (frag_offset < buffer_len);

	write_nic_byte(dev, TPPoll, TPPoll_CQ);
Failed:
	return rt_status;
}
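The do/while loop above slices the command buffer into frag_threshold-sized pieces and flags the last one. A minimal user-space sketch of just that slicing (names are mine, purely illustrative):

#include <stdio.h>

/* Illustrative only: mirrors the fragmentation arithmetic of the loop above. */
static void split_into_fragments(unsigned int buffer_len, unsigned int frag_threshold)
{
	unsigned int frag_offset = 0;

	do {
		unsigned int frag_length;
		int last_frag = 0;

		if (buffer_len - frag_offset > frag_threshold) {
			frag_length = frag_threshold;
		} else {
			frag_length = buffer_len - frag_offset;
			last_frag = 1;
		}
		printf("fragment: offset %u, length %u%s\n",
		       frag_offset, frag_length, last_frag ? " (last)" : "");
		frag_offset += frag_length;
	} while (frag_offset < buffer_len);
}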
static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
				struct data_queue *queue,
				struct sk_buff *frag_skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb);
	struct ieee80211_tx_info *rts_info;
	struct sk_buff *skb;
	unsigned int data_length;
	int retval = 0;

	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		data_length = sizeof(struct ieee80211_cts);
	else
		data_length = sizeof(struct ieee80211_rts);

	skb = dev_alloc_skb(data_length + rt2x00dev->hw->extra_tx_headroom);
	if (unlikely(!skb)) {
		rt2x00_warn(rt2x00dev, "Failed to create RTS/CTS frame\n");
		return -ENOMEM;
	}

	skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom);
	skb_put(skb, data_length);

	/*
	 * Copy TX information over from original frame to
	 * RTS/CTS frame. Note that we set the no encryption flag
	 * since we don't want this frame to be encrypted.
	 * RTS frames should be acked, while CTS-to-self frames
	 * should not. The ready for TX flag is cleared to prevent
	 * it being automatically sent when the descriptor is
	 * written to the hardware.
	 */
	memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
	rts_info = IEEE80211_SKB_CB(skb);
	rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
	rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_CTS_PROTECT;

	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		rts_info->flags |= IEEE80211_TX_CTL_NO_ACK;
	else
		rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK;

	/* Disable hardware encryption */
	rts_info->control.hw_key = NULL;

	/*
	 * RTS/CTS frame should use the length of the frame plus any
	 * encryption overhead that will be added by the hardware.
	 */
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif,
					frag_skb->data, data_length, tx_info,
					(struct ieee80211_cts *)(skb->data));
	else
		ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif,
				  frag_skb->data, data_length, tx_info,
				  (struct ieee80211_rts *)(skb->data));

	retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true);
	if (retval) {
		dev_kfree_skb_any(skb);
		rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n");
	}

	return retval;
}
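For context, a hedged sketch of how a helper like this might be invoked from a driver tx path when rate control asks for protection (my illustration, not the actual rt2x00 code):

/* Illustration only: build the RTS/CTS frame first, then queue the data frame. */
static int tx_with_protection(struct rt2x00_dev *rt2x00dev,
			      struct data_queue *queue,
			      struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.rates[0].flags &
	    (IEEE80211_TX_RC_USE_RTS_CTS | IEEE80211_TX_RC_USE_CTS_PROTECT)) {
		int retval = rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb);

		if (retval)
			return retval;
	}

	return rt2x00queue_write_tx_frame(queue, skb, NULL, false);
}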
Example no. 5
static ssize_t ieee80211_if_parse_tkip_mic_test(
	struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
{
	struct ieee80211_local *local = sdata->local;
	u8 addr[ETH_ALEN];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	__le16 fc;

	/*
	 * Assume colon-delimited MAC address with possible white space
	 * following.
	 */
	if (buflen < 3 * ETH_ALEN - 1)
		return -EINVAL;
	if (hwaddr_aton(buf, addr) < 0)
		return -EINVAL;

	if (!ieee80211_sdata_running(sdata))
		return -ENOTCONN;

	skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 100);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, local->hw.extra_tx_headroom);

	hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
	memset(hdr, 0, 24);
	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP:
		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
		/* DA BSSID SA */
		memcpy(hdr->addr1, addr, ETH_ALEN);
		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
		memcpy(hdr->addr3, sdata->vif.addr, ETH_ALEN);
		break;
	case NL80211_IFTYPE_STATION:
		fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
		/* BSSID SA DA */
		if (sdata->vif.bss_conf.bssid == NULL) {
			dev_kfree_skb(skb);
			return -ENOTCONN;
		}
		memcpy(hdr->addr1, sdata->vif.bss_conf.bssid, ETH_ALEN);
		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
		memcpy(hdr->addr3, addr, ETH_ALEN);
		break;
	default:
		dev_kfree_skb(skb);
		return -EOPNOTSUPP;
	}
	hdr->frame_control = fc;

	/*
	 * Add some length to the test frame to make it look a bit more valid.
	 * The exact contents do not matter since the recipient is required
	 * to drop this because of the Michael MIC failure.
	 */
	memset(skb_put(skb, 50), 0, 50);

	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE;

	ieee80211_tx_skb(sdata, skb);

	return buflen;
}
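hwaddr_aton() is assumed here to parse a colon-delimited MAC address, which is why buflen must be at least 3 * ETH_ALEN - 1 = 17 characters. A minimal user-space equivalent might look like this (illustrative sketch, not the actual helper):

#include <stdio.h>

#define ETH_ALEN 6

/* Parse "aa:bb:cc:dd:ee:ff" into addr; returns 0 on success, -1 on failure. */
static int hwaddr_aton_sketch(const char *buf, unsigned char *addr)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		unsigned int byte;

		if (sscanf(buf + 3 * i, "%2x", &byte) != 1)
			return -1;
		if (i < ETH_ALEN - 1 && buf[3 * i + 2] != ':')
			return -1;
		addr[i] = (unsigned char)byte;
	}
	return 0;
}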
Example no. 6
/* ----------------------------------------------------------------------------
mace_rx
	Receives packets.
---------------------------------------------------------------------------- */
static int mace_rx(struct net_device *dev, unsigned char RxCnt)
{
  mace_private *lp = netdev_priv(dev);
  unsigned int ioaddr = dev->base_addr;
  unsigned char rx_framecnt;
  unsigned short rx_status;

  while (
    ((rx_framecnt = inb(ioaddr + AM2150_RCV_FRAME_COUNT)) > 0) &&
    (rx_framecnt <= 12) && /* rx_framecnt==0xFF if card is extracted. */
    (RxCnt--)
  ) {
    rx_status = inw(ioaddr + AM2150_RCV);

    pr_debug("%s: in mace_rx(), framecnt 0x%X, rx_status"
	  " 0x%X.\n", dev->name, rx_framecnt, rx_status);

    if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */
      lp->linux_stats.rx_errors++;
      if (rx_status & MACE_RCVFS_OFLO) {
        lp->mace_stats.oflo++;
      }
      if (rx_status & MACE_RCVFS_CLSN) {
        lp->mace_stats.clsn++;
      }
      if (rx_status & MACE_RCVFS_FRAM) {
	lp->mace_stats.fram++;
      }
      if (rx_status & MACE_RCVFS_FCS) {
        lp->mace_stats.fcs++;
      }
    } else {
      short pkt_len = (rx_status & ~MACE_RCVFS_RCVSTS) - 4;
        /* Auto Strip is off, always subtract 4 */
      struct sk_buff *skb;

      lp->mace_stats.rfs_rntpc += inb(ioaddr + AM2150_RCV);
        /* runt packet count */
      lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV);
        /* rcv collision count */

      pr_debug("    receiving packet size 0x%X rx_status"
	    " 0x%X.\n", pkt_len, rx_status);

      skb = dev_alloc_skb(pkt_len+2);

      if (skb != NULL) {
	skb_reserve(skb, 2);
	insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
	if (pkt_len & 1)
	    *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV);
	skb->protocol = eth_type_trans(skb, dev);
	
	netif_rx(skb); /* Send the packet to the upper (protocol) layers. */

	lp->linux_stats.rx_packets++;
	lp->linux_stats.rx_bytes += pkt_len;
	outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
	continue;
      } else {
	pr_debug("%s: couldn't allocate a sk_buff of size"
	      " %d.\n", dev->name, pkt_len);
	lp->linux_stats.rx_dropped++;
      }
    }
Example no. 7
static void myri_rx(struct myri_eth *mp, struct net_device *dev)
{
	struct recvq __iomem *rq = mp->rq;
	struct recvq __iomem *rqa = mp->rqack;
	int entry		= sbus_readl(&rqa->head);
	int limit		= sbus_readl(&rqa->tail);
	int drops;

	DRX(("entry[%d] limit[%d] ", entry, limit));
	if (entry == limit)
		return;
	drops = 0;
	DRX(("\n"));
	while (entry != limit) {
		struct myri_rxd __iomem *rxdack = &rqa->myri_rxd[entry];
		u32 csum		= sbus_readl(&rxdack->csum);
		int len			= sbus_readl(&rxdack->myri_scatters[0].len);
		int index		= sbus_readl(&rxdack->ctx);
		struct myri_rxd __iomem *rxd = &rq->myri_rxd[sbus_readl(&rq->tail)];
		struct sk_buff *skb	= mp->rx_skbs[index];

		/* Ack it. */
		sbus_writel(NEXT_RX(entry), &rqa->head);

		/* Check for errors. */
		DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
		dma_sync_single_for_cpu(&mp->myri_op->dev,
					sbus_readl(&rxd->myri_scatters[0].addr),
					RX_ALLOC_SIZE, DMA_FROM_DEVICE);
		if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
			DRX(("ERROR["));
			dev->stats.rx_errors++;
			if (len < (ETH_HLEN + MYRI_PAD_LEN)) {
				DRX(("BAD_LENGTH] "));
				dev->stats.rx_length_errors++;
			} else {
				DRX(("NO_PADDING] "));
				dev->stats.rx_frame_errors++;
			}

			/* Return it to the LANAI. */
	drop_it:
			drops++;
			DRX(("DROP "));
			dev->stats.rx_dropped++;
			dma_sync_single_for_device(&mp->myri_op->dev,
						   sbus_readl(&rxd->myri_scatters[0].addr),
						   RX_ALLOC_SIZE,
						   DMA_FROM_DEVICE);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
			goto next;
		}

		DRX(("len[%d] ", len));
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;
			u32 dma_addr;

			DRX(("BIGBUFF "));
			new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC);
			if (new_skb == NULL) {
				DRX(("skb_alloc(FAILED) "));
				goto drop_it;
			}
			dma_unmap_single(&mp->myri_op->dev,
					 sbus_readl(&rxd->myri_scatters[0].addr),
					 RX_ALLOC_SIZE,
					 DMA_FROM_DEVICE);
			mp->rx_skbs[index] = new_skb;
			new_skb->dev = dev;
			skb_put(new_skb, RX_ALLOC_SIZE);
			dma_addr = dma_map_single(&mp->myri_op->dev,
						  new_skb->data,
						  RX_ALLOC_SIZE,
						  DMA_FROM_DEVICE);
			sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

			/* Trim the original skb for the netif. */
			DRX(("trim(%d) ", len));
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len);

			DRX(("SMALLBUFF "));
			if (copy_skb == NULL) {
				DRX(("dev_alloc_skb(FAILED) "));
				goto drop_it;
			}
			/* DMA sync already done above. */
			copy_skb->dev = dev;
			DRX(("resv_and_put "));
			skb_put(copy_skb, len);
			skb_copy_from_linear_data(skb, copy_skb->data, len);

			/* Reuse original ring buffer. */
			DRX(("reuse "));
			dma_sync_single_for_device(&mp->myri_op->dev,
						   sbus_readl(&rxd->myri_scatters[0].addr),
						   RX_ALLOC_SIZE,
						   DMA_FROM_DEVICE);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

			skb = copy_skb;
		}

		/* Just like the happy meal we get checksums from this card. */
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_UNNECESSARY; /* XXX */

		skb->protocol = myri_type_trans(skb, dev);
		DRX(("prot[%04x] netif_rx ", skb->protocol));
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	next:
		DRX(("NEXT\n"));
		entry = NEXT_RX(entry);
	}
}
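Both this receive path and adm8211_interrupt_rci further below use the same copy-break idea; summarised as a comment (my own wording):

/*
 * Copy-break pattern (illustrative summary):
 *
 *   len > RX_COPY_THRESHOLD:  hand the full-size ring skb to the stack and
 *                             allocate + DMA-map a fresh RX_ALLOC_SIZE
 *                             replacement for the descriptor;
 *   len <= RX_COPY_THRESHOLD: allocate a small skb of exactly len bytes,
 *                             copy the data into it and leave the original
 *                             ring buffer in place for reuse.
 *
 * Small frames pay a memcpy but keep the big buffer; large frames avoid
 * the copy at the cost of a new allocation and DMA mapping.
 */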
static int kingsun_net_open(struct net_device *netdev)
{
	struct kingsun_cb *kingsun = netdev_priv(netdev);
	int err = -ENOMEM;
	char hwname[16];

	
	kingsun->receiving = 0;

	
	kingsun->rx_buff.in_frame = FALSE;
	kingsun->rx_buff.state = OUTSIDE_FRAME;
	kingsun->rx_buff.truesize = IRDA_SKB_MAX_MTU;
	kingsun->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
	if (!kingsun->rx_buff.skb)
		goto free_mem;

	skb_reserve(kingsun->rx_buff.skb, 1);
	kingsun->rx_buff.head = kingsun->rx_buff.skb->data;
	do_gettimeofday(&kingsun->rx_time);

	kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kingsun->rx_urb)
		goto free_mem;

	kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kingsun->tx_urb)
		goto free_mem;

	sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
	kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
	if (!kingsun->irlap) {
		err("kingsun-sir: irlap_open failed");
		goto free_mem;
	}

	
	usb_fill_int_urb(kingsun->rx_urb, kingsun->usbdev,
			  usb_rcvintpipe(kingsun->usbdev, kingsun->ep_in),
			  kingsun->in_buf, kingsun->max_rx,
			  kingsun_rcv_irq, kingsun, 1);
	kingsun->rx_urb->status = 0;
	err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
	if (err) {
		err("kingsun-sir: first urb-submit failed: %d", err);
		goto close_irlap;
	}

	netif_start_queue(netdev);


	return 0;

 close_irlap:
	irlap_close(kingsun->irlap);
 free_mem:
	if (kingsun->tx_urb) {
		usb_free_urb(kingsun->tx_urb);
		kingsun->tx_urb = NULL;
	}
	if (kingsun->rx_urb) {
		usb_free_urb(kingsun->rx_urb);
		kingsun->rx_urb = NULL;
	}
	if (kingsun->rx_buff.skb) {
		kfree_skb(kingsun->rx_buff.skb);
		kingsun->rx_buff.skb = NULL;
		kingsun->rx_buff.head = NULL;
	}
	return err;
}
Example no. 9
File: amd7930.c Project: nhanh0/hah
static void
Bchan_rcv_bh(struct BCState *bcs)
{
	struct IsdnCardState *cs = bcs->cs;
	struct amd7930_hw *hw = &bcs->hw.amd7930;
	struct sk_buff *skb;
	int len;

	if (cs->debug & L1_DEB_HSCX) {
		char tmp[1024];

		sprintf(tmp, "amd7930_Bchan_rcv (%d/%d)",
			hw->rv_buff_in, hw->rv_buff_out);
		debugl1(cs, tmp);
		QuickHex(tmp, hw->rv_buff + hw->rv_buff_out,
			 RCV_BUFSIZE/RCV_BUFBLKS);
		debugl1(cs, tmp);
	}

	do {
		if (bcs->mode == L1_MODE_HDLC) {
			while ((len = read_raw_hdlc_data(hw->hdlc_state,
							 hw->rv_buff + hw->rv_buff_out, RCV_BUFSIZE/RCV_BUFBLKS,
							 hw->rv_skb->tail, HSCX_BUFMAX))) {
				if (len > 0 && (cs->debug & L1_DEB_HSCX_FIFO)) {
					char tmp[1024];
					char *t = tmp;

					t += sprintf(t, "amd7930_Bchan_rcv %c cnt %d", bcs->channel ? 'B' : 'A', len);
					QuickHex(t, hw->rv_skb->tail, len);
					debugl1(cs, tmp);
				}

				if (len > HSCX_BUFMAX/2) {
					/* Large packet received */

					if (!(skb = dev_alloc_skb(HSCX_BUFMAX))) {
						printk(KERN_WARNING "amd7930: receive out of memory\n");
					} else {
						skb_put(hw->rv_skb, len);
						skb_queue_tail(&bcs->rqueue, hw->rv_skb);
						hw->rv_skb = skb;
						bcs->event |= 1 << B_RCVBUFREADY;
						queue_task(&bcs->tqueue, &tq_immediate);
					}
				} else if (len > 0) {
					/* Small packet received */

					if (!(skb = dev_alloc_skb(len))) {
						printk(KERN_WARNING "amd7930: receive out of memory\n");
					} else {
						memcpy(skb_put(skb, len), hw->rv_skb->tail, len);
						skb_queue_tail(&bcs->rqueue, skb);
						bcs->event |= 1 << B_RCVBUFREADY;
						queue_task(&bcs->tqueue, &tq_immediate);
						mark_bh(IMMEDIATE_BH);
					}
				} else {
					/* Reception Error */
					/* printk("amd7930: B channel receive error\n"); */
				}
			}
		} else if (bcs->mode == L1_MODE_TRANS) {
			if (!(skb = dev_alloc_skb(RCV_BUFSIZE/RCV_BUFBLKS))) {
				printk(KERN_WARNING "amd7930: receive out of memory\n");
			} else {
				memcpy(skb_put(skb, RCV_BUFSIZE/RCV_BUFBLKS),
				       hw->rv_buff + hw->rv_buff_out,
				       RCV_BUFSIZE/RCV_BUFBLKS);
				skb_queue_tail(&bcs->rqueue, skb);
				bcs->event |= 1 << B_RCVBUFREADY;
				queue_task(&bcs->tqueue, &tq_immediate);
				mark_bh(IMMEDIATE_BH);
			}
		}

		if (hw->rv_buff_in == hw->rv_buff_out) {
			/* Buffer was filled up - need to restart receiver */
			amd7930_brecv(0, bcs->channel,
				      hw->rv_buff + hw->rv_buff_in,
				      RCV_BUFSIZE/RCV_BUFBLKS,
				      (void *) &Bchan_recv_callback,
				      (void *) bcs);
		}

		hw->rv_buff_out += RCV_BUFSIZE/RCV_BUFBLKS;
		hw->rv_buff_out %= RCV_BUFSIZE;

	} while (hw->rv_buff_in != hw->rv_buff_out);
}
Example no. 10
/******************************************************************************
    Network device configuration functions
******************************************************************************/
static int
islpci_alloc_memory(islpci_private *priv)
{
	int counter;

#if VERBOSE > SHOW_ERROR_MESSAGES
	printk(KERN_DEBUG "islpci_alloc_memory\n");
#endif

	/* remap the PCI device base address to accessible */
	if (!(priv->device_base =
	      ioremap(pci_resource_start(priv->pdev, 0),
		      ISL38XX_PCI_MEM_SIZE))) {
		/* error in remapping the PCI device memory address range */
		printk(KERN_ERR "PCI memory remapping failed\n");
		return -1;
	}

	/* memory layout for consistent DMA region:
	 *
	 * Area 1: Control Block for the device interface
	 * Area 2: Power Save Mode Buffer for temporary frame storage. Be aware that
	 *         the number of supported stations in the AP determines the minimal
	 *         size of the buffer !
	 */

	/* perform the allocation */
	priv->driver_mem_address = pci_alloc_consistent(priv->pdev,
							HOST_MEM_BLOCK,
							&priv->
							device_host_address);

	if (!priv->driver_mem_address) {
		/* error allocating the block of PCI memory */
		printk(KERN_ERR "%s: could not allocate DMA memory, aborting!",
		       "prism54");
		return -1;
	}

	/* assign the Control Block to the first address of the allocated area */
	priv->control_block =
	    (isl38xx_control_block *) priv->driver_mem_address;

	/* set the Power Save Buffer pointer directly behind the CB */
	priv->device_psm_buffer =
		priv->device_host_address + CONTROL_BLOCK_SIZE;

	/* make sure all buffer pointers are initialized */
	for (counter = 0; counter < ISL38XX_CB_QCOUNT; counter++) {
		priv->control_block->driver_curr_frag[counter] = cpu_to_le32(0);
		priv->control_block->device_curr_frag[counter] = cpu_to_le32(0);
	}

	priv->index_mgmt_rx = 0;
	memset(priv->mgmt_rx, 0, sizeof(priv->mgmt_rx));
	memset(priv->mgmt_tx, 0, sizeof(priv->mgmt_tx));

	/* allocate rx queue for management frames */
	if (islpci_mgmt_rx_fill(priv->ndev) < 0)
		goto out_free;

	/* now get the data rx skb's */
	memset(priv->data_low_rx, 0, sizeof (priv->data_low_rx));
	memset(priv->pci_map_rx_address, 0, sizeof (priv->pci_map_rx_address));

	for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) {
		struct sk_buff *skb;

		/* allocate an sk_buff for received data frame storage;
		 * each received frame consists of 1 fragment and may
		 * include any required alignment operations */
		if (!(skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2))) {
			/* error allocating an sk_buff structure elements */
			printk(KERN_ERR "Error allocating skb.\n");
			skb = NULL;
			goto out_free;
		}
		skb_reserve(skb, (4 - (long) skb->data) & 0x03);
		/* add the new allocated sk_buff to the buffer array */
		priv->data_low_rx[counter] = skb;

		/* map the allocated skb data area to pci */
		priv->pci_map_rx_address[counter] =
		    pci_map_single(priv->pdev, (void *) skb->data,
				   MAX_FRAGMENT_SIZE_RX + 2,
				   PCI_DMA_FROMDEVICE);
		if (!priv->pci_map_rx_address[counter]) {
			/* error mapping the buffer to device
			   accessible memory address */
			printk(KERN_ERR "failed to map skb DMA'able\n");
			goto out_free;
		}
	}

	prism54_acl_init(&priv->acl);
	prism54_wpa_bss_ie_init(priv);
	if (mgt_init(priv))
		goto out_free;

	return 0;
 out_free:
	islpci_free_memory(priv);
	return -1;
}
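The memory-layout comment above can be pictured as follows (my sketch; CONTROL_BLOCK_SIZE and HOST_MEM_BLOCK are the driver's own constants):

/*
 * Consistent-DMA area set up above:
 *
 *   driver_mem_address / device_host_address
 *   +------------------------------+  offset 0
 *   | isl38xx_control_block        |  priv->control_block
 *   +------------------------------+  offset CONTROL_BLOCK_SIZE
 *   | power save mode (PSM) buffer |  priv->device_psm_buffer
 *   +------------------------------+  offset HOST_MEM_BLOCK (end)
 *
 * The rx skbs are aligned with
 *     skb_reserve(skb, (4 - (long) skb->data) & 0x03);
 * which advances skb->data by 0..3 bytes so that it starts on a 4-byte
 * boundary before being handed to pci_map_single().
 */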
Example no. 11
static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	unsigned int entry = priv->cur_rx % priv->rx_ring_size;
	u32 status;
	unsigned int pktlen;
	struct sk_buff *skb, *newskb;
	unsigned int limit = priv->rx_ring_size;
	u8 rssi, rate;

	while (!(priv->rx_ring[entry].status & cpu_to_le32(RDES0_STATUS_OWN))) {
		if (!limit--)
			break;

		status = le32_to_cpu(priv->rx_ring[entry].status);
		rate = (status & RDES0_STATUS_RXDR) >> 12;
		rssi = le32_to_cpu(priv->rx_ring[entry].length) &
			RDES1_STATUS_RSSI;

		pktlen = status & RDES0_STATUS_FL;
		if (pktlen > RX_PKT_SIZE) {
			if (net_ratelimit())
				wiphy_debug(dev->wiphy, "frame too long (%d)\n",
					    pktlen);
			pktlen = RX_PKT_SIZE;
		}

		if (!priv->soft_rx_crc && status & RDES0_STATUS_ES) {
			skb = NULL; /* old buffer will be reused */
			/* TODO: update RX error stats */
			/* TODO: check RDES0_STATUS_CRC*E */
		} else if (pktlen < RX_COPY_BREAK) {
			skb = dev_alloc_skb(pktlen);
			if (skb) {
				pci_dma_sync_single_for_cpu(
					priv->pdev,
					priv->rx_buffers[entry].mapping,
					pktlen, PCI_DMA_FROMDEVICE);
				memcpy(skb_put(skb, pktlen),
				       skb_tail_pointer(priv->rx_buffers[entry].skb),
				       pktlen);
				pci_dma_sync_single_for_device(
					priv->pdev,
					priv->rx_buffers[entry].mapping,
					RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
			}
		} else {
			newskb = dev_alloc_skb(RX_PKT_SIZE);
			if (newskb) {
				skb = priv->rx_buffers[entry].skb;
				skb_put(skb, pktlen);
				pci_unmap_single(
					priv->pdev,
					priv->rx_buffers[entry].mapping,
					RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
				priv->rx_buffers[entry].skb = newskb;
				priv->rx_buffers[entry].mapping =
					pci_map_single(priv->pdev,
						       skb_tail_pointer(newskb),
						       RX_PKT_SIZE,
						       PCI_DMA_FROMDEVICE);
			} else {
				skb = NULL;
				/* TODO: update rx dropped stats */
			}

			priv->rx_ring[entry].buffer1 =
				cpu_to_le32(priv->rx_buffers[entry].mapping);
		}

		priv->rx_ring[entry].status = cpu_to_le32(RDES0_STATUS_OWN |
							  RDES0_STATUS_SQL);
		priv->rx_ring[entry].length =
			cpu_to_le32(RX_PKT_SIZE |
				    (entry == priv->rx_ring_size - 1 ?
				     RDES1_CONTROL_RER : 0));

		if (skb) {
			struct ieee80211_rx_status rx_status = {0};

			if (priv->pdev->revision < ADM8211_REV_CA)
				rx_status.signal = rssi;
			else
				rx_status.signal = 100 - rssi;

			rx_status.rate_idx = rate;

			rx_status.freq = adm8211_channels[priv->channel - 1].center_freq;
			rx_status.band = IEEE80211_BAND_2GHZ;

			memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
			ieee80211_rx_irqsafe(dev, skb);
		}

		entry = (++priv->cur_rx) % priv->rx_ring_size;
	}

	/* TODO: check LPC and update stats? */
}
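The ring handling above follows the usual descriptor-ownership handshake; in outline (my own summary):

/*
 * Ownership handshake (illustrative):
 *
 *   - the NIC clears RDES0_STATUS_OWN when it has filled a descriptor and
 *     handed it back to the driver;
 *   - the loop processes entries while OWN is clear, bounded by a budget
 *     of one full ring per interrupt ('limit');
 *   - after the buffer has been copied or replaced, the driver rewrites
 *     status with RDES0_STATUS_OWN (and length/RER for the last slot) so
 *     the NIC can reuse the entry, then advances cur_rx modulo the ring.
 */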
Example no. 12
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = 0;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;

	DBG("%s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("%s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("%s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_mux_lock);
		return;
	}

	/* NET_IP_ALIGN is probably not required */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;
	/* if allocation fails attempt to get a smaller chunk of mem */
	do {
		skb_mux = dev_alloc_skb(sz + NET_IP_ALIGN + len);
		if (skb_mux)
			break;
		pr_err("%s: cannot allocate skb of size:%d\n", __func__,
			sz + NET_IP_ALIGN + len);
		if (sz + NET_IP_ALIGN + len <= PAGE_SIZE) {
			pr_err("%s: allocation failed\n", __func__);
			mutex_unlock(&sdio_mux_lock);
			return;
		}
		sz /= 2;
	} while (1);

	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("%s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("%s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_mux_lock);
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
		return;
	}
	mutex_unlock(&sdio_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);
	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("%s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("%s: read done\n", __func__);
	queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}
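The allocation loop above keeps halving the requested size until dev_alloc_skb() succeeds, and only gives up if a request that already fits in a page still fails. A user-space sketch of the same policy (stand-in allocator, names are mine):

#include <stdlib.h>

#define PAGE_SIZE_SKETCH 4096	/* stand-in for PAGE_SIZE */

/* Illustration of the halve-on-failure retry used above. */
static void *alloc_with_fallback(size_t want, size_t overhead)
{
	for (;;) {
		void *buf = malloc(want + overhead);

		if (buf)
			return buf;
		if (want + overhead <= PAGE_SIZE_SKETCH)
			return NULL;	/* even a page-sized request failed */
		want /= 2;
	}
}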
static int dhd_init_wlan_mem(void)
{
	int i;
	int j;

	for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) {
		wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE);
		if (!wlan_static_skb[i])
			goto err_skb_alloc;
	}

	for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) {
		wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE);
		if (!wlan_static_skb[i])
			goto err_skb_alloc;
	}

#if !defined(CONFIG_BCMDHD_PCIE)
	wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE);
	if (!wlan_static_skb[i])
		goto err_skb_alloc;
#endif /* !CONFIG_BCMDHD_PCIE */

	for (i = 0; i < PREALLOC_WLAN_SEC_NUM; i++) {
		if (wlan_mem_array[i].size > 0) {
			wlan_mem_array[i].mem_ptr =
				kmalloc(wlan_mem_array[i].size, GFP_KERNEL);

			if (!wlan_mem_array[i].mem_ptr)
				goto err_mem_alloc;
		}
	}

	wlan_static_scan_buf0 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL);
	if (!wlan_static_scan_buf0) {
		pr_err("Failed to alloc wlan_static_scan_buf0\n");
		goto err_mem_alloc;
	}

	wlan_static_scan_buf1 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL);
	if (!wlan_static_scan_buf1) {
		pr_err("Failed to alloc wlan_static_scan_buf1\n");
		goto err_mem_alloc;
	}

	wlan_static_dhd_info_buf = kmalloc(WLAN_DHD_INFO_BUF_SIZE, GFP_KERNEL);
	if (!wlan_static_dhd_info_buf) {
		pr_err("Failed to alloc wlan_static_dhd_info_buf\n");
		goto err_mem_alloc;
	}

#ifdef CONFIG_BCMDHD_PCIE
	wlan_static_if_flow_lkup = kmalloc(WLAN_DHD_IF_FLOW_LKUP_SIZE,
		GFP_KERNEL);
	if (!wlan_static_if_flow_lkup) {
		pr_err("Failed to alloc wlan_static_if_flow_lkup\n");
		goto err_mem_alloc;
	}
#else
	wlan_static_dhd_wlfc_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE,
		GFP_KERNEL);
	if (!wlan_static_dhd_wlfc_buf) {
		pr_err("Failed to alloc wlan_static_dhd_wlfc_buf\n");
		goto err_mem_alloc;
	}
#endif /* CONFIG_BCMDHD_PCIE */

#ifdef CONFIG_BCMDHD_DEBUG_PAGEALLOC
	wlan_static_dhd_memdump_buf = kmalloc(WLAN_DHD_MEMDUMP_SIZE, GFP_KERNEL);
	if (!wlan_static_dhd_memdump_buf) {
		pr_err("Failed to alloc wlan_static_dhd_memdump_buf\n");
		goto err_mem_alloc;
	}

	wlan_static_dhd_memdump_ram = kmalloc(WLAN_DHD_MEMDUMP_SIZE, GFP_KERNEL);
	if (!wlan_static_dhd_memdump_ram) {
		pr_err("Failed to alloc wlan_static_dhd_memdump_ram\n");
		goto err_mem_alloc;
	}
#endif /* CONFIG_BCMDHD_DEBUG_PAGEALLOC */

	pr_err("%s: WIFI MEM Allocated\n", __FUNCTION__);
	return 0;

err_mem_alloc:
#ifdef CONFIG_BCMDHD_DEBUG_PAGEALLOC
	if (wlan_static_dhd_memdump_ram)
		kfree(wlan_static_dhd_memdump_ram);

	if (wlan_static_dhd_memdump_buf)
		kfree(wlan_static_dhd_memdump_buf);
#endif /* CONFIG_BCMDHD_DEBUG_PAGEALLOC */

#ifdef CONFIG_BCMDHD_PCIE
	if (wlan_static_if_flow_lkup)
		kfree(wlan_static_if_flow_lkup);
#else
	if (wlan_static_dhd_wlfc_buf)
		kfree(wlan_static_dhd_wlfc_buf);
#endif /* CONFIG_BCMDHD_PCIE */
	if (wlan_static_dhd_info_buf)
		kfree(wlan_static_dhd_info_buf);

	if (wlan_static_scan_buf1)
		kfree(wlan_static_scan_buf1);

	if (wlan_static_scan_buf0)
		kfree(wlan_static_scan_buf0);

	pr_err("Failed to mem_alloc for WLAN\n");

	for (j = 0; j < i; j++)
		kfree(wlan_mem_array[j].mem_ptr);

	i = WLAN_SKB_BUF_NUM;
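	/* all static skbs were allocated once this point is reached, so free
	 * every slot in err_skb_alloc below (my reading of the fall-through) */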

err_skb_alloc:
	pr_err("Failed to skb_alloc for WLAN\n");
	for (j = 0; j < i; j++)
		dev_kfree_skb(wlan_static_skb[j]);

	return -ENOMEM;
}
Example no. 14
s32 rtl8192eu_hostap_mgnt_xmit_entry(_adapter *padapter, _pkt *pkt)
{
#if 0
//#ifdef PLATFORM_LINUX
	u16 fc;
	int rc, len, pipe;	
	unsigned int bmcst, tid, qsel;
	struct sk_buff *skb, *pxmit_skb;
	struct urb *urb;
	unsigned char *pxmitbuf;
	struct tx_desc *ptxdesc;
	struct rtw_ieee80211_hdr *tx_hdr;
	struct hostapd_priv *phostapdpriv = padapter->phostapdpriv;	
	struct net_device *pnetdev = padapter->pnetdev;
	HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter);
	struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);	

	
	//DBG_8192C("%s\n", __FUNCTION__);

	skb = pkt;
	
	len = skb->len;
	tx_hdr = (struct rtw_ieee80211_hdr *)(skb->data);
	fc = le16_to_cpu(tx_hdr->frame_ctl);
	bmcst = IS_MCAST(tx_hdr->addr1);

	if ((fc & RTW_IEEE80211_FCTL_FTYPE) != RTW_IEEE80211_FTYPE_MGMT)
		goto _exit;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) // http://www.mail-archive.com/[email protected]/msg17214.html
	pxmit_skb = dev_alloc_skb(len + TXDESC_SIZE);			
#else			
	pxmit_skb = netdev_alloc_skb(pnetdev, len + TXDESC_SIZE);
#endif		

	if(!pxmit_skb)
		goto _exit;

	pxmitbuf = pxmit_skb->data;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		goto _exit;
	}

	// ----- fill tx desc -----	
	ptxdesc = (struct tx_desc *)pxmitbuf;	
	_rtw_memset(ptxdesc, 0, sizeof(*ptxdesc));
		
	//offset 0	
	ptxdesc->txdw0 |= cpu_to_le32(len&0x0000ffff); 
	ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000);//default = 32 bytes for TX Desc
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);

	if(bmcst)	
	{
		ptxdesc->txdw0 |= cpu_to_le32(BIT(24));
	}	

	//offset 4	
	ptxdesc->txdw1 |= cpu_to_le32(0x00);//MAC_ID

	ptxdesc->txdw1 |= cpu_to_le32((0x12<<QSEL_SHT)&0x00001f00);

	ptxdesc->txdw1 |= cpu_to_le32((0x06<< 16) & 0x000f0000);//b mode

	//offset 8			

	//offset 12		
	ptxdesc->txdw3 |= cpu_to_le32((le16_to_cpu(tx_hdr->seq_ctl)<<16)&0xffff0000);

	//offset 16		
	ptxdesc->txdw4 |= cpu_to_le32(BIT(8));//driver uses rate
		
	//offset 20


	//HW append seq
	ptxdesc->txdw4 |= cpu_to_le32(BIT(7)); // Hw set sequence number
	ptxdesc->txdw3 |= cpu_to_le32((8 <<28)); //set bit3 to 1. Suggested by TimChen. 2009.12.29.
	

	rtl8192e_cal_txdesc_chksum(ptxdesc);
	// ----- end of fill tx desc -----

	//
	skb_put(pxmit_skb, len + TXDESC_SIZE);
	pxmitbuf = pxmitbuf + TXDESC_SIZE;
	_rtw_memcpy(pxmitbuf, skb->data, len);

	//DBG_8192C("mgnt_xmit, len=%x\n", pxmit_skb->len);


	// ----- prepare urb for submit -----
	
	//translate DMA FIFO addr to pipehandle
	//pipe = ffaddr2pipehdl(pdvobj, MGT_QUEUE_INX);
	pipe = usb_sndbulkpipe(pdvobj->pusbdev, pHalData->Queue2EPNum[(u8)MGT_QUEUE_INX]&0x0f);
	
	usb_fill_bulk_urb(urb, pdvobj->pusbdev, pipe,
			  pxmit_skb->data, pxmit_skb->len, rtl8192eu_hostap_mgnt_xmit_cb, pxmit_skb);
	
	urb->transfer_flags |= URB_ZERO_PACKET;
	usb_anchor_urb(urb, &phostapdpriv->anchored);
	rc = usb_submit_urb(urb, GFP_ATOMIC);
	if (rc < 0) {
		usb_unanchor_urb(urb);
		kfree_skb(skb);
	}
	usb_free_urb(urb);

	
_exit:	
	
	dev_kfree_skb_any(skb);

#endif

	return 0;

}
Example no. 15
static void z8530_rx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;
	int ct;
	
	/*
	 *	Is our receive engine in DMA mode
	 */
	 
	if(c->rxdma_on)
	{
		/*
		 *	Save the ready state and the buffer currently
		 *	being used as the DMA target
		 */
		 
		int ready=c->dma_ready;
		unsigned char *rxb=c->rx_buf[c->dma_num];
		unsigned long flags;
		
		/*
		 *	Complete this DMA. Necessary to find the length
		 */		
		 
		flags=claim_dma_lock();
		
		disable_dma(c->rxdma);
		clear_dma_ff(c->rxdma);
		c->rxdma_on=0;
		ct=c->mtu-get_dma_residue(c->rxdma);
		if(ct<0)
			ct=2;	/* Shit happens.. */
		c->dma_ready=0;
		
		/*
		 *	Normal case: the other slot is free, start the next DMA
		 *	into it immediately.
		 */
		 
		if(ready)
		{
			c->dma_num^=1;
			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
			set_dma_count(c->rxdma, c->mtu);
			c->rxdma_on = 1;
			enable_dma(c->rxdma);
			/* Stop any frames that we missed the head of 
			   from passing */
			write_zsreg(c, R0, RES_Rx_CRC);
		}
		else
			/* Can't occur as we don't re-enable the DMA irq until
			   after the flip is done */
			printk(KERN_WARNING "%s: DMA flip overrun!\n",
			       c->netdevice->name);

		release_dma_lock(flags);

		/*
		 *	Shove the old buffer into an sk_buff. We can't DMA
		 *	directly into one on a PC - it might be above the 16Mb
		 *	boundary. Optimisation - we could check to see if we
		 *	can avoid the copy. Optimisation 2 - make the memcpy
		 *	a copychecksum.
		 */

		skb = dev_alloc_skb(ct);
		if (skb == NULL) {
			c->netdevice->stats.rx_dropped++;
			printk(KERN_WARNING "%s: Memory squeeze.\n",
			       c->netdevice->name);
		} else {
			skb_put(skb, ct);
			skb_copy_to_linear_data(skb, rxb, ct);
			c->netdevice->stats.rx_packets++;
			c->netdevice->stats.rx_bytes += ct;
		}
		c->dma_ready = 1;
	} else {
		RT_LOCK;
		skb = c->skb;

		/*
		 *	The game we play for non DMA is similar. We want to
		 *	get the controller set up for the next packet as fast
		 *	as possible. We potentially only have one byte + the
		 *	fifo length for this. Thus we want to flip to the new
		 *	buffer and then mess around copying and allocating
		 *	things. For the current case it doesn't matter but
		 *	if you build a system where the sync irq isn't blocked
		 *	by the kernel IRQ disable then you need only block the
		 *	sync IRQ for the RT_LOCK area.
		 *
		 */
		ct=c->count;

		c->skb = c->skb2;
		c->count = 0;
		c->max = c->mtu;
		if (c->skb) {
			c->dptr = c->skb->data;
			c->max = c->mtu;
		} else {
			c->count = 0;
			c->max = 0;
		}
		RT_UNLOCK;

		c->skb2 = dev_alloc_skb(c->mtu);
		if (c->skb2 == NULL)
			printk(KERN_WARNING "%s: memory squeeze.\n",
			       c->netdevice->name);
		else
			skb_put(c->skb2, c->mtu);
		c->netdevice->stats.rx_packets++;
		c->netdevice->stats.rx_bytes += ct;
	}
	/*
	 *	If we received a frame we must now process it.
	 */
	if (skb) {
		skb_trim(skb, ct);
		c->rx_function(c, skb);
	} else {
		c->netdevice->stats.rx_dropped++;
		printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
	}
}
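The DMA branch above uses a classic double-buffer flip; in outline (my own summary, the details are in the code's own comments):

/*
 * rx_buf[0] / rx_buf[1] alternate as the DMA target (c->dma_num ^= 1):
 *   1. stop the DMA channel and compute ct = mtu - residue;
 *   2. if the other buffer is free (dma_ready), point the controller at it
 *      and re-enable DMA immediately, so reception continues while
 *   3. the just-filled buffer is copied into a freshly allocated skb and
 *      handed to c->rx_function().
 * The copy is unavoidable on ISA: the DMA buffer must stay below the
 * 16 MB boundary, as the comment in the code notes.
 */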
Example no. 16
static bool vnt_alloc_bufs(struct vnt_private *priv)
{
    struct vnt_usb_send_context *tx_context;
    struct vnt_rcb *rcb;
    int ii;

    for (ii = 0; ii < priv->num_tx_context; ii++) {
        tx_context = kmalloc(sizeof(struct vnt_usb_send_context),
                             GFP_KERNEL);
        if (!tx_context)
            goto free_tx;

        priv->tx_context[ii] = tx_context;
        tx_context->priv = priv;
        tx_context->pkt_no = ii;

        /* allocate URBs */
        tx_context->urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!tx_context->urb)
            goto free_tx;

        tx_context->in_use = false;
    }

    for (ii = 0; ii < priv->num_rcb; ii++) {
        priv->rcb[ii] = kzalloc(sizeof(struct vnt_rcb), GFP_KERNEL);
        if (!priv->rcb[ii]) {
            dev_err(&priv->usb->dev,
                    "failed to allocate rcb no %d\n", ii);
            goto free_rx_tx;
        }

        rcb = priv->rcb[ii];

        rcb->priv = priv;

        /* allocate URBs */
        rcb->urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!rcb->urb)
            goto free_rx_tx;

        rcb->skb = dev_alloc_skb(priv->rx_buf_sz);
        if (!rcb->skb)
            goto free_rx_tx;

        rcb->in_use = false;

        /* submit rx urb */
        if (vnt_submit_rx_urb(priv, rcb))
            goto free_rx_tx;
    }

    priv->interrupt_urb = usb_alloc_urb(0, GFP_KERNEL);
    if (!priv->interrupt_urb)
        goto free_rx_tx;

    priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
    if (!priv->int_buf.data_buf) {
        usb_free_urb(priv->interrupt_urb);
        goto free_rx_tx;
    }

    return true;

free_rx_tx:
    vnt_free_rx_bufs(priv);

free_tx:
    vnt_free_tx_bufs(priv);

    return false;
}
Example no. 17
/* We have a good packet(s), get it/them out of the buffers. */
static void seeq8005_rx(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int boguscount = 10;
	int pkt_hdr;
	int ioaddr = dev->base_addr;

	do {
		int next_packet;
		int pkt_len;
		int i;
		int status;

		status = inw(SEEQ_STATUS);
	  	outw( lp->receive_ptr, SEEQ_DMAAR);
		outw(SEEQCMD_FIFO_READ | SEEQCMD_RX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
	  	wait_for_buffer(dev);
	  	next_packet = ntohs(inw(SEEQ_BUFFER));
	  	pkt_hdr = inw(SEEQ_BUFFER);

		if (net_debug>2) {
			printk("%s: 0x%04x recv next=0x%04x, hdr=0x%04x\n",dev->name,lp->receive_ptr,next_packet,pkt_hdr);
		}

		if ((next_packet == 0) || ((pkt_hdr & SEEQPKTH_CHAIN)==0)) {	/* Read all the frames? */
			return;							/* Done for now */
		}

		if ((pkt_hdr & SEEQPKTS_DONE)==0)
			break;

		if (next_packet < lp->receive_ptr) {
			pkt_len = (next_packet + 0x10000 - ((DEFAULT_TEA+1)<<8)) - lp->receive_ptr - 4;
		} else {
			pkt_len = next_packet - lp->receive_ptr - 4;
		}

		if (next_packet < ((DEFAULT_TEA+1)<<8)) {			/* is the next_packet address sane? */
			printk("%s: recv packet ring corrupt, resetting board\n",dev->name);
			seeq8005_init(dev,1);
			return;
		}

		lp->receive_ptr = next_packet;

		if (net_debug>2) {
			printk("%s: recv len=0x%04x\n",dev->name,pkt_len);
		}

		if (pkt_hdr & SEEQPKTS_ANY_ERROR) {				/* There was an error. */
			dev->stats.rx_errors++;
			if (pkt_hdr & SEEQPKTS_SHORT) dev->stats.rx_frame_errors++;
			if (pkt_hdr & SEEQPKTS_DRIB) dev->stats.rx_frame_errors++;
			if (pkt_hdr & SEEQPKTS_OVERSIZE) dev->stats.rx_over_errors++;
			if (pkt_hdr & SEEQPKTS_CRC_ERR) dev->stats.rx_crc_errors++;
			/* skip over this packet */
			outw( SEEQCMD_FIFO_WRITE | SEEQCMD_DMA_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
			outw( (lp->receive_ptr & 0xff00)>>8, SEEQ_REA);
		} else {
			/* Malloc up new buffer. */
			struct sk_buff *skb;
			unsigned char *buf;

			skb = dev_alloc_skb(pkt_len);
			if (skb == NULL) {
				printk("%s: Memory squeeze, dropping packet.\n", dev->name);
				dev->stats.rx_dropped++;
				break;
			}
			skb_reserve(skb, 2);	/* align data on 16 byte */
			buf = skb_put(skb,pkt_len);

			insw(SEEQ_BUFFER, buf, (pkt_len + 1) >> 1);

			if (net_debug>2) {
				char * p = buf;
				printk("%s: recv ",dev->name);
				for(i=0;i<14;i++) {
					printk("%02x ",*(p++)&0xff);
				}
				printk("\n");
			}

			skb->protocol=eth_type_trans(skb,dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
	} while ((--boguscount) && (pkt_hdr & SEEQPKTH_CHAIN));
Example no. 18
struct net_device * islpci_probe(struct net_device *nwdev, struct pci_dev *pci_device, long ioaddr, int irq)
{
    unsigned char mac_addr[ETH_ALEN];
    long counter;
    queue_entry *pointerq;
    islpci_private *private_config;
    struct sk_buff *skb;
    
    if (nwdev == NULL)
    {
    	nwdev = init_etherdev(0,0);
    	if (nwdev == NULL)
    	    return NULL;
    }
    
    firmware_uploaded = 0;

    // setup the structure members
    nwdev->base_addr = ioaddr;
    nwdev->irq = irq;

    // initialize the function pointers
    nwdev->open = &islpci_open;
    nwdev->stop = &islpci_close;
    nwdev->get_stats = &islpci_statistics;
#ifdef WIRELESS_IOCTLS
    nwdev->do_ioctl = &isl_ioctl;
#endif

    nwdev->hard_start_xmit = &islpci_eth_transmit;

//    nwdev->set_multicast_list = &set_rx_mode;

#ifdef HAVE_TX_TIMEOUT
    nwdev->watchdog_timeo = ISLPCI_TX_TIMEOUT;
    nwdev->tx_timeout = &islpci_eth_tx_timeout;
#endif

    // FIXme
    mac_addr[0] = 0x01;
    mac_addr[1] = 0x01;
    mac_addr[2] = 0x01;
    mac_addr[3] = 0x01;
    mac_addr[4] = 0x01;
    mac_addr[5] = 0x01;
    mac_addr[6] = 0x01;
    mac_addr[7] = 0x01;
    memcpy(nwdev->dev_addr, mac_addr, ETH_ALEN);
    ether_setup(nwdev);
    
    // allocate a private device structure to the network device 
#ifdef CONFIG_ARCH_ISL3893
    if ( ( private_config = kmalloc(sizeof(islpci_private), GFP_KERNEL ) ) == NULL )
#else
    if ( ( private_config = kmalloc(sizeof(islpci_private), GFP_KERNEL | GFP_DMA ) ) == NULL )
#endif
    {
        // error allocating the DMA accessible memory area
        DEBUG(SHOW_ERROR_MESSAGES, "ERROR: Could not allocate additional "
            "memory \n");
        return NULL;
    }

    memset(private_config, 0, sizeof(islpci_private));
    nwdev->priv = private_config;
    private_config->next_module = root_islpci_device;
    root_islpci_device = nwdev;
    private_config->my_module = nwdev;
    private_config->pci_device = pci_device;

    // remap the PCI device base address to accessible
    if (private_config->remapped_device_base = ioremap(nwdev->base_addr, 
        ISL38XX_PCI_MEM_SIZE), private_config->remapped_device_base == NULL)
    {
        // error in remapping the PCI device memory address range
        DEBUG(SHOW_ERROR_MESSAGES, "ERROR: PCI memory remapping failed \n");
        return NULL;
    }
    
#ifdef CONFIG_ARCH_ISL3893
    // make sure the device is not running (anymore)
    writel(ISL38XX_DEV_INT_ABORT, private_config->remapped_device_base 
        + ISL38XX_DEV_INT_REG);
#endif

    // save the start and end address of the PCI memory area
    nwdev->mem_start = (unsigned long) private_config->remapped_device_base;
    nwdev->mem_end = nwdev->mem_start + ISL38XX_PCI_MEM_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES 
    DEBUG(SHOW_TRACING, "PCI Memory remapped to 0x%p \n", 
        private_config->remapped_device_base );
#endif

    // initialize all the queues in the private configuration structure
    islpci_init_queue(&private_config->mgmt_tx_freeq);
    islpci_init_queue(&private_config->mgmt_rx_freeq);
    islpci_init_queue(&private_config->mgmt_tx_shadowq);
    islpci_init_queue(&private_config->mgmt_rx_shadowq);
    islpci_init_queue(&private_config->ioctl_rx_queue);
    islpci_init_queue(&private_config->trap_rx_queue);
    islpci_init_queue(&private_config->pimfor_rx_queue);
    
#ifdef WDS_LINKS
    private_config->wdsp = kmalloc(sizeof(struct wds_priv), GFP_KERNEL );
    memset(private_config->wdsp, 0, sizeof(struct wds_priv));
#endif    
    
    // allocate the consistent DMA memory depending on the mode    
    if( islpci_alloc_memory( private_config, ALLOC_MEMORY_MODE ))
        return NULL;
 
    // clear the indexes in the frame pointer
    for (counter = 0; counter < ISL38XX_CB_QCOUNT; counter++)
    {
        private_config->control_block->driver_curr_frag[counter] = cpu_to_le32(0);
        private_config->control_block->device_curr_frag[counter] = cpu_to_le32(0);
    }

    // Joho, ready setting up the queueing stuff, now fill the shadow rx queues
    // of both the management and data queues
    for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++)
    {
        // get an entry from the freeq and put it in the shadow queue
        if (islpci_get_queue(private_config->remapped_device_base, 
            &private_config->mgmt_rx_freeq, &pointerq))
        {
            // Oops, no more entries in the freeq ?
            DEBUG(SHOW_ERROR_MESSAGES, "Error: No more entries in freeq\n");
            return NULL;
        }
        else
        {
            // use the entry for the device interface management rx queue
            // these should always be inline !
            private_config->control_block->rx_data_mgmt[counter].address =
                cpu_to_le32(pointerq->dev_address);

            // put the entry got from the freeq in the shadow queue
            islpci_put_queue(private_config->remapped_device_base, 
                &private_config->mgmt_rx_shadowq, pointerq);
        }
    }

    for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++)
    {
        // allocate an sk_buff for received data frame storage
        // each received frame consists of 1 fragment
        // include any required alignment operations
        if (skb = dev_alloc_skb(MAX_FRAGMENT_SIZE+2), skb == NULL)
        {
            // error allocating an sk_buff structure elements
            DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb \n");
            return NULL;
        }

		// add the new allocated sk_buff to the buffer array
		private_config->data_low_rx[counter] = skb;

		// map the allocated skb data area to pci
        private_config->pci_map_rx_address[counter] = pci_map_single(
        	private_config->pci_device, (void *) skb->data, MAX_FRAGMENT_SIZE+2,
            PCI_DMA_FROMDEVICE );
        if( private_config->pci_map_rx_address[counter] == (dma_addr_t) NULL )
        {
            // error mapping the buffer to device accessible memory address
            DEBUG(SHOW_ERROR_MESSAGES, "Error mapping DMA address\n");
            return NULL;
        }

		// update the fragment address values in the contro block rx queue
        private_config->control_block->rx_data_low[counter].address = cpu_to_le32( (u32) 
		 	private_config->pci_map_rx_address[counter] );
    }

    // since the receive queues are filled with empty fragments, now we can
    // set the corresponding indexes in the Control Block
    private_config->control_block->driver_curr_frag[ISL38XX_CB_RX_DATA_LQ] =
        cpu_to_le32(ISL38XX_CB_RX_QSIZE);
    private_config->control_block->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] =
        cpu_to_le32(ISL38XX_CB_MGMT_QSIZE);

    // reset the real index registers and full flags
    private_config->index_mgmt_rx = 0;
	private_config->index_mgmt_tx = 0;
    private_config->free_data_rx = 0;
    private_config->free_data_tx = 0;
    private_config->data_low_tx_full = 0;

    // reset the queue read locks, process wait counter
    private_config->ioctl_queue_lock = 0;
    private_config->processes_in_wait = 0;

    // set the power save state
    private_config->powerstate = ISL38XX_PSM_ACTIVE_STATE;
    private_config->resetdevice = 0;

    return nwdev;
}
Example no. 19
static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
				    struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/;
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast +=  skb->len;
		}

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}
		/* static bcn for roaming */
		rtl_beacon_statistic(hw, skb);
	}
}

static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
				      struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/;
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast +=  skb->len;
		}

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}

		/* static bcn for roaming */
		rtl_beacon_statistic(hw, skb);

		if (likely(rtl_action_proc(hw, skb, false)))
			ieee80211_rx(hw, skb);
		else
			dev_kfree_skb_any(skb);
	} else {
		dev_kfree_skb_any(skb);
	}
}

static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct sk_buff *_skb;
	struct sk_buff_head rx_queue;
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	skb_queue_head_init(&rx_queue);
	if (rtlusb->usb_rx_segregate_hdl)
		rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
	WARN_ON(skb_queue_empty(&rx_queue));
	while (!skb_queue_empty(&rx_queue)) {
		_skb = skb_dequeue(&rx_queue);
		_rtl_usb_rx_process_agg(hw, _skb);
		ieee80211_rx(hw, _skb);
	}
}

#define __RX_SKB_MAX_QUEUED	64

static void _rtl_rx_work(unsigned long param)
{
	struct rtl_usb *rtlusb = (struct rtl_usb *)param;
	struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&rtlusb->rx_queue))) {
		if (unlikely(IS_USB_STOP(rtlusb))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		if (likely(!rtlusb->usb_rx_segregate_hdl)) {
			_rtl_usb_rx_process_noagg(hw, skb);
		} else {
			/* TO DO */
			_rtl_rx_pre_process(hw, skb);
			pr_err("rx agg not supported\n");
		}
	}
}

static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
					unsigned int len)
{
#if NET_IP_ALIGN != 0
	unsigned int padding = 0;
#endif

	/* make function no-op when possible */
	if (NET_IP_ALIGN == 0 || len < sizeof(*hdr))
		return 0;

#if NET_IP_ALIGN != 0
	/* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */
	/* TODO: deduplicate common code, define helper function instead? */

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);

		padding ^= NET_IP_ALIGN;

		/* Input might be invalid, avoid accessing memory outside
		 * the buffer.
		 */
		if ((unsigned long)qc - (unsigned long)hdr < len &&
		    *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			padding ^= NET_IP_ALIGN;
	}

	if (ieee80211_has_a4(hdr->frame_control))
		padding ^= NET_IP_ALIGN;

	return padding;
#endif
}
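A worked example of the XOR logic above, assuming NET_IP_ALIGN == 2 (its usual value):

/*
 *   plain data frame               -> padding = 0
 *   QoS data frame                 -> padding = 2   (one toggle)
 *   QoS data frame with A-MSDU     -> padding = 0   (toggled twice)
 *   4-address QoS frame            -> padding = 0   (QoS + addr4)
 *   4-address QoS frame + A-MSDU   -> padding = 2
 *
 * Each header feature whose size is 2 mod 4 (the 2-byte QoS control field,
 * the 6-byte fourth address, the 14-byte A-MSDU subframe header that
 * follows) flips the 2-byte pad needed to keep the payload 4-byte aligned.
 */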

#define __RADIO_TAP_SIZE_RSV	32

static void _rtl_rx_completed(struct urb *_urb)
{
	struct rtl_usb *rtlusb = (struct rtl_usb *)_urb->context;
	struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int err = 0;

	if (unlikely(IS_USB_STOP(rtlusb)))
		goto free;

	if (likely(0 == _urb->status)) {
		unsigned int padding;
		struct sk_buff *skb;
		unsigned int qlen;
		unsigned int size = _urb->actual_length;
		struct ieee80211_hdr *hdr;

		if (size < RTL_RX_DESC_SIZE + sizeof(struct ieee80211_hdr)) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Too short packet from bulk IN! (len: %d)\n",
				 size);
			goto resubmit;
		}

		qlen = skb_queue_len(&rtlusb->rx_queue);
		if (qlen >= __RX_SKB_MAX_QUEUED) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Pending RX skbuff queue full! (qlen: %d)\n",
				 qlen);
			goto resubmit;
		}

		hdr = (void *)(_urb->transfer_buffer + RTL_RX_DESC_SIZE);
		padding = _rtl_rx_get_padding(hdr, size - RTL_RX_DESC_SIZE);

		skb = dev_alloc_skb(size + __RADIO_TAP_SIZE_RSV + padding);
		if (!skb) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Can't allocate skb for bulk IN!\n");
			goto resubmit;
		}

		_rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep);

		/* Make sure the payload data is 4 byte aligned. */
		skb_reserve(skb, padding);

		/* reserve some space for mac80211's radiotap */
		skb_reserve(skb, __RADIO_TAP_SIZE_RSV);

		memcpy(skb_put(skb, size), _urb->transfer_buffer, size);

		skb_queue_tail(&rtlusb->rx_queue, skb);
		tasklet_schedule(&rtlusb->rx_work_tasklet);

		goto resubmit;
	}

	switch (_urb->status) {
	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;
	default:
		break;
	}

resubmit:
	usb_anchor_urb(_urb, &rtlusb->rx_submitted);
	err = usb_submit_urb(_urb, GFP_ATOMIC);
	if (unlikely(err)) {
		usb_unanchor_urb(_urb);
		goto free;
	}
	return;

free:
	/* On some architectures, usb_free_coherent must not be called from
	 * hardirq context. Queue urb to cleanup list.
	 */
	usb_anchor_urb(_urb, &rtlusb->rx_cleanup_urbs);
}

#undef __RADIO_TAP_SIZE_RSV

static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
	struct urb *urb;

	usb_kill_anchored_urbs(&rtlusb->rx_submitted);

	tasklet_kill(&rtlusb->rx_work_tasklet);
	cancel_work_sync(&rtlpriv->works.lps_change_work);

	flush_workqueue(rtlpriv->works.rtl_wq);
	destroy_workqueue(rtlpriv->works.rtl_wq);

	skb_queue_purge(&rtlusb->rx_queue);

	while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
		usb_free_coherent(urb->dev, urb->transfer_buffer_length,
				urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
	}
}

static int _rtl_usb_receive(struct ieee80211_hw *hw)
{
	struct urb *urb;
	int err;
	int i;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	WARN_ON(0 == rtlusb->rx_urb_num);
	/* 1600 == 1514 + max WLAN header + rtk info */
	WARN_ON(rtlusb->rx_max_size < 1600);

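	/* Allocate, prepare and submit one bulk IN URB per slot; each URB is
	 * anchored on rx_submitted so it can be killed on teardown.
	 */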
	for (i = 0; i < rtlusb->rx_urb_num; i++) {
		err = -ENOMEM;
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Failed to alloc URB!!\n");
			goto err_out;
		}

		err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
		if (err < 0) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Failed to prep_rx_urb!!\n");
			usb_free_urb(urb);
			goto err_out;
		}

		usb_anchor_urb(urb, &rtlusb->rx_submitted);
		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err)
			goto err_out;
		usb_free_urb(urb);
	}
	return 0;

err_out:
	usb_kill_anchored_urbs(&rtlusb->rx_submitted);
	_rtl_usb_cleanup_rx(hw);
	return err;
}

static int rtl_usb_start(struct ieee80211_hw *hw)
{
	int err;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	err = rtlpriv->cfg->ops->hw_init(hw);
	if (!err) {
		rtl_init_rx_config(hw);

		/* Enable software */
		SET_USB_START(rtlusb);
		/* should after adapter start and interrupt enable. */
		set_hal_start(rtlhal);

		/* Start bulk IN */
		err = _rtl_usb_receive(hw);
	}

	return err;
}
Esempio n. 20
0
void wlRecv(struct net_device *netdev)
{
	struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
	int work_done = 0;
	wlrxdesc_t *pCurrent = ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pNextRxDesc;
	static Bool_e isFunctionBusy = WL_FALSE;
	int receivedHandled = 0;
	u_int32_t rxRdPtr;
	u_int32_t rxWrPtr;
	struct sk_buff *pRxSkBuff=NULL;
	WL_BUFF *wlb = NULL;
	void *pCurrentData;
	u_int8_t rxRate;
	int rxCount;
	int rssi;
	vmacApInfo_t *vmacSta_p = wlpptr->vmacSta_p;
	u_int32_t status;
	u_int32_t rssi_paths;
	WLDBG_ENTER(DBG_LEVEL_14);

	/* In a corner case the descriptors may be uninitialized and not
	 * usable; accessing them may cause a crash.
	 */
	if (isFunctionBusy || (pCurrent == NULL))
	{
		return;
	}
	isFunctionBusy = WL_TRUE;

	rxRdPtr = readl(wlpptr->ioBase0 + ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxDescRead);
	rxWrPtr = readl(wlpptr->ioBase0 + ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxDescWrite);

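	/* Walk the RX descriptor ring, handing at most work_to_do frames to
	 * the 802.11 input path per call.
	 */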
	while ((pCurrent->RxControl == EAGLE_RXD_CTRL_DMA_OWN) &&
	       (work_done < vmacSta_p->work_to_do))
	{
		/* AUTOCHANNEL */
		{
			if(vmacSta_p->StopTraffic)
				goto out;
		}

		rxCount = ENDIAN_SWAP16(pCurrent->PktLen);
		pRxSkBuff = pCurrent->pSkBuff;
		if (pRxSkBuff == NULL)
		{
			goto out;
		}
		pci_unmap_single(wlpptr->pPciDev, 
			ENDIAN_SWAP32(pCurrent->pPhysBuffData),
			((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize,
			PCI_DMA_FROMDEVICE);
		pCurrentData = pCurrent->pBuffData;
		rxRate = pCurrent->Rate;
		status = (u_int32_t)pCurrent->Status;
		pRxSkBuff->protocol = 0;
		if(pCurrent->QosCtrl & IEEE_QOS_CTL_AMSDU)
		{
			pRxSkBuff->protocol |= WL_WLAN_TYPE_AMSDU;
		}
		rssi = (int)pCurrent->RSSI + W836X_RSSI_OFFSET;
		rssi_paths = *((u_int32_t *)&pCurrent->HwRssiInfo);
		if (skb_tailroom(pRxSkBuff) >= rxCount)
		{
			skb_put(pRxSkBuff, rxCount ); 
			skb_pull(pRxSkBuff, 2); 
		}
		else
		{
			WLDBG_INFO(DBG_LEVEL_14,"Not enough tail room =%x recvlen=%x, pCurrent=%x, pCurrentData=%x", WL_BUFF_TAILROOM(pRxSkBuff), rxCount,pCurrent, pCurrentData);
			WL_SKB_FREE(pRxSkBuff);
			goto out;
		}

		wlpptr->netDevStats->rx_packets++;
		wlb = WL_BUFF_PTR(pRxSkBuff);
		WL_PREPARE_BUF_INFO(pRxSkBuff);
		if(pCurrent->HtSig2 & 0x8 )
		{
			u_int8_t ampdu_qos;
			/** use bit 3 for ampdu flag, and 0,1,2,3 for qos so as to save a register **/	
			ampdu_qos = 8|(pCurrent->QosCtrl&0x7);
			work_done+=ieee80211_input(wlpptr, wlb,rssi,rssi_paths,ampdu_qos,status);
		}	
		else
		{
			u_int8_t ampdu_qos;
			/** use bit 3 for ampdu flag, and 0,1,2,3 for qos so as to save a register **/	
			ampdu_qos = 0|(pCurrent->QosCtrl&0x7); 
			work_done+=ieee80211_input(wlpptr, wlb,rssi,rssi_paths,ampdu_qos,status);
		}

		wlpptr->netDevStats->rx_bytes += pRxSkBuff->len;
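		/* Refill this descriptor with a fresh skb so the hardware can
		 * reuse it for the next received frame.
		 */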
		{
			pCurrent->pSkBuff   = dev_alloc_skb(((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize);
			if (pCurrent->pSkBuff != NULL)
			{
				if(skb_linearize(pCurrent->pSkBuff))
				{
					WL_SKB_FREE(pCurrent->pSkBuff);
					printk(KERN_ERR "%s: Need linearize memory\n", netdev->name);
					goto out;
				}
				skb_reserve(pCurrent->pSkBuff , MIN_BYTES_HEADROOM);
				pCurrent->Status    = EAGLE_RXD_STATUS_OK;
				pCurrent->QosCtrl   = 0x0000;
				pCurrent->Channel   = 0x00;
				pCurrent->RSSI      = 0x00;
				pCurrent->SQ2       = 0x00;

				pCurrent->PktLen    = 6*netdev->mtu + NUM_EXTRA_RX_BYTES;
				pCurrent->pBuffData = pCurrent->pSkBuff->data;
				pCurrent->pPhysBuffData =
					ENDIAN_SWAP32(pci_map_single(wlpptr->pPciDev,
					pCurrent->pSkBuff->data,
					((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize/*+sizeof(struct skb_shared_info)*/,
					PCI_DMA_BIDIRECTIONAL));
			}
		}
out:

		receivedHandled++;
		pCurrent->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
		pCurrent->QosCtrl =0;
		rxRdPtr = ENDIAN_SWAP32(pCurrent->pPhysNext);
		pCurrent = pCurrent->pNext;
	}
	writel(rxRdPtr, wlpptr->ioBase0 + ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxDescRead);
	((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pNextRxDesc = pCurrent;
	isFunctionBusy = WL_FALSE;
	WLDBG_EXIT(DBG_LEVEL_14);
}
Esempio n. 21
0
/*******************************************************************************************************************************
 *function:  construct ADDBAREQ and ADDBARSP frame here together.
 *   input:  u8* 		Dst 	//ADDBA frame's destination
 *   	     PBA_RECORD 	pBA	//BA_RECORD entry which stores the necessary information for BA.
 *   	     u16 		StatusCode  //status code in RSP and I will use it to indicate whether it's RSP or REQ(will I?)
 *   	     u8			type	//indicate whether it's RSP(ACT_ADDBARSP) or REQ(ACT_ADDBAREQ)
 *  output:  none
 *  return:  sk_buff* 		skb     //return constructed skb to xmit
*******************************************************************************************************************************/
static struct sk_buff* ieee80211_ADDBA(struct ieee80211_device* ieee, u8* Dst, PBA_RECORD pBA, u16 StatusCode, u8 type)
{
	struct sk_buff *skb = NULL;
	struct ieee80211_hdr_3addr* BAReq = NULL;
	u8* tag = NULL;
	u16 tmp = 0;
	u16 len = ieee->tx_headroom + 9;
	//category(1) + action field(1) + Dialog Token(1) + BA Parameter Set(2) +  BA Timeout Value(2) +  BA Start SeqCtrl(2)(or StatusCode(2))
	IEEE80211_DEBUG(IEEE80211_DL_TRACE | IEEE80211_DL_BA, "========>%s(), frame(%d) sent to:%pM, ieee->dev:%p\n", __FUNCTION__, type, Dst, ieee->dev);
	if (pBA == NULL||ieee == NULL)
	{
		IEEE80211_DEBUG(IEEE80211_DL_ERR, "pBA(%p) is NULL or ieee(%p) is NULL\n", pBA, ieee);
		return NULL;
	}
	skb = dev_alloc_skb(len + sizeof(struct ieee80211_hdr_3addr)); //need to add anything else? FIXME
	if (skb == NULL)
	{
		IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc skb for ADDBA_REQ\n");
		return NULL;
	}

	memset(skb->data, 0, sizeof(struct ieee80211_hdr_3addr));  	//I wonder whether it's necessary. Apparently the kernel will not do it when allocating an skb.
	skb_reserve(skb, ieee->tx_headroom);

	BAReq = ( struct ieee80211_hdr_3addr *) skb_put(skb,sizeof( struct ieee80211_hdr_3addr));

	memcpy(BAReq->addr1, Dst, ETH_ALEN);
	memcpy(BAReq->addr2, ieee->dev->dev_addr, ETH_ALEN);

	memcpy(BAReq->addr3, ieee->current_network.bssid, ETH_ALEN);

	BAReq->frame_ctl = cpu_to_le16(IEEE80211_STYPE_MANAGE_ACT); //action frame

	//tag += sizeof( struct ieee80211_hdr_3addr); //move to action field
	tag = (u8*)skb_put(skb, 9);
	*tag++ = ACT_CAT_BA;
	*tag++ = type;
	// Dialog Token
	*tag++ = pBA->DialogToken;

	if (ACT_ADDBARSP == type)
	{
		// Status Code
		printk("=====>to send ADDBARSP\n");
		tmp = cpu_to_le16(StatusCode);
		memcpy(tag, (u8*)&tmp, 2);
		tag += 2;
	}
	// BA Parameter Set
	tmp = cpu_to_le16(pBA->BaParamSet.shortData);
	memcpy(tag, (u8*)&tmp, 2);
	tag += 2;
	// BA Timeout Value
	tmp = cpu_to_le16(pBA->BaTimeoutValue);
	memcpy(tag, (u8*)&tmp, 2);
	tag += 2;

	if (ACT_ADDBAREQ == type)
	{
	// BA Start SeqCtrl
		memcpy(tag,(u8*)&(pBA->BaStartSeqCtrl), 2);
		tag += 2;
	}

	IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
	return skb;
	//return NULL;
}
Esempio n. 22
0
static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
				    struct ba_record *pBA,
				    u16 StatusCode, u8 type)
{
	struct sk_buff *skb = NULL;
	struct rtllib_hdr_3addr *BAReq = NULL;
	u8 *tag = NULL;
	u16 len = ieee->tx_headroom + 9;

	netdev_dbg(ieee->dev, "%s(): frame(%d) sent to: %pM, ieee->dev:%p\n",
		   __func__, type, Dst, ieee->dev);

	if (pBA == NULL) {
		netdev_warn(ieee->dev, "pBA is NULL\n");
		return NULL;
	}
	skb = dev_alloc_skb(len + sizeof(struct rtllib_hdr_3addr));
	if (skb == NULL)
		return NULL;

	memset(skb->data, 0, sizeof(struct rtllib_hdr_3addr));

	skb_reserve(skb, ieee->tx_headroom);

	BAReq = (struct rtllib_hdr_3addr *)skb_put(skb,
		 sizeof(struct rtllib_hdr_3addr));

	ether_addr_copy(BAReq->addr1, Dst);
	ether_addr_copy(BAReq->addr2, ieee->dev->dev_addr);

	ether_addr_copy(BAReq->addr3, ieee->current_network.bssid);
	BAReq->frame_ctl = cpu_to_le16(RTLLIB_STYPE_MANAGE_ACT);

	tag = (u8 *)skb_put(skb, 9);
	*tag++ = ACT_CAT_BA;
	*tag++ = type;
	*tag++ = pBA->DialogToken;

	if (type == ACT_ADDBARSP) {
		RT_TRACE(COMP_DBG, "====>to send ADDBARSP\n");

		put_unaligned_le16(StatusCode, tag);
		tag += 2;
	}

	put_unaligned_le16(pBA->BaParamSet.shortData, tag);
	tag += 2;

	put_unaligned_le16(pBA->BaTimeoutValue, tag);
	tag += 2;

	if (type == ACT_ADDBAREQ) {
		memcpy(tag, (u8 *)&(pBA->BaStartSeqCtrl), 2);
		tag += 2;
	}

#ifdef VERBOSE_DEBUG
	print_hex_dump_bytes("rtllib_ADDBA(): ", DUMP_PREFIX_NONE, skb->data,
			     skb->len);
#endif
	return skb;
}
Esempio n. 23
0
static int dhd_init_wlan_mem(void)
{
	int i;
	int j;

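	/* Preallocate the static skb pool: eight 1-page buffers, eight 2-page
	 * buffers, then a single 4-page buffer.
	 */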
	for (i = 0; i < 8; i++) {
		wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE);
		if (!wlan_static_skb[i])
			goto err_skb_alloc;
	}

	for (; i < 16; i++) {
		wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE);
		if (!wlan_static_skb[i])
			goto err_skb_alloc;
	}

	wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE);
	if (!wlan_static_skb[i])
		goto err_skb_alloc;

	for (i = 0; i < PREALLOC_WLAN_SEC_NUM; i++) {
		wlan_mem_array[i].mem_ptr =
			kmalloc(wlan_mem_array[i].size, GFP_KERNEL);

		if (!wlan_mem_array[i].mem_ptr)
			goto err_mem_alloc;
	}

	wlan_static_scan_buf0 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL);
	if (!wlan_static_scan_buf0)
		goto err_mem_alloc;

	wlan_static_scan_buf1 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL);
	if (!wlan_static_scan_buf1)
		goto err_mem_alloc;

	wlan_static_dhd_info_buf = kmalloc(WLAN_DHD_INFO_BUF_SIZE, GFP_KERNEL);
	if (!wlan_static_dhd_info_buf)
		goto err_mem_alloc;

	wlan_static_dhd_wlfc_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE, GFP_KERNEL);
	if (!wlan_static_dhd_wlfc_buf)
		goto err_mem_alloc;

	pr_err("%s: WIFI MEM Allocated\n", __FUNCTION__);
	return 0;

err_mem_alloc:
	pr_err("Failed to mem_alloc for WLAN\n");
	if (wlan_static_scan_buf0)
		kfree(wlan_static_scan_buf0);
	if (wlan_static_scan_buf1)
		kfree(wlan_static_scan_buf1);
	if (wlan_static_dhd_info_buf)
		kfree(wlan_static_dhd_info_buf);

	for (j = 0; j < i; j++)
		kfree(wlan_mem_array[j].mem_ptr);

	i = WLAN_SKB_BUF_NUM;

err_skb_alloc:
	pr_err("Failed to skb_alloc for WLAN\n");
	for (j = 0; j < i; j++)
		dev_kfree_skb(wlan_static_skb[j]);

	return -ENOMEM;
}
Esempio n. 24
0
static void adapter_rx_packet(struct net_adapter *adapter,
		struct buffer_descriptor *bufdsc)
{
	struct hw_packet_header	*hdr;
	s32						rlen = bufdsc->length;
	u32						l;
	u8						*ofs;
	struct sk_buff				*rx_skb;
	ofs = (u8 *)bufdsc->buffer;

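	/* The receive buffer holds a sequence of packets, each prefixed by a
	 * two-character "W?" header that selects how it is handled below.
	 */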
	while (rlen > 0) {
		hdr = (struct hw_packet_header *)ofs;

		/* "WD", "WC", "WP" or "WE" */
		if (unlikely(hdr->id0 != 'W')) {
			/* Ignore if it is just the 4 byte alignment padding */
			pr_warn("Wrong packet ID (%02x %02x) rlen = %d\n",
				hdr->id0, hdr->id1, rlen);
			/* skip rest of packets */
			break;
		}

		/* change offset */
		ofs += sizeof(*hdr);
		rlen -= sizeof(*hdr);

		/* check packet type */
		switch (hdr->id1) {
		case 'P': {
			/* revert offset */
			ofs -= sizeof(*hdr);
			rlen += sizeof(*hdr);
			/* process packet */
			l = process_private_cmd(adapter, ofs);
			/* shift */
			ofs += l;
			rlen -= l;

			/* process next packet */
			continue;
			}
		case 'C':
			if (!adapter->downloading) {
				ofs += 2;
				rlen -= 2;
				control_recv(adapter, (u8 *)ofs, hdr->length);
				break;
			} else {
				hdr->length -= sizeof(*hdr);
				process_indicate_packet(adapter, ofs);
				break;
			}
		case 'D':
			ofs += 2;
			rlen -= 2;

			if (hdr->length > BUFFER_DATA_SIZE) {
				pr_warn("Data packet too large");
				adapter->netstats.rx_dropped++;
				break;
			}

			if (likely(hdr->length <= (WIMAX_MTU_SIZE + 2))) {
				rx_skb = cmc7xx_fetch_skb(adapter);
				if (!rx_skb) {
					pr_err("unable to allocate skb");
					break;
				}
			} else {
				rx_skb = dev_alloc_skb(hdr->length +
					(ETHERNET_ADDRESS_LENGTH * 2) +
					NET_IP_ALIGN);
				if (!rx_skb) {
					pr_err("unable to allocate skb");
					break;
				}
				cmc7xx_prepare_skb(adapter, rx_skb);
			}

			memcpy(skb_put(rx_skb, hdr->length), (u8 *)ofs,
			       hdr->length);
			rx_skb->protocol = eth_type_trans(rx_skb, adapter->net);
			if (netif_rx_ni(rx_skb) == NET_RX_DROP) {
				pr_debug("packet dropped!");
				adapter->netstats.rx_dropped++;
			}
			adapter->netstats.rx_packets++;
			adapter->netstats.rx_bytes += hdr->length +
				(ETHERNET_ADDRESS_LENGTH * 2);

			break;
		case 'E':
			pr_warn("%s :Wrong packet Extended ID [%02x %02x]",
						__func__, hdr->id0, hdr->id1);
			/* skip rest of buffer */
			goto out;
		default:
			pr_warn("%s :Wrong packet ID [%02x %02x]",
						__func__, hdr->id0, hdr->id1);
			/* skip rest of buffer */
			goto out;
		}

		ofs += hdr->length;
		rlen -= hdr->length;
	}

out:
	return;
}
Esempio n. 25
0
static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
				  const u8 *orig_addr, u32 orig_sn,
				  u8 target_flags, const u8 *target,
				  u32 target_sn, const u8 *da,
				  u8 hop_count, u8 ttl,
				  u32 lifetime, u32 metric, u32 preq_id,
				  struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;
	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
		      sizeof(mgmt->u.action.u.mesh_action);

	skb = dev_alloc_skb(local->tx_headroom +
			    hdr_len +
			    2 + 37); /* max HWMP IE */
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
	memset(mgmt, 0, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.u.mesh_action.action_code =
					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;

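	/* Pick the information element matching the requested frame type;
	 * ie_len is the length of the element body written after the ID.
	 */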
	switch (action) {
	case MPATH_PREQ:
		mhwmp_dbg(sdata, "sending PREQ to %pM\n", target);
		ie_len = 37;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREQ;
		break;
	case MPATH_PREP:
		mhwmp_dbg(sdata, "sending PREP to %pM\n", orig_addr);
		ie_len = 31;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREP;
		break;
	case MPATH_RANN:
		mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr);
		ie_len = sizeof(struct ieee80211_rann_ie);
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_RANN;
		break;
	default:
		kfree_skb(skb);
		return -ENOTSUPP;
		break;
	}
	*pos++ = ie_len;
	*pos++ = flags;
	*pos++ = hop_count;
	*pos++ = ttl;
	if (action == MPATH_PREP) {
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(target_sn, pos);
		pos += 4;
	} else {
		if (action == MPATH_PREQ) {
			put_unaligned_le32(preq_id, pos);
			pos += 4;
		}
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(orig_sn, pos);
		pos += 4;
	}
	put_unaligned_le32(lifetime, pos); /* interval for RANN */
	pos += 4;
	put_unaligned_le32(metric, pos);
	pos += 4;
	if (action == MPATH_PREQ) {
		*pos++ = 1; /* destination count */
		*pos++ = target_flags;
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(target_sn, pos);
		pos += 4;
	} else if (action == MPATH_PREP) {
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(orig_sn, pos);
		pos += 4;
	}

	ieee80211_tx_skb(sdata, skb);
	return 0;
}
Esempio n. 26
0
static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
				struct data_queue *queue,
				struct sk_buff *frag_skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb);
	struct ieee80211_tx_info *rts_info;
	struct sk_buff *skb;
	int size;

	if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
		size = sizeof(struct ieee80211_cts);
	else
		size = sizeof(struct ieee80211_rts);

	skb = dev_alloc_skb(size + rt2x00dev->hw->extra_tx_headroom);
	if (!skb) {
		WARNING(rt2x00dev, "Failed to create RTS/CTS frame.\n");
		return NETDEV_TX_BUSY;
	}

	skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom);
	skb_put(skb, size);

	/*
	 * Copy TX information over from original frame to
	 * RTS/CTS frame. Note that we set the no encryption flag
	 * since we don't want this frame to be encrypted.
	 * RTS frames should be acked, while CTS-to-self frames
	 * should not. The ready for TX flag is cleared to prevent
	 * it being automatically sent when the descriptor is
	 * written to the hardware.
	 */
	memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
	rts_info = IEEE80211_SKB_CB(skb);
	rts_info->control.hw_key = NULL;
	rts_info->flags &= ~IEEE80211_TX_CTL_USE_RTS_CTS;
	rts_info->flags &= ~IEEE80211_TX_CTL_USE_CTS_PROTECT;
	rts_info->flags &= ~IEEE80211_TX_CTL_REQ_TX_STATUS;

	if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
		rts_info->flags |= IEEE80211_TX_CTL_NO_ACK;
	else
		rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK;

	if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
		ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif,
					frag_skb->data, size, tx_info,
					(struct ieee80211_cts *)(skb->data));
	else
		ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif,
				  frag_skb->data, size, tx_info,
				  (struct ieee80211_rts *)(skb->data));

	if (rt2x00queue_write_tx_frame(queue, skb)) {
		dev_kfree_skb_any(skb);
		WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n");
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
Esempio n. 27
0
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
					 struct sta_info *sta, u16 tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.action = IEEE80211_AMPDU_TX_OPERATIONAL,
		.tid = tid,
		.timeout = 0,
		.ssn = 0,
	};

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	params.buf_size = tid_tx->buf_size;
	params.amsdu = tid_tx->amsdu;

	ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
	       sta->sta.addr, tid);

	drv_ampdu_action(local, sta->sdata, &params);

	/*
	 * synchronize with TX path, while splicing the TX path
	 * should block so it won't put more packets onto pending.
	 */
	spin_lock_bh(&sta->lock);

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
	/*
	 * Now mark as operational. This will be visible
	 * in the TX path, and lets it go lock-free in
	 * the common case.
	 */
	set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	ieee80211_agg_splice_finish(sta->sdata, tid);

	spin_unlock_bh(&sta->lock);

	ieee80211_agg_start_txq(sta, tid, true);
}

void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_start_tx_ba_cb(sdata, ra, tid);

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return;
	}

	mutex_lock(&local->sta_mtx);
	sta = sta_info_get_bss(sdata, ra);
	if (!sta) {
		mutex_unlock(&local->sta_mtx);
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		return;
	}

	mutex_lock(&sta->ampdu_mlme.mtx);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (WARN_ON(!tid_tx)) {
		ht_dbg(sdata, "addBA was not requested!\n");
		goto unlock;
	}

	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
		goto unlock;

	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
		ieee80211_agg_tx_operational(local, sta, tid);

 unlock:
	mutex_unlock(&sta->ampdu_mlme.mtx);
	mutex_unlock(&local->sta_mtx);
}

void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				      const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb))
		return;

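	/* The zero-length skb is only a message carrier: the RA/TID pair is
	 * stashed in skb->cb and consumed later by the interface work handler.
	 */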
	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);

int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				   enum ieee80211_agg_stop_reason reason)
{
	int ret;

	mutex_lock(&sta->ampdu_mlme.mtx);

	ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);

	mutex_unlock(&sta->ampdu_mlme.mtx);

	return ret;
}
Esempio n. 28
0
static int __lpc_handle_recv(struct net_device *ndev, int budget)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rxconsidx, len, ethst;
	struct rx_status_t *prxstat;
	u8 *prdbuf;
	int rx_done = 0;

	/* Get the current RX buffer indexes */
	rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
	while (rx_done < budget && rxconsidx !=
			readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
		/* Get pointer to receive status */
		prxstat = &pldat->rx_stat_v[rxconsidx];
		len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;

		/* Status error? */
		ethst = prxstat->statusinfo;
		if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
		    (RXSTATUS_ERROR | RXSTATUS_RANGE))
			ethst &= ~RXSTATUS_ERROR;

		if (ethst & RXSTATUS_ERROR) {
			int si = prxstat->statusinfo;
			/* Check statuses */
			if (si & RXSTATUS_OVERRUN) {
				/* Overrun error */
				ndev->stats.rx_fifo_errors++;
			} else if (si & RXSTATUS_CRC) {
				/* CRC error */
				ndev->stats.rx_crc_errors++;
			} else if (si & RXSTATUS_LENGTH) {
				/* Length error */
				ndev->stats.rx_length_errors++;
			} else if (si & RXSTATUS_ERROR) {
				/* Other error */
				ndev->stats.rx_length_errors++;
			}
			ndev->stats.rx_errors++;
		} else {
			/* Packet is good */
			skb = dev_alloc_skb(len);
			if (!skb) {
				ndev->stats.rx_dropped++;
			} else {
				prdbuf = skb_put(skb, len);

				/* Copy packet from buffer */
				memcpy(prdbuf, pldat->rx_buff_v +
					rxconsidx * ENET_MAXF_SIZE, len);

				/* Pass to upper layer */
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
				ndev->stats.rx_packets++;
				ndev->stats.rx_bytes += len;
			}
		}

		/* Increment consume index */
		rxconsidx = rxconsidx + 1;
		if (rxconsidx >= ENET_RX_DESC)
			rxconsidx = 0;
		writel(rxconsidx,
		       LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
		rx_done++;
	}

	return rx_done;
}
Esempio n. 29
0
int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret, extra;
	u16 fc;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
	struct sk_buff *skb;
	struct wl12xx_arp_rsp_template *tmpl;
	struct ieee80211_hdr_3addr *hdr;
	struct arphdr *arp_hdr;

	skb = dev_alloc_skb(sizeof(*hdr) + sizeof(__le16) + sizeof(*tmpl) +
			    WL1271_EXTRA_SPACE_MAX);
	if (!skb) {
		wl1271_error("failed to allocate buffer for arp rsp template");
		return -ENOMEM;
	}

	skb_reserve(skb, sizeof(*hdr) + WL1271_EXTRA_SPACE_MAX);

	tmpl = (struct wl12xx_arp_rsp_template *)skb_put(skb, sizeof(*tmpl));
	memset(tmpl, 0, sizeof(*tmpl));

	/* llc layer */
	memcpy(tmpl->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
	tmpl->llc_type = cpu_to_be16(ETH_P_ARP);

	/* arp header */
	arp_hdr = &tmpl->arp_hdr;
	arp_hdr->ar_hrd = cpu_to_be16(ARPHRD_ETHER);
	arp_hdr->ar_pro = cpu_to_be16(ETH_P_IP);
	arp_hdr->ar_hln = ETH_ALEN;
	arp_hdr->ar_pln = 4;
	arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);

	/* arp payload */
	memcpy(tmpl->sender_hw, vif->addr, ETH_ALEN);
	tmpl->sender_ip = wlvif->ip_addr;

	/* encryption space */
	switch (wlvif->encryption_type) {
	case KEY_TKIP:
		extra = WL1271_EXTRA_SPACE_TKIP;
		break;
	case KEY_AES:
		extra = WL1271_EXTRA_SPACE_AES;
		break;
	case KEY_NONE:
	case KEY_WEP:
	case KEY_GEM:
		extra = 0;
		break;
	default:
		wl1271_warning("Unknown encryption type: %d",
			       wlvif->encryption_type);
		ret = -EINVAL;
		goto out;
	}

	if (extra) {
		u8 *space = skb_push(skb, extra);
		memset(space, 0, extra);
	}

	/* QoS header - BE */
	if (wlvif->sta.qos)
		memset(skb_push(skb, sizeof(__le16)), 0, sizeof(__le16));

	/* mac80211 header */
	hdr = (struct ieee80211_hdr_3addr *)skb_push(skb, sizeof(*hdr));
	memset(hdr, 0, sizeof(*hdr));
	fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS;
	if (wlvif->sta.qos)
		fc |= IEEE80211_STYPE_QOS_DATA;
	else
		fc |= IEEE80211_STYPE_DATA;
	if (wlvif->encryption_type != KEY_NONE)
		fc |= IEEE80211_FCTL_PROTECTED;

	hdr->frame_control = cpu_to_le16(fc);
	memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
	memcpy(hdr->addr2, vif->addr, ETH_ALEN);
	memset(hdr->addr3, 0xff, ETH_ALEN);

	ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_ARP_RSP,
				      skb->data, skb->len, 0,
				      wlvif->basic_rate);
out:
	dev_kfree_skb(skb);
	return ret;
}
Esempio n. 30
0
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % RX_RING_SIZE;
	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		if (!(desc_status & RDFEND))
			mdp->stats.rx_length_errors++;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			mdp->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				mdp->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				mdp->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				mdp->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				mdp->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			mdp->stats.rx_packets++;
			mdp->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
		/* The size of the buffer is 16 byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
					DMA_FROM_DEVICE);
			skb->dev = ndev;
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		if (entry >= RX_RING_SIZE - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
		sh_eth_write(ndev, EDRRR_R, EDRRR);

	return 0;
}