Example #1
struct sk_buff *
__adf_nbuf_alloc_ncnb(adf_os_device_t osdev, size_t size, int reserve, int align, int prio)
{
    struct sk_buff *skb;
    unsigned long offset;

    if (align)
        size += (align - 1);

#ifdef CONFIG_COMCERTO_ZONE_DMA_NCNB
    skb = __dev_alloc_skb(size, GFP_DMA_NCNB | GFP_ATOMIC);
#else
    skb = __dev_alloc_skb(size, GFP_ATOMIC);
#endif

    if (!skb) {
        printk(KERN_ERR "ERROR: NBUF alloc failed\n");
        return NULL;
    }
    memset(skb->cb, 0x0, sizeof(skb->cb));

    /*
     * The default is for netbuf fragments to be interpreted
     * as wordstreams rather than bytestreams.
     * Set the CVG_NBUF_MAX_EXTRA_FRAGS+1 wordstream_flags bits,
     * to provide this default.
     */
    NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) =
        (1 << (CVG_NBUF_MAX_EXTRA_FRAGS + 1)) - 1;

    /*
     * XXX: how about we reserve first, then align?
     */

    /*
     * Align and make sure that the tail and data are adjusted properly.
     */
    if (align) {
        offset = ((unsigned long) skb->data) % align;
        if (offset)
            skb_reserve(skb, align - offset);
    }

    /*
     * NOTE: alloc doesn't take responsibility if reserve unaligns the
     * data pointer.
     */
    skb_reserve(skb, reserve);

    return skb;
}
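The XXX note above asks whether reserving before aligning would be better. A minimal sketch of that alternative order, under the assumption that the surrounding semantics stay the same; this is illustrative, not code from the driver:

#include <linux/skbuff.h>

static struct sk_buff *nbuf_alloc_reserve_first(size_t size, int reserve,
						int align)
{
	struct sk_buff *skb;
	unsigned long offset;

	/* Overallocate enough for the reserve plus worst-case alignment slack. */
	skb = __dev_alloc_skb(size + reserve + (align ? align - 1 : 0),
			      GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Reserve first; the alignment step below then fixes up whatever
	 * misalignment the reserve introduced. */
	skb_reserve(skb, reserve);

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}
	return skb;
}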
Example #2
static inline struct sk_buff *__alloc_skb_from_kernel(int size, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;

	if (size > SKB_1_5K)
		skb = __dev_alloc_skb(SKB_4K, gfp_mask);
	else if (size > SKB_16)
		skb = __dev_alloc_skb(SKB_1_5K, gfp_mask);
	else if (size > 0)
		skb = __dev_alloc_skb(SKB_16, gfp_mask);
	if (!skb)
		CCCI_ERR_MSG(-1, BM, "%ps alloc skb from kernel fail, size=%d\n", __builtin_return_address(0), size);
	return skb;
}
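A minimal, self-contained sketch of the size-bucket idea used above; the SKB_* values here are illustrative assumptions, not the CCCI driver's definitions:

#include <linux/skbuff.h>

/* Hypothetical bucket sizes; the real driver defines its own. */
#define SKB_16   16
#define SKB_1_5K 1536
#define SKB_4K   4096

static struct sk_buff *alloc_bucketed(int size, gfp_t gfp_mask)
{
	/* Round the request up to one of three fixed sizes so freed
	 * buffers can be reused for any request in the same bucket. */
	if (size > SKB_1_5K)
		return __dev_alloc_skb(SKB_4K, gfp_mask);
	if (size > SKB_16)
		return __dev_alloc_skb(SKB_1_5K, gfp_mask);
	return __dev_alloc_skb(SKB_16, gfp_mask);
}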
Example #3
static int
fe_dma_ring_alloc(END_DEVICE *ei_local)
{
	u32 i;

	/* allocate QDMA HW TX pool */
	ei_local->fq_head_page = dma_alloc_coherent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, &ei_local->fq_head_page_phy, GFP_KERNEL);
	if (!ei_local->fq_head_page)
		goto err_cleanup;

	/* allocate QDMA HW TX descriptors */
	ei_local->fq_head = dma_alloc_coherent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), &ei_local->fq_head_phy, GFP_KERNEL);
	if (!ei_local->fq_head)
		goto err_cleanup;

	/* allocate QDMA SW TX descriptors */
	ei_local->txd_pool = dma_alloc_coherent(NULL, NUM_TX_DESC * sizeof(struct QDMA_txdesc), &ei_local->txd_pool_phy, GFP_KERNEL);
	if (!ei_local->txd_pool)
		goto err_cleanup;

	/* allocate PDMA (or QDMA) RX descriptors */
	ei_local->rxd_ring = dma_alloc_coherent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->rxd_ring_phy, GFP_KERNEL);
	if (!ei_local->rxd_ring)
		goto err_cleanup;

#if !defined (CONFIG_RAETH_QDMATX_QDMARX)
	/* allocate QDMA RX stub descriptors */
	ei_local->qrx_ring = dma_alloc_coherent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->qrx_ring_phy, GFP_KERNEL);
	if (!ei_local->qrx_ring)
		goto err_cleanup;

	/* allocate QDMA RX stub buffer */
	ei_local->qrx_buff = __dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN, GFP_KERNEL);
	if (!ei_local->qrx_buff)
		goto err_cleanup;
#endif

	/* allocate PDMA (or QDMA) RX buffers */
	for (i = 0; i < NUM_RX_DESC; i++) {
		ei_local->rxd_buff[i] = __dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN, GFP_KERNEL);
		if (!ei_local->rxd_buff[i])
			goto err_cleanup;
	}

	return 0;

err_cleanup:
	fe_dma_ring_free(ei_local);
	return -ENOMEM;
}
Example #4
PNDIS_PACKET DuplicatePacket(
    IN	PRTMP_ADAPTER	pAd,
    IN	PNDIS_PACKET	pPacket,
    IN	UCHAR			FromWhichBSSID)
{
    struct sk_buff	*skb;
    PNDIS_PACKET	pRetPacket = NULL;
    USHORT			DataSize;
    UCHAR			*pData;

    DataSize = (USHORT) GET_OS_PKT_LEN(pPacket);
    pData = (PUCHAR) GET_OS_PKT_DATAPTR(pPacket);


    skb = skb_clone(RTPKT_TO_OSPKT(pPacket), MEM_ALLOC_FLAG);
    if (skb)
    {
        skb->dev = get_netdev_from_bssid(pAd, FromWhichBSSID);
        pRetPacket = OSPKT_TO_RTPKT(skb);
    }

#if 0
    if ((skb = __dev_alloc_skb(DataSize + 2+32, MEM_ALLOC_FLAG)) != NULL)
    {
        skb_reserve(skb, 2+32);
        NdisMoveMemory(skb->tail, pData, DataSize);
        skb_put(skb, DataSize);
        skb->dev = get_netdev_from_bssid(pAd, FromWhichBSSID);
        pRetPacket = OSPKT_TO_RTPKT(skb);
    }
#endif

    return pRetPacket;

}
Example #5
static struct sk_buff *_rtl_prep_rx_urb(struct ieee80211_hw *hw,
					struct rtl_usb *rtlusb,
					struct urb *urb,
					gfp_t gfp_mask)
{
	struct sk_buff *skb;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	skb = __dev_alloc_skb((rtlusb->rx_max_size + __RADIO_TAP_SIZE_RSV),
			       gfp_mask);
	if (!skb) {
		RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
			 "Failed to __dev_alloc_skb!!\n");
		return ERR_PTR(-ENOMEM);
	}

	/* reserve some space for mac80211's radiotap */
	skb_reserve(skb, __RADIO_TAP_SIZE_RSV);
	usb_fill_bulk_urb(urb, rtlusb->udev,
			  usb_rcvbulkpipe(rtlusb->udev, rtlusb->in_ep),
			  skb->data, min(skb_tailroom(skb),
			  (int)rtlusb->rx_max_size),
			  _rtl_rx_completed, skb);

	_rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep);
	return skb;
}
Example #6
int ctcm_ch_alloc_buffer(struct channel *ch)
{
	clear_normalized_cda(&ch->ccw[1]);
	ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC | GFP_DMA);
	if (ch->trans_skb == NULL) {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s trans_skb allocation error",
			CTCM_FUNTAIL, ch->id,
			(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
		return -ENOMEM;
	}

	ch->ccw[1].count = ch->max_bufsize;
	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
		dev_kfree_skb(ch->trans_skb);
		ch->trans_skb = NULL;
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s set norm_cda failed",
			CTCM_FUNTAIL, ch->id,
			(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
		return -ENOMEM;
	}

	ch->ccw[1].count = 0;
	ch->trans_skb_data = ch->trans_skb->data;
	ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
	return 0;
}
Example #7
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
				u32 len,
				gfp_t gfp_mask)
{
	struct sk_buff *skb;
	u32 off;

	/*
	 * Cache-line-align.  This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */

	/* Note: the kernel can allocate a value greater than
	 * what we ask it to give us. We really only need 4 KB, as that
	 * is all this hardware supports, and in fact we need at least 3849
	 * bytes, as that is the max A-MSDU size this hardware supports.
	 * Unfortunately this means we may get 8 KB here from the
	 * kernel... and that is actually what is observed on some
	 * systems :( */
	skb = __dev_alloc_skb(len + common->cachelsz - 1, gfp_mask);
	if (skb != NULL) {
		off = ((unsigned long) skb->data) % common->cachelsz;
		if (off != 0)
			skb_reserve(skb, common->cachelsz - off);
	} else {
		pr_err("skbuff alloc of size %u failed\n", len);
		return NULL;
	}

	return skb;
}
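The same overallocate-and-reserve alignment pattern, reduced to its core. cache_line_size() stands in for common->cachelsz and is an assumption about the target kernel, not part of the ath code:

#include <linux/cache.h>
#include <linux/skbuff.h>

static struct sk_buff *rx_alloc_aligned(u32 len, gfp_t gfp_mask)
{
	unsigned int csz = cache_line_size();
	struct sk_buff *skb;
	u32 off;

	/* Overallocate by one cache line minus one byte, then slide
	 * skb->data forward so the payload starts on a line boundary. */
	skb = __dev_alloc_skb(len + csz - 1, gfp_mask);
	if (!skb)
		return NULL;

	off = ((unsigned long)skb->data) % csz;
	if (off)
		skb_reserve(skb, csz - off);

	return skb;
}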
Example #8
static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
        int headroom, gfp_t gfp_mask)
{
    struct ieee80211_txb *txb;
    int i;
    txb = kmalloc(sizeof(struct ieee80211_txb) + (sizeof(u8 *) * nr_frags),
                  gfp_mask);
    if (!txb)
        return NULL;

    memset(txb, 0, sizeof(struct ieee80211_txb));
    txb->nr_frags = nr_frags;
    txb->frag_size = txb_size;

    for (i = 0; i < nr_frags; i++) {
        txb->fragments[i] = __dev_alloc_skb(txb_size + headroom,
                                            gfp_mask);
        if (unlikely(!txb->fragments[i])) {
            i--;
            break;
        }
        skb_reserve(txb->fragments[i], headroom);
    }
    if (unlikely(i != nr_frags)) {
        while (i >= 0)
            dev_kfree_skb_any(txb->fragments[i--]);
        kfree(txb);
        return NULL;
    }
    return txb;
}
Example #9
static int p54u_init_urbs(struct ieee80211_hw *dev)
{
	struct p54u_priv *priv = dev->priv;
	struct urb *entry;
	struct sk_buff *skb;
	struct p54u_rx_info *info;

	while (skb_queue_len(&priv->rx_queue) < 32) {
		skb = __dev_alloc_skb(MAX_RX_SIZE, GFP_KERNEL);
		if (!skb)
			break;
		entry = usb_alloc_urb(0, GFP_KERNEL);
		if (!entry) {
			kfree_skb(skb);
			break;
		}
		usb_fill_bulk_urb(entry, priv->udev, usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), skb_tail_pointer(skb), MAX_RX_SIZE, p54u_rx_cb, skb);
		info = (struct p54u_rx_info *) skb->cb;
		info->urb = entry;
		info->dev = dev;
		skb_queue_tail(&priv->rx_queue, skb);
		usb_submit_urb(entry, GFP_KERNEL);
	}

	return 0;
}
Example #10
PNDIS_PACKET duplicate_pkt(
	IN	PRTMP_ADAPTER	pAd,
	IN	PUCHAR			pHeader802_3,
    IN  UINT            HdrLen,
	IN	PUCHAR			pData,
	IN	ULONG			DataSize,
	IN	UCHAR			FromWhichBSSID)
{
	struct sk_buff	*skb;
	PNDIS_PACKET	pPacket = NULL;


	if ((skb = __dev_alloc_skb(HdrLen + DataSize + 2, MEM_ALLOC_FLAG)) != NULL)
	{
		skb_reserve(skb, 2);
		NdisMoveMemory(skb_tail_pointer(skb), pHeader802_3, HdrLen);
		skb_put(skb, HdrLen);
		NdisMoveMemory(skb_tail_pointer(skb), pData, DataSize);
		skb_put(skb, DataSize);
		skb->dev = get_netdev_from_bssid(pAd, FromWhichBSSID);
		pPacket = OSPKT_TO_RTPKT(skb);
	}

	return pPacket;
}
Example #11
int get_rx_buffers(void *priv, void **pkt_priv, void **buffer, int size)
{
	struct net_device *dev = (struct net_device *) priv;
	struct sk_buff *skb = NULL;
	void *ptr = NULL;

	DBG0("[%s] dev:%s\n", __func__, dev->name);
	skb = __dev_alloc_skb(size, GFP_ATOMIC);
	if (skb == NULL) {
		DBG0("%s: unable to alloc skb\n", __func__);
		return -ENOMEM;
	}

	/* TODO skb_reserve(skb, NET_IP_ALIGN); for ethernet mode */
	/* Populate some params now. */
	skb->dev = dev;
	ptr = skb_put(skb, size);

	skb_set_network_header(skb, 0);

	/* done with skb setup, return the buffer pointer. */
	*pkt_priv = skb;
	*buffer = ptr;

	return 0;
}
Example #12
static struct sk_buff *osl_alloc_skb(unsigned int len)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
	return __dev_alloc_skb(len, GFP_ATOMIC);
#else
	return dev_alloc_skb(len);
#endif
}
Example #13
File: skbuff.c Project: foxwolf/yjd
/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
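As the comment above says, the built-in headroom belongs to the stack; callers that need their own headroom (for example NET_IP_ALIGN padding so the IP header lands on a 4-byte boundary) must reserve it explicitly. A minimal sketch of that pattern:

#include <linux/skbuff.h>

static struct sk_buff *rx_alloc_with_headroom(unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN);

	if (!skb)
		return NULL;

	/* Shift data so the IP header of a received frame is 4-byte aligned. */
	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}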
Example #14
struct SimDevicePacket lib_dev_create_packet(struct SimDevice *dev, int size)
{
	struct SimDevicePacket packet;
	int len = get_hack_size(size);
	struct sk_buff *skb = __dev_alloc_skb(len, __GFP_WAIT);

	/* Guard against allocation failure before dereferencing the skb. */
	if (!skb) {
		packet.token = NULL;
		packet.buffer = NULL;
		return packet;
	}

	packet.token = skb;
	packet.buffer = skb_put(skb, len);
	return packet;
}
Example #15
inline struct sk_buff *_rtw_skb_alloc(u32 sz)
{
#ifdef PLATFORM_LINUX
	return __dev_alloc_skb(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return dev_alloc_skb(sz);
#endif /* PLATFORM_FREEBSD */
}
Example #16
static struct sk_buff *osl_alloc_skb(unsigned int len)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
	gfp_t flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;

	return __dev_alloc_skb(len, flags);
#else
	return dev_alloc_skb(len);
#endif
}
Example #17
static struct recv_buf* sd_recv_rxfifo(PADAPTER padapter, u32 size)
{
	u32 readsize, allocsize, ret;
	u8 *preadbuf;
	_pkt *ppkt;
	struct recv_priv *precvpriv;
	struct recv_buf	*precvbuf;


	readsize = size;

	//3 1. alloc skb
	// align to block size
	allocsize = _RND(readsize, adapter_to_dvobj(padapter)->intf_data.block_transfer_len);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) // http://www.mail-archive.com/[email protected]/msg17214.html
	ppkt = __dev_alloc_skb(allocsize, GFP_KERNEL);
#else
	ppkt = __netdev_alloc_skb(padapter->pnetdev, allocsize, GFP_KERNEL);
#endif
	if (ppkt == NULL) {
		RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("%s: alloc_skb fail! alloc=%d read=%d\n", __FUNCTION__, allocsize, readsize));
		return NULL;
	}

	//3 2. read data from rxfifo
	preadbuf = skb_put(ppkt, readsize);
//	rtw_read_port(padapter, WLAN_RX0FF_DEVICE_ID, readsize, preadbuf);
	ret = sdio_read_port(&padapter->iopriv.intf, WLAN_RX0FF_DEVICE_ID, readsize, preadbuf);
	if (ret == _FAIL) {
		dev_kfree_skb_any(ppkt);
		RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("%s: read port FAIL!\n", __FUNCTION__));
		return NULL;
	}

	//3 3. alloc recvbuf
	precvpriv = &padapter->recvpriv;
	precvbuf = rtw_dequeue_recvbuf(&precvpriv->free_recv_buf_queue);
	if (precvbuf == NULL) {
		dev_kfree_skb_any(ppkt);
		RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("%s: alloc recvbuf FAIL!\n", __FUNCTION__));
		return NULL;
	}

	//3 4. init recvbuf
	precvbuf->pskb = ppkt;

	precvbuf->len = ppkt->len;

	precvbuf->phead = ppkt->head;
	precvbuf->pdata = ppkt->data;
	precvbuf->ptail = skb_tail_pointer(precvbuf->pskb);
	precvbuf->pend = skb_end_pointer(precvbuf->pskb);

	return precvbuf;
}
Example #18
File: cmd.c Project: UNwS/rtl8192su
struct sk_buff *r92su_h2c_alloc(struct r92su *r92su, int len, gfp_t flag)
{
	struct sk_buff *skb;
	unsigned int new_len =
		ALIGN(len + TX_DESC_SIZE + H2CC2H_HDR_LEN, H2CC2H_HDR_LEN);
	skb = __dev_alloc_skb(new_len, flag);
	if (skb)
		skb_reserve(skb, TX_DESC_SIZE + H2CC2H_HDR_LEN);

	return skb;
}
Example #19
static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len)
{
	struct sk_buff *skb;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
	gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	skb = __dev_alloc_skb(len, flags);
#else
	skb = dev_alloc_skb(len);
#endif 
	return skb;
}
Example #20
static void queue_rx(void)
{
	void *ptr;
	rx_skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
	if (!rx_skb)
		return;
	ptr = skb_put(rx_skb, BUFFER_SIZE);
	/* need a way to handle error case */
	rx_skb_dma_addr = dma_map_single(NULL, ptr, BUFFER_SIZE,
						DMA_FROM_DEVICE);
	sps_transfer_one(bam_rx_pipe, rx_skb_dma_addr,
				BUFFER_SIZE, NULL,
				SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
}
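The "need a way to handle error case" note above is where a dma_mapping_error() check would normally go. A sketch under the assumption that a real struct device is available (modern kernels reject a NULL device in dma_map_single()); the helper name and parameters are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int queue_rx_checked(struct device *dev, struct sk_buff **skbp,
			    dma_addr_t *addrp, unsigned int buf_size)
{
	struct sk_buff *skb = __dev_alloc_skb(buf_size, GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	/* Map the freshly reserved payload area and verify the mapping
	 * before handing the address to the hardware. */
	*addrp = dma_map_single(dev, skb_put(skb, buf_size), buf_size,
				DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *addrp)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	*skbp = skb;
	return 0;
}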
Example #21
static struct sk_buff *osl_alloc_skb(unsigned int len)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
    gfp_t flags = GFP_ATOMIC;
    struct sk_buff *skb;

    skb = __dev_alloc_skb(len, flags);
    return skb;
#else
    return dev_alloc_skb(len);
#endif 
}
Example #22
static void wl1251_rx_body(struct wl1251 *wl,
			   struct wl1251_rx_descriptor *desc)
{
	struct sk_buff *skb;
	struct ieee80211_rx_status status;
	u8 *rx_buffer, beacon = 0;
	u16 length, *fc;
	u32 curr_id, last_id_inc, rx_packet_ring_addr;

	length = WL1251_RX_ALIGN(desc->length - PLCP_HEADER_LENGTH);
	curr_id = (desc->flags & RX_DESC_SEQNUM_MASK) >> RX_DESC_PACKETID_SHIFT;
	last_id_inc = (wl->rx_last_id + 1) % (RX_MAX_PACKET_ID + 1);

	if (last_id_inc != curr_id) {
		wl1251_warning("curr ID:%d, last ID inc:%d",
			       curr_id, last_id_inc);
		wl->rx_last_id = curr_id;
	} else {
		wl->rx_last_id = last_id_inc;
	}

	rx_packet_ring_addr = wl->data_path->rx_packet_ring_addr +
		sizeof(struct wl1251_rx_descriptor) + 20;
	if (wl->rx_current_buffer)
		rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size;

	skb = __dev_alloc_skb(length, GFP_KERNEL);
	if (!skb) {
		wl1251_error("Couldn't allocate RX frame");
		return;
	}

	rx_buffer = skb_put(skb, length);
	wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length);

	/* The actual length doesn't include the target's alignment */
	skb_trim(skb, desc->length - PLCP_HEADER_LENGTH);

	fc = (u16 *)skb->data;

	if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)
		beacon = 1;

	wl1251_rx_status(wl, desc, &status, beacon);

	wl1251_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len,
		     beacon ? "beacon" : "");

	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
	ieee80211_rx_ni(wl->hw, skb);

	wl1251_update_rate(wl, length);
}
Example #23
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}
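The retry above is a general fallback pattern: allocate normally, and only force ZONE_DMA when the first buffer turns out not to be DMA-reachable. A stripped-down sketch with the mapping step abstracted behind a hypothetical try_map() callback standing in for map_descbuffer() plus b43_dma_mapping_error():

#include <linux/skbuff.h>
#include <linux/types.h>

static struct sk_buff *rx_alloc_dma_fallback(unsigned int size,
					     gfp_t gfp_flags,
					     bool (*try_map)(struct sk_buff *))
{
	struct sk_buff *skb = __dev_alloc_skb(size, gfp_flags);

	if (!skb)
		return NULL;
	if (try_map(skb))
		return skb;

	/* First buffer was not DMA-reachable: retry from ZONE_DMA. */
	dev_kfree_skb_any(skb);
	skb = __dev_alloc_skb(size, gfp_flags | GFP_DMA);
	if (!skb)
		return NULL;
	if (try_map(skb))
		return skb;

	dev_kfree_skb_any(skb);
	return NULL;
}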
Example #24
static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len)
{
	struct sk_buff *skb;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
	gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA)
	flags |= GFP_ATOMIC;
#endif
	skb = __dev_alloc_skb(len, flags);
#else
	skb = dev_alloc_skb(len);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
	return skb;
}
Example #25
static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
{
	struct wl1271_rx_descriptor *desc;
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	u8 *buf;
	u8 beacon = 0;
	u8 is_data = 0;

	/*
	 * In PLT mode we seem to get frames and mac80211 warns about them,
	 * workaround this by not retrieving them at all.
	 */
	if (unlikely(wl->state == WL1271_STATE_PLT))
		return -EINVAL;

	skb = __dev_alloc_skb(length, GFP_KERNEL);
	if (!skb) {
		wl1271_error("Couldn't allocate RX frame");
		return -ENOMEM;
	}

	buf = skb_put(skb, length);
	memcpy(buf, data, length);

	/* the data read starts with the descriptor */
	desc = (struct wl1271_rx_descriptor *) buf;

	/* now we pull the descriptor out of the buffer */
	skb_pull(skb, sizeof(*desc));

	hdr = (struct ieee80211_hdr *)skb->data;
	if (ieee80211_is_beacon(hdr->frame_control))
		beacon = 1;
	if (ieee80211_is_data_present(hdr->frame_control))
		is_data = 1;

	wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);

	wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len,
		     beacon ? "beacon" : "");

	skb_trim(skb, skb->len - desc->pad_len);

	skb_queue_tail(&wl->deferred_rx_queue, skb);
	ieee80211_queue_work(wl->hw, &wl->netstack_work);

	return is_data;
}
Example #26
static void __16_reload_work(struct work_struct *work)
{
	struct sk_buff *skb;
	
	CCCI_DBG_MSG(-1, BM, "refill 16B skb pool\n");
	while (skb_pool_16.skb_list.qlen < SKB_POOL_SIZE_16) {
		skb = dev_alloc_skb(SKB_16);
		if (!skb)
			skb = __dev_alloc_skb(SKB_16, GFP_KERNEL);
		if (skb) {
			skb_queue_tail(&skb_pool_16.skb_list, skb);
		} else {
			CCCI_ERR_MSG(-1, BM, "fail to reload 16B pool\n");
			break;	/* avoid spinning forever when allocation keeps failing */
		}
	}
}
Example #27
static int __init rtw_mem_init(void)
{
	int i;
	u32 max_recvbuf_sz = 0;
	SIZE_PTR tmpaddr = 0;
	SIZE_PTR alignment = 0;
	struct sk_buff *pskb = NULL;

	printk("%s\n", __func__);

#ifdef CONFIG_USE_USB_BUFFER_ALLOC_RX
	/* Note: dev, size and dma are not defined in this function; this
	 * block only compiles where the surrounding code provides them. */
	for (i = 0; i < NR_RECVBUFF; i++)
	{
		rtk_buf_mem[i] = usb_buffer_alloc(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
	}
#endif //CONFIG_USE_USB_BUFFER_ALLOC_RX

	skb_queue_head_init(&rtk_skb_mem_q);

	rtw_hal_get_def_var(padapter, HAL_DEF_MAX_RECVBUF_SZ, &max_recvbuf_sz);
	if (max_recvbuf_sz == 0)
		max_recvbuf_sz = MAX_RECVBUF_SZ;
	DBG_871X("%s: max_recvbuf_sz: %d\n", __func__, max_recvbuf_sz);

	for (i = 0; i < NR_PREALLOC_RECV_SKB; i++)
	{
		pskb = __dev_alloc_skb(max_recvbuf_sz + RECVBUFF_ALIGN_SZ, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
		if (pskb)
		{
			tmpaddr = (SIZE_PTR)pskb->data;
			alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
			skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));

			skb_queue_tail(&rtk_skb_mem_q, pskb);
		}
		else
		{
			printk("%s, alloc skb memory fail!\n", __func__);
		}

		pskb = NULL;
	}

	printk("%s, rtk_skb_mem_q len : %d\n", __func__, skb_queue_len(&rtk_skb_mem_q));

	return 0;
}
Example #28
struct sk_buff *os_alloc_skb(void)
{
	int offset = 0;
	struct sk_buff *skb = __dev_alloc_skb(NLM_RX_BUF_SIZE, GFP_KERNEL);

	if (!skb) {
		return NULL;
	}

	/* align the data to the next cache line */
	offset = ((unsigned long)skb->data + SMP_CACHE_BYTES) &
		~(SMP_CACHE_BYTES - 1);
	skb_reserve(skb, (offset - (unsigned long)skb->data));

	return skb;
}
Example #29
File: pcie.c Project: Lyude/linux
static int qtnf_ep_fw_send(struct qtnf_pcie_bus_priv *priv, uint32_t size,
			   int blk, const u8 *pblk, const u8 *fw)
{
	struct pci_dev *pdev = priv->pdev;
	struct qtnf_bus *bus = pci_get_drvdata(pdev);

	struct qtnf_pcie_fw_hdr *hdr;
	u8 *pdata;

	int hds = sizeof(*hdr);
	struct sk_buff *skb = NULL;
	int len = 0;
	int ret;

	skb = __dev_alloc_skb(QTN_PCIE_FW_BUFSZ, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	skb->len = QTN_PCIE_FW_BUFSZ;
	skb->dev = NULL;

	hdr = (struct qtnf_pcie_fw_hdr *)skb->data;
	memcpy(hdr->boardflg, QTN_PCIE_BOARDFLG, strlen(QTN_PCIE_BOARDFLG));
	hdr->fwsize = cpu_to_le32(size);
	hdr->seqnum = cpu_to_le32(blk);

	if (blk)
		hdr->type = cpu_to_le32(QTN_FW_DSUB);
	else
		hdr->type = cpu_to_le32(QTN_FW_DBEGIN);

	pdata = skb->data + hds;

	len = QTN_PCIE_FW_BUFSZ - hds;
	if (pblk >= (fw + size - len)) {
		len = fw + size - pblk;
		hdr->type = cpu_to_le32(QTN_FW_DEND);
	}

	hdr->pktlen = cpu_to_le32(len);
	memcpy(pdata, pblk, len);
	hdr->crc = cpu_to_le32(~crc32(0, pdata, len));

	ret = qtnf_pcie_data_tx(bus, skb);

	return (ret == NETDEV_TX_OK) ? len : 0;
}
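Assigning skb->len directly, as above, works only because the skb is freshly allocated; the conventional equivalent is skb_put(), which also advances the tail pointer and checks tailroom. A sketch of the same setup using skb_put(); the helper is hypothetical:

#include <linux/skbuff.h>

static struct sk_buff *fw_chunk_alloc(unsigned int bufsz)
{
	struct sk_buff *skb = __dev_alloc_skb(bufsz, GFP_KERNEL);

	if (!skb)
		return NULL;
	skb_put(skb, bufsz);	/* equivalent to skb->len = bufsz on a fresh skb */
	return skb;
}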
Example #30
void*
osl_pktget(osl_t *osh, uint len)
{
    struct sk_buff *skb;
    gfp_t flags;

    flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
    if ((skb = __dev_alloc_skb(len, flags))) {
        skb_put(skb, len);
        skb->priority = 0;


        osh->pub.pktalloced++;
    }

    return ((void*) skb);
}