Example #1
void*
osl_malloc(osl_t *osh, uint size)
{
	void *addr;

	
	if (osh)
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

#ifdef DHD_USE_STATIC_BUF
	if (bcm_static_buf)
	{
		int i = 0;
		if ((size >= PAGE_SIZE) && (size <= STATIC_BUF_SIZE))
		{
			down(&bcm_static_buf->static_sem);
			
			for (i = 0; i < MAX_STATIC_BUF_NUM; i++)
			{
				if (bcm_static_buf->buf_use[i] == 0)
					break;
			}
			
			if (i == MAX_STATIC_BUF_NUM)
			{
				up(&bcm_static_buf->static_sem);
				OSL_MSG_INFO(("osl_malloc: all static buff in use!\n"));
				goto original;
			}
			
			bcm_static_buf->buf_use[i] = 1;
			up(&bcm_static_buf->static_sem);

			bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
			if (osh)
				osh->malloced += size;

			return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
		}
	}
original:
#endif /* DHD_USE_STATIC_BUF */

	if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
		OSL_MSG_ERROR(("osl_malloc: GFP_ATOMIC failed, trying GFP_KERNEL\n"));
		if ((addr = kmalloc(size, GFP_KERNEL)) == NULL) {
			OSL_MSG_ERROR(("osl_malloc: GFP_KERNEL failed also\n"));
			if (osh)
				osh->failed++;
			return (NULL);
		}
	}
	if (osh)
		osh->malloced += size;

	return (addr);
}
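A minimal usage sketch for the allocator above, assuming the matching osl_mfree(osh, addr, size) counterpart from the same OSL; the helper name example_scratch is hypothetical and only illustrates the call pattern.

/* Sketch only: example_scratch() is a hypothetical caller. */
static int
example_scratch(osl_t *osh, uint len)
{
	void *buf;

	/* osl_malloc() tries the static pool first (when compiled in),
	 * then kmalloc(GFP_ATOMIC), then kmalloc(GFP_KERNEL); NULL means
	 * both kmalloc attempts failed and osh->failed was incremented.
	 */
	if ((buf = osl_malloc(osh, len)) == NULL)
		return -1;

	/* ... use buf ... */

	/* Pass the same size back so osh->malloced stays balanced
	 * (assumes osl_mfree(osh, addr, size) exists in this OSL build).
	 */
	osl_mfree(osh, buf, len);
	return 0;
}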
Example #2
/*
 * Allocate and add an object to packet pool.
 */
void *
osl_ctfpool_add(osl_t *osh)
{
	struct sk_buff *skb;

	if ((osh == NULL) || (osh->ctfpool == NULL))
		return NULL;

	spin_lock_bh(&osh->ctfpool->lock);
	ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);

	/* No need to allocate more objects */
	if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
		spin_unlock_bh(&osh->ctfpool->lock);
		return NULL;
	}

	/* Allocate a new skb and add it to the ctfpool */
	skb = dev_alloc_skb(osh->ctfpool->obj_size);
	if (skb == NULL) {
		OSL_MSG_ERROR(("%s: skb alloc of len %d failed\n", __FUNCTION__,
		       osh->ctfpool->obj_size));
		spin_unlock_bh(&osh->ctfpool->lock);
		return NULL;
	}

	/* Add to ctfpool */
	skb->next = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = skb;
	osh->ctfpool->fast_frees++;
	osh->ctfpool->curr_obj++;

	/* Hijack a skb member to store ptr to ctfpool */
	CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;

	/* Use bit flag to indicate skb from fast ctfpool */
	PKTFAST(osh, skb) = FASTBUF;

	spin_unlock_bh(&osh->ctfpool->lock);

	return skb;
}
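Since osl_ctfpool_add() returns NULL both when the pool is already at max_obj and when dev_alloc_skb() fails, a caller can top the pool up with a simple loop. A hypothetical helper, not taken from the source:

/* Hypothetical refill helper, not from the source. */
static void
example_ctfpool_refill(osl_t *osh)
{
	/* Each successful call grows curr_obj by one; NULL means the pool
	 * hit max_obj or dev_alloc_skb() failed, so stop either way.
	 */
	while (osl_ctfpool_add(osh) != NULL)
		;
}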
Example #3
void*
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;

	
	if (len > DHD_SKB_4PAGE_BUFSIZE)
	{
		OSL_MSG_ERROR(("osl_pktget_static: Do we really need this big skb?? len=%d\n", len));
		return osl_pktget_kernel(osh, len);
	}

	
	down(&bcm_static_skb->osl_pkt_sem);
	if (len <= DHD_SKB_1PAGE_BUFSIZE)
	{
		/* Look for a free slot in the 4K (1-page) pool */
		for (i = 0; i < MAX_STATIC_PKT_NUM; i++)
		{
			if (bcm_static_skb->pkt_use[i] == 0)
				break;
		}

		if (i != MAX_STATIC_PKT_NUM)
		{
			bcm_static_skb->pkt_use[i] = 1;
			up(&bcm_static_skb->osl_pkt_sem);

			skb = bcm_static_skb->skb_4k[i];
			skb->tail = skb->data + len;
			skb->len = len;
			
			return skb;
		}
	}

	/* Fall back to the 8K (2-page) pool */
	if (len <= DHD_SKB_2PAGE_BUFSIZE)
	{
		for (i = 0; i < MAX_STATIC_PKT_NUM; i++)
		{
			if (bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] == 0)
				break;
		}

		if (i != MAX_STATIC_PKT_NUM)
		{
			bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] = 1;
			up(&bcm_static_skb->osl_pkt_sem);
			skb = bcm_static_skb->skb_8k[i];
			skb->tail = skb->data + len;
			skb->len = len;
			
			return skb;
		}
	}

	/* Last resort among the static pools: the single 16K (4-page) skb */
	if (bcm_static_skb->pkt_use[MAX_STATIC_PKT_NUM*2] == 0)
	{
		bcm_static_skb->pkt_use[MAX_STATIC_PKT_NUM*2] = 1;
		up(&bcm_static_skb->osl_pkt_sem);

		skb = bcm_static_skb->skb_16k;
		skb->tail = skb->data + len;
		skb->len = len;

		return skb;
	}
	
	up(&bcm_static_skb->osl_pkt_sem);
	OSL_MSG_ERROR(("osl_pktget_static: all static pkt in use!\n"));
	return osl_pktget(osh, len);
}
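A usage sketch under the assumption that the same DHD_USE_STATIC_BUF build also provides osl_pktfree_static(osh, p, send) to clear the pkt_use[] slot again; the helper and the 1500-byte length are illustrative only.

/* Sketch only: assumes osl_pktfree_static(osh, p, send) exists and
 * releases the pkt_use[] slot taken here.
 */
static void
example_static_pkt(osl_t *osh)
{
	void *p;

	/* An Ethernet-sized request lands in the 4K (1-page) pool,
	 * slots 0 .. MAX_STATIC_PKT_NUM-1.
	 */
	if ((p = osl_pktget_static(osh, 1500)) == NULL)
		return;

	/* ... fill and hand the packet to the stack or the bus ... */

	osl_pktfree_static(osh, p, FALSE);
}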
Example #4
osl_t *
osl_attach(void *pdev, uint bustype, bool pkttag)
{
	osl_t *osh;

	osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
	ASSERT(osh);

	bzero(osh, sizeof(osl_t));

	
	/* Check that error map has the right number of entries in it */
	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));

	osh->magic = OS_HANDLE_MAGIC;
	osh->malloced = 0;
	osh->failed = 0;
	osh->dbgmem_list = NULL;
	osh->pdev = pdev;
	osh->pub.pkttag = pkttag;
	osh->bustype = bustype;

	switch (bustype) {
		case PCI_BUS:
		case SI_BUS:
		case PCMCIA_BUS:
			osh->pub.mmbus = TRUE;
			break;
		case JTAG_BUS:
		case SDIO_BUS:
		case USB_BUS:
		case SPI_BUS:
			osh->pub.mmbus = FALSE;
			break;
		default:
			ASSERT(FALSE);
			break;
	}

#ifdef DHD_USE_STATIC_BUF
	if (!bcm_static_buf) {
		if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(3, STATIC_BUF_SIZE +
			STATIC_BUF_TOTAL_LEN))) {
			OSL_MSG_ERROR(("osl_attach: can not alloc static buf!\n"));
		} else {
			OSL_MSG_INFO(("osl_attach: alloc static buf at %x!\n",
				(unsigned int)bcm_static_buf));

			/* Only initialize when the prealloc region is actually there */
			init_MUTEX(&bcm_static_buf->static_sem);

			bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
		}
	}
	
	if (bcm_static_buf && !bcm_static_skb)
	{
		int i;
#ifndef CUSTOMER_HW_SAMSUNG
		void *skb_buff_ptr = 0;
#endif
		bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
#ifdef CUSTOMER_HW_SAMSUNG
		for (i = 0; i < MAX_STATIC_PKT_NUM; i++) {
			bcm_static_skb->skb_4k[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE);
			if (bcm_static_skb->skb_4k[i] == NULL) {
				OSL_MSG_ERROR(("osl_attach: 4K memory allocation failure. idx=%d\n", i));
				goto err;
			}
		}
			
		for (i = 0; i < MAX_STATIC_PKT_NUM; i++) {
			bcm_static_skb->skb_8k[i] = dev_alloc_skb_kernel(DHD_SKB_2PAGE_BUFSIZE);
			if (bcm_static_skb->skb_8k[i] == NULL) {
				OSL_MSG_ERROR(("osl_attach: 8K memory allocation failure. idx=%d\n", i));
				goto err;
			}
		}

		bcm_static_skb->skb_16k = dev_alloc_skb_kernel(DHD_SKB_4PAGE_BUFSIZE);
		if (bcm_static_skb->skb_16k == NULL) {
			OSL_MSG_ERROR(("osl_attach: 16K memory allocation failure. idx=%d\n", i));
			goto err;
		}
#else
		skb_buff_ptr = dhd_os_prealloc(4, 0);

		bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *)*16);
#endif /* CUSTOMER_HW_SAMSUNG */
		for (i = 0; i < MAX_STATIC_PKT_NUM*2+1; i++)
			bcm_static_skb->pkt_use[i] = 0;

		init_MUTEX(&bcm_static_skb->osl_pkt_sem);
	}
#endif /* DHD_USE_STATIC_BUF */
	return osh;
err:
	kfree(osh);
	return NULL;
}
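A lifecycle sketch, assuming the usual osl_detach(osh) counterpart that releases the handle; the probe/remove helpers and the global are hypothetical and only illustrate where osl_attach() normally sits in the driver.

/* Hypothetical probe/remove pair; g_example_osh only illustrates
 * where the handle is kept.
 */
static osl_t *g_example_osh;

static int
example_probe(void *pdev)
{
	/* Assumes osl_detach(osh) is the matching teardown call. */
	g_example_osh = osl_attach(pdev, SDIO_BUS, TRUE);
	if (g_example_osh == NULL)
		return -1;	/* prealloc of the static skbs failed (err: path above) */
	return 0;
}

static void
example_remove(void)
{
	if (g_example_osh) {
		osl_detach(g_example_osh);
		g_example_osh = NULL;
	}
}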
/* Free the driver packet. Free the tag if present */
void BCMFASTPATH
osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;

	skb = (struct sk_buff*) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	/* perversion: we use skb->next to chain multi-skb packets */
	while (skb) {
		nskb = skb->next;
		skb->next = NULL;


#ifdef CTFPOOL
		if (PKTISFAST(osh, skb))
			osl_pktfastfree(osh, skb);
		else {
#else /* CTFPOOL */
		{
#endif /* CTFPOOL */

			if (skb->destructor)
				/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
				 * destructor exists
				 */
				dev_kfree_skb_any(skb);
			else
				/* can free immediately (even in_irq()) if destructor
				 * does not exist
				 */
				dev_kfree_skb(skb);
		}

		osh->pub.pktalloced--;

		skb = nskb;
	}
}
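Because the loop above walks skb->next, a caller may pass an entire chain in one call. A short sketch of that behaviour (the helper is hypothetical, not from the source):

/* Sketch only: chain two packets via skb->next and free both at once. */
static void
example_free_chain(osl_t *osh, struct sk_buff *head, struct sk_buff *tail)
{
	head->next = tail;
	tail->next = NULL;

	/* The while loop in osl_pktfree() unlinks and frees each segment
	 * and decrements pub.pktalloced per segment.
	 */
	osl_pktfree(osh, head, FALSE);
}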

#ifdef DHD_USE_STATIC_BUF
void*
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;

	
	if (len > DHD_SKB_4PAGE_BUFSIZE)
	{
		OSL_MSG_ERROR(("osl_pktget_static: Do we really need this big skb?? len=%d\n", len));
		return osl_pktget(osh, len);
	}
	
	down(&bcm_static_skb->osl_pkt_sem);
	if (len <= DHD_SKB_1PAGE_BUFSIZE)
	{
		for (i = 0; i < MAX_STATIC_PKT_NUM; i++)
		{
			if (bcm_static_skb->pkt_use[i] == 0)
				break;
		}

		if (i != MAX_STATIC_PKT_NUM)
		{
			bcm_static_skb->pkt_use[i] = 1;
			up(&bcm_static_skb->osl_pkt_sem);

			skb = bcm_static_skb->skb_4k[i];
			skb->tail = skb->data + len;
			skb->len = len;
			
			return skb;
		}
	}

	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
		for (i = 0; i < MAX_STATIC_PKT_NUM; i++)
		{
			if (bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] == 0)
				break;
		}

		if (i != MAX_STATIC_PKT_NUM)
		{
			bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] = 1;
			up(&bcm_static_skb->osl_pkt_sem);
			skb = bcm_static_skb->skb_8k[i];
			skb->tail = skb->data + len;
			skb->len = len;

			return skb;
		}
	}

	if (bcm_static_skb->pkt_use[MAX_STATIC_PKT_NUM*2] == 0) 
	{
		bcm_static_skb->pkt_use[MAX_STATIC_PKT_NUM*2] = 1;
		up(&bcm_static_skb->osl_pkt_sem);

		skb = bcm_static_skb->skb_16k;
		skb->tail = skb->data + len;
		skb->len = len;

		return skb;
	}
	
	up(&bcm_static_skb->osl_pkt_sem);
	OSL_MSG_ERROR(("osl_pktget_static: all static pkt in use!\n"));
	return osl_pktget(osh, len);
}
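The indexing in both osl_pktget_static() variants implies a fixed slot layout for pkt_use[]. A sketch that spells it out; the enum names are hypothetical, only the values follow from the code above.

/* Illustrative names only; the values follow from the indexing above. */
enum {
	EXAMPLE_SLOT_4K_FIRST = 0,                          /* 4K skbs: 0 .. MAX_STATIC_PKT_NUM-1 */
	EXAMPLE_SLOT_8K_FIRST = MAX_STATIC_PKT_NUM,         /* 8K skbs: MAX .. 2*MAX-1 */
	EXAMPLE_SLOT_16K      = MAX_STATIC_PKT_NUM * 2,     /* the single 16K skb */
	EXAMPLE_SLOT_COUNT    = MAX_STATIC_PKT_NUM * 2 + 1  /* size of pkt_use[] */
};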
osl_t *
osl_attach(void *pdev, uint bustype, bool pkttag)
{
	osl_t *osh;

	osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
	ASSERT(osh);

	bzero(osh, sizeof(osl_t));

	/* Check that error map has the right number of entries in it */
	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));

	osh->magic = OS_HANDLE_MAGIC;
	atomic_set(&osh->malloced, 0);
	osh->failed = 0;
	osh->dbgmem_list = NULL;
	osh->pdev = pdev;
	osh->pub.pkttag = pkttag;
	osh->bustype = bustype;

	switch (bustype) {
		case PCI_BUS:
		case SI_BUS:
		case PCMCIA_BUS:
			osh->pub.mmbus = TRUE;
			break;
		case JTAG_BUS:
		case SDIO_BUS:
		case USB_BUS:
		case SPI_BUS:
		case RPC_BUS:
			osh->pub.mmbus = FALSE;
			break;
		default:
			ASSERT(FALSE);
			break;
	}

#ifdef DHD_USE_STATIC_BUF
	if (!bcm_static_buf) {
		if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(3, STATIC_BUF_SIZE +
			STATIC_BUF_TOTAL_LEN))) {
			OSL_MSG_ERROR(("can not alloc static buf!\n"));
		} else {
			OSL_MSG_INFO(("alloc static buf at %x!\n", (unsigned int)bcm_static_buf));

			/* Only initialize when the prealloc region is actually there */
			init_MUTEX(&bcm_static_buf->static_sem);

			bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
		}
	}

	if (bcm_static_buf && !bcm_static_skb)
	{
		int i;
		void *skb_buff_ptr = 0;
		bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
		skb_buff_ptr = dhd_os_prealloc(4, 0);

		bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *)*(MAX_STATIC_PKT_NUM*2+1));
		for (i = 0; i < MAX_STATIC_PKT_NUM*2+1; i++)
			bcm_static_skb->pkt_use[i] = 0;

		init_MUTEX(&bcm_static_skb->osl_pkt_sem);
	}
#endif /* DHD_USE_STATIC_BUF */

	return osh;
}
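How the single dhd_os_prealloc(3, ...) region is carved up follows from the expressions in the attach code above: the control block sits at the base, the packet bookkeeping 2048 bytes in, and the data slots start at STATIC_BUF_SIZE. A small illustrative dump helper, assuming file-scope access to bcm_static_buf; the function itself is hypothetical.

/* Illustrative only; offsets are taken from the expressions in osl_attach(). */
static void
example_dump_static_layout(void)
{
	unsigned char *base = (unsigned char *)bcm_static_buf;

	if (base == NULL)
		return;

	OSL_MSG_INFO(("static control block at %p\n", base));                      /* bcm_static_buf_t */
	OSL_MSG_INFO(("static skb bookkeeping at %p\n", base + 2048));             /* bcm_static_pkt_t */
	OSL_MSG_INFO(("static data slots start at %p\n", base + STATIC_BUF_SIZE)); /* buf_ptr */
}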