/*
 * Allocate a buffer of |size| bytes and account it against |osh|.
 *
 * When CONFIG_DHD_USE_STATIC_BUF is enabled and the request is between
 * PAGE_SIZE and STATIC_BUF_SIZE, a slot from the preallocated static
 * buffer pool is handed out first; otherwise (or when the pool is
 * exhausted) the allocation falls through to kmalloc().
 *
 * osh may be NULL (early/boot-time callers); accounting and failure
 * counting are skipped in that case. Returns NULL on failure.
 */
void *
osl_malloc(osl_t *osh, uint size)
{
	void *addr;
	gfp_t flags;

	/* only ASSERT if osh is defined */
	if (osh)
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf) {
		int i = 0;

		if ((size >= PAGE_SIZE) && (size <= STATIC_BUF_SIZE)) {
			down(&bcm_static_buf->static_sem);

			/* Find the first free slot in the static pool. */
			for (i = 0; i < STATIC_BUF_MAX_NUM; i++) {
				if (bcm_static_buf->buf_use[i] == 0)
					break;
			}

			if (i == STATIC_BUF_MAX_NUM) {
				up(&bcm_static_buf->static_sem);
				printk("all static buff in use!\n");
				goto original;
			}

			bcm_static_buf->buf_use[i] = 1;
			up(&bcm_static_buf->static_sem);

			bzero(bcm_static_buf->buf_ptr + STATIC_BUF_SIZE * i, size);
			/* FIX: also check osh->cmn before dereferencing it, as the
			 * kmalloc path below already does; osh->cmn may not be set up
			 * yet for this handle.
			 */
			if (osh && osh->cmn)
				atomic_add(size, &osh->cmn->malloced);

			return ((void *)(bcm_static_buf->buf_ptr + STATIC_BUF_SIZE * i));
		}
	}
original:
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	/* GFP_ATOMIC when we may be in interrupt/atomic context. */
	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
	if ((addr = kmalloc(size, flags)) == NULL) {
		if (osh)
			osh->failed++;
		return (NULL);
	}
	if (osh && osh->cmn)
		atomic_add(size, &osh->cmn->malloced);

	return (addr);
}
static inline struct kfifo* dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock) { struct kfifo *fifo; gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC; #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) fifo = kfifo_init(buf, size, flags, lock); #else fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags); if (!fifo) { return NULL; } kfifo_init(fifo, buf, size); #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */ return fifo; }
/*
 * Initialize the packet pool with specified number of objects.
 *
 * Allocates the ctfpool control structure and pre-populates it with
 * |numobj| objects of |size| bytes each via osl_ctfpool_add().
 *
 * Returns 0 on success, -1 on allocation failure.
 */
int32
osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
{
	gfp_t flags;

	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
	osh->ctfpool = kzalloc(sizeof(ctfpool_t), flags);
	ASSERT(osh->ctfpool);
	/* FIX: ASSERT compiles out in non-debug builds, after which the
	 * dereferences below would crash on allocation failure. Fail
	 * gracefully instead, matching the osl_ctfpool_add() error path.
	 */
	if (osh->ctfpool == NULL)
		return -1;

	osh->ctfpool->max_obj = numobj;
	osh->ctfpool->obj_size = size;

	spin_lock_init(&osh->ctfpool->lock);

	/* Pre-populate the pool; fast_frees is decremented because
	 * osl_ctfpool_add() counts these initial fills as frees.
	 */
	while (numobj--) {
		if (!osl_ctfpool_add(osh))
			return -1;
		osh->ctfpool->fast_frees--;
	}

	return 0;
}
/*
 * Allocate and initialize an OS abstraction handle (osl_t) for a device.
 *
 * The common state (osh->cmn) is either created fresh here, or shared:
 * if the caller passes a non-NULL *osl_cmn, that existing common block
 * is reused and its refcount bumped.
 *
 * NOTE(review): the `#else` below pairs with an `#if` (presumably
 * `#ifdef SHARED_OSL_CMN`, matching the `#endif` comment) that lies
 * above this chunk — confirm against the full file. The second variant
 * omits the osl_cmn parameter and always allocates private common state.
 *
 * Returns the new handle, or NULL on allocation failure.
 */
osl_t *
osl_attach(void *pdev, uint bustype, bool pkttag, void **osl_cmn)
{
#else
osl_t *
osl_attach(void *pdev, uint bustype, bool pkttag)
{
	void **osl_cmn = NULL;
#endif /* SHARED_OSL_CMN */
	osl_t *osh;
	gfp_t flags;

	/* GFP_ATOMIC when we may be in interrupt/atomic context. */
	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
	if (!(osh = kmalloc(sizeof(osl_t), flags)))
		return osh;

	ASSERT(osh);

	bzero(osh, sizeof(osl_t));

	if (osl_cmn == NULL || *osl_cmn == NULL) {
		/* No shared common state supplied: create our own. */
		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
			kfree(osh);
			return NULL;
		}
		bzero(osh->cmn, sizeof(osl_cmn_t));
		if (osl_cmn)
			*osl_cmn = osh->cmn;
		atomic_set(&osh->cmn->malloced, 0);
		osh->cmn->dbgmem_list = NULL;
		spin_lock_init(&(osh->cmn->dbgmem_lock));

		spin_lock_init(&(osh->cmn->pktalloc_lock));

	} else {
		/* Reuse the caller's shared common state. */
		osh->cmn = *osl_cmn;
	}
	atomic_add(1, &osh->cmn->refcount);

	/* Check that error map has the right number of entries in it */
	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));

	osh->failed = 0;
	osh->pdev = pdev;
	osh->pub.pkttag = pkttag;
	osh->bustype = bustype;
	osh->magic = OS_HANDLE_MAGIC;

	/* Memory-mapped register access only for these bus types. */
	switch (bustype) {
		case PCI_BUS:
		case SI_BUS:
		case PCMCIA_BUS:
			osh->pub.mmbus = TRUE;
			break;
		case JTAG_BUS:
		case SDIO_BUS:
		case USB_BUS:
		case SPI_BUS:
		case RPC_BUS:
			osh->pub.mmbus = FALSE;
			break;
		default:
			ASSERT(FALSE);
			break;
	}

	return osh;
}
void* dhd_deferred_work_init(void *dhd_info) { struct dhd_deferred_wq *work = NULL; u8* buf; unsigned long fifo_size = 0; gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC; if (!dhd_info) { DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__)); goto return_null; } work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq), flags); if (!work) { DHD_ERROR(("%s: work queue creation failed \n", __FUNCTION__)); goto return_null; } INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler); /* initialize event fifo */ spin_lock_init(&work->work_lock); /* allocate buffer to hold prio events */ fifo_size = DHD_PRIO_WORK_FIFO_SIZE; fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size); buf = (u8*)kzalloc(fifo_size, flags); if (!buf) { DHD_ERROR(("%s: prio work fifo allocation failed \n", __FUNCTION__)); goto return_null; } /* Initialize prio event fifo */ work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock); if (!work->prio_fifo) { kfree(buf); goto return_null; } /* allocate buffer to hold work events */ fifo_size = DHD_WORK_FIFO_SIZE; fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size); buf = (u8*)kzalloc(fifo_size, flags); if (!buf) { DHD_ERROR(("%s: work fifo allocation failed \n", __FUNCTION__)); goto return_null; } /* Initialize event fifo */ work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock); if (!work->work_fifo) { kfree(buf); goto return_null; } work->dhd_info = dhd_info; DHD_ERROR(("%s: work queue initialized \n", __FUNCTION__)); return work; return_null: if (work) dhd_deferred_work_deinit(work); return NULL; }