static int __init s3c64xx_dma_init(void)
{
	int ret;

	printk(KERN_INFO "%s: Registering DMA channels\n", __func__);

	dma_pool = dma_pool_create("DMA-LLI", NULL, sizeof(struct pl080s_lli), 16, 0);
	if (!dma_pool) {
		printk(KERN_ERR "%s: failed to create pool\n", __func__);
		return -ENOMEM;
	}

	ret = sysdev_class_register(&dma_sysclass);
	if (ret) {
		printk(KERN_ERR "%s: failed to create sysclass\n", __func__);
		return -ENOMEM;
	}

	/* Set all DMA configuration to be DMA, not SDMA */
	writel(0xffffff, S3C_SYSREG(0x110));

	/* Register standard DMA controllers */
	s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000);
	s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000);

	return 0;
}
static int __init my_init(void)
{
	printk(KERN_INFO "Satish testing DMA module\n");

	printk(KERN_INFO "testing DMA coherent mapping dma_alloc_coherent()\n");
	kbuf = dma_alloc_coherent(NULL, size, &handle, GFP_KERNEL);
	output(kbuf, handle, size, "dma_alloc_coherent string");
	dma_free_coherent(NULL, size, kbuf, handle);

	printk(KERN_INFO "Testing DMA Mapping dma_map_single()\n");
	kbuf = kmalloc(size, GFP_KERNEL);
	/* dma_map_single() takes the buffer, its size and the DMA direction */
	handle = dma_map_single(NULL, kbuf, size, direction);
	output(kbuf, handle, size, "this is dma_map_single string");
	dma_unmap_single(NULL, handle, size, direction);
	kfree(kbuf);

	printk(KERN_INFO "Testing DMA Pool method\n");
	mypool = dma_pool_create("mypool", NULL, pool_size, pool_align, 0);
	kbuf = dma_pool_alloc(mypool, GFP_KERNEL, &handle);
	output(kbuf, handle, size, "This is dma_pool_alloc string");
	dma_pool_free(mypool, kbuf, handle);
	dma_pool_destroy(mypool);

	return 0;
}
int ath_hwcs_init(void)
{
	dma_addr_t pa;

#ifdef CONFIG_ATH_HWCS_notyet
	if (!dmapool) {
		dmapool = dma_pool_create("csum_hw_accel", NULL,
					  sizeof(ath_hwcs_desc_t),
					  (size_t)4,
					  (size_t)ATH_HWCS_DMAPOOL_SIZE);
		if (!dmapool)
			return -1;
	}
#endif

	ath_hwcs_tx_desc = kmalloc(sizeof(ath_hwcs_desc_t), GFP_DMA);
	if (!ath_hwcs_tx_desc)		/* allocation failure was unchecked */
		return -ENOMEM;

	// Setup checksum descriptor
	pa = dma_map_single(NULL, ath_hwcs_tx_desc, sizeof(ath_hwcs_desc_t),
			    DMA_TO_DEVICE);

	ath_hwcs_tx_desc->next = (ath_hwcs_desc_t *)pa;

	uncached_cksum_desc =
		(ath_hwcs_desc_t *)KSEG1ADDR(virt_to_phys(ath_hwcs_tx_desc));

	// Weight for channels
	ath_reg_wr(ATH_HWCS_DMATX_ARB_CFG, (63 << 8));

	// Tx checksum interrupt mask
	ath_reg_rmw_set(ATH_HWCS_IMASK, ATH_HWCS_TX_INTR_MASK);

	// Initialize Tx descriptor address
	ath_reg_wr(ATH_HWCS_DMATX_DESC0, pa);

	printk("%s: Init done ...\n", __func__);

	return 0;
}
int stmp3xxx_dma_request(int ch, struct device *dev, const char *name)
{
	struct stmp3xxx_dma_user *user;
	int err = 0;

	user = channels + ch;
	if (!IS_VALID_CHANNEL(ch)) {
		err = -ENODEV;
		goto out;
	}
	if (IS_USED(ch)) {
		err = -EBUSY;
		goto out;
	}
	/* Create a pool to allocate dma commands from */
	user->pool = dma_pool_create(name, dev, pool_item_size, pool_alignment,
				     PAGE_SIZE);
	if (user->pool == NULL) {
		err = -ENOMEM;
		goto out;
	}
	user->name = name;
	user->inuse++;
out:
	return err;
}
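The snippet above only shows the request side; a minimal sketch of a matching release path, assuming the same channels table and inuse bookkeeping (the function name and cleanup order are illustrative, not the driver's confirmed code):

/* Hypothetical counterpart to stmp3xxx_dma_request(): destroy the
 * per-channel command pool and mark the channel free again. */
void stmp3xxx_dma_release(int ch)
{
	struct stmp3xxx_dma_user *user = channels + ch;

	if (!IS_VALID_CHANNEL(ch) || !IS_USED(ch))
		return;

	/* all dma commands must have been returned to the pool by now */
	dma_pool_destroy(user->pool);
	user->pool = NULL;
	user->name = NULL;
	user->inuse--;
}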
static int __init s3c64xx_dma_init(void)
{
	int ret;

	printk(KERN_INFO "%s: Registering DMA channels\n", __func__);

	dma_pool = dma_pool_create("DMA-LLI", NULL, 32, 16, 0);
	if (!dma_pool) {
		printk(KERN_ERR "%s: failed to create pool\n", __func__);
		return -ENOMEM;
	}

	ret = sysdev_class_register(&dma_sysclass);
	if (ret) {
		printk(KERN_ERR "%s: failed to create sysclass\n", __func__);
		return -ENOMEM;
	}

	/* Set all DMA configuration to be DMA, not SDMA */
	writel(0xffffff, S3C_SYSREG(0x110));

	/* Register standard DMA controllers */
	s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000);
	s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000);

	return 0;
}
static int __init s3c64xx_dma_init(void)
{
	int ret;

	printk(KERN_INFO "%s: Registering DMA channels\n", __func__);

	dma_pool = dma_pool_create("DMA-LLI", NULL, sizeof(struct pl080s_lli), 16, 0);
	if (!dma_pool) {
		printk(KERN_ERR "%s: failed to create pool\n", __func__);
		return -ENOMEM;
	}

	ret = sysdev_class_register(&dma_sysclass);
	if (ret) {
		printk(KERN_ERR "%s: failed to create sysclass\n", __func__);
		return -ENOMEM;
	}

	/* Set all DMA configuration to be DMA, not SDMA */
	writel(0xffffff, S3C64XX_SDMA_SEL);

	/* Register standard DMA controllers */
	platform_add_devices(s3c64xx_dma_devices, ARRAY_SIZE(s3c64xx_dma_devices));
	platform_driver_probe(&s3c64xx_dma_driver, s3c64xx_dma_probe);

	return 0;
}
int buffer_mgr_init(struct dx_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle;

	crypto_drvdata = drvdata;

	buff_mgr_handle = kmalloc(sizeof(struct buff_mgr_handle), GFP_KERNEL);
	if (buff_mgr_handle == NULL)
		return -ENOMEM;

	/* Publish the handle before creating the pool, so the error path
	 * (buffer_mgr_fini) can find it and free it. */
	drvdata->buff_mgr_handle = buff_mgr_handle;

	buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
			"dx_single_mlli_tables", drvdata->dev,
			(2 * LLI_MAX_NUM_OF_DATA_ENTRIES +
			 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) *
			SEP_LLI_ENTRY_BYTE_SIZE,
			MLLI_TABLE_MIN_ALIGNMENT, 0);
	if (unlikely(buff_mgr_handle->mlli_buffs_pool == NULL))
		goto error;

	return 0;

error:
	buffer_mgr_fini(drvdata);
	return -ENOMEM;
}
static int __init my_init(void)
{
	/* dma_alloc_coherent method */
	printk(KERN_INFO "Loading DMA allocation test module\n");
	printk(KERN_INFO "\nTesting dma_alloc_coherent()..........\n\n");
	kbuf = dma_alloc_coherent(NULL, size, &handle, GFP_KERNEL);
	output(kbuf, handle, size, "This is the dma_alloc_coherent() string");
	dma_free_coherent(NULL, size, kbuf, handle);

	/* dma_map/unmap_single */
	printk(KERN_INFO "\nTesting dma_map_single()................\n\n");
	kbuf = kmalloc(size, GFP_KERNEL);
	handle = dma_map_single(NULL, kbuf, size, direction);
	output(kbuf, handle, size, "This is the dma_map_single() string");
	dma_unmap_single(NULL, handle, size, direction);
	kfree(kbuf);

	/* dma_pool method */
	printk(KERN_INFO "\nTesting dma_pool_alloc()..........\n\n");
	mypool = dma_pool_create("mypool", NULL, pool_size, pool_align, 0);
	kbuf = dma_pool_alloc(mypool, GFP_KERNEL, &handle);
	output(kbuf, handle, size, "This is the dma_pool_alloc() string");
	dma_pool_free(mypool, kbuf, handle);
	dma_pool_destroy(mypool);

	return 0;
}
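This test module leans on module-level state and an output() helper defined outside the snippet; a minimal sketch of plausible definitions, assuming typical teaching-module values (every name and value below is inferred from the calls, not taken from the original source):

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

/* Assumed module-scope state used by my_init() above. */
static char *kbuf;
static dma_addr_t handle;
static size_t size = PAGE_SIZE;
static enum dma_data_direction direction = DMA_BIDIRECTIONAL;
static struct dma_pool *mypool;
static size_t pool_size = 1024;
static size_t pool_align = 8;

/* Assumed helper: copy the test string into the buffer and report
 * both the kernel virtual address and the DMA handle. */
static void output(char *buf, dma_addr_t h, size_t sz, const char *string)
{
	strcpy(buf, string);
	printk(KERN_INFO "kernel vaddr = %p, dma handle = %pad, size = %zu\n",
	       buf, &h, sz);
	printk(KERN_INFO "contents: %s\n", buf);
}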
static int __init s3c64xx_dma_init(void)
{
	int ret;

	printk(KERN_INFO "%s: Registering DMA channels\n", __func__);

	dma_pool = dma_pool_create("DMA-LLI", NULL, sizeof(struct pl080s_lli), 16, 0);
	if (!dma_pool) {
		printk(KERN_ERR "%s: failed to create pool\n", __func__);
		return -ENOMEM;
	}

	ret = subsys_system_register(&dma_subsys, NULL);
	if (ret) {
		printk(KERN_ERR "%s: failed to create subsys\n", __func__);
		return -ENOMEM;
	}

	/* Set all DMA configuration to be DMA, not SDMA */
	writel(0xffffff, S3C64XX_SDMA_SEL);

	/* Register standard DMA controllers */
	s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000);
	s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000);

	return 0;
}
/**
 * @brief   Initialize & allocate RX buffer
 * @details This function executes:\n
 *           # Create DMA pool\n
 *           # Allocate RX buffer\n
 *           # Call RX buffer clear
 * @param   N/A
 * @retval  0 : Success
 * @retval  -ENOMEM : Error, not enough memory.
 * @note
 */
int felica_rxbuf_init(void)
{
	int i;

	pr_debug(PRT_NAME ": %s\n", __func__);

	rxbuf.dmapool = dma_pool_create(DMAPOOL_NAME, NULL, DMAPOOL_SIZE,
					DMAPOOL_ALIGN, DMAPOOL_ALIGN * RXBUF_N);
	if (!rxbuf.dmapool) {
		pr_err(PRT_NAME ": Error. Cannot create DMA pool for RXbuf.\n");
		return -ENOMEM;
	}
	for (i = 0; i < RXBUF_N; i++) {
		rxbuf.slot[i].buf = dma_pool_alloc(rxbuf.dmapool, GFP_KERNEL,
						   &rxbuf.slot[i].dmabase);
		if (!rxbuf.slot[i].buf) {
			pr_err(PRT_NAME ": Error. Not enough memory for RXbuf.\n");
			goto err_alloc_rx_buf;
		}
	}
	felica_rxbuf_clear();

	return 0;

err_alloc_rx_buf:
	for (i--; i >= 0; i--)
		dma_pool_free(rxbuf.dmapool, rxbuf.slot[i].buf,
			      rxbuf.slot[i].dmabase);
	dma_pool_destroy(rxbuf.dmapool);
	rxbuf.dmapool = NULL;
	return -ENOMEM;
}
int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
	if (!buff_mgr_handle)
		return -ENOMEM;

	drvdata->buff_mgr_handle = buff_mgr_handle;

	buff_mgr_handle->mlli_buffs_pool =
		dma_pool_create("dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (!buff_mgr_handle->mlli_buffs_pool)
		goto error;

	return 0;

error:
	cc_buffer_mgr_fini(drvdata);
	return -ENOMEM;
}
static int ohci_mem_init (struct ohci_hcd *ohci)
{
	ohci->td_cache = dma_pool_create ("ohci_td",
		ohci->hcd.self.controller,
		sizeof (struct td),
		32 /* byte alignment */,
		0 /* no page-crossing issues */);
	if (!ohci->td_cache)
		return -ENOMEM;

	ohci->ed_cache = dma_pool_create ("ohci_ed",
		ohci->hcd.self.controller,
		sizeof (struct ed),
		16 /* byte alignment */,
		0 /* no page-crossing issues */);
	if (!ohci->ed_cache) {
		dma_pool_destroy (ohci->td_cache);
		return -ENOMEM;
	}
	return 0;
}
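The matching cleanup destroys both pools once every TD and ED has been freed back to them; a minimal sketch (the name ohci_mem_cleanup mirrors the init function; treat the body as an illustration rather than the exact in-tree code):

static void ohci_mem_cleanup (struct ohci_hcd *ohci)
{
	/* dma_pool_destroy() requires all blocks to be freed first */
	if (ohci->td_cache) {
		dma_pool_destroy (ohci->td_cache);
		ohci->td_cache = NULL;
	}
	if (ohci->ed_cache) {
		dma_pool_destroy (ohci->ed_cache);
		ohci->ed_cache = NULL;
	}
}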
int coh901318_pool_create(struct coh901318_pool *pool,
			  struct device *dev,
			  size_t size, size_t align)
{
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
	/* dma_pool_create() returns NULL on failure; propagate it */
	if (!pool->dmapool)
		return -ENOMEM;

	DEBUGFS_POOL_COUNTER_RESET(pool);
	return 0;
}
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
			       const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size, 0, 0);

	return pool->pool ? 0 : -ENOMEM;
}
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
			       const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}
static int create_crypto_dma_pool(struct nitrox_device *ndev)
{
	size_t size;

	/* Crypto context pool, 16 byte aligned */
	size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
	ndev->ctx_pool = dma_pool_create("crypto-context",
					 DEV(ndev), size, 16, 0);
	if (!ndev->ctx_pool)
		return -ENOMEM;

	return 0;
}
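The context pool created here needs a matching teardown at device removal; a minimal sketch, assuming every context was already returned with dma_pool_free() (the function name is hypothetical):

/* Hypothetical counterpart: tear down the crypto-context pool. */
static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
{
	if (!ndev->ctx_pool)
		return;

	dma_pool_destroy(ndev->ctx_pool);
	ndev->ctx_pool = NULL;
}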
/* Instantiate a software object representing a DMA controller. */
struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *mregs)
{
	struct cppi		*controller;
	struct device		*dev = musb->controller;
	struct platform_device	*pdev = to_platform_device(dev);
	int			irq = platform_get_irq_byname(pdev, "dma");

	controller = kzalloc(sizeof *controller, GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->mregs = mregs;
	controller->tibase = mregs - DAVINCI_BASE_OFFSET;

	controller->musb = musb;
	controller->controller.start = cppi_controller_start;
	controller->controller.stop = cppi_controller_stop;
	controller->controller.channel_alloc = cppi_channel_allocate;
	controller->controller.channel_release = cppi_channel_release;
	controller->controller.channel_program = cppi_channel_program;
	controller->controller.channel_abort = cppi_channel_abort;

	/* NOTE: allocating from on-chip SRAM would give the least
	 * contention for memory access, if that ever matters here.
	 */

	/* setup BufferPool */
	controller->pool = dma_pool_create("cppi",
			controller->musb->controller,
			sizeof(struct cppi_descriptor),
			CPPI_DESCRIPTOR_ALIGN, 0);
	if (!controller->pool) {
		kfree(controller);
		return NULL;
	}

	if (irq > 0) {
		if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
			dev_err(dev, "request_irq %d failed!\n", irq);
			dma_controller_destroy(&controller->controller);
			return NULL;
		}
		controller->irq = irq;
	}

	return &controller->controller;
}
struct ipath_user_sdma_queue *
ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
{
	struct ipath_user_sdma_queue *pq =
		kmalloc(sizeof(struct ipath_user_sdma_queue), GFP_KERNEL);

	if (!pq)
		goto done;

	pq->counter = 0;
	pq->sent_counter = 0;
	INIT_LIST_HEAD(&pq->sent);

	mutex_init(&pq->lock);

	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
		 "ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
					 sizeof(struct ipath_user_sdma_pkt),
					 0, 0, NULL);

	if (!pq->pkt_slab)
		goto err_kfree;

	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
		 "ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
	pq->header_cache = dma_pool_create(pq->header_cache_name,
					   dev,
					   IPATH_USER_SDMA_EXP_HEADER_LENGTH,
					   4, 0);
	if (!pq->header_cache)
		goto err_slab;

	pq->dma_pages_root = RB_ROOT;

	goto done;

err_slab:
	kmem_cache_destroy(pq->pkt_slab);
err_kfree:
	kfree(pq);
	pq = NULL;

done:
	return pq;
}
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
			       const char *name, unsigned long size)
{
	unsigned int align = 0;

	if (!(*dev->dma_mask & 0x1))
		align = 1 << ffs(*dev->dma_mask);

	if (align & (align - 1)) {
		dev_warn(dev, "invalid DMA mask %#llx\n", *dev->dma_mask);
		return -ENOMEM;
	}

	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size, align,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}
/**
 * hcd_buffer_create - initialize buffer pools
 * @hcd: the bus whose buffer pools are to be initialized
 * Context: !in_interrupt()
 *
 * Call this as part of initializing a host controller that uses the dma
 * memory allocators. It initializes some pools of dma-coherent memory that
 * will be shared by all drivers using that controller, or returns a negative
 * errno value on error.
 *
 * Call hcd_buffer_destroy() to clean up after using those pools.
 */
int hcd_buffer_create (struct usb_hcd *hcd)
{
	char		name [16];
	int		i, size;

	for (i = 0; i < HCD_BUFFER_POOLS; i++) {
		if (!(size = pool_max [i]))
			continue;
		snprintf (name, sizeof name, "buffer-%d", size);
		hcd->pool [i] = dma_pool_create (name, hcd->self.controller,
				size, size, 0);
		if (!hcd->pool [i]) {
			hcd_buffer_destroy (hcd);
			return -ENOMEM;
		}
	}
	return 0;
}
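The kernel-doc above refers to hcd_buffer_destroy(); a minimal sketch of what that cleanup plausibly looks like, destroying whichever pools were created (an illustration, not the verified in-tree body):

void hcd_buffer_destroy (struct usb_hcd *hcd)
{
	int i;

	/* safe to call even if hcd_buffer_create() failed partway */
	for (i = 0; i < HCD_BUFFER_POOLS; i++) {
		if (hcd->pool [i]) {
			dma_pool_destroy (hcd->pool [i]);
			hcd->pool [i] = NULL;
		}
	}
}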
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	mtx_init(&htt->tx_lock, device_get_nameunit(ar->sc_dev),
		 "athp htt tx", MTX_DEF);
	mtx_init(&htt->tx_comp_lock, device_get_nameunit(ar->sc_dev),
		 "athp htt comp tx", MTX_DEF);

	idr_init(&htt->pending_tx);

	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->sc_dev,
				       sizeof(struct ath10k_htt_txbuf), 4, 0);
	if (!htt->tx_pool) {
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	if (!ar->hw_params.continuous_frag_desc)
		goto skip_frag_desc_alloc;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	if (athp_descdma_alloc(ar, &htt->frag_desc.dd, "htt frag_desc",
			       8, size) != 0) {
		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
		ret = -ENOMEM;
		goto free_tx_pool;
	}
	htt->frag_desc.vaddr = (void *) htt->frag_desc.dd.dd_desc;
	htt->frag_desc.paddr = htt->frag_desc.dd.dd_desc_paddr;

skip_frag_desc_alloc:
	return 0;

free_tx_pool:
	dma_pool_destroy(htt->tx_pool);
free_idr_pending_tx:
	mtx_destroy(&htt->tx_lock);
	idr_destroy(&htt->pending_tx);
	return ret;
}
static int init_hdlc_queues(struct port *port)
{
	int i;

	if (!ports_open) {
		dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
					   POOL_ALLOC_SIZE, 32, 0);
		if (!dma_pool)
			return -ENOMEM;
	}

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff;
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = RX_SIZE;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}
static int fdma_run_initialise_sequence(struct fdma *fdma)
{
	fdma->llu_pool = dma_pool_create(fdma->name, NULL,
					 sizeof(struct fdma_llu_entry), 32, 0);
	if (fdma->llu_pool == NULL) {
		fdma_dbg(fdma, "%s Can't allocate dma_pool memory\n",
			 __FUNCTION__);
		return -ENOMEM;
	}

	fdma_initialise(fdma);
	fdma_reset_channels(fdma);

	if (!fdma_enable_all_channels(fdma))
		return -ENODEV;

	return 0;
}
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
				       sizeof(struct ath10k_htt_txbuf), 4, 0);
	if (!htt->tx_pool) {
		idr_destroy(&htt->pending_tx);
		return -ENOMEM;
	}

	return 0;
}
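A teardown matching this allocator releases the IDR and the pool; a minimal sketch, assuming every outstanding tx buffer has already been freed back to the pool (the in-tree free routine also walks the IDR to clean up stragglers; that part is omitted here):

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	/* Assumes no tx buffers are still outstanding in the pool. */
	idr_destroy(&htt->pending_tx);
	dma_pool_destroy(htt->tx_pool);
}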
static int __init imapx200_dma_init(void)
{
	int ret;

	printk(KERN_INFO "%s: Registering DMA channels\n", __func__);

	dma_pool = dma_pool_create("DMA-LLI", NULL, 32, 16, 0);
	if (!dma_pool) {
		printk(KERN_ERR "%s: failed to create pool\n", __func__);
		return -ENOMEM;
	}

	ret = sysdev_class_register(&dma_sysclass);
	if (ret) {
		printk(KERN_ERR "%s: failed to create sysclass\n", __func__);
		return -ENOMEM;
	}

	imapx200_dma_init_xxx(0, 0, IRQ_DMA, DMA_BASE_REG_PA);

	return 0;
}
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
				       sizeof(struct ath10k_htt_txbuf), 4, 0);
	if (!htt->tx_pool) {
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	if (!ar->hw_params.continuous_frag_desc)
		goto skip_frag_desc_alloc;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_DMA);
	if (!htt->frag_desc.vaddr) {
		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
		ret = -ENOMEM;
		goto free_tx_pool;
	}

skip_frag_desc_alloc:
	return 0;

free_tx_pool:
	dma_pool_destroy(htt->tx_pool);
free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);
	return ret;
}
static int __init my_init( void )
{
	char *kbuf;
	dma_addr_t handle;
	size_t size = ( 10 * PAGE_SIZE );
	struct dma_pool *mypool;

	/* dma_alloc_coherent method */
	kbuf = dma_alloc_coherent( NULL, size, &handle, GFP_KERNEL );
	output( kbuf, handle, size, "This is the dma_alloc_coherent() string" );
	dma_free_coherent( NULL, size, kbuf, handle );

	/* dma_map/unmap_single */
	kbuf = kmalloc( size, GFP_KERNEL );
	handle = dma_map_single( NULL, kbuf, size, direction );
	output( kbuf, handle, size, "This is the dma_map_single() string" );
	dma_unmap_single( NULL, handle, size, direction );
	kfree( kbuf );

	/* dma_pool method */
	mypool = dma_pool_create( "mypool", NULL, pool_size, pool_align, 0 );
	kbuf = dma_pool_alloc( mypool, GFP_KERNEL, &handle );
	output( kbuf, handle, size, "This is the dma_pool_alloc() string" );
	dma_pool_free( mypool, kbuf, handle );
	dma_pool_destroy( mypool );

	/* returning non-zero makes insmod fail, so this one-shot test
	 * never stays loaded */
	return -1;
}
/**
 * hcd_buffer_create - initialize buffer pools
 * @hcd: the bus whose buffer pools are to be initialized
 * Context: !in_interrupt()
 *
 * Call this as part of initializing a host controller that uses the dma
 * memory allocators. It initializes some pools of dma-coherent memory that
 * will be shared by all drivers using that controller, or returns a negative
 * errno value on error.
 *
 * Call hcd_buffer_destroy() to clean up after using those pools.
 */
int hcd_buffer_create (struct usb_hcd *hcd)
{
	char		name [16];
	int		i, size;

#if !defined(CONFIG_ARCH_STR9100) && !defined(CONFIG_ARCH_STR8100) && \
    !defined(CONFIG_ARCH_CETUSPLUS)
	if (!hcd->self.controller->dma_mask)
		return 0;
#endif

	for (i = 0; i < HCD_BUFFER_POOLS; i++) {
		if (!(size = pool_max [i]))
			continue;
		snprintf (name, sizeof name, "buffer-%d", size);
		hcd->pool [i] = dma_pool_create (name, hcd->self.controller,
				size, size, 0);
		if (!hcd->pool [i]) {
			hcd_buffer_destroy (hcd);
			return -ENOMEM;
		}
	}
	return 0;
}
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	spin_lock_init(&htt->tx_lock);

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
		htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
	else
		htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
				  htt->max_num_pending_tx, GFP_KERNEL);
	if (!htt->pending_tx)
		return -ENOMEM;

	htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
				     BITS_TO_LONGS(htt->max_num_pending_tx),
				     GFP_KERNEL);
	if (!htt->used_msdu_ids) {
		kfree(htt->pending_tx);
		return -ENOMEM;
	}

	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
				       sizeof(struct ath10k_htt_txbuf), 4, 0);
	if (!htt->tx_pool) {
		kfree(htt->used_msdu_ids);
		kfree(htt->pending_tx);
		return -ENOMEM;
	}

	return 0;
}
/* Instantiate a software object representing a DMA controller. */
struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *mregs)
{
	struct cppi	*controller;

	controller = kzalloc(sizeof *controller, GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->mregs = mregs;
	controller->tibase = mregs - DAVINCI_BASE_OFFSET;

	controller->musb = musb;
	controller->controller.start = cppi_controller_start;
	controller->controller.stop = cppi_controller_stop;
	controller->controller.channel_alloc = cppi_channel_allocate;
	controller->controller.channel_release = cppi_channel_release;
	controller->controller.channel_program = cppi_channel_program;
	controller->controller.channel_abort = cppi_channel_abort;

	/* NOTE: allocating from on-chip SRAM would give the least
	 * contention for memory access, if that ever matters here.
	 */

	/* setup BufferPool */
	controller->pool = dma_pool_create("cppi",
			controller->musb->controller,
			sizeof(struct cppi_descriptor),
			CPPI_DESCRIPTOR_ALIGN, 0);
	if (!controller->pool) {
		kfree(controller);
		return NULL;
	}

	return &controller->controller;
}
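A matching destroy must release the descriptor pool before freeing the controller object; a minimal sketch, assuming the caller stopped the controller first (the container_of() recovery mirrors how the create function embeds struct dma_controller in struct cppi; treat the body as an illustration, not the verified in-tree code):

void dma_controller_destroy(struct dma_controller *c)
{
	struct cppi *cppi = container_of(c, struct cppi, controller);

	/* assumes the caller has already stopped the controller and
	 * returned every descriptor to the pool */
	dma_pool_destroy(cppi->pool);
	kfree(cppi);
}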