/* Frees request packet, called by gadget driver */
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep *ep;
	struct udc_request *req;

	if (!usbep || !usbreq)
		return;
	ep = container_of(usbep, struct udc_ep, ep);
	req = container_of(usbreq, struct udc_request, req);
	VDBG(ep->dev, "free_req req=%p\n", req);
	BUG_ON(!list_empty(&req->queue));
	if (req->td_data) {
		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

		/* free dma chain if created */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);

		pci_pool_free(ep->dev->data_requests, req->td_data,
			      req->td_phys);
	}
	kfree(req);
}
/**
 * crystalhd_destroy_dio_pool - Release DIO mem pool.
 * @adp: Adapter instance
 *
 * Return:
 *	none.
 *
 * This routine releases dio memory pool during close.
 */
void crystalhd_destroy_dio_pool(struct crystalhd_adp *adp)
{
	struct crystalhd_dio_req *dio;
	int count = 0;

	if (!adp) {
		printk(KERN_ERR "%s: Invalid arg\n", __func__);
		return;
	}

	do {
		dio = crystalhd_alloc_dio(adp);
		if (dio) {
			if (dio->fb_va)
				pci_pool_free(adp->fill_byte_pool,
					      dio->fb_va, dio->fb_pa);
			count++;
			kfree(dio);
		}
	} while (dio);

	if (adp->fill_byte_pool) {
		pci_pool_destroy(adp->fill_byte_pool);
		adp->fill_byte_pool = NULL;
	}

	dev_dbg(&adp->pdev->dev, "Released dio pool %d\n", count);
}
static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, int flags)
{
	struct ehci_qh		*qh;
	dma_addr_t		dma;

	qh = (struct ehci_qh *)
		pci_pool_alloc (ehci->qh_pool, flags, &dma);
	if (!qh)
		return qh;

	memset (qh, 0, sizeof *qh);
	atomic_set (&qh->refcount, 1);
	qh->qh_dma = dma;
	// INIT_LIST_HEAD (&qh->qh_list);
	INIT_LIST_HEAD (&qh->qtd_list);

	/* dummy td enables safe urb queuing */
	qh->dummy = ehci_qtd_alloc (ehci, flags);
	if (qh->dummy == 0) {
		ehci_dbg (ehci, "no dummy td\n");
		pci_pool_free (ehci->qh_pool, qh, qh->qh_dma);
		qh = 0;
	}
	return qh;
}
int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
{
	if (ah->on_hca)
		mthca_free(&dev->av_table.alloc,
			   (ah->avdma - dev->av_table.ddr_av_base) /
			   MTHCA_AV_SIZE);
	else
		pci_pool_free(dev->av_table.pool, ah->av, ah->avdma);

	return 0;
}
/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by ULD, i.e., FCP layer of libfc
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len = 0;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_adapter *adapter;
	struct ixgbe_fcoe_ddp *ddp;
	u32 fcbuff;

	if (!netdev)
		goto out_ddp_put;

	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto out_ddp_put;

	adapter = netdev_priv(netdev);
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto out_ddp_put;

	len = ddp->len;
	/* if there is an error, force to invalidate ddp context */
	if (ddp->err) {
		spin_lock_bh(&fcoe->lock);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
				(xid | IXGBE_FCFLTRW_WE));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_WE));

		/* guaranteed to be invalidated after 100us */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
		spin_unlock_bh(&fcoe->lock);
		if (fcbuff & IXGBE_FCBUFF_VALID)
			udelay(100);
	}
	if (ddp->sgl)
		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
	if (ddp->pool) {
		pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}

	ixgbe_fcoe_clear_ddp(ddp);

out_ddp_put:
	return len;
}
static void td_free (struct ohci_hcd *hc, struct td *td)
{
	struct td	**prev = &hc->td_hash [TD_HASH_FUNC (td->td_dma)];

	while (*prev && *prev != td)
		prev = &(*prev)->td_hash;
	if (*prev)
		*prev = td->td_hash;
	else if ((td->hwINFO & TD_DONE) != 0)
		ohci_dbg (hc, "no hash for td %p\n", td);
	pci_pool_free (hc->td_cache, td, td->td_dma);
}
static void qh_put (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	if (!atomic_dec_and_test (&qh->refcount))
		return;
	/* clean qtds first, and know this is not linked */
	if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
		ehci_dbg (ehci, "unused qh not empty!\n");
		BUG ();
	}
	if (qh->dummy)
		ehci_qtd_free (ehci, qh->dummy);
	pci_pool_free (ehci->qh_pool, qh, qh->qh_dma);
}
/**
 * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets
 * @adapter	: HBA soft state
 *
 * Teardown the dma pool for mailbox, passthru and extended passthru
 * structures, and scatter-gather lists.
 */
static void
megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
{
	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
	struct mraid_pci_blk	*epthru_pci_blk;
	struct mraid_pci_blk	*sg_pci_blk;
	struct mraid_pci_blk	*mbox_pci_blk;
	int			i;

	sg_pci_blk = raid_dev->sg_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
		pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
			      sg_pci_blk[i].dma_addr);
	}

	if (raid_dev->sg_pool_handle)
		pci_pool_destroy(raid_dev->sg_pool_handle);

	epthru_pci_blk = raid_dev->epthru_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
		pci_pool_free(raid_dev->epthru_pool_handle,
			      epthru_pci_blk[i].vaddr,
			      epthru_pci_blk[i].dma_addr);
	}

	if (raid_dev->epthru_pool_handle)
		pci_pool_destroy(raid_dev->epthru_pool_handle);

	mbox_pci_blk = raid_dev->mbox_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) {
		pci_pool_free(raid_dev->mbox_pool_handle,
			      mbox_pci_blk[i].vaddr,
			      mbox_pci_blk[i].dma_addr);
	}

	if (raid_dev->mbox_pool_handle)
		pci_pool_destroy(raid_dev->mbox_pool_handle);

	return;
}
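/*
 * Hedged sketch of the setup the teardown above undoes: one pci_pool per
 * packet type, with MBOX_MAX_SCSI_CMDS blocks carved out of each and
 * remembered in the mraid_pci_blk arrays that are later freed.  The helper
 * name, pool name, block size and alignment are illustrative assumptions,
 * not the driver's actual code.
 */
static int
megaraid_mbox_setup_one_pool_sketch(adapter_t *adapter,
				    struct pci_pool **handle,
				    struct mraid_pci_blk *blk, size_t blk_size)
{
	int i;

	/* one coherent DMA pool for this packet type */
	*handle = pci_pool_create("megaraid blk", adapter->pdev, blk_size,
				  16, 0);
	if (*handle == NULL)
		return -ENOMEM;

	/* pre-carve one block per outstanding SCSI command */
	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		blk[i].vaddr = pci_pool_alloc(*handle, GFP_KERNEL,
					      &blk[i].dma_addr);
		if (blk[i].vaddr == NULL)
			return -ENOMEM;	/* caller runs the teardown above */
	}
	return 0;
}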
int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
{
	switch (ah->type) {
	case MTHCA_AH_ON_HCA:
		mthca_free(&dev->av_table.alloc,
			   (ah->avdma - dev->av_table.ddr_av_base) /
			   MTHCA_AV_SIZE);
		break;

	case MTHCA_AH_PCI_POOL:
		pci_pool_free(dev->av_table.pool, ah->av, ah->avdma);
		break;

	case MTHCA_AH_KMALLOC:
		kfree(ah->av);
		break;
	}

	return 0;
}
static void ed_free (struct ohci_hcd *hc, struct ed *ed)
{
	pci_pool_free (hc->ed_cache, ed, ed->dma);
}
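/*
 * For context: ed_free() above relies on ed->dma having been captured when
 * the descriptor was carved out of hc->ed_cache.  A minimal sketch of that
 * allocation counterpart, assuming only the pool and fields used above; it
 * is illustrative, not the driver's verbatim allocator.
 */
static struct ed *ed_alloc_sketch (struct ohci_hcd *hc, int mem_flags)
{
	dma_addr_t	dma;
	struct ed	*ed;

	/* one endpoint descriptor from the coherent DMA pool */
	ed = pci_pool_alloc (hc->ed_cache, mem_flags, &dma);
	if (ed) {
		memset (ed, 0, sizeof (*ed));
		/* remember the bus address so ed_free() can hand it back */
		ed->dma = dma;
	}
	return ed;
}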
/**
 * crystalhd_dioq_fetch_wait - Fetch element from head.
 * @hw: HW context whose rx_rdyq is the DIO queue to fetch from.
 * @to_secs: Wait timeout in seconds.
 * @sig_pend: Set to 1 if the wait was interrupted by a pending signal.
 *
 * Return:
 *	element from the head.
 *
 * Return element from head if Q is not empty. Wait for new element
 * if Q is empty for Timeout seconds.
 */
void *crystalhd_dioq_fetch_wait(struct crystalhd_hw *hw, uint32_t to_secs,
				uint32_t *sig_pend)
{
	struct device *dev = chddev();
	unsigned long flags = 0;
	int rc = 0;

	crystalhd_rx_dma_pkt *r_pkt = NULL;
	crystalhd_dioq_t *ioq = hw->rx_rdyq;
	uint32_t picYcomp = 0;

	unsigned long fetchTimeout = jiffies + msecs_to_jiffies(to_secs * 1000);

	if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG) || !to_secs || !sig_pend) {
		dev_err(dev, "%s: Invalid arg\n", __func__);
		return r_pkt;
	}

	spin_lock_irqsave(&ioq->lock, flags);
#ifndef __APPLE__
	while (!time_after_eq(jiffies, fetchTimeout)) {
#else
	while (fetchTimeout >= jiffies) {
#endif
		if (ioq->count == 0) {
			spin_unlock_irqrestore(&ioq->lock, flags);
			crystalhd_wait_on_event(&ioq->event,
						(ioq->count > 0), 250, rc, false);
		} else
			spin_unlock_irqrestore(&ioq->lock, flags);

		if (rc == 0) {
			// Found a packet. Check if it is a repeated picture or not
			// Drop the picture if it is a repeated picture
			// Lock against checks from get status calls
			if (down_interruptible(&hw->fetch_sem))
				goto sem_error;
#ifndef __APPLE__
			r_pkt = crystalhd_dioq_fetch(ioq);
#else
			r_pkt = (crystalhd_rx_dma_pkt *)crystalhd_dioq_fetch(ioq);
#endif
			// If format change packet, then return without checking anything
			if (r_pkt->flags & (COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE))
				goto sem_rel_return;

			if (hw->adp->pdev->device == BC_PCI_DEVID_LINK) {
				picYcomp = link_GetRptDropParam(hw, hw->PICHeight,
								hw->PICWidth, (void *)r_pkt);
			} else {
				// For Flea, we don't have the width and height handy since they
				// come in the PIB in the picture, so this function will also
				// populate the width and height
				picYcomp = flea_GetRptDropParam(hw, (void *)r_pkt);
				// For Flea it is the above function that indicates format change
				if (r_pkt->flags & (COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE))
					goto sem_rel_return;
			}

			if (!picYcomp || (picYcomp == hw->LastPicNo) ||
			    (picYcomp == hw->LastTwoPicNo)) {
				// Discard picture
				if (picYcomp != 0) {
					hw->LastTwoPicNo = hw->LastPicNo;
					hw->LastPicNo = picYcomp;
				}
				crystalhd_dioq_add(hw->rx_freeq, r_pkt, false,
						   r_pkt->pkt_tag);
				r_pkt = NULL;
				up(&hw->fetch_sem);
			} else {
				if (hw->adp->pdev->device == BC_PCI_DEVID_LINK) {
					if ((picYcomp - hw->LastPicNo) > 1) {
						dev_info(dev, "MISSING %u PICTURES\n",
							 (picYcomp - hw->LastPicNo));
					}
				}
				hw->LastTwoPicNo = hw->LastPicNo;
				hw->LastPicNo = picYcomp;
				goto sem_rel_return;
			}
		} else if (rc == -EINTR) {
			*sig_pend = 1;
			return NULL;
		}
		spin_lock_irqsave(&ioq->lock, flags);
	}
	dev_info(dev, "FETCH TIMEOUT\n");
	spin_unlock_irqrestore(&ioq->lock, flags);
	return r_pkt;
sem_error:
	return NULL;
sem_rel_return:
	up(&hw->fetch_sem);
	return r_pkt;
}

#ifdef __APPLE__
static bool CustomSegmentFunction(IODMACommand *target,
				  IODMACommand::Segment64 segment,
				  void *sglMem, UInt32 segmentIndex)
{
	struct scatterlist *sg = (scatterlist *)sglMem;

	sg[segmentIndex].dma_address = (uint32_t)segment.fIOVMAddr;
	sg[segmentIndex].dma_length = (unsigned int)segment.fLength;
	//MPCLOG(MPCLOG_DBG,"CustomSegmentFunction: 0x%X/%d/%d\n",(unsigned int)segment.fIOVMAddr,(unsigned int)segment.fLength, (unsigned int)segmentIndex);
	return true;
}
#endif

/**
 * crystalhd_map_dio - Map user address for DMA
 * @adp: Adapter instance
 * @ubuff: User buffer to map.
 * @ubuff_sz: User buffer size.
 * @uv_offset: UV buffer offset.
 * @en_422mode: TRUE:422 FALSE:420 Capture mode.
 * @dir_tx: TRUE for Tx (To device from host)
 * @dio_hnd: Handle to mapped DIO request.
 *
 * Return:
 *	Status.
 *
 * This routine maps user address and locks pages for DMA.
 *
 */
BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
			    uint32_t ubuff_sz, uint32_t uv_offset,
			    bool en_422mode, bool dir_tx,
			    crystalhd_dio_req **dio_hnd)
{
	struct device *dev;
	crystalhd_dio_req *dio;
	uint32_t start = 0, end = 0, count = 0;
#ifndef __APPLE__
	uint32_t spsz = 0;
	unsigned long uaddr = 0, uv_start = 0;
	int i = 0, rw = 0, res = 0, nr_pages = 0, skip_fb_sg = 0;
#else
	unsigned long uaddr = 0, uv_start = 0;
	int rw = 0;
	uint32_t nr_pages = 0;
#endif

	if (!adp || !ubuff || !ubuff_sz || !dio_hnd) {
		printk(KERN_ERR "%s: Invalid arg\n", __func__);
		return BC_STS_INV_ARG;
	}

	dev = &adp->pdev->dev;

	/* Compute pages */
	uaddr = (unsigned long)ubuff;
	count = ubuff_sz;
	end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = uaddr >> PAGE_SHIFT;
	nr_pages = end - start;

	if (!count || ((uaddr + count) < uaddr)) {
		dev_err(dev, "User addr overflow!!\n");
		return BC_STS_INV_ARG;
	}

	dio = crystalhd_alloc_dio(adp);
	if (!dio) {
		dev_err(dev, "dio pool empty..\n");
		return BC_STS_INSUFF_RES;
	}

	if (dir_tx) {
		rw = WRITE;
		dio->direction = DMA_TO_DEVICE;
	} else {
		rw = READ;
		dio->direction = DMA_FROM_DEVICE;
	}

	if (nr_pages > dio->max_pages) {
		dev_err(dev, "max_pages(%d) exceeded(%d)!!\n",
			dio->max_pages, nr_pages);
		crystalhd_unmap_dio(adp, dio);
		return BC_STS_INSUFF_RES;
	}

#ifndef __APPLE__
	if (uv_offset) {
		uv_start = (uaddr + uv_offset) >> PAGE_SHIFT;
		dio->uinfo.uv_sg_ix = uv_start - start;
		dio->uinfo.uv_sg_off = ((uaddr + uv_offset) & ~PAGE_MASK);
	}

	dio->fb_size = ubuff_sz & 0x03;
	if (dio->fb_size) {
		res = copy_from_user(dio->fb_va,
				     (void *)(uaddr + count - dio->fb_size),
				     dio->fb_size);
		if (res) {
			dev_err(dev, "failed %d to copy %u fill bytes from %p\n",
				res, dio->fb_size,
				(void *)(uaddr + count - dio->fb_size));
			crystalhd_unmap_dio(adp, dio);
			return BC_STS_INSUFF_RES;
		}
	}

	down_read(&current->mm->mmap_sem);
	res = get_user_pages(current, current->mm, uaddr, nr_pages,
			     rw == READ, 0, dio->pages, NULL);
	up_read(&current->mm->mmap_sem);

	/* Save for release.. */
	dio->sig = crystalhd_dio_locked;
	if (res < nr_pages) {
		dev_err(dev, "get pages failed: %d-%d\n", nr_pages, res);
		dio->page_cnt = res;
		crystalhd_unmap_dio(adp, dio);
		return BC_STS_ERROR;
	}

	dio->page_cnt = nr_pages;
	/* Get scatter/gather */
	crystalhd_init_sg(dio->sg, dio->page_cnt);
	crystalhd_set_sg(&dio->sg[0], dio->pages[0], 0, uaddr & ~PAGE_MASK);
	if (nr_pages > 1) {
		dio->sg[0].length = PAGE_SIZE - dio->sg[0].offset;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 23)
#ifdef CONFIG_X86_64
		dio->sg[0].dma_length = dio->sg[0].length;
#endif
#endif
		count -= dio->sg[0].length;
		for (i = 1; i < nr_pages; i++) {
			if (count < 4) {
				spsz = count;
				skip_fb_sg = 1;
			} else {
				spsz = (count < PAGE_SIZE) ?
					(count & ~0x03) : PAGE_SIZE;
			}
			crystalhd_set_sg(&dio->sg[i], dio->pages[i], spsz, 0);
			count -= spsz;
		}
	} else {
		if (count < 4) {
			dio->sg[0].length = count;
			skip_fb_sg = 1;
		} else {
			dio->sg[0].length = count - dio->fb_size;
		}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 23)
#ifdef CONFIG_X86_64
		dio->sg[0].dma_length = dio->sg[0].length;
#endif
#endif
	}
	dio->sg_cnt = pci_map_sg(adp->pdev, dio->sg, dio->page_cnt,
				 dio->direction);
	if (dio->sg_cnt <= 0) {
		dev_err(dev, "sg map %d-%d\n", dio->sg_cnt, dio->page_cnt);
		crystalhd_unmap_dio(adp, dio);
		return BC_STS_ERROR;
	}
	if (dio->sg_cnt && skip_fb_sg)
		dio->sg_cnt -= 1;
#else
	IODMACommand *dma_command;
	IOMemoryDescriptor *mem_desc;
	IOReturn result;

	if (uv_offset) {
		uv_start = (uaddr + uv_offset) >> PAGE_SHIFT;
		dio->uinfo.uv_sg_ix = uv_start - start;
		dio->uinfo.uv_sg_off = ((uaddr + uv_offset) & PAGE_MASK);
	}

	dio->fb_size = ubuff_sz & 0x03;

	// map user memory into kernel memory
	mem_desc = IOMemoryDescriptor::withAddress(uaddr, count,
						   dir_tx ? kIODirectionIn : kIODirectionOut,
						   current_task());
	if (mem_desc) {
		result = mem_desc->prepare();
		//IOLog("bc_link_map_dio:mem_desc=0x%X, prepare result 0x%X \n", (unsigned int)mem_desc, (int)result);
		dio->io_class = (void *)mem_desc;
	} else {
		dev_err(&adp->pdev->dev, "bc_link_map_dio:IOMemoryDescriptor::withAddress failed\n");
		crystalhd_free_dio(adp, dio);
		return BC_STS_INSUFF_RES;
	}

	// Save for release..
	dio->sig = crystalhd_dio_locked;

	// check transfer count, counts less than four are handled using only the fill byte page
	if (count > 3) {
		do {
			// 32 bit physical address generation using IODMACommand
			// any memory above 4Gb in the memory descriptor will be buffered
			// to memory below the 4G line, on machines without remapping HW support
			dma_command = IODMACommand::withSpecification(
				// custom segment function
				(IODMACommand::SegmentFunction)CustomSegmentFunction,
				// numAddressBits
				32,
				// maxSegmentSize
				PAGE_SIZE,
				// mappingOptions - kMapped for DMA addresses
				IODMACommand::kMapped,
				// maxTransferSize - no restriction
				0,
				// alignment - no restriction
				1);
			if (!dma_command) {
				dev_err(&adp->pdev->dev, "IODMACommand::withSpecification failed\n");
				break;
			}

			//IOLog("bc_link_map_dio:dma_command=0x%X \n", (unsigned int)dma_command);
			// point IODMACommand at the memory descriptor, don't use auto prepare option
			result = dma_command->setMemoryDescriptor(mem_desc, false);
			if (kIOReturnSuccess != result) {
				dev_err(&adp->pdev->dev, "setMemoryDescriptor failed (0x%x)\n", result);
				break;
			}
			dio->io_class = (void *)dma_command;
			result = dma_command->prepare(0, count, true);
			//IOLog("bc_link_map_dio:dma_command->prepare() result 0x%X \n",(int)result);

			// generate scatter/gather list using custom segment function. This routine will make
			// sure that the first s/g entry will have the correct address and length for user
			// addresses that are not page aligned.
			UInt64 offset = 0;
			result = dma_command->gen32IOVMSegments(&offset,
								(IODMACommand::Segment32 *)dio->sg,
								(UInt32 *)&nr_pages);
			//IOLog("bc_link_map_dio:gen32IOVMSegments nr_pages %d, result %d\n", (int)nr_pages, (int)result);

			// if ending page is not end 4 byte aligned, decrease last page transfer length
			// as those bytes will be handled using the fill byte page.
			if (dio->fb_size) {
				dio->sg[nr_pages - 1].dma_length -= dio->fb_size;
				// check for last page == same size as dio->fb_size
				if (dio->sg[nr_pages - 1].dma_length == 0)
					nr_pages--;
			}

			// If need a fill byte page
			if (dio->fb_size) {
				UInt64 byte_count;
				UInt64 length;

				// manually copy those bytes into the fill byte page
				offset = count - dio->fb_size;
				length = dio->fb_size;
				byte_count = mem_desc->readBytes(offset, dio->fb_va, length);
			}

			dio->sg_cnt = nr_pages;
		} while (false);

		if (dio->sg_cnt <= 0) {
			dev_err(&adp->pdev->dev, "sg map %d \n", dio->sg_cnt);
			crystalhd_unmap_dio(adp, dio);
			return BC_STS_ERROR;
		}
	} else {
		// three bytes or less, handle this transfer using only the fill_byte page.
		UInt64 byte_count;
		UInt64 offset;
		UInt64 length;

		offset = 0;
		length = dio->fb_size;
		byte_count = mem_desc->readBytes(offset, dio->fb_va, length);
		dio->sg_cnt = 0;
		dio->sg[0].dma_length = count;
	}
#endif

	dio->sig = crystalhd_dio_sg_mapped;
	/* Fill in User info.. */
	dio->uinfo.xfr_len = ubuff_sz;
#ifndef __APPLE__
	dio->uinfo.xfr_buff = ubuff;
#else
	dio->uinfo.xfr_buff = (uint8_t *)ubuff;
#endif
	dio->uinfo.uv_offset = uv_offset;
	dio->uinfo.b422mode = en_422mode;
	dio->uinfo.dir_tx = dir_tx;

	*dio_hnd = dio;

	return BC_STS_SUCCESS;
}

/**
 * crystalhd_unmap_dio - Release mapped resources
 * @adp: Adapter instance
 * @dio: DIO request instance
 *
 * Return:
 *	Status.
 *
 * This routine is to unmap the user buffer pages.
 */
BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *adp, crystalhd_dio_req *dio)
{
#ifndef __APPLE__
	struct page *page = NULL;
	int j = 0;

	if (!adp || !dio) {
		printk(KERN_ERR "%s: Invalid arg\n", __func__);
		return BC_STS_INV_ARG;
	}

	if ((dio->page_cnt > 0) && (dio->sig != crystalhd_dio_inv)) {
		for (j = 0; j < dio->page_cnt; j++) {
			page = dio->pages[j];
			if (page) {
				if (!PageReserved(page) &&
				    (dio->direction == DMA_FROM_DEVICE))
					SetPageDirty(page);
				page_cache_release(page);
			}
		}
	}
	if (dio->sig == crystalhd_dio_sg_mapped)
		pci_unmap_sg(adp->pdev, dio->sg, dio->page_cnt, dio->direction);
#else
	IODMACommand *dma_command;
	IOMemoryDescriptor *mem_desc;

	if (!adp || !dio) {
		dev_err(chddev(), "bc_link_unmap_dio:Invalid arg \n");
		return BC_STS_INV_ARG;
	}
	dma_command = OSDynamicCast(IODMACommand, (OSMetaClassBase *)dio->io_class);
	//MPCLOG(MPCLOG_DBG, "bc_link_unmap_dio:dma_command=0x%X \n", (unsigned int)dma_command);
	if (dma_command) {
		// fetch current IOMemoryDescriptor before dma_command->clearMemoryDescriptor
		mem_desc = (IOMemoryDescriptor *)dma_command->getMemoryDescriptor();
		dma_command->complete();
		dma_command->clearMemoryDescriptor();
		SAFE_RELEASE(dma_command);
		mem_desc->complete();
		SAFE_RELEASE(mem_desc);
		dio->io_class = NULL;
	}
#endif

	crystalhd_free_dio(adp, dio);
	return BC_STS_SUCCESS;
}

/**
 * crystalhd_create_dio_pool - Allocate mem pool for DIO management.
 * @adp: Adapter instance
 * @max_pages: Max pages for size calculation.
 *
 * Return:
 *	system error.
 *
 * This routine creates a memory pool to hold dio context for
 * HW Direct IO operation.
 */
int crystalhd_create_dio_pool(struct crystalhd_adp *adp, uint32_t max_pages)
{
	struct device *dev;
	uint32_t asz = 0, i = 0;
	uint8_t *temp;
	crystalhd_dio_req *dio;

	if (!adp || !max_pages) {
		printk(KERN_ERR "%s: Invalid arg\n", __func__);
		return -EINVAL;
	}

	dev = &adp->pdev->dev;

	/* Get dma memory for fill byte handling..*/
	adp->fill_byte_pool = pci_pool_create("crystalhd_fbyte",
					      adp->pdev, 8, 8, 0);
	if (!adp->fill_byte_pool) {
		dev_err(dev, "failed to create fill byte pool\n");
		return -ENOMEM;
	}

#ifndef __APPLE__
	/* Get the max size from user based on 420/422 modes */
	asz = (sizeof(*dio->pages) * max_pages) +
	      (sizeof(*dio->sg) * max_pages) + sizeof(*dio);
#else
	asz = (sizeof(*dio->sg) * max_pages) + sizeof(*dio);
#endif

	dev_dbg(dev, "Initializing Dio pool %d %d %x %p\n",
		BC_LINK_SG_POOL_SZ, max_pages, asz, adp->fill_byte_pool);

	for (i = 0; i < BC_LINK_SG_POOL_SZ; i++) {
		temp = (uint8_t *)kzalloc(asz, GFP_KERNEL);
		if ((temp) == NULL) {
			dev_err(dev, "Failed to alloc %d mem\n", asz);
			return -ENOMEM;
		}

		dio = (crystalhd_dio_req *)temp;
		temp += sizeof(*dio);
#ifndef __APPLE__
		dio->pages = (struct page **)temp;
		temp += (sizeof(*dio->pages) * max_pages);
#else
		temp += sizeof(*dio);
#endif
		dio->sg = (struct scatterlist *)temp;
		dio->max_pages = max_pages;
		dio->fb_va = pci_pool_alloc(adp->fill_byte_pool, GFP_KERNEL,
					    &dio->fb_pa);
		if (!dio->fb_va) {
			dev_err(dev, "fill byte alloc failed.\n");
			return -ENOMEM;
		}

		crystalhd_free_dio(adp, dio);
	}

	return 0;
}

/**
 * crystalhd_destroy_dio_pool - Release DIO mem pool.
 * @adp: Adapter instance
 *
 * Return:
 *	none.
 *
 * This routine releases dio memory pool during close.
 */
void crystalhd_destroy_dio_pool(struct crystalhd_adp *adp)
{
	crystalhd_dio_req *dio;
	int count = 0;

	if (!adp) {
		printk(KERN_ERR "%s: Invalid arg\n", __func__);
		return;
	}

	do {
		dio = crystalhd_alloc_dio(adp);
		if (dio) {
			if (dio->fb_va)
				pci_pool_free(adp->fill_byte_pool,
					      dio->fb_va, dio->fb_pa);
			count++;
			kfree(dio);
		}
	} while (dio);

	if (adp->fill_byte_pool) {
		pci_pool_destroy(adp->fill_byte_pool);
		adp->fill_byte_pool = NULL;
	}

	dev_dbg(&adp->pdev->dev, "Released dio pool %d\n", count);
}
static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd)
{
	pci_pool_free (ehci->qtd_pool, qtd, qtd->qtd_dma);
}
/**
 * hinic_free_cmdq_buf - free buffer
 * @cmdqs: the cmdqs
 * @cmdq_buf: the buffer to free that is in this struct
 **/
void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
			 struct hinic_cmdq_buf *cmdq_buf)
{
	pci_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
}
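/*
 * All of the snippets above follow the same legacy pci_pool life cycle
 * (in later kernels these calls are thin wrappers around the dma_pool API).
 * A minimal, hypothetical sketch of that pattern; "my_dev", "my_pool",
 * "my_buf" and the 64-byte block size are illustrative and not taken from
 * any driver above.
 */
#include <linux/pci.h>

struct my_dev {
	struct pci_dev	*pdev;
	struct pci_pool	*my_pool;
	void		*my_buf;
	dma_addr_t	my_dma;
};

static int my_dev_init_pool(struct my_dev *d)
{
	/* one pool of small, equally sized DMA-coherent blocks */
	d->my_pool = pci_pool_create("my_pool", d->pdev, 64, 64, 0);
	if (!d->my_pool)
		return -ENOMEM;

	/* hand out one block; the bus address comes back in my_dma */
	d->my_buf = pci_pool_alloc(d->my_pool, GFP_KERNEL, &d->my_dma);
	if (!d->my_buf) {
		pci_pool_destroy(d->my_pool);
		return -ENOMEM;
	}
	return 0;
}

static void my_dev_exit_pool(struct my_dev *d)
{
	/* every block must go back before the pool itself is destroyed */
	pci_pool_free(d->my_pool, d->my_buf, d->my_dma);
	pci_pool_destroy(d->my_pool);
}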