/** * crystalhd_dioq_fetch_wait - Fetch element from Head. * @ioq: DIO queue instance * @to_secs: Wait timeout in seconds.. * * Return: * element from the head.. * * Return element from head if Q is not empty. Wait for new element * if Q is empty for Timeout seconds. */ void *crystalhd_dioq_fetch_wait(struct crystalhd_hw *hw, uint32_t to_secs, uint32_t *sig_pend) { struct device *dev = chddev(); unsigned long flags = 0; int rc = 0; crystalhd_rx_dma_pkt *r_pkt = NULL; crystalhd_dioq_t *ioq = hw->rx_rdyq; uint32_t picYcomp = 0; unsigned long fetchTimeout = jiffies + msecs_to_jiffies(to_secs * 1000); if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG) || !to_secs || !sig_pend) { dev_err(dev, "%s: Invalid arg\n", __func__); return r_pkt; } spin_lock_irqsave(&ioq->lock, flags); #ifndef __APPLE__ while (!time_after_eq(jiffies, fetchTimeout)) { #else while (fetchTimeout >= jiffies) { #endif if(ioq->count == 0) { spin_unlock_irqrestore(&ioq->lock, flags); crystalhd_wait_on_event(&ioq->event, (ioq->count > 0), 250, rc, false); } else spin_unlock_irqrestore(&ioq->lock, flags); if (rc == 0) { // Found a packet. 
Check if it is a repeated picture or not // Drop the picture if it is a repeated picture // Lock against checks from get status calls if(down_interruptible(&hw->fetch_sem)) goto sem_error; #ifndef __APPLE__ r_pkt = crystalhd_dioq_fetch(ioq); #else r_pkt = (crystalhd_rx_dma_pkt*)crystalhd_dioq_fetch(ioq); #endif // If format change packet, then return with out checking anything if (r_pkt->flags & (COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE)) goto sem_rel_return; if (hw->adp->pdev->device == BC_PCI_DEVID_LINK) { picYcomp = link_GetRptDropParam(hw, hw->PICHeight, hw->PICWidth, (void *)r_pkt); } else { // For Flea, we don't have the width and height handy since they // come in the PIB in the picture, so this function will also // populate the width and height picYcomp = flea_GetRptDropParam(hw, (void *)r_pkt); // For flea it is the above function that indicated format change if(r_pkt->flags & (COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE)) goto sem_rel_return; } if(!picYcomp || (picYcomp == hw->LastPicNo) || (picYcomp == hw->LastTwoPicNo)) { //Discard picture if(picYcomp != 0) { hw->LastTwoPicNo = hw->LastPicNo; hw->LastPicNo = picYcomp; } crystalhd_dioq_add(hw->rx_freeq, r_pkt, false, r_pkt->pkt_tag); r_pkt = NULL; up(&hw->fetch_sem); } else { if(hw->adp->pdev->device == BC_PCI_DEVID_LINK) { if((picYcomp - hw->LastPicNo) > 1) { dev_info(dev, "MISSING %u PICTURES\n", (picYcomp - hw->LastPicNo)); } } hw->LastTwoPicNo = hw->LastPicNo; hw->LastPicNo = picYcomp; goto sem_rel_return; } } else if (rc == -EINTR) { *sig_pend = 1; return NULL; } spin_lock_irqsave(&ioq->lock, flags); } dev_info(dev, "FETCH TIMEOUT\n"); spin_unlock_irqrestore(&ioq->lock, flags); return r_pkt; sem_error: return NULL; sem_rel_return: up(&hw->fetch_sem); return r_pkt; } #ifdef __APPLE__ static bool CustomSegmentFunction(IODMACommand *target, IODMACommand::Segment64 segment, void *sglMem, UInt32 segmentIndex) { struct scatterlist *sg = (scatterlist*)sglMem; sg[segmentIndex].dma_address = 
(uint32_t)segment.fIOVMAddr; sg[segmentIndex].dma_length = (unsigned int)segment.fLength; //MPCLOG(MPCLOG_DBG,"CustomSegmentFunction: 0x%X/%d/%d\n",(unsigned int)segment.fIOVMAddr,(unsigned int)segment.fLength, (unsigned int)segmentIndex); return true; } #endif /** * crystalhd_map_dio - Map user address for DMA * @adp: Adapter instance * @ubuff: User buffer to map. * @ubuff_sz: User buffer size. * @uv_offset: UV buffer offset. * @en_422mode: TRUE:422 FALSE:420 Capture mode. * @dir_tx: TRUE for Tx (To device from host) * @dio_hnd: Handle to mapped DIO request. * * Return: * Status. * * This routine maps user address and lock pages for DMA. * */ BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff, uint32_t ubuff_sz, uint32_t uv_offset, bool en_422mode, bool dir_tx, crystalhd_dio_req **dio_hnd) { struct device *dev; crystalhd_dio_req *dio; uint32_t start = 0, end = 0, count = 0; #ifndef __APPLE__ uint32_t spsz = 0; unsigned long uaddr = 0, uv_start = 0; int i = 0, rw = 0, res = 0, nr_pages = 0, skip_fb_sg = 0; #else unsigned long uaddr = 0, uv_start = 0; int rw = 0; uint32_t nr_pages = 0; #endif if (!adp || !ubuff || !ubuff_sz || !dio_hnd) { printk(KERN_ERR "%s: Invalid arg\n", __func__); return BC_STS_INV_ARG; } dev = &adp->pdev->dev; /* Compute pages */ uaddr = (unsigned long)ubuff; count = ubuff_sz; end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT; start = uaddr >> PAGE_SHIFT; nr_pages = end - start; if (!count || ((uaddr + count) < uaddr)) { dev_err(dev, "User addr overflow!!\n"); return BC_STS_INV_ARG; } dio = crystalhd_alloc_dio(adp); if (!dio) { dev_err(dev, "dio pool empty..\n"); return BC_STS_INSUFF_RES; } if (dir_tx) { rw = WRITE; dio->direction = DMA_TO_DEVICE; } else { rw = READ; dio->direction = DMA_FROM_DEVICE; } if (nr_pages > dio->max_pages) { dev_err(dev, "max_pages(%d) exceeded(%d)!!\n", dio->max_pages, nr_pages); crystalhd_unmap_dio(adp, dio); return BC_STS_INSUFF_RES; } #ifndef __APPLE__ if (uv_offset) { uv_start = (uaddr + 
uv_offset) >> PAGE_SHIFT; dio->uinfo.uv_sg_ix = uv_start - start; dio->uinfo.uv_sg_off = ((uaddr + uv_offset) & ~PAGE_MASK); } dio->fb_size = ubuff_sz & 0x03; if (dio->fb_size) { res = copy_from_user(dio->fb_va, (void *)(uaddr + count - dio->fb_size), dio->fb_size); if (res) { dev_err(dev, "failed %d to copy %u fill bytes from %p\n", res, dio->fb_size, (void *)(uaddr + count-dio->fb_size)); crystalhd_unmap_dio(adp, dio); return BC_STS_INSUFF_RES; } } down_read(¤t->mm->mmap_sem); res = get_user_pages(current, current->mm, uaddr, nr_pages, rw == READ, 0, dio->pages, NULL); up_read(¤t->mm->mmap_sem); /* Save for release..*/ dio->sig = crystalhd_dio_locked; if (res < nr_pages) { dev_err(dev, "get pages failed: %d-%d\n", nr_pages, res); dio->page_cnt = res; crystalhd_unmap_dio(adp, dio); return BC_STS_ERROR; } dio->page_cnt = nr_pages; /* Get scatter/gather */ crystalhd_init_sg(dio->sg, dio->page_cnt); crystalhd_set_sg(&dio->sg[0], dio->pages[0], 0, uaddr & ~PAGE_MASK); if (nr_pages > 1) { dio->sg[0].length = PAGE_SIZE - dio->sg[0].offset; #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 23) #ifdef CONFIG_X86_64 dio->sg[0].dma_length = dio->sg[0].length; #endif #endif count -= dio->sg[0].length; for (i = 1; i < nr_pages; i++) { if (count < 4) { spsz = count; skip_fb_sg = 1; } else { spsz = (count < PAGE_SIZE) ? 
(count & ~0x03) : PAGE_SIZE; } crystalhd_set_sg(&dio->sg[i], dio->pages[i], spsz, 0); count -= spsz; } } else { if (count < 4) { dio->sg[0].length = count; skip_fb_sg = 1; } else { dio->sg[0].length = count - dio->fb_size; } #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 23) #ifdef CONFIG_X86_64 dio->sg[0].dma_length = dio->sg[0].length; #endif #endif } dio->sg_cnt = pci_map_sg(adp->pdev, dio->sg, dio->page_cnt, dio->direction); if (dio->sg_cnt <= 0) { dev_err(dev, "sg map %d-%d\n", dio->sg_cnt, dio->page_cnt); crystalhd_unmap_dio(adp, dio); return BC_STS_ERROR; } if (dio->sg_cnt && skip_fb_sg) dio->sg_cnt -= 1; #else IODMACommand *dma_command; IOMemoryDescriptor *mem_desc; IOReturn result; if (uv_offset) { uv_start = (uaddr + uv_offset) >> PAGE_SHIFT; dio->uinfo.uv_sg_ix = uv_start - start; dio->uinfo.uv_sg_off = ((uaddr + uv_offset) & PAGE_MASK); } dio->fb_size = ubuff_sz & 0x03; // map user memory into kernel memory mem_desc = IOMemoryDescriptor::withAddress(uaddr, count, dir_tx ? kIODirectionIn : kIODirectionOut, current_task() ); if (mem_desc) { result = mem_desc->prepare(); //IOLog("bc_link_map_dio:mem_desc=0x%X, prepare result 0x%X \n", (unsigned int)mem_desc, (int)result); dio->io_class = (void*)mem_desc; } else { dev_err(&adp->pdev->dev, "bc_link_map_dio:IOMemoryDescriptor::withAddress failed\n"); crystalhd_free_dio(adp,dio); return BC_STS_INSUFF_RES; } // Save for release.. 
dio->sig = crystalhd_dio_locked; // check transfer count, counts less than four are handled using only the fill byte page if (count > 3) { do { // 32 bit physical address generation using IODMACommand // any memory above 4Gb in the memory descriptor will be buffered // to memory below the 4G line, on machines without remapping HW support dma_command = IODMACommand::withSpecification( // custom segment function (IODMACommand::SegmentFunction)CustomSegmentFunction, // numAddressBits 32, // maxSegmentSize PAGE_SIZE, // mappingOptions - kMapped for DMA addresses IODMACommand::kMapped, // maxTransferSize - no restriction 0, // alignment - no restriction 1 ); if (!dma_command) { dev_err(&adp->pdev->dev, "IODMACommand::withSpecification failed\n"); break; } //IOLog("bc_link_map_dio:dma_command=0x%X \n", (unsigned int)dma_command); // point IODMACommand at the memory descriptor, don't use auto prepare option result = dma_command->setMemoryDescriptor(mem_desc, false); if (kIOReturnSuccess != result) { dev_err(&adp->pdev->dev, "setMemoryDescriptor failed (0x%x)\n", result); break; } dio->io_class = (void*)dma_command; result = dma_command->prepare(0, count, true); //IOLog("bc_link_map_dio:dma_command->prepare() result 0x%X \n",(int)result); // generate scatter/gather list using custom segment function. This routine will make // sure that the first s/g entry will have the correct address and length for user // addresses that are not page aligned. UInt64 offset = 0; result = dma_command->gen32IOVMSegments(&offset, (IODMACommand::Segment32*)dio->sg, (UInt32*)&nr_pages); //IOLog("bc_link_map_dio:gen32IOVMSegments nr_pages %d, result %d\n", (int)nr_pages, (int)result); // if ending page is not end 4 byte aligned, decrease last page transfer length // as those bytes will be handled using the fill byte page. 
if(dio->fb_size) { dio->sg[nr_pages-1].dma_length -= dio->fb_size; // check for last page == same size as dio->fb_size if (dio->sg[nr_pages-1].dma_length == 0) { nr_pages--; } } // If need a fill byte page if(dio->fb_size) { UInt64 byte_count; UInt64 length; // manually copy those bytes into the fill byte page offset = count - dio->fb_size; length = dio->fb_size; byte_count = mem_desc->readBytes(offset, dio->fb_va, length); } dio->sg_cnt = nr_pages; } while(false); if (dio->sg_cnt <= 0) { dev_err(&adp->pdev->dev, "sg map %d \n",dio->sg_cnt); crystalhd_unmap_dio(adp,dio); return BC_STS_ERROR; } } else { // three bytes or less, handle this transfer using only the fill_byte page. UInt64 byte_count; UInt64 offset; UInt64 length; offset = 0; length = dio->fb_size; byte_count = mem_desc->readBytes(offset, dio->fb_va, length); dio->sg_cnt = 0; dio->sg[0].dma_length = count; } #endif dio->sig = crystalhd_dio_sg_mapped; /* Fill in User info.. */ dio->uinfo.xfr_len = ubuff_sz; #ifndef __APPLE__ dio->uinfo.xfr_buff = ubuff; #else dio->uinfo.xfr_buff = (uint8_t*)ubuff; #endif dio->uinfo.uv_offset = uv_offset; dio->uinfo.b422mode = en_422mode; dio->uinfo.dir_tx = dir_tx; *dio_hnd = dio; return BC_STS_SUCCESS; } /** * crystalhd_unmap_sgl - Release mapped resources * @adp: Adapter instance * @dio: DIO request instance * * Return: * Status. * * This routine is to unmap the user buffer pages. 
 */
BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *adp, crystalhd_dio_req *dio)
{
#ifndef __APPLE__
	struct page *page = NULL;
	int j = 0;

	if (!adp || !dio) {
		printk(KERN_ERR "%s: Invalid arg\n", __func__);
		return BC_STS_INV_ARG;
	}

	/* Release the user pages pinned by get_user_pages(). Pages that the
	 * device wrote to (DMA_FROM_DEVICE) are marked dirty first so the VM
	 * writes them back; reserved pages are never dirtied. */
	if ((dio->page_cnt > 0) && (dio->sig != crystalhd_dio_inv)) {
		for (j = 0; j < dio->page_cnt; j++) {
			page = dio->pages[j];
			if (page) {
				if (!PageReserved(page) &&
				    (dio->direction == DMA_FROM_DEVICE))
					SetPageDirty(page);
				page_cache_release(page);
			}
		}
	}
	/* Only undo the s/g mapping if crystalhd_map_dio got that far. */
	if (dio->sig == crystalhd_dio_sg_mapped)
		pci_unmap_sg(adp->pdev, dio->sg, dio->page_cnt, dio->direction);
#else
	IODMACommand *dma_command;
	IOMemoryDescriptor *mem_desc;

	if (!adp || !dio) {
		dev_err(chddev(), "bc_link_unmap_dio:Invalid arg \n");
		return BC_STS_INV_ARG;
	}
	/* dio->io_class may hold either the IODMACommand (full mapping) or
	 * just the IOMemoryDescriptor; the cast only succeeds for the former. */
	dma_command = OSDynamicCast(IODMACommand, (OSMetaClassBase*)dio->io_class);
	if (dma_command) {
		/* fetch the current IOMemoryDescriptor before
		 * dma_command->clearMemoryDescriptor() drops it */
		mem_desc = (IOMemoryDescriptor*)dma_command->getMemoryDescriptor();
		dma_command->complete();
		dma_command->clearMemoryDescriptor();
		SAFE_RELEASE(dma_command);
		mem_desc->complete();
		SAFE_RELEASE(mem_desc);
		dio->io_class = NULL;
	}
#endif
	/* Return the dio context to the adapter's free pool. */
	crystalhd_free_dio(adp, dio);
	return BC_STS_SUCCESS;
}

/**
 * crystalhd_create_dio_pool - Allocate mem pool for DIO management.
 * @adp: Adapter instance
 * @max_pages: Max pages for size calculation.
 *
 * Return:
 *	system error.
 *
 * This routine creates a memory pool to hold dio context for
 * HW Direct IO operation.
*/ int crystalhd_create_dio_pool(struct crystalhd_adp *adp, uint32_t max_pages) { struct device *dev; uint32_t asz = 0, i = 0; uint8_t *temp; crystalhd_dio_req *dio; if (!adp || !max_pages) { printk(KERN_ERR "%s: Invalid arg\n", __func__); return -EINVAL; } dev = &adp->pdev->dev; /* Get dma memory for fill byte handling..*/ adp->fill_byte_pool = pci_pool_create("crystalhd_fbyte", adp->pdev, 8, 8, 0); if (!adp->fill_byte_pool) { dev_err(dev, "failed to create fill byte pool\n"); return -ENOMEM; } #ifndef __APPLE__ /* Get the max size from user based on 420/422 modes */ asz = (sizeof(*dio->pages) * max_pages) + (sizeof(*dio->sg) * max_pages) + sizeof(*dio); #else asz = (sizeof(*dio->sg) * max_pages) + sizeof(*dio); #endif dev_dbg(dev, "Initializing Dio pool %d %d %x %p\n", BC_LINK_SG_POOL_SZ, max_pages, asz, adp->fill_byte_pool); for (i = 0; i < BC_LINK_SG_POOL_SZ; i++) { temp = (uint8_t *)kzalloc(asz, GFP_KERNEL); if ((temp) == NULL) { dev_err(dev, "Failed to alloc %d mem\n", asz); return -ENOMEM; } dio = (crystalhd_dio_req *)temp; temp += sizeof(*dio); #ifndef __APPLE__ dio->pages = (struct page **)temp; temp += (sizeof(*dio->pages) * max_pages); #else temp += sizeof(*dio); #endif dio->sg = (struct scatterlist *)temp; dio->max_pages = max_pages; dio->fb_va = pci_pool_alloc(adp->fill_byte_pool, GFP_KERNEL, &dio->fb_pa); if (!dio->fb_va) { dev_err(dev, "fill byte alloc failed.\n"); return -ENOMEM; } crystalhd_free_dio(adp, dio); } return 0; } /** * crystalhd_destroy_dio_pool - Release DIO mem pool. * @adp: Adapter instance * * Return: * none. * * This routine releases dio memory pool during close. 
*/ void crystalhd_destroy_dio_pool(struct crystalhd_adp *adp) { crystalhd_dio_req *dio; int count = 0; if (!adp) { printk(KERN_ERR "%s: Invalid arg\n", __func__); return; } do { dio = crystalhd_alloc_dio(adp); if (dio) { if (dio->fb_va) pci_pool_free(adp->fill_byte_pool, dio->fb_va, dio->fb_pa); count++; kfree(dio); } } while (dio); if (adp->fill_byte_pool) { pci_pool_destroy(adp->fill_byte_pool); adp->fill_byte_pool = NULL; } dev_dbg(&adp->pdev->dev, "Released dio pool %d\n", count); }
/**
 * crystalhd_dioq_fetch_wait - Fetch element from Head.
 * @hw: HW instance; the ready queue polled here is hw->rx_rdyq.
 * @to_secs: Wait timeout in seconds.
 * @sig_pend: Out flag, set to 1 when the wait is interrupted by a signal.
 *
 * Return:
 *	element from the head.
 *
 * Return element from head if Q is not empty. Wait for new element
 * if Q is empty for Timeout seconds.
 */
void *crystalhd_dioq_fetch_wait(struct crystalhd_hw *hw, uint32_t to_secs, uint32_t *sig_pend)
{
	struct device *dev = chddev();
	unsigned long flags = 0;
	int rc = 0;
	struct crystalhd_rx_dma_pkt *r_pkt = NULL;
	struct crystalhd_dioq *ioq = hw->rx_rdyq;
	uint32_t picYcomp = 0;
	unsigned long fetchTimeout = jiffies + msecs_to_jiffies(to_secs * 1000);

	if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG) || !to_secs || !sig_pend) {
		dev_err(dev, "%s: Invalid arg\n", __func__);
		return r_pkt;
	}

	spin_lock_irqsave(&ioq->lock, flags);
	while (!time_after_eq(jiffies, fetchTimeout)) {
		/* The queue lock is dropped in both branches: before sleeping
		 * on the event, and before the repeat-picture check below. */
		if (ioq->count == 0) {
			spin_unlock_irqrestore(&ioq->lock, flags);
			crystalhd_wait_on_event(&ioq->event, (ioq->count > 0),
						250, rc, false);
		} else
			spin_unlock_irqrestore(&ioq->lock, flags);

		if (rc == 0) {
			/* Found a packet. Check if it is a repeated picture or not */
			/* Drop the picture if it is a repeated picture */
			/* Lock against checks from get status calls */
			if (down_interruptible(&hw->fetch_sem))
				goto sem_error;
			r_pkt = crystalhd_dioq_fetch(ioq);
			/* If format change packet, then return with out checking
			 * anything */
			if (r_pkt->flags & (COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE))
				goto sem_rel_return;
			if (hw->adp->pdev->device == BC_PCI_DEVID_LINK) {
				picYcomp = link_GetRptDropParam(hw, hw->PICHeight,
								hw->PICWidth, (void *)r_pkt);
			} else {
				/* For Flea, we don't have the width and height handy */
				/* since they come in the PIB in the picture, so this */
				/* function will also populate the width and height */
				picYcomp = flea_GetRptDropParam(hw, (void *)r_pkt);
				/* For flea it is the above function that indicated
				 * format change */
				if (r_pkt->flags & (COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE))
					goto sem_rel_return;
			}

			if (!picYcomp || (picYcomp == hw->LastPicNo) ||
			    (picYcomp == hw->LastTwoPicNo)) {
				/* Discard picture: repeated (matches either of the
				 * two most recent picture numbers) or invalid (0).
				 * The packet goes back to the rx free queue. */
				if (picYcomp != 0) {
					hw->LastTwoPicNo = hw->LastPicNo;
					hw->LastPicNo = picYcomp;
				}
				crystalhd_dioq_add(hw->rx_freeq, r_pkt, false,
						   r_pkt->pkt_tag);
				r_pkt = NULL;
				up(&hw->fetch_sem);
			} else {
				if (hw->adp->pdev->device == BC_PCI_DEVID_LINK) {
					if ((picYcomp - hw->LastPicNo) > 1) {
						dev_info(dev, "MISSING %u PICTURES\n",
							 (picYcomp - hw->LastPicNo));
					}
				}
				hw->LastTwoPicNo = hw->LastPicNo;
				hw->LastPicNo = picYcomp;
				goto sem_rel_return;
			}
		} else if (rc == -EINTR) {
			/* Signal during the wait; lock is already released and
			 * r_pkt is still NULL here. */
			*sig_pend = 1;
			return r_pkt;
		}
		spin_lock_irqsave(&ioq->lock, flags);
	}
	dev_info(dev, "FETCH TIMEOUT\n");
	spin_unlock_irqrestore(&ioq->lock, flags);
	return r_pkt;
sem_error:
	/* down_interruptible() was interrupted; semaphore was never taken. */
	return NULL;
sem_rel_return:
	up(&hw->fetch_sem);
	return r_pkt;
}