/*
 * Prepare a user-space buffer for a UDMA transfer to the card.
 *
 * @itv:            device state
 * @ivtv_dest_addr: destination address in card memory
 * @userbuf:        user-space source buffer
 * @size_in_bytes:  number of bytes to transfer
 *
 * Pins the user pages, builds and maps the scatter-gather list, and tags
 * the final SG entry with the interrupt bit. Returns the number of pinned
 * pages on success, or a negative errno (-EBUSY if a previous transfer is
 * still outstanding, -EINVAL/-ENOMEM on setup failure).
 *
 * On every failure path all pages pinned so far are released, so the
 * caller owns the pages only on success (they are dropped again by the
 * usual ivtv_udma_unmap() path).
 */
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
		    void __user *userbuf, int size_in_bytes)
{
	struct ivtv_dma_page_info user_dma;
	struct ivtv_user_dma *dma = &itv->udma;
	int i, err;

	IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
				dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);

	if (user_dma.page_count <= 0) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
				user_dma.page_count, size_in_bytes, user_dma.offset);
		return -EINVAL;
	}

	/* Get user pages for DMA Xfer */
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, user_dma.uaddr,
			user_dma.page_count, 0, 1, dma->map, NULL);
	up_read(&current->mm->mmap_sem);

	if (user_dma.page_count != err) {
		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
				err, user_dma.page_count);
		/* Fix: a partial pin still pins 'err' pages; release them
		 * here or they are leaked (pinned forever). A negative err
		 * means nothing was pinned; propagate the real error code. */
		if (err >= 0) {
			for (i = 0; i < err; i++)
				put_page(dma->map[i]);
			return -EINVAL;
		}
		return err;
	}

	dma->page_count = user_dma.page_count;

	/* Fill SG List with new values */
	if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
		for (i = 0; i < dma->page_count; i++)
			put_page(dma->map[i]);
		dma->page_count = 0;
		return -ENOMEM;
	}

	/* Map SG List */
	dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);

	/* Fix: pci_map_sg() can fail and return 0; without this check the
	 * interrupt-bit tag below would write to SGarray[-1]. */
	if (!dma->SG_length) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: pci_map_sg failed, SG_length is 0\n");
		for (i = 0; i < dma->page_count; i++)
			put_page(dma->map[i]);
		dma->page_count = 0;
		return -EINVAL;
	}

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array(dma, ivtv_dest_addr, 0, -1);

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return dma->page_count;
}
void ivtv_udma_start(struct ivtv *itv) { IVTV_DEBUG_DMA("start UDMA\n"); write_reg(itv->udma.SG_handle, IVTV_REG_DECDMAADDR); write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER); set_bit(IVTV_F_I_DMA, &itv->i_flags); set_bit(IVTV_F_I_UDMA, &itv->i_flags); }