/* * Get a anonymous page for the mapping. Make sure we can DMA to that * memory location with 32bit PCI devices (i.e. don't use highmem for * now ...). Bounce buffers don't work very well for the data rates * video capture has. */ static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page; dprintk(3,"fault: fault @ %08lx [vma %08lx-%08lx]\n", (unsigned long)vmf->virtual_address,vma->vm_start,vma->vm_end); page = alloc_page(GFP_USER | __GFP_DMA32); if (!page) return VM_FAULT_OOM; clear_user_page(page_address(page), (unsigned long)vmf->virtual_address, page); vmf->page = page; return 0; }
/* * Get a anonymous page for the mapping. Make sure we can DMA to that * memory location with 32bit PCI devices (i.e. don't use highmem for * now ...). Bounce buffers don't work very well for the data rates * video capture has. */ static struct page* videobuf_vm_nopage(struct vm_area_struct *vma, unsigned long vaddr, int *type) { struct page *page; dprintk(3,"nopage: fault @ %08lx [vma %08lx-%08lx]\n", vaddr,vma->vm_start,vma->vm_end); if (vaddr > vma->vm_end) return NOPAGE_SIGBUS; page = alloc_page(GFP_USER | __GFP_DMA32); if (!page) return NOPAGE_OOM; clear_user_page(page_address(page), vaddr, page); if (type) *type = VM_FAULT_MINOR; return page; }
/* filemap_write_and_wait(inode->i_mapping); */
	if (   inode->i_mapping->nrpages
	    && filemap_fdatawrite(inode->i_mapping) != -EIO)
		filemap_fdatawait(inode->i_mapping);
#endif
	/* Tell the host to close its end of the shared-folder handle. */
	rc = vboxCallClose(&client_handle, &sf_g->map, sf_r->handle);
	if (RT_FAILURE(rc))
		LogFunc(("vboxCallClose failed rc=%Rrc\n", rc));

	/* Drop the per-open state and clear the cached handle on the inode. */
	kfree(sf_r);
	sf_i->file = NULL;
	sf_i->handle = SHFL_HANDLE_NIL;
	file->private_data = NULL;
	return 0;
}

/*
 * Page-fault handler for mmap'ed shared-folder regular files.
 *
 * Three signatures are selected by kernel version:
 *   > 2.6.25  : the ->fault API (struct vm_fault), returning VM_FAULT_* codes;
 *   >= 2.6.0  : the ->nopage API, reporting the fault type through *type;
 *   < 2.6.0   : the ->nopage API without the type out-parameter.
 * The SET_TYPE() macro hides the difference between the two nopage variants.
 *
 * The page content is fetched from the host via sf_reg_read_aux(); a short
 * read is zero-padded and a zero-length read yields a cleared page.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
static int sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
static struct page *sf_reg_nopage(struct vm_area_struct *vma,
				  unsigned long vaddr, int *type)
# define SET_TYPE(t) *type = (t)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
static struct page *sf_reg_nopage(struct vm_area_struct *vma,
				  unsigned long vaddr, int unused)
# define SET_TYPE(t)
#endif
{
	struct page *page;
	char *buf;
	loff_t off;
	uint32_t nread = PAGE_SIZE;   /* in: bytes wanted; out: bytes actually read */
	int err;
	struct file *file = vma->vm_file;
	struct inode *inode = GET_F_DENTRY(file)->d_inode;
	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
	struct sf_reg_info *sf_r = file->private_data;

	TRACE();
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
	/* NOTE(review): this compares a page offset (vmf->pgoff, in pages)
	 * against a byte address (vma->vm_end), so the units differ and the
	 * guard is almost never taken.  Looks like a latent off-by-units
	 * bug -- verify the intended bound before changing. */
	if (vmf->pgoff > vma->vm_end)
		return VM_FAULT_SIGBUS;
#else
	/* NOTE(review): vm_end is exclusive; '>=' would be the exact bound.
	 * With '>' an address equal to vm_end passes the check. */
	if (vaddr > vma->vm_end)
	{
		SET_TYPE(VM_FAULT_SIGBUS);
		return NOPAGE_SIGBUS;
	}
#endif

	/* Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls
	 * vboxCallRead() which works on virtual addresses.  On Linux we
	 * cannot reliably determine the physical address for high memory,
	 * see rtR0MemObjNativeLockKernel(). */
	page = alloc_page(GFP_USER);
	if (!page)
	{
		LogRelFunc(("failed to allocate page\n"));
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
		return VM_FAULT_OOM;
#else
		SET_TYPE(VM_FAULT_OOM);
		return NOPAGE_OOM;
#endif
	}

	buf = kmap(page);

	/* Byte offset within the file that this page backs. */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
	off = (vmf->pgoff << PAGE_SHIFT);
#else
	off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
#endif
	err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
	if (err)
	{
		/* Host read failed: release the page and signal a bus error. */
		kunmap(page);
		put_page(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
		return VM_FAULT_SIGBUS;
#else
		SET_TYPE(VM_FAULT_SIGBUS);
		return NOPAGE_SIGBUS;
#endif
	}

	BUG_ON (nread > PAGE_SIZE);
	if (!nread)
	{
		/* Nothing was read (e.g. fault past EOF): return a zeroed
		 * page.  clear_user_page()'s signature differs per version. */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
		clear_user_page(page_address(page), vmf->pgoff, page);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
		clear_user_page(page_address(page), vaddr, page);
#else
		clear_user_page(page_address(page), vaddr);
#endif
	}
	else
		/* Short read: zero the tail so no stale data leaks to userspace. */
		memset(buf + nread, 0, PAGE_SIZE - nread);

	flush_dcache_page(page);
	kunmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
	vmf->page = page;
	return 0;
#else
	SET_TYPE(VM_FAULT_MAJOR);
	return page;
#endif
}