/*
 * mmap entry point for the PVR services node.
 *
 * Offer the mapping to the PVR services layer first; if it does not
 * recognize the offset (-ENOENT), hand the request to the DRM core's
 * mmap handler instead.  Any other PVRMMap() result is returned as-is.
 */
int SYSPVRMMap(struct file *pFile, struct vm_area_struct *ps_vma)
{
	int status;

	status = PVRMMap(pFile, ps_vma);
	if (status != -ENOENT)
		return status;

	/* Not a PVR mapping: fall back to the DRM handler. */
	return drm_mmap(pFile, ps_vma);
}
/* * Create a virtual address mapping for physical pages of memory. * * This needs to handle requrests for both the EMGD display driver * and the IMG 2D/3D drivers. * * If the page offset falls below the 256MB limit for display, * then map display memory. If above, route to the IMG handler. */ int emgd_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_file *file_priv; drm_emgd_priv_t *emgd_priv; gmm_chunk_t *chunk; unsigned long offset; /* * re-direct offsets beyond the 256MB display range to PVRMMap */ if (vma->vm_pgoff > DRM_PSB_FILE_PAGE_OFFSET) { EMGD_DEBUG("emgd_mmap: Calling PVRMMap()."); return PVRMMap(filp, vma); } file_priv = (struct drm_file *) filp->private_data; emgd_priv = (drm_emgd_priv_t *)file_priv->minor->dev->dev_private; offset = vma->vm_pgoff << PAGE_SHIFT; /* * Look up the buffer in the gmm chunk list based on offset * and size. */ /* chunk = emgd_priv->context->dispatch->gmm_get_chunk(vma->vm_pgoff);*/ chunk = gmm_get_chunk(emgd_priv->context, offset); if (chunk == NULL) { printk(KERN_ERR "emgd_mmap: Failed to find memory at 0x%lx.", offset); } /* * Fill in the vma */ vma->vm_ops = &emgd_vm_ops; vma->vm_private_data = chunk; #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0) vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP; #else vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND; #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) pgprot_val(vma->vm_page_prot) = pgprot_val(vma->vm_page_prot) | _PAGE_CACHE_MODE_UC_MINUS; #else pgprot_val(vma->vm_page_prot) = pgprot_val(vma->vm_page_prot) | _PAGE_CACHE_UC_MINUS; #endif return 0; }