/*
 * ram_point - MTD "point" method for a vmalloc-backed RAM device.
 *
 * Returns a direct kernel-virtual pointer (*virt) to the data at @from and
 * reports in *retlen how many bytes of it are usable.  When the caller also
 * wants a physical address (@phys non-NULL), the backing vmalloc pages are
 * walked and *retlen is trimmed to the physically contiguous prefix, since a
 * physical address is only meaningful across contiguous frames.
 *
 * Always returns 0; the short-map case is signalled via *retlen < len.
 */
static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, void **virt, resource_size_t *phys)
{
	*virt = mtd->priv + from;
	*retlen = len;

	if (phys) {
		/* limit retlen to the number of contiguous physical pages */
		unsigned long page_ofs = offset_in_page(*virt);
		void *addr = *virt - page_ofs;
		unsigned long pfn1, pfn0 = vmalloc_to_pfn(addr);

		*phys = __pfn_to_phys(pfn0) + page_ofs;
		/* include the intra-page offset so the loop counts whole pages */
		len += page_ofs;
		while (len > PAGE_SIZE) {
			len -= PAGE_SIZE;
			addr += PAGE_SIZE;
			pfn0++;
			pfn1 = vmalloc_to_pfn(addr);
			/* first non-adjacent frame ends the contiguous run */
			if (pfn1 != pfn0) {
				*retlen = addr - *virt;
				break;
			}
		}
	}
	return 0;
}
/*
 * vmalloc_to_phys - translate a vmalloc/module virtual address to its
 * physical address.  BUGs if the address has no backing page.
 */
unsigned long vmalloc_to_phys(void *va)
{
	unsigned long byte_offset = offset_in_page(va);
	unsigned long pfn = vmalloc_to_pfn(va);

	/* a vmalloc address must be backed by a mapped page */
	BUG_ON(!pfn);

	return __pa(pfn_to_kaddr(pfn)) + byte_offset;
}
int cs598_dev_mmap(struct file *fp, struct vm_area_struct *vma) { int ret,i; unsigned long length = vma->vm_end - vma->vm_start; if (length > (NPAGES * HASH_SIZE)) { return -EIO; } /* Done for every page */ for (i=0; i < length; i+=PAGE_SIZE) { /* Remap every page in the virtual address space of the user process. This is required so that the process can access with correct privilege Else MMU will report violation */ if ((ret = remap_pfn_range(vma, vma->vm_start + i, /* Convert virtual address to page frame number */ vmalloc_to_pfn((void*)(((unsigned long)vmalloc_buffer) + i)), PAGE_SIZE, vma->vm_page_prot)) < 0) { printk(KERN_INFO "cs598:mmap failed"); return ret; } } printk(KERN_INFO "cs598:mmap successful"); return 0; }
/*
 * DDPK_Util_Map_Vmalloc_to_User - map a vmalloc()ed kernel buffer into a
 * user VMA one page at a time.
 *
 * The kernel vmalloc address is recovered from p_vma->vm_pgoff (interpreted
 * as a page number of the kernel virtual address).
 * NOTE(review): vm_pgoff comes from the userspace mmap() offset argument —
 * confirm the caller validates it before it is trusted as a kernel address.
 *
 * Returns 0 on success or the negative error from remap_pfn_range().
 */
int DDPK_Util_Map_Vmalloc_to_User( struct vm_area_struct* p_vma )
{
	int ret;
	unsigned long pfn;
	/* vm_pgoff carries the kernel vmalloc address in page units */
	void* vmalloc_addr = (void*)( p_vma->vm_pgoff * PAGE_SIZE );
	unsigned long start = p_vma->vm_start;
	long size = (long)(p_vma->vm_end - p_vma->vm_start);

	DDPK_BBINFO("Map Kernel Vmalloc (0x%08X) to User Space (0x%08X) (szie=0x%08X)-------\n",
		(unsigned int)vmalloc_addr, (unsigned int)start, (unsigned int)size );

	/* vmalloc pages are physically scattered: remap each page separately */
	while( size > 0 )
	{
		pfn = vmalloc_to_pfn( vmalloc_addr );

		/*Debug Info*/
		DDPK_BBINFO("kernel vm=0x%08X pfn==0x%08X\n",
			(unsigned int)vmalloc_addr, (unsigned int)pfn);

		/* NOTE(review): PAGE_SHARED forces a writable shared mapping and
		 * ignores p_vma->vm_page_prot — confirm this is intentional. */
		if( (ret = remap_pfn_range( p_vma, start, pfn, PAGE_SIZE, PAGE_SHARED )) < 0 )
		{
			return ret;
		}
		start += PAGE_SIZE;
		vmalloc_addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}
static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { unsigned long start = vma->vm_start; unsigned long size = vma->vm_end - vma->vm_start; unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; unsigned long page, pos; if (offset + size > info->fix.smem_len) return -EINVAL; pos = (unsigned long)info->fix.smem_start + offset; pr_notice("mmap() framebuffer addr:%lu size:%lu\n", pos, size); while (size > 0) { page = vmalloc_to_pfn((void *)pos); if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) return -EAGAIN; start += PAGE_SIZE; pos += PAGE_SIZE; if (size > PAGE_SIZE) size -= PAGE_SIZE; else size = 0; } /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ return 0; }
static int gsl_kmod_mmap(struct file *fd, struct vm_area_struct *vma) { int status = 0; unsigned long start = vma->vm_start; unsigned long pfn = vma->vm_pgoff; unsigned long size = vma->vm_end - vma->vm_start; unsigned long prot = pgprot_writecombine(vma->vm_page_prot); unsigned long addr = vma->vm_pgoff << PAGE_SHIFT; void *va = NULL; if (gsl_driver.enable_mmu && (addr < GSL_LINUX_MAP_RANGE_END) && (addr >= GSL_LINUX_MAP_RANGE_START)) { va = gsl_linux_map_find(addr); while (size > 0) { if (remap_pfn_range(vma, start, vmalloc_to_pfn(va), PAGE_SIZE, prot)) { return -EAGAIN; } start += PAGE_SIZE; va += PAGE_SIZE; size -= PAGE_SIZE; } } else { if (remap_pfn_range(vma, start, pfn, size, prot)) { status = -EAGAIN; } } vma->vm_ops = &gsl_kmod_vmops; return status; }
static int smartcam_mmap(struct file *file, struct vm_area_struct *vma) { int ret; long length = vma->vm_end - vma->vm_start; unsigned long start = vma->vm_start; char *vmalloc_area_ptr = frame_data; unsigned long pfn; SCAM_MSG("(%s) %s called\n", current->comm, __FUNCTION__); if (length > SMARTCAM_BUFFER_SIZE) return -EIO; /* loop over all pages, map each page individually */ while (length > 0) { pfn = vmalloc_to_pfn (vmalloc_area_ptr); ret = remap_pfn_range(vma, start, pfn, PAGE_SIZE, PAGE_SHARED); if(ret < 0) { return ret; } start += PAGE_SIZE; vmalloc_area_ptr += PAGE_SIZE; length -= PAGE_SIZE; } return 0; }
static int videoin_mmap (struct file *file, struct vm_area_struct *vma) { struct video_device *dev = video_devdata(file); videoin_priv_t *priv = (videoin_priv_t *)dev->priv; unsigned long start = vma->vm_start; unsigned long size = vma->vm_end-vma->vm_start; unsigned long page, pos; DBG_PRINTF("%s\n",__FUNCTION__); DBG_PRINTF("start = 0x%x\n",start); DBG_PRINTF("size = 0x%x\n",size); if(bDumpframeBuffer==0) pos = videoIn_buf[0].u32VirtAddr; else pos = videoIn_buf[3].u32VirtAddr; priv->mmap_bufsize = size; while (size > 0) { page = vmalloc_to_pfn((void *)pos); if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) { ERR_PRINTF("remap error\n"); return -EAGAIN; } start += PAGE_SIZE; pos += PAGE_SIZE; if (size > PAGE_SIZE) size -= PAGE_SIZE; else size = 0; } return 0; }
static int evdi_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { unsigned long start = vma->vm_start; unsigned long size = vma->vm_end - vma->vm_start; unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; unsigned long page, pos; if (offset + size > info->fix.smem_len) { return -EINVAL; } pos = (unsigned long)info->fix.smem_start + offset; pr_notice("mmap() framebuffer addr:%lu size:%lu\n", pos, size); while (size > 0) { page = vmalloc_to_pfn((void *)pos); if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) { return -EAGAIN; } start += PAGE_SIZE; pos += PAGE_SIZE; if (size > PAGE_SIZE) { size -= PAGE_SIZE; } else { size = 0; } } return 0; }
static int dlfb_mmap(struct fb_info *info, struct vm_area_struct *vma) { unsigned long start = vma->vm_start; unsigned long size = vma->vm_end - vma->vm_start; unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; unsigned long page, pos; printk("MMAP: %lu %u\n", offset + size, info->fix.smem_len); if (offset + size > info->fix.smem_len) return -EINVAL; pos = (unsigned long)info->fix.smem_start + offset; while (size > 0) { page = vmalloc_to_pfn((void *)pos); if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) return -EAGAIN; start += PAGE_SIZE; pos += PAGE_SIZE; if (size > PAGE_SIZE) size -= PAGE_SIZE; else size = 0; } vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */ return 0; }
static int tegra_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { int err = 0; int size = 0; char *vmalloc_area_ptr = NULL; unsigned long start = 0; unsigned long pfn = 0; start = vma->vm_start; vmalloc_area_ptr = substream->dma_buffer.area; size = vma->vm_end - vma->vm_start; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); while (size > 0) { pfn = vmalloc_to_pfn(vmalloc_area_ptr); err = io_remap_pfn_range(vma, start, pfn, PAGE_SIZE, vma->vm_page_prot); if (err < 0) { snd_printk(KERN_ERR "io_remap_pfn_range failed \n"); return err; } start += PAGE_SIZE; vmalloc_area_ptr += PAGE_SIZE; size -= PAGE_SIZE; } return err; }
/*
 * vmalloc_to_pfn_init - demo module init: allocate a vmalloc region and
 * print the page frame numbers backing two addresses inside it.
 *
 * addr + 4097 lands on the second page of the region, so the two printed
 * pfns are expected to differ (vmalloc pages need not be physically
 * adjacent).
 *
 * Fixes vs. original: pfn is unsigned long, so the format specifier is now
 * %lu instead of %ld; the declaration of to_pfn is hoisted to the top of
 * the function; comments translated to English.
 */
int __init vmalloc_to_pfn_init(void)
{
	unsigned long to_pfn;

	/* allocate a virtually contiguous region with vmalloc() */
	addr = (char *)vmalloc(VMALLOC_SIZE);
	if (addr == NULL) {
		printk("<0>vmalloc failed!\n");
	} else {
		/* print the returned kernel virtual address */
		printk("<0>vmalloc successfully! addr = 0x%lx\n",
		       (unsigned long)addr);

		/* pfn backing the first byte of the region */
		to_pfn = vmalloc_to_pfn(addr);
		printk("<0>the_pfn = %lu\n", to_pfn);

		/* pfn backing addr + 4097, i.e. the second page */
		to_pfn = vmalloc_to_pfn(addr + 4097);
		printk("<0>the_pfn = %lu\n", to_pfn);
	}
	return 0;
}
static int remap_mmap(struct file *filp, struct vm_area_struct *vma) { if(remap_pfn_range(vma, vma->vm_start, vmalloc_to_pfn(kbuff), vma->vm_end - vma->vm_start,\ vma->vm_page_prot)) return -EAGAIN; vma->vm_ops = &remap_vm_ops; remap_open(vma); return 0; }
/*
 * jpeg_mmap - mmap handler for the JPEG device.
 *
 * Two compile-time configurations:
 *  - With the SysMMU (CONFIG_S5P_SYSMMU_JPEG) and without CONFIG_S5P_VMEM:
 *    the device memory is vmalloc-backed, so it is remapped into the user
 *    VMA page by page via vmalloc_to_pfn().
 *  - Otherwise: the memory is a physically contiguous (CMA/reserved)
 *    region; after validating it against the registered CMA region, the
 *    whole range is remapped in one call.
 * In both cases the mapping is non-cached and marked VM_RESERVED | VM_IO.
 *
 * Returns 0 on success, -EINVAL for a non-CMA region, -ENOMEM if a remap
 * fails.
 */
int jpeg_mmap(struct file *filp, struct vm_area_struct *vma)
{
#if defined(CONFIG_S5P_SYSMMU_JPEG)
#if !defined(CONFIG_S5P_VMEM)
	unsigned long page_frame_no;
	unsigned long start;
	unsigned long size;
	char *ptr; /* vmalloc */

	size = vma->vm_end - vma->vm_start;
	ptr = (char *)jpeg_ctrl->mem.base;
	start = 0;

	vma->vm_flags |= VM_RESERVED | VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* vmalloc memory: translate and remap one page at a time */
	while (size > 0) {
		page_frame_no = vmalloc_to_pfn(ptr);
		if (remap_pfn_range(vma, vma->vm_start + start, page_frame_no,
			PAGE_SIZE, vma->vm_page_prot)) {
			jpeg_err("failed to remap jpeg pfn range.\n");
			return -ENOMEM;
		}

		start += PAGE_SIZE;
		ptr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
#endif /* CONFIG_S5P_VMEM */
#else
	unsigned long page_frame_no;
	unsigned long size;
	int ret;

	size = vma->vm_end - vma->vm_start;

	/* only memory inside the registered CMA region may be exposed */
	if (!cma_is_registered_region(jpeg_ctrl->mem.base, size)) {
		pr_err("[%s] handling non-cma region (%#x@%#x)is prohibited\n",
			__func__, (unsigned int)size, jpeg_ctrl->mem.base);
		return -EINVAL;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* physically contiguous region: a single remap covers the range */
	page_frame_no = __phys_to_pfn(jpeg_ctrl->mem.base);
	ret = remap_pfn_range(vma, vma->vm_start, page_frame_no, size,
		vma->vm_page_prot);
	if (ret != 0) {
		jpeg_err("failed to remap jpeg pfn range.\n");
		return -ENOMEM;
	}
#endif /* SYSMMU_JPEG_ON */
	return 0;
}
/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz, off, sz;
	unsigned long phy, pfn;

	/* printk("Kernel Cache Cohenercy: %lx to %lx\n",kstart, kend); */

	/* This is not the right API for user virtual address */
	if (kstart < TASK_SIZE) {
		/* NOTE(review): BUG_ON() on a string literal is a constant
		 * true condition, so reaching this line always BUGs — the
		 * idiom is used here as an unconditional assert; confirm
		 * that is the intent. */
		BUG_ON("Flush icache range for user virtual addr space");
		return;
	}

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
	while (tot_sz > 0) {
		/* translate the current page's vaddr to its physical base */
		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		/* never sync past the end of the current page */
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
static int roach_mem_mmap(struct file *file, struct vm_area_struct *vma) { unsigned long start, vsize, offset; unsigned long page, pos; struct fpga_device *rdev_mem = file->private_data; /* VMA properties */ start = vma->vm_start; vsize = vma->vm_end - vma->vm_start; offset = vma->vm_pgoff << PAGE_SHIFT; printk(KERN_NOTICE "%s: vm_start %lx, vm_end %lx, vsize %lx, offset %lx", __func__, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start), vma->vm_pgoff << PAGE_SHIFT); if (offset + vsize > ROACH_FPGA_LENGTH) return -EINVAL; pos = (unsigned long)(rdev_mem->fpga_virt) + offset; printk(KERN_NOTICE "%s: pos to be converted : %lx", __func__, pos); /* Avoid to swap out this VMA */ /* vma->vm_flags |= VM_RESERVED; */ /* VM_RESERVED replacement with VM_IO that accounts as reserved_vm */ vma->vm_flags |= VM_IO; /* Page caching disabled completely for memory mapping to work properly*/ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); while (vsize > 0) { page = vmalloc_to_pfn((void *)pos); if (remap_pfn_range(vma, start, page, PAGE_SIZE, vma->vm_page_prot)) return -EAGAIN; start += PAGE_SIZE; pos += PAGE_SIZE; if (vsize > PAGE_SIZE) vsize -= PAGE_SIZE; else vsize = 0; } vma->vm_ops = &roach_remap_vm_ops; roach_vma_open(vma); return 0; }
/*
 * kgsl_cache_range_op - perform a cache maintenance operation (flush,
 * clean, or invalidate, per @flags) over [addr, addr + size).
 *
 * The inner (L1) caches are handled with the dmac_* virtual-address range
 * ops.  When an outer (L2) cache is configured, each page is additionally
 * translated to a physical address — via vmalloc_to_pfn() for kernel
 * vmalloc ranges or a PTE walk for user ranges — and the corresponding
 * outer_* op is applied per page.
 *
 * @addr and @size must be KGSL_PAGESIZE aligned (BUG otherwise).
 * Returns 0 on success or -EINVAL for an unknown address class or an
 * unmapped user address.
 */
static long kgsl_cache_range_op(unsigned long addr, int size,
					unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long end;
#endif
	/* alignment is a hard requirement for the per-page translation below */
	BUG_ON(addr & (KGSL_PAGESIZE - 1));
	BUG_ON(size & (KGSL_PAGESIZE - 1));

	/* inner (L1) maintenance works on virtual addresses directly */
	if (flags & KGSL_CACHE_FLUSH)
		dmac_flush_range((const void *)addr,
				 (const void *)(addr + size));
	else if (flags & KGSL_CACHE_CLEAN)
		dmac_clean_range((const void *)addr,
				 (const void *)(addr + size));
	else
		dmac_inv_range((const void *)addr,
			       (const void *)(addr + size));

#ifdef CONFIG_OUTER_CACHE
	/* outer (L2) maintenance needs physical addresses: walk per page */
	for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
		pte_t *pte_ptr, pte;
		unsigned long physaddr;
		if (flags & KGSL_CACHE_VMALLOC_ADDR)
			physaddr = vmalloc_to_pfn((void *)end);
		else if (flags & KGSL_CACHE_USER_ADDR) {
			pte_ptr = kgsl_get_pte_from_vaddr(end);
			if (!pte_ptr)
				return -EINVAL;
			pte = *pte_ptr;
			physaddr = pte_pfn(pte);
			pte_unmap(pte_ptr);
		} else
			return -EINVAL;

		/* physaddr currently holds a pfn; convert to a byte address */
		physaddr <<= PAGE_SHIFT;
		if (flags & KGSL_CACHE_FLUSH)
			outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
		else if (flags & KGSL_CACHE_CLEAN)
			outer_clean_range(physaddr, physaddr + KGSL_PAGESIZE);
		else
			outer_inv_range(physaddr, physaddr + KGSL_PAGESIZE);
	}
#endif
	return 0;
}
/*
 * Convert a region of vmalloc memory to an opal sg list.
 *
 * Builds a chain of page-sized opal_sg_list nodes describing the physical
 * pages backing [vmalloc_addr, vmalloc_addr + vmalloc_size).  Each node's
 * length field is the filled entry bytes plus 16 (the node header), and
 * next links are stored as physical addresses in big-endian, as OPAL
 * firmware expects.
 *
 * Returns the head of the chain, or NULL on allocation failure (any
 * partially built chain is freed).
 */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
					     unsigned long vmalloc_size)
{
	struct opal_sg_list *sg, *first = NULL;
	unsigned long i = 0;

	sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sg)
		goto nomem;

	first = sg;
	while (vmalloc_size > 0) {
		/* physical address of the current backing page */
		uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
		uint64_t length = min(vmalloc_size, PAGE_SIZE);

		sg->entry[i].data = cpu_to_be64(data);
		sg->entry[i].length = cpu_to_be64(length);
		i++;

		/* node full: close it out and chain a fresh one */
		if (i >= SG_ENTRIES_PER_NODE) {
			struct opal_sg_list *next;

			next = kzalloc(PAGE_SIZE, GFP_KERNEL);
			if (!next)
				goto nomem;

			/* +16 accounts for the node header fields */
			sg->length = cpu_to_be64(
					i * sizeof(struct opal_sg_entry) + 16);
			i = 0;
			sg->next = cpu_to_be64(__pa(next));
			sg = next;
		}

		vmalloc_addr += length;
		vmalloc_size -= length;
	}

	/* close out the (possibly partial) last node */
	sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);

	return first;

nomem:
	pr_err("%s : Failed to allocate memory\n", __func__);
	opal_free_sg_list(first);
	return NULL;
}
/*
 * Build candidate image scatter gather list
 *
 * list format:
 *   -----------------------------------
 *   | VER (8) | Entry length in bytes |
 *   -----------------------------------
 *   | Pointer to next entry           |
 *   -----------------------------------
 *   | Address of memory area 1        |
 *   -----------------------------------
 *   | Length of memory area 1         |
 *   -----------------------------------
 *   | .........                       |
 *   -----------------------------------
 *   | .........                       |
 *   -----------------------------------
 *   | Address of memory area N        |
 *   -----------------------------------
 *   | Length of memory area N         |
 *   -----------------------------------
 */
static struct opal_sg_list *image_data_to_sglist(void)
{
	struct opal_sg_list *sg1, *list = NULL;
	void *addr;
	int size;

	addr = image_data.data;
	size = image_data.size;

	sg1 = kzalloc((sizeof(struct opal_sg_list)), GFP_KERNEL);
	if (!sg1)
		return NULL;

	list = sg1;
	sg1->num_entries = 0;
	while (size > 0) {
		/* Translate virtual address to physical address */
		sg1->entry[sg1->num_entries].data =
			(void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);

		/* last entry may describe less than a whole page */
		if (size > PAGE_SIZE)
			sg1->entry[sg1->num_entries].length = PAGE_SIZE;
		else
			sg1->entry[sg1->num_entries].length = size;

		sg1->num_entries++;
		/* current node full: allocate and link the next one */
		if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
			sg1->next = kzalloc((sizeof(struct opal_sg_list)),
					    GFP_KERNEL);
			if (!sg1->next) {
				pr_err("%s : Failed to allocate memory\n",
				       __func__);
				goto nomem;
			}

			sg1 = sg1->next;
			sg1->num_entries = 0;
		}
		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return list;

nomem:
	free_sg_list(list);
	return NULL;
}
static int my_init(void) { /* print hex dump of module */ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, THIS_MODULE->module_core,THIS_MODULE->core_text_size); misc_register(&misc); misc_register(&my_read_misc); misc_register(&my_write_misc); misc_register(&mem_buf_read_misc); misc_register(&mem_buf_write_misc); printk(KERN_INFO "ioctl GET cmd: %d\n", MY_READ); printk(KERN_INFO "ioctl SET cmd: %d\n", MY_WRITE); /* create debug directory */ debug_dir = debugfs_create_dir("pb173", NULL); /* create u32 debug file mapped to global var count */ counter_file = debugfs_create_u32("counter", S_IRUSR, debug_dir, &count); /* create debug file containing binary data of this module */ bindata_file = debugfs_create_file("bindata", S_IRUSR, debug_dir, THIS_MODULE->module_core, &fops_debug); /* allocate buffer and zero memory */ mem_buffer = (char*) vzalloc(20971520); if (!mem_buffer) { printk(KERN_INFO "20MB allocation error.."); return -ENOMEM; } unsigned long i, pfn; /* iterate allocated pages */ for (i = 0; i < 20971520; i += PAGE_SIZE) { /* get pfn */ pfn = vmalloc_to_pfn(&(mem_buffer[i])); /* get phys address */ phys_addr_t phys = pfn << PAGE_SHIFT; /* write string <Virt> : <Phys>\n to current page */ snprintf(&(mem_buffer[i]), PAGE_SIZE, "%p : %llx\n", &(mem_buffer[i]), phys); } return 0; }
/*
 * dump_data_to_sglist - build an OPAL scatter-gather list describing the
 * physical pages backing @dump's (vmalloc-backed) buffer.
 *
 * Nodes are page-sized and chained through ->next when SG_ENTRIES_PER_NODE
 * entries fill up; the last entry's length is clipped to the remaining
 * size.  Returns the head of the chain, or NULL on allocation failure (any
 * partially built list is freed).
 */
static struct opal_sg_list *dump_data_to_sglist(struct dump_obj *dump)
{
	struct opal_sg_list *sg1, *list = NULL;
	void *addr;
	int64_t size;

	addr = dump->buffer;
	size = dump->size;

	sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sg1)
		goto nomem;

	list = sg1;
	sg1->num_entries = 0;
	while (size > 0) {
		/* Translate virtual address to physical address */
		sg1->entry[sg1->num_entries].data =
			(void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);

		/* last entry may cover less than a whole page */
		if (size > PAGE_SIZE)
			sg1->entry[sg1->num_entries].length = PAGE_SIZE;
		else
			sg1->entry[sg1->num_entries].length = size;

		sg1->num_entries++;
		/* node full: chain a fresh one */
		if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
			sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
			if (!sg1->next)
				goto nomem;

			sg1 = sg1->next;
			sg1->num_entries = 0;
		}
		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return list;

nomem:
	pr_err("%s : Failed to allocate memory\n", __func__);
	free_dump_sg_list(list);
	return NULL;
}
// helper function, mmap's the vmalloc'd area which is not physically contiguous static int mmap_vmem(struct file *filp, struct vm_area_struct *vma, char * vmalloc_area_ptr) { int ret; long length = vma->vm_end - vma->vm_start; unsigned long start = vma->vm_start; unsigned long pfn; /* loop over all pages, map it page individually */ while (length > 0) { pfn = vmalloc_to_pfn(vmalloc_area_ptr); if ((ret = remap_pfn_range(vma, start, pfn, PAGE_SIZE, PAGE_SHARED)) < 0) { return ret; } start += PAGE_SIZE; vmalloc_area_ptr += PAGE_SIZE; length -= PAGE_SIZE; } return 0; }
static int my_init(void) { char str[100], *ptr; buf = vzalloc(BUF_SIZE); if (buf == NULL) return -ENOMEM; /* write page virtual and physical address to every page */ for (ptr = buf; ptr < buf + BUF_SIZE; ptr += PAGE_SIZE) { sprintf(str, "%p: %lx\n", ptr, (unsigned long) vmalloc_to_pfn(ptr) << PAGE_SHIFT); strcpy(ptr, str); } misc_register(&mydevice); printk(KERN_INFO "MY_SET_LEN: %u\n", MY_SET_LEN); printk(KERN_INFO "MY_GET_LEN: %u\n", MY_GET_LEN); return 0; }
/*
 * schar_mmap_2 - map the driver's vmalloc area into user space, guarded by
 * the Big Kernel Lock.
 *
 * Rejects the mapping if the backing data was never initialized or if the
 * requested length exceeds the allocated pages; otherwise remaps each page
 * individually (vmalloc memory is not physically contiguous).  Every exit
 * path must release the BKL taken at entry.
 *
 * Returns 0 on success, -EINVAL if uninitialized, -EIO if too large, or
 * the negative error from remap_pfn_range().
 */
static int schar_mmap_2(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	long length = vma->vm_end - vma->vm_start;
	unsigned long start = vma->vm_start;
	char *vmalloc_area_ptr = (char *)vmalloc_area;
	unsigned long pfn;

	/* BKL serializes against other driver entry points */
	lock_kernel();
	printk(KERN_INFO " entering mmap \n");

	if (!buf_start_2) {
		printk(KERN_INFO " Data was not initialized");
		unlock_kernel();
		return -EINVAL;
	}

	/* check length - do not allow larger mappings than the number of
	   pages allocated */
	if (length > BIGPHYS_PAGES_2*PAGE_SIZE){
		printk(KERN_INFO " Page allocation problem %ld %ld \n",length,BIGPHYS_PAGES_2*PAGE_SIZE);
		unlock_kernel();
		return -EIO;
	}

	/* loop over all pages, map it page individually */
	while (length > 0) {
		pfn = vmalloc_to_pfn(vmalloc_area_ptr);
		if ((ret = remap_pfn_range(vma, start, pfn, PAGE_SIZE,
					   PAGE_SHARED)) < 0) {
			printk(KERN_INFO "do_rvmmap BAD RETURN %ld %ld ret %d \n",length,BIGPHYS_PAGES_2*PAGE_SIZE,ret);
			printk(KERN_INFO " this can happen if MEM_SHARED is not used by calling process ! \n");
			unlock_kernel();
			return ret;
		}
		start += PAGE_SIZE;
		vmalloc_area_ptr += PAGE_SIZE;
		length -= PAGE_SIZE;
	}

	unlock_kernel();
	return 0;
}
static int evdi_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { unsigned long vma_start = vma->vm_start; unsigned long vma_size = vma->vm_end - vma->vm_start; unsigned long vma_page_cnt = vma_size >> PAGE_SHIFT; unsigned long smem_page_cnt = info->fix.smem_len >> PAGE_SHIFT; unsigned long smem_offset = vma->vm_pgoff << PAGE_SHIFT; unsigned long smem_pos; if (smem_page_cnt < vma->vm_pgoff) return -EINVAL; if (vma_page_cnt > smem_page_cnt - vma->vm_pgoff) return -EINVAL; smem_pos = (unsigned long)info->fix.smem_start + smem_offset; pr_notice("mmap() framebuffer addr:%lu size:%lu\n", smem_pos, vma_size); while (vma_size > 0) { unsigned long page = vmalloc_to_pfn((void *)smem_pos); if (remap_pfn_range(vma, vma_start, page, PAGE_SIZE, PAGE_SHARED)) return -EAGAIN; vma_start += PAGE_SIZE; smem_pos += PAGE_SIZE; if (vma_size > PAGE_SIZE) vma_size -= PAGE_SIZE; else vma_size = 0; } return 0; }
/*
 * mfc_mmap - mmap handler for the MFC codec device.
 *
 * The behaviour is selected at compile time:
 *  - SYSMMU_MFC_ON + (VCM_UMP or S5P_VMEM): defer to mfc_vm_ops (fault-time
 *    mapping); only flags/prot/ops are set here.
 *  - SYSMMU_MFC_ON alone: the port buffers are vmalloc-backed, so each
 *    page is translated with vmalloc_to_pfn() and remapped individually,
 *    port 0 first and then (for two-port devices) port 1.
 *  - Otherwise (early/CMA allocator): the port bases are physically
 *    contiguous and each port is remapped with a single remap_pfn_range().
 * In all remapping paths the VMA is marked VM_RESERVED | VM_IO, and the
 * mapping is made non-cached (unconditionally with the SysMMU; only for
 * NO_CACHE contexts with the early allocator).
 *
 * Returns 0 on success, -EINVAL for a missing context / oversized request /
 * failed contiguous remap, -EAGAIN when a per-page remap fails.
 */
static int mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long user_size = vma->vm_end - vma->vm_start;
	unsigned long real_size;
	struct mfc_inst_ctx *mfc_ctx;
#if !(defined(CONFIG_VIDEO_MFC_VCM_UMP) || defined(CONFIG_S5P_VMEM))
	/* mmap support */
	unsigned long pfn;
	unsigned long remap_offset, remap_size;
	struct mfc_dev *dev;
#ifdef SYSMMU_MFC_ON
	/* kernel virtual memory allocator */
	char *ptr;
	unsigned long start, size;
#endif
#endif

	mfc_ctx = (struct mfc_inst_ctx *)file->private_data;
	if (!mfc_ctx)
		return -EINVAL;

#if !(defined(CONFIG_VIDEO_MFC_VCM_UMP) || defined(CONFIG_S5P_VMEM))
	dev = mfc_ctx->dev;
#endif

	mfc_dbg("vm_start: 0x%08lx, vm_end: 0x%08lx, size: %ld(%ldMB)\n",
		 vma->vm_start, vma->vm_end, user_size, (user_size >> 20));

	real_size = (unsigned long)(mfc_mem_data_size(0) + mfc_mem_data_size(1));

	mfc_dbg("port 0 size: %d, port 1 size: %d, total: %ld\n",
		mfc_mem_data_size(0), mfc_mem_data_size(1), real_size);

	/*
	 * if memory size required from appl. mmap() is bigger than max data memory
	 * size allocated in the driver.
	 */
	if (user_size > real_size) {
		mfc_err("user requeste mem(%ld) is bigger than available mem(%ld)\n",
			user_size, real_size);
		return -EINVAL;
	}

#ifdef SYSMMU_MFC_ON
#if (defined(CONFIG_VIDEO_MFC_VCM_UMP) || defined(CONFIG_S5P_VMEM))
	/* fault-driven mapping: just arm the vm_ops */
	vma->vm_flags |= VM_RESERVED | VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &mfc_vm_ops;
	vma->vm_private_data = mfc_ctx;

	mfc_ctx->userbase = vma->vm_start;
#else	/* not CONFIG_VIDEO_MFC_VCM_UMP && not CONFIG_S5P_VMEM */
	/* kernel virtual memory allocator */
	if (dev->mem_ports == 1) {
		remap_offset = 0;
		remap_size = user_size;

		vma->vm_flags |= VM_RESERVED | VM_IO;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		/*
		 * Port 0 mapping for stream buf & frame buf (chroma + MV + luma)
		 */
		ptr = (char *)mfc_mem_data_base(0);
		start = remap_offset;
		size = remap_size;
		/* vmalloc pages are physically scattered: remap one by one */
		while (size > 0) {
			pfn = vmalloc_to_pfn(ptr);
			if (remap_pfn_range(vma, vma->vm_start + start, pfn,
				PAGE_SIZE, vma->vm_page_prot)) {
				mfc_err("failed to remap port 0\n");
				return -EAGAIN;
			}

			start += PAGE_SIZE;
			ptr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	} else {
		remap_offset = 0;
		remap_size = min((unsigned long)mfc_mem_data_size(0), user_size);

		vma->vm_flags |= VM_RESERVED | VM_IO;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		/*
		 * Port 0 mapping for stream buf & frame buf (chroma + MV)
		 */
		ptr = (char *)mfc_mem_data_base(0);
		start = remap_offset;
		size = remap_size;
		while (size > 0) {
			pfn = vmalloc_to_pfn(ptr);
			if (remap_pfn_range(vma, vma->vm_start + start, pfn,
				PAGE_SIZE, vma->vm_page_prot)) {
				mfc_err("failed to remap port 0\n");
				return -EAGAIN;
			}

			start += PAGE_SIZE;
			ptr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}

		/* port 1 picks up directly after the port 0 mapping */
		remap_offset = remap_size;
		remap_size = min((unsigned long)mfc_mem_data_size(1),
			user_size - remap_offset);

		vma->vm_flags |= VM_RESERVED | VM_IO;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		/*
		 * Port 1 mapping for frame buf (luma)
		 */
		ptr = (void *)mfc_mem_data_base(1);
		start = remap_offset;
		size = remap_size;
		while (size > 0) {
			pfn = vmalloc_to_pfn(ptr);
			if (remap_pfn_range(vma, vma->vm_start + start, pfn,
				PAGE_SIZE, vma->vm_page_prot)) {
				mfc_err("failed to remap port 1\n");
				return -EAGAIN;
			}

			start += PAGE_SIZE;
			ptr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}

	mfc_ctx->userbase = vma->vm_start;

	mfc_dbg("user request mem = %ld, available data mem = %ld\n",
		  user_size, real_size);

	if ((remap_offset + remap_size) < real_size)
		mfc_warn("The MFC reserved memory dose not mmap fully [%ld: %ld]\n",
			  real_size, (remap_offset + remap_size));
#endif	/* end of CONFIG_VIDEO_MFC_VCM_UMP */
#else	/* not SYSMMU_MFC_ON */
	/* early allocator */
	/* CMA or bootmem(memblock) */
	if (dev->mem_ports == 1) {
		remap_offset = 0;
		remap_size = user_size;

		vma->vm_flags |= VM_RESERVED | VM_IO;
		if (mfc_ctx->buf_cache_type == NO_CACHE) {
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
			mfc_info("CONFIG_VIDEO_MFC_CACHE is not enabled\n");
		} else
			mfc_info("CONFIG_VIDEO_MFC_CACHE is enabled\n");

		/*
		 * Port 0 mapping for stream buf & frame buf (chroma + MV + luma)
		 */
		/* contiguous memory: a single remap covers the whole port */
		pfn = __phys_to_pfn(mfc_mem_data_base(0));
		if (remap_pfn_range(vma, vma->vm_start + remap_offset, pfn,
			remap_size, vma->vm_page_prot)) {
			mfc_err("failed to remap port 0\n");
			return -EINVAL;
		}
	} else {
		remap_offset = 0;
		remap_size = min((unsigned long)mfc_mem_data_size(0), user_size);

		vma->vm_flags |= VM_RESERVED | VM_IO;
		if (mfc_ctx->buf_cache_type == NO_CACHE) {
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
			mfc_info("CONFIG_VIDEO_MFC_CACHE is not enabled\n");
		} else
			mfc_info("CONFIG_VIDEO_MFC_CACHE is enabled\n");

		/*
		 * Port 0 mapping for stream buf & frame buf (chroma + MV)
		 */
		pfn = __phys_to_pfn(mfc_mem_data_base(0));
		if (remap_pfn_range(vma, vma->vm_start + remap_offset, pfn,
			remap_size, vma->vm_page_prot)) {
			mfc_err("failed to remap port 0\n");
			return -EINVAL;
		}

		remap_offset = remap_size;
		remap_size = min((unsigned long)mfc_mem_data_size(1),
			user_size - remap_offset);

		vma->vm_flags |= VM_RESERVED | VM_IO;
		if (mfc_ctx->buf_cache_type == NO_CACHE)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		/*
		 * Port 1 mapping for frame buf (luma)
		 */
		pfn = __phys_to_pfn(mfc_mem_data_base(1));
		if (remap_pfn_range(vma, vma->vm_start + remap_offset, pfn,
			remap_size, vma->vm_page_prot)) {
			mfc_err("failed to remap port 1\n");
			return -EINVAL;
		}
	}

	mfc_ctx->userbase = vma->vm_start;

	mfc_dbg("user request mem = %ld, available data mem = %ld\n",
		  user_size, real_size);

	if ((remap_offset + remap_size) < real_size)
		mfc_warn("The MFC reserved memory dose not mmap fully [%ld: %ld]\n",
			  real_size, (remap_offset + remap_size));
#endif	/* end of SYSMMU_MFC_ON */

	return 0;
}
static int vloopback_mmap(struct file *f, struct vm_area_struct *vma) { struct video_device *loopdev=video_devdata(f); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27) priv_ptr ptr=(priv_ptr)video_get_drvdata(loopdev); #else priv_ptr ptr=(priv_ptr)loopdev->priv; #endif int nr=ptr->pipenr; unsigned long start = (unsigned long)vma->vm_start; long size = vma->vm_end - vma->vm_start; unsigned long page, pos; down(&loops[nr]->lock); if (ptr->in) { loops[nr]->zerocopy=1; if (loops[nr]->ropen) { info("Can't change size while opened for read"); up(&loops[nr]->lock); return -EINVAL; } if (!size) { up(&loops[nr]->lock); return -EINVAL; } if (loops[nr]->buffer) rvfree(loops[nr]->buffer, loops[nr]->buflength*N_BUFFS); loops[nr]->buflength=size; loops[nr]->buffer=rvmalloc(loops[nr]->buflength*N_BUFFS); } if (loops[nr]->buffer == NULL) { up(&loops[nr]->lock); return -EINVAL; } if (size > (((N_BUFFS * loops[nr]->buflength) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))) { up(&loops[nr]->lock); return -EINVAL; } pos = (unsigned long)loops[nr]->buffer; while (size > 0) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) page = kvirt_to_pa(pos); if (remap_page_range(vma,start, page, PAGE_SIZE, PAGE_SHARED)) { #else page = vmalloc_to_pfn((void *)pos); if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) { #endif up(&loops[nr]->lock); return -EAGAIN; } start += PAGE_SIZE; pos += PAGE_SIZE; size -= PAGE_SIZE; } up(&loops[nr]->lock); return 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) static long vloopback_ioctl(struct file *f, unsigned int cmd, unsigned long arg) #else static int vloopback_ioctl(struct inode *inod, struct file *f, unsigned int cmd, unsigned long arg) #endif { struct video_device *loopdev=video_devdata(f); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27) priv_ptr ptr=(priv_ptr)video_get_drvdata(loopdev); #else priv_ptr ptr=(priv_ptr)loopdev->priv; #endif int nr=ptr->pipenr; int i; if (loops[nr]->zerocopy) { if (!ptr->in) { loops[nr]->ioctlnr=cmd; 
loops[nr]->ioctllength=_IOC_SIZE(cmd); /* info("DEBUG: vl_ioctl: !loop->in"); */ /* info("DEBUG: vl_ioctl: cmd %lu", cmd); */ /* info("DEBUG: vl_ioctl: len %lu", loops[nr]->ioctllength); */ if(copy_from_user(loops[nr]->ioctldata, (void*)arg, _IOC_SIZE(cmd))) return -EFAULT; kill_proc(loops[nr]->pid, SIGIO, 1); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0) wait_event_interruptible(loops[nr]->wait, loops[nr]->ioctlnr==-1); #else interruptible_sleep_on(&loops[nr]->wait); #endif if (loops[nr]->invalid_ioctl) { //info ("DEBUG: There was an invalid ioctl"); loops[nr]->invalid_ioctl = 0; return -ENOTTY; } if (cmd & IOC_IN && !(cmd & IOC_OUT)) { //info("DEBUG: vl_ioctl: cmd & IOC_IN 1"); if (memcmp(loops[nr]->ioctlretdata, loops[nr]->ioctldata, _IOC_SIZE(cmd))) { return -EINVAL; } //info("DEBUG: vl_ioctl: cmd & IOC_IN 2"); return 0; } else { if (copy_to_user((void*)arg, loops[nr]->ioctlretdata, _IOC_SIZE(cmd))) return -EFAULT; //info("DEBUG: vl_ioctl: !(cmd & IOC_IN) 1"); return 0; } } else { if ( (loops[nr]->ioctlnr!=cmd) && (cmd != (VIDIOCSINVALID))) { /* wrong ioctl */ info("DEBUG: vo_ioctl: Wrong IOCTL"); return 0; } if (cmd == VIDIOCSINVALID) { loops[nr]->invalid_ioctl = 1; } else { if (copy_from_user(loops[nr]->ioctlretdata, (void*)arg, loops[nr]->ioctllength)) return -EFAULT; } loops[nr]->ioctlnr=-1; if (waitqueue_active(&loops[nr]->wait)) wake_up(&loops[nr]->wait); return 0; } } switch(cmd) { /* Get capabilities */ case VIDIOCGCAP: { struct video_capability b; if (ptr->in) { sprintf(b.name, "Video loopback %d input", ptr->pipenr); b.type = 0; } else { sprintf(b.name, "Video loopback %d output", ptr->pipenr); b.type = VID_TYPE_CAPTURE; } b.channels=1; b.audios=0; b.maxwidth=loops[nr]->width; b.maxheight=loops[nr]->height; b.minwidth=20; b.minheight=20; if(copy_to_user((void*)arg, &b, sizeof(b))) return -EFAULT; return 0; } /* Get channel info (sources) */ case VIDIOCGCHAN: { struct video_channel v; if(copy_from_user(&v, (void*)arg, sizeof(v))) return -EFAULT; 
if(v.channel!=0) { info("VIDIOCGCHAN: Invalid Channel, was %d", v.channel); v.channel=0; //return -EINVAL; } v.flags=0; v.tuners=0; v.norm=0; v.type = VIDEO_TYPE_CAMERA; /*strcpy(v.name, "Loopback"); -- tibit */ strcpy(v.name, "Composite1"); if(copy_to_user((void*)arg, &v, sizeof(v))) return -EFAULT; return 0; } /* Set channel */ case VIDIOCSCHAN: { int v; if(copy_from_user(&v, (void*)arg, sizeof(v))) return -EFAULT; if(v!=0) { info("VIDIOCSCHAN: Invalid Channel, was %d", v); return -EINVAL; } return 0; } /* Get tuner abilities */ case VIDIOCGTUNER: { struct video_tuner v; if(copy_from_user(&v, (void*)arg, sizeof(v))!=0) return -EFAULT; if(v.tuner) { info("VIDIOCGTUNER: Invalid Tuner, was %d", v.tuner); return -EINVAL; } strcpy(v.name, "Format"); v.rangelow=0; v.rangehigh=0; v.flags=0; v.mode=VIDEO_MODE_AUTO; if(copy_to_user((void*)arg,&v, sizeof(v))!=0) return -EFAULT; return 0; } /* Get picture properties */ case VIDIOCGPICT: { struct video_picture p; p.colour=0x8000; p.hue=0x8000; p.brightness=0x8000; p.contrast=0x8000; p.whiteness=0x8000; p.depth=0x8000; p.palette=loops[nr]->palette; if(copy_to_user((void*)arg, &p, sizeof(p))) return -EFAULT; return 0; } /* Set picture properties */ case VIDIOCSPICT: { struct video_picture p; if(copy_from_user(&p, (void*)arg, sizeof(p))) return -EFAULT; if (!ptr->in) { if (p.palette!=loops[nr]->palette) return -EINVAL; } else loops[nr]->palette=p.palette; return 0; } /* Get the video overlay window */ case VIDIOCGWIN: { struct video_window vw; vw.x=0; vw.y=0; vw.width=loops[nr]->width; vw.height=loops[nr]->height; vw.chromakey=0; vw.flags=0; vw.clipcount=0; if(copy_to_user((void*)arg, &vw, sizeof(vw))) return -EFAULT; return 0; } /* Set the video overlay window - passes clip list for hardware smarts , chromakey etc */ case VIDIOCSWIN: { struct video_window vw; if(copy_from_user(&vw, (void*)arg, sizeof(vw))) return -EFAULT; if(vw.flags) return -EINVAL; if(vw.clipcount) return -EINVAL; if (loops[nr]->height==vw.height && 
loops[nr]->width==vw.width) return 0; if(!ptr->in) { return -EINVAL; } else { loops[nr]->height=vw.height; loops[nr]->width=vw.width; /* Make sure nobody is using the buffer while we fool around with it. We are also not allowing changes while somebody using mmap has the output open. */ down(&loops[nr]->lock); if (loops[nr]->ropen) { info("Can't change size while opened for read"); up(&loops[nr]->lock); return -EINVAL; } if (loops[nr]->buffer) rvfree(loops[nr]->buffer, loops[nr]->buflength*N_BUFFS); loops[nr]->buflength=vw.width*vw.height*4; loops[nr]->buffer=rvmalloc(loops[nr]->buflength*N_BUFFS); up(&loops[nr]->lock); } return 0; } /* Memory map buffer info */ case VIDIOCGMBUF: { struct video_mbuf vm; vm.size=loops[nr]->buflength*N_BUFFS; vm.frames=N_BUFFS; for (i=0; i<vm.frames; i++) vm.offsets[i]=i*loops[nr]->buflength; if(copy_to_user((void*)arg, &vm, sizeof(vm))) return -EFAULT; return 0; } /* Grab frames */ case VIDIOCMCAPTURE: { struct video_mmap vm; if (ptr->in) return -EINVAL; if (!loops[nr]->buffer) return -EINVAL; if (copy_from_user(&vm, (void*)arg, sizeof(vm))) return -EFAULT; if (vm.format!=loops[nr]->palette) return -EINVAL; if (vm.frame > N_BUFFS) return -EINVAL; return 0; } /* Sync with mmap grabbing */ case VIDIOCSYNC: { int frame; unsigned long fw; if (copy_from_user((void *)&frame, (void*)arg, sizeof(int))) return -EFAULT; if (ptr->in) return -EINVAL; if (!loops[nr]->buffer) return -EINVAL; /* Ok, everything should be alright since the program should have called VIDIOMCAPTURE and we are ready to do the 'capturing' */ if (frame > 1) return -EINVAL; loops[nr]->frame=frame; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0) fw = loops[nr]->frameswrite; wait_event_interruptible(loops[nr]->wait, fw!=loops[nr]->frameswrite); #else interruptible_sleep_on(&loops[nr]->wait); #endif if (!loops[nr]->buffer) /* possibly released during sleep */ return -EINVAL; loops[nr]->framesread++; return 0; } /* Get attached units */ case VIDIOCGUNIT: { struct video_unit vu; 
if (ptr->in) vu.video=loops[nr]->vloopout->minor; else vu.video=loops[nr]->vloopin->minor; vu.vbi=VIDEO_NO_UNIT; vu.radio=VIDEO_NO_UNIT; vu.audio=VIDEO_NO_UNIT; vu.teletext=VIDEO_NO_UNIT; if (copy_to_user((void*)arg, &vu, sizeof(vu))) return -EFAULT; return 0; } /* Get frame buffer */ case VIDIOCGFBUF: { struct video_buffer vb; memset(&vb, 0, sizeof(vb)); vb.base=NULL; if(copy_to_user((void *)arg, (void *)&vb, sizeof(vb))) return -EFAULT; return 0; } /* Start, end capture */ case VIDIOCCAPTURE: { int start; if (copy_from_user(&start, (void*)arg, sizeof(int))) return -EFAULT; /* if (start) info ("Capture started"); else info ("Capture stopped"); */ return 0; } case VIDIOCGFREQ: case VIDIOCSFREQ: case VIDIOCGAUDIO: case VIDIOCSAUDIO: return -EINVAL; case VIDIOCKEY: return 0; default: return -ENOTTY; //return -ENOIOCTLCMD; } return 0; } static unsigned int vloopback_poll(struct file *f, struct poll_table_struct *wait) { struct video_device *loopdev=video_devdata(f); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27) priv_ptr ptr=(priv_ptr)video_get_drvdata(loopdev); #else priv_ptr ptr=(priv_ptr)loopdev->priv; #endif int nr=ptr->pipenr; if (loopdev==NULL) return -EFAULT; if (!ptr->in) return 0; if (loops[nr]->ioctlnr!=-1) { if (loops[nr]->zerocopy) { return (POLLIN | POLLPRI | POLLOUT | POLLRDNORM); } else { return (POLLOUT); } } return 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) static struct v4l2_file_operations fileops_template= #else static struct file_operations fileops_template= #endif { owner: THIS_MODULE, open: vloopback_open, release: vloopback_release, read: vloopback_read, write: vloopback_write, poll: vloopback_poll, ioctl: vloopback_ioctl, #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,15) && defined(CONFIG_COMPAT) compat_ioctl: v4l_compat_ioctl32, #endif mmap: vloopback_mmap, };
/*
 * note_page() - callback invoked for each page-table entry while walking the
 * kernel page tables.  Accumulates runs of identically-mapped pages: a run
 * ends when the protection bits, the table level, or the current address
 * marker changes, at which point the finished run [st->start_address, addr)
 * is recorded into st->k_map->map_info[] (unless only counting, see 'ro').
 *
 * @st:    walk state (current run start, protections, level, region markers)
 * @addr:  address at which this entry takes effect (end of the previous run)
 * @level: page-table level of the entry
 * @val:   raw table-entry value; masked by pg_level[level].mask to get prot
 *
 * Relies on file-scope state not visible here: pg_level[], ro, entry,
 * n_entries, start_vmalloc_allocated, addr_in_valid_range(),
 * addr_from_kernel(), addr_from_vmalloc().
 */
static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u64 val)
{
	u64 prot = val & pg_level[level].mask;

	/* NOTE(review): both bounds compare against start_vmalloc_allocated,
	 * so this condition is just an equality test on a single address.
	 * The upper bound was presumably meant to be the END of the
	 * allocated vmalloc region (identifier not visible in this chunk)
	 * — confirm against the rest of the file. */
	if (st->start_address >= start_vmalloc_allocated &&
	    st->start_address <= start_vmalloc_allocated) {
		goto skip_process;
	}
	if (!st->level && !ro) {
		/* First entry of the walk: just open the initial run. */
		st->level = level;
		st->current_prot = prot;
	} else if (prot != st->current_prot || level != st->level ||
		   addr >= st->marker[1].start_address) {
		/* Run boundary: record the finished run if it is valid. */
		if (st->current_prot && addr_in_valid_range (st->marker->start_address)) {
			if (ro) {
				/* Counting-only pass: size the table, skip recording. */
				n_entries++;
				goto skip_process;
			}
			if (entry < n_entries) {
				int nr_pages = (addr - st->start_address) / PAGE_SIZE;
				struct kernel_map_info *info = &st->k_map->map_info[entry];
				info->start_addr = st->start_address;
				info->end_addr = addr;
				/* Clamp so the phys_addr[] array below cannot overflow. */
				if (nr_pages >= MAX_PHYS_ADDR)
					nr_pages = MAX_PHYS_ADDR - 1;
				info->n_pages = nr_pages;
				if (addr_from_kernel (st->marker->start_address)) {
					/* Linear-mapped region: physically contiguous, so
					 * storing the two endpoints is sufficient. */
					info->phys_addr[0] = __pa (st->start_address);
					info->phys_addr[1] = __pa (addr);
					info->n_phys_addr = 2;
					entry++;
				} else if (addr_from_vmalloc (st->marker->start_address) && level == 4) {
					/* vmalloc region: pages may be physically scattered,
					 * so resolve every page individually via its PTE.
					 * (level == 4 restricts this to leaf/PTE entries.) */
					int i;
					unsigned long aux_addr;
					for (i = 0, aux_addr = st->start_address; i < nr_pages;
					     i++, aux_addr += PAGE_SIZE) {
						unsigned long pfn = vmalloc_to_pfn ((void *) aux_addr);
						if (!pfn_valid (pfn))
							info->phys_addr[i] = 0;
						else
							info->phys_addr[i] = (pfn << PAGE_SHIFT);
					}
					info->n_phys_addr = info->n_pages;
					entry++;
				}
			}
		}
skip_process:
		/* Advance past any exhausted address-space marker, then open
		 * the next run at this address.  (The goto at the top of the
		 * function legally jumps here, into this block.) */
		if (addr >= st->marker[1].start_address) {
			st->marker++;
		}
		st->start_address = addr;
		st->current_prot = prot;
		st->level = level;
	}
#ifdef CONFIG_ARM64
	/* NOTE(review): on arm64 this can advance st->marker a second time
	 * for the same address when the skip_process path above already
	 * advanced it — verify this double increment is intentional. */
	if (addr >= st->marker[1].start_address) {
		st->marker++;
	}
#endif
}