static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
#ifndef NO_MM
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

	/*
	 * Accessing memory above the top the kernel knows about or
	 * through a file pointer that was marked O_SYNC will be
	 * done non-cached.
	 */
	if (noncached_address(offset) || (file->f_flags & O_SYNC))
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Don't try to swap out physical pages.. */
	vma->vm_flags |= VM_RESERVED;

	/*
	 * Don't dump addresses that are not real memory to a core file.
	 */
	if (offset >= __pa(high_memory) || (file->f_flags & O_SYNC))
		vma->vm_flags |= VM_IO;

	if (remap_page_range(vma->vm_start, offset,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
#else /* !NO_MM */
	/*
	 * Return the physical address unmodified if it's possible to
	 * do so given the arguments.
	 */
	if (vma->vm_start == file->f_pos + vma->vm_offset)
		return 0;
	else
		return -EINVAL;
#endif /* !NO_MM */
}
int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
		     enum pci_mmap_state mmap_state, int write_combine)
{
	/*
	 * I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		/*
		 * XXX we could relax this for I/O spaces for which ACPI
		 * indicates that the space is 1-to-1 mapped.  But at the
		 * moment, we don't support multiple PCI address spaces and
		 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
		 */
		return -EINVAL;

	/*
	 * Leave vm_pgoff as-is, the PCI space address is the physical
	 * address on this platform.
	 */
	vma->vm_flags |= (VM_SHM | VM_LOCKED | VM_IO);

	if (write_combine)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_page_range(vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

	/*
	 * Accessing memory above the top the kernel knows about or
	 * through a file pointer that was marked O_SYNC will be
	 * done non-cached.
	 */
	if (noncached_address(offset) || (file->f_flags & O_SYNC))
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Don't try to swap out physical pages.. */
	vma->vm_flags |= VM_RESERVED;

	/*
	 * Don't dump addresses that are not real memory to a core file.
	 */
	if (offset >= __pa(high_memory) || (file->f_flags & O_SYNC))
		vma->vm_flags |= VM_IO;

	if (remap_page_range(vma->vm_start, offset,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
static int mmap_mem(struct inode * inode, struct file * file,
		    unsigned long addr, size_t len, int prot, unsigned long off)
{
	struct vm_area_struct * mpnt;

	if (off & 0xfff || off + len < off)
		return -ENXIO;
	if (x86 > 3 && off >= high_memory)
		prot |= PAGE_PCD;
	if (remap_page_range(addr, off, len, prot))
		return -EAGAIN;

	/* try to create a dummy vmm-structure so that the
	   rest of the kernel knows we are here */
	mpnt = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!mpnt)
		return 0;

	mpnt->vm_task = current;
	mpnt->vm_start = addr;
	mpnt->vm_end = addr + len;
	mpnt->vm_page_prot = prot;
	mpnt->vm_share = NULL;
	mpnt->vm_inode = inode;
	inode->i_count++;
	mpnt->vm_offset = off;
	mpnt->vm_ops = NULL;
	insert_vm_struct(current, mpnt);
	merge_segments(current->mmap, NULL, NULL);
	return 0;
}
static int gfx_inf_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size;

	if (vma->vm_flags & VM_EXEC)
		return -EPERM;

	size = vma->vm_end - vma->vm_start;
	PDEBUG("mmap size = 0x%8.8lx\n", size);
	if (size == 0)
		return -EINVAL;

	// To make things safer, I'll check if it is my baby
	// Aliens are simply rejected.
	// The cost of some slowdown should be acceptable
	if (_gfx_inf_h_validate_surface_address(vma->vm_pgoff << PAGE_SHIFT, size)) { // BJC 102102
		PDEBUG("Invalid surface address specified\n");	// BJC 102102
		return -EPERM;	// don't permit it
	} else {		// BJC102102
		vma->vm_page_prot.pgprot |= _PAGE_NO_CACHE;	// map without caching
	}

	vma->vm_flags |= VM_SHM | VM_IO;	// | VM_DONTCOPY | VM_DONTEXPAND;
						// we are just not in system memory

	if (remap_page_range(vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
			     size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
static int ruby_mmap(struct fb_info *info, struct file *file,
		     struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;

	if (offset & ~PAGE_MASK) {
		printk("Offset not aligned: %ld\n", offset);
		return -ENXIO;
	}

	/*
	if (size > RUBY_PHYSICAL_MEM_SIZE) {
		printk("size too big\n");
		return -ENXIO;
	}
	*/

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		printk("writeable mappings must be shared, rejecting\n");
		return -EINVAL;
	}

	/* we do not want to have this area swapped out, lock it */
	vma->vm_flags |= VM_LOCKED;

	if (remap_page_range(vma->vm_start,
			     virt_to_phys((void *)(unsigned long)fb_info.FrameBufferAddress),
			     size, PAGE_SHARED)) {
		printk("remap page range failed\n");
		return -ENXIO;
	}
	return 0;
}
/**
 * mmtimer_mmap - maps the clock's registers into userspace
 * @file: file structure for the device
 * @vma: VMA to map the registers into
 *
 * Calls remap_page_range() to map the clock's registers into
 * the calling process' address space.
 */
static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long mmtimer_addr;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		return -EPERM;

	if (PAGE_SIZE > (1 << 16))
		return -ENOSYS;

	vma->vm_flags |= (VM_IO | VM_SHM | VM_LOCKED);
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	mmtimer_addr = __pa(RTC_COUNTER_ADDR);
	mmtimer_addr &= ~(PAGE_SIZE - 1);
	mmtimer_addr &= 0xfffffffffffffffUL;

	if (remap_page_range(vma, vma->vm_start, mmtimer_addr, PAGE_SIZE,
			     vma->vm_page_prot)) {
		printk(KERN_ERR "remap_page_range failed in mmtimer.c\n");
		return -EAGAIN;
	}

	return 0;
}
static int fb_mmap(struct inode *inode, struct file *file,
		   struct vm_area_struct * vma)
{
	struct fb_ops *fb = registered_fb[GET_FB_IDX(inode->i_rdev)];
	struct fb_fix_screeninfo fix;

	if (!fb)
		return -ENODEV;
	fb->fb_get_fix(&fix, PROC_CONSOLE());
	if ((vma->vm_end - vma->vm_start + vma->vm_offset) > fix.smem_len)
		return -EINVAL;
	vma->vm_offset += fix.smem_start;
	if (vma->vm_offset & ~PAGE_MASK)
		return -ENXIO;
	if (m68k_is040or060) {
		pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
		/* Use write-through cache mode */
		pgprot_val(vma->vm_page_prot) |= _PAGE_CACHE040W;
	}
	if (remap_page_range(vma->vm_start, vma->vm_offset,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	vma->vm_inode = inode;
	inode->i_count++;
	return 0;
}
static int cdata_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long from;
	unsigned long to;
	unsigned long size;

	printk(KERN_INFO "CDATA: cdata_mmap\n");
	printk(KERN_INFO "CDATA: cdata_mmap, start=%08lx\n", vma->vm_start);
	printk(KERN_INFO "CDATA: cdata_mmap, end=%08lx\n", vma->vm_end);

	from = vma->vm_start;
	to = 0x33f00000;
	size = vma->vm_end - vma->vm_start;

	while (size) {
		remap_page_range(from, to, PAGE_SIZE, PAGE_SHARED);
		/* for kernel v2.6, change remap_page_range() to remap_pfn_range():
		   remap_pfn_range(vma, vma->vm_start, __pa(kvirt) >> PAGE_SHIFT,
				   PAGE_SIZE, vma->vm_page_prot);
		 */
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	//remap_page_range(from, to, size, PAGE_SHARED);

	return 0;
}
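/*
 * A minimal 2.6+ sketch of the conversion the comment above points at
 * (an assumption, not part of the original driver; the function name
 * cdata_mmap_pfn is hypothetical). For a fixed, physically contiguous
 * window such as the 0x33f00000 region used here, remap_pfn_range()
 * can map the whole VMA in one call, so the per-page loop disappears.
 */
static int cdata_mmap_pfn(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	/* page frame number of the fixed physical target region */
	unsigned long pfn = 0x33f00000UL >> PAGE_SHIFT;

	if (remap_pfn_range(vma, vma->vm_start, pfn, size, PAGE_SHARED))
		return -EAGAIN;
	return 0;
}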
static int vino_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *dev = video_devdata(file);
	struct vino_device *v = dev->priv;
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	int i, err = 0;

	if (down_interruptible(&v->sem))
		return -EINTR;

	if (size > v->page_count * PAGE_SIZE) {
		err = -EINVAL;
		goto out;
	}

	for (i = 0; i < v->page_count; i++) {
		unsigned long page = virt_to_phys((void *)v->desc[i]);

		if (remap_page_range(start, page, PAGE_SIZE, PAGE_READONLY)) {
			err = -EAGAIN;
			goto out;
		}
		start += PAGE_SIZE;
		if (size <= PAGE_SIZE)
			break;
		size -= PAGE_SIZE;
	}
out:
	up(&v->sem);
	return err;
}
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int uncached;

	uncached = uncached_access(file, offset);
#ifdef pgprot_noncached
	if (uncached)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif

	/* Don't try to swap out physical pages.. */
	vma->vm_flags |= VM_RESERVED;

	/*
	 * Don't dump addresses that are not real memory to a core file.
	 */
	if (uncached)
		vma->vm_flags |= VM_IO;

	if (remap_page_range(vma, vma->vm_start, offset,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
static int mp3_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret;

	if (vma->vm_pgoff != 0) {
		ERRMSG("mp3_mmap: pgoff must be 0 (%lu)\n", vma->vm_pgoff);
		return -EINVAL;
	}
	if ((vma->vm_end - vma->vm_start) != mp3_bufsize) {
		ERRMSG("mp3_mmap: vma size mismatch %p %p %d\n",
		       (void *)vma->vm_end, (void *)vma->vm_start, mp3_bufsize);
		return -EINVAL;
	}
	if (!mp3_buff) {
		ERRMSG("mp3_mmap: mem not allocated\n");
		return -ENOMEM;
	}

	ret = remap_page_range(vma->vm_start, mp3_buff_bus, mp3_bufsize, PAGE_SHARED);
	// ret = remap_pfn_range(vma, vma->vm_start, mp3_buff_bus >> PAGE_SHIFT,
	//                       mp3_bufsize, vma->vm_page_prot);
	if (ret) {
		ERRMSG("mp3_mmap: remap failed\n");
		return ret;
	}
	//vma->vm_ops = &mp3_vmops;
	return 0;
}
static int sh7722gfx_mmap( struct file *file, struct vm_area_struct *vma )
{
	unsigned int size;

	/* Just allow mapping at offset 0. */
	if (vma->vm_pgoff)
		return -EINVAL;

	/* Check size of requested mapping. */
	size = vma->vm_end - vma->vm_start;
	if (size != PAGE_ALIGN(sizeof(SH772xGfxSharedArea)))
		return -EINVAL;

	/* Set reserved and I/O flag for the area. */
	vma->vm_flags |= VM_RESERVED | VM_IO;

	/* Select uncached access. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9)
	return remap_pfn_range( vma, vma->vm_start,
				virt_to_phys((void*)shared) >> PAGE_SHIFT,
				size, vma->vm_page_prot );
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
	return remap_page_range( vma, vma->vm_start,
				 virt_to_phys((void*)shared),
				 size, vma->vm_page_prot );
#else
	return io_remap_page_range( vma->vm_start,
				    virt_to_phys((void*)shared),
				    size, vma->vm_page_prot );
#endif
}
#if LINUX_VERSION_CODE >= 0x020100
static int safl_mmap(struct file * file, struct vm_area_struct * vma)
#else
static int safl_mmap(struct inode * inode, struct file * file,
		     struct vm_area_struct * vma)
#endif
{
	unsigned long size;

	if (vma->vm_offset != 0)
		return -EINVAL;

	size = vma->vm_end - vma->vm_start;
	if (size > FLASH_SZ)
		return -EINVAL;

	pgprot_val(vma->vm_page_prot) = pgprot_noncached(pgprot_val(vma->vm_page_prot));
#if LINUX_VERSION_CODE >= 0x020100
	vma->vm_flags |= VM_IO;
#endif
	if (remap_page_range(vma->vm_start, flash_addr, size, vma->vm_page_prot))
		return -EAGAIN;
#if LINUX_VERSION_CODE < 0x020100
	vma->vm_inode = inode;
	inode->i_count++;
#endif
	return 0;
}
static int cramfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long address, length;
	struct inode *inode = file->f_dentry->d_inode;
	struct super_block *sb = inode->i_sb;

	/* this is only used in the case of read-only maps for XIP */
	if (vma->vm_flags & VM_WRITE)
		return generic_file_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	address = PAGE_ALIGN(sb->CRAMFS_SB_LINEAR_PHYS_ADDR + OFFSET(inode));
	address += vma->vm_pgoff << PAGE_SHIFT;

	length = vma->vm_end - vma->vm_start;
	if (length > inode->i_size)
		length = inode->i_size;
	length = PAGE_ALIGN(length);

#if 0
	/* Doing the following makes it slower and more broken.  bdl */
	/*
	 * Accessing memory above the top the kernel knows about or
	 * through a file pointer that was marked O_SYNC will be
	 * done non-cached.
	 */
	vma->vm_page_prot =
		__pgprot((pgprot_val(vma->vm_page_prot) & ~_CACHE_MASK) | _CACHE_UNCACHED);
#endif

	/*
	 * Don't dump addresses that are not real memory to a core file.
	 */
	vma->vm_flags |= VM_IO;
	flush_tlb_page(vma, address);
	if (remap_page_range(vma->vm_start, address, length, vma->vm_page_prot))
		return -EAGAIN;

#ifdef DEBUG_CRAMFS_XIP
	printk("cramfs_mmap: mapped %s at 0x%08lx, length %lu to vma 0x%08lx"
	       ", page_prot 0x%08lx\n",
	       file->f_dentry->d_name.name, address, length,
	       vma->vm_start, pgprot_val(vma->vm_page_prot));
#endif
	return 0;
}
static int dev_nvram_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long offset = virt_to_phys(nvram_buf);

	if (remap_page_range(vma->vm_start, offset,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
		return -EINVAL;

	if (remap_page_range(vma, vma->vm_start,
			     mfn_to_pfn(xen_start_info->store_mfn),
			     size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
/* this function will map (fragment of) rvmalloc'ed memory area to user space */
int rvmmap(void *mem, unsigned memsize, struct vm_area_struct *vma)
{
	unsigned long pos, size, start = vma->vm_start;
#if LINUX_VERSION_CODE < 0x020300
	unsigned long offset = vma->vm_offset;
#else
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
#endif

	/* this is not time critical code, so we check the arguments */
	/* vma->vm_offset HAS to be checked (and is checked) */
	if (offset < 0)
		return -EFAULT;
	size = vma->vm_end - vma->vm_start;
	if (size + offset > memsize)
		return -EFAULT;
	pos = (unsigned long)mem + offset;
	if (pos % PAGE_SIZE || start % PAGE_SIZE || size % PAGE_SIZE)
		return -EFAULT;

	while (size > 0) {
#if LINUX_VERSION_CODE < 0x020300
		if (remap_page_range(start, kvirt_to_phys(pos), PAGE_SIZE,
				     vma->vm_page_prot)) {
			/* FIXME: what should we do here to unmap previous pages? */
			printk(KERN_ERR "rvmmap failed: vm_start=0x%lx, vm_end=0x%lx, size=0x%lx, pos=0x%lx; please report to [email protected]\n",
			       vma->vm_start, vma->vm_end, size, pos);
			return -EFAULT;
		}
#else
		if (remap_page_range(start, kvirt_to_pa(pos), PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;
#endif
		pos += PAGE_SIZE;
		start += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}
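/*
 * A minimal 2.6+ sketch of the same idea (an assumption, not part of
 * the original; the name rvmmap_pfn is hypothetical). kvirt_to_pa() is
 * gone on modern kernels, but rvmalloc'ed (vmalloc-backed) pages are
 * still not physically contiguous, so each page is remapped one at a
 * time via vmalloc_to_pfn() and remap_pfn_range().
 */
int rvmmap_pfn(void *mem, unsigned memsize, struct vm_area_struct *vma)
{
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long pos = (unsigned long)mem + offset;

	/* reject mappings that run past the end of the buffer */
	if (size + offset > memsize)
		return -EFAULT;

	while (size > 0) {
		/* look up the physical frame behind each vmalloc'ed page */
		if (remap_pfn_range(vma, start, vmalloc_to_pfn((void *)pos),
				    PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;
		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}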
//----------------------------- LX_map_vm_to_user ------------------------------
void *LX_map_vm_to_user(void *vPtr, unsigned long size)
{
	struct vm_mapped_mem *pMap = NULL;
	pgprot_t prot = { 0 };
	unsigned long mhandle;

	spin_lock_irq(&mmap_lock);
	mhandle = mmap_handle++;
	if (mmap_handle > 255)
		mmap_handle = 1;
	spin_unlock_irq(&mmap_lock);

	if (!remap_page_range(0, mhandle, (unsigned long)vPtr, size, prot)) {
		pMap = LX_get_mapped_mem(mhandle);
		if (pMap)
			return pMap->mAddr;
	}
	return NULL;
}
static int audio_mmap(struct file *file, struct vm_area_struct *vma)
{
	audio_state_t *state = file->private_data;
	audio_stream_t *s;
	unsigned long size, vma_addr;
	int i, ret;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE) {
		if (!state->wr_ref)
			return -EINVAL;
		s = state->output_stream;
	} else if (vma->vm_flags & VM_READ) {
		if (!state->rd_ref)
			return -EINVAL;
		s = state->input_stream;
	} else
		return -EINVAL;

	if (s->mapped)
		return -EINVAL;
	size = vma->vm_end - vma->vm_start;
	if (size != s->fragsize * s->nbfrags)
		return -EINVAL;
	if (!s->buffers && audio_setup_buf(s))
		return -ENOMEM;

	vma_addr = vma->vm_start;
	for (i = 0; i < s->nbfrags; i++) {
		audio_buf_t *buf = &s->buffers[i];

		if (!buf->master)
			continue;
		ret = remap_page_range(vma_addr, buf->dma_desc->dsadr,
				       buf->master, vma->vm_page_prot);
		if (ret)
			return ret;
		vma_addr += buf->master;
	}
	for (i = 0; i < s->nbfrags; i++)
		s->buffers[i].dma_desc->ddadr &= ~DDADR_STOP;
	s->mapped = 1;

	return 0;
}
static int fpga_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long physical = FPGA_PHY_START + off;
	unsigned long vsize = vma->vm_end - vma->vm_start;
	unsigned long psize = FPGA_PHY_SIZE - off;

	DPRINTK("mmap offset=0x%lx, protect=0x%lx.\n", off,
		pgprot_val(vma->vm_page_prot));

	if (vsize > psize)
		return -EINVAL;		// spans too high

	vma->vm_flags |= VM_IO | VM_RESERVED;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_page_range(vma->vm_start, physical, vsize, vma->vm_page_prot))
		return -EAGAIN;
	//remap_page_range(vma->vm_start, physical, vsize, PAGE_SHARED);

	return 0;
}
static int iomap_mmap(struct file *file, struct vm_area_struct *vma)
{
	Iomap *idev = iomap_dev[MINOR(file->f_dentry->d_inode->i_rdev)];
	unsigned long size;

	/* no such device */
	if (!idev->base)
		return -ENXIO;

	/* size must be a multiple of PAGE_SIZE */
	size = vma->vm_end - vma->vm_start;
	if (size % PAGE_SIZE)
		return -EINVAL;

	if (remap_page_range(vma->vm_start, idev->base, size, vma->vm_page_prot))
		return -EAGAIN;

	MSG("region mmapped\n");
	return 0;
}
static int do_rv_mmap(struct vm_area_struct *vma, const char *adr,
		      unsigned long size)
{
	unsigned long start = (unsigned long)adr;
	unsigned long page, pos;

	if (!buf_start_2) {
		printk(KERN_INFO "Data was not initialized\n");
		return -EINVAL;
	}

	pos = (unsigned long)buf_start_2;
	while (size > 0) {
		page = kvirt_to_pa(pos);
		if (remap_page_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;
		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}
int oprof_hash_map_mmap(struct file * file, struct vm_area_struct * vma)
{
	ulong start = (ulong)vma->vm_start;
	ulong page, pos;
	ulong size = (ulong)(vma->vm_end - vma->vm_start);

	if (size > PAGE_ALIGN(OP_HASH_MAP_SIZE) || (vma->vm_flags & VM_WRITE) ||
	    GET_VM_OFFSET(vma))
		return -EINVAL;

	pos = (ulong)hash_map;
	while (size > 0) {
		page = kvirt_to_pa(pos);
		if (remap_page_range(start, page, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;
		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}
static int dhahelper_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (last_mem_request.operation != MEMORY_OP_MAP) {
		if (dhahelper_verbosity > 0)
			printk(KERN_ERR "dhahelper: mapping not requested before mmap\n");
		return -EFAULT;
	}

	if (dhahelper_verbosity > 1)
		printk(KERN_INFO "dhahelper: mapping %x (size: %x)\n",
		       last_mem_request.start + last_mem_request.offset,
		       last_mem_request.size);

	if (remap_page_range(vma->vm_start,
			     last_mem_request.start + last_mem_request.offset,
			     last_mem_request.size, vma->vm_page_prot)) {
		if (dhahelper_verbosity > 0)
			printk(KERN_ERR "dhahelper: error mapping memory\n");
		return -EFAULT;
	}

	return 0;
}
int cdata_mmap(struct file* filp, struct vm_area_struct *vma)
{
	unsigned long from;
	unsigned long to;
	unsigned long size;

	from = vma->vm_start;
	to = 0x33f00000;
	size = vma->vm_end - vma->vm_start;

	//remap_page_range(from, to, size, PAGE_SHARED);
	while (size) {
		remap_page_range(from, to, PAGE_SIZE, PAGE_SHARED);
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	printk(KERN_INFO "CDATA vm start: %08lx\n", vma->vm_start);
	printk(KERN_INFO "CDATA vm end: %08lx\n", vma->vm_end);

	return 0;
}
static int mmapper_mmap(struct file *file, struct vm_area_struct * vma)
{
	int ret = -EINVAL;
	int size;

	lock_kernel();
	if (vma->vm_pgoff != 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size > mmapper_size) {
		ret = -EFAULT;
		goto out;
	}

	/* XXX A comment above remap_page_range says it should only be
	 * called when the mm semaphore is held */
	if (remap_page_range(vma->vm_start, p_buf, size, vma->vm_page_prot))
		goto out;
	ret = 0;
out:
	unlock_kernel();
	return ret;
}
/*
 * This is the core of the direct rendering engine.
 */
struct page *
sgi_graphics_nopage (struct vm_area_struct *vma, unsigned long address,
		     int no_share)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int board = GRAPHICS_CARD (vma->vm_dentry->d_inode->i_rdev);
	unsigned long virt_add, phys_add;

#ifdef DEBUG
	printk ("Got a page fault for board %d address=%lx guser=%lx\n", board,
		address, (unsigned long) cards[board].g_user);
#endif

	/* Figure out if another process has this mapped, and revoke the mapping
	 * in that case. */
	if (cards[board].g_user && cards[board].g_user != current) {
		/* FIXME: save graphics context here, dump it to rendering
		 * node? */
		remove_mapping(cards[board].g_user, vma->vm_start, vma->vm_end);
	}

	cards [board].g_user = current;

	/* Map the physical address of the newport registers into the address
	 * space of this process */
	virt_add = address & PAGE_MASK;
	phys_add = cards[board].g_regs + virt_add - vma->vm_start;
	remap_page_range(virt_add, phys_add, PAGE_SIZE, vma->vm_page_prot);

	pgd = pgd_offset(current->mm, address);
	pmd = pmd_offset(pgd, address);
	pte = pte_offset(pmd, address);
	return pte_page(*pte);
}
static int vloopback_mmap(struct file *f, struct vm_area_struct *vma)
{
	struct video_device *loopdev = video_devdata(f);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
	priv_ptr ptr = (priv_ptr)video_get_drvdata(loopdev);
#else
	priv_ptr ptr = (priv_ptr)loopdev->priv;
#endif
	int nr = ptr->pipenr;
	unsigned long start = (unsigned long)vma->vm_start;
	long size = vma->vm_end - vma->vm_start;
	unsigned long page, pos;

	down(&loops[nr]->lock);
	if (ptr->in) {
		loops[nr]->zerocopy = 1;
		if (loops[nr]->ropen) {
			info("Can't change size while opened for read");
			up(&loops[nr]->lock);
			return -EINVAL;
		}
		if (!size) {
			up(&loops[nr]->lock);
			return -EINVAL;
		}
		if (loops[nr]->buffer)
			rvfree(loops[nr]->buffer, loops[nr]->buflength * N_BUFFS);
		loops[nr]->buflength = size;
		loops[nr]->buffer = rvmalloc(loops[nr]->buflength * N_BUFFS);
	}
	if (loops[nr]->buffer == NULL) {
		up(&loops[nr]->lock);
		return -EINVAL;
	}

	if (size > (((N_BUFFS * loops[nr]->buflength) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))) {
		up(&loops[nr]->lock);
		return -EINVAL;
	}

	pos = (unsigned long)loops[nr]->buffer;
	while (size > 0) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
		page = kvirt_to_pa(pos);
		if (remap_page_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) {
#else
		page = vmalloc_to_pfn((void *)pos);
		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) {
#endif
			up(&loops[nr]->lock);
			return -EAGAIN;
		}
		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	up(&loops[nr]->lock);

	return 0;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
static long vloopback_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
#else
static int vloopback_ioctl(struct inode *inod, struct file *f,
			   unsigned int cmd, unsigned long arg)
#endif
{
	struct video_device *loopdev = video_devdata(f);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
	priv_ptr ptr = (priv_ptr)video_get_drvdata(loopdev);
#else
	priv_ptr ptr = (priv_ptr)loopdev->priv;
#endif
	int nr = ptr->pipenr;
	int i;

	if (loops[nr]->zerocopy) {
		if (!ptr->in) {
			loops[nr]->ioctlnr = cmd;
			loops[nr]->ioctllength = _IOC_SIZE(cmd);
			/* info("DEBUG: vl_ioctl: !loop->in"); */
			/* info("DEBUG: vl_ioctl: cmd %lu", cmd); */
			/* info("DEBUG: vl_ioctl: len %lu", loops[nr]->ioctllength); */
			if (copy_from_user(loops[nr]->ioctldata, (void *)arg, _IOC_SIZE(cmd)))
				return -EFAULT;
			kill_proc(loops[nr]->pid, SIGIO, 1);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
			wait_event_interruptible(loops[nr]->wait, loops[nr]->ioctlnr == -1);
#else
			interruptible_sleep_on(&loops[nr]->wait);
#endif
			if (loops[nr]->invalid_ioctl) {
				//info ("DEBUG: There was an invalid ioctl");
				loops[nr]->invalid_ioctl = 0;
				return -ENOTTY;
			}
			if (cmd & IOC_IN && !(cmd & IOC_OUT)) {
				//info("DEBUG: vl_ioctl: cmd & IOC_IN 1");
				if (memcmp(loops[nr]->ioctlretdata, loops[nr]->ioctldata, _IOC_SIZE(cmd)))
					return -EINVAL;
				//info("DEBUG: vl_ioctl: cmd & IOC_IN 2");
				return 0;
			} else {
				if (copy_to_user((void *)arg, loops[nr]->ioctlretdata, _IOC_SIZE(cmd)))
					return -EFAULT;
				//info("DEBUG: vl_ioctl: !(cmd & IOC_IN) 1");
				return 0;
			}
		} else {
			if ((loops[nr]->ioctlnr != cmd) && (cmd != (VIDIOCSINVALID))) {
				/* wrong ioctl */
				info("DEBUG: vo_ioctl: Wrong IOCTL");
				return 0;
			}
			if (cmd == VIDIOCSINVALID) {
				loops[nr]->invalid_ioctl = 1;
			} else {
				if (copy_from_user(loops[nr]->ioctlretdata, (void *)arg, loops[nr]->ioctllength))
					return -EFAULT;
			}
			loops[nr]->ioctlnr = -1;
			if (waitqueue_active(&loops[nr]->wait))
				wake_up(&loops[nr]->wait);
			return 0;
		}
	}

	switch (cmd) {
	/* Get capabilities */
	case VIDIOCGCAP:
	{
		struct video_capability b;

		if (ptr->in) {
			sprintf(b.name, "Video loopback %d input", ptr->pipenr);
			b.type = 0;
		}
		else {
			sprintf(b.name, "Video loopback %d output", ptr->pipenr);
			b.type = VID_TYPE_CAPTURE;
		}
		b.channels = 1;
		b.audios = 0;
		b.maxwidth = loops[nr]->width;
		b.maxheight = loops[nr]->height;
		b.minwidth = 20;
		b.minheight = 20;
		if (copy_to_user((void *)arg, &b, sizeof(b)))
			return -EFAULT;
		return 0;
	}
	/* Get channel info (sources) */
	case VIDIOCGCHAN:
	{
		struct video_channel v;

		if (copy_from_user(&v, (void *)arg, sizeof(v)))
			return -EFAULT;
		if (v.channel != 0) {
			info("VIDIOCGCHAN: Invalid Channel, was %d", v.channel);
			v.channel = 0;
			//return -EINVAL;
		}
		v.flags = 0;
		v.tuners = 0;
		v.norm = 0;
		v.type = VIDEO_TYPE_CAMERA;
		/*strcpy(v.name, "Loopback"); -- tibit */
		strcpy(v.name, "Composite1");
		if (copy_to_user((void *)arg, &v, sizeof(v)))
			return -EFAULT;
		return 0;
	}
	/* Set channel */
	case VIDIOCSCHAN:
	{
		int v;

		if (copy_from_user(&v, (void *)arg, sizeof(v)))
			return -EFAULT;
		if (v != 0) {
			info("VIDIOCSCHAN: Invalid Channel, was %d", v);
			return -EINVAL;
		}
		return 0;
	}
	/* Get tuner abilities */
	case VIDIOCGTUNER:
	{
		struct video_tuner v;

		if (copy_from_user(&v, (void *)arg, sizeof(v)) != 0)
			return -EFAULT;
		if (v.tuner) {
			info("VIDIOCGTUNER: Invalid Tuner, was %d", v.tuner);
			return -EINVAL;
		}
		strcpy(v.name, "Format");
		v.rangelow = 0;
		v.rangehigh = 0;
		v.flags = 0;
		v.mode = VIDEO_MODE_AUTO;
		if (copy_to_user((void *)arg, &v, sizeof(v)) != 0)
			return -EFAULT;
		return 0;
	}
	/* Get picture properties */
	case VIDIOCGPICT:
	{
		struct video_picture p;

		p.colour = 0x8000;
		p.hue = 0x8000;
		p.brightness = 0x8000;
		p.contrast = 0x8000;
		p.whiteness = 0x8000;
		p.depth = 0x8000;
		p.palette = loops[nr]->palette;
		if (copy_to_user((void *)arg, &p, sizeof(p)))
			return -EFAULT;
		return 0;
	}
	/* Set picture properties */
	case VIDIOCSPICT:
	{
		struct video_picture p;

		if (copy_from_user(&p, (void *)arg, sizeof(p)))
			return -EFAULT;
		if (!ptr->in) {
			if (p.palette != loops[nr]->palette)
				return -EINVAL;
		} else
			loops[nr]->palette = p.palette;
		return 0;
	}
	/* Get the video overlay window */
	case VIDIOCGWIN:
	{
		struct video_window vw;

		vw.x = 0;
		vw.y = 0;
		vw.width = loops[nr]->width;
		vw.height = loops[nr]->height;
		vw.chromakey = 0;
		vw.flags = 0;
		vw.clipcount = 0;
		if (copy_to_user((void *)arg, &vw, sizeof(vw)))
			return -EFAULT;
		return 0;
	}
	/* Set the video overlay window - passes clip list for hardware
	   smarts, chromakey etc */
	case VIDIOCSWIN:
	{
		struct video_window vw;

		if (copy_from_user(&vw, (void *)arg, sizeof(vw)))
			return -EFAULT;
		if (vw.flags)
			return -EINVAL;
		if (vw.clipcount)
			return -EINVAL;
		if (loops[nr]->height == vw.height && loops[nr]->width == vw.width)
			return 0;
		if (!ptr->in) {
			return -EINVAL;
		} else {
			loops[nr]->height = vw.height;
			loops[nr]->width = vw.width;
			/* Make sure nobody is using the buffer while we fool
			   around with it. We are also not allowing changes
			   while somebody using mmap has the output open.
			*/
			down(&loops[nr]->lock);
			if (loops[nr]->ropen) {
				info("Can't change size while opened for read");
				up(&loops[nr]->lock);
				return -EINVAL;
			}
			if (loops[nr]->buffer)
				rvfree(loops[nr]->buffer, loops[nr]->buflength * N_BUFFS);
			loops[nr]->buflength = vw.width * vw.height * 4;
			loops[nr]->buffer = rvmalloc(loops[nr]->buflength * N_BUFFS);
			up(&loops[nr]->lock);
		}
		return 0;
	}
	/* Memory map buffer info */
	case VIDIOCGMBUF:
	{
		struct video_mbuf vm;

		vm.size = loops[nr]->buflength * N_BUFFS;
		vm.frames = N_BUFFS;
		for (i = 0; i < vm.frames; i++)
			vm.offsets[i] = i * loops[nr]->buflength;
		if (copy_to_user((void *)arg, &vm, sizeof(vm)))
			return -EFAULT;
		return 0;
	}
	/* Grab frames */
	case VIDIOCMCAPTURE:
	{
		struct video_mmap vm;

		if (ptr->in)
			return -EINVAL;
		if (!loops[nr]->buffer)
			return -EINVAL;
		if (copy_from_user(&vm, (void *)arg, sizeof(vm)))
			return -EFAULT;
		if (vm.format != loops[nr]->palette)
			return -EINVAL;
		if (vm.frame > N_BUFFS)
			return -EINVAL;
		return 0;
	}
	/* Sync with mmap grabbing */
	case VIDIOCSYNC:
	{
		int frame;
		unsigned long fw;

		if (copy_from_user((void *)&frame, (void *)arg, sizeof(int)))
			return -EFAULT;
		if (ptr->in)
			return -EINVAL;
		if (!loops[nr]->buffer)
			return -EINVAL;
		/* Ok, everything should be alright since the program
		   should have called VIDIOCMCAPTURE and we are ready to
		   do the 'capturing' */
		if (frame > 1)
			return -EINVAL;
		loops[nr]->frame = frame;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
		fw = loops[nr]->frameswrite;
		wait_event_interruptible(loops[nr]->wait, fw != loops[nr]->frameswrite);
#else
		interruptible_sleep_on(&loops[nr]->wait);
#endif
		if (!loops[nr]->buffer)	/* possibly released during sleep */
			return -EINVAL;
		loops[nr]->framesread++;
		return 0;
	}
	/* Get attached units */
	case VIDIOCGUNIT:
	{
		struct video_unit vu;

		if (ptr->in)
			vu.video = loops[nr]->vloopout->minor;
		else
			vu.video = loops[nr]->vloopin->minor;
		vu.vbi = VIDEO_NO_UNIT;
		vu.radio = VIDEO_NO_UNIT;
		vu.audio = VIDEO_NO_UNIT;
		vu.teletext = VIDEO_NO_UNIT;
		if (copy_to_user((void *)arg, &vu, sizeof(vu)))
			return -EFAULT;
		return 0;
	}
	/* Get frame buffer */
	case VIDIOCGFBUF:
	{
		struct video_buffer vb;

		memset(&vb, 0, sizeof(vb));
		vb.base = NULL;
		if (copy_to_user((void *)arg, (void *)&vb, sizeof(vb)))
			return -EFAULT;
		return 0;
	}
	/* Start, end capture */
	case VIDIOCCAPTURE:
	{
		int start;

		if (copy_from_user(&start, (void *)arg, sizeof(int)))
			return -EFAULT;
		/* if (start)
			info("Capture started");
		   else
			info("Capture stopped"); */
		return 0;
	}
	case VIDIOCGFREQ:
	case VIDIOCSFREQ:
	case VIDIOCGAUDIO:
	case VIDIOCSAUDIO:
		return -EINVAL;
	case VIDIOCKEY:
		return 0;
	default:
		return -ENOTTY;	//return -ENOIOCTLCMD;
	}
	return 0;
}

static unsigned int vloopback_poll(struct file *f, struct poll_table_struct *wait)
{
	struct video_device *loopdev = video_devdata(f);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
	priv_ptr ptr = (priv_ptr)video_get_drvdata(loopdev);
#else
	priv_ptr ptr = (priv_ptr)loopdev->priv;
#endif
	int nr = ptr->pipenr;

	if (loopdev == NULL)
		return -EFAULT;
	if (!ptr->in)
		return 0;
	if (loops[nr]->ioctlnr != -1) {
		if (loops[nr]->zerocopy)
			return (POLLIN | POLLPRI | POLLOUT | POLLRDNORM);
		else
			return (POLLOUT);
	}
	return 0;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
static struct v4l2_file_operations fileops_template =
#else
static struct file_operations fileops_template =
#endif
{
	owner:		THIS_MODULE,
	open:		vloopback_open,
	release:	vloopback_release,
	read:		vloopback_read,
	write:		vloopback_write,
	poll:		vloopback_poll,
	ioctl:		vloopback_ioctl,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,15) && defined(CONFIG_COMPAT)
	compat_ioctl:	v4l_compat_ioctl32,
#endif
	mmap:		vloopback_mmap,
};
static int hs_set_io_map(unsigned int sock, struct pccard_io_map *io)
{
	hs_socket_t *sp = &hs_sockets[sock];
	int map = io->map;
	struct pccard_io_map *sio;
	pgprot_t prot;

	DPRINTK("hs_set_io_map(sock=%d, map=%d, flags=0x%x, speed=%dns, start=0x%04x, stop=0x%04x)\n",
		sock, map, io->flags, io->speed, io->start, io->stop);
	if (map >= MAX_IO_WIN)
		return -EINVAL;
	sio = &sp->io_maps[map];

	/* check for null changes */
	if (io->flags == sio->flags &&
	    io->start == sio->start &&
	    io->stop == sio->stop)
		return 0;

	if (io->flags & MAP_AUTOSZ)
		prot = PAGE_KERNEL_PCC(sock, _PAGE_PCC_IODYN);
	else if (io->flags & MAP_16BIT)
		prot = PAGE_KERNEL_PCC(sock, _PAGE_PCC_IO16);
	else
		prot = PAGE_KERNEL_PCC(sock, _PAGE_PCC_IO8);

	/* TODO: handle MAP_USE_WAIT */
	if (io->flags & MAP_USE_WAIT)
		printk(KERN_INFO MODNAME ": MAP_USE_WAIT unimplemented\n");
	/* TODO: handle MAP_PREFETCH */
	if (io->flags & MAP_PREFETCH)
		printk(KERN_INFO MODNAME ": MAP_PREFETCH unimplemented\n");
	/* TODO: handle MAP_WRPROT */
	if (io->flags & MAP_WRPROT)
		printk(KERN_INFO MODNAME ": MAP_WRPROT unimplemented\n");
	/* TODO: handle MAP_0WS */
	if (io->flags & MAP_0WS)
		printk(KERN_INFO MODNAME ": MAP_0WS unimplemented\n");

	if (io->flags & MAP_ACTIVE) {
		unsigned long pstart, psize, paddrbase, vaddrbase;

		paddrbase = virt_to_phys((void *)(sp->mem_base + 2 * HD64465_PCC_WINDOW));
#ifndef CONFIG_SH_KEYWEST
		vaddrbase = (unsigned long)sp->io_vma->addr;
		pstart = io->start & PAGE_MASK;
		psize = ((io->stop + PAGE_SIZE) & PAGE_MASK) - pstart;

		/*
		 * Change PTEs in only that portion of the mapping requested
		 * by the caller.  This means that most of the time, most of
		 * the PTEs in the io_vma will be unmapped and only the bottom
		 * page will be mapped.  But the code allows for weird cards
		 * that might want IO ports > 4K.
		 */
		DPRINTK("remap_page_range(vaddr=0x%08lx, paddr=0x%08lx, size=0x%08lx)\n",
			vaddrbase + pstart, paddrbase + pstart, psize);
		remap_page_range(vaddrbase + pstart, paddrbase + pstart, psize, prot);
#else
		vaddrbase = paddrbase | 0xa0000000;
#endif
		/*
		 * Change the mapping used by inb() outb() etc
		 */
		mach_port_map(io->start, io->stop - io->start + 1,
			      vaddrbase + io->start, 0);
	} else {
		mach_port_unmap(sio->start, sio->stop - sio->start + 1);
		/* TODO: remap_page_range() to mark pages not present ? */
	}

	*sio = *io;
	return 0;
}