static glodf_t
mmap_fifo(int fd, int UNUSED(flags))
{
	/* map the whole of stdin, note this could cause RAM pressure */
	/* we start out with one page of memory ... */
	size_t iniz = sysconf(_SC_PAGESIZE);
	glodf_t m;

	if ((m = mmap_mem(iniz)).d == NULL) {
		goto out;
	}

	{
		ptrdiff_t off = 0;

		for (ssize_t nrd, bz = m.z - off;
		     (nrd = read(fd, (char*)m.d + off, bz)) > 0;
		     off += nrd, bz = m.z - off) {
			if (nrd == bz) {
				/* enlarge and reread */
				m = mremap_mem(m, 2U * m.z);
				if (m.d == NULL) {
					/* resizing failed, bail out
					 * rather than read into a NULL map */
					break;
				}
			}
		}
	}
out:
	return m;
}
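/*
 * A minimal sketch of the helpers mmap_fifo() relies on, assuming
 * glodf_t is a plain {data, size} pair and that both helpers signal
 * failure with a NULL .d field.  mmap_mem() and mremap_mem() are not
 * defined in this file; the bodies below are illustrative guesses,
 * not the project's actual implementation.
 */
#define _GNU_SOURCE		/* for mremap() and MAP_ANONYMOUS */
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

typedef struct {
	void *d;
	size_t z;
} glodf_t;

static glodf_t
mmap_mem(size_t z)
{
	/* anonymous private mapping, the usual way to get page-sized
	 * growable scratch memory */
	glodf_t res = {
		.d = mmap(NULL, z, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
		.z = z,
	};
	if (res.d == MAP_FAILED) {
		res.d = NULL;
		res.z = 0U;
	}
	return res;
}

static glodf_t
mremap_mem(glodf_t m, size_t new_z)
{
	/* MREMAP_MAYMOVE lets the kernel relocate the mapping if it
	 * cannot be grown in place */
	glodf_t res = {
		.d = mremap(m.d, m.z, new_z, MREMAP_MAYMOVE),
		.z = new_z,
	};
	if (res.d == MAP_FAILED) {
		res.d = NULL;
		res.z = 0U;
	}
	return res;
}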
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * If the user is not attempting to mmap a high memory address then
	 * the standard mmap_mem mechanism will work.  High memory addresses
	 * need special handling, as remap_page_range expects a physically
	 * contiguous range of kernel addresses (such as obtained in kmalloc).
	 */
	if ((offset + size) < (unsigned long) high_memory)
		return mmap_mem(file, vma);

	/*
	 * Accessing memory above the top of what the kernel knows about,
	 * or through a file pointer that was marked O_SYNC, will be
	 * done non-cached.
	 */
	if (noncached_address(offset) || (file->f_flags & O_SYNC))
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Don't do anything here; "nopage" will fill the holes */
	vma->vm_ops = &kmem_vm_ops;

	/* Don't try to swap out physical pages. */
	vma->vm_flags |= VM_RESERVED;

	/* Don't dump addresses that are not real memory to a core file. */
	vma->vm_flags |= VM_IO;
	return 0;
}
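/*
 * A hedged user-space usage sketch, assuming a kernel old enough to
 * still expose /dev/kmem: an mmap() on that device is what dispatches
 * into the driver's mmap_kmem() above, and opening with O_SYNC forces
 * the non-cached path.  The offset and length are placeholders, not
 * meaningful kernel addresses.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	const size_t len = 4096U;	/* one page, placeholder size */
	const off_t off = 0;		/* placeholder kernel offset */
	int fd = open("/dev/kmem", O_RDONLY | O_SYNC);
	void *p;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* this mmap() call ends up in the driver's mmap_kmem() */
	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, off);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	/* ... inspect the mapped kernel memory here ... */
	munmap(p, len);
	close(fd);
	return 0;
}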
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long long val;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * is available in mem_map, which pfn_valid checks for.  Perhaps a
	 * new macro should be added here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(vma->vm_pgoff))
		return -EIO;

	val = (u64)vma->vm_pgoff << PAGE_SHIFT;
	vma->vm_pgoff = __pa(val) >> PAGE_SHIFT;
	return mmap_mem(file, vma);
}
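/*
 * A worked sketch of the pgoff translation above, assuming an
 * architecture with a linear direct map where __pa(v) is simply
 * v - PAGE_OFFSET (true for classic x86 lowmem; other architectures
 * differ).  The constants and the sample offset are illustrative,
 * not taken from a real system.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12U
#define PAGE_OFFSET	0xc0000000ULL	/* assumed 32-bit x86 3G/1G split */

int
main(void)
{
	/* user mmaps /dev/kmem at page offset 0xc0100 ... */
	uint64_t pgoff = 0xc0100U;
	/* ... which names a kernel virtual address ... */
	uint64_t virt = pgoff << PAGE_SHIFT;	/* 0xc0100000 */
	/* ... and __pa() strips the direct-map base to yield the
	 * physical address whose page frame mmap_mem() expects */
	uint64_t phys = virt - PAGE_OFFSET;	/* 0x00100000 */

	printf("pgoff %#llx -> physical pfn %#llx\n",
	       (unsigned long long)pgoff,
	       (unsigned long long)(phys >> PAGE_SHIFT));
	return 0;
}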