Example #1
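/* Map num_pfns remote page frames into the current aspace. Returns the
 * attachment virtual address on success, or a negative errno packed into
 * the unsigned return value on failure. */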
unsigned long
pisces_map_xpmem_pfn_range(struct xpmem_pfn * pfns,
			   u64                num_pfns)
{
	struct aspace * aspace      = current->aspace;
	unsigned long   addr        = 0;
	unsigned long   attach_addr = 0;
	unsigned long   size        = 0;
	int status = 0;
	u64 i      = 0;

	size = num_pfns * PAGE_SIZE;

	spin_lock(&(aspace->lock));
	{
		status = __aspace_find_hole(aspace, 0, size, PAGE_SIZE, &attach_addr);
		
		if (status) {
			spin_unlock(&(aspace->lock));
			printk(KERN_ERR "Cannot map xpmem pfn range - out of memory\n");
			return -ENOMEM;
		}
		
		status = __aspace_add_region(aspace, attach_addr, size, VM_READ | VM_WRITE | VM_USER,
					     PAGE_SIZE, "pisces_ppe");
		if (status) {
			spin_unlock(&(aspace->lock));
			printk(KERN_ERR "Cannot map xpmem pfn range - cannot add memory to aspace\n");
			return -ENOMEM;
		}
	}
	spin_unlock(&(aspace->lock));

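	/* Map each remote PFN, one page at a time, into the range
	   reserved above */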
	for (i = 0; i < num_pfns; i++) {
		addr = attach_addr + (i * PAGE_SIZE);

		printk("Mapping vaddr = %p, pfn = %llu, (paddr = %p)\n", 
		       (void *)addr,
		       pfns[i].pfn,
		       (void *)(pfns[i].pfn << PAGE_SHIFT)
		       );

		status = map_pfn_range(addr, pfns[i].pfn, PAGE_SIZE);

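		/* NOTE: on failure the "pisces_ppe" region added above is
		   not removed, and any pages already mapped stay mapped */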
		if (status) {
			return -ENOMEM;
		}
	}

	return attach_addr;
}
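
The unsigned return type doubles as an address-or-errno channel: callers get the attachment address on success and a negative errno cast to unsigned long on failure. A minimal caller sketch, with a hypothetical attach_remote_pages() wrapper (the cast-and-compare against -4095 is the same trick Linux's IS_ERR_VALUE() uses; nothing below is part of the original code):

/* Hypothetical caller: attach a remote PFN list and decode the
 * address-or-errno return convention. */
static int
attach_remote_pages(struct xpmem_pfn * pfns,
		    u64               num_pfns)
{
	unsigned long attach_addr;

	attach_addr = pisces_map_xpmem_pfn_range(pfns, num_pfns);

	/* Negative errnos occupy the top 4095 values of the range */
	if (attach_addr >= (unsigned long)(-4095))
		return (int)attach_addr;

	printk("Attached %llu pfns at %p\n", num_pfns, (void *)attach_addr);
	return 0;
}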
Example #2
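/* Minimal mmap(): anonymous private mappings are carved off the top of
 * the heap region; file-backed mappings are handed to the file's mmap
 * op. Returns the mapped address or a negative errno. */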
long
sys_mmap(
	unsigned long addr,
	unsigned long len,
	unsigned long prot,
	unsigned long flags,
	unsigned long fd,
	unsigned long off
)
{
	struct aspace *as = current->aspace;
	struct file *file;
	struct vm_area_struct vma;
	unsigned long mmap_brk;
	int rv;

	if (0 == len || len != round_up(len, PAGE_SIZE))
		return -EINVAL;

	/* we only support anonymous private mappings; a file-backed
	   private mapping has copy-on-write semantics, which we can't
	   provide because this kernel has no page-fault resolution */

	if((flags & MAP_PRIVATE) && !(flags & MAP_ANONYMOUS))
		return -EINVAL;

	/* anonymous mappings (not backed by a file) are handled specially */
	if(flags & MAP_ANONYMOUS) {
		/* anonymous mmap()ed memory is put at the top of the
		   heap region, and grows from high to low addresses,
		   i.e. down towards the current heap end. */
		spin_lock(&as->lock);
		mmap_brk = round_down(as->mmap_brk - len, PAGE_SIZE);

		/* Protect against extending into the UNIX data segment,
		   or becoming negative (which wraps around to large addr) */
		if ((mmap_brk <= as->brk) || (mmap_brk >= as->mmap_brk)) {
			spin_unlock(&as->lock);
			return -ENOMEM;
		}

		as->mmap_brk = mmap_brk;
		spin_unlock(&as->lock);

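		/* Zero the new mapping through the kernel direct map;
		   this assumes the backing memory is physically
		   contiguous, so one lookup covers all len bytes */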
		paddr_t phys;
		if (__aspace_virt_to_phys(as, mmap_brk, &phys)) {
			panic("sys_mmap() failed to get physical address\n");
		}
		memset(__va(phys), 0, len);
		return mmap_brk;
	}

	/* file-backed mappings */

	/* TODO: add a million checks here that we'll simply ignore now */

	file = get_current_file(fd);
	if (NULL == file)
		return -EBADF;

	if (NULL == file->f_op ||
	    NULL == file->f_op->mmap)
		return -ENODEV;

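	/* Reserve an address range for the file mapping while holding
	   the aspace lock */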
	spin_lock(&as->lock);
	if ((rv = __aspace_find_hole(as, addr, len, PAGE_SIZE, &addr))) {
		spin_unlock(&as->lock);
		return -ENOMEM;
	}

	if ((rv = __aspace_add_region(as, addr, len,
				      VM_READ|VM_WRITE|VM_USER,
				      PAGE_SIZE, "mmap"))) {
		/* assuming there is no race between find_hole and
		   add_region, as we're holding the as->lock, this
		   failure can't be due to someone adding our region
		   in between */
		spin_unlock(&as->lock);
		return -ENOMEM;
	}
	spin_unlock(&as->lock);

	/* fill in a vm_area_struct to stay compatible with the Linux
	   driver layer */
	vma.vm_start = addr;
	vma.vm_end = addr + len;
	vma.vm_page_prot = __pgprot(VM_READ|VM_WRITE);
	vma.vm_pgoff = 0;

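	/* hand the reserved range to the driver to establish the
	   actual mappings */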
	rv = file->f_op->mmap(file, &vma);
	if (rv) {
		spin_lock(&as->lock);
		__aspace_del_region(as, addr, len);
		spin_unlock(&as->lock);
		return rv;
	}
	return vma.vm_start;
}
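
The anonymous path above is a top-down bump allocator squeezed between the heap break (as->brk, growing up) and the mmap break (as->mmap_brk, growing down). A standalone sketch of just that arithmetic, with illustrative values (the round_down macro and the two addresses here are assumptions for the demo, not kernel values):

/* Standalone demo of the top-down anonymous allocator check. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define round_down(x, y) ((x) & ~((y) - 1))

int
main(void)
{
	unsigned long brk      = 0x40000000UL; /* heap end, grows up   */
	unsigned long mmap_brk = 0x7f000000UL; /* mmap top, grows down */
	unsigned long len      = 3 * PAGE_SIZE;

	unsigned long new_brk = round_down(mmap_brk - len, PAGE_SIZE);

	/* new_brk >= mmap_brk catches unsigned wraparound when
	 * len > mmap_brk; new_brk <= brk catches collision with
	 * the heap */
	if ((new_brk <= brk) || (new_brk >= mmap_brk))
		printf("would return -ENOMEM\n");
	else
		printf("would map %lu bytes at %#lx\n", len, new_brk);

	return 0;
}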