Пример #1
0
/*
 * vma_fault - page-fault handler for a driver-private VMA.
 *
 * vma->vm_private_data points at an unsigned long table: vp[0] is the
 * table length and vp[1..] hold one PFN per page of the mapping
 * (presumably set up at mmap time — TODO confirm against the mapper).
 *
 * Always returns VM_FAULT_NOPAGE: either vm_insert_pfn() installed the
 * PTE itself, or the fault is out of range / the VMA carries no private
 * data and there is nothing for the core fault path to install.
 */
static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long *vp = (unsigned long *)vma->vm_private_data;

	if (likely(vp)) {
		/* Page index of the fault within the VMA; +1 skips the
		 * length slot stored at vp[0]. */
		unsigned int offset = 1 +
			(((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT);

		if (likely(offset < vp[0]))
			/* Return value deliberately ignored: -EBUSY means
			 * another thread installed the PTE first, which is
			 * harmless for a NOPAGE handler. */
			vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, vp[offset]);
	}
	return VM_FAULT_NOPAGE;
}
Пример #2
0
/*
 * mspec_fault
 *
 * Creates a mspec page and maps it to user space.
 *
 * Fault handler: looks up (or lazily allocates) the uncached page
 * backing the faulting pgoff, converts it to a physical frame number
 * appropriate for the mspec type, and installs the PTE itself.
 * Returns VM_FAULT_NOPAGE on success, VM_FAULT_OOM if no uncached
 * page could be allocated.
 */
static int
mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long paddr, maddr;
	unsigned long pfn;
	pgoff_t index = vmf->pgoff;
	struct vma_data *vdata = vma->vm_private_data;

	/* Optimistic unlocked read; re-checked under vdata->lock below
	 * before anything is published. */
	maddr = (volatile unsigned long) vdata->maddr[index];
	if (maddr == 0) {
		/* Allocate outside the spinlock; uncached_alloc_page may
		 * be slow. */
		maddr = uncached_alloc_page(numa_node_id(), 1);
		if (maddr == 0)
			return VM_FAULT_OOM;

		spin_lock(&vdata->lock);
		if (vdata->maddr[index] == 0) {
			/* We won the race: publish our page. */
			vdata->count++;
			vdata->maddr[index] = maddr;
		} else {
			/* Lost the race: free ours and use the winner's. */
			uncached_free_page(maddr, 1);
			maddr = vdata->maddr[index];
		}
		spin_unlock(&vdata->lock);
	}

	/* Translate the uncached kernel address to the physical address
	 * the user mapping should see for this mspec type. */
	if (vdata->type == MSPEC_FETCHOP)
		paddr = TO_AMO(maddr);
	else
		paddr = maddr & ~__IA64_UNCACHED_OFFSET;

	pfn = paddr >> PAGE_SHIFT;

	/*
	 * vm_insert_pfn can fail with -EBUSY, but in that case it will
	 * be because another thread has installed the pte first, so it
	 * is no problem.
	 */
	vm_insert_pfn(vma, vmf->address, pfn);

	return VM_FAULT_NOPAGE;
}
/*
 * Map a physically contiguous Mali block into user space, one MMU page
 * at a time, by inserting PFNs directly into the VMA.
 *
 * @descriptor:       allocation record (unused in this routine)
 * @vma:              target user VMA
 * @mali_phys:        Mali-side physical base of the block
 * @mapping_offset:   byte offset into the VMA at which to start mapping
 * @size:             number of bytes to map (caller supplies a multiple
 *                    of MALI_MMU_PAGE_SIZE — TODO confirm)
 * @cpu_usage_adjust: offset converting Mali physical to CPU physical
 *
 * Returns 0 on success, 1 if any vm_insert_pfn() call fails.
 */
static int mali_mem_block_cpu_map(mali_mem_allocation *descriptor, struct vm_area_struct *vma, u32 mali_phys, u32 mapping_offset, u32 size, u32 cpu_usage_adjust)
{
	u32 user_virt = vma->vm_start + mapping_offset;
	u32 cpu_base = mali_phys + cpu_usage_adjust;
	u32 bytes_left = size;
	u32 page_off = 0;

	while (bytes_left) {
		int err = vm_insert_pfn(vma, user_virt + page_off, __phys_to_pfn(cpu_base + page_off));

		if (unlikely(err)) {
			MALI_DEBUG_PRINT(1, ("Block allocator: Failed to insert pfn into vma\n"));
			return 1;
		}

		bytes_left -= MALI_MMU_PAGE_SIZE;
		page_off += MALI_MMU_PAGE_SIZE;
	}

	return 0;
}
Пример #4
0
			goto fail;
		}
		r->mmapping = 1;
	}

	/* Page relative to the VMA start - we must calculate this ourselves
	   because vmf->pgoff is the fake GEM offset */
	page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
				>> PAGE_SHIFT;

	/* CPU view of the page, don't go via the GART for CPU writes */
	if (r->stolen)
		pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
	else
		pfn = page_to_pfn(r->pages[page_offset]);
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}