int mappedfile_setfile(struct vmproc *owner,
	struct vir_region *region, int fd, u64_t offset,
	dev_t dev, ino_t ino, u16_t clearend, int prefill, int mayclosefd)
{
	vir_bytes vaddr;
	struct fdref *newref;

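	/*
	 * Obtain a reference to the backing file, reusing an fdref the
	 * process already holds for this <dev, ino> where possible.
	 */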
	newref = fdref_dedup_or_new(owner, ino, dev, fd, mayclosefd);

	assert(newref);
	assert(!region->param.file.inited);
	assert(dev != NO_DEV);
	fdref_ref(newref, region);
	region->param.file.offset = offset;
	region->param.file.clearend = clearend;
	region->param.file.inited = 1;

	if(!prefill) return OK;

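	/*
	 * Prefill: walk the region a page at a time and map in any pages
	 * that are already present in the cache, so they need not be
	 * faulted in later.
	 */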
	for(vaddr = 0; vaddr < region->length; vaddr+=VM_PAGE_SIZE) {
		struct cached_page *cp = NULL;
		struct phys_region *pr;
		u64_t referenced_offset = offset + vaddr;

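		/*
		 * Stop once we reach pages that overlap the clear-at-end
		 * area; those cannot be used from the cache as-is because
		 * their tails must be zeroed.
		 */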
		if(roundup(vaddr+region->param.file.clearend,
			VM_PAGE_SIZE) >= region->length) {
			break;
		}

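		/*
		 * Look the page up in the cache: by device offset if no
		 * inode is known, by inode and file offset otherwise.
		 */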
		if(ino == VMC_NO_INODE) {
			cp = find_cached_page_bydev(dev, referenced_offset,
			  	VMC_NO_INODE, 0, 1);
		} else {
			cp = find_cached_page_byino(dev, ino,
				referenced_offset, 1);
		}
		/*
		 * If we get a hit for a page that is to be used only once,
		 * then either we found a stale page (due to a process dying
		 * before a requested once-page could be mapped in) or this is
		 * a rare case of concurrent requests for the same page.  In
		 * both cases, force the page to be obtained from its FS later.
		 */
		if(!cp || (cp->flags & VMSF_ONCE)) continue;
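		/* Make this region reference the cached physical page. */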
		if(!(pr = pb_reference(cp->page, vaddr, region,
			&mem_type_mappedfile))) {
			printf("mappedfile_setfile: pb_reference failed\n");
			break;
		}
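		/* Enter the new mapping into the process's page tables. */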
		if(map_ph_writept(region->parent, region, pr) != OK) {
			printf("mappedfile_setfile: map_ph_writept failed\n");
			break;
		}
	}

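	/*
	 * Prefill is best-effort; any pages not mapped in above will be
	 * faulted in on demand.
	 */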
	return OK;
}