/*
 * Set up the file backing of a memory-mapped region: record a (possibly
 * shared) file descriptor reference, the file offset and the clear-end in
 * the region.  If 'prefill' is set, eagerly map in any pages that are
 * already present in the VM page cache so that later page faults on them
 * are avoided.
 *
 * Returns OK.  Prefill failures are non-fatal: pages that could not be
 * mapped in here are simply faulted in (and fetched from the FS) on first
 * access instead.
 */
int mappedfile_setfile(struct vmproc *owner,
	struct vir_region *region, int fd, u64_t offset,
	dev_t dev, ino_t ino, u16_t clearend, int prefill, int mayclosefd)
{
	vir_bytes vaddr;
	struct fdref *newref;

	/* Reuse an existing fd reference for this inode/device pair if one
	 * exists; otherwise create a new one.
	 */
	newref = fdref_dedup_or_new(owner, ino, dev, fd, mayclosefd);

	assert(newref);
	assert(!region->param.file.inited);
	assert(dev != NO_DEV);
	fdref_ref(newref, region);
	region->param.file.offset = offset;
	region->param.file.clearend = clearend;
	region->param.file.inited = 1;

	if(!prefill) return OK;

	/* Walk the region page by page, mapping in pages found in the cache. */
	for(vaddr = 0; vaddr < region->length; vaddr+=VM_PAGE_SIZE) {
		struct cached_page *cp = NULL;
		struct phys_region *pr;
		u64_t referenced_offset = offset + vaddr;

		/* Pages overlapping the cleared tail of the mapping need
		 * partial zeroing and so cannot be mapped straight from the
		 * cache; stop prefilling once we reach them.
		 */
		if(roundup(vaddr+region->param.file.clearend,
			VM_PAGE_SIZE) >= region->length) {
			break;
		}

		/* Anonymous (inode-less) entries are looked up by device
		 * position; named ones by inode and offset.
		 */
		if(ino == VMC_NO_INODE) {
			cp = find_cached_page_bydev(dev, referenced_offset,
			  	VMC_NO_INODE, 0, 1);
		} else {
			cp = find_cached_page_byino(dev, ino,
				referenced_offset, 1);
		}
		/*
		 * If we get a hit for a page that is to be used only once,
		 * then either we found a stale page (due to a process dying
		 * before a requested once-page could be mapped in) or this is
		 * a rare case of concurrent requests for the same page.  In
		 * both cases, force the page to be obtained from its FS later.
		 */
		if(!cp || (cp->flags & VMSF_ONCE)) continue;
		/* Reference the cached physical page from this region and
		 * enter it into the page table.
		 */
		if(!(pr = pb_reference(cp->page, vaddr, region,
			&mem_type_mappedfile))) {
			printf("mappedfile_setfile: pb_reference failed\n");
			break;
		}
		if(map_ph_writept(region->parent, region, pr) != OK) {
			printf("mappedfile_setfile: map_ph_writept failed\n");
			break;
		}
	}

	return OK;
}
/* Example 2 (0) */
/*
 * Internal consistency check of the VM page cache: verify both hash tables
 * (by device position and by inode) against each other and against the LRU
 * list.  Any inconsistency trips an assert.
 */
void cache_sanitycheck_internal(void)
{
	int h;
	int n = 0;			/* total nodes seen, runaway guard */
	int byino = 0;			/* entries in the by-inode hash */
	int withino = 0;		/* by-dev entries that carry an inode */
	int bydev_total = 0, lru_total = 0;
	struct cached_page *cp;

	for(h = 0; h < HASHSIZE; h++) {
		for(cp = cache_hash_bydev[h]; cp; cp = cp->hash_next_dev) {
			assert(cp->dev != NO_DEV);
			assert(h == makehash(cp->dev, cp->dev_offset));
			/* Every entry must be findable through the lookup
			 * function.  Pass 0 for the final (LRU-touch)
			 * argument so the sanity check does not reorder the
			 * LRU list while we are inspecting it; the lookup is
			 * called with five arguments everywhere else in this
			 * file.
			 */
			assert(cp == find_cached_page_bydev(cp->dev, cp->dev_offset, cp->ino, cp->ino_offset, 0));
			if(cp->ino != VMC_NO_INODE) withino++;
			bydev_total++;
			n++;
			assert(n < 1500000);	/* arbitrary cycle guard */
		}
		for(cp = cache_hash_byino[h]; cp; cp = cp->hash_next_ino) {
			assert(cp->dev != NO_DEV);
			assert(cp->ino != VMC_NO_INODE);
			assert(h == makehash(cp->ino, cp->ino_offset));
			byino++;
			n++;
			assert(n < 1500000);
		}
	}

	/* Every inode-carrying by-dev entry must also be in the by-ino hash. */
	assert(byino == withino);

	/* The LRU list ends must be consistent with each other. */
	if(lru_newest) {
		assert(lru_oldest);
		assert(!lru_newest->newer);
		assert(!lru_oldest->older);
	} else {
		assert(!lru_oldest);
	}

	/* Walk the LRU list, checking forward/backward link symmetry. */
	for(cp = lru_oldest; cp; cp = cp->newer) {
		struct cached_page *newer = cp->newer,
			*older = cp->older;
		if(newer) assert(newer->older == cp);
		if(older) assert(older->newer == cp);
		lru_total++;
	}

	/* Every cached page is on the LRU list exactly once, and the
	 * global counter agrees.
	 */
	assert(lru_total == bydev_total);

	assert(lru_total == cached_pages);
}
/* Example 3 (0) */
/*
 * Set up the file backing of a memory-mapped region: record a (possibly
 * shared) file descriptor reference, the file offset and the clear-end in
 * the region.  If 'prefill' is set, eagerly map in any pages that are
 * already present in the VM page cache so that later page faults on them
 * are avoided.
 *
 * Returns OK.  Prefill failures are non-fatal: pages that could not be
 * mapped in here are simply faulted in (and fetched from the FS) on first
 * access instead.
 */
int mappedfile_setfile(struct vmproc *owner,
	struct vir_region *region, int fd, u64_t offset,
	dev_t dev, ino_t ino, u16_t clearend, int prefill, int mayclosefd)
{
	vir_bytes vaddr;
	struct fdref *newref;

	newref = fdref_dedup_or_new(owner, ino, dev, fd, mayclosefd);

	assert(newref);
	assert(!region->param.file.inited);
	assert(dev != NO_DEV);
	fdref_ref(newref, region);
	region->param.file.offset = offset;
	region->param.file.clearend = clearend;
	region->param.file.inited = 1;

	if(!prefill) return OK;

	for(vaddr = 0; vaddr < region->length; vaddr+=VM_PAGE_SIZE) {
		struct cached_page *cp = NULL;
		struct phys_region *pr;
		u64_t referenced_offset = offset + vaddr;

		/* Pages overlapping the cleared tail need partial zeroing
		 * and cannot be mapped straight from the cache.
		 */
		if(roundup(vaddr+region->param.file.clearend,
			VM_PAGE_SIZE) >= region->length) {
			break;
		}

		if(ino == VMC_NO_INODE) {
			cp = find_cached_page_bydev(dev, referenced_offset,
			  	VMC_NO_INODE, 0, 1);
		} else {
			cp = find_cached_page_byino(dev, ino,
				referenced_offset, 1);
		}
		/*
		 * If we get a hit for a page that is to be used only once,
		 * then either we found a stale page (due to a process dying
		 * before a requested once-page could be mapped in) or this is
		 * a rare case of concurrent requests for the same page.  In
		 * both cases, force the page to be obtained from its FS later.
		 */
		if(!cp || (cp->flags & VMSF_ONCE)) continue;
		if(!(pr = pb_reference(cp->page, vaddr, region,
			&mem_type_mappedfile))) {
			printf("mappedfile_setfile: pb_reference failed\n");
			break;
		}
		if(map_ph_writept(region->parent, region, pr) != OK) {
			printf("mappedfile_setfile: map_ph_writept failed\n");
			break;
		}
	}

	return OK;
}
/* Example 4 (0) */
/*
 * Enter the physical block 'pb' into the page cache, keyed by device
 * position and (when 'ino' is not VMC_NO_INODE) also by inode.  The block
 * gains an extra reference held by the cache and is marked PBF_INCACHE.
 *
 * Returns OK on success, EINVAL if the block is already cached, or ENOMEM
 * if no cache node could be allocated.
 */
int addcache(dev_t dev, u64_t dev_off, ino_t ino, u64_t ino_off, struct phys_block *pb)
{
	int hv_dev;
	struct cached_page *hb;

	if(pb->flags & PBF_INCACHE) {
		printf("VM: already in cache\n");
		return EINVAL;
	}

	if(!SLABALLOC(hb)) {
		printf("VM: no memory for cache node\n");
		return ENOMEM;
	}

	assert(dev != NO_DEV);
#if CACHE_SANITY
	/* The lookup takes a final LRU-touch flag; pass 0 so this debug
	 * check does not reorder the LRU list.
	 */
	assert(!find_cached_page_bydev(dev, dev_off, ino, ino_off, 0));
#endif

	hb->dev = dev;
	hb->dev_offset = dev_off;
	hb->ino = ino;
	hb->ino_offset = ino_off;
	hb->page = pb;
	hb->page->refcount++;	/* block also referenced by cache now */
	hb->page->flags |= PBF_INCACHE;

	/* Hook the node into the by-device hash chain. */
	hv_dev = makehash(dev, dev_off);

	hb->hash_next_dev = cache_hash_bydev[hv_dev];
	cache_hash_bydev[hv_dev] = hb;

	/* Named (inode-backed) pages are additionally indexed by inode. */
	if(hb->ino != VMC_NO_INODE)
		addcache_byino(hb);

	lru_add(hb);

	return OK;
}
/* Example 5 (0) */
/*
 * Page fault handler for file-mapped regions.  Try to satisfy the fault
 * from the VM page cache; otherwise ask VFS (through 'cb') to read the
 * page from the file system, suspending the faulting process until the
 * I/O completes (*io is set and SUSPEND is returned in that case).
 */
static int mappedfile_pagefault(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, int write, vfs_callback_t cb,
	void *state, int statelen, int *io)
{
	int procfd = region->param.file.fdref->fd;

	assert(ph->ph->refcount > 0);
	assert(region->param.file.inited);
	assert(region->param.file.fdref);
	assert(region->param.file.fdref->dev != NO_DEV);

	/* Totally new block? Create it. */
	if(ph->ph->phys == MAP_NONE) {
		struct cached_page *cp;
		u64_t referenced_offset =
			region->param.file.offset + ph->offset;
		if(region->param.file.fdref->ino == VMC_NO_INODE) {
			cp = find_cached_page_bydev(region->param.file.fdref->dev,
				referenced_offset, VMC_NO_INODE, 0, 1);
		} else {
			cp = find_cached_page_byino(region->param.file.fdref->dev,
				region->param.file.fdref->ino, referenced_offset, 1);
		}
		/*
		 * Normally, a cache hit saves a round-trip to the file system
		 * to load the page.  However, if the page in the VM cache is
		 * marked for one-time use, then force a round-trip through the
		 * file system anyway, so that the FS can update the page by
		 * readding it to the cache.  Thus, for one-time use pages,
		 * no caching is performed.  This approach is correct even in
		 * the light of concurrent requests and disappearing processes
		 * but relies on VM requests to VFS being fully serialized.
		 */
		if(cp && (!cb || !(cp->flags & VMSF_ONCE))) {
			int result = OK;
			pb_unreferenced(region, ph, 0);
			pb_link(ph, cp->page, ph->offset, region);

			if(roundup(ph->offset+region->param.file.clearend,
				VM_PAGE_SIZE) >= region->length) {
				/* Page overlaps the cleared tail: give the
				 * process a private, partially-zeroed copy.
				 */
				result = cow_block(vmp, region, ph,
					region->param.file.clearend);
			} else if(result == OK && write) {
				result = cow_block(vmp, region, ph, 0);
			}

			/* Discard one-use pages after mapping them in. */
			if (result == OK && (cp->flags & VMSF_ONCE))
				rmcache(cp);

			return result;
		}

		/* Without a callback we cannot do I/O; fail the fault.  The
		 * debug stacktrace is compiled out along with the printf.
		 */
		if(!cb) {
#if 0
			printf("VM: mem_file: no callback, returning EFAULT\n");
			sys_diagctl_stacktrace(vmp->vm_endpoint);
#endif
			return EFAULT;
		}

		/* Ask VFS to read the page in; the process is suspended. */
		if(vfs_request(VMVFSREQ_FDIO, procfd, vmp, referenced_offset,
			VM_PAGE_SIZE, cb, NULL, state, statelen) != OK) {
			printf("VM: mappedfile_pagefault: vfs_request failed\n");
			return ENOMEM;
		}
		*io = 1;
		return SUSPEND;
	}

	if(!write) {
#if 0
		printf("mappedfile_pagefault: nonwrite fault?\n");
#endif
		return OK;
	}

	/* Write fault on an existing shared page: copy-on-write it. */
	return cow_block(vmp, region, ph, 0);
}
/* Example 6 (0) */
/*
 * Handle a VM_MAPCACHEPAGE request: map a run of cached pages, identified
 * by device (and optionally inode) position, into the caller's address
 * space as a new region.  On success the reply message carries the virtual
 * address of the mapping; if any requested page is not in the cache, the
 * partial mapping is undone and ENOENT is returned.
 */
int
do_mapcache(message *msg)
{
	dev_t dev = msg->m_u.m_vmmcp.dev;
	u64_t dev_off = (u64_t) msg->m_u.m_vmmcp.dev_offset_pages * VM_PAGE_SIZE;
	u64_t ino_off = (u64_t) msg->m_u.m_vmmcp.ino_offset_pages * VM_PAGE_SIZE;
	int n;
	int bytes = msg->m_u.m_vmmcp.pages * VM_PAGE_SIZE;
	struct vir_region *vr;
	struct vmproc *caller;
	vir_bytes offset;
	int io = 0;

	if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
	caller = &vmproc[n];

	if(bytes < VM_PAGE_SIZE) return EINVAL;

	/* Create an anonymous, writable region to hold the mapping. */
	if(!(vr = map_page_region(caller, VM_PAGE_SIZE, VM_DATATOP, bytes,
		VR_ANON | VR_WRITABLE, 0, &mem_type_cache))) {
		printf("VM: map_page_region failed\n");
		return ENOMEM;
	}

	assert(vr->length == bytes);

	for(offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
		struct cached_page *hb;

		assert(vr->length == bytes);
		assert(offset < vr->length);

		if(!(hb = find_cached_page_bydev(dev, dev_off + offset,
			msg->m_u.m_vmmcp.ino, ino_off + offset, 1))) {
			/* A requested page is missing: undo the mapping. */
			map_unmap_region(caller, vr, 0, bytes);
			return ENOENT;
		}

		/* Hand the cached page to the fault handler through the
		 * region's pb_cache parameter; map_pf is expected to consume
		 * and clear it (asserted below).
		 */
		assert(!vr->param.pb_cache);
		vr->param.pb_cache = hb->page;

		assert(vr->length == bytes);
		assert(offset < vr->length);

		if(map_pf(caller, vr, offset, 1, NULL, NULL, 0, &io) != OK) {
			map_unmap_region(caller, vr, 0, bytes);
			printf("VM: map_pf failed\n");
			return ENOMEM;
		}
		assert(!vr->param.pb_cache);
	}

	/* Build the reply: only the mapping address is meaningful. */
	memset(msg, 0, sizeof(*msg));

	msg->m_u.m_vmmcp_reply.addr = (void *) vr->vaddr;

#if CACHE_SANITY
	cache_sanitycheck_internal();
#endif

	return OK;
}
/* Example 7 (0) */
/*
 * Handle a VM_SETCACHEPAGE request: a file system hands a run of pages it
 * has read into its own (anonymous) memory to VM, to be entered into the
 * page cache under the given device/inode position.  The pages must be
 * anonymous and referenced only by the caller; their memory type is
 * changed to mem_type_cache.
 */
int
do_setcache(message *msg)
{
	int r;
	dev_t dev = msg->m_u.m_vmmcp.dev;
	u64_t dev_off = (u64_t) msg->m_u.m_vmmcp.dev_offset_pages * VM_PAGE_SIZE;
	u64_t ino_off = (u64_t) msg->m_u.m_vmmcp.ino_offset_pages * VM_PAGE_SIZE;
	int n;
	struct vmproc *caller;
	vir_bytes offset;
	int bytes = msg->m_u.m_vmmcp.pages * VM_PAGE_SIZE;

	if(bytes < VM_PAGE_SIZE) return EINVAL;

	if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
	caller = &vmproc[n];

	/* Process each page of the caller-supplied buffer in turn.
	 * NOTE(review): earlier pages are not rolled back when a later page
	 * fails with EFAULT — presumably the caller treats any error as
	 * fatal; confirm against the FS-side caller.
	 */
	for(offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
		struct vir_region *region;
		struct phys_region *phys_region = NULL;
		vir_bytes v = (vir_bytes) msg->m_u.m_vmmcp.block + offset;
                struct cached_page *hb;

		/* Translate the caller's virtual address to its region and
		 * physical page.
		 */
		if(!(region = map_lookup(caller, v, &phys_region))) {
			printf("VM: error: no reasonable memory region given (offset 0x%lx, 0x%lx)\n", offset, v);
			return EFAULT;
		}

		if(!phys_region) {
			printf("VM: error: no available memory region given\n");
			return EFAULT;
		}

		if((hb=find_cached_page_bydev(dev, dev_off + offset,
			msg->m_u.m_vmmcp.ino, ino_off + offset, 1))) {
			/* block inode info updated */
			if(hb->page != phys_region->ph) {
				/* previous cache entry has become
				 * obsolete; make a new one. rmcache
				 * removes it from the cache and frees
				 * the page if it isn't mapped in anywhere
				 * else.
				 */
                        	rmcache(hb);
			} else {
				/* block was already there, inode info might've changed which is fine */
				continue;
			}
		}

		/* Only plain anonymous memory may be donated to the cache. */
		if(phys_region->memtype != &mem_type_anon &&
			phys_region->memtype != &mem_type_anon_contig) {
			printf("VM: error: no reasonable memory type\n");
			return EFAULT;
		}

		/* The page must not be shared with any other mapping. */
		if(phys_region->ph->refcount != 1) {
			printf("VM: error: no reasonable refcount\n");
			return EFAULT;
		}

		/* The page now belongs to the cache memory type. */
		phys_region->memtype = &mem_type_cache;

		if((r=addcache(dev, dev_off + offset,
			msg->m_u.m_vmmcp.ino, ino_off + offset, phys_region->ph)) != OK) {
			printf("VM: addcache failed\n");
			return r;
		}
	}

#if CACHE_SANITY
	cache_sanitycheck_internal();
#endif

	return OK;
}
/*
 * Page fault handler for file-mapped regions.  Try to satisfy the fault
 * from the VM page cache; otherwise ask VFS (through 'cb') to read the
 * page from the file system, suspending the faulting process until the
 * I/O completes (*io is set and SUSPEND is returned in that case).
 */
static int mappedfile_pagefault(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, int write, vfs_callback_t cb,
	void *state, int statelen, int *io)
{
	/* NOTE(review): allocflags is computed but not used anywhere in
	 * this function — looks like a leftover; confirm before removing.
	 */
	u32_t allocflags;
	int procfd = region->param.file.fdref->fd;

	allocflags = vrallocflags(region->flags);

	assert(ph->ph->refcount > 0);
	assert(region->param.file.inited);
	assert(region->param.file.fdref);
	assert(region->param.file.fdref->dev != NO_DEV);

	/* Totally new block? Create it. */
	if(ph->ph->phys == MAP_NONE) {
		struct cached_page *cp;
		u64_t referenced_offset =
			region->param.file.offset + ph->offset;
		/* Anonymous (inode-less) mappings are looked up by device
		 * position; named ones by inode and offset.
		 */
		if(region->param.file.fdref->ino == VMC_NO_INODE) {
			cp = find_cached_page_bydev(region->param.file.fdref->dev,
				referenced_offset, VMC_NO_INODE, 0, 1);
		} else {
			cp = find_cached_page_byino(region->param.file.fdref->dev,
				region->param.file.fdref->ino, referenced_offset, 1);
		}
		/*
		 * Normally, a cache hit saves a round-trip to the file system
		 * to load the page.  However, if the page in the VM cache is
		 * marked for one-time use, then force a round-trip through the
		 * file system anyway, so that the FS can update the page by
		 * by readding it to the cache.  Thus, for one-time use pages,
		 * no caching is performed.  This approach is correct even in
		 * the light of concurrent requests and disappearing processes
		 * but relies on VM requests to VFS being fully serialized.
		 */
		if(cp && (!cb || !(cp->flags & VMSF_ONCE))) {
			int result = OK;
			pb_unreferenced(region, ph, 0);
			pb_link(ph, cp->page, ph->offset, region);

			if(roundup(ph->offset+region->param.file.clearend,
				VM_PAGE_SIZE) >= region->length) {
				/* Page overlaps the cleared tail: give the
				 * process a private, partially-zeroed copy.
				 */
				result = cow_block(vmp, region, ph,
					region->param.file.clearend);
			} else if(result == OK && write) {
				result = cow_block(vmp, region, ph, 0);
			}

			/* Discard one-use pages after mapping them in. */
			if (result == OK && (cp->flags & VMSF_ONCE))
				rmcache(cp);

			return result;
		}

		/* Without a callback we cannot do I/O; fail the fault. */
		if(!cb) {
#if 0
			printf("VM: mem_file: no callback, returning EFAULT\n");
			sys_diagctl_stacktrace(vmp->vm_endpoint);
#endif
			return EFAULT;
		}

		/* Ask VFS to read the page in; the process is suspended. */
                if(vfs_request(VMVFSREQ_FDIO, procfd, vmp, referenced_offset,
			VM_PAGE_SIZE, cb, NULL, state, statelen) != OK) {
			printf("VM: mappedfile_pagefault: vfs_request failed\n");
			return ENOMEM;
		}
		*io = 1;
		return SUSPEND;
	}

	if(!write) {
#if 0
		printf("mappedfile_pagefault: nonwrite fault?\n");
#endif
		return OK;
	}

	/* Write fault on an existing shared page: copy-on-write it. */
	return cow_block(vmp, region, ph, 0);
}