Example #1
int kt_kernel_idle_task(void)
{
	/* Mark this thread as a kernel thread, start the pager thread, and
	 * name the process so it shows up as "[kernel]" in listings. */
	tm_thread_raise_flag(current_thread, THREAD_KERNEL);
	kthread_create(&kthread_pager, "[kpager]", 0, __KT_pager, 0);
	strncpy((char *)current_process->command, "[kernel]", 128);
	/* wait until init has successfully executed, and then remap. */
	while(!(kernel_state_flags & KSF_HAVEEXECED)) {
		tm_schedule();
	}
	printk(1, "[kernel]: remapping lower memory with protection flags...\n");
	/* Keep interrupts off while the kernel mappings are being changed. */
	cpu_interrupt_set(0);
	for(addr_t addr = MEMMAP_KERNEL_START; addr < MEMMAP_KERNEL_END; addr += PAGE_SIZE)
	{
		mm_virtual_changeattr(addr, PAGE_PRESENT | PAGE_WRITE, PAGE_SIZE);
	}
	cpu_interrupt_set(1);
	/* Now enter the main idle loop, waiting to do periodic cleanup */
	printk(0, "[idle]: entering background loop\n");
	for(;;) {
		assert(!current_thread->held_locks);
		/* Prefer any pending per-CPU work; otherwise yield the CPU. */
		if(__current_cpu->work.count > 0)
			workqueue_dowork(&__current_cpu->work);
		else
			tm_schedule();
		/* Reap children re-parented to the kernel. status is only valid
		 * when sys_waitpid returns a positive PID, so guard the
		 * WIFSTOPPED check accordingly. */
		int status;
		int pid = sys_waitpid(-1, &status, WNOHANG);
		if(pid > 0 && WIFSTOPPED(status)) {
			sys_kill(pid, SIGKILL);
		}
	}
}
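
The non-blocking reap at the end of the loop follows the usual waitpid(2) idiom. For comparison, here is a minimal userspace sketch of the same pattern (not part of the kernel source above); note that status is only meaningful when waitpid() returns a positive PID, and that userspace waitpid() needs WUNTRACED to report stopped children at all:

#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>

/* Reap any children that changed state, without blocking. */
static void reap_children(void)
{
	int status;
	pid_t pid;
	while((pid = waitpid(-1, &status, WNOHANG | WUNTRACED)) > 0) {
		if(WIFSTOPPED(status)) {
			/* Same policy as the idle task: kill stopped children. */
			kill(pid, SIGKILL);
		}
	}
}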
Example #2
addr_t fs_inode_map_private_physical_page(struct inode *node, addr_t virt, size_t offset, int attrib, size_t req_len)
{
	addr_t ph;
	ASSERT(!(virt & ~PAGE_MASK));
	ASSERT(!(offset & ~PAGE_MASK));
	// Specify MAP_ZERO, since read_inode may not fill up the whole page.
	size_t memsz = PAGE_SIZE;
	ph = mm_physical_allocate(memsz, false);
	bool result = mm_virtual_map(virt, ph, MAP_ZERO | attrib | PAGE_WRITE, memsz);
	mm_physical_increment_count(ph); /* take a reference for the new mapping */
	if(!result) {
		printk_safe(KERN_PANIC, "[MMINODE]: Trying to remap mminode private section %x.\n", virt);
		PANIC(0, "[MMINODE]: Trying to remap mminode private section.", EINVAL);
	}
	int err = -1;
	/* Try to read the data. If this fails, we don't really have a good way 
	 * of telling userspace this...eh.
	 */
	size_t len = req_len;
	if(len > memsz)
		len = memsz; /* never read past the single page we just mapped */
	if(len + offset > (size_t)node->length)
		len = node->length - offset;
	if(offset < (size_t)node->length) {
		if(node->filesystem) {
			err = fs_inode_read(node, offset, len, (void *)virt);
			if(err < 0 || (size_t)err != len)
				printk(0, "[MMINODE]: read inode failed with %d.\n", err);
		}
	}
	/* Drop the temporary write permission now that the data is in place. */
	mm_virtual_changeattr(virt, attrib, memsz);
	return ph;
}
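
A typical caller is the page-fault path for a private file mapping: on first touch it page-aligns the fault address, computes the matching file offset, and lets this function allocate, map, and fill the page. A minimal sketch, where the handler name and the map_base/file_offset/prot parameters are illustrative and not taken from the source:

/* Hypothetical fault handler for a private file mapping. Assumes
 * map_base and file_offset are page-aligned. */
addr_t handle_private_fault(struct inode *node, addr_t fault_addr,
		addr_t map_base, size_t file_offset, int prot)
{
	addr_t virt = fault_addr & PAGE_MASK; /* page-align the fault address */
	size_t offset = file_offset + (virt - map_base);
	/* Allocates a page, maps it writable to load the data, then
	 * restricts it to the requested protection before returning. */
	return fs_inode_map_private_physical_page(node, virt, offset, prot, PAGE_SIZE);
}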
Example #3
/* Try to map a physical page of an inode to a virtual address. If FS_INODE_POPULATE
 * is passed in flags and the page doesn't exist, this allocates a physical page,
 * maps it, and loads the data. If that flag is not passed, it simply tries to
 * return the existing physical page.
 */
addr_t fs_inode_map_shared_physical_page(struct inode *node, addr_t virt, size_t offset, int flags, int attrib)
{
	ASSERT(!(virt & ~PAGE_MASK));
	ASSERT(!(offset & ~PAGE_MASK));
	// Test if we have any shared mappings...
	if(!(node->flags & INODE_PCACHE)) {
		return 0;
	}
	mutex_acquire(&node->mappings_lock);
	int page_number = offset / PAGE_SIZE;
	struct physical_page *entry;
	if((entry = hash_lookup(&node->physicals, &page_number, sizeof(page_number))) == NULL) {
		mutex_release(&node->mappings_lock);
		return 0;
	}
	ASSERT(entry->count);
	/* We don't have to worry about someone decreasing the count to zero while
	   we're working, since a process never calls this function without being
	   responsible for one of the counts. */
	mutex_acquire(&entry->lock);
	if(!entry->page && (flags & FS_INODE_POPULATE))
	{
		// Map a new page into virt, and load data into it.
		entry->page = mm_physical_allocate(PAGE_SIZE, false);
		if(!mm_virtual_map(virt, entry->page, MAP_ZERO | attrib | PAGE_WRITE, PAGE_SIZE))
			PANIC(0, "[MMINODE]: Trying to remap mminode shared section.", EINVAL);
		// YES. DO THIS TWICE. THIS IS NOT A TYPO.
		mm_physical_increment_count(entry->page); /* Once for the map. */
		mm_physical_increment_count(entry->page); /* Once for entry->page. */

		int err = -1;
		/* Try to read the data. If this fails, we don't really have a good way
		 * of telling userspace this...eh.
		 */
		size_t len = PAGE_SIZE;
		if(len + offset > (size_t)node->length)
			len = node->length - offset;
		if(offset < (size_t)node->length) {
			/* Note the parentheses around the assignment: without them, `<`
			 * binds tighter than `=` and err would get the boolean result. */
			if(node->filesystem && (err = fs_inode_read(node, offset, len, (void *)virt)) < 0)
				printk(0, "[MMINODE]: Read inode failed with %d.\n", err);
		}
		/* Drop the temporary write permission now that the data is in place. */
		mm_virtual_changeattr(virt, attrib, PAGE_SIZE);
		atomic_fetch_add(&node->mapped_pages_count, 1);
	} else if(entry->page) {
		mm_physical_increment_count(entry->page);
		if(!mm_virtual_map(virt, entry->page, attrib, PAGE_SIZE))
			PANIC(0, "[MMINODE]: Trying to remap mminode shared section.", EINVAL);
	}
	addr_t ret = entry->page;
	mutex_release(&entry->lock);
	mutex_release(&node->mappings_lock);
	return ret;
}
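
The two increments pair with two separate decrements later: one when a process unmaps its virtual mapping, and one when the page-cache entry itself is torn down. A minimal sketch of the cache-side release, assuming a hypothetical mm_physical_decrement_count() that frees the frame once the count reaches zero (the decrement API is not shown in the source):

/* Hypothetical teardown of a shared page-cache entry; drops the
 * reference held by entry->page itself. Each unmapping process
 * separately drops the reference taken for its own mapping. */
static void fs_inode_release_shared_page(struct inode *node, struct physical_page *entry)
{
	mutex_acquire(&entry->lock);
	if(entry->page) {
		mm_physical_decrement_count(entry->page); /* the entry->page reference */
		entry->page = 0;
		atomic_fetch_sub(&node->mapped_pages_count, 1);
	}
	mutex_release(&entry->lock);
}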