Example #1
/* Try to map a physical page of an inode to a virtual address. If FS_INODE_POPULATE
 * is passed in flags and the page doesn't exist, allocate a physical page, map it,
 * and load the data. Without that flag, simply try to return the existing
 * physical page.
 */
addr_t fs_inode_map_shared_physical_page(struct inode *node, addr_t virt, size_t offset, int flags, int attrib)
{
	ASSERT(!(virt & ~PAGE_MASK));
	ASSERT(!(offset & ~PAGE_MASK));
	// Test if we have any shared mappings...
	if(!(node->flags & INODE_PCACHE)) {
		return 0;
	}
	mutex_acquire(&node->mappings_lock);
	int page_number = offset / PAGE_SIZE;
	struct physical_page *entry;
	if((entry = hash_lookup(&node->physicals, &page_number, sizeof(page_number))) == NULL) {
		mutex_release(&node->mappings_lock);
		return 0;
	}
	ASSERT(entry->count);
	/* So, we don't have to worry about someone decreasing the count to zero while we're working,
	   since a process never calls this function without being responsible for one of the counts. */
	mutex_acquire(&entry->lock);
	if(!entry->page && (flags & FS_INODE_POPULATE))
	{
		// Map a new page into virt, and load data into it.
		entry->page = mm_physical_allocate(PAGE_SIZE, false);
		if(!mm_virtual_map(virt, entry->page, MAP_ZERO | attrib | PAGE_WRITE, PAGE_SIZE))
			PANIC(0, "[MMINODE]: Trying to remap mminode shared section.", EINVAL);
		// YES. DO THIS TWICE. THIS IS NOT A TYPO.
		mm_physical_increment_count(entry->page); /* Once for the map. */
		mm_physical_increment_count(entry->page); /* Once for entry->page. */

		int err = -1;
		/* Try to read the data. If this fails, we don't really have a good way 
		 * of telling userspace this...eh.
		 */
		size_t len = PAGE_SIZE;
		if(len + offset > (size_t)node->length)
			len = node->length - offset;
		if(offset < (size_t)node->length) {
			/* Parenthesize the assignment so err holds the return value, not the comparison result. */
			if(node->filesystem && (err = fs_inode_read(node, offset, len, (void *)virt)) < 0)
				printk(0, "[MMINODE]: Read inode failed with %d.\n", err);
		}
		mm_virtual_changeattr(virt, attrib, PAGE_SIZE);
		atomic_fetch_add(&node->mapped_pages_count, 1);
	} else if(entry->page) {
		mm_physical_increment_count(entry->page);
		if(!mm_virtual_map(virt, entry->page, attrib, PAGE_SIZE))
			PANIC(0, "[MMINODE]: Trying to remap mminode shared section.", EINVAL);
	}
	addr_t ret = entry->page;
	mutex_release(&entry->lock);
	mutex_release(&node->mappings_lock);
	return ret;
}
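A minimal usage sketch, assuming a hypothetical caller: map_shared_fault, fault_addr, and file_offset are illustrative names, not part of the kernel code above. It demonstrates the call contract: page-aligned virtual address and file offset, with FS_INODE_POPULATE requesting allocate-map-read on a page-cache miss.

/* Hypothetical fault-handler helper; only the call contract follows the function above. */
static bool map_shared_fault(struct inode *node, addr_t fault_addr, size_t file_offset, int attrib)
{
	addr_t virt = fault_addr & PAGE_MASK;    /* align down: the ASSERTs require page alignment */
	size_t offset = file_offset & PAGE_MASK; /* the file offset must be page-aligned too */
	/* FS_INODE_POPULATE: on a miss, allocate a physical page, map it, and read the data in. */
	addr_t phys = fs_inode_map_shared_physical_page(node, virt, offset, FS_INODE_POPULATE, attrib);
	return phys != 0; /* 0: inode has no page cache, or no entry exists for this offset */
}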
Example #2
void mm_init(struct multiboot *m)
{
	printk(KERN_DEBUG, "[mm]: Setting up Memory Management...\n");
	arch_mm_virtual_init(&kernel_context);
	cpu_interrupt_register_handler(14, &arch_mm_page_fault_handle);
	pmm_buddy_init();
	process_memorymap(m);
	slab_init(MEMMAP_KMALLOC_START, MEMMAP_KMALLOC_END);
	set_ksf(KSF_MMU);
	/* hey, look at that, we have happy memory times! */
	mm_reclaim_init();
	for(size_t i = 0; i <= (sizeof(struct pagedata) * maximum_page_number) / mm_page_size(1); i++) {
		mm_virtual_map(MEMMAP_FRAMECOUNT_START + i * mm_page_size(1),
				mm_physical_allocate(mm_page_size(1), true),
				PAGE_PRESENT | PAGE_WRITE, mm_page_size(1));
	}
	frames = (struct pagedata *)(MEMMAP_FRAMECOUNT_START);
	printk(0, "[mm]: allocated %d KB for page-frame counting.\n", sizeof(struct pagedata) * maximum_page_number / 1024);
#if CONFIG_MODULES
	loader_add_kernel_symbol(slab_kmalloc);
	loader_add_kernel_symbol(slab_kfree);
	loader_add_kernel_symbol(mm_virtual_map);
	loader_add_kernel_symbol(mm_virtual_getmap);
	loader_add_kernel_symbol(mm_allocate_dma_buffer);
	loader_add_kernel_symbol(mm_free_dma_buffer);
	loader_add_kernel_symbol(mm_physical_allocate);
	loader_add_kernel_symbol(mm_physical_deallocate);
#endif
}
Example #3
addr_t fs_inode_map_private_physical_page(struct inode *node, addr_t virt, size_t offset, int attrib, size_t req_len)
{
	addr_t ph;
	ASSERT(!(virt & ~PAGE_MASK));
	ASSERT(!(offset & ~PAGE_MASK));
	// Specify MAP_ZERO, since read_inode may not fill up the whole page.
	size_t memsz = PAGE_SIZE;
	ph = mm_physical_allocate(memsz, false);
	bool result = mm_virtual_map(virt, ph, MAP_ZERO | attrib | PAGE_WRITE, memsz);
	mm_physical_increment_count(ph);
	if(!result) {
		printk_safe(KERN_PANIC, "[MMINODE]: Trying to remap mminode private section %x.\n", virt);
		PANIC(0, "[MMINODE]: Trying to remap mminode private section.", EINVAL);
	}
	int err = -1;
	/* Try to read the data. If this fails, we don't really have a good way 
	 * of telling userspace this...eh.
	 */
	size_t len = req_len;
	if(len + offset > (size_t)node->length)
		len = node->length - offset;
	if(offset < (size_t)node->length) {
		if(node->filesystem) {
			err = fs_inode_read(node, offset, len, (void *)virt);
			if(err < 0 || (size_t)err != len)
				printk(0, "[MMINODE]: read inode failed with %d.\n", err);
		}
	}
	mm_virtual_changeattr(virt, attrib, memsz);
	return ph;
}
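Unlike the shared path in Example #1, every call here allocates a fresh physical page, which suits private (copy-on-write-style) file mappings. A minimal usage sketch, assuming a hypothetical loader helper; load_private_page, map_base, and file_base are illustrative names:

/* Hypothetical loader helper; only the call contract follows the function above. */
static addr_t load_private_page(struct inode *node, addr_t map_base, size_t file_base, size_t pagenum)
{
	addr_t virt = map_base + pagenum * PAGE_SIZE;    /* map_base assumed page-aligned */
	size_t offset = file_base + pagenum * PAGE_SIZE; /* file_base assumed page-aligned */
	/* A fresh page per call: later writes stay private to this mapping. */
	return fs_inode_map_private_physical_page(node, virt, offset, PAGE_PRESENT | PAGE_USER, PAGE_SIZE);
}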
Example #4
void mm_init(struct multiboot *m)
{
	printk(KERN_DEBUG, "[MM]: Setting up Memory Management...\n");
	arch_mm_virtual_init(&kernel_context);
	cpu_interrupt_register_handler(14, &arch_mm_page_fault_handle);
	pmm_buddy_init();
	process_memorymap(m);
	slab_init(MEMMAP_KMALLOC_START, MEMMAP_KMALLOC_END);
	set_ksf(KSF_MMU);
	// Memory init, check!
	mm_reclaim_init();
	for(size_t i = 0; i <= (sizeof(struct pagedata) * maximum_page_number) / mm_page_size(1); i++) {
		mm_virtual_map(MEMMAP_FRAMECOUNT_START + i * mm_page_size(1), mm_physical_allocate(mm_page_size(1), true), PAGE_PRESENT | PAGE_WRITE, mm_page_size(1));
	}
	frames = (struct pagedata *)(MEMMAP_FRAMECOUNT_START);
	printk(0, "[MM]: allocated %d KB for page-frame counting.\n", sizeof(struct pagedata) * maximum_page_number / 1024);
}
Example #5
static void preexec(void)
{
	struct thread *t = current_thread;
	/* unmap all mappings, as POSIX specifies for exec */
	mm_destroy_all_mappings(t->process);
	mm_virtual_map(MEMMAP_SYSGATE_ADDRESS, sysgate_page, PAGE_PRESENT | PAGE_USER, PAGE_SIZE);
	/* we need to re-create the vmem for memory mappings */
	valloc_create(&(t->process->mmf_valloc), MEMMAP_MMAP_BEGIN, MEMMAP_MMAP_END, PAGE_SIZE, 0);
	mm_mmap(t->usermode_stack_start, CONFIG_STACK_PAGES * PAGE_SIZE,
			PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0, 0);
	mm_page_fault_test_mappings(t->usermode_stack_end - PAGE_SIZE, PF_CAUSE_USER | PF_CAUSE_WRITE);
	t->signal = t->signals_pending = 0;
	memset((void *)t->process->signal_act, 0, sizeof(struct sigaction) * NUM_SIGNALS);

	if(t->flags & THREAD_PTRACED) {
		tm_signal_send_thread(t, SIGTRAP);
	}
}
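A sketch of where this hook sits, under the assumption of a conventional exec path; do_exec_sketch and load_elf_image are illustrative names, and only the ordering around preexec() reflects the code above:

/* Hypothetical exec path; names other than preexec() are illustrative. */
static int do_exec_sketch(struct file *f)
{
	preexec();                  /* tear down the old mappings and signal state first */
	return load_elf_image(f);   /* then map the new image into the now-empty space */
}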
Example #6
int mm_allocate_dma_buffer(struct dma_region *d)
{
	/* One-time init: the first caller atomically sets dma_virtual_init and
	 * creates the DMA virtual-address allocator; every later caller skips this. */
	if (!atomic_exchange(&dma_virtual_init, true)) {
		valloc_create(&dma_virtual, MEMMAP_VIRTDMA_START, MEMMAP_VIRTDMA_END, mm_page_size(0), 0);
	}
	d->p.address = mm_physical_allocate(d->p.size, false);
	if (d->p.address == 0)
		return -1;

	struct valloc_region reg;
	int npages = (d->p.size - 1) / mm_page_size(0) + 1; /* round up to whole pages */
	valloc_allocate(&dma_virtual, &reg, npages);

	for (int i = 0; i < npages; i++)
		mm_virtual_map(reg.start + i * mm_page_size(0), d->p.address + i * mm_page_size(0), PAGE_PRESENT | PAGE_WRITE, mm_page_size(0));
	d->v = reg.start;
	return 0;
}
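A minimal usage sketch, assuming a hypothetical driver: driver_setup_dma, struct device_ctx, and device_set_buffer are illustrative names. Only the struct dma_region contract comes from the function above: set p.size before the call, then read back p.address (physical) and v (virtual):

/* Hypothetical driver snippet; the device-programming call is illustrative. */
static int driver_setup_dma(struct device_ctx *dev)
{
	struct dma_region dma;
	dma.p.size = 4 * mm_page_size(0);      /* request four base pages of contiguous memory */
	if(mm_allocate_dma_buffer(&dma) < 0)
		return -1;
	memset((void *)dma.v, 0, dma.p.size);  /* CPU access goes through the virtual mapping */
	device_set_buffer(dev, dma.p.address); /* the device is programmed with the physical address */
	return 0;
}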