Example #1
void mm_init(struct multiboot *m)
{
	printk(KERN_DEBUG, "[mm]: Setting up Memory Management...\n");
	arch_mm_virtual_init(&kernel_context);
	cpu_interrupt_register_handler(14, &arch_mm_page_fault_handle);
	pmm_buddy_init();
	process_memorymap(m);
	slab_init(MEMMAP_KMALLOC_START, MEMMAP_KMALLOC_END);
	set_ksf(KSF_MMU);
	/* hey, look at that, we have happy memory times! */
	mm_reclaim_init();
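	/* Map enough pages at MEMMAP_FRAMECOUNT_START to back one struct pagedata
	 * per physical page frame (the <= bound may over-map by one page, which is harmless). */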
	for(size_t i = 0; i <= (sizeof(struct pagedata) * maximum_page_number) / mm_page_size(1); i++) {
		mm_virtual_map(MEMMAP_FRAMECOUNT_START + i * mm_page_size(1),
				mm_physical_allocate(mm_page_size(1), true),
				PAGE_PRESENT | PAGE_WRITE, mm_page_size(1));
	}
	frames = (struct pagedata *)(MEMMAP_FRAMECOUNT_START);
	printk(0, "[mm]: allocated %d KB for page-frame counting.\n", sizeof(struct pagedata) * maximum_page_number / 1024);
#if CONFIG_MODULES
	loader_add_kernel_symbol(slab_kmalloc);
	loader_add_kernel_symbol(slab_kfree);
	loader_add_kernel_symbol(mm_virtual_map);
	loader_add_kernel_symbol(mm_virtual_getmap);
	loader_add_kernel_symbol(mm_allocate_dma_buffer);
	loader_add_kernel_symbol(mm_free_dma_buffer);
	loader_add_kernel_symbol(mm_physical_allocate);
	loader_add_kernel_symbol(mm_physical_deallocate);
#endif
}
Example #2
addr_t fs_inode_map_private_physical_page(struct inode *node, addr_t virt, size_t offset, int attrib, size_t req_len)
{
	addr_t ph;
	ASSERT(!(virt & ~PAGE_MASK));
	ASSERT(!(offset & ~PAGE_MASK));
	// Specify MAP_ZERO, since read_inode may not fill up the whole page.
	size_t memsz = PAGE_SIZE;
	ph = mm_physical_allocate(memsz, false);
	bool result = mm_virtual_map(virt, ph, MAP_ZERO | attrib | PAGE_WRITE, memsz);
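	/* Take a reference on the new frame for this private mapping. */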
	mm_physical_increment_count(ph);
	if(!result) {
		printk_safe(KERN_PANIC, "[MMINODE]: Trying to remap mminode private section %x.\n", virt);
		PANIC(0, "[MMINODE]: Trying to remap mminode private section.", EINVAL);
	}
	int err = -1;
	/* Try to read the data. If this fails, we don't really have a good way 
	 * of telling userspace this...eh.
	 */
	size_t len = req_len;
	if(len + offset > (size_t)node->length)
		len = node->length - offset;
	if(offset < (size_t)node->length) {
		if(node->filesystem) {
			err = fs_inode_read(node, offset, len, (void *)virt);
			if(err < 0 || (size_t)err != len)
				printk(0, "[MMINODE]: read inode failed with %d.\n", err);
		}
	}
	mm_virtual_changeattr(virt, attrib, memsz);
	return ph;
}
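A minimal usage sketch (not from the source): a hypothetical fault handler for a private file mapping. Everything except fs_inode_map_private_physical_page, PAGE_MASK, and PAGE_SIZE is an assumption for illustration.

/* Hypothetical caller; only fs_inode_map_private_physical_page, PAGE_MASK and PAGE_SIZE come from the code above. */
static addr_t fault_in_private_page(struct inode *node, addr_t fault_addr, size_t file_offset, int attrib)
{
	addr_t page_virt = fault_addr & PAGE_MASK;   /* page-align the faulting address */
	size_t page_off = file_offset & PAGE_MASK;   /* page-align the file offset */
	/* Allocates a fresh frame, maps it at page_virt, and reads the file contents into it. */
	return fs_inode_map_private_physical_page(node, page_virt, page_off, attrib, PAGE_SIZE);
}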
Example #3
/* Try to map a physical page of an inode to a virtual address. If FS_INODE_POPULATE
 * is passed in flags and the page doesn't exist yet, this allocates a physical
 * page, maps it, and loads the data. If that flag is not passed, it simply
 * tries to return the existing physical page.
 */
addr_t fs_inode_map_shared_physical_page(struct inode *node, addr_t virt, size_t offset, int flags, int attrib)
{
	ASSERT(!(virt & ~PAGE_MASK));
	ASSERT(!(offset & ~PAGE_MASK));
	// Test if we have any shared mappings...
	if(!(node->flags & INODE_PCACHE)) {
		return 0;
	}
	mutex_acquire(&node->mappings_lock);
	int page_number = offset / PAGE_SIZE;
	struct physical_page *entry;
	if((entry = hash_lookup(&node->physicals, &page_number, sizeof(page_number))) == NULL) {
		mutex_release(&node->mappings_lock);
		return 0;
	}
	ASSERT(entry->count);
	/* So, we don't have to worry about someone decreasing the count to zero while we're working,
	   since a process never calls this function without being responsible for one of the counts. */
	mutex_acquire(&entry->lock);
	if(!entry->page && (flags & FS_INODE_POPULATE))
	{
		// Map a new page into virt, and load data into it.
		entry->page = mm_physical_allocate(0x1000, false);
		if(!mm_virtual_map(virt, entry->page, MAP_ZERO | attrib | PAGE_WRITE, 0x1000))
			PANIC(0, "[MMINODE]: Trying to remap mminode shared section.", EINVAL);
		// YES. DO THIS TWICE. THIS IS NOT A TYPO.
		mm_physical_increment_count(entry->page); /* Once for the map. */
		mm_physical_increment_count(entry->page); /* Once for entry->page. */

		int err = -1;
		/* Try to read the data. If this fails, we don't really have a good way 
		 * of telling userspace this...eh.
		 */
		size_t len = PAGE_SIZE;
		if(len + offset > (size_t)node->length)
			len = node->length - offset;
		if(offset < (size_t)node->length) {
			if(node->filesystem && (err = fs_inode_read(node, offset, len, (void *)virt)) < 0)
				printk(0, "[MMINODE]: Read inode failed with %d.\n", err);
		}
		mm_virtual_changeattr(virt, attrib, 0x1000);
		atomic_fetch_add(&node->mapped_pages_count, 1);
	} else if(entry->page) {
		mm_physical_increment_count(entry->page);
		if(!mm_virtual_map(virt, entry->page, attrib, 0x1000))
			PANIC(0, "[MMINODE]: Trying to remap mminode shared section.", EINVAL);
	}
	addr_t ret = entry->page;
	mutex_release(&entry->lock);
	mutex_release(&node->mappings_lock);
	return ret;
}
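A minimal usage sketch (not from the source), showing the populate-on-miss behavior the header comment describes. The wrapper name and return convention are assumptions for illustration.

/* Hypothetical caller; only fs_inode_map_shared_physical_page and FS_INODE_POPULATE come from the code above. */
static int map_shared_page_example(struct inode *node, addr_t virt, size_t offset, int attrib)
{
	/* Ask for the cached page, populating it from the filesystem if it isn't resident yet. */
	addr_t phys = fs_inode_map_shared_physical_page(node, virt, offset, FS_INODE_POPULATE, attrib);
	if(!phys)
		return -1; /* inode has no page cache, or no cache entry exists for this offset */
	return 0;      /* virt now maps the shared physical page */
}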
Example #4
void mm_init(struct multiboot *m)
{
	printk(KERN_DEBUG, "[MM]: Setting up Memory Management...\n");
	arch_mm_virtual_init(&kernel_context);
	cpu_interrupt_register_handler(14, &arch_mm_page_fault_handle);
	pmm_buddy_init();
	process_memorymap(m);
	slab_init(MEMMAP_KMALLOC_START, MEMMAP_KMALLOC_END);
	set_ksf(KSF_MMU);
	// Memory init, check!
	mm_reclaim_init();
	for(size_t i = 0; i <= (sizeof(struct pagedata) * maximum_page_number) / mm_page_size(1); i++) {
		mm_virtual_map(MEMMAP_FRAMECOUNT_START + i * mm_page_size(1), mm_physical_allocate(mm_page_size(1), true), PAGE_PRESENT | PAGE_WRITE, mm_page_size(1));
	}
	frames = (struct pagedata *)(MEMMAP_FRAMECOUNT_START);
	printk(0, "[MM]: allocated %d KB for page-frame counting.\n", sizeof(struct pagedata) * maximum_page_number / 1024);
}
Example #5
int mm_allocate_dma_buffer(struct dma_region *d)
{
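	/* One-time initialization of the DMA virtual-address allocator:
	 * atomic_exchange guarantees only the first caller runs valloc_create. */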
	if (!atomic_exchange(&dma_virtual_init, true)) {
		valloc_create(&dma_virtual, MEMMAP_VIRTDMA_START, MEMMAP_VIRTDMA_END, mm_page_size(0), 0);
	}
	d->p.address = mm_physical_allocate(d->p.size, false);
	if (d->p.address == 0)
		return -1;

	struct valloc_region reg;
	int npages = (d->p.size - 1) / mm_page_size(0) + 1;
	valloc_allocate(&dma_virtual, &reg, npages);

	for (int i = 0; i < npages; i++)
		mm_virtual_map(reg.start + i * mm_page_size(0), d->p.address + i * mm_page_size(0), PAGE_PRESENT | PAGE_WRITE, mm_page_size(0));
	d->v = reg.start;
	return 0;
}
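A minimal usage sketch (not from the source): a hypothetical driver setting up a DMA buffer. Only struct dma_region, its p.size / p.address / v fields, and mm_allocate_dma_buffer come from the code above; the rest is illustrative.

/* Hypothetical driver code. */
static int example_setup_dma(void)
{
	struct dma_region d;
	d.p.size = 4 * mm_page_size(0);   /* request four pages for a device buffer */
	if(mm_allocate_dma_buffer(&d) < 0)
		return -1;                    /* physical allocation failed */
	/* d.v is the kernel-virtual mapping; d.p.address is the physical address given to the device. */
	memset((void *)d.v, 0, d.p.size);
	return 0;
}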
Example #6
void tm_init_multitasking(void)
{
	printk(KERN_DEBUG, "[sched]: Starting multitasking system...\n");
	sysgate_page = mm_physical_allocate(PAGE_SIZE, true);
	mm_physical_memcpy((void *)sysgate_page,
				(void *)signal_return_injector, MEMMAP_SYSGATE_ADDRESS_SIZE, PHYS_MEMCPY_MODE_DEST);

	process_table = hash_create(0, 0, 128);

	process_list = linkedlist_create(0, LINKEDLIST_MUTEX);
	mutex_create(&process_refs_lock, 0);
	mutex_create(&thread_refs_lock, 0);
	
	thread_table = hash_create(0, 0, 128);

	struct thread *thread = kmalloc(sizeof(struct thread));
	struct process *proc = kernel_process = kmalloc(sizeof(struct process));

	proc->refs = 2;
	thread->refs = 1;
	hash_insert(process_table, &proc->pid, sizeof(proc->pid), &proc->hash_elem, proc);
	hash_insert(thread_table, &thread->tid, sizeof(thread->tid), &thread->hash_elem, thread);
	linkedlist_insert(process_list, &proc->listnode, proc);

	valloc_create(&proc->mmf_valloc, MEMMAP_MMAP_BEGIN, MEMMAP_MMAP_END, PAGE_SIZE, 0);
	linkedlist_create(&proc->threadlist, 0);
	mutex_create(&proc->map_lock, 0);
	mutex_create(&proc->stacks_lock, 0);
	mutex_create(&proc->fdlock, 0);
	hash_create(&proc->files, HASH_LOCKLESS, 64);
	proc->magic = PROCESS_MAGIC;
	blocklist_create(&proc->waitlist, 0, "process-waitlist");
	memcpy(&proc->vmm_context, &kernel_context, sizeof(kernel_context));
	thread->process = proc; /* we have to do this early, so that the vmm system can use the lock... */
	thread->state = THREADSTATE_RUNNING;
	thread->magic = THREAD_MAGIC;
	workqueue_create(&thread->resume_work, 0);
	thread->kernel_stack = (addr_t)&initial_kernel_stack;
	spinlock_create(&thread->status_lock);

	primary_cpu->active_queue = tqueue_create(0, 0);
	primary_cpu->idle_thread = thread;
	primary_cpu->numtasks = 1;
	ticker_create(&primary_cpu->ticker, 0);
	workqueue_create(&primary_cpu->work, 0);
	tm_thread_add_to_process(thread, proc);
	tm_thread_add_to_cpu(thread, primary_cpu);
	atomic_fetch_add_explicit(&running_processes, 1, memory_order_relaxed);
	atomic_fetch_add_explicit(&running_threads, 1, memory_order_relaxed);
	set_ksf(KSF_THREADING);
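	/* Store the thread pointer at the base of its kernel stack; presumably
	 * arch_tm_get_current_thread recovers it from the stack pointer later. */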
	*(struct thread **)(thread->kernel_stack) = thread;
	primary_cpu->flags |= CPU_RUNNING;

#if CONFIG_MODULES
	loader_add_kernel_symbol(tm_thread_delay_sleep);
	loader_add_kernel_symbol(tm_thread_delay);
	loader_add_kernel_symbol(tm_timing_get_microseconds);
	loader_add_kernel_symbol(tm_thread_set_state);
	loader_add_kernel_symbol(tm_thread_exit);
	loader_add_kernel_symbol(tm_thread_poke);
	loader_add_kernel_symbol(tm_thread_block);
	loader_add_kernel_symbol(tm_thread_got_signal);
	loader_add_kernel_symbol(tm_thread_unblock);
	loader_add_kernel_symbol(tm_blocklist_wakeall);
	loader_add_kernel_symbol(kthread_create);
	loader_add_kernel_symbol(kthread_wait);
	loader_add_kernel_symbol(kthread_join);
	loader_add_kernel_symbol(kthread_kill);
	loader_add_kernel_symbol(tm_schedule);
	loader_add_kernel_symbol(arch_tm_get_current_thread);
#endif
}