/* Bring up the kernel memory-management subsystem.
 * Order matters here: the virtual-memory arch layer and page-fault handler
 * come up first, then the physical (buddy) allocator, then slab/kmalloc,
 * and only then the page-frame reference-count array, which itself needs
 * working physical allocation and virtual mapping.
 * m: the multiboot info block, used to discover the physical memory map. */
void mm_init(struct multiboot *m)
{
	printk(KERN_DEBUG, "[mm]: Setting up Memory Management...\n");
	/* install arch paging structures for the kernel's address-space context */
	arch_mm_virtual_init(&kernel_context);
	/* vector 14 is the x86 page-fault exception */
	cpu_interrupt_register_handler (14, &arch_mm_page_fault_handle);
	pmm_buddy_init();
	/* parse the multiboot memory map and hand usable regions to the PMM */
	process_memorymap(m);
	slab_init(MEMMAP_KMALLOC_START, MEMMAP_KMALLOC_END);
	/* mark the MMU as up so other subsystems may allocate */
	set_ksf(KSF_MMU);
	/* hey, look at that, we have happy memory times! */
	mm_reclaim_init();
	/* Map backing pages for the page-frame accounting array.
	 * NOTE(review): the `<=` bound maps one page more than strictly
	 * needed when the array size is an exact multiple of the page size —
	 * presumably intentional rounding-up slack; confirm before changing. */
	for(size_t i=0;i<=(sizeof(struct pagedata) * maximum_page_number) / mm_page_size(1);i++) {
		mm_virtual_map(MEMMAP_FRAMECOUNT_START + i * mm_page_size(1),
				mm_physical_allocate(mm_page_size(1), true),
				PAGE_PRESENT | PAGE_WRITE, mm_page_size(1));
	}
	frames = (struct pagedata *)(MEMMAP_FRAMECOUNT_START);
	/* NOTE(review): %d with a size_t expression — relies on this kernel's
	 * printk accepting it; %zu (or a cast) would be strictly correct. */
	printk(0, "[mm]: allocated %d KB for page-frame counting.\n",
			sizeof(struct pagedata) * maximum_page_number / 1024);
#if CONFIG_MODULES
	/* export the allocator API to loadable kernel modules */
	loader_add_kernel_symbol(slab_kmalloc);
	loader_add_kernel_symbol(slab_kfree);
	loader_add_kernel_symbol(mm_virtual_map);
	loader_add_kernel_symbol(mm_virtual_getmap);
	loader_add_kernel_symbol(mm_allocate_dma_buffer);
	loader_add_kernel_symbol(mm_free_dma_buffer);
	loader_add_kernel_symbol(mm_physical_allocate);
	loader_add_kernel_symbol(mm_physical_deallocate);
#endif
}
/* Tear down a DMA buffer previously created by mm_allocate_dma_buffer():
 * unmap every page of its virtual window, release the physical allocation,
 * and return the virtual range to the dma_virtual region allocator.
 * d: the region descriptor to free; its address fields are zeroed on exit.
 * Returns 0. */
int mm_free_dma_buffer(struct dma_region *d)
{
	/* number of pages covering d->p.size, rounded up to a whole page */
	int npages = ((d->p.size - 1) / mm_page_size(0)) + 1;
	for (int i = 0; i < npages; i++)
		mm_virtual_unmap(d->v + i * mm_page_size(0));
	mm_physical_deallocate(d->p.address);
	struct valloc_region reg;
	reg.flags = 0;
	reg.start = d->v;
	reg.npages = npages;
	/* fixed: source had mojibake '®' where '&reg' belongs */
	valloc_deallocate(&dma_virtual, &reg);
	/* clear the descriptor so a stale pointer can't be reused */
	d->p.address = d->v = 0;
	return 0;
}
/* Bring up the kernel memory-management subsystem.
 * NOTE(review): this is a second definition of mm_init in this file (see the
 * earlier variant that also exports symbols under CONFIG_MODULES) — two
 * definitions of the same external symbol will not link; one of the two
 * should be removed or renamed.
 * m: the multiboot info block, used to discover the physical memory map. */
void mm_init(struct multiboot *m)
{
	printk(KERN_DEBUG, "[MM]: Setting up Memory Management...\n");
	/* install arch paging structures for the kernel's address-space context */
	arch_mm_virtual_init(&kernel_context);
	/* vector 14 is the x86 page-fault exception */
	cpu_interrupt_register_handler(14, &arch_mm_page_fault_handle);
	pmm_buddy_init();
	/* parse the multiboot memory map and hand usable regions to the PMM */
	process_memorymap(m);
	slab_init(MEMMAP_KMALLOC_START, MEMMAP_KMALLOC_END);
	/* mark the MMU as up so other subsystems may allocate */
	set_ksf(KSF_MMU);
	// Memory init, check!
	mm_reclaim_init();
	/* Map backing pages for the page-frame accounting array.
	 * NOTE(review): `<=` maps one extra page when the array size divides
	 * the page size exactly — presumably deliberate slack; confirm. */
	for(size_t i = 0; i <= (sizeof(struct pagedata) * maximum_page_number) / mm_page_size(1); i++) {
		mm_virtual_map(MEMMAP_FRAMECOUNT_START + i * mm_page_size(1),
				mm_physical_allocate(mm_page_size(1), true),
				PAGE_PRESENT | PAGE_WRITE, mm_page_size(1));
	}
	frames = (struct pagedata *)(MEMMAP_FRAMECOUNT_START);
	/* NOTE(review): %d with a size_t expression — depends on this kernel's
	 * printk; %zu (or a cast) would be strictly correct. */
	printk(0, "[MM]: allocated %d KB for page-frame counting.\n",
			sizeof(struct pagedata) * maximum_page_number / 1024);
}
int mm_allocate_dma_buffer(struct dma_region *d) { if (!atomic_exchange(&dma_virtual_init, true)) { valloc_create(&dma_virtual, MEMMAP_VIRTDMA_START, MEMMAP_VIRTDMA_END, mm_page_size(0), 0); } d->p.address = mm_physical_allocate(d->p.size, false); if (d->p.address == 0) return -1; struct valloc_region reg; int npages = (d->p.size - 1) / mm_page_size(0) + 1; valloc_allocate(&dma_virtual, ®, npages); for (int i = 0; i < npages; i++) mm_virtual_map(reg.start + i * mm_page_size(0), d->p.address + i * mm_page_size(0), PAGE_PRESENT | PAGE_WRITE, mm_page_size(0)); d->v = reg.start; return 0; }