int ofmem_posix_memalign( void **memptr, size_t alignment, size_t size ) { ofmem_t *ofmem = ofmem_arch_get_private(); alloc_desc_t *d, **pp; void *ret; ucell top; phys_addr_t pa; if( !size ) return ENOMEM; if( !ofmem->next_malloc ) ofmem->next_malloc = (char*)ofmem_arch_get_malloc_base(); size = align_size(size + sizeof(alloc_desc_t), alignment); /* look in the freelist */ for( pp=&ofmem->mfree; *pp && (**pp).size < size; pp = &(**pp).next ) { } /* waste at most 4K by taking an entry from the freelist */ if( *pp && (**pp).size > size + 0x1000 ) { /* Alignment should be on physical not virtual address */ pa = va2pa((uintptr_t)*pp + sizeof(alloc_desc_t)); pa = align_ptr(pa, alignment); ret = (void *)pa2va(pa); memset( ret, 0, (**pp).size - sizeof(alloc_desc_t) ); *pp = (**pp).next; *memptr = ret; return 0; } top = ofmem_arch_get_heap_top(); /* Alignment should be on physical not virtual address */ pa = va2pa((uintptr_t)ofmem->next_malloc + sizeof(alloc_desc_t)); pa = align_ptr(pa, alignment); ret = (void *)pa2va(pa); if( pointer2cell(ret) + size > top ) { printk("out of malloc memory (%x)!\n", size ); return ENOMEM; } d = (alloc_desc_t*)((uintptr_t)ret - sizeof(alloc_desc_t)); ofmem->next_malloc += size; d->next = NULL; d->size = size; memset( ret, 0, size - sizeof(alloc_desc_t) ); *memptr = ret; return 0; }
void do_no_page(void *addr) { pde_t *page_dir = (pde_t *)current->cr3; pte_t *page_tbl = 0; unsigned long page = alloc_one_page(0); assert(page != 0); int npde = get_npd(addr); int npte = get_npt(addr); if(page_dir[npde] == 0) { page_tbl = (pte_t *) alloc_one_page(0); assert(page_tbl != 0); memset((void *) page_tbl, 0, PAGE_SIZE); page_dir[npde] = va2pa(page_tbl) | PAGE_P | PAGE_WR | PAGE_US; } page_tbl = (pte_t *)pa2va(PAGE_ALIGN(page_dir[npde])); page_tbl[npte] = va2pa(page) | PAGE_P | PAGE_WR | PAGE_US; load_cr3(current); }
void do_wp_page(void *addr)
{
    /*
     * Write-protect (copy-on-write) fault handler: `addr` was written
     * through a read-only user mapping.  If the backing frame is still
     * shared, copy it to a private page first; then make the PTE
     * writable and reload CR3 to flush the TLB.
     */
    //printk("%s addr %08x current %08x\n", __func__, (unsigned long)addr, current);

    /* Only user-space addresses may take a COW fault. */
    if((unsigned long) addr >= PAGE_OFFSET) {
        panic("%s invalid addr", __func__);
    }

    int npde = get_npd(addr);
    int npte = get_npt(addr);

    pde_t *page_dir = (pde_t *)current->cr3;
    /* PAGE_ALIGN strips the flag bits, leaving the table's physical base. */
    pte_t *page_tbl = pa2va(PAGE_ALIGN(page_dir[npde]));

    unsigned long wp_pa_addr = PAGE_ALIGN(page_tbl[npte]);
    page_t *page = pa2page(wp_pa_addr);

    /* NOTE(review): `count` appears to hold the number of *extra*
     * references, so count > 0 means the frame is still shared —
     * confirm against the page allocator's refcounting convention. */
    if(page->count > 0) {
        /* Drop our shared reference and remap this PTE to a private copy. */
        page->count --;
        unsigned long flags = PAGE_FLAGS(page_tbl[npte]);
        unsigned long wp_va_addr = (unsigned long) pa2va(wp_pa_addr);
        /* alloc_one_page() returns a kernel virtual address (it is
         * passed to va2pa() below). */
        unsigned long newtbl = alloc_one_page(0);
        assert(newtbl != 0);
        memcpy((void *)newtbl, (void *)wp_va_addr, PAGE_SIZE);
        /* Install the copy, preserving the original PTE flags. */
        page_tbl[npte] = va2pa(newtbl) | flags;
    }

    /* Sole owner now (or always was): allow writes. */
    page_tbl[npte] |= PAGE_WR;
#if 0
    page_tbl[npte] |= PAGE_US;
    page_dir[npde] |= PAGE_WR;
    page_dir[npde] |= PAGE_US;
#endif

    /* Reload CR3 to flush the stale read-only TLB entry. */
    load_cr3(current);
}
void * mem_alloc(struct mem *t, int size, int align) { char *p; unsigned long pa; // The alignment restrictions refer to physical, not virtual // addresses pa = va2pa((unsigned long)t->curp) + (align - 1); pa &= ~(align - 1); p = (char *)pa2va(pa); if ((unsigned long)p >= (unsigned long)t->uplim || (unsigned long)p + size > (unsigned long)t->uplim) return NULL; t->curp = p + size; return p; }
void init_boot_params(multiboot_info_t *p)
{
    /*
     * Capture the pieces of the multiboot information block that the
     * kernel needs later: command line, conventional/extended memory
     * sizes (converted from KB to bytes), boot device, and the BIOS
     * memory map (copied into our own e820 table).
     */
    boot_params.cmdline = (char *) p->cmdline;
    parse_cmdline(boot_params.cmdline);

    /* mem_lower/mem_upper are reported in KB; keep bytes internally.
     * 32-bit arithmetic is sufficient here. */
    boot_params.mem_lower = p->mem_lower << 10;
    boot_params.mem_upper = p->mem_upper << 10;
    boot_params.boot_device = p->boot_device;

    /* NOTE(review): this assumes every BIOS map record is exactly
     * sizeof(memory_map_t); the multiboot spec allows variable-sized
     * records (each carries its own `size` field) — confirm the
     * bootloader in use always emits fixed-size entries. */
    boot_params.e820map.map_cnt = p->mmap_length / sizeof(memory_map_t);

    memory_map_t *entry = (memory_map_t *) pa2va(p->mmap_addr);
    unsigned int i;
    for (i = 0; i < boot_params.e820map.map_cnt; i++, entry++) {
        /* Only the low 32 bits of address/length are kept. */
        boot_params.e820map.map[i].addr = entry->base_addr_low;
        boot_params.e820map.map[i].size = entry->length_low;
        boot_params.e820map.map[i].type = entry->type;
    }
}