/* * Initialize page allocator. * page_init() must be called prior to other memory manager's * initializations. */ void page_init(void) { struct physmem *ram; int i; total_size = 0; bootdisk_size = 0; page_head.next = page_head.prev = &page_head; /* * First, create a free list from the boot information. */ for (i = 0; i < bootinfo->nr_rams; i++) { ram = &bootinfo->ram[i]; if (ram->type == MT_USABLE) { page_free((void *)ram->base, ram->size); total_size += ram->size; } } /* * Then, reserve un-usable memory. */ for (i = 0; i < bootinfo->nr_rams; i++) { ram = &bootinfo->ram[i]; switch (ram->type) { case MT_BOOTDISK: bootdisk_size = ram->size; /* FALLTHROUGH */ case MT_MEMHOLE: total_size -= ram->size; /* FALLTHROUGH */ case MT_RESERVED: page_reserve((void *)ram->base, ram->size); break; } } #ifdef DEBUG page_dump(); #endif }
/*
 * Machine-dependent startup code
 *
 * Runs the early platform-specific boot steps: reserves the system
 * page so the page allocator never hands it out, installs the
 * exception vector table, and (when an MMU is configured) brings the
 * MMU up with the static machine mapping table.
 */
void
machine_startup(void)
{
	/*
	 * Destination for the exception vectors.  Address 0 —
	 * presumably this platform keeps its vector table at the
	 * bottom of the address space (e.g. ARM low vectors);
	 * confirm against the port's memory map.
	 */
	void *vector_offset = 0;

	/*
	 * Reserve system pages.
	 * kvtop() translates the kernel-virtual SYSPAGE address into
	 * the physical address that page_reserve() operates on.
	 */
	page_reserve(kvtop(SYSPAGE), SYSPAGESZ);

	/*
	 * Copy exception vectors.
	 * NOTE(review): 0x3000 is a magic byte count — verify it
	 * matches the linked size of the exception_vector image.
	 */
	memcpy(vector_offset, &exception_vector, 0x3000);

#ifdef CONFIG_MMU
	/*
	 * Initialize MMU using the board's static mapping table.
	 */
	mmu_init(mmumap_table);
#endif
}
/*
 * Allocate virtual memory in the given map.
 *
 * @map:      target address map.
 * @addr:     in/out.  When anywhere is 0, *addr is the requested base
 *            (truncated to a page boundary).  On success *addr receives
 *            the actual region base.
 * @size:     requested length in bytes; rounded up to whole pages.
 * @anywhere: nonzero to let the allocator pick the location.
 *
 * Returns 0 on success, EINVAL on a bad size/address, or ENOMEM when
 * pages or the region descriptor cannot be obtained.  The new region
 * is mapped read/write and zero-filled.
 */
static int
do_allocate(vm_map_t map, void **addr, size_t size, int anywhere)
{
	struct region *reg;
	char *start, *end;

	if (size == 0)
		return EINVAL;

	/*
	 * Allocate region, and reserve pages for it.
	 */
	if (anywhere) {
		size = (size_t)PAGE_ALIGN(size);
		/*
		 * PAGE_ALIGN wraps to 0 when size is within one page of
		 * SIZE_MAX; reject instead of calling page_alloc(0).
		 */
		if (size == 0)
			return EINVAL;
		if ((start = page_alloc(size)) == 0)
			return ENOMEM;
	} else {
		start = (char *)PAGE_TRUNC(*addr);
		end = (char *)PAGE_ALIGN(start + size);
		/*
		 * Guard against the range wrapping past the top of the
		 * address space, which would make end precede start.
		 */
		if (end <= start)
			return EINVAL;
		size = (size_t)(end - start);
		if (page_reserve(start, size))
			return EINVAL;
	}
	reg = region_create(&map->head, start, size);
	if (reg == NULL) {
		/* Undo the page reservation/allocation on failure. */
		page_free(start, size);
		return ENOMEM;
	}
	reg->flags = REG_READ | REG_WRITE;

	/* Zero fill */
	memset(start, 0, size);
	*addr = reg->addr;
	return 0;
}