/**
 * \brief Initialize the pinned region
 *
 * Allocates a region of virtual address space and initializes its state.
 */
errval_t vspace_pinned_init(void)
{
    errval_t err;

    struct pinned_state *state = get_current_pinned_state();
    struct vspace *vspace = get_current_vspace();

    err = memobj_create_pinned(&state->memobj, VSPACE_PINNED_SIZE, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_CREATE_PINNED);
    }

    err = vregion_map(&state->vregion, vspace, (struct memobj*)&state->memobj,
                      0, VSPACE_PINNED_SIZE, VREGION_FLAGS_READ_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VREGION_MAP);
    }

    state->offset = 0;
    thread_mutex_init(&state->mutex);
    slab_init(&state->vregion_list_slab,
              VSPACE_PINNED_UNIT * sizeof(struct vregion_list), NULL);
    slab_init(&state->frame_list_slab,
              VSPACE_PINNED_UNIT * sizeof(struct memobj_frame_list), NULL);

    return SYS_ERR_OK;
}
errval_t debug_create(struct debug_q **q, struct devq *other_q)
{
    errval_t err;
    struct debug_q *que;

    que = calloc(1, sizeof(struct debug_q));
    assert(que);

    slab_init(&que->alloc, sizeof(struct memory_ele), slab_default_refill);
    slab_init(&que->alloc_list, sizeof(struct memory_list), slab_default_refill);

    que->q = other_q;
    err = devq_init(&que->my_q, false);
    if (err_is_fail(err)) {
        return err;
    }

    que->my_q.f.reg = debug_register;
    que->my_q.f.dereg = debug_deregister;
    que->my_q.f.ctrl = debug_control;
    que->my_q.f.notify = debug_notify;
    que->my_q.f.enq = debug_enqueue;
    que->my_q.f.deq = debug_dequeue;
    que->my_q.f.destroy = debug_destroy;

    *q = que;
    return SYS_ERR_OK;
}
void mm_init(struct multiboot *m)
{
    printk(KERN_DEBUG, "[mm]: Setting up Memory Management...\n");
    arch_mm_virtual_init(&kernel_context);
    cpu_interrupt_register_handler(14, &arch_mm_page_fault_handle);
    pmm_buddy_init();
    process_memorymap(m);
    slab_init(MEMMAP_KMALLOC_START, MEMMAP_KMALLOC_END);
    set_ksf(KSF_MMU);
    /* hey, look at that, we have happy memory times! */
    mm_reclaim_init();
    for(size_t i = 0; i <= (sizeof(struct pagedata) * maximum_page_number) / mm_page_size(1); i++) {
        mm_virtual_map(MEMMAP_FRAMECOUNT_START + i * mm_page_size(1),
                       mm_physical_allocate(mm_page_size(1), true),
                       PAGE_PRESENT | PAGE_WRITE, mm_page_size(1));
    }
    frames = (struct pagedata *)(MEMMAP_FRAMECOUNT_START);
    printk(0, "[mm]: allocated %d KB for page-frame counting.\n",
           sizeof(struct pagedata) * maximum_page_number / 1024);
#if CONFIG_MODULES
    loader_add_kernel_symbol(slab_kmalloc);
    loader_add_kernel_symbol(slab_kfree);
    loader_add_kernel_symbol(mm_virtual_map);
    loader_add_kernel_symbol(mm_virtual_getmap);
    loader_add_kernel_symbol(mm_allocate_dma_buffer);
    loader_add_kernel_symbol(mm_free_dma_buffer);
    loader_add_kernel_symbol(mm_physical_allocate);
    loader_add_kernel_symbol(mm_physical_deallocate);
#endif
}
/* create a slab and add to kmem_cache_t */
int cache_grow(kmem_cache_t *cachep)
{
    if(cachep == NULL || cachep->obj_size == 0) {
        return -1;
    }
    /*DD("----- cache %s grow -----", cachep->name);*/
    void *ptr = NULL;
    struct slab *slabp = NULL;
    int obj_num = 0;

    ptr = slab_alloc(cachep);
    slabp = (struct slab *)ptr;

    slab_estimate(cachep->obj_size, &obj_num);
    cachep->obj_num = obj_num;
    cachep->nr_pages += pow(2, DEFAULT_SLAB_PAGES);

    ptr += sizeof(struct slab);
    slab_init(cachep, slabp, ptr);
    kmem_add_slab(cachep, slabp);
    return 0;
}
int hook_init2()
{
    if(g_capstone != 0) {
        cs_close(&g_capstone);
    }

    cs_opt_mem cs_mem;
    cs_mem.malloc = &_cs_malloc;
    cs_mem.calloc = &_cs_calloc;
    cs_mem.realloc = &_cs_realloc;
    cs_mem.free = &_cs_free;

    // TODO Is there an alternative besides doing your own implementation?
    cs_mem.vsnprintf = &vsnprintf;

    cs_option(0, CS_OPT_MEM, (size_t) (uintptr_t) &cs_mem);
    _capstone_init();

    // Memory for function stubs of all the hooks.
    slab_init(&g_function_stubs, 64, 128, PAGE_EXECUTE_READWRITE);

    // TODO At the moment this only works on Vista+, not on Windows XP. As
    // shown by Brad Spengler it's fairly trivial to achieve the same on
    // Windows XP but for now.. it's fine.
    register_dll_notification(&_ldr_dll_notification, NULL);
    return 0;
}
//pmm_init - setup a pmm to manage physical memory, build PDT&PT to setup paging mechanism
//         - check the correctness of pmm & paging mechanism, print PDT&PT
void pmm_init(void)
{
    init_pmm_manager();
    page_init();
#ifndef NOCHECK
    //check_alloc_page();
#endif

    boot_pgdir = boot_alloc_page();
    memset(boot_pgdir, 0, PGSIZE);
    boot_pgdir_pa = PADDR(boot_pgdir);
    current_pgdir_pa = boot_pgdir_pa;
#ifndef NOCHECK
    //check_pgdir();
#endif

    static_assert(KERNBASE % PTSIZE == 0 && KERNTOP % PTSIZE == 0);

    boot_pgdir[PDX(VPT)] = PADDR(boot_pgdir) | PTE_P | PTE_SPR_R | PTE_SPR_W | PTE_A | PTE_D;
    boot_map_segment(boot_pgdir, KERNBASE, RAM_SIZE, 0,
                     PTE_SPR_R | PTE_SPR_W | PTE_A | PTE_D);

    enable_paging();
#ifndef NOCHECK
    //check_boot_pgdir();
#endif

    print_pgdir(kprintf);
    slab_init();
}
void vm_mem_bootstrap(void)
{
    vm_offset_t start, end;

    /*
     * Initializes resident memory structures.
     * From here on, all physical memory is accounted for,
     * and we use only virtual addresses.
     */
    vm_page_bootstrap(&start, &end);

    /*
     * Initialize other VM packages
     */
    slab_bootstrap();
    vm_object_bootstrap();
    vm_map_init();
    kmem_init(start, end);
    pmap_init();
    slab_init();
    kalloc_init();
    vm_fault_init();
    vm_page_module_init();
    memory_manager_default_init();
}
/* create a slab and add to kmem_cache_t */
int cache_grow(struct kmem_cache_t *cachep)
{
    if(cachep == NULL || cachep->obj_size == 0) {
        return -1;
    }

    void *ptr;
    struct slab *slabp = NULL;
    /*unsigned long slab_size;*/
    int obj_num = 0;

    ptr = slab_alloc(cachep);
    slabp = (struct slab *)ptr;

    slab_estimate(cachep->obj_size, &obj_num);
    cachep->obj_num = obj_num;
    cachep->nr_pages += pow(2, DEFAULT_SLAB_PAGES);

    ptr += sizeof(struct slab);
    slab_init(cachep, slabp, ptr);
    kmem_add_slab(cachep, slabp);
    return 0;
}
/**
 * \brief Initialize the pmap object
 */
errval_t pmap_init(struct pmap *pmap, struct vspace *vspace, struct capref vnode,
                   struct slot_allocator *opt_slot_alloc)
{
    struct pmap_arm *pmap_arm = (struct pmap_arm *)pmap;

    /* Generic portion */
    pmap->f = pmap_funcs;
    pmap->vspace = vspace;

    // Slab allocator for vnodes
    slab_init(&pmap_arm->slab, sizeof(struct vnode), NULL);
    slab_grow(&pmap_arm->slab, pmap_arm->slab_buffer, sizeof(pmap_arm->slab_buffer));

    pmap_arm->root.is_vnode = true;
    pmap_arm->root.u.vnode.cap = vnode;
    pmap_arm->root.next = NULL;
    pmap_arm->root.u.vnode.children = NULL;

    return SYS_ERR_OK;
}
int mm_init(void)
{
    pmm_init();
    slab_init();
    return E_OK;
}
void init_timer()
{
    slab_id = slab_init(sizeof(struct timer), 10);
    tc = 0;
    head.nxt = head.pre = NULL;
    head.now = 1;
    head.count = 1;
    head.cb = add_tc;
}
static void iio_start()
{
    nioDbg("Starting IIO ...");
    slab_init(&apictx->msg_pool, MSG_POOL_SIZE, sizeof(struct qnio_msg), 0, NULL);
    apictx->devices = new_qnio_map(compare_key, NULL, NULL);
    return;
}
//pmm_init - setup a pmm to manage physical memory, build PDT&PT to setup paging mechanism
//         - check the correctness of pmm & paging mechanism, print PDT&PT
void pmm_init(void)
{
    //We need to alloc/free the physical memory (granularity is 4KB or other size).
    //So a framework of physical memory manager (struct pmm_manager) is defined in pmm.h.
    //First we should init a physical memory manager (pmm) based on the framework.
    //Then pmm can alloc/free the physical memory.
    //Now the first_fit/best_fit/worst_fit/buddy_system pmm are available.
    init_pmm_manager();

    // detect physical memory space, reserve already used memory,
    // then use pmm->init_memmap to create free page list
    page_init();

    //use pmm->check to verify the correctness of the alloc/free function in a pmm
    check_alloc_page();

    // create boot_pgdir, an initial page directory (Page Directory Table, PDT)
    boot_pgdir = boot_alloc_page();
    memset(boot_pgdir, 0, PGSIZE);
    boot_cr3 = PADDR(boot_pgdir);

    check_pgdir();

    static_assert(KERNBASE % PTSIZE == 0 && KERNTOP % PTSIZE == 0);

    // recursively insert boot_pgdir in itself
    // to form a virtual page table at virtual address VPT
    boot_pgdir[PDX(VPT)] = PADDR(boot_pgdir) | PTE_P | PTE_W;

    // map all physical memory to linear memory with base linear addr KERNBASE
    // linear_addr KERNBASE ~ KERNBASE+KMEMSIZE = phy_addr 0 ~ KMEMSIZE
    // But shouldn't use this map until enable_paging() & gdt_init() finished.
    boot_map_segment(boot_pgdir, KERNBASE, KMEMSIZE, 0, PTE_W);

    // temporary map:
    // virtual_addr 3G~3G+4M = linear_addr 0~4M = linear_addr 3G~3G+4M = phy_addr 0~4M
    boot_pgdir[0] = boot_pgdir[PDX(KERNBASE)];
    boot_pgdir[1] = boot_pgdir[PDX(KERNBASE) + 1];

    enable_paging();

    // reload gdt (third time, the last time) to map all physical memory
    // virtual_addr 0~4G = linear_addr 0~4G
    // then set kernel stack (ss:esp) in TSS, setup TSS in gdt, load TSS
    gdt_init();

    // disable the map of virtual_addr 0~4M
    boot_pgdir[0] = boot_pgdir[1] = 0;

    // now the basic virtual memory map (see memlayout.h) is established.
    // check the correctness of the basic virtual memory map.
    check_boot_pgdir();

    print_pgdir(kprintf);

    slab_init();
}
struct slab *
slab_pool_alloc(struct slab_pool *so, size_t nitem, size_t isize)
{
    assert_isize(isize);
    assert_nitem(nitem);

    struct slab *sa = slab_alloc((struct slab *)so);
    slab_init(sa, isize, isize * nitem);

    struct slab *po = (struct slab *)so;
    sa->memctx = po->memctx;
    sa->alloc = po->alloc;
    sa->dealloc = po->dealloc;
    return sa;
}
struct slab *
slab_create(struct memface *mc, size_t nitem, size_t isize)
{
    assert_isize(isize);
    assert_nitem(nitem);

    size_t totalsize = isize * nitem;
    struct slab *sa = mc->alloc(mc->ctx, totalsize + sizeof(struct slab),
                                __FILE__, __LINE__);
    slab_init(sa, isize, totalsize);
    sa->sentinel = (char *)(sa + 1) + totalsize;
    sa->memctx = mc->ctx;
    sa->alloc = mc->alloc;
    sa->dealloc = mc->dealloc;
    return sa;
}
int test_slab (int argc, char **argv)
{
    // track runtime
    struct timeval t0, t1;

    // malloc version
    gettimeofday (&t0, NULL);
    for (int p = 0; p < PASSES; ++p) {
        for (int i = 0; i < ALLOCS; ++i) {
            test_s *ts = malloc (sizeof(test_s));
            ts->a = i;
            ts->b = i;
            ts->c = (i % 2 == 0);
            tsps[i] = ts;
        }
        for (int i = 0; i < ALLOCS; ++i) {
            free (tsps[i]);
        }
    }
    gettimeofday (&t1, NULL);
    double mdt = ((t1.tv_usec + 1000000 * t1.tv_sec) -
                  (t0.tv_usec + 1000000 * t0.tv_sec)) / 1000000.0;

    // slab alloc version
    gettimeofday (&t0, NULL);
    slab_init (SLAB_SIZE);
    for (int p = 0; p < PASSES; ++p) {
        slab_free ();
        for (int i = 0; i < ALLOCS; ++i) {
            test_s *ts = slab_alloc (sizeof(test_s));
            ts->a = i;
            ts->b = i;
            ts->c = (i % 2 == 0);
            tsps[i] = ts;
        }
    }
    gettimeofday (&t1, NULL);
    double sdt = ((t1.tv_usec + 1000000 * t1.tv_sec) -
                  (t0.tv_usec + 1000000 * t0.tv_sec)) / 1000000.0;

    /* Check that results are coherent, and collapse the wavefunction. */
    for (int i = 0; i < ALLOCS; ++i) {
        test_s ts = *(tsps[i]);
        if (ts.a != i || ts.b != i || ((i % 2 == 0) != ts.c)) {
            printf ("ERR ptr=%p, i=%d, a=%d, b=%f, c=%s\n",
                    tsps[i], i, ts.a, ts.b, ts.c ? "EVEN" : "ODD");
        }
    }

    fprintf (stderr, "%f sec malloc, %f sec slab, speedup %f\n", mdt, sdt, mdt / sdt);
    return 0;
}
void thread_init(L4_ThreadId_t tid)
{
    L4_Word_t my_threadno, i;

    mutex_init(&thrlock);
    mutex_lock(&thrlock);

    slab_init(LIST_SIZE(sizeof(thread_t)), THREAD_SLAB_BUFFER_COUNT, &thrpool,
              thread_slab_buffer, kmalloc, THREAD_SLAB_BUFFER_COUNT);

    thread_list = NULL;
    memset(bitmap, 0, MAX_TASKS / 8);

    my_threadno = L4_ThreadNo(tid);
    for (i = 0; i <= my_threadno; i++)
        threadno_alloc(bitmap, i);

    mutex_unlock(&thrlock);
}
/**
 * This is the first real C function ever called. It performs a lot of
 * hardware-specific initialization, then creates a pseudo-context to
 * execute the bootstrap function in.
 */
void kmain()
{
    GDB_CALL_HOOK(boot);

    dbg_init();
    dbgq(DBG_CORE, "Kernel binary:\n");
    dbgq(DBG_CORE, " text: 0x%p-0x%p\n", &kernel_start_text, &kernel_end_text);
    dbgq(DBG_CORE, " data: 0x%p-0x%p\n", &kernel_start_data, &kernel_end_data);
    dbgq(DBG_CORE, " bss:  0x%p-0x%p\n", &kernel_start_bss, &kernel_end_bss);

    page_init();
    pt_init();
    slab_init();
    pframe_init();

    acpi_init();
    apic_init();
    pci_init();
    intr_init();

    gdt_init();

    /* initialize slab allocators */
#ifdef __VM__
    anon_init();
    shadow_init();
#endif
    vmmap_init();
    proc_init();
    kthread_init();

#ifdef __DRIVERS__
    bytedev_init();
    blockdev_init();
#endif

    void *bstack = page_alloc();
    pagedir_t *bpdir = pt_get();
    KASSERT(NULL != bstack && "Ran out of memory while booting.");
    context_setup(&bootstrap_context, bootstrap, 0, NULL, bstack, PAGE_SIZE, bpdir);
    context_make_active(&bootstrap_context);

    panic("\nReturned to kmain()!!!\n");
}
void mm_init(struct multiboot *m)
{
    printk(KERN_DEBUG, "[MM]: Setting up Memory Management...\n");
    arch_mm_virtual_init(&kernel_context);
    cpu_interrupt_register_handler(14, &arch_mm_page_fault_handle);
    pmm_buddy_init();
    process_memorymap(m);
    slab_init(MEMMAP_KMALLOC_START, MEMMAP_KMALLOC_END);
    set_ksf(KSF_MMU);
    // Memory init, check!
    mm_reclaim_init();
    for(size_t i = 0; i <= (sizeof(struct pagedata) * maximum_page_number) / mm_page_size(1); i++) {
        mm_virtual_map(MEMMAP_FRAMECOUNT_START + i * mm_page_size(1),
                       mm_physical_allocate(mm_page_size(1), true),
                       PAGE_PRESENT | PAGE_WRITE, mm_page_size(1));
    }
    frames = (struct pagedata *)(MEMMAP_FRAMECOUNT_START);
    printk(0, "[MM]: allocated %d KB for page-frame counting.\n",
           sizeof(struct pagedata) * maximum_page_number / 1024);
}
int main(void)
{
    if (CUE_SUCCESS != CU_initialize_registry())
        return CU_get_error();

    slab_init(0, 1.125);
    check_indexlog();

    /* {{{ CU run & cleanup */
    CU_basic_set_mode(CU_BRM_VERBOSE);
    CU_basic_run_tests();
    CU_cleanup_registry();
    /* }}} */

    slab_stats();
    return 0;
}
/* Initialize the hash table */
int ht_init(struct hashtable *t)
{
    t->table = NULL;
    t->size = 0;
    t->sizemask = 0;
    t->used = 0;
    t->collisions = 0;
    t->hashf = NULL;
    t->key_destructor = ht_no_destructor;
    t->val_destructor = ht_no_destructor;
    t->key_compare = ht_compare_ptr;
#ifdef AHT_USE_SLAB
    t->cache = malloc(sizeof(struct ht_cache));
    if (!t->cache)
        return HT_NOMEM;
    slab_init(t->cache);
#endif
    return HT_OK;
}
static int init_vars(void)
{
    memset(&g_master_settings, 0, sizeof g_master_settings);
    memset(&g_master_runtime_vars, 0, sizeof g_master_runtime_vars);

    slab_init(MASTER_MEMPOOL_SIZE, 1.125);
    signal(SIGPIPE, SIG_IGN);

    g_master_settings.bind_ip = inet_addr(MASTER_BIND_IP);
    g_master_settings.accept_port = MASTER_ACCEPT_PORT;
    g_master_settings.read_timeout = MASTER_READ_TIMEOUT;
    g_master_settings.write_timeout = MASTER_WRITE_TIMEOUT;
    strcpy(g_master_settings.log_path, MASTER_LOG_PATH);

    g_master_runtime_vars.ss.status = read_write;
    pthread_mutex_init(&g_master_runtime_vars.ss.lock, NULL);

    log_open("./master_mirror.log");
    return 0;
}
/**
 * Initialize page management mechanism.
 * Parts of no use are deleted, while no extra parts except a check are added.
 * arch/x86/mm/pmm.c should be a good reference.
 */
void pmm_init(void)
{
    check_vpm();

    init_pmm_manager();
    page_init();
    check_alloc_page();

    boot_pgdir = boot_alloc_page();
    memset(boot_pgdir, 0, PGSIZE);
    check_pgdir();

    /* register kernel code and data pages in the table so that it won't
     * raise bad segv. */
    boot_map_segment(boot_pgdir, KERNBASE, mem_size, 0, PTE_W);

    check_boot_pgdir();
    print_pgdir(kprintf);

    slab_init();
}
/**
 * \brief Initializer that does not allocate any space
 *
 * #slot_alloc_init duplicates some of the code below,
 * modify it if making changes here.
 *
 * XXX: top_buf, head_buf and reserve_buf each point to a separate buffer of
 * size bufsize bytes which can be used for backing storage. bufsize evidently
 * needs to be >= sizeof(struct cnode_meta) * nslots / 2. Don't ask me why! -AB
 */
errval_t multi_slot_alloc_init_raw(struct multi_slot_allocator *ret,
                                   cslot_t nslots, struct capref top_cap,
                                   struct cnoderef top_cnode,
                                   void *top_buf, void *head_buf,
                                   void *reserve_buf, size_t bufsize)
{
    errval_t err;
    struct capref cap;
    struct cnoderef cnode;

    /* Generic part */
    ret->a.alloc = multi_alloc;
    ret->a.free = multi_free;
    ret->a.space = nslots;
    ret->a.nslots = nslots;
    thread_mutex_init(&ret->a.mutex);

    ret->head->next = NULL;
    ret->reserve->next = NULL;

    /* Top */
    err = single_slot_alloc_init_raw((struct single_slot_allocator *)ret->top,
                                     top_cap, top_cnode, nslots, top_buf, bufsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT);
    }

    /* Head */
    err = ret->top->alloc(ret->top, &cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }
    err = cnode_create_raw(cap, &cnode, nslots, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CNODE_CREATE);
    }
    err = single_slot_alloc_init_raw(&ret->head->a, cap, cnode, nslots,
                                     head_buf, bufsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT);
    }

    /* Reserve */
    err = ret->top->alloc(ret->top, &cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }
    err = cnode_create_raw(cap, &cnode, nslots, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CNODE_CREATE);
    }
    err = single_slot_alloc_init_raw(&ret->reserve->a, cap, cnode, nslots,
                                     reserve_buf, bufsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT);
    }

    /* Slab */
    size_t allocation_unit = sizeof(struct slot_allocator_list) +
                             SINGLE_SLOT_ALLOC_BUFLEN(nslots);
    slab_init(&ret->slab, allocation_unit, NULL);

    return SYS_ERR_OK;
}
void _main()
{
    mem_extent_t *ramext;
    u8 sn[6];
    u32 cpu_clk_hz = 0;
    rtc_time_t tm;
    s32 ret;

    /* This section runs with interrupts disabled. The boot console is not
       available in this section. */
    preempt_disable();

    /* Copy kernel read/write data areas into kernel RAM */
    memcpy(&_sdata, &_etext, &_edata - &_sdata);    /* Copy .data section to kernel RAM */
    bzero(&_sbss, &_ebss - &_sbss);                 /* Initialise .bss section */

    /* Begin platform initialisation */
    if(plat_init() != SUCCESS)
        boot_early_fail(1);

    if(plat_mem_detect() != SUCCESS)    /* Detect installed RAM, initialise memory extents */
        boot_early_fail(2);

    /* Initialise kernel slabs */
    slab_init(&_ebss);      /* Slabs sit after the .bss section */

    /* Initialise kernel heap */
    kmeminit(g_slab_end,
             mem_get_highest_addr(MEM_EXTENT_KERN | MEM_EXTENT_RAM) - KERNEL_STACK_LEN);

    /* Initialise user heap. Place it in the largest user RAM extent. */
    ramext = mem_get_largest_extent(MEM_EXTENT_USER | MEM_EXTENT_RAM);
    umeminit(ramext->base, ramext->base + ramext->len);

    /* By default, all exceptions cause a context-dump followed by a halt. */
    cpu_irq_init_table();

    /* Initialise device tree */
    if(dev_init() != SUCCESS)
        boot_early_fail(3);

    /* It's not yet possible to initialise the real (platform) console because
       devices haven't been enumerated and interrupts are disabled. In the
       meantime, create a temporary in-memory kernel console device to capture
       output from the boot process. */
    if(early_boot_console_init() != SUCCESS)
        boot_early_fail(4);

    printf("%s\nplatform: %s\n", g_warmup_message, plat_get_name());
    printf("%uMB RAM detected\n",
           (mem_get_total_size(MEM_EXTENT_USER | MEM_EXTENT_RAM)
            + mem_get_total_size(MEM_EXTENT_KERN | MEM_EXTENT_RAM)) >> 20);

    /* === Initialise peripherals - phase 2 === */
    if(dev_enumerate() != SUCCESS)
        boot_early_fail(5);

    /* Initialise the console */
    if(plat_console_init() != SUCCESS)
        boot_early_fail(6);

    ret = sched_init("[sys]");      /* Init scheduler and create system process */

    /* Enable interrupts and continue booting */
    preempt_enable();

    /* Copy the contents of the temporary console to the real console; close
       the temp console. */
    early_boot_console_close();

    /* Activate red LED while the boot process continues */
    plat_led_off(LED_ALL);
    plat_led_on(LED_RED);

    /* Device enumeration is done; interrupts are enabled, and the console
       should be functional. Booting continues... */

    /* Zero any user RAM extents. This happens after init'ing the DUART,
       because beeper. */
    /*
    put("Clearing user RAM: ");
    mem_zero_extents(MEM_EXTENT_USER | MEM_EXTENT_RAM);
    puts("done");
    */

    /* Initialise the block cache, then scan mass-storage devices for partitions */
    block_cache_init(2039);
    partition_init();

    boot_list_mass_storage();
    boot_list_partitions();

    /* ret is set by the call to sched_init(), above */
    if(ret != SUCCESS)
        printf("sched: init failed: %s\n", kstrerror(ret));

    ret = vfs_init();
    if(ret != SUCCESS)
        printf("vfs: init failed: %s\n", kstrerror(ret));

    /* Display approximate CPU clock speed */
    if(plat_get_cpu_clock(&cpu_clk_hz) == SUCCESS)
        printf("\nCPU fclk ~%2u.%uMHz\n", cpu_clk_hz / 1000000,
               (cpu_clk_hz % 1000000) / 100000);

    /* Initialise tick handler */
    tick_init();

    /* Display memory information */
    printf("%u bytes of kernel heap memory available\n"
           "%u bytes of user memory available\n", kfreemem(), ufreemem());

    /* Display platform serial number */
    if(plat_get_serial_number(sn) == SUCCESS)
    {
        printf("Hardware serial number %02X%02X%02X%02X%02X%02X\n",
               sn[0], sn[1], sn[2], sn[3], sn[4], sn[5]);
    }

    /* Display the current date and time */
    if(get_time(&tm) == SUCCESS)
    {
        char timebuf[12], datebuf[32];

        if((time_iso8601(&tm, timebuf, sizeof(timebuf)) == SUCCESS)
           && (date_long(&tm, datebuf, sizeof(datebuf)) == SUCCESS))
            printf("%s %s\n", timebuf, datebuf);
        else
            puts("Date/time invalid - please set clock");
    }

    /* Create housekeeper process */
    // proc_create(0, 0, "[hk]", NULL, housekeeper, 0, 0, PROC_TYPE_KERNEL, NULL, NULL);

    /* Initialise networking system */
    ret = net_init();
    if(ret != SUCCESS)
        printf("net: init failed: %s\n", kstrerror(ret));

    /* Startup complete - activate green LED */
    plat_led_off(LED_RED);
    plat_led_on(LED_GREEN);

    monitor();      /* start interactive "shell" thing */

    cpu_halt();     /* should never be reached */
}
/**
 * This is the first real C function ever called. It performs a lot of
 * hardware-specific initialization, then creates a pseudo-context to
 * execute the bootstrap function in.
 */
void kmain()
{
    GDB_CALL_HOOK(boot);

    dbg_init();
    dbgq(DBG_CORE, "Kernel binary:\n");
    dbgq(DBG_CORE, " text: 0x%p-0x%p\n", &kernel_start_text, &kernel_end_text);
    dbgq(DBG_CORE, " data: 0x%p-0x%p\n", &kernel_start_data, &kernel_end_data);
    dbgq(DBG_CORE, " bss:  0x%p-0x%p\n", &kernel_start_bss, &kernel_end_bss);

    page_init();
    pt_init();
    slab_init();
    pframe_init();

    acpi_init();
    apic_init();
    pci_init();
    intr_init();

    gdt_init();

    /* initialize slab allocators */
#ifdef __VM__
    anon_init();
    shadow_init();
#endif
    vmmap_init();
    proc_init();
    kthread_init();

#ifdef __DRIVERS__
    bytedev_init();
    blockdev_init();
#endif

    void *bstack = page_alloc();
    pagedir_t *bpdir = pt_get();
    KASSERT(NULL != bstack && "Ran out of memory while booting.");

    /* This little loop gives gdb a place to synch up with weenix. In the
     * past the weenix command started qemu with -S, which allowed gdb to
     * connect and start before the boot loader ran, but since then a bug
     * has appeared where breakpoints fail if gdb connects before the boot
     * loader runs. See
     *
     *   https://bugs.launchpad.net/qemu/+bug/526653
     *
     * This loop (along with an additional command in init.gdb setting
     * gdb_wait to 0) sticks weenix at a known place so gdb can join a
     * running weenix, set gdb_wait to zero and catch the breakpoint in
     * bootstrap below. See Config.mk for how to set GDBWAIT correctly.
     *
     * DANGER: if GDBWAIT != 0, and gdb is not running, this loop will never
     * exit and weenix will not run. Make SURE the GDBWAIT is set the way
     * you expect. */
    while (gdb_wait)
        ;

    context_setup(&bootstrap_context, bootstrap, 0, NULL, bstack, PAGE_SIZE, bpdir);
    context_make_active(&bootstrap_context);

    panic("\nReturned to kmain()!!!\n");
}