/* Print a summary of the pass/fail counters accumulated in _test_data. */
void test_fini(void)
{
        int passed = _test_data.td_passed;
        int failed = _test_data.td_failed;

        dbgq(DBG_TEST, "tests completed:\n");
        dbgq(DBG_TEST, "\t\t%d passed\n", passed);
        dbgq(DBG_TEST, "\t\t%d failed\n", failed);
}
/*
 * Default handler invoked when a test assertion fails.
 *
 * Increments the global failure counter and logs the failing location.
 * If a printf-style format string is supplied, the formatted message is
 * appended to the log line; otherwise only file/line/name are printed.
 *
 * file - source file containing the failing assertion
 * line - line number of the failing assertion
 * name - name of the failing test
 * fmt  - optional printf-style format string (may be NULL)
 * args - arguments for fmt (ignored when fmt is NULL)
 */
static void _default_test_fail(const char *file, int line, const char *name,
                               const char *fmt, va_list args)
{
        _test_data.td_failed++;
        if (NULL == fmt) {
                dbgq(DBG_TEST, "FAILED: %s(%d): %s\n", file, line, name);
        } else {
                char buf[2048];
                /* use sizeof(buf) instead of repeating the magic size so the
                 * bound and the buffer cannot drift apart */
                vsnprintf(buf, sizeof(buf), fmt, args);
                /* defensive NUL-termination in case the kernel's vsnprintf
                 * does not guarantee it on truncation */
                buf[sizeof(buf) - 1] = '\0';
                dbgq(DBG_TEST, "FAILED: %s(%d): %s: %s\n", file, line, name, buf);
        }
}
void acpi_init() { /* search memory for the RSDP, this should reside within the first 1mb of * of memory, which is identity mapped during initialization */ rsd_ptr = __rsdp_search(); KASSERT(NULL != rsd_ptr && "Could not find the ACPI Root Descriptor Table."); /* use the RSDP to find the RSDT, which will probably be in unmapped physical * memory, therefore we must use the phys_tmp_map functionallity of page tables */ rsd_table = _acpi_load_table(rsd_ptr->rp_addr); KASSERT(RSDT_SIGNATURE == rsd_table->rt_header.ah_sign); /* Only support ACPI version 1.0 */ KASSERT(0 == __acpi_checksum((void *)rsd_table, rsd_table->rt_header.ah_size) && "Weenix only supports ACPI 1.0"); dbgq(DBG_CORE, "--- ACPI INIT ---\n"); dbgq(DBG_CORE, "rsdp addr: %p\n", rsd_ptr); dbgq(DBG_CORE, "rsdt addr: %p\n", rsd_table); dbgq(DBG_CORE, "rev: %i\n", (int)rsd_ptr->rp_rev); dbgq(DBG_CORE, "oem: %s6\n", (char *)rsd_ptr->rp_oemid); /* search for all tables listed in the RSDT and checksum them */ dbgq(DBG_CORE, "ents:\t"); int len = (rsd_table->rt_header.ah_size - sizeof(rsd_table->rt_header)); len /= sizeof(rsd_table->rt_other[0]); int i; for (i = 0; i < len; ++i) { struct acpi_header *h = _acpi_load_table(rsd_table->rt_other[i]); rsd_table->rt_other[i] = (uintptr_t)h; dbgq(DBG_CORE, "%.4s ", (char *)&h->ah_sign); KASSERT(0 == __acpi_checksum((void *)h, h->ah_size)); } dbgq(DBG_CORE, "\n"); }
/**
 * This function is called from kmain, however it is not running in a
 * thread context yet. It should create the idle process which will
 * start executing idleproc_run() in a real thread context. To start
 * executing in the new process's context call context_make_active(),
 * passing in the appropriate context. This function should _NOT_
 * return.
 *
 * Note: Don't forget to set curproc and curthr appropriately.
 *
 * @param arg1 the first argument (unused)
 * @param arg2 the second argument (unused)
 */
static void *
bootstrap(int arg1, void *arg2)
{
        dbg(DBG_PRINT,"*****************************************Entering bootstrap\n");
        /* If the next line is removed/altered in your submission, 20 points will be deducted. */
        dbgq(DBG_CORE, "SIGNATURE: 53616c7465645f5fd0f5f4e9adc70694ad12fcfaae6423bd5d01ca122cf44b611898d35ebf9b3fab4a0fbdefab9ecc12\n");
        /* necessary to finalize page table information */
        /*Cody Modifying*/
        pt_template_init();
        /* Create the idle process and its first thread, then record them in
         * the global current-process / current-thread pointers before the
         * context switch below. */
        proc_t *p=proc_create("idle");
        kthread_t *t=kthread_create(p,idleproc_run,NULL,NULL);
        curproc=p;
        curthr=t;
        KASSERT(NULL!=curproc);
        dbg(DBG_PRINT,"(GRADING1A 1.a)\n");
        KASSERT(PID_IDLE == curproc->p_pid);
        dbg(DBG_PRINT,"(GRADING1A 1.a)\n");
        KASSERT(NULL != curthr);
        dbg(DBG_PRINT,"(GRADING1A 1.a)\n");
        /* Switch execution into the idle thread's context; on success this
         * call does not return, so the statements below are dead code kept
         * from the original skeleton. */
        context_make_active(&curthr->kt_ctx);
        /* NOT_YET_IMPLEMENTED("PROCS: bootstrap");*/
        /*panic("weenix returned to bootstrap()!!! BAD!!!\n");*/
        dbg(DBG_PRINT,"*****************************************Leaving bootstrap\n");
        return NULL;
}
/**
 * This is the first real C function ever called. It performs a lot of
 * hardware-specific initialization, then creates a pseudo-context to
 * execute the bootstrap function in.
 */
void
kmain()
{
        GDB_CALL_HOOK(boot);

        /* Bring up debug output first so the later init stages can log. */
        dbg_init();
        dbgq(DBG_CORE, "Kernel binary:\n");
        dbgq(DBG_CORE, " text: 0x%p-0x%p\n", &kernel_start_text, &kernel_end_text);
        dbgq(DBG_CORE, " data: 0x%p-0x%p\n", &kernel_start_data, &kernel_end_data);
        dbgq(DBG_CORE, " bss: 0x%p-0x%p\n", &kernel_start_bss, &kernel_end_bss);

        /* Low-level memory management first: physical pages, page tables,
         * slab allocator, page frames.  Order matters here. */
        page_init();
        pt_init();
        slab_init();
        pframe_init();

        /* Hardware discovery and interrupt/descriptor setup. */
        acpi_init();
        apic_init();
        pci_init();
        intr_init();
        gdt_init();

        /* initialize slab allocators */
#ifdef __VM__
        anon_init();
        shadow_init();
#endif
        vmmap_init();
        proc_init();
        kthread_init();

#ifdef __DRIVERS__
        bytedev_init();
        blockdev_init();
#endif

        /* Allocate a stack and grab the current page directory for the
         * bootstrap pseudo-context. */
        void *bstack = page_alloc();
        pagedir_t *bpdir = pt_get();
        KASSERT(NULL != bstack && "Ran out of memory while booting.");
        context_setup(&bootstrap_context, bootstrap, 0, NULL, bstack, PAGE_SIZE, bpdir);
        /* Switch into the bootstrap context; this call should never return. */
        context_make_active(&bootstrap_context);

        panic("\nReturned to kmain()!!!\n");
}
uintptr_t phys_detect_highmem(void) { uint32_t i; struct mmap_def *mmap = (struct mmap_def *)MEMORY_MAP_BASE; dbgq(DBG_MM, "Physical Memory Map:\n"); for (i = 0; i < mmap->md_count; ++i) { uint32_t base = mmap->md_ents[i].me_baselo; uint32_t length = mmap->md_ents[i].me_lenlo; uint32_t type = mmap->md_ents[i].me_type; dbgq(DBG_MM, " 0x%.8x-0x%.8x: %s\n", base, base + length, (type < type_count) ? type_strings[type] : "UNDEF"); if (1 /* Usable */ == type && KERNEL_PHYS_BASE >= base && KERNEL_PHYS_BASE < base + length) { return (uintptr_t)(base + length); } } KASSERT(0 && "Failed to detect correct physical addresses."); return 0; }
/* Read in the given fd's ELF header into the location pointed to by the given
 * argument and does some basic checks that it is a valid ELF file, is an
 * executable, and is for the correct platform
 * interp is 1 if we are loading an interpreter, 0 otherwise
 * Returns 0 on success, -errno on failure. Returns the ELF header in the header
 * argument. */
static int _elf32_load_ehdr(int fd, Elf32_Ehdr *header, int interp)
{
        int err;
        memset(header, 0, sizeof(*header));

        /* Preliminary check that this is an ELF file: the read must succeed,
         * the magic bytes must match, and at least e_ehsize bytes must have
         * been read so the rest of the header fields are valid. */
        if (0 > (err = do_read(fd, header, sizeof(*header)))) {
                return err;
        } else if ((SELFMAG > err) || 0 != memcmp(&header->e_ident[0], ELFMAG, SELFMAG)) {
                dbg(DBG_ELF, "ELF load failed: no magic number present\n");
                return -ENOEXEC;
        } else if (header->e_ehsize > err) {
                dbg(DBG_ELF, "ELF load failed: bad file size\n");
                return -ENOEXEC;
        }

        /* Log information about the file */
        dbg(DBG_ELF, "loading ELF file\n");
        dbgq(DBG_ELF, "ELF Header Information:\n");
        dbgq(DBG_ELF, "Version: %d\n", (int)header->e_ident[EI_VERSION]);
        dbgq(DBG_ELF, "Class: %d\n", (int)header->e_ident[EI_CLASS]);
        dbgq(DBG_ELF, "Data: %d\n", (int)header->e_ident[EI_DATA]);
        dbgq(DBG_ELF, "Type: %d\n", (int)header->e_type);
        dbgq(DBG_ELF, "Machine: %d\n", (int)header->e_machine);

        /* Check that the ELF file is executable and targets
         * the correct platform.  ET_DYN is accepted only when loading an
         * interpreter. */
        if (ET_EXEC != header->e_type && !(ET_DYN == header->e_type && interp)) {
                /* BUGFIX: corrected "exectuable" typo in the diagnostic */
                dbg(DBG_ELF, "ELF load failed: not executable ELF\n");
                return -ENOEXEC;
        } else if (!_elf32_platform_check(header)) {
                dbg(DBG_ELF, "ELF load failed: incorrect platform\n");
                return -ENOEXEC;
        }

        return 0;
}
/**
 * This function is called from kmain, however it is not running in a
 * thread context yet. It should create the idle process which will
 * start executing idleproc_run() in a real thread context. To start
 * executing in the new process's context call context_make_active(),
 * passing in the appropriate context. This function should _NOT_
 * return.
 *
 * Note: Don't forget to set curproc and curthr appropriately.
 *
 * @param arg1 the first argument (unused)
 * @param arg2 the second argument (unused)
 */
static void *
bootstrap(int arg1, void *arg2)
{
        /* If the next line is removed/altered in your submission, 20 points will be deducted. */
        dbgq(DBG_CORE, "SIGNATURE: 53616c7465645f5f75d4d6807cbe46557c5894883e55a7be357a5954568eccfc0c1d901bcc73a4409c500b4c2ad2554d\n");
        /* necessary to finalize page table information */
        pt_template_init();

        curproc = proc_create("IDLE"); /* Creating idle process */
        KASSERT(NULL != curproc);
        dbg(DBG_PRINT," (GRADING1A 1.a) successfully created IDLE process with process id %d\n",curproc->p_pid);
        KASSERT(PID_IDLE == curproc->p_pid);
        dbg(DBG_PRINT," (GRADING1A 1.a) PID_IDLE value is %d and it matches with the idle process id %d\n",PID_IDLE,curproc->p_pid);

        /* Create the first thread of the idle process; it will begin
         * executing idleproc_run() once we switch into its context. */
        curthr = kthread_create(curproc,idleproc_run,0,NULL); /*running idleproc run*/
        KASSERT(NULL != curthr);
        dbg(DBG_PRINT," (GRADING1A 1.a) thread for the idle process has been created successfully!!\n");

        /* Switch execution into the idle thread's context; on success this
         * call never returns, so reaching the panic below is a bug. */
        context_make_active(&curthr->kt_ctx);
        /*NOT_YET_IMPLEMENTED("PROCS: do_waitpid");*/
        panic("weenix returned to bootstrap()!!! BAD!!!\n");
        return NULL;
}
/**
 * This is the first real C function ever called. It performs a lot of
 * hardware-specific initialization, then creates a pseudo-context to
 * execute the bootstrap function in.
 */
void
kmain()
{
        GDB_CALL_HOOK(boot);

        /* Bring up debug output first so the later init stages can log. */
        dbg_init();
        dbgq(DBG_CORE, "Kernel binary:\n");
        dbgq(DBG_CORE, " text: 0x%p-0x%p\n", &kernel_start_text, &kernel_end_text);
        dbgq(DBG_CORE, " data: 0x%p-0x%p\n", &kernel_start_data, &kernel_end_data);
        dbgq(DBG_CORE, " bss: 0x%p-0x%p\n", &kernel_start_bss, &kernel_end_bss);

        /* Low-level memory management first: physical pages, page tables,
         * slab allocator, page frames.  Order matters here. */
        page_init();
        pt_init();
        slab_init();
        pframe_init();

        /* Hardware discovery and interrupt/descriptor setup. */
        acpi_init();
        apic_init();
        pci_init();
        intr_init();
        gdt_init();

        /* initialize slab allocators */
#ifdef __VM__
        anon_init();
        shadow_init();
#endif
        vmmap_init();
        proc_init();
        kthread_init();

#ifdef __DRIVERS__
        bytedev_init();
        blockdev_init();
#endif

        /* Allocate a stack and grab the current page directory for the
         * bootstrap pseudo-context. */
        void *bstack = page_alloc();
        pagedir_t *bpdir = pt_get();
        KASSERT(NULL != bstack && "Ran out of memory while booting.");

        /* This little loop gives gdb a place to synch up with weenix. In the
         * past the weenix command started qemu was started with -S which
         * allowed gdb to connect and start before the boot loader ran, but
         * since then a bug has appeared where breakpoints fail if gdb connects
         * before the boot loader runs. See
         *
         * https://bugs.launchpad.net/qemu/+bug/526653
         *
         * This loop (along with an additional command in init.gdb setting
         * gdb_wait to 0) sticks weenix at a known place so gdb can join a
         * running weenix, set gdb_wait to zero and catch the breakpoint in
         * bootstrap below. See Config.mk for how to set GDBWAIT correctly.
         *
         * DANGER: if GDBWAIT != 0, and gdb is not running, this loop will never
         * exit and weenix will not run. Make SURE the GDBWAIT is set the way
         * you expect.
         */
        while (gdb_wait) ;

        context_setup(&bootstrap_context, bootstrap, 0, NULL, bstack, PAGE_SIZE, bpdir);
        /* Switch into the bootstrap context; this call should never return. */
        context_make_active(&bootstrap_context);

        panic("\nReturned to kmain()!!!\n");
}
static int _elf32_load(const char *filename, int fd, char *const argv[], char *const envp[], uint32_t *eip, uint32_t *esp) { int err = 0; Elf32_Ehdr header; Elf32_Ehdr interpheader; /* variables to clean up on failure */ vmmap_t *map = NULL; file_t *file = NULL; char *pht = NULL; char *interpname = NULL; int interpfd = -1; file_t *interpfile = NULL; char *interppht = NULL; Elf32_auxv_t *auxv = NULL; char *argbuf = NULL; uintptr_t entry; file = fget(fd); KASSERT(NULL != file); /* Load and verify the ELF header */ if (0 > (err = _elf32_load_ehdr(fd, &header, 0))) { goto done; } if (NULL == (map = vmmap_create())) { err = -ENOMEM; goto done; } size_t phtsize = header.e_phentsize * header.e_phnum; if (NULL == (pht = kmalloc(phtsize))) { err = -ENOMEM; goto done; } /* Read in the program header table */ if (0 > (err = _elf32_load_phtable(fd, &header, pht, phtsize))) { goto done; } /* Load the segments in the program header table */ if (0 > (err = _elf32_map_progsegs(file->f_vnode, map, &header, pht, 0))) { goto done; } Elf32_Phdr *phinterp = NULL; /* Check if program requires an interpreter */ if (0 > (err = _elf32_find_phinterp(&header, pht, &phinterp))) { goto done; } /* Calculate program bounds for future reference */ void *proglow; void *proghigh; _elf32_calc_progbounds(&header, pht, &proglow, &proghigh); entry = (uintptr_t) header.e_entry; /* if an interpreter was requested load it */ if (NULL != phinterp) { /* read the file name of the interpreter from the binary */ if (0 > (err = do_lseek(fd, phinterp->p_offset, SEEK_SET))) { goto done; } else if (NULL == (interpname = kmalloc(phinterp->p_filesz))) { err = -ENOMEM; goto done; } else if (0 > (err = do_read(fd, interpname, phinterp->p_filesz))) { goto done; } if (err != (int)phinterp->p_filesz) { err = -ENOEXEC; goto done; } /* open the interpreter */ dbgq(DBG_ELF, "ELF Interpreter: %*s\n", phinterp->p_filesz, interpname); if (0 > (interpfd = do_open(interpname, O_RDONLY))) { err = interpfd; goto done; } 
kfree(interpname); interpname = NULL; interpfile = fget(interpfd); KASSERT(NULL != interpfile); /* Load and verify the interpreter ELF header */ if (0 > (err = _elf32_load_ehdr(interpfd, &interpheader, 1))) { goto done; } size_t interpphtsize = interpheader.e_phentsize * interpheader.e_phnum; if (NULL == (interppht = kmalloc(interpphtsize))) { err = -ENOMEM; goto done; } /* Read in the program header table */ if (0 > (err = _elf32_load_phtable(interpfd, &interpheader, interppht, interpphtsize))) { goto done; } /* Interpreter shouldn't itself need an interpreter */ Elf32_Phdr *interpphinterp; if (0 > (err = _elf32_find_phinterp(&interpheader, interppht, &interpphinterp))) { goto done; } if (NULL != interpphinterp) { err = -EINVAL; goto done; } /* Calculate the interpreter program size */ void *interplow; void *interphigh; _elf32_calc_progbounds(&interpheader, interppht, &interplow, &interphigh); uint32_t interpnpages = ADDR_TO_PN(PAGE_ALIGN_UP(interphigh)) - ADDR_TO_PN(interplow); /* Find space for the interpreter */ /* This is the first pn at which the interpreter will be mapped */ uint32_t interppagebase = (uint32_t) vmmap_find_range(map, interpnpages, VMMAP_DIR_HILO); if ((uint32_t) - 1 == interppagebase) { err = -ENOMEM; goto done; } /* Base address at which the interpreter begins on that page */ void *interpbase = (void *)((uintptr_t)PN_TO_ADDR(interppagebase) + PAGE_OFFSET(interplow)); /* Offset from "expected base" in number of pages */ int32_t interpoff = (int32_t) interppagebase - (int32_t) ADDR_TO_PN(interplow); entry = (uintptr_t) interpbase + ((uintptr_t) interpheader.e_entry - (uintptr_t) interplow); /* Load the interpreter program header and map in its segments */ if (0 > (err = _elf32_map_progsegs(interpfile->f_vnode, map, &interpheader, interppht, interpoff))) { goto done; } /* Build the ELF aux table */ /* Need to hold AT_PHDR, AT_PHENT, AT_PHNUM, AT_ENTRY, AT_BASE, * AT_PAGESZ, AT_NULL */ if (NULL == (auxv = (Elf32_auxv_t *) kmalloc(7 * 
sizeof(Elf32_auxv_t)))) { err = -ENOMEM; goto done; } Elf32_auxv_t *auxvent = auxv; /* Add all the necessary entries */ auxvent->a_type = AT_PHDR; auxvent->a_un.a_ptr = pht; auxvent++; auxvent->a_type = AT_PHENT; auxvent->a_un.a_val = header.e_phentsize; auxvent++; auxvent->a_type = AT_PHNUM; auxvent->a_un.a_val = header.e_phnum; auxvent++; auxvent->a_type = AT_ENTRY; auxvent->a_un.a_ptr = (void *) header.e_entry; auxvent++; auxvent->a_type = AT_BASE; auxvent->a_un.a_ptr = interpbase; auxvent++; auxvent->a_type = AT_PAGESZ; auxvent->a_un.a_val = PAGE_SIZE; auxvent++; auxvent->a_type = AT_NULL; } else { /* Just put AT_NULL (we don't really need this at all) */ if (NULL == (auxv = (Elf32_auxv_t *) kmalloc(sizeof(Elf32_auxv_t)))) { err = -ENOMEM; goto done; } auxv->a_type = AT_NULL; } /* Allocate a stack. We put the stack immediately below the program text. * (in the Intel x86 ELF supplement pp 59 "example stack", that is where the * stack is located). I suppose we can add this "extra page for magic data" too */ uint32_t stack_lopage = ADDR_TO_PN(proglow) - (DEFAULT_STACK_SIZE / PAGE_SIZE) - 1; err = vmmap_map(map, NULL, stack_lopage, (DEFAULT_STACK_SIZE / PAGE_SIZE) + 1, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, 0, 0, NULL); KASSERT(0 == err); dbg(DBG_ELF, "Mapped stack at low addr 0x%p, size %#x\n", PN_TO_ADDR(stack_lopage), DEFAULT_STACK_SIZE + PAGE_SIZE); /* Copy out arguments onto the user stack */ int argc, envc, auxc; size_t argsize = _elf32_calc_argsize(argv, envp, auxv, phtsize, &argc, &envc, &auxc); /* Make sure it fits on the stack */ if (argsize >= DEFAULT_STACK_SIZE) { err = -E2BIG; goto done; } /* Copy arguments into kernel buffer */ if (NULL == (argbuf = (char *) kmalloc(argsize))) { err = -ENOMEM; goto done; } /* Calculate where in user space we start putting the args. 
*/ void *arglow = (void *)((uintptr_t)(((char *) proglow) - argsize) & ~PTR_MASK); /* Copy everything into the user address space, modifying addresses in * argv, envp, and auxv to be user addresses as we go. */ _elf32_load_args(map, arglow, argsize, argbuf, argv, envp, auxv, argc, envc, auxc, phtsize); dbg(DBG_ELF, "Past the point of no return. Swapping to map at 0x%p, setting brk to 0x%p\n", map, proghigh); /* the final threshold / What warm unspoken secrets will we learn? / Beyond * the point of no return ... */ /* Give the process the new mappings. */ vmmap_t *tempmap = curproc->p_vmmap; curproc->p_vmmap = map; map = tempmap; /* So the old maps are cleaned up */ curproc->p_vmmap->vmm_proc = curproc; map->vmm_proc = NULL; /* Flush the process pagetables and TLB */ pt_unmap_range(curproc->p_pagedir, USER_MEM_LOW, USER_MEM_HIGH); tlb_flush_all(); /* Set the process break and starting break (immediately after the mapped-in * text/data/bss from the executable) */ curproc->p_brk = proghigh; curproc->p_start_brk = proghigh; strncpy(curproc->p_comm, filename, PROC_NAME_LEN); /* Tell the caller the correct stack pointer and instruction * pointer to begin execution in user space */ *eip = (uint32_t) entry; *esp = ((uint32_t) arglow) - 4; /* Space on the user stack for the (garbage) return address */ /* Note that the return address will be fixed by the userland entry code, * whether in static or dynamic */ /* And we're done */ err = 0; done: if (NULL != map) { vmmap_destroy(map); } if (NULL != file) { fput(file); } if (NULL != pht) { kfree(pht); } if (NULL != interpname) { kfree(interpname); } if (0 <= interpfd) { do_close(interpfd); } if (NULL != interpfile) { fput(interpfile); } if (NULL != interppht) { kfree(interppht); } if (NULL != auxv) { kfree(auxv); } if (NULL != argbuf) { kfree(argbuf); } return err; }