/* One-time kernel setup for the semaphore subsystem. */
void sem_kinit(void)
{
	/* Start with a completely clean semaphore table. */
	memset(sem_table, 0, sizeof(sem_t) * NUM_SEMS);

	/* Initialize the spin lock that guards the table. */
	slock_init(&sem_table_lock);
}
void sig_init(void) { slock_init(&sigtable_lock); memset(sigtable, 0, sizeof(struct signal_t) * NSIG); int x; for(x = 0;x < NSIG;x++) default_actions[x] = SIGDEFAULT_KPANIC; default_actions[SIGHUP] = SIGDEFAULT_TERM; default_actions[SIGINT] = SIGDEFAULT_TERM; default_actions[SIGQUIT] = SIGDEFAULT_CORE; default_actions[SIGILL] = SIGDEFAULT_CORE; default_actions[SIGTRAP] = SIGDEFAULT_CORE; default_actions[SIGIOT] = SIGDEFAULT_TERM; default_actions[SIGABRT] = SIGDEFAULT_CORE; default_actions[SIGEMT] = SIGDEFAULT_TERM; default_actions[SIGFPE] = SIGDEFAULT_CORE; default_actions[SIGKILL] = SIGDEFAULT_TERM; default_actions[SIGBUS] = SIGDEFAULT_CORE; default_actions[SIGSEGV] = SIGDEFAULT_CORE; default_actions[SIGSYS] = SIGDEFAULT_CORE; default_actions[SIGPIPE] = SIGDEFAULT_TERM; default_actions[SIGALRM] = SIGDEFAULT_TERM; default_actions[SIGTERM] = SIGDEFAULT_TERM; default_actions[SIGURG] = SIGDEFAULT_IGN; default_actions[SIGSTOP] = SIGDEFAULT_STOP; default_actions[SIGTSTP] = SIGDEFAULT_STOP; default_actions[SIGCONT] = SIGDEFAULT_CONT; default_actions[SIGCHLD] = SIGDEFAULT_IGN; default_actions[SIGTTIN] = SIGDEFAULT_STOP; default_actions[SIGTTOU] = SIGDEFAULT_STOP; default_actions[SIGIO] = SIGDEFAULT_TERM; default_actions[SIGWINCH] = SIGDEFAULT_IGN; default_actions[SIGUSR1] = SIGDEFAULT_TERM; default_actions[SIGUSR2] = SIGDEFAULT_TERM; }
int vm_init(void) { slock_init(&global_mem_lock); #ifdef __ALLOW_VM_SHARE__ vm_share_init(); /* Setup shared memory */ #endif /** * The boot loader has handled all of the messy work for us. * All we need to do is pick up the free map head and kernel * page directory. */ /* The boot strap directly mapped in the null guard page */ vm_unmappage(0x0, k_pgdir); vmflags_t dir_flags = VM_DIR_READ | VM_DIR_WRIT; vmflags_t tbl_flags = VM_TBL_READ | VM_TBL_WRIT; /* Map pages in for our kernel stack */ vm_mappages(KVM_KSTACK_S, KVM_KSTACK_E - KVM_KSTACK_S, k_pgdir, dir_flags, tbl_flags); /* Add bootstrap code to the memory pool */ int boot2_s = PGROUNDDOWN(KVM_BOOT2_S) + PGSIZE; int boot2_e = PGROUNDUP(KVM_BOOT2_E); int x; for(x = boot2_s;x < boot2_e;x += PGSIZE) pfree(x); /* Clear the TLB */ vm_enable_paging(k_pgdir); return 0; }
struct ilka_region * ilka_open(const char *file, struct ilka_options *options) { journal_recover(file); struct ilka_region *r = calloc(1, sizeof(struct ilka_region)); if (!r) { ilka_fail("out-of-memory for ilka_region struct: %lu", sizeof(struct ilka_region)); return NULL; } slock_init(&r->lock); r->file = file; r->options = *options; if ((r->fd = file_open(file, &r->options)) == -1) goto fail_open; if ((r->len = file_grow(r->fd, ILKA_PAGE_SIZE)) == -1UL) goto fail_grow; if (!mmap_init(&r->mmap, r->fd, r->len, &r->options)) goto fail_mmap; if (!persist_init(&r->persist, r, r->file)) goto fail_persist; const struct meta * meta = meta_read(r); if (meta->magic != ilka_magic) { if (!r->options.create) { ilka_fail("invalid magic for file '%s'", file); goto fail_magic; } struct meta * m = meta_write(r); m->magic = ilka_magic; m->version = ilka_version; m->alloc = sizeof(struct meta); } if (meta->version != ilka_version) { ilka_fail("invalid version for file '%s': %lu != %lu", file, meta->version, ilka_version); goto fail_version; } if (!alloc_init(&r->alloc, r, &r->options, meta->alloc)) goto fail_alloc; if (!epoch_init(&r->epoch, r, &r->options)) goto fail_epoch; if (ILKA_MCHECK) mcheck_init(&r->mcheck); r->header_len = alloc_end(&r->alloc); return r; fail_epoch: fail_alloc: fail_version: fail_magic: persist_close(&r->persist); fail_persist: mmap_close(&r->mmap); fail_mmap: fail_grow: file_close(r->fd); fail_open: free(r); return NULL; }
/* One-time kernel setup for time keeping. */
void ktime_init(void)
{
	/* Initialize the lock that protects the RTC state. */
	slock_init(&rtc_lock);

	/* Zero out the kernel's global time structure. */
	memset(&k_time, 0, sizeof(struct rtc_t));
}