/*
 * Create the mandatory IDLE thread and register it with the scheduler.
 * The idle thread must both be created successfully and receive the
 * reserved idle TID; any other outcome is a fatal boot error.
 */
void kernel_threads_init() {
    uint8_t tid = init_thread("Idle", THREAD_PRIO_IDLE, idle_thread_entry,
                              NULL, 1 * KBYTE);

    if ((THREAD_TID_INVALID == tid) || (THREAD_TID_IDLE != tid)) {
        kernel_panic("cannot create the IDLE thread.");
        return;
    }

    if (!scheduler_add_thread(tid))
        kernel_panic("cannot add IDLE to the scheduler.");

    kprintf("kernel threads setup complete.\n");
}
/*
 * Run every registered library initializer in order; the first failure
 * is fatal and reported with the matching entry from messages2[].
 */
void kernel_libs_init() {
    unsigned idx;

    for (idx = 0; idx < NELEMENTS(libs_init_array); idx++) {
        BOOL ok = libs_init_array[idx]();
        if (!ok)
            kernel_panic(messages2[idx]);
    }

    kprintf("kernel setup complete.\n");
}
//! page fault void _cdecl page_fault (uint32_t eip,uint32_t cs,uint32_t flags,uint32_t err) { intstart (); int faultAddr=0; _asm { mov eax, cr2 mov [faultAddr], eax } kernel_panic ("Page Fault at 0x%x:0x%x refrenced memory at 0x%x", cs, eip, faultAddr); for (;;); }
/*
 * Reposition the read/write offset of an open file descriptor.
 *
 * t      - requesting thread, or NULL for a kernel request (process 0).
 * fd     - index into the process's open-file table.
 * offset - displacement interpreted according to 'whence'.
 * whence - VFS_SEEK_SET, VFS_SEEK_CUR or VFS_SEEK_END (flag-style).
 *
 * Returns 0 on success, -EINVAL on a bad whence, bad/unused fd, or an
 * offset that would wrap the (unsigned) file position.
 */
int vfs_lseek(struct thread *t, int fd, off_t offset, int whence)
{
	struct file *file;
	struct process *process;

	/* Kernel request: no thread means act on behalf of process 0. */
	if (!t)
		process = process_get(0);
	else
		process = t->parent;

	if (!(whence & VFS_SEEK_SET) && !(whence & VFS_SEEK_CUR) &&
	    !(whence & VFS_SEEK_END))
		return -EINVAL;

	/* fd indexes process->files[]; it must be strictly below the table
	 * size. The original 'fd > PROCESS_MAX_OPEN_FD' accepted the value
	 * PROCESS_MAX_OPEN_FD itself, a one-past-the-end access. */
	if (fd < 0 || fd >= PROCESS_MAX_OPEN_FD)
		return -EINVAL;

	file = &process->files[fd];
	if (!file->used)
		return -EINVAL;

	if (whence & VFS_SEEK_SET) {
		if (offset < 0)
			return -EINVAL;
		file->offset = offset;
	} else if (whence & VFS_SEEK_CUR) {
		size_t old_off = file->offset;

		file->offset += offset;
		/* Unsigned wrap-around detection in either direction:
		 * restore the saved offset and reject the seek. */
		if (offset < 0 && file->offset > old_off) {
			file->offset = old_off;
			return -EINVAL;
		}
		if (offset > 0 && file->offset < old_off) {
			/* was 'process->files[fd].offset' — same object as
			 * 'file->offset'; unified for consistency. */
			file->offset = old_off;
			return -EINVAL;
		}
	} else {
		kernel_panic("VFS_SEEK_END not implemented yet");
	}

	return 0;
}
/*
 * Dispatch pending secondary interrupt lines to their registered child
 * activations.  'pending' is a bitmask of interrupt lines; for each set
 * bit with a registered child the line is masked off, the child's saved
 * register frame is primed with (v0 = -3, a0 = line), and a notification
 * message is pushed.  NOTE(review): only lines 0..6 are scanned — confirm
 * line 7 is intentionally excluded (e.g. handled elsewhere as the timer).
 */
static void kernel_interrupt_others(register_t pending) {
	for(size_t i=0; i<7; i++) {
		if(pending & (1<<i)) {
			/* No handler registered for this line. */
			if(int_child[i] == 0) {
				KERNEL_ERROR("unknown interrupt %lx", i);
				continue;
			}
			/* Mask the line until the child re-enables it. */
			cp0_status_im_disable(1<<i);
			struct reg_frame * frame = kernel_exception_framep + 0;
			/* presumably -3 is an interrupt-notification return code
			 * understood by the child — TODO confirm msg protocol */
			frame->mf_v0 = -3;
			frame->mf_a0 = i;
			if(msg_push(int_child[i], 0, NULL, NULL)) {
				kernel_panic("queue full (int)");
			}
		}
	}
}
/* PR_ENTER -- Make a new entry in the process table. Something is very wrong
 * if the table overflows.
 *
 * pid     - process id to record in the slot.
 * inchan  - input channel associated with the process.
 * outchan - output channel associated with the process.
 */
void pr_enter (int pid, int inchan, int outchan)
{
	register struct proctable *pr;
	struct proctable *pr_findpid();
	extern int kernel_panic (char *msg);

	/* presumably pr_findpid(NULL) locates a free table slot —
	 * TODO confirm against pr_findpid's definition */
	if ((pr = pr_findpid (NULL)) == NULL)
	    kernel_panic ("iraf process table overflow");
	else {
	    pr->pr_pid = pid;
	    pr->pr_active = YES;
	    pr->pr_inchan = inchan;
	    pr->pr_outchan = outchan;
	}
}
void interrupt_dispatch(struct irq_regs *regs) { /* * FIXME: we use reg->irq_num and reg->irq_data that must be here * for every architecture .... */ interrupt_acnowledge(regs->irq_num); if (regs->irq_num >= MAX_IRQ_NUMBER) kernel_panic("Invalid IRQ number"); if (interrupt_entries[regs->irq_num].type == INTERRUPT_CALLBACK) interrupt_entries[regs->irq_num].callback(regs); else console_message(T_INF, "Unhandled IRQ %i fired with data = 0x%x", regs->irq_num, regs->irq_data); }
/*
 * Entry point of a freshly spawned task: looks up /bin/<exe_name> in the
 * filesystem, enables paging for the task's context, and transfers
 * control to the ELF entry point in user mode with the stack pointer at
 * the top of the task's stack area.  Does not return.
 */
void task_entry(const char *exe_name) {
	__asm__ ("sti"); /* re-enable interrupts for the new task */
	screen_print("spawned new task\n");
	screen_print("loading ");
	screen_print(exe_name);
	screen_put('\n');
	/* NOTE(review): inode 2 appears to be the root directory the path
	 * {"bin", exe_name} is resolved against — confirm with GetInode. */
	const char *paths[] = {"bin", exe_name};
	auto mData = _kernel_state.fs.GetInode(2, paths);
	if(mData.IsNothing()) {
		kernel_panic("failed to get inode");
	}
	auto data = mData.FromJust();
	_kernel_state.pager->Enable(_kernel_state.task->context);
	ELF elf(data);
	/* Stack grows down: pass one-past-the-end of the stack region. */
	user_enter(elf.entry(), &_kernel_state.task->stack[PAGE_ALLOCATOR_PAGE_SIZE * Task::STACK_PAGES]);
	for(;;) { __asm__ ("hlt"); } // unreachable
}
/*
 * Create a kernel thread (left unstarted) executing 'code' with the
 * given argument vector.  Returns the thread object on success, NULL
 * when thread creation fails.
 */
struct thread *kthread_create(uintptr_t code, int argc, char *argv[])
{
	struct thread *t;
	int tid = thread_create(kproc, code, argc, argv,
				THREAD_CREATEF_NOSTART_THREAD);

	if (tid < 0)
		return NULL;

	t = thread_get(kproc, tid);
	/* XXX: I am not sure this is possible, will see while testing. If this
	 * happens it will be worth recovering */
	if (!t)
		kernel_panic("Thread get == NULL after thread_create()");

	return t;
}
/*
 * Suspend the currently running thread and switch to whichever thread
 * the scheduler picks next.  Returns without switching when the same
 * thread remains scheduled.
 */
void thread_suspend()
{
	thread_t *current = scheduler_running_thread();

	if (!scheduler_suspend_thread()) {
		kernel_panic("cannot suspend a thread.\n");
		return;
	}

	schedule_thread();

	thread_t *chosen = scheduler_running_thread();
	/* No one to wait for */
	if (current == chosen)
		return;

	switch_context(current->context, chosen->context);
}
/* Non-Maskable Interrupt (NMI) trap handler: fatal, panic and halt. */
void interrupt _cdecl nmi_trap(uint32_t cs,uint32_t eip,uint32_t eflags)
{
	intstart ();
	kernel_panic("NMI trap");
	for (;;); /* halt in case kernel_panic returns */
}
/* Single step (debug) trap handler: fatal, panic and halt.
 * Fix: every sibling handler calls intstart() on entry; it was
 * missing here, so it is added for consistency. */
void interrupt _cdecl single_step_trap(uint32_t cs,uint32_t eip,uint32_t eflags)
{
	intstart ();
	kernel_panic("Single step");
	for (;;); /* halt in case kernel_panic returns */
}
/* FPU Single Instruction Multiple Data (SIMD) fault handler: fatal. */
void interrupt _cdecl simd_fpu_fault(uint32_t cs,uint32_t eip,uint32_t eflags)
{
	intstart ();
	kernel_panic("FPU SIMD fault");
	for (;;); /* halt in case kernel_panic returns */
}
/* Machine Check abort handler: hardware-detected error, fatal. */
void interrupt _cdecl machine_check_abort(uint32_t cs,uint32_t eip,uint32_t eflags)
{
	intstart ();
	kernel_panic("Machine Check");
	for (;;); /* halt in case kernel_panic returns */
}
//! bounds check void _cdecl bounds_check_fault (uint32_t cs, uint32_t eip, uint32_t flags) { intstart (); kernel_panic ("Bounds check fault at physical address [0x%x:0x%x] EFLAGS [0x%x]",cs,eip, flags); for (;;); }
/* Device-not-available (#NM, FPU absent/disabled) fault handler: fatal. */
void interrupt _cdecl no_device_fault(uint32_t cs,uint32_t eip,uint32_t eflags)
{
	intstart ();
	kernel_panic("Device not found");
	for (;;); /* halt in case kernel_panic returns */
}
/* Bounds check fault handler: fatal, panic and halt. */
void interrupt _cdecl bounds_check_fault(uint32_t cs,uint32_t eip,uint32_t eflags)
{
	intstart ();
	kernel_panic("Bounds check fault");
	for (;;); /* halt in case kernel_panic returns */
}
/* Segment-not-present fault handler (carries a CPU error code): fatal. */
void interrupt _cdecl no_segment_fault(uint32_t cs,uint32_t err,uint32_t eip,uint32_t eflags)
{
	intstart ();
	kernel_panic("Invalid segment");
	for (;;); /* halt in case kernel_panic returns */
}
/* Double Fault abort handler (carries a CPU error code): fatal. */
void interrupt _cdecl double_fault_abort(uint32_t cs,uint32_t err,uint32_t eip,uint32_t eflags)
{
	intstart ();
	kernel_panic("Double fault");
	for (;;); /* halt in case kernel_panic returns */
}
//! double fault void _cdecl double_fault_abort (uint32_t eip,uint32_t cs,uint32_t flags,uint32_t err) { intstart (); kernel_panic ("Double fault at physical address [0x%x:0x%x] EFLAGS [0x%x]",cs,eip, flags); for (;;); }
//! device not available void _cdecl no_device_fault (uint32_t cs, uint32_t eip, uint32_t flags) { intstart (); kernel_panic ("Device not found fault at physical address [0x%x:0x%x] EFLAGS [0x%x]",cs,eip, flags); for (;;); }
//! invalid opcode / instruction void _cdecl invalid_opcode_fault (uint32_t cs, uint32_t eip, uint32_t flags) { intstart (); kernel_panic ("Invalid opcode at physical address [0x%x:0x%x] EFLAGS [0x%x]",cs,eip, flags); for (;;); }
/* Breakpoint (#BP / INT3) trap handler: fatal, panic and halt. */
void interrupt _cdecl breakpoint_trap(uint32_t cs,uint32_t eip,uint32_t eflags)
{
	intstart ();
	kernel_panic("Breakpoint trap");
	for (;;); /* halt in case kernel_panic returns */
}
/* Stack-segment fault handler (carries a CPU error code): fatal. */
void interrupt _cdecl stack_fault(uint32_t cs,uint32_t err,uint32_t eip,uint32_t eflags)
{
	intstart ();
	kernel_panic("Stack fault");
	for (;;); /* halt in case kernel_panic returns */
}
/* Overflow (#OF / INTO) trap handler: fatal, panic and halt. */
void interrupt _cdecl overflow_trap(uint32_t cs,uint32_t eip,uint32_t eflags)
{
	intstart ();
	kernel_panic("Overflow trap");
	for (;;); /* halt in case kernel_panic returns */
}
/* General Protection Fault handler (carries a CPU error code): fatal. */
void interrupt _cdecl general_protection_fault(uint32_t cs,uint32_t err,uint32_t eip,uint32_t eflags)
{
	intstart ();
	kernel_panic("General Protection Fault");
	for (;;); /* halt in case kernel_panic returns */
}
/* Invalid opcode (#UD) fault handler: fatal, panic and halt. */
void interrupt _cdecl invalid_opcode_fault(uint32_t cs,uint32_t eip,uint32_t eflags)
{
	intstart ();
	kernel_panic("Invalid opcode");
	for (;;); /* halt in case kernel_panic returns */
}
/* Alignment Check fault handler (carries a CPU error code): fatal. */
void interrupt _cdecl alignment_check_fault(uint32_t cs,uint32_t err,uint32_t eip,uint32_t eflags)
{
	intstart ();
	kernel_panic("Alignment Check");
	for (;;); /* halt in case kernel_panic returns */
}
/* Invalid TSS fault handler (carries a CPU error code): fatal. */
void interrupt _cdecl invalid_tss_fault(uint32_t cs,uint32_t err,uint32_t eip,uint32_t eflags)
{
	intstart ();
	kernel_panic("Invalid TSS");
	for (;;); /* halt in case kernel_panic returns */
}
//! overflow void _cdecl overflow_trap (uint32_t cs, uint32_t eip, uint32_t flags) { intstart (); kernel_panic ("Overflow trap at physical address [0x%x:0x%x] EFLAGS [0x%x]",cs,eip, flags); for (;;); }