int signal_kill(struct signal *signal) { struct task_struct *from; struct task_struct *to; int ret = 0; from = pid_get_task(signal->from); to = pid_get_task(signal->to); /* * if from == to, it means task has stoped and * need to exit. if not it means one task need * to kill another task, low prio can not kill * the task whose prio is higher than him */ if ((signal->from) == (signal->to)) { ret = task_kill_self(from); } else { if (from->prio < to->prio) ret = task_kill_other(from, to); else { kernel_error("Can not kill task %d --> %d\n", signal->from, signal->to); ret = -EINVAL; } } return ret; }
/**
 * main_interrupt_handler - central interrupt dispatch routine.
 * @ptr_to_stack: first word of the register frame the entry stub pushed;
 *                its address is reinterpreted as the whole saved frame.
 *
 * Reads the interrupt number out of the saved frame and routes it:
 * CPU exceptions go to kernel_error() with the faulting context, the
 * timer tick goes to the timer handler, and the system-call vector is
 * handed the full frame. Interrupt number 0 is ignored.
 */
void main_interrupt_handler(uint ptr_to_stack)
{
	struct handler_stack_frame *regs;

	/* The pushed words above ptr_to_stack form the handler frame. */
	regs = (struct handler_stack_frame *)&ptr_to_stack;

	if (!regs->interrupt_number)
		return;

	klprintf(15, "Handling interrupt #%d, ticks=%d",
		 regs->interrupt_number, get_system_ticks());

	switch (regs->interrupt_number) {
	case DIVIDE_ERROR:
	case DEBUG_EXCEPTION:
	case NMI_HARDWARE:
	case DEBUG_BREAKPOINT:
	case INSTRUCTION_OVERFLOW:
	case INSTRUCTION_OVERRANGE:
	case INVALID_OPCODE:
	case NO_COPROCESSOR:
	case DOUBLE_FAULT:
	case COPROCESSOR_SEG_OVERRUN:
	case INVALID_TSS:
	case SEGMENT_NOT_PRESENT:
	case STACK_FAULT:
	case GENERAL_PROTECTION_FAULT:
	case PAGE_FAULT:
	case RESERVED15:
	case COPROCESSOR_ERROR:
	case ALIGNMENT_CHECK:
	case MACHINE_CHECK:
		/* Fatal CPU exception: report the saved execution context. */
		kernel_error(regs->interrupt_number,
			     regs->stck_frame.eip, regs->stck_frame.esp,
			     regs->eax, regs->ebx, regs->ecx, regs->edx,
			     regs->edi, regs->esi);
		break;
	case TIMER_IRQ:
		timer_interrupt_handler();
		break;
	case SYSTEM_CALL:
		system_call(regs);
		break;
	default:
		/* Unrecognized vector: nothing to do (matches original). */
		break;
	}
}
/*
 * interrupt_math - handler for math/FPU exceptions; reports the fault.
 * NOTE(review): kernel_error() is invoked with three different argument
 * shapes in this file (printf-style format, raw register list, and this
 * string + frame form) — confirm which prototype is the real one.
 */
void interrupt_math(ExceptionStackFrame *frame){ kernel_error("illegal math operation", frame); }
/*
 * interrupt_illegal - handler for invalid-opcode faults; reports the fault.
 * NOTE(review): kernel_error() call signature differs across this file —
 * verify this string + frame form matches the actual prototype.
 */
void interrupt_illegal(ExceptionStackFrame *frame){ kernel_error("illegal instruction", frame); }
/*
 * interrupt_disk - handler for disk access faults; reports the fault.
 * NOTE(review): kernel_error() call signature differs across this file —
 * verify this string + frame form matches the actual prototype.
 */
void interrupt_disk(ExceptionStackFrame *frame){ kernel_error("illegal disk access", frame); }
int init_task_page_table(struct task_page_table *table) { unsigned long base = 0; struct page *page; struct pgt_temp_buffer *tb = &table->pgt_temp_buffer; if (!table) return -EINVAL; /* * if the page table has been alloced * we reinit the pde and pte page table */ if (!table->pde_base) { memset((char *)table, 0, sizeof(struct task_page_table)); page = alloc_new_pde(); if (!page) { kernel_error("No memory for task PDE\n"); return -ENOMEM; } table->pde_base = page_to_va(page); table->pde_base_pa = page_to_pa(page); /* * init temp buffer */ tb->tbuf_pte_page = request_pages(1, GFP_PGT); if (!tb->tbuf_pte_page) { release_pde(base); return -ENOMEM; } tb->tbuf_pte_base = page_to_va(tb->tbuf_pte_page); tb->tbuf_page_nr = PTES_PER_PDE; } /* * if do memset op here, it will cause much time * to be fix */ mmu_copy_kernel_pde(table->pde_base); init_pte(table); /* * init temp_buffer member */ base = pgt_get_pde_entry_addr(table->pde_base, KERNEL_TEMP_BUFFER_BASE); mmu_create_pde_entry(base, page_to_pa(tb->tbuf_pte_page), KERNEL_TEMP_BUFFER_BASE); table->mmap_current_base = PROCESS_USER_MMAP_BASE; return 0; }