Example #1
void cpu_interrupt_syscall_entry(struct registers *regs, int syscall_num)
{
	/* disable interrupts while we record the saved register state */
	cpu_interrupt_set(0);
	ASSERT(!current_thread->regs);
	current_thread->regs = regs;
	atomic_fetch_add_explicit(&interrupt_counts[0x80], 1, memory_order_relaxed);
	if(syscall_num == 128) {
		/* syscall 128 is the return path from a userspace signal
		 * handler: restore the pre-signal state instead of
		 * dispatching a normal system call */
		arch_tm_userspace_signal_cleanup(regs);
	} else {
		syscall_handler(regs);
	}
	/* the handler may have re-enabled interrupts; disable them again
	 * before checking for pending signals */
	cpu_interrupt_set(0);
	__setup_signal_handler(regs);
	current_thread->regs = 0;
	ASSERT((current_process == kernel_process) || current_thread->held_locks == 0);
}
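
The syscall-128 branch above only makes sense together with __setup_signal_handler(): before returning to user space, the kernel may redirect the thread into a signal handler, and syscall 128 is how the thread gets back out. A hypothetical sketch of that role; signal_next_pending(), saved_regs, signal_handlers, ip, and first_arg are illustrative names, not this kernel's actual API:

static void __setup_signal_handler(struct registers *regs)
{
	/* assumed helper: lowest pending, unblocked signal, or 0 if none */
	int sig = signal_next_pending(current_thread);
	if(!sig)
		return;
	/* save the pre-signal state so the syscall-128 path can restore it */
	current_thread->saved_regs = *regs;
	/* rewrite the saved frame so the return-to-user lands in the handler */
	regs->ip = (uintptr_t)current_thread->signal_handlers[sig];
	regs->first_arg = sig;
}
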
Example #2
void
kern_boot_comp(void)
{
	struct pt_regs regs;
	struct captbl *ct, *ct0;
	pgtbl_t pt, pt0 = 0;
	unsigned int i;
	struct thread *thd = (struct thread *)thdinit;

	/* llbooter's captbl */
	ct = captbl_create(boot_comp_captbl);
	assert(ct);
	pt = pgtbl_create(boot_comp_pgd, boot_comp_pgd);
	assert(pt);
	pgtbl_init_pte(boot_comp_pte_vm);
	pgtbl_init_pte(boot_comp_pte_pm);

	assert(!captbl_activate_boot(ct, BOOT_CAPTBL_SELF_CT));
	assert(!sret_activate(ct, BOOT_CAPTBL_SELF_CT, BOOT_CAPTBL_SRET));
	assert(!pgtbl_activate(ct, BOOT_CAPTBL_SELF_CT, BOOT_CAPTBL_SELF_PT, pt, 0));
	assert(!pgtbl_activate(ct, BOOT_CAPTBL_SELF_CT, BOOT_CAPTBL_BOOTVM_PTE, (pgtbl_t)boot_comp_pte_vm, 1));
	assert(!pgtbl_activate(ct, BOOT_CAPTBL_SELF_CT, BOOT_CAPTBL_PHYSM_PTE, (pgtbl_t)boot_comp_pte_pm, 1));
	assert(!comp_activate(ct, BOOT_CAPTBL_SELF_CT, BOOT_CAPTBL_SELF_COMP, 
			      BOOT_CAPTBL_SELF_CT, BOOT_CAPTBL_SELF_PT, 0, 0x37337, NULL));
	/* construct the page tables */
	assert(!cap_cons(ct, BOOT_CAPTBL_SELF_PT, BOOT_CAPTBL_BOOTVM_PTE, BOOT_MEM_VM_BASE));
	assert(!cap_cons(ct, BOOT_CAPTBL_SELF_PT, BOOT_CAPTBL_PHYSM_PTE, BOOT_MEM_PM_BASE));
	/* add the component's virtual memory at 4MB (1<<22) using "physical memory" starting at 0xADEAD000 */
	for (i = 0 ; i < sys_llbooter_sz ; i++) {
		u32_t addr = 0xADEAD000 + i*PAGE_SIZE;
		u32_t flags;
		assert(!cap_memactivate(ct, BOOT_CAPTBL_SELF_PT, 
					BOOT_MEM_VM_BASE + i*PAGE_SIZE, 
					addr, PGTBL_USER_DEF));
		assert(chal_pa2va((void *)addr) == pgtbl_lkup(pt, BOOT_MEM_VM_BASE+i*PAGE_SIZE, &flags));
	}
	/* add the system's physical memory at address 1GB */
	for (i = 0 ; i < sys_maxmem ; i++) {
		u32_t addr = i*PAGE_SIZE;
		u32_t flags;
		assert(!cap_memactivate(ct, BOOT_CAPTBL_SELF_PT, 
					BOOT_MEM_PM_BASE + i*PAGE_SIZE, 
					addr, PGTBL_COSFRAME));
		assert(chal_pa2va((void *)addr) == pgtbl_lkup(pt, BOOT_MEM_PM_BASE+i*PAGE_SIZE, &flags));
	}

	/* comp0's data, culminated in a static invocation capability to the llbooter */
	ct0 = captbl_create(c0_comp_captbl);
	assert(ct0);
	assert(!captbl_activate(ct, BOOT_CAPTBL_SELF_CT, BOOT_CAPTBL_COMP0_CT, ct0, 0));
	/* pt0 should be replaced with page tables from the Linux cos_loader */
	assert(!pgtbl_activate(ct, BOOT_CAPTBL_SELF_CT, BOOT_CAPTBL_COMP0_PT, pt0, 0));
	assert(!comp_activate(ct, BOOT_CAPTBL_SELF_CT, BOOT_CAPTBL_COMP0_COMP, 
			      BOOT_CAPTBL_COMP0_CT, BOOT_CAPTBL_COMP0_PT, 0, 0x37337, NULL));

	/*
	 * The only capability in comp0's captbl is capability 0: the
	 * synchronous invocation capability into the llbooter.
	 *
	 * Replace 0xADD44343 with the actual entry-point in the
	 * llbooter!
	 */
	assert(!sinv_activate(ct, BOOT_CAPTBL_COMP0_CT, 0, BOOT_CAPTBL_SELF_COMP, 0xADD44343));

	/* 
	 * Create a thread in comp0.
	 */
	assert(!thd_activate(ct, BOOT_CAPTBL_SELF_CT, BOOT_CAPTBL_SELF_INITTHD_BASE, thd, BOOT_CAPTBL_COMP0_COMP, 0));
	thd_current_update(thd, NULL);

	/*
	 * Synchronous invocation test: invoke the llbooter through the
	 * sinv capability, then sret back, checking that the invocation
	 * stack and page table are switched and later restored.
	 */
	u32_t cap_no = 1;
	u32_t ret_cap = 0;
	u32_t orig_cr3 = __cr3_contents;
	regs.ax = (cap_no + 1) << COS_CAPABILITY_OFFSET;		/* sinv */
	syscall_handler(&regs);
	assert(cos_cpu_local_info()->invstk_top > 0); /* we cache invstk_top on kernel stk */
	assert(__cr3_contents != orig_cr3);
	regs.ax = (ret_cap + 1) << COS_CAPABILITY_OFFSET;		/* sret */
	syscall_handler(&regs);
	assert(cos_cpu_local_info()->invstk_top == 0);
	assert(__cr3_contents == orig_cr3);
	printf("Test passed!\n");
}
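
The regs.ax values in the test above follow one encoding: the capability index is biased by one and shifted left by COS_CAPABILITY_OFFSET, so a zero ax can never be mistaken for a capability invocation and the low bits stay free as a discriminator. A sketch of the encode and the matching decode, assuming only what the two regs.ax assignments show (the real syscall_handler may unpack ax differently):

static inline u32_t cap_encode(u32_t cap_no)
{
	return (cap_no + 1) << COS_CAPABILITY_OFFSET;
}

static inline u32_t cap_decode(u32_t ax)
{
	return (ax >> COS_CAPABILITY_OFFSET) - 1;
}
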
Example #3
/* this should NEVER be entered from an interrupt handler,
 * and only from kernel code in the one case of calling
 * sys_setup() */
void entry_syscall_handler(volatile registers_t regs)
{
	/* don't need to save the flag here, since it will always be true */
#if CONFIG_ARCH == TYPE_ARCH_X86_64
	assert(regs.int_no == 0x80
	       && ((regs.ds & ~0x7) == 0x10 || (regs.ds & ~0x7) == 0x20)
	       && ((regs.cs & ~0x7) == 0x8 || (regs.cs & ~0x7) == 0x18));
#endif
	set_int(0);
	add_atomic(&int_count[0x80], 1);
	if(current_task->flags & TF_IN_INT)
		panic(0, "attempted to enter syscall while handling an interrupt");
	/* set the interrupt handling flag... */
	raise_flag(TF_IN_INT);
#if CONFIG_ARCH == TYPE_ARCH_X86_64
	if(regs.rax == 128) {
#elif CONFIG_ARCH == TYPE_ARCH_X86
	if(regs.eax == 128) {
#endif
		/* the injection code at the end of the signal handler calls
		 * a syscall with eax = 128. So here we handle returning from
		 * a signal handler. First, copy back the old registers, and
		 * reset flags and signal stuff */
		memcpy((void *)&regs, (void *)&current_task->reg_b, sizeof(registers_t));
		current_task->sig_mask = current_task->old_mask;
		current_task->cursig = 0;
		lower_flag(TF_INSIG);
		lower_flag(TF_JUMPIN);
	} else {
		assert(!current_task->sysregs && !current_task->regs);
		/* otherwise, this is a normal system call. Save the regs for modification
		 * for signals and exec */
		current_task->regs = &regs;
		current_task->sysregs = &regs;
		syscall_handler(&regs);
		assert(!get_cpu_interrupt_flag());
		/* handle stage2's here...*/
		if(maybe_handle_stage_2 || !current_task->syscall_count) {
			mutex_acquire(&s2_lock);
			for(int i=0;i<MAX_INTERRUPTS;i++)
			{
				if(stage2_count[i])
				{
					sub_atomic(&stage2_count[i], 1);
					for(int j=0;j<MAX_HANDLERS;j++) {
						if(interrupt_handlers[i][j][1]) {
							(interrupt_handlers[i][j][1])(&regs);
						}
					}
				}
			}
			mutex_release(&s2_lock);
		}
		assert(!get_cpu_interrupt_flag());
	}
	assert(!set_int(0));
	current_task->sysregs = 0;
	current_task->regs = 0;
	/* we don't need to worry about this being wrong, since we'll always
	 * be returning to user-space code */
	set_cpu_interrupt_flag(1);
	/* we're never returning to an interrupt, so we can
	 * safely reset this flag */
	lower_flag(TF_IN_INT);
#if CONFIG_SMP
	lapic_eoi();
#endif
}
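
Both this handler and isr_handler() below treat set_int() as "set the software interrupt flag, return its previous value", which is what lets isr_handler() save and restore the interrupted state. A plausible sketch of that contract; the per-CPU field name is an assumption, not this kernel's actual definition:

int set_int(int on)
{
	/* assumed per-CPU bookkeeping mirroring the hardware IF flag */
	int old = current_cpu->interrupt_flag;
	current_cpu->interrupt_flag = on;
	if(on)
		asm volatile("sti");
	else
		asm volatile("cli");
	return old;
}
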

/* This gets called from our ASM interrupt handler stub. */
void isr_handler(volatile registers_t regs)
{
#if CONFIG_ARCH == TYPE_ARCH_X86_64
	assert(((regs.ds & ~0x7) == 0x10 || (regs.ds & ~0x7) == 0x20)
	       && ((regs.cs & ~0x7) == 0x8 || (regs.cs & ~0x7) == 0x18));
#endif
	/* this is explained in the IRQ handler */
	int previous_interrupt_flag = set_int(0);
	add_atomic(&int_count[regs.int_no], 1);
	/* check if we're interrupting kernel code, and set the interrupt
	 * handling flag */
	char already_in_interrupt = 0;
	if(current_task->flags & TF_IN_INT)
		already_in_interrupt = 1;
	raise_flag(TF_IN_INT);
	/* run the stage1 handlers, and see if we need any stage2s. And if we
	 * don't handle it at all, we need to actually fault to handle the error
	 * and kill the process or kernel panic */
	char called=0;
	char need_second_stage = 0;
	for(int i=0;i<MAX_HANDLERS;i++)
	{
		if(interrupt_handlers[regs.int_no][i][0] || interrupt_handlers[regs.int_no][i][1])
		{
			/* we're able to handle the error! */
			called = 1;
			if(interrupt_handlers[regs.int_no][i][0])
				(interrupt_handlers[regs.int_no][i][0])(&regs);
			if(interrupt_handlers[regs.int_no][i][1])
				need_second_stage = 1;
		}
	}
	if(need_second_stage) {
		/* we need to run a second stage handler. Indicate that here... */
		add_atomic(&stage2_count[regs.int_no], 1);
		maybe_handle_stage_2 = 1;
	}
	/* clean up... Also, we don't handle stage 2 in ISR handling, since this
	 * can occur from within a stage2 handler */
	assert(!set_int(0));
	/* if it went unhandled, kill the process or panic */
	if(!called)
		faulted(regs.int_no, !already_in_interrupt, regs.eip);
	/* restore previous interrupt state */
	set_cpu_interrupt_flag(previous_interrupt_flag);
	if(!already_in_interrupt)
		lower_flag(TF_IN_INT);
	/* send out the EOI... */
#if CONFIG_SMP
	lapic_eoi();
#endif
}
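
Both functions above index interrupt_handlers[int_no][slot][stage]: stage [0] runs immediately inside the ISR with interrupts off, while stage [1] is deferred and drained from the syscall exit path once stage2_count signals pending work. A hypothetical registration helper showing that layout; the function name and return convention are illustrative:

typedef void (*int_handler_t)(registers_t *);

int register_interrupt_handler(int num, int_handler_t stage1, int_handler_t stage2)
{
	for(int j = 0; j < MAX_HANDLERS; j++) {
		/* claim the first slot with neither stage populated */
		if(!interrupt_handlers[num][j][0] && !interrupt_handlers[num][j][1]) {
			interrupt_handlers[num][j][0] = stage1;
			interrupt_handlers[num][j][1] = stage2;
			return j;
		}
	}
	return -1; /* no free slot */
}
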
Example #4
static void trap_dispatch(struct frame *tf)
{

	switch(tf->tf_trapno) 
	{
		case T_PGFLT: 
		{	
			//print_frame(tf);
			do_page_fault(tf);
			break;
		}
		case T_GPFLT:
		{
			panic("GPFLT!\n");
			do_exit(curtask);
			break;
		}
		case T_BRKPT : 
		{
			print_frame(tf);
			panic("break point handler not implemented!\n");
			break;
		}
		case T_DIVIDE:
		{
			printk("CPU:%d USER T_DIVIDE\n", get_cpuid());
			do_exit(curtask);
			break;	/* prevent fall-through into T_SYSCALL */
		}
		case T_SYSCALL:
		{	
			tf->tf_regs.reg_eax = syscall_handler(tf); 
			break;
		}
		case IRQ_SPURIOUS: 
		{
			printk("CPU:%d Spurious interrupt on irq 7\n",get_cpuid());
			print_frame(tf);
			return;
		}
		case IRQ_TIMER : 
		{ 
			lapic_eoi();
			schedule_tick();
			break; 
		}
		case IRQ_KBD : 
		{
			irq_eoi();
			printk("CPU:%d IRQ_KBD \n",get_cpuid()); 
			inb(0x60);	/* read the scancode to drain the PS/2 output buffer */
			break;
		}
		case IRQ_SERIAL :
		{	
			panic("SERIAL handler not implemented!\n");
			break;
		}
		case IRQ_IDE0 : 
		case IRQ_IDE1 : 
		{	
			irq_eoi();
			do_hd_interrupt(tf);
			break;
		}
		case IRQ_ERROR :
		{ 
			print_frame(tf);
			panic("ERROR handler not implemented!\n");
			break;
		}
		default:
		{
			if (tf->tf_cs == _KERNEL_CS_)
				panic("unhandled trap in kernel");
			else {
				print_frame(tf);
				return;
			}
			break;
		}
	}
}
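
The T_SYSCALL case writes syscall_handler()'s return value back into the saved reg_eax, which is how the result reaches user space when the trap frame is restored. A minimal sketch of the dispatch that convention implies; the table, NR_SYSCALLS, and the ebx/ecx/edx argument registers are assumptions for illustration:

typedef int (*syscall_fn_t)(uint32_t, uint32_t, uint32_t);
static syscall_fn_t syscall_table[NR_SYSCALLS];

int syscall_handler(struct frame *tf)
{
	uint32_t num = tf->tf_regs.reg_eax;	/* syscall number in eax */
	if(num >= NR_SYSCALLS || !syscall_table[num])
		return -1;			/* unknown syscall */
	/* assumed i386 convention: arguments in ebx, ecx, edx */
	return syscall_table[num](tf->tf_regs.reg_ebx,
				  tf->tf_regs.reg_ecx,
				  tf->tf_regs.reg_edx);
}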