Example #1
__attribute__((always_inline)) inline void set_as_dead(task_t *t)
{
	assert(t);
	sub_atomic(&running_processes, 1);
	sub_atomic(&(((cpu_t *)t->cpu)->numtasks), 1);
	set_int(0);
	raise_flag(TF_DYING);
	tqueue_remove(((cpu_t *)t->cpu)->active_queue, t->activenode);
	t->state = TASK_DEAD;
}
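
All of the examples below hinge on sub_atomic returning the updated value: Example #3 tests sub_atomic(&t->thread->count, 1) against zero to detect the last reference to the shared thread data. A minimal sketch of compatible helpers, assuming the GCC __sync builtins and plain int counters (the kernel's real counter types may differ):

/* Sketch only: atomic helpers matching how these examples use them.
 * sub_atomic must return the NEW value, since callers compare it
 * against zero to detect the last reference. */
static inline int add_atomic(volatile int *val, int amount)
{
	return __sync_add_and_fetch(val, amount);
}

static inline int sub_atomic(volatile int *val, int amount)
{
	return __sync_sub_and_fetch(val, amount);
}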
Example #2
/* make sure it eventually gets handled */
void __KT_try_handle_stage2_interrupts()
{
	if(maybe_handle_stage_2)
	{
		int old = set_int(0);
		maybe_handle_stage_2 = 0;
		/* handle the stage2 handlers. NOTE: this may change to only 
		 * handling one interrupt, one function. For now, this works. */
		mutex_acquire(&s2_lock);
		for(int i=0;i<MAX_INTERRUPTS;i++)
		{
			if(stage2_count[i])
			{
				sub_atomic(&stage2_count[i], 1);
				for(int j=0;j<MAX_HANDLERS;j++) {
					if(interrupt_handlers[i][j][1]) {
						(interrupt_handlers[i][j][1])(current_task->regs);
					}
				}
			}
		}
		mutex_release(&s2_lock);
		set_int(old);
	}
}
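
The loop above assumes a handler table indexed by interrupt number, slot, and stage: index [0] holds the stage-1 handler that runs in interrupt context, index [1] the deferred stage-2 handler. The actual registration API isn't shown in these examples; a hypothetical helper consistent with that layout (assuming the kernel's existing interrupt_handlers table and registers_t type) might look like:

typedef void (*isr_t)(volatile registers_t *);

/* Hypothetical: install a stage-1/stage-2 pair in the first free slot
 * for interrupt num. Returns the slot index, or -1 if all slots are taken. */
int register_interrupt_handler(int num, isr_t stage1, isr_t stage2)
{
	for(int j = 0; j < MAX_HANDLERS; j++) {
		if(!interrupt_handlers[num][j][0] && !interrupt_handlers[num][j][1]) {
			interrupt_handlers[num][j][0] = stage1;
			interrupt_handlers[num][j][1] = stage2;
			return j;
		}
	}
	return -1;
}

A real version would also guard the table update, for example by taking s2_lock or disabling interrupts while it writes.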
Example #3
void exit(int code)
{
	if(!current_task || current_task->pid == 0) 
		panic(PANIC_NOSYNC, "kernel tried to exit");
	task_t *t = (task_t *)current_task;
	/* Get ready to exit */
	assert(t->thread->magic == THREAD_MAGIC);
	ll_insert(kill_queue, (void *)t);
	raise_flag(TF_EXITING);
	if(code != -9) 
		t->exit_reason.cause = 0;
	t->exit_reason.ret = code;
	t->exit_reason.pid = t->pid;
	/* Clear out system resources */
	free_thread_specific_directory();
	/* tell our parent that we're dead */
	if(t->parent)
		do_send_signal(t->parent->pid, SIGCHILD, 1);
	if(!sub_atomic(&t->thread->count, 1))
	{
		/* we're the last thread to share this data. Clean it up */
		close_all_files(t);
		if(t->thread->root) iput(t->thread->root);
		if(t->thread->pwd) iput(t->thread->pwd);
		mutex_destroy(&t->thread->files_lock);
		void *addr = t->thread;
		t->thread = 0;
		kfree(addr);
	}
	/* don't do this while the state is dead, as we may step on the toes of waitpid.
	 * this fixes all tasks that are children of current_task, or are waiting
	 * on current_task. For those waiting, it signals the task. For those that
	 * are children, it fixes the 'parent' pointer. */
	search_tqueue(primary_queue, TSEARCH_EXIT_PARENT | TSEARCH_EXIT_WAITING, 0, 0, 0, 0);
	char flag_last_page_dir_task;
	/* is this the last task to use this pd_info? */
	flag_last_page_dir_task = (sub_atomic(&pd_cur_data->count, 1) == 0) ? 1 : 0;
	if(flag_last_page_dir_task) {
		/* no one else is referencing this directory. Clean it up... */
		free_thread_shared_directory();
		vm_unmap(PDIR_DATA);
		raise_flag(TF_LAST_PDIR);
	}
	set_as_dead(t);
	for(;;) schedule();
}
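
waitpid presumably reads back the exit_reason record that exit() fills in. From the three fields touched above, a minimal compatible layout would be something like the following sketch; the real struct almost certainly carries more, such as signal information for the code == -9 path:

/* Hypothetical minimal layout covering only the fields exit() touches. */
struct exit_status {
	int cause; /* why the task died; cleared to 0 on a normal exit */
	int ret;   /* the code passed to exit() */
	int pid;   /* pid of the exiting task, recorded for waitpid */
};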
Example #4
/* WARNING: This does not sync!!! */
void remove_element(cache_t *c, struct ce_t *o, int locked)
{
	if(!o) return;
	if(o->dirty)
		panic(PANIC_NOSYNC, "tried to remove non-sync'd element");
	
	if(!locked) rwlock_acquire(c->rwl, RWL_WRITER);
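	/* re-check now that we hold the writer lock: the element may have
	 * been dirtied after the unlocked check above */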
	if(o->dirty)
		set_dirty(c, o, 0);
	assert(c->count);
	sub_atomic(&c->count, 1);
	ll_remove(&c->primary_ll, o->list_node);
	if(c->hash) chash_delete(c->hash, o->id, o->key);
	if(o->data)
		kfree(o->data);
	rwlock_destroy(o->rwl);
	kfree(o);
	if(!locked) rwlock_release(c->rwl, RWL_WRITER);
}
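
The locked parameter lets a caller that already holds the cache's writer lock skip re-acquiring it. Two hypothetical call sites, one for each style:

/* Hypothetical call sites illustrating the 'locked' parameter. */
void cache_drop(cache_t *c, struct ce_t *e)
{
	/* we do not hold c->rwl, so remove_element acquires it itself */
	remove_element(c, e, 0);
}

void cache_drop_locked(cache_t *c, struct ce_t *e)
{
	/* we already hold c->rwl as a writer; pass locked=1 to skip it */
	remove_element(c, e, 1);
}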
Example #5
int set_dirty(cache_t *c, struct ce_t *e, int dirty)
{
	int old = e->dirty;
	e->dirty=dirty;
	if(dirty)
	{
		if(old != dirty) {
			add_dlist(c, e);
			add_atomic(&c->dirty, 1);
		}
	} else
	{
		if(old != dirty) {
			assert(c->dirty);
			sub_atomic(&c->dirty, 1);
			remove_dlist(c, e);
		}
	}
	return old;
}
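
set_dirty keeps the cache-wide dirty count and dirty list in step with the per-element flag, which is what lets remove_element (Example #4) refuse unsynced elements. A hypothetical writeback path would sync the data first and only then clear the flag; sync_element below is a stand-in name for whatever actually writes the data back:

/* Hypothetical: write back one element, then clear its dirty flag so
 * remove_element will accept it later. sync_element is a stand-in. */
void flush_element(cache_t *c, struct ce_t *e)
{
	if(!e->dirty)
		return;
	sync_element(c, e); /* assumed writeback primitive */
	set_dirty(c, e, 0); /* drops it from the dirty list, decrements c->dirty */
}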
Example #6
void irq_handler(volatile registers_t regs)
{
#if CONFIG_ARCH == TYPE_ARCH_X86_64
	assert(((regs.ds&(~0x7)) == 0x10 || (regs.ds&(~0x7)) == 0x20) && ((regs.cs&(~0x7)) == 0x8 || (regs.cs&(~0x7)) == 0x18));
#endif
	/* ok, so the assembly entry function clears interrupts in the cpu, 
	 * but the kernel doesn't know that yet. So we clear the interrupt
	 * flag in the cpu structure as part of the normal set_int call, but
	 * it returns the interrupts-enabled flag from BEFORE the interrupt
	 * was received! F****n' brilliant! Back up that flag, so we can
	 * properly restore the flag later. */
	int previous_interrupt_flag = set_int(0);
	add_atomic(&int_count[regs.int_no], 1);
	/* save the registers so we can screw with iret later if we need to */
	char clear_regs=0;
	if(current_task && !current_task->regs) {
		/* of course, if we are already inside an interrupt, we shouldn't
		 * overwrite those. Also, we remember if we've saved this set of registers
		 * for later use */
		clear_regs=1;
		current_task->regs = &regs;
	}
	/* check if we're interrupting kernel code */
	char already_in_interrupt = 0;
	if(current_task->flags & TF_IN_INT)
		already_in_interrupt = 1;
	/* ...and set the flag so we know we're in an interrupt */
	raise_flag(TF_IN_INT);
	
	/* now, run through the stage1 handlers, and see if we need any
	 * stage2 handlers to run later */
	char need_second_stage = 0;
	for(int i=0;i<MAX_HANDLERS;i++)
	{
		if(interrupt_handlers[regs.int_no][i][0])
			(interrupt_handlers[regs.int_no][i][0])(&regs);
		if(interrupt_handlers[regs.int_no][i][1]) 
			need_second_stage = 1;
	}
	/* if we need a second stage handler, increment the count for this 
	 * interrupt number, and indicate that handlers should check for
	 * second stage handlers. */
	if(need_second_stage) {
		add_atomic(&stage2_count[regs.int_no], 1);
		maybe_handle_stage_2 = 1;
	}
	assert(!get_cpu_interrupt_flag());
	/* ok, now are we allowed to handle stage2's right here? */
	if(!already_in_interrupt && (maybe_handle_stage_2||need_second_stage))
	{
		maybe_handle_stage_2 = 0;
		/* handle the stage2 handlers. NOTE: this may change to only 
		 * handling one interrupt, and/or one function. For now, this works. */
		mutex_acquire(&s2_lock);
		for(int i=0;i<MAX_INTERRUPTS;i++)
		{
			if(stage2_count[i])
			{
				/* decrease the count for this interrupt number, and loop through
				 * all the second stage handlers and run them */
				sub_atomic(&stage2_count[i], 1);
				for(int j=0;j<MAX_HANDLERS;j++) {
					if(interrupt_handlers[i][j][1]) {
						(interrupt_handlers[i][j][1])(&regs);
					}
				}
			}
		}
		mutex_release(&s2_lock);
		assert(!get_cpu_interrupt_flag());
	}
	/* ok, now lets clean up */
	assert(!set_int(0));
	/* clear the registers if we saved the ones from this interrupt */
	if(current_task && clear_regs)
		current_task->regs=0;
	/* restore the flag in the cpu struct. The assembly routine will
	 * call iret, which will also restore the EFLAG state to what
	 * it was before, including the interrupts-enabled bit in eflags */
	set_cpu_interrupt_flag(previous_interrupt_flag);
	/* and clear the state flag if this is going to return to user-space code */
	if(!already_in_interrupt)
		lower_flag(TF_IN_INT);
	/* and send out the EOIs */
	if(interrupt_controller == IOINT_PIC) ack_pic(regs.int_no);
#if CONFIG_SMP
	lapic_eoi();
#endif
}
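
The "back up that flag" dance above only works because set_int tracks the interrupt state in software: it records the requested state in the per-cpu structure, applies it to the hardware, and returns the previously tracked state, which during an interrupt is still the pre-interrupt value. A sketch of that contract, with hypothetical field and arch-hook names:

/* Sketch of the set_int contract the handlers rely on; the per-cpu
 * field and arch hooks named here are hypothetical. */
int set_int(int new_state)
{
	int old = current_cpu->interrupt_flag;
	current_cpu->interrupt_flag = new_state;
	if(new_state)
		arch_interrupt_enable();  /* sti */
	else
		arch_interrupt_disable(); /* cli */
	return old;
}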
Example #7
/* this should NEVER be entered from an interrupt handler, 
 * and only from kernel code in the one case of calling
 * sys_setup() */
void entry_syscall_handler(volatile registers_t regs)
{
	/* don't need to save the flag here, since it will always be true */
#if CONFIG_ARCH == TYPE_ARCH_X86_64
	assert(regs.int_no == 0x80 && ((regs.ds&(~0x7)) == 0x10 || (regs.ds&(~0x7)) == 0x20) && ((regs.cs&(~0x7)) == 0x8 || (regs.cs&(~0x7)) == 0x18));
#endif
	set_int(0);
	add_atomic(&int_count[0x80], 1);
	if(current_task->flags & TF_IN_INT)
		panic(0, "attempted to enter syscall while handling an interrupt");
	/* set the interrupt handling flag... */
	raise_flag(TF_IN_INT);
#if CONFIG_ARCH == TYPE_ARCH_X86_64
	if(regs.rax == 128) {
#elif CONFIG_ARCH == TYPE_ARCH_X86
	if(regs.eax == 128) {
#endif
		/* the injection code at the end of the signal handler calls
		 * a syscall with eax = 128. So here we handle returning from
		 * a signal handler. First, copy back the old registers, and
		 * reset flags and signal stuff */
		memcpy((void *)&regs, (void *)&current_task->reg_b, sizeof(registers_t));
		current_task->sig_mask = current_task->old_mask;
		current_task->cursig=0;
		lower_flag(TF_INSIG);
		lower_flag(TF_JUMPIN);
	} else {
		assert(!current_task->sysregs && !current_task->regs);
		/* otherwise, this is a normal system call. Save the regs for modification
		 * for signals and exec */
		current_task->regs = &regs;
		current_task->sysregs = &regs;
		syscall_handler(&regs);
		assert(!get_cpu_interrupt_flag());
		/* handle stage2's here...*/
		if(maybe_handle_stage_2 || !current_task->syscall_count) {
			mutex_acquire(&s2_lock);
			for(int i=0;i<MAX_INTERRUPTS;i++)
			{
				if(stage2_count[i])
				{
					sub_atomic(&stage2_count[i], 1);
					for(int j=0;j<MAX_HANDLERS;j++) {
						if(interrupt_handlers[i][j][1]) {
							(interrupt_handlers[i][j][1])(&regs);
						}
					}
				}
			}
			mutex_release(&s2_lock);
		}
		assert(!get_cpu_interrupt_flag());
	}
	assert(!set_int(0));
	current_task->sysregs=0;
	current_task->regs=0;
	/* we don't need to worry about this being wrong, since we'll always be returning to
	 * user-space code */
	set_cpu_interrupt_flag(1);
	/* we're never returning to an interrupt, so we can
	 * safely reset this flag */
	lower_flag(TF_IN_INT);
#if CONFIG_SMP
	lapic_eoi();
#endif
}
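
The rax/eax == 128 branch above is the signal-return convention: the code injected at the end of a signal handler traps back into the kernel with that magic value so the registers saved in reg_b can be restored. A hypothetical user-space stub illustrating the convention on x86:

/* Hypothetical trampoline: trap into the syscall gate with the magic
 * value 128 in eax to request a signal return. */
static void signal_return_stub(void)
{
	__asm__ volatile("int $0x80" : : "a"(128));
}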

/* This gets called from our ASM interrupt handler stub. */
void isr_handler(volatile registers_t regs)
{
#if CONFIG_ARCH == TYPE_ARCH_X86_64
	assert(((regs.ds&(~0x7)) == 0x10 || (regs.ds&(~0x7)) == 0x20) && ((regs.cs&(~0x7)) == 0x8 || (regs.cs&(~0x7)) == 0x18));
#endif
	/* this is explained in the IRQ handler */
	int previous_interrupt_flag = set_int(0);
	add_atomic(&int_count[regs.int_no], 1);
	/* check if we're interrupting kernel code, and set the interrupt
	 * handling flag */
	char already_in_interrupt = 0;
	if(current_task->flags & TF_IN_INT)
		already_in_interrupt = 1;
	raise_flag(TF_IN_INT);
	/* run the stage1 handlers, and see if we need any stage2s. And if we
	 * don't handle it at all, we need to actually fault to handle the error
	 * and kill the process or kernel panic */
	char called=0;
	char need_second_stage = 0;
	for(int i=0;i<MAX_HANDLERS;i++)
	{
		if(interrupt_handlers[regs.int_no][i][0] || interrupt_handlers[regs.int_no][i][1])
		{
			/* we're able to handle the error! */
			called = 1;
			if(interrupt_handlers[regs.int_no][i][0])
				(interrupt_handlers[regs.int_no][i][0])(&regs);
			if(interrupt_handlers[regs.int_no][i][1])
				need_second_stage = 1;
		}
	}
	if(need_second_stage) {
		/* we need to run a second stage handler. Indicate that here... */
		add_atomic(&stage2_count[regs.int_no], 1);
		maybe_handle_stage_2 = 1;
	}
	/* clean up... Also, we don't handle stage 2 in ISR handling, since this
	 * can occur from within a stage2 handler */
	assert(!set_int(0));
	/* if it went unhandled, kill the process or panic */
	if(!called)
		faulted(regs.int_no, !already_in_interrupt, regs.eip);
	/* restore previous interrupt state */
	set_cpu_interrupt_flag(previous_interrupt_flag);
	if(!already_in_interrupt)
		lower_flag(TF_IN_INT);
	/* send out the EOI... */
#if CONFIG_SMP
	lapic_eoi();
#endif
}