Example 1
void copy_task_struct(task_t *task, task_t *parent, char share_thread_data)
{
	task->parent = parent;
	task->pid = add_atomic(&next_pid, 1)-1;
	/* copy over the data if we're a new process. If this is going to be a thread, 
	 * then add to the count and set the pointer */
	if(!share_thread_data) {
		task->thread = thread_data_create();
		copy_thread_data(task, parent);
	} else {
		add_atomic(&parent->thread->count, 1);
		task->thread = parent->thread;
		assert(parent->thread->magic == THREAD_MAGIC);
	}
	
	task->tty = parent->tty;
	task->sig_mask = parent->sig_mask;
	task->priority = parent->priority;
	task->stack_end = parent->stack_end;
	task->heap_end = parent->heap_end;
	task->heap_start = parent->heap_start;
	task->system = parent->system;
	task->cmask = parent->cmask;
	task->path_loc_start = parent->path_loc_start;

	copy_file_handles(parent, task);
	/* this flag gets cleared on the first scheduling of this task */
	task->flags = TF_FORK;
	task->phys_mem_usage = parent->phys_mem_usage;
}
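
Note: every one of these examples leans on add_atomic() (and its counterpart sub_atomic()) for lock-free counters: PIDs, reference counts, interrupt statistics. The kernel's own implementation is not shown here; a minimal sketch, assuming GCC-style atomic builtins and the return-the-new-value convention that the PID assignment above appears to rely on, could be:

/* Hypothetical sketch of the atomic add/sub primitives used throughout these
 * examples. __atomic_add_fetch() returns the value *after* the addition,
 * which matches how callers such as copy_task_struct() and devfs_create()
 * use the result. The real kernel presumably uses an inline lock xadd. */
static inline int add_atomic(volatile int *ptr, int val)
{
	return __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int sub_atomic(volatile int *ptr, int val)
{
	return __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);
}
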
Example 2
int do_fork(unsigned flags)
{
	assert(current_task && kernel_task);
	assert(running_processes < (unsigned)MAX_TASKS || MAX_TASKS == -1);
	addr_t eip;
	task_t *task = task_create();
	page_dir_t *newspace;
	if(flags & FORK_SHAREDIR)
		newspace = vm_copy(current_task->pd);
	else
		newspace = vm_clone(current_task->pd, 0);
	if(!newspace)
	{
		kfree((void *)task);
		return -ENOMEM;
	}
	/* set the address space's entry for the current task.
	 * this is a fast and easy way to store the "what task am I" data
	 * that gets automatically updated when the scheduler switches
	 * into a new address space */
	arch_specific_set_current_task(newspace, (addr_t)task);
	/* Create the new task structure */
	task->pd = newspace;
	copy_task_struct(task, current_task, flags & FORK_SHAREDAT);
	add_atomic(&running_processes, 1);
	/* Set the state to usleep temporarily so that it doesn't accidentally run,
	 * and then add it to the queue */
	task->state = TASK_USLEEP;
	tqueue_insert(primary_queue, (void *)task, task->listnode);
	cpu_t *cpu = (cpu_t *)current_task->cpu;
#if CONFIG_SMP
	cpu = fork_choose_cpu(current_task);
#endif
	/* Copy the stack */
	set_int(0);
	engage_new_stack(task, current_task);
	/* Here we read the EIP of this exact location. The parent then sets the
	 * eip of the child to this. On the reschedule for the child, it will 
	 * start here as well. */
	volatile task_t *parent = current_task;
	store_context_fork(task);
	eip = read_eip();
	if(current_task == parent)
	{
		/* These last things allow full execution of the task */
		task->eip=eip;
		task->state = TASK_RUNNING;
		task->cpu = cpu;
		add_atomic(&cpu->numtasks, 1);
		tqueue_insert(cpu->active_queue, (void *)task, task->activenode);
		__engage_idle();
		return task->pid;
	}
	return 0;
}
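
Note: the fork path above depends on read_eip() returning the address of the instruction right after its own call site, so that the child, once scheduled with that saved EIP, resumes at the same spot and is told apart from the parent by the current_task == parent test. A sketch of such a helper, assuming GCC and using __builtin_return_address() in place of the usual two-instruction assembly stub:

/* Hypothetical sketch of read_eip(): return the caller's resume address.
 * Must not be inlined, otherwise there is no call frame to read. addr_t is
 * assumed to be the kernel's pointer-sized integer type. */
__attribute__((noinline)) addr_t read_eip(void)
{
	return (addr_t)__builtin_return_address(0);
}
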
Example 3
void ipi_handler(volatile registers_t regs)
{
#if CONFIG_ARCH == TYPE_ARCH_X86_64
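	/* sanity check on the saved segment selectors: mask off the RPL and TI bits
	 * and require that DS indexes an expected data descriptor (0x10 or 0x20)
	 * and CS an expected code descriptor (0x8 or 0x18) */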
	assert(((regs.ds&(~0x7)) == 0x10 || (regs.ds&(~0x7)) == 0x20) && ((regs.cs&(~0x7)) == 0x8 || (regs.cs&(~0x7)) == 0x18));
#endif
	int previous_interrupt_flag = set_int(0);
	add_atomic(&int_count[regs.int_no], 1);
#if CONFIG_SMP
	/* delegate to the proper handler, in ipi.c */
	switch(regs.int_no) {
		case IPI_DEBUG:
		case IPI_SHUTDOWN:
		case IPI_PANIC:
			handle_ipi_cpu_halt(regs);
			break;
		case IPI_SCHED:
			handle_ipi_reschedule(regs);
			break;
		case IPI_TLB:
			handle_ipi_tlb(regs);
			break;
		case IPI_TLB_ACK:
			handle_ipi_tlb_ack(regs);
			break;
		default:
			panic(PANIC_NOSYNC, "invalid interprocessor interrupt number: %d", regs.int_no);
	}
#endif
	assert(!set_int(0));
	set_cpu_interrupt_flag(previous_interrupt_flag); /* assembly code will issue sti */
#if CONFIG_SMP
	lapic_eoi();
#endif
}
Example 4
void timer_handler(registers_t r)
{
	/* prevent multiple cpus from adding to ticks */
	if(!current_task || !current_task->cpu || ((cpu_t *)current_task->cpu) == primary_cpu)
		add_atomic(&ticks, 1);
	/* engage the idle task occasionally (every ten seconds' worth of ticks;
	 * the expression needs the parentheses, since % and * bind left-to-right) */
	if((ticks % (current_hz*10)) == 0)
		__engage_idle();
	do_tick();
}
Example 5
void copy_thread_data(task_t *task, task_t *parent)
{
	assert(parent->thread->magic == THREAD_MAGIC);
	if(parent->thread->root) {
		task->thread->root = parent->thread->root;
		add_atomic(&task->thread->root->count, 1);
	}
	if(parent->thread->pwd) {
		task->thread->pwd = parent->thread->pwd;
		add_atomic(&task->thread->pwd->count, 1);
	}
	memcpy((void *)task->thread->signal_act, (void *)parent->thread->signal_act, 128 * 
		sizeof(struct sigaction));
	task->thread->gid = parent->thread->gid;
	task->thread->uid = parent->thread->uid;
	task->thread->_uid = parent->thread->_uid;
	task->thread->_gid = parent->thread->_gid;
	task->thread->global_sig_mask = parent->thread->global_sig_mask;
}
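
Note: copy_thread_data() takes a reference on the parent's root and pwd inodes by bumping their counts. For those counts to balance, the task-teardown path has to drop them again; that code is not among these examples, but a hypothetical counterpart (iput() is assumed to release one reference, as its use in chroot() in example 10 suggests) might look like:

/* Hypothetical release of the directory references taken in copy_thread_data().
 * Illustrative only: the function name and the exact teardown site are not
 * shown in these examples. */
void release_thread_directories(task_t *task)
{
	if(task->thread->root) {
		iput(task->thread->root);
		task->thread->root = 0;
	}
	if(task->thread->pwd) {
		iput(task->thread->pwd);
		task->thread->pwd = 0;
	}
}
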
Example 6
void init_multitasking()
{
	printk(KERN_DEBUG, "[sched]: Starting multitasking system...\n");
	/* make the kernel task */
	task_t *task = task_create();
	task->pid = next_pid++;
	task->pd = (page_dir_t *)kernel_dir;
	task->stack_end=STACK_LOCATION;
	task->priority = 1;
	task->cpu = primary_cpu;
	task->thread = thread_data_create();
	/* alarm_mutex is acquired inside a kernel tick, so we must not schedule. */
	alarm_mutex = mutex_create(0, MT_NOSCHED);
	
	kill_queue = ll_create(0);
	primary_queue = tqueue_create(0, 0);
	primary_cpu->active_queue = tqueue_create(0, 0);

	tqueue_insert(primary_queue, (void *)task, task->listnode);
	tqueue_insert(primary_cpu->active_queue, (void *)task, task->activenode);
	
	primary_cpu->cur = task;
	primary_cpu->ktask = task;
	primary_cpu->numtasks=1;
	/* make this the "current_task" by assigning a specific location
	 * in the page directory as the pointer to the task. */
	arch_specific_set_current_task((addr_t *)kernel_dir, (addr_t)task);
	kernel_task = task;
	/* this is the final thing to allow the system to begin scheduling
	 * once interrupts are enabled */
	primary_cpu->flags |= CPU_TASK;
	
	add_atomic(&running_processes, 1);
#if CONFIG_MODULES
	add_kernel_symbol(delay);
	add_kernel_symbol(delay_sleep);
	add_kernel_symbol(schedule);
	add_kernel_symbol(run_scheduler);
	add_kernel_symbol(exit);
	add_kernel_symbol(sys_setsid);
	add_kernel_symbol(do_fork);
	add_kernel_symbol(kill_task);
	add_kernel_symbol(do_send_signal);
	add_kernel_symbol(dosyscall);
	add_kernel_symbol(task_pause);
	add_kernel_symbol(task_resume);
	add_kernel_symbol(got_signal);
 #if CONFIG_SMP
	add_kernel_symbol(get_cpu);
 #endif
	_add_kernel_symbol((addr_t)(task_t **)&kernel_task, "kernel_task");
#endif
}
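
Note: the block of add_kernel_symbol() calls ends with an explicit _add_kernel_symbol((addr_t)(task_t **)&kernel_task, "kernel_task"), which suggests add_kernel_symbol is a stringizing macro that passes the symbol's value (fine for function names, which decay to pointers) and therefore could not be used for the kernel_task variable itself. A plausible definition, offered as an assumption rather than the kernel's verified macro:

/* Hypothetical definition: export an identifier to loadable modules by
 * pairing its address-sized value with its stringized name. */
#define add_kernel_symbol(sym) _add_kernel_symbol((addr_t)(sym), #sym)
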
Example 7
struct inode *devfs_create(struct inode *base, char *name, mode_t mode)
{
	struct inode *i;
	i = (struct inode*)kmalloc(sizeof(struct inode));
	strncpy(i->name, name, INAME_LEN);
	i->i_ops = &devfs_inode_ops;
	i->parent = devfs_root;
	i->mode = mode | 0x1FF;
	i->num = add_atomic(&devfs_nodescount, 1);
	rwlock_create(&i->rwl);
	add_inode(base, i);
	return i;
}
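
Note: the mode is OR'd with 0x1FF (octal 0777), so every devfs node comes out readable and writable by everyone regardless of the mode passed in. A hypothetical caller, assuming the standard S_IFCHR file-type bit is available in this kernel:

/* Illustrative only: the node name "ttyS0" is made up, not taken from the
 * kernel's actual drivers; S_IFCHR marks the node as a character device. */
void example_register_device(void)
{
	struct inode *node = devfs_create(devfs_root, "ttyS0", S_IFCHR);
	(void)node;
}
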
Example 8
ext2_fs_t *get_new_fsvol()
{
	ext2_fs_t *fs = (ext2_fs_t *)kmalloc(sizeof(ext2_fs_t));
	fs->flag=add_atomic(&fs_num, 1);
	fs->sb = (ext2_superblock_t *)kmalloc(1024);
	fs->block_prev_alloc=0;
	fs->read_only=0;
	fs->m_node = mutex_create(0, 0);
	fs->m_block = mutex_create(0, 0);
	mutex_create(&fs->bg_lock, 0);
	mutex_create(&fs->fs_lock, 0);
	mutex_create(&fs->ac_lock, 0);
	char tm[32];
	/* use the volume number we just got back from add_atomic rather than
	 * re-reading fs_num, which another CPU may have bumped in the meantime */
	sprintf(tm, "ext2-%d", fs->flag);
	fs->cache = get_empty_cache(0, tm);
	fs->llnode = ll_insert(fslist, fs);
	return fs;
}
Example 9
cpu_t *fork_choose_cpu(task_t *parent)
{
	cpu_t *pc = parent->cpu;
	cpu_t *cpu = &cpu_array[__counter];
	add_atomic(&__counter, 1);
	if(__counter >= num_cpus)
		__counter=0;
	if(!(cpu->flags & CPU_TASK))
		return pc;
	if(cpu->active_queue->num < 2) return cpu;
	for(unsigned int i=0;i<num_cpus;i++) {
		cpu_t *tmp = &cpu_array[i];
		if(tmp->active_queue->num < cpu->active_queue->num)
			cpu = tmp;
	}
	if(!(cpu->flags & CPU_TASK))
		return pc;
	return cpu;
}
Example 10
int chroot(char *n)
{
	if(!n) 
		return -EINVAL;
	struct inode *i, *old = current_task->thread->root;
	if(current_task->thread->uid != 0)
		return -EPERM;
	i = get_idir(n, 0);
	if(!i)
		return -ENOENT;
	if(!is_directory(i)) {
		iput(i);
		return -ENOTDIR;
	}
	current_task->thread->root = i;
	add_atomic(&i->count, 1);
	ichdir(i);
	iput(old);
	return 0;
}
Example 11
int set_dirty(cache_t *c, struct ce_t *e, int dirty)
{
	int old = e->dirty;
	e->dirty=dirty;
	if(dirty)
	{
		if(old != dirty) {
			add_dlist(c, e);
			add_atomic(&c->dirty, 1);
		}
	} else
	{
		if(old != dirty) {
			assert(c->dirty);
			sub_atomic(&c->dirty, 1);
			remove_dlist(c, e);
		}
	}
	return old;
}
Example 12
void irq_handler(volatile registers_t regs)
{
#if CONFIG_ARCH == TYPE_ARCH_X86_64
	assert(((regs.ds&(~0x7)) == 0x10 || (regs.ds&(~0x7)) == 0x20) && ((regs.cs&(~0x7)) == 0x8 || (regs.cs&(~0x7)) == 0x18));
#endif
	/* ok, so the assembly entry function clears interrupts in the cpu, 
	 * but the kernel doesn't know that yet. So we clear the interrupt
	 * flag in the cpu structure as part of the normal set_int call, but
	 * it returns the interrupts-enabled flag from BEFORE the interrupt
	 * was received! F****n' brilliant! Back up that flag, so we can
	 * properly restore the flag later. */
	int previous_interrupt_flag = set_int(0);
	add_atomic(&int_count[regs.int_no], 1);
	/* save the registers so we can screw with iret later if we need to */
	char clear_regs=0;
	if(current_task && !current_task->regs) {
		/* of course, if we are already inside an interrupt, we shouldn't
		 * overwrite those. Also, we remember if we've saved this set of registers
		 * for later use */
		clear_regs=1;
		current_task->regs = &regs;
	}
	/* check if we're interrupting kernel code */
	char already_in_interrupt = 0;
	if(current_task->flags & TF_IN_INT)
		already_in_interrupt = 1;
	/* ...and set the flag so we know we're in an interrupt */
	raise_flag(TF_IN_INT);
	
	/* now, run through the stage1 handlers, and see if we need any
	 * stage2 handlers to run later */
	char need_second_stage = 0;
	for(int i=0;i<MAX_HANDLERS;i++)
	{
		if(interrupt_handlers[regs.int_no][i][0])
			(interrupt_handlers[regs.int_no][i][0])(&regs);
		if(interrupt_handlers[regs.int_no][i][1]) 
			need_second_stage = 1;
	}
	/* if we need a second stage handler, increment the count for this 
	 * interrupt number, and indicate that handlers should check for
	 * second stage handlers. */
	if(need_second_stage) {
		add_atomic(&stage2_count[regs.int_no], 1);
		maybe_handle_stage_2 = 1;
	}
	assert(!get_cpu_interrupt_flag());
	/* ok, now are we allowed to handle stage2's right here? */
	if(!already_in_interrupt && (maybe_handle_stage_2||need_second_stage))
	{
		maybe_handle_stage_2 = 0;
		/* handle the stage2 handlers. NOTE: this may change to only 
		 * handling one interrupt, and/or one function. For now, this works. */
		mutex_acquire(&s2_lock);
		for(int i=0;i<MAX_INTERRUPTS;i++)
		{
			if(stage2_count[i])
			{
				/* decrease the count for this interrupt number, and loop through
				 * all the second stage handlers and run them */
				sub_atomic(&stage2_count[i], 1);
				for(int j=0;j<MAX_HANDLERS;j++) {
					if(interrupt_handlers[i][j][1]) {
						(interrupt_handlers[i][j][1])(&regs);
					}
				}
			}
		}
		mutex_release(&s2_lock);
		assert(!get_cpu_interrupt_flag());
	}
	/* ok, now let's clean up */
	assert(!set_int(0));
	/* clear the registers if we saved the ones from this interrupt */
	if(current_task && clear_regs)
		current_task->regs=0;
	/* restore the flag in the cpu struct. The assembly routine will
	 * call iret, which will also restore the EFLAG state to what
	 * it was before, including the interrupts-enabled bit in eflags */
	set_cpu_interrupt_flag(previous_interrupt_flag);
	/* and clear the state flag if this is going to return to user-space code */
	if(!already_in_interrupt)
		lower_flag(TF_IN_INT);
	/* and send out the EOIs */
	if(interrupt_controller == IOINT_PIC) ack_pic(regs.int_no);
#if CONFIG_SMP
	lapic_eoi();
#endif
}
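
Note: irq_handler() above and isr_handler() in the next example both walk a table indexed as interrupt_handlers[int_no][slot][stage], where stage 0 runs immediately and stage 1 is deferred via stage2_count. The registration side is not among these examples; a hypothetical sketch of it follows (isr_t is assumed to be a void (*)(registers_t *) typedef, and the kernel's real API may differ):

/* Hypothetical registration into the interrupt_handlers table: find a free
 * slot for this interrupt number and record a stage-1 handler (run directly
 * in the handler) and/or a stage-2 handler (run later, via stage2_count).
 * Returns the slot index, or -1 if all MAX_HANDLERS slots are in use. */
int register_interrupt_handler(int int_no, isr_t stage1, isr_t stage2)
{
	for(int i = 0; i < MAX_HANDLERS; i++)
	{
		if(!interrupt_handlers[int_no][i][0] && !interrupt_handlers[int_no][i][1])
		{
			interrupt_handlers[int_no][i][0] = stage1;
			interrupt_handlers[int_no][i][1] = stage2;
			return i;
		}
	}
	return -1;
}
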
Example 13
/* this should NEVER be entered from an interrupt handler, 
 * and only from kernel code in the one case of calling
 * sys_setup() */
void entry_syscall_handler(volatile registers_t regs)
{
	/* don't need to save the flag here, since it will always be true */
#if CONFIG_ARCH == TYPE_ARCH_X86_64
	assert(regs.int_no == 0x80 && ((regs.ds&(~0x7)) == 0x10 || (regs.ds&(~0x7)) == 0x20) && ((regs.cs&(~0x7)) == 0x8 || (regs.cs&(~0x7)) == 0x18));
#endif
	set_int(0);
	add_atomic(&int_count[0x80], 1);
	if(current_task->flags & TF_IN_INT)
		panic(0, "attempted to enter syscall while handling an interrupt");
	/* set the interrupt handling flag... */
	raise_flag(TF_IN_INT);
#if CONFIG_ARCH == TYPE_ARCH_X86_64
	if(regs.rax == 128) {
#elif CONFIG_ARCH == TYPE_ARCH_X86
	if(regs.eax == 128) {
#endif
		/* the injection code at the end of the signal handler calls
		 * a syscall with eax = 128. So here we handle returning from
		 * a signal handler. First, copy back the old registers, and
		 * reset flags and signal stuff */
		memcpy((void *)&regs, (void *)&current_task->reg_b, sizeof(registers_t));
		current_task->sig_mask = current_task->old_mask;
		current_task->cursig=0;
		lower_flag(TF_INSIG);
		lower_flag(TF_JUMPIN);
	} else {
		assert(!current_task->sysregs && !current_task->regs);
		/* otherwise, this is a normal system call. Save the regs for modification
		 * for signals and exec */
		current_task->regs = &regs;
		current_task->sysregs = &regs;
		syscall_handler(&regs);
		assert(!get_cpu_interrupt_flag());
		/* handle stage2's here...*/
		if(maybe_handle_stage_2 || !current_task->syscall_count) {
			mutex_acquire(&s2_lock);
			for(int i=0;i<MAX_INTERRUPTS;i++)
			{
				if(stage2_count[i])
				{
					sub_atomic(&stage2_count[i], 1);
					for(int j=0;j<MAX_HANDLERS;j++) {
						if(interrupt_handlers[i][j][1]) {
							(interrupt_handlers[i][j][1])(&regs);
						}
					}
				}
			}
			mutex_release(&s2_lock);
		}
		assert(!get_cpu_interrupt_flag());
	}
	assert(!set_int(0));
	current_task->sysregs=0;
	current_task->regs=0;
	/* we don't need to worry about this being wrong, since we'll always be returning to
	 * user-space code */
	set_cpu_interrupt_flag(1);
	/* we're never returning to an interrupt, so we can
	 * safely reset this flag */
	lower_flag(TF_IN_INT);
#if CONFIG_SMP
	lapic_eoi();
#endif
}
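
Note: the eax == 128 branch above exists because the kernel injects a small stub after a user signal handler; when the handler returns, that stub issues syscall number 128 and the branch restores the saved pre-signal registers. The injected bytes are not shown in these examples; an illustrative C equivalent for x86, offered purely as a sketch:

/* Hypothetical stand-in for the injected signal-return stub: load 128 into
 * eax and trap into the kernel via int 0x80 so entry_syscall_handler()
 * takes its signal-return path. Not the kernel's actual injected code. */
static void __attribute__((noreturn)) signal_return_stub(void)
{
	__asm__ __volatile__("int $0x80" : : "a"(128));
	__builtin_unreachable();
}
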

/* This gets called from our ASM interrupt handler stub. */
void isr_handler(volatile registers_t regs)
{
#if CONFIG_ARCH == TYPE_ARCH_X86_64
	assert(((regs.ds&(~0x7)) == 0x10 || (regs.ds&(~0x7)) == 0x20) && ((regs.cs&(~0x7)) == 0x8 || (regs.cs&(~0x7)) == 0x18));
#endif
	/* this is explained in the IRQ handler */
	int previous_interrupt_flag = set_int(0);
	add_atomic(&int_count[regs.int_no], 1);
	/* check if we're interrupting kernel code, and set the interrupt
	 * handling flag */
	char already_in_interrupt = 0;
	if(current_task->flags & TF_IN_INT)
		already_in_interrupt = 1;
	raise_flag(TF_IN_INT);
	/* run the stage1 handlers, and see if we need any stage2s. And if we
	 * don't handle it at all, we need to actually fault to handle the error
	 * and kill the process or kernel panic */
	char called=0;
	char need_second_stage = 0;
	for(int i=0;i<MAX_HANDLERS;i++)
	{
		if(interrupt_handlers[regs.int_no][i][0] || interrupt_handlers[regs.int_no][i][1])
		{
			/* we're able to handle the error! */
			called = 1;
			if(interrupt_handlers[regs.int_no][i][0])
				(interrupt_handlers[regs.int_no][i][0])(&regs);
			if(interrupt_handlers[regs.int_no][i][1])
				need_second_stage = 1;
		}
	}
	if(need_second_stage) {
		/* we need to run a second stage handler. Indicate that here... */
		add_atomic(&stage2_count[regs.int_no], 1);
		maybe_handle_stage_2 = 1;
	}
	/* clean up... Also, we don't handle stage 2 in ISR handling, since this
	 * can occur from within a stage2 handler */
	assert(!set_int(0));
	/* if it went unhandled, kill the process or panic */
	if(!called)
		faulted(regs.int_no, !already_in_interrupt, regs.eip);
	/* restore previous interrupt state */
	set_cpu_interrupt_flag(previous_interrupt_flag);
	if(!already_in_interrupt)
		lower_flag(TF_IN_INT);
	/* send out the EOI... */
#if CONFIG_SMP
	lapic_eoi();
#endif
}