Code Example #1
File: tick.c  Project: bithinalangot/seakernel
void timer_handler(registers_t r)
{
	/* prevent multiple cpus from adding to ticks */
	if(!current_task || !current_task->cpu || ((cpu_t *)current_task->cpu) == primary_cpu)
		add_atomic(&ticks, 1);
	/* engage the idle task occasionally (every current_hz*10 ticks).
	 * note the parentheses: '%' binds tighter than '*' in C */
	if((ticks % (current_hz * 10)) == 0)
		__engage_idle();
	do_tick();
}
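
The guard above means only the primary CPU (or the boot path, before tasks exist) bumps the global tick count, and add_atomic() keeps the increment itself safe against concurrent callers. The primitive is seakernel's own; a minimal stand-in built on the GCC __sync builtins (an assumption for illustration, not the project's implementation) might look like:

/* Hypothetical add_atomic(): __sync_add_and_fetch() performs the
 * read-modify-write as a single atomic operation, so no increment is
 * lost even if several CPUs race on the same counter. */
static inline int add_atomic(volatile int *value, int amount)
{
	return __sync_add_and_fetch(value, amount);
}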
Code Example #2
File: fork.c  Project: bithinalangot/seakernel
int do_fork(unsigned flags)
{
	assert(current_task && kernel_task);
	assert(running_processes < (unsigned)MAX_TASKS || MAX_TASKS == -1);
	addr_t eip;
	task_t *task = task_create();
	page_dir_t *newspace;
	if(flags & FORK_SHAREDIR)
		newspace = vm_copy(current_task->pd);
	else
		newspace = vm_clone(current_task->pd, 0);
	if(!newspace)
	{
		kfree((void *)task);
		return -ENOMEM;
	}
	/* set the address space's entry for the current task.
	 * this is a fast and easy way to store the "what task am I" data
	 * that gets automatically updated when the scheduler switches
	 * into a new address space */
	arch_specific_set_current_task(newspace, (addr_t)task);
	/* Create the new task structure */
	task->pd = newspace;
	copy_task_struct(task, current_task, flags & FORK_SHAREDAT);
	add_atomic(&running_processes, 1);
	/* Set the state to usleep temporarily so the task cannot accidentally
	 * run before setup completes, then add it to the queue */
	task->state = TASK_USLEEP;
	tqueue_insert(primary_queue, (void *)task, task->listnode);
	cpu_t *cpu = (cpu_t *)current_task->cpu;
#if CONFIG_SMP
	cpu = fork_choose_cpu(current_task);
#endif
	/* Disable interrupts and copy the stack */
	set_int(0);
	engage_new_stack(task, current_task);
	/* Here we read the EIP of this exact location. The parent then sets the
	 * eip of the child to this. On the reschedule for the child, it will 
	 * start here as well. */
	volatile task_t *parent = current_task;
	store_context_fork(task);
	eip = read_eip();
	if(current_task == parent)
	{
		/* These last things allow full execution of the task */
		task->eip = eip;
		task->state = TASK_RUNNING;
		task->cpu = cpu;
		add_atomic(&cpu->numtasks, 1);
		tqueue_insert(cpu->active_queue, (void *)task, task->activenode);
		__engage_idle();
		return task->pid;
	}
	return 0;
}
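
The subtle part is the read_eip()/store_context_fork() pair: the child inherits the EIP captured at the read_eip() call site, so both tasks eventually resume on the line after it. The parent sees current_task == parent and finishes setting the child up; the child, scheduled later in its own address space, falls through to return 0, producing fork()'s classic two return values. read_eip() itself is arch code; the classic 32-bit x86 version (a sketch assuming GCC inline assembly, not necessarily seakernel's exact code) is two instructions:

/* Sketch of read_eip() for 32-bit x86: the caller's return address sits
 * on top of the stack, so popping it into %eax both produces the return
 * value and lets us jump straight back to the caller. */
__asm__(
	".globl read_eip\n"
	"read_eip:\n"
	"	pop %eax\n"    /* return address -> eax (becomes the result) */
	"	jmp *%eax\n"   /* resume in the caller with eax = its own EIP */
);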
Code Example #3
File: physical.c  Project: imgits/seaos-kernel
addr_t __pm_alloc_page(char *file, int line)
{
	if(!pm_location)
		panic(PANIC_MEM | PANIC_NOSYNC, "Physical memory allocation before initialization");
	unsigned ret;
	unsigned flag=0;
	try_again:
	ret=0;
	if(current_task) current_task->allocated++;
	mutex_on(&pm_mutex);
	if(paging_enabled)
	{
		if(pm_stack <= (PM_STACK_ADDR+sizeof(unsigned)*2)) {
			if(current_task == kernel_task || !current_task)
				panic(PANIC_MEM | PANIC_NOSYNC, "Ran out of physical memory");
			mutex_off(&pm_mutex);
			if(OOM_HANDLER == OOM_SLEEP) {
				if(!flag++) 
					printk(0, "Warning - Ran out of physical memory in task %d\n", 
							current_task->pid);
				task_full_uncritical();
				__engage_idle();
				force_schedule();
				goto try_again;
			} else if(OOM_HANDLER == OOM_KILL)
			{
				printk(0, "Warning - Ran out of physical memory in task %d. Killing...\n", 
						current_task->pid);
				exit(-10);
			}
			else
				panic(PANIC_MEM | PANIC_NOSYNC, "Ran out of physical memory");
		}
		pm_stack -= sizeof(unsigned int);
		ret = *(unsigned int *)pm_stack;
	} else {
		ret = pm_location;
		pm_location+=PAGE_SIZE;
	}
	++pm_used_pages;
	mutex_off(&pm_mutex);
	if(current_task)
		current_task->num_pages++;
	if(!ret)
		panic(PANIC_MEM | PANIC_NOSYNC, "found zero address in page stack\n");
	if(((ret > (unsigned)highest_page) || ret < (unsigned)lowest_page) 
			&& memory_has_been_mapped)
		panic(PANIC_MEM | PANIC_NOSYNC, "found invalid address in page stack: %x\n", ret);
	return ret;
}
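
Once paging is up, free physical frames live on a simple stack of addresses based at PM_STACK_ADDR: allocation pops, so freeing must push. The project's real __pm_free_page() is not shown here; a minimal sketch of the push side, assuming the same pm_stack layout and locking as above, would be:

/* Hypothetical push counterpart to the pop in __pm_alloc_page():
 * store the frame address at the current stack top, then grow the
 * stack pointer by one slot. */
void __pm_free_page_sketch(addr_t addr)
{
	mutex_on(&pm_mutex);
	*(unsigned int *)pm_stack = (unsigned int)addr;
	pm_stack += sizeof(unsigned int);
	--pm_used_pages;
	mutex_off(&pm_mutex);
}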
Code Example #4
int syscall_handler(volatile registers_t *regs)
{
	/* SYSCALL_NUM_AND_RET is defined to be the correct register in the syscall regs struct. */
	if(unlikely(SYSCALL_NUM_AND_RET >= num_syscalls))
		return -ENOSYS;
	if(unlikely(!syscall_table[SYSCALL_NUM_AND_RET]))
		return -ENOSYS;
	volatile long ret;
	if(!check_pointers(regs))
		return -EINVAL;
	if(kernel_state_flags & KSF_SHUTDOWN)
		for(;;);
	//if(got_signal(current_task) || (unsigned)(ticks-current_task->slice) > (unsigned)current_task->cur_ts)
	//	schedule();
	enter_system(SYSCALL_NUM_AND_RET);
	/* most syscalls are re-entrant, so we enable interrupts and
	 * expect handlers to disable them if needed */
	set_int(1);
	/* start accounting information! */
	current_task->freed = current_task->allocated = 0;
	
	#ifdef SC_DEBUG
	if(current_task->tty == curcons->tty) 
		printk(SC_DEBUG, "syscall %d: enter %d\n", current_task->pid, SYSCALL_NUM_AND_RET);
	int or_t = ticks;
	#endif
	__do_syscall_jump(ret, syscall_table[SYSCALL_NUM_AND_RET], _E_, _D_, 
					  _C_, _B_, _A_);
	#ifdef SC_DEBUG
	/* the "|| 1" clauses disable each filter; drop one to log only slow,
	 * failing, or interrupted syscalls respectively */
	if(current_task->tty == curcons->tty && (ticks - or_t >= 10 || 1) 
		&& (ret < 0 || 1) && (ret == -EINTR || 1))
		printk(SC_DEBUG, "syscall %d: %d ret %d, took %d ticks\n", 
			   current_task->pid, current_task->system, ret, ticks - or_t);
	#endif
		
	set_int(0);
	exit_system();
	__engage_idle();
	/* reschedule if the task has been flagged, has overrun its timeslice,
	 * or has made several syscalls since the last reschedule. this keeps a
	 * task that calls write() in a tight loop from starving other tasks;
	 * syscall_count resets on each call to schedule() */
	if(current_task->flags & TF_SCHED 
		|| (unsigned)(ticks-current_task->slice) > (unsigned)current_task->cur_ts
		|| ++current_task->syscall_count > 2)
	{
		/* clear out the flag. Either way in the if statement, we've rescheduled. */
		lower_flag(TF_SCHED);
		schedule();
	}
	/* store the return value in the regs */
	SYSCALL_NUM_AND_RET = ret;
	/* if we're going to jump to a signal handler here, back up the return
	 * value so that when we return to the system, the original system call
	 * still returns the proper value.
	 */
	if((current_task->flags & TF_INSIG) && (current_task->flags & TF_JUMPIN)) {
#if CONFIG_ARCH == TYPE_ARCH_X86
		current_task->reg_b.eax=ret;
#elif CONFIG_ARCH == TYPE_ARCH_X86_64
		current_task->reg_b.rax=ret;
#endif
		lower_flag(TF_JUMPIN);
	}
	return ret;
}
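
__do_syscall_jump() is an arch-specific macro that calls through syscall_table with the five register-passed arguments (_A_ through _E_) and captures the result in ret. Conceptually (a C-level sketch under that assumption, not the project's real macro) it reduces to a cast-and-call:

/* Hypothetical equivalent: every handler is invoked through a
 * five-argument prototype; under the x86 cdecl convention, handlers
 * that take fewer arguments simply ignore the extras. */
typedef long (*syscall_fn_t)(long, long, long, long, long);
#define __do_syscall_jump_sketch(ret, entry, e, d, c, b, a) \
	do { (ret) = ((syscall_fn_t)(entry))((e), (d), (c), (b), (a)); } while(0)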