Example #1
void init_bash() {
	current_terminal = 1;
	monitor_switch_to(terminals[current_terminal], terminals[current_terminal]);
	switch_to(2);
	switch_to(1);

	/* Zero every terminal's buffer cursor. */
	int i = 0;
	while (i < TERMINAL_QTY) {
		bufPointer[i] = 0;
		i++;
	}
	/* Null-fill every terminal's input buffer. */
	i = 0;
	while (i < BASH_BUFFER) {
		int b = 0;
		while (b < TERMINAL_QTY) {
			buffers[b++][i] = '\0';
		}
		i++;
	}
	/* Copy terminal (current_terminal - 1) into the remaining terminals. */
	i = 1;
	int j = 0;
	while (i < TERMINAL_QTY) {
		while (j < TERMINAL_SIZE) {
			terminals[i][j] = terminals[current_terminal - 1][j];
			j++;
		}
		i++;
		j = 0;
	}
}
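Example #2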
void
ping()
{
  while (1) {
    switch_to(&ctx_B);
    switch_to(&ctx_B);
  }
}
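Example #3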
void
pong()
{
  while (1) {
    switch_to(&ctx_A);
    switch_to(&ctx_A);
    switch_to(&ctx_A);
  }
}
Example #4
	virtual void seek(t_uint64 p_samples,abort_callback & p_abort) {
		if (p_samples < m_head.samples) {
			switch_to(m_head);
			user_seek(p_samples,p_abort);
		} else {
			p_samples -= m_head.samples;
			switch_to(m_body);
			user_seek(p_samples,p_abort);
		}
	}
Example #5
/* Receive an IPC from another thread. */
NORETURN void
sys_ipc_recv(word_t src_thread, word_t operation)
{
    /* Ensure the thread exists. */
    if (EXPECT_FALSE(src_thread >= max_tcbs && src_thread != IPC_WAIT_ANY)) {
        syscall_return_error(IPC_ERROR, EINVAL);
    }

    spin_lock(&ipc_lock);

    /* Get first thread on send queue. */
    tcb_t *src = current_tcb->ipc_send_head;

    /* If there are no threads, go to sleep. */
    if (src == NULL) {
        current_tcb->ipc_waiting_for = src;
        tcb_t *next = deactivate_self_schedule(THREAD_STATE_WAIT_IPC_RECV);
        spin_unlock(&ipc_lock);
        switch_to(next);
    }

    /* Otherwise, dequeue the thread. */
    dequeue_send(current_tcb, src);

    /* Copy the message across and wake up the sender. */
    copy_message(src, current_tcb);

    /* Is the sender performing a call? */
    if (src->thread_state == THREAD_STATE_WAIT_IPC_CALL) {
        /* Move it into receive phase. 'ipc_waiting_for' should already be set
         * to us. */
        word_t tid = src->tid;
        ASSERT(src->ipc_waiting_for == current_tcb);
        src->thread_state = THREAD_STATE_WAIT_IPC_RECV;
        spin_unlock(&ipc_lock);
        syscall_return_success(tid);
        /* NOTREACHED */
    }

    /* If the sender was not performing a call, wake them up. */
    src->ipc_waiting_for = NULL;
    set_syscall_return_val_success(src, 0);
    set_syscall_return_val_success(current_tcb, src->tid);
    tcb_t *next = activate_schedule(src);
    spin_unlock(&ipc_lock);
    switch_to(next);
    /* NOTREACHED */
}
Example #6
/*
 * Kernel scheduler. NOTE! The scheduling of process 0 does not depend on the
 * state and counter fields. If no runnable process other than process 0 has a
 * time slice greater than 0, the time slices of all processes except process 0
 * are reset. Because this may include non-runnable processes, and counter may
 * be less than 0 (by default the kernel does not reschedule a process that is
 * in kernel mode when the clock interrupt occurs), the reset is
 * counter = priority + counter.
 */
BOOL schedule(void)
{
	/* NOTE! Do not change the initial values of next and counter!!! */
	int next = -1;		/* index in the process slots of the next process to run */
	long counter = 0;	/* its remaining number of time slices */

	while(1) {
		/* Search backwards for the runnable process with the highest priority. NOTE! Process 0 is not handled below. */
		for(int i=NR_PROC-1; i; i--) {
			if((NULL != proc[i]) && (RUNNING == proc[i]->state)
			&& (counter < proc[i]->counter)) {
				next = i;
				counter = proc[i]->counter;
			}
		}
		if(counter > 0)
			break;
		for(int i=NR_PROC-1; i; i--) {
			if((NULL != proc[i])) {
				(RUNNING == proc[i]->state) ? next = i : 0;
				proc[i]->counter = proc[i]->priority + proc[i]->counter;
			}
		}
		if(-1 == next)	/* no runnable process other than process 0 */
			break;
	}

	-1==next ? next=0 : 0;			/* if only process 0 is runnable, point next at process 0 */
	if(current != proc[next]) {		/* do not switch if it is already the current process */
		switch_to(next);
		return TRUE;
	}

	return FALSE;
}
Example #7
static void
context_switch(struct task_struct *prev,
	       struct task_struct *next)
{
  switch_pgd(next->mm.pgd, next->pid);
  switch_to(prev, next);
}
Example #8
/*
 * Send an IPC message to the destination thread.
 *
 * By the time we reach this function, our registers containing the message
 * payload will have already been saved to our context frame. We are
 * responsible for just copying from there.
 */
NORETURN void
sys_ipc_send_c(word_t dest_thread, word_t operation)
{
    /* Ensure the thread exists. */
    if (EXPECT_FALSE(dest_thread >= max_tcbs)) {
        syscall_return_error(IPC_ERROR, EINVAL);
    }

    /* Find the destination thread. */
    tcb_t *dest = &tcbs[dest_thread];

    /* Take IPC lock. */
    spin_lock(&ipc_lock);

    /* Ensure the thread is still alive. */
    if (EXPECT_FALSE(!is_thread_alive(dest))) {
        spin_unlock(&ipc_lock);
        syscall_return_error(IPC_ERROR, EINVAL);
    }

    /* Do the IPC. */
    tcb_t *next = do_send(dest, OPERATION_IS_CALL(operation),
            OPERATION_IS_NON_BLOCKING(operation));

    /* Done. */
    spin_unlock(&ipc_lock);
    switch_to(next);
}
Example #9
	virtual bool on_eof_event(abort_callback & p_abort) {
		if (get_no_looping() && m_current == &m_body)
			return false;
		switch_to(m_body);
		raw_seek((t_uint64)0,p_abort);
		return true;
	}
Example #10
_Noreturn void sched_start(void)
{
	idle_pid = create_kernel_process(idle_proc, NULL, 0);
	create_init();
	current = next();
	switch_to(current);
}
Example #11
void prempt(int swap) {
    volatile process* p = get_ready_process();
    //	//print("Trying to execute process = %s with id = %d",p->p_name,p->pid);
    if(p==NULL)
        return;

    if(current_running_process->state == TASK_UNINTERRUPTIBLE) {
        return;
    }

    if(current_running_process->state == TASK_ZOMBIE) {
        add_to_zombie_queue(current_running_process);
    }

    if(current_running_process->state != TASK_WAITING && current_running_process->state != TASK_ZOMBIE) {
        current_running_process->state = TASK_SWAPPING;
        add_to_readyQ(current_running_process);
    }
    //disp_queue();
    /* p was already checked for NULL above; either switch to it or start it running. */
    if(p->reg.rsi != 0) {
        switch_to(p);
    } else {
        run_process(p);
    }

}
Example #12
void change_task_to(struct task_struct *next)
{
	if (current != next) {
		struct task_struct *prev = current;
		current = next;
		switch_to(&(prev->context), &(current->context));
	}
}
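Example #12 above, like the proc_run variants in Examples #28-#30 further down, hands two per-task context pointers to switch_to. The real structure and the switch routine are architecture-specific assembly not included in these excerpts; the following is only a hedged sketch, with names invented here, of the kind of layout such a routine typically saves and restores.
/* Hedged sketch only -- not the actual struct context used by the examples.
 * A two-argument switch_to(&prev->context, &next->context) typically stores
 * prev's callee-saved registers and stack pointer into its context, then
 * reloads the same set from next's context, so that the call "returns" on
 * next's kernel stack at next's saved return address. */
struct context_sketch {
    unsigned long ra;      /* return address where `next` resumes   */
    unsigned long sp;      /* kernel stack pointer                  */
    unsigned long s[12];   /* callee-saved registers (e.g. s0..s11) */
};

/* Implemented in assembly in the kernels quoted above. */
void switch_to_sketch(struct context_sketch *from, struct context_sketch *to);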
Example #13
void title_handle_keypress(unsigned char key, int x, int y)
{
    switch (key) 
    {
        case ' ':
        case 13:
            if (menu_selected == 0)
                switch_to(SCENE_GAME);
            else if (menu_selected == 1)
                switch_to(SCENE_RANKING);
            else if (menu_selected == 2)
                ;   /* options scene not yet enabled */
//                switch_to(SCENE_OPTIONS);
            else
                exit(0);
            break;
    }
}
Example #14
int strcpy_to_user(struct proc *p, char *dst, const char *src)
{
	uintptr_t prev = switch_to(p);
	int error = string_copy_to_user(dst, src);

	switch_back(p, prev);

	return error;
}
Example #15
int memcpy_to_user(struct proc *p, void *dest, const void *src, size_t len)
{
	uintptr_t prev = switch_to(p);
	int error = copy_to_user(dest, src, len);

	switch_back(p, prev);

	return error;
}
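Examples #14 and #15 follow the same bracket pattern: switch_to(p) temporarily enters the target process's address space, one copy runs, and switch_back() restores the previous space. As a hedged illustration only, the wrapper below generalizes that pattern; the wrapper name and callback type are invented for this sketch, and only switch_to()/switch_back() come from the examples.
#include <stddef.h>   /* size_t */
#include <stdint.h>   /* uintptr_t */

/* Hypothetical helper (not part of the quoted kernel): run a single
 * user-copy operation inside process p's address space. */
typedef int (*user_copy_fn)(void *dst, const void *src, size_t len);

static int with_proc_address_space(struct proc *p, user_copy_fn fn,
                                   void *dst, const void *src, size_t len)
{
	uintptr_t prev = switch_to(p);   /* enter p's address space */
	int error = fn(dst, src, len);   /* perform the copy */

	switch_back(p, prev);            /* restore the previous space */

	return error;
}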
Example #16
/**
 * @brief Yields the processor.
 */
PUBLIC void yield(void)
{
	struct process *p;    /* Working process.     */
	struct process *next; /* Next process to run. */

	/* Re-schedule process for execution. */
	if (curr_proc->state == PROC_RUNNING)
		sched(curr_proc);

	/* Remember this process. */
	last_proc = curr_proc;

	/* Check alarm. */
	for (p = FIRST_PROC; p <= LAST_PROC; p++)
	{
		/* Skip invalid processes. */
		if (!IS_VALID(p))
			continue;
		
		/* Alarm has expired. */
		if ((p->alarm) && (p->alarm < ticks))
			p->alarm = 0, sndsig(p, SIGALRM);
	}

	/* Choose a process to run next. */
	next = IDLE;
	for (p = FIRST_PROC; p <= LAST_PROC; p++)
	{
		/* Skip non-ready process. */
		if (p->state != PROC_READY)
			continue;
		
		/*
		 * Process with higher
		 * waiting time found.
		 */
		if (p->counter > next->counter)
		{
			next->counter++;
			next = p;
		}
			
		/*
		 * Increment waiting
		 * time of process.
		 */
		else
			p->counter++;
	}
	
	/* Switch to next process. */
	next->priority = PRIO_USER;
	next->state = PROC_RUNNING;
	next->counter = PROC_QUANTUM;
	switch_to(next);
}
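Example #17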
//------------------------------------------------------------------------
int
notmain ( void )
{
  /* Create the two coroutine contexts, each with its own stack. */
  init_ctx(&ctx_A, ping, STACK_SIZE);
  init_ctx(&ctx_B, pong, STACK_SIZE);

  current_ctx = &ctx_init;

  /* Hand control to ping; ping and pong then switch between each other
   * and never return here. */
  switch_to(&ctx_A);
}
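The ping()/pong() routines above and the notmain() that starts them implement a bare-metal coroutine handoff through init_ctx()/switch_to(). As a hedged, user-space-only sketch of the same handoff pattern (none of the names below come from that codebase, and the round count is arbitrary), the equivalent can be written with POSIX <ucontext.h>:
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define STACK_SIZE (64 * 1024)

static ucontext_t ctx_main, ctx_ping, ctx_pong;

static void ping(void)
{
    for (int i = 0; i < 3; i++) {
        printf("ping %d\n", i);
        swapcontext(&ctx_ping, &ctx_pong);   /* hand control to pong */
    }
    swapcontext(&ctx_ping, &ctx_main);       /* return to main */
}

static void pong(void)
{
    for (;;) {
        printf("pong\n");
        swapcontext(&ctx_pong, &ctx_ping);   /* hand control back to ping */
    }
}

int main(void)
{
    /* Give each coroutine its own stack, mirroring init_ctx() above. */
    getcontext(&ctx_ping);
    ctx_ping.uc_stack.ss_sp = malloc(STACK_SIZE);
    ctx_ping.uc_stack.ss_size = STACK_SIZE;
    ctx_ping.uc_link = &ctx_main;
    makecontext(&ctx_ping, ping, 0);

    getcontext(&ctx_pong);
    ctx_pong.uc_stack.ss_sp = malloc(STACK_SIZE);
    ctx_pong.uc_stack.ss_size = STACK_SIZE;
    ctx_pong.uc_link = &ctx_main;
    makecontext(&ctx_pong, pong, 0);

    swapcontext(&ctx_main, &ctx_ping);       /* start with ping */
    return 0;
}
Each ucontext_t plays the role of ctx_A/ctx_B, and swapcontext() stands in for switch_to(); the per-coroutine malloc'd stacks mirror what init_ctx() sets up with STACK_SIZE.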
Example #18
/* Writes the msg to the vcpd mbox of the vcore.  If you want the private mbox,
 * send in the ev_flag EVENT_VCORE_PRIVATE.  If not, the message could
 * be received by other vcores if the given vcore is offline/preempted/etc.
 * Whatever other flags you pass in will get sent to post_ev_msg.  Currently,
 * the only one that will get looked at is NO_MSG (set a bit).
 *
 * This needs to load current (switch_to), but doesn't need to care about what
 * the process wants.  Note this isn't commonly used - just the monitor and
 * sys_self_notify(). */
void post_vcore_event(struct proc *p, struct event_msg *msg, uint32_t vcoreid,
                      int ev_flags)
{
	/* Need to set p as current to post the event */
	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
	struct proc *old_proc = switch_to(p);
	/* *ev_mbox is the user address of the vcpd mbox */
	post_vc_msg(p, vcoreid, get_vcpd_mbox(vcoreid, ev_flags), msg, ev_flags);
	switch_back(p, old_proc);
}
Example #19
/**
 * Switch to the new mm_struct and restore register/stack state to next.
 * @prev: previously running (current) task
 * @next: task to switch to
 */
static inline void __attribute__((always_inline)) context_switch(struct task_struct *prev, struct task_struct *next) {
    struct mm_struct *mm, *prev_mm;
    mm = next->mm;
    prev_mm = prev->mm;

    switch_mm(prev_mm, mm);

    switch_to(prev, next);
    /* next is now the current task */
}
Example #20
/* Start Linux */
static int start_linux(uint32_t kern_addr, struct linux_params *params)
{
    struct segment_desc *linux_gdt;
    struct context *ctx;
    //extern int cursor_x, cursor_y;

    ctx = init_context(phys_to_virt(STACK_LOC), 4096, 0);

    /* Linux expects GDT being in low memory */
    linux_gdt = phys_to_virt(GDT_LOC);
    memset(linux_gdt, 0, 13*sizeof(struct segment_desc));
    /* Normal kernel code/data segments */
    linux_gdt[2] = gdt[FLAT_CODE];
    linux_gdt[3] = gdt[FLAT_DATA];
    /* 2.6 kernel uses 12 and 13, but head.S uses backward-compatible
     * segments (2 and 3), so it SHOULD not be a problem.
     * However, some distro kernels (eg. RH9) with backported threading
     * patch use 12 and 13 also when booting... */
    linux_gdt[12] = gdt[FLAT_CODE];
    linux_gdt[13] = gdt[FLAT_DATA];
    ctx->gdt_base = GDT_LOC;
    ctx->gdt_limit = 14*8-1;
    ctx->cs = 0x10;
    ctx->ds = 0x18;
    ctx->es = 0x18;
    ctx->fs = 0x18;
    ctx->gs = 0x18;
    ctx->ss = 0x18;

    /* Parameter location */
    ctx->esi = virt_to_phys(params);

    /* Entry point */
    ctx->eip = kern_addr;

    debug("eip=%#x\n", kern_addr);
    printf("Jumping to entry point...\n");

#ifdef VGA_CONSOLE
    /* Update VGA cursor position.
     * This must be here because the printf changes the value! */
    params->orig_x = cursor_x;
    params->orig_y = cursor_y;
#endif

    /* Go... */
    ctx = switch_to(ctx);

    /* Returning here should be impossible, but just in case... */
    printf("Returned with eax=%#x\n", ctx->eax);

    return ctx->eax;
}
Example #21
int schedule(void)
{
	struct task_struct *next = pick_next_task();
	struct task_struct *prev = current;

	clear_tsk_need_resched(prev);
	switch_to(prev, next, prev);
	if (unlikely(prev->state == TASK_DEAD))
		put_task_struct(prev);

	return 0;
}
Example #22
void
xf86CloseConsole(void)
{
    struct vt_mode VT;
    int ret;

    if (xf86Info.ShareVTs) {
        close(xf86Info.consoleFd);
        return;
    }

    /*
     * unregister the drain_console handler
     * - what to do if someone else changed it in the meantime?
     */
    xf86SetConsoleHandler(NULL, NULL);

    /* Back to text mode ... */
    SYSCALL(ret = ioctl(xf86Info.consoleFd, KDSETMODE, KD_TEXT));
    if (ret < 0)
        xf86Msg(X_WARNING, "xf86CloseConsole: KDSETMODE failed: %s\n",
                strerror(errno));

    SYSCALL(ioctl(xf86Info.consoleFd, KDSKBMUTE, 0));
    SYSCALL(ioctl(xf86Info.consoleFd, KDSKBMODE, tty_mode));
    tcsetattr(xf86Info.consoleFd, TCSANOW, &tty_attr);

    SYSCALL(ret = ioctl(xf86Info.consoleFd, VT_GETMODE, &VT));
    if (ret < 0)
        xf86Msg(X_WARNING, "xf86CloseConsole: VT_GETMODE failed: %s\n",
                strerror(errno));
    else {
        /* set dflt vt handling */
        VT.mode = VT_AUTO;
        SYSCALL(ret = ioctl(xf86Info.consoleFd, VT_SETMODE, &VT));
        if (ret < 0)
            xf86Msg(X_WARNING, "xf86CloseConsole: VT_SETMODE failed: %s\n",
                    strerror(errno));
    }

    if (xf86Info.autoVTSwitch) {
        /*
         * Switch back to the VT that was active when we were started.
         */
        if (activeVT >= 0) {
            switch_to(activeVT, "xf86CloseConsole");
            activeVT = -1;
        }
    }
    close(xf86Info.consoleFd);  /* make the vt-manager happy */
}
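Example #23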
/* Start client image */
uint64_t start_client_image(uint64_t entry_point, uint64_t cif_handler)
{
    struct context *ctx;

    ctx = init_context(image_stack, sizeof image_stack, 0);
    ctx->pc  = entry_point;
    ctx->npc = entry_point+4;
    ctx->regs[REG_O0] = 0;
    ctx->regs[REG_O0+4] = cif_handler;

    ctx = switch_to(ctx);

    return 0;
}
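Example #24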
/* Start ELF Boot image */
uint64_t start_elf(uint64_t entry_point, uint64_t param)
{
    struct context *ctx;

    ctx = init_context(image_stack, sizeof image_stack, 1);
    ctx->pc = entry_point;
    ctx->param[0] = param;
    //ctx->eax = 0xe1fb007;
    //ctx->ebx = param;

    ctx = switch_to(ctx);
    //return ctx->eax;
    return 0;
}
Example #25
  virtual void on_special_key(int key, int x, int y)
  {
    switch (key)
    {
      case GLUT_KEY_PAGE_UP:
        if (current > 0) switch_to(--current);
        break;

      case GLUT_KEY_PAGE_DOWN:
        if (current < num-1) switch_to(++current);
        break;

      case GLUT_KEY_HOME:
        switch_to(current = 0);
        break;

      case GLUT_KEY_END:
        switch_to(current = num-1);
        break;

      default: Base::on_special_key(key, x, y);
    }
  }
Example #26
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
               struct task_struct *next)
{
    struct mm_struct *mm, *oldmm;

    prepare_task_switch(rq, prev, next);

    mm = next->mm;
    oldmm = prev->active_mm;
    /*
     * For paravirt, this is coupled with an exit in switch_to to
     * combine the page table reload and the switch backend into
     * one hypercall.
     */
    arch_start_context_switch(prev);

    if (!mm) {
        next->active_mm = oldmm;
        atomic_inc(&oldmm->mm_count);
        enter_lazy_tlb(oldmm, next);
    } else
        switch_mm(oldmm, mm, next);

    if (!prev->mm) {
        prev->active_mm = NULL;
        rq->prev_mm = oldmm;
    }
    /*
     * The runqueue lock will be released by the next
     * task (which is an invalid locking op but in the case
     * of the scheduler it's an obvious special-case), so we
     * do an early lockdep release here:
     */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
    spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif

    /* Here we just switch the register state and the stack. */
    switch_to(prev, next, prev);

    barrier();
    /*
     * this_rq must be evaluated again because prev may have moved
     * CPUs since it called schedule(), thus the 'rq' on its stack
     * frame will be invalid.
     */
    finish_task_switch(this_rq(), prev);
}
Example #27
void out_of_memory() {
    /* Reclaim memory by tearing down 20 ready processes. */
    for(int i=0; i<20; i++) {
        volatile process *p = get_ready_process();
        invalidate_r_queue(p->pid);
        invalidate_w_queue(p->pid);
        child_update(p->pid);
        free_process_memory(p);
    }
    /* Resume the next ready process. */
    volatile process *p = get_ready_process();
    if(p->reg.rsi != 0) {
        switch_to(p);
    } else {
        run_process(p);
    }
}
Example #28
// proc_run - make process "proc" run on this cpu
// NOTE: before calling switch_to, the base address of "proc"'s new PDT must be loaded
void
proc_run(struct proc_struct *proc) {
    if (proc != current) {
        bool intr_flag;
        struct proc_struct *prev = current, *next = proc;
        local_intr_save(intr_flag);
        {
            current = proc;
            load_esp0(next->kstack + KSTACKSIZE);
            lcr3(next->cr3);
            switch_to(&(prev->context), &(next->context));
        }
        local_intr_restore(intr_flag);
    }
}
Example #29
// proc_run - make process "proc" run on this cpu
// NOTE: before calling switch_to, the base address of "proc"'s new PDT must be loaded
void
proc_run(struct proc_struct *proc) {
    if (proc != current) {
        unsigned long intr_flag;
        struct proc_struct *prev = current, *next = proc;
        local_irq_save(intr_flag);
        {
            current = proc;
            load_esp0(next->kstack + KSTACKSIZE);
            load_page_dir(next->pgdir_addr);
            switch_to(&(prev->context), &(next->context));
        }
        local_irq_restore(intr_flag);
    }
}
Example #30
// proc_run - make process "proc" run on this cpu
// NOTE: before calling switch_to, the base address of "proc"'s new PDT must be loaded
void
proc_run(struct proc_struct *proc) {
    if (proc != current) {
        bool intr_flag;
        struct proc_struct *prev = current, *next = proc;
		// kprintf("(%d) => %d\n", lapic_id, next->pid);
        local_intr_save(intr_flag);
        {
            pls_write(current, proc);
            load_rsp0(next->kstack + KSTACKSIZE);
            mp_set_mm_pagetable(next->mm);
            switch_to(&(prev->context), &(next->context));
        }
        local_intr_restore(intr_flag);
    }
}