Example #1
void
schedule(void) {
    bool intr_flag;
    struct proc_struct *next;
    local_intr_save(intr_flag);
    {
	cprintf("\n++++++ CURRENT PID = %d ++++++\n", current->pid);
        cprintf("++++++ IN SCHEDULE() ++++++\n");
        current->need_resched = 0;
        if (current->state == PROC_RUNNABLE) {
            cprintf("++++++ CURRENT PID = %d RUNNABLE, CALL ENQUEUE ++++++\n", current->pid);
            sched_class_enqueue(current);
        }
        if ((next = sched_class_pick_next()) != NULL) {
            cprintf("++++++ NEXT PID = %d TO RUN, CALL DEQUEUE ++++++\n", next->pid);
            sched_class_dequeue(next);
        }
        if (next == NULL) {
            next = idleproc;
        }
        next->runs ++;
        if (next != current) {
            proc_run(next);
        }
    }
    local_intr_restore(intr_flag);
}
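This and the other ucore-derived examples below dispatch through a pluggable scheduler-class layer instead of touching the run queue directly. A minimal sketch of that layer, assuming definitions in the style of ucore's kern/schedule/sched.[ch] (the exact upstream fields may differ per project):

/* Sketch only -- assumed to follow ucore's sched framework; verify against the project. */
struct sched_class {
    const char *name;
    void (*init)(struct run_queue *rq);
    void (*enqueue)(struct run_queue *rq, struct proc_struct *proc);   /* add proc to the run queue */
    void (*dequeue)(struct run_queue *rq, struct proc_struct *proc);   /* remove proc from the run queue */
    struct proc_struct *(*pick_next)(struct run_queue *rq);            /* choose the next proc to run */
    void (*proc_tick)(struct run_queue *rq, struct proc_struct *proc); /* timer-tick bookkeeping */
};

static struct sched_class *sched_class;   /* e.g. a round-robin implementation */
static struct run_queue *rq;

static inline void
sched_class_enqueue(struct proc_struct *proc) {
    if (proc != idleproc) {               /* the idle process never enters the run queue */
        sched_class->enqueue(rq, proc);
    }
}

static inline void
sched_class_dequeue(struct proc_struct *proc) {
    sched_class->dequeue(rq, proc);
}

static inline struct proc_struct *
sched_class_pick_next(void) {
    return sched_class->pick_next(rq);
}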
Example #2
void schedule(void)
{
	bool intr_flag;
	struct proc_struct *next;
	local_intr_save(intr_flag);
	{
		current->need_resched = 0;
		if (current->state == PROC_RUNNABLE)
		{
			sched_class_enqueue(current);
		}
		if ((next = sched_class_pick_next()) != NULL)
		{
			sched_class_dequeue(next);
		}
		if (next == NULL)
		{
			next = idleproc;
		}
		next->runs++;
		if (next != current)
		{
			proc_run(next);
		}
	}
	local_intr_restore(intr_flag);
}
Example #3
void
schedule(void) {
    bool intr_flag;
    list_entry_t *le, *last;
    struct proc_struct *next = NULL;
    local_intr_save(intr_flag);
    {
        current->need_resched = 0;
        last = (current == idleproc) ? &proc_list : &(current->list_link);
        le = last;
        do {
            if ((le = list_next(le)) != &proc_list) {
                next = le2proc(le, list_link);
                if (next->state == PROC_RUNNABLE) {
                    break;
                }
            }
        } while (le != last);
        if (next == NULL || next->state != PROC_RUNNABLE) {
            next = idleproc;
        }
        next->runs ++;
        if (next != current) {
            proc_run(next);
        }
    }
    local_intr_restore(intr_flag);
}
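The round-robin scan above walks proc_list with list_next() and recovers the enclosing proc_struct via le2proc. A sketch of that container-of style conversion, assuming macros in the style of ucore's libs/defs.h and kern/process/proc.h:

/* Sketch only -- assumed ucore-style helpers, shown for context. */
#define offsetof(type, member) \
    ((size_t)(&((type *)0)->member))

#define to_struct(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* list_link is the list_entry_t embedded in struct proc_struct */
#define le2proc(le, member) \
    to_struct((le), struct proc_struct, member)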
Example #4
pid_t
caproc(struct privsep *ps, struct privsep_proc *p)
{
	struct ca_store	*store;
	FILE		*fp = NULL;
	EVP_PKEY	*key;

	/*
	 * This runs before privsep drops privileges, so the process can
	 * still read the private key from disk.
	 */
	if ((store = calloc(1, sizeof(*store))) == NULL)
		fatal("ca: failed to allocate cert store");

	/* Read private key */
	if ((fp = fopen(IKED_PRIVKEY, "r")) == NULL)
		fatal("ca: failed to open private key");

	if ((key = PEM_read_PrivateKey(fp, NULL, NULL, NULL)) == NULL)
		fatalx("ca: failed to read private key");
	fclose(fp);

	if (ca_privkey_serialize(key, &store->ca_privkey) != 0)
		fatalx("ca: failed to serialize private key");
	if (ca_pubkey_serialize(key, &store->ca_pubkey) != 0)
		fatalx("ca: failed to serialize public key");

	EVP_PKEY_free(key);

	return (proc_run(ps, p, procs, nitems(procs), ca_reset, store));
}
Example #5
File: sched.c Project: ryz1/OSryz
void
schedule(void) {
    bool intr_flag;
    struct proc_struct *next;
    local_intr_save(intr_flag);
    {
        current->need_resched = 0;
        if (current->state == PROC_RUNNABLE) {
            cprintf("Timer finished for process pid = %d and push in the queue\n",current->pid);
            sched_class_enqueue(current);
        }
        if ((next = sched_class_pick_next()) != NULL) {
            cprintf("Picks process pid = %d to run and pop from the queue\n",next->pid);
            sched_class_dequeue(next);
        }
        if (next == NULL) {
            cprintf("No runnable process.\n");
            next = idleproc;
        }
        next->runs ++;
        if (next != current) {
            proc_run(next);
        }
    }
    local_intr_restore(intr_flag);
}
Example #6
void
schedule(void) {
    bool intr_flag;
    list_entry_t *le, *last;
    struct proc_struct *next = NULL;
    local_intr_save(intr_flag);
    {
        current->need_resched = 0;
        last = (current == idleproc) ? &proc_list : &(current->list_link);
        le = last;
        do {
            if ((le = list_next(le)) != &proc_list) {
                next = le2proc(le, list_link);
                if (next->state == PROC_RUNNABLE) {
                    break;
                }
            }
        } while (le != last);
        if (next == NULL || next->state != PROC_RUNNABLE) {
            next = idleproc;
        }
        next->runs ++;
        if (next != current) {
            //proc_print(next);
            cprintf("sche! proc %d, pc=%08x\n\n", next->pid, next->context.pc);
            //cprintf("sp=%08x\n\n", read_sp());
            proc_run(next);
        }
    }
    local_intr_restore(intr_flag);
}
Example #7
File: grub.c Project: jgraef/meinOS
/**
 * Runs grub modules
 *  @param modules List of GRUB modules
 */
int init_run_grub_modules(pid_t *modules) {
  size_t i;

  signal(SIGUSR1,sigusr1_handler);

  for (i = 0; modules[i] != 0; i++) {
    char *name = getname(modules[i]);

    proc_run(modules[i]);

    if (init_wait_grub_module() == -1) {
      dbgmsg("init: Process %s does not respond. Initialization failed!\n", name);
      free(name);
      return -1;
    }

    if (strcmp(name, "iso9660") == 0) {
      // Initial mount of the boot device
      vfs_mount(BOOT_FS, BOOT_MP, BOOT_DEV, BOOT_RO);
    }

    free(name);
  }

  return 0;
}
Example #8
// Put the current process to sleep by "returning" to its parent process.
// Used both when a process calls the SYS_RET system call explicitly,
// and when a process causes an unhandled trap in user mode.
// The 'entry' parameter is as in proc_save().
void gcc_noreturn
proc_ret(trapframe *tf, int entry)
{
	proc *cp = proc_cur();
	proc *p = cp->parent;
	
	if (p == NULL){
		if (tf->trapno != T_SYSCALL){
			trap_print(tf);
			panic("no parent to reflect trap");
		}
		file_io(tf);
		cprintf("fileio done\n");
	}

	spinlock_acquire(&cp->lock);
	cp->state = PROC_STOP;
	cp->runcpu = NULL;
	proc_save(cp, tf, entry);
	spinlock_release(&cp->lock);
	
	spinlock_acquire(&p->lock);
	if (p->state == PROC_WAIT && p->waitchild == cp) {
		p->waitchild = NULL;
		proc_run(p);
	}
	spinlock_release(&p->lock);
	
	proc_sched();
}
Example #9
/**
 * Entry point for all the processes.
 */
void proc_entry(void)
{
	/*
	 * Return from a context switch assumes interrupts are disabled, so
	 * we need to explicitly re-enable them as soon as possible.
	 */
	IRQ_ENABLE;
	/* Call the actual process's entry point */
	proc_run();
	proc_exit();
}
Example #10
int main(int argc, char** argv)
{
	ASSERT(argc > 1, "Specify a bytecode file in the first and only argument, please\n");

	init_mdata();                       // Initialize the instruction defs
	init_adata();
	init_ins_def();

	rt_t* runtime = proc_init(argv[1]); // Initialize process

	proc_run(runtime);                  // Execute runtime

	proc_clean(runtime);                // Once `proc_run` returns, clean
	                                    // what sort of mess it made.

	return 0;
}
Example #11
void gcc_noreturn
proc_sched(void)
{
	proc *p;
	// Wait until the ready list is non-empty.  Drop proc_lock while
	// spinning so other CPUs can enqueue; exit the loop holding the lock.
	do {
		spinlock_acquire(&proc_lock);
		if (proc_first)
			break;
		spinlock_release(&proc_lock);
		while (!proc_first)
			pause();
	} while (1);

	// Pop the head of the ready list and hand the CPU to it.
	p = proc_first;
	spinlock_acquire(&p->lock);
	proc_first = p->readynext;
	if (proc_first == NULL)
		proc_last = NULL;
	spinlock_release(&proc_lock);
	p->readynext = NULL;
	proc_run(p);
}
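proc_sched() above pops the head of a singly linked ready list (proc_first/proc_last, chained through readynext) under proc_lock. For symmetry, a hypothetical enqueue helper consistent with that layout might look like the following; proc_ready is an illustrative name, not taken from the project:

// Hypothetical counterpart to the dequeue in proc_sched() above:
// append p to the tail of the ready list under proc_lock.
static void
proc_ready(proc *p)
{
	spinlock_acquire(&proc_lock);
	p->readynext = NULL;
	if (proc_last != NULL)
		proc_last->readynext = p;
	else
		proc_first = p;
	proc_last = p;
	spinlock_release(&proc_lock);
}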
Example #12
// Put the current process to sleep by "returning" to its parent process.
// Used both when a process calls the SYS_RET system call explicitly,
// and when a process causes an unhandled trap in user mode.
// The 'entry' parameter is as in proc_save().
void gcc_noreturn
proc_ret(trapframe *tf, int entry)
{
	proc *p;
	proc *cp;
	cp = cpu_cur()->proc;
	p = cp->parent;
	cprintf("proc_ret child=%p parent=%p\n", cp, p);
	spinlock_acquire(&(p->lock));
	spinlock_acquire(&(cp->lock));
	cp->state = PROC_STOP;
	proc_save(cp, tf, entry);
	spinlock_release(&(cp->lock));
	if (p->state == PROC_WAIT && p->waitchild == cp) {
		proc_run(p);
	}
	spinlock_release(&(p->lock));
	proc_sched();
}
Example #13
void gcc_noreturn
proc_sched(void)
{
	spinlock_acquire(&proc_lock);
	while (proc_head == NULL) {
		spinlock_release(&proc_lock);
		sti(); //Enable kbd interrupts
		pause();
		cli(); //Disable kbd interrupts
		spinlock_acquire(&proc_lock);
	}

	proc *p = proc_head;
	proc_head = p->readynext;
	if (proc_head == NULL)
		proc_tail = NULL;

	spinlock_acquire(&p->lock);
	spinlock_release(&proc_lock);
	proc_run(p);
}
Example #14
void
schedule(void) { // picks the next process to run
    cprintf("\n==> schedule\n");
    bool intr_flag;
    struct proc_struct *next;
    local_intr_save(intr_flag);
    {
        current->need_resched = 0;
        if (current->state == PROC_RUNNABLE) { // still marked runnable
            sched_class_enqueue(current); // put the current process back on the ready queue
        }
        if ((next = sched_class_pick_next()) != NULL) { // pick the highest-priority ready process
            sched_class_dequeue(next);
        }
        if (next == NULL) {
            next = idleproc; // no runnable process found, fall back to the idle process
        }
        next->runs ++;
        if (next != current) {
            proc_run(next); // switch to it
        }
    }
    local_intr_restore(intr_flag);
}
Example #15
void manager_brho(void)
{
    static bool first = TRUE;
    struct per_cpu_info *pcpui = &per_cpu_info[core_id()];

    if (first) {
        printk("*** IRQs must be enabled for input emergency codes ***\n");
#ifdef CONFIG_X86
        printk("*** Hit ctrl-g to enter the monitor. ***\n");
        printk("*** Hit ctrl-q to force-enter the monitor. ***\n");
        printk("*** Hit ctrl-b for a backtrace of core 0 ***\n");
#else
        printk("*** Hit ctrl-g to enter the monitor. ***\n");
#warning "***** ctrl-g untested on riscv, check k/a/r/trap.c *****"
#endif
        first = FALSE;
    }
    /* just idle, and deal with things via interrupts.  or via face. */
    smp_idle();
    /* whatever we do in the manager, keep in mind that we need to not do
     * anything too soon (like make processes), since we'll drop in here during
     * boot if the boot sequence required any I/O (like EXT2), and we need to
     * PRKM() */
    assert(0);

#if 0 /* ancient tests below: (keeping around til we ditch the manager) */
    // for testing taking cores, check in case 1 for usage
    uint32_t corelist[MAX_NUM_CORES];
    uint32_t num = 3;
    struct file *temp_f;
    static struct proc *p;

    static uint8_t RACY progress = 0;	/* this will wrap around. */
    switch (progress++) {
    case 0:
        printk("Top of the manager to ya!\n");
        /* 124 is half of the available boxboro colors (with the kernel
         * getting 8) */
        //quick_proc_color_run("msr_dumb_while", p, 124, temp_f);
        quick_proc_run("/bin/hello", p, temp_f);
#if 0
        // this is how you can transition to a parallel process manually
        // make sure you don't proc run first
        __proc_set_state(p, PROC_RUNNING_S);
        __proc_set_state(p, PROC_RUNNABLE_M);
        p->resources[RES_CORES].amt_wanted = 5;
        spin_unlock(&p->proc_lock);
        core_request(p);
        panic("This is okay");
#endif
        break;
    case 1:
#if 0
        udelay(10000000);
        // this is a ghetto way to test restarting an _M
        printk("\nattempting to ghetto preempt...\n");
        spin_lock(&p->proc_lock);
        proc_take_allcores(p, __death);
        __proc_set_state(p, PROC_RUNNABLE_M);
        spin_unlock(&p->proc_lock);
        udelay(5000000);
        printk("\nattempting to restart...\n");
        core_request(p); // proc still wants the cores
        panic("This is okay");
        // this tests taking some cores, and later killing an _M
        printk("taking 3 cores from p\n");
        for (int i = 0; i < num; i++)
            corelist[i] = 7-i; // 7, 6, and 5
        spin_lock(&p->proc_lock);
        proc_take_cores(p, corelist, &num, __death);
        spin_unlock(&p->proc_lock);
        udelay(5000000);
        printk("Killing p\n");
        enable_irq();
        proc_destroy(p);
        printk("Killed p\n");
        panic("This is okay");

        envs[0] = kfs_proc_create(kfs_lookup_path("roslib_hello"));
        __proc_set_state(envs[0], PROC_RUNNABLE_S);
        proc_run(envs[0]);
        warn("DEPRECATED");
        break;
#endif
    case 2:
    /*
    test_smp_call_functions();
    test_checklists();
    test_barrier();
    test_print_info();
    test_lapic_status_bit();
    test_ipi_sending();
    test_pit();
    */
    default:
        printd("Manager Progress: %d\n", progress);
        // delay if you want to test rescheduling an MCP that yielded
        //udelay(15000000);
        run_scheduler();
    }
    panic("If you see me, then you probably screwed up");
    monitor(0);

    /*
    printk("Servicing syscalls from Core 0:\n\n");
    while (1) {
    	process_generic_syscalls(&envs[0], 1);
    	cpu_relax();
    }
    */
    return;
#endif
}
Example #16
pid_t
control(struct privsep *ps, struct privsep_proc *p)
{
	return (proc_run(ps, p, procs, nitems(procs), control_run, NULL));
}
Example #17
pid_t
ikev1(struct privsep *ps, struct privsep_proc *p)
{
	return (proc_run(ps, p, procs, nitems(procs), NULL, NULL));
}
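Examples 4, 16, and 17 all funnel into the same privsep helper. Judging only from these call sites, proc_run() takes the privsep context, the process descriptor, the table of peer processes with its length, an optional per-child run callback, and an opaque argument for that callback; a prototype consistent with the calls (an inference, not a quote from the project's headers) would be:

/* Prototype inferred from the call sites above; check the project's proc.h. */
pid_t	 proc_run(struct privsep *ps, struct privsep_proc *p,
	    struct privsep_proc *procs, unsigned int nproc,
	    void (*run)(struct privsep *, struct privsep_proc *, void *),
	    void *arg);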
Example #18
void
schedule(void) {
  /* schedule in irq ctx is not allowed */
  assert(!ucore_in_interrupt());
  bool intr_flag;
  struct proc_struct *next;
#ifndef MT_SUPPORT
  list_entry_t head;
  int lapic_id = pls_read(lapic_id);
#endif

  local_intr_save(intr_flag);
  int lcpu_count = pls_read(lcpu_count);
  {
    current->need_resched = 0;
#ifndef MT_SUPPORT
    if (current->mm)
    {
      assert(current->mm->lapic == lapic_id);
      current->mm->lapic = -1;
    }
#endif
    if (current->state == PROC_RUNNABLE && current->pid >= lcpu_count) {
      sched_class_enqueue(current);
    }
#ifndef MT_SUPPORT
    list_init(&head);
    while (1)
    {
      next = sched_class_pick_next();
      if (next != NULL) sched_class_dequeue(next);

      if (next && next->mm && next->mm->lapic != -1)
      {
        list_add(&head, &(next->run_link));
      }
      else
      {
        list_entry_t *cur;
        while ((cur = list_next(&head)) != &head)
        {
          list_del_init(cur);
          sched_class_enqueue(le2proc(cur, run_link));
        }

        break;
      }
    }
#else
    next = sched_class_pick_next();
    if (next != NULL)
      sched_class_dequeue(next);
#endif  /* !MT_SUPPORT */
    if (next == NULL) {
      next = idleproc;
    }
    next->runs ++;
    /* Collect scheduling statistics here */
    if (sched_collect_info) {
      int lcpu_count = pls_read(lcpu_count);
      int lcpu_idx = pls_read(lcpu_idx);
      int loc = sched_info_head[lcpu_idx];
      int prev = sched_info_pid[loc*lcpu_count + lcpu_idx];
      if (next->pid == prev)
        sched_info_times[loc*lcpu_count + lcpu_idx] ++;
      else {
        sched_info_head[lcpu_idx] ++;
        if (sched_info_head[lcpu_idx] >= PGSIZE / sizeof(uint16_t) / lcpu_count)
          sched_info_head[lcpu_idx] = 0;
        loc = sched_info_head[lcpu_idx];
        uint16_t prev_pid = sched_info_pid[loc*lcpu_count + lcpu_idx];
        uint16_t prev_times = sched_info_times[loc*lcpu_count + lcpu_idx];
        if (prev_times > 0 && prev_pid >= lcpu_count + 2)
          sched_slices[lcpu_idx][prev_pid % SLICEPOOL_SIZE] += prev_times;
        sched_info_pid[loc*lcpu_count + lcpu_idx] = next->pid;
        sched_info_times[loc*lcpu_count + lcpu_idx] = 1;
      }
    }
#ifndef MT_SUPPORT
    assert(!next->mm || next->mm->lapic == -1);
    if (next->mm)
      next->mm->lapic = lapic_id;
#endif
    if (next != current) {
#if 0
      kprintf("N %d to %d\n", current->pid, next->pid);
#endif
      proc_run(next);
    }
  }
  local_intr_restore(intr_flag);
}