Example no. 1
PUBLIC void context_stop_idle(void)
{
	int is_idle;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif
	
	is_idle = get_cpu_var(cpu, cpu_is_idle);
	get_cpu_var(cpu, cpu_is_idle) = 0;

	context_stop(get_cpulocal_var_ptr(idle_proc));

	if (is_idle)
		restart_local_timer();
#if SPROFILE
	if (sprofiling)
		get_cpulocal_var(idle_interrupted) = 1;
#endif
}
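context_stop_idle() reads and clears the per-CPU idle flag through get_cpu_var(); the cpu variable is declared only under CONFIG_SMP because uniprocessor builds ignore the index. A minimal sketch of how such an accessor could be defined, assuming a hypothetical cpu_local_vars layout (this is not the actual MINIX cpulocals header):

struct cpu_local_vars {
	int cpu_is_idle;	/* set while the CPU runs its idle loop */
	int idle_interrupted;	/* profiling: idle loop was interrupted */
};

#ifdef CONFIG_SMP
extern struct cpu_local_vars cpu_local_vars[CONFIG_MAX_CPUS];
#define get_cpu_var(cpu, name)	(cpu_local_vars[(cpu)].name)
#else
extern struct cpu_local_vars cpu_local_vars;
#define get_cpu_var(cpu, name)	(cpu_local_vars.name)	/* index unused */
#endif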
Example no. 2
void context_stop(struct proc * p)
{
	u64_t tsc;
	u32_t tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);

	read_tsc_64(&tsc);
	assert(tsc >= *__tsc_ctr_switch);
	tsc_delta = tsc - *__tsc_ctr_switch;
	p->p_cycles += tsc_delta;

	if(kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			add64(kbill_ipc->p_kipc_cycles, tsc_delta);
		kbill_ipc = NULL;
	}

	if(kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			add64(kbill_kcall->p_kcall_cycles, tsc_delta);
		kbill_kcall = NULL;
	}

	/*
	 * Deduct the just-consumed CPU cycles from the CPU time left for this
	 * process during its current quantum. Skip IDLE and other pseudo-kernel
	 * tasks.
	 */
	if (p->p_endpoint >= 0) {
#if DEBUG_RACE
		p->p_cpu_time_left = 0;
#else
		if (tsc_delta < p->p_cpu_time_left) {
			p->p_cpu_time_left -= tsc_delta;
		} else {
			p->p_cpu_time_left = 0;
		}
#endif
	}

	*__tsc_ctr_switch = tsc;
}
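The quantum accounting at the end of context_stop() is a saturating subtraction: the consumed cycles are deducted from the time left, clamping at zero instead of letting the unsigned counter wrap. The same guard as a standalone sketch, with illustrative names (deduct_quantum is not a MINIX function):

#include <stdint.h>

static uint64_t deduct_quantum(uint64_t time_left, uint64_t consumed)
{
	/* plain "time_left - consumed" would wrap around on underflow */
	return (consumed < time_left) ? (time_left - consumed) : 0;
}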
Example no. 3
/*===========================================================================*
 *				do_update				     *
 *===========================================================================*/
int do_update(struct proc * caller, message * m_ptr)
{
/* Handle sys_update(). Update one process into another by swapping their
 * process slots.
 */
  endpoint_t src_e, dst_e;
  int src_p, dst_p;
  struct proc *src_rp, *dst_rp;
  struct priv *src_privp, *dst_privp;
  struct proc orig_src_proc;
  struct proc orig_dst_proc;
  struct priv orig_src_priv;
  struct priv orig_dst_priv;
  int i;

  /* Lookup slots for source and destination process. */
  src_e = m_ptr->SYS_UPD_SRC_ENDPT;
  if(!isokendpt(src_e, &src_p)) {
      return EINVAL;
  }
  src_rp = proc_addr(src_p);
  src_privp = priv(src_rp);
  if(!(src_privp->s_flags & SYS_PROC)) {
      return EPERM;
  }

  dst_e = m_ptr->SYS_UPD_DST_ENDPT;
  if(!isokendpt(dst_e, &dst_p)) {
      return EINVAL;
  }
  dst_rp = proc_addr(dst_p);
  dst_privp = priv(dst_rp);
  if(!(dst_privp->s_flags & SYS_PROC)) {
      return EPERM;
  }

  assert(!proc_is_runnable(src_rp) && !proc_is_runnable(dst_rp));

  /* Check if processes are updatable. */
  if(!proc_is_updatable(src_rp) || !proc_is_updatable(dst_rp)) {
      return EBUSY;
  }

#if DEBUG
  printf("do_update: updating %d (%s, %d, %d) into %d (%s, %d, %d)\n",
      src_rp->p_endpoint, src_rp->p_name, src_rp->p_nr, priv(src_rp)->s_proc_nr,
      dst_rp->p_endpoint, dst_rp->p_name, dst_rp->p_nr, priv(dst_rp)->s_proc_nr);

  proc_stacktrace(src_rp);
  proc_stacktrace(dst_rp);
  printf("do_update: curr ptproc %d\n", get_cpulocal_var(ptproc)->p_endpoint);
#endif

  /* Let destination inherit the target mask from source. */
  for (i=0; i < NR_SYS_PROCS; i++) {
      if (get_sys_bit(priv(src_rp)->s_ipc_to, i)) {
          set_sendto_bit(dst_rp, i);
      }
  }

  /* Save existing data. */
  orig_src_proc = *src_rp;
  orig_src_priv = *(priv(src_rp));
  orig_dst_proc = *dst_rp;
  orig_dst_priv = *(priv(dst_rp));

  /* Swap slots. */
  *src_rp = orig_dst_proc;
  *src_privp = orig_dst_priv;
  *dst_rp = orig_src_proc;
  *dst_privp = orig_src_priv;

  /* Adjust process slots. */
  adjust_proc_slot(src_rp, &orig_src_proc);
  adjust_proc_slot(dst_rp, &orig_dst_proc);

  /* Adjust privilege slots. */
  adjust_priv_slot(priv(src_rp), &orig_src_priv);
  adjust_priv_slot(priv(dst_rp), &orig_dst_priv);

  /* Swap global process slot addresses. */
  swap_proc_slot_pointer(get_cpulocal_var_ptr(ptproc), src_rp, dst_rp);

#if DEBUG
  printf("do_update: updated %d (%s, %d, %d) into %d (%s, %d, %d)\n",
      src_rp->p_endpoint, src_rp->p_name, src_rp->p_nr, priv(src_rp)->s_proc_nr,
      dst_rp->p_endpoint, dst_rp->p_name, dst_rp->p_nr, priv(dst_rp)->s_proc_nr);

  proc_stacktrace(src_rp);
  proc_stacktrace(dst_rp);
  printf("do_update: curr ptproc %d\n", get_cpulocal_var(ptproc)->p_endpoint);
#endif

#ifdef CONFIG_SMP
  bits_fill(src_rp->p_stale_tlb, CONFIG_MAX_CPUS);
  bits_fill(dst_rp->p_stale_tlb, CONFIG_MAX_CPUS);
#endif

  return OK;
}
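do_update() swaps two slots by copying both records aside, cross-assigning them, and then letting adjust_proc_slot()/adjust_priv_slot() restore the fields that belong to the slot rather than to the process occupying it. A minimal sketch of that save-swap-adjust pattern with a hypothetical slot type (slot_field stands in for the several slot-bound fields MINIX restores):

struct slot {
	int slot_field;	/* bound to the slot's position, must not move */
	int payload;	/* belongs to the occupant, must move */
};

static void swap_slots(struct slot *a, struct slot *b)
{
	struct slot orig_a = *a, orig_b = *b;

	*a = orig_b;				/* swap the whole records... */
	*b = orig_a;
	a->slot_field = orig_a.slot_field;	/* ...then pin slot-bound fields */
	b->slot_field = orig_b.slot_field;
}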
Example no. 4
PUBLIC void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;

	/*
	 * This function is called only if we switch from kernel to user or idle
	 * or back. Therefore this is a perfect location to place the big kernel
	 * lock, which will hopefully disappear soon.
	 *
	 * If we stop accounting for KERNEL we must unlock the BKL. If we
	 * account for IDLE we must not hold the lock.
	 */
	if (p == proc_addr(KERNEL)) {
		u64_t tmp;

		read_tsc_64(&tsc);
		tmp = sub64(tsc, *__tsc_ctr_switch);
		kernel_ticks[cpu] = add64(kernel_ticks[cpu], tmp);
		p->p_cycles = add64(p->p_cycles, tmp);
		BKL_UNLOCK();
	} else {
		u64_t bkl_tsc;
		atomic_t succ;
		
		read_tsc_64(&bkl_tsc);
		/* racy read; this only gives an estimate */
		succ = big_kernel_lock.val;
		
		BKL_LOCK();
		
		read_tsc_64(&tsc);

		bkl_ticks[cpu] = add64(bkl_ticks[cpu], sub64(tsc, bkl_tsc));
		bkl_tries[cpu]++;
		bkl_succ[cpu] += (succ == 0);	/* lock appeared free at first try */

		p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));

#ifdef CONFIG_SMP
		/*
		 * Since at the time we got a scheduling IPI we might already
		 * have been waiting for the BKL, we may miss it due to a
		 * similar IPI sent to the cpu that is already waiting for us
		 * to handle its request. This results in a live-lock of these
		 * two cpus.
		 *
		 * Therefore we always check whether an IPI is pending and, if
		 * so, handle it straight away so the other cpu can continue
		 * and we do not live-lock each other.
		 */
		smp_sched_handler();
#endif
	}
#else
	read_tsc_64(&tsc);
	p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
#endif
	
	tsc_delta = sub64(tsc, *__tsc_ctr_switch);

	if(kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			add64(kbill_ipc->p_kipc_cycles, tsc_delta);
		kbill_ipc = NULL;
	}

	if(kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			add64(kbill_kcall->p_kcall_cycles, tsc_delta);
		kbill_kcall = NULL;
	}

	/*
	 * Deduct the just-consumed CPU cycles from the CPU time left for this
	 * process during its current quantum. Skip IDLE and other pseudo-kernel
	 * tasks.
	 */
	if (p->p_endpoint >= 0) {
#if DEBUG_RACE
		make_zero64(p->p_cpu_time_left);
#else
		/* if (tsc_delta < p->p_cpu_time_left), done in 32-bit halves */
		if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
				(ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
				 ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
			p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
		else {
			make_zero64(p->p_cpu_time_left);
		}
#endif
	}

	*__tsc_ctr_switch = tsc;
}
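On builds where u64_t is a pair of 32-bit halves, the comparison above decides tsc_delta < p_cpu_time_left by the high words first and by the low words only on a tie. The same predicate as a standalone helper (u64_less is illustrative; ex64hi/ex64lo are the extractors used in the code above):

#include <stdint.h>

static int u64_less(uint32_t a_hi, uint32_t a_lo, uint32_t b_hi, uint32_t b_lo)
{
	/* compare high halves first; low halves break the tie */
	return a_hi < b_hi || (a_hi == b_hi && a_lo < b_lo);
}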
Example no. 5
void bsp_finish_booting(void)
{
  int i;
#if SPROFILE
  sprofiling = 0;      /* we're not profiling until instructed to */
#endif /* SPROFILE */
  cprof_procs_no = 0;  /* init nr of hash table slots used */

  cpu_identify();

  vm_running = 0;
  krandom.random_sources = RANDOM_SOURCES;
  krandom.random_elements = RANDOM_ELEMENTS;

  /* MINIX is now ready. All boot image processes are on the ready queue.
   * Return to the assembly code to start running the current process. 
   */
  
  /* bill_ptr and proc_ptr must point somewhere; start them at the idle process */
  get_cpulocal_var(bill_ptr) = get_cpulocal_var_ptr(idle_proc);
  get_cpulocal_var(proc_ptr) = get_cpulocal_var_ptr(idle_proc);
  announce();				/* print MINIX startup banner */

  /*
   * Only now that we have access to the cpu-local run queue do we schedule
   * the processes. We skip the slots for the former kernel tasks.
   */
  for (i=0; i < NR_BOOT_PROCS - NR_TASKS; i++) {
	RTS_UNSET(proc_addr(i), RTS_PROC_STOP);
  }
  /*
   * Enable timer interrupts and the clock task on the boot CPU.
   */
  if (boot_cpu_init_timer(system_hz)) {
	  panic("FATAL : failed to initialize timer interrupts, "
			  "cannot continue without any clock source!");
  }

  fpu_init();

/* Warnings for sanity checks that take time. These warnings are printed to
 * make it unmistakable that no full release should be shipped with them
 * enabled.
 */
#if DEBUG_SCHED_CHECK
  FIXME("DEBUG_SCHED_CHECK enabled");
#endif
#if DEBUG_VMASSERT
  FIXME("DEBUG_VMASSERT enabled");
#endif
#if DEBUG_PROC_CHECK
  FIXME("PROC check enabled");
#endif

  DEBUGEXTRA(("cycles_accounting_init()... "));
  cycles_accounting_init();
  DEBUGEXTRA(("done\n"));

#ifdef CONFIG_SMP
  cpu_set_flag(bsp_cpu_id, CPU_IS_READY);
  machine.processors_count = ncpus;
  machine.bsp_id = bsp_cpu_id;
#else
  machine.processors_count = 1;
  machine.bsp_id = 0;
#endif

  /* The kernel may no longer allocate memory for itself, as VM will be running soon */
  kernel_may_alloc = 0;

  switch_to_user();
  NOT_REACHABLE;
}
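bsp_finish_booting() "schedules" the boot image processes simply by clearing their RTS_PROC_STOP flag: in MINIX, clearing the last blocking flag is the point at which a process goes back on the run queue. A hypothetical sketch of such a macro (not the actual MINIX RTS_UNSET definition):

#define MY_RTS_UNSET(rp, f) do {					\
	int was_blocked = ((rp)->p_rts_flags != 0);			\
	(rp)->p_rts_flags &= ~(f);					\
	if (was_blocked && (rp)->p_rts_flags == 0)			\
		enqueue(rp);	/* cleared the last flag: runnable again */ \
} while (0)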
Example no. 6
PUBLIC void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;

	/*
	 * This function is called only if we switch from kernel to user or idle
	 * or back. Therefore this is a perfect location to place the big kernel
	 * lock, which will hopefully disappear soon.
	 *
	 * If we stop accounting for KERNEL we must unlock the BKL. If we
	 * account for IDLE we must not hold the lock.
	 */
	if (p == proc_addr(KERNEL)) {
		u64_t tmp;

		read_tsc_64(&tsc);
		tmp = sub64(tsc, *__tsc_ctr_switch);
		kernel_ticks[cpu] = add64(kernel_ticks[cpu], tmp);
		p->p_cycles = add64(p->p_cycles, tmp);
		BKL_UNLOCK();
	} else {
		u64_t bkl_tsc;
		atomic_t succ;
		
		read_tsc_64(&bkl_tsc);
		/* racy read; this only gives an estimate */
		succ = big_kernel_lock.val;
		
		BKL_LOCK();
		
		read_tsc_64(&tsc);

		bkl_ticks[cpu] = add64(bkl_ticks[cpu], sub64(tsc, bkl_tsc));
		bkl_tries[cpu]++;
		bkl_succ[cpu] += (succ == 0);	/* lock appeared free at first try */

		p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
	}
#else
	read_tsc_64(&tsc);
	p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
#endif
	
	tsc_delta = sub64(tsc, *__tsc_ctr_switch);

	if(kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			add64(kbill_ipc->p_kipc_cycles, tsc_delta);
		kbill_ipc = NULL;
	}

	if(kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			add64(kbill_kcall->p_kcall_cycles, tsc_delta);
		kbill_kcall = NULL;
	}

	/*
	 * Deduct the just-consumed CPU cycles from the CPU time left for this
	 * process during its current quantum. Skip IDLE and other pseudo-kernel
	 * tasks.
	 */
	if (p->p_endpoint >= 0) {
#if DEBUG_RACE
		make_zero64(p->p_cpu_time_left);
#else
		/* if (tsc_delta < p->p_cpu_time_left), done in 32-bit halves */
		if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
				(ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
				 ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
			p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
		else {
			make_zero64(p->p_cpu_time_left);
		}
#endif
	}

	*__tsc_ctr_switch = tsc;
}
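The else branch above doubles as the BKL's contention profiler: it samples the TSC before and after taking the lock to bill the wait, counts every attempt, and counts the attempts that found the lock free. The same bookkeeping isolated into one helper, reusing the identifiers from the code above (lock_with_stats itself is illustrative):

static void lock_with_stats(unsigned cpu)
{
	u64_t before, after;
	int was_free;

	read_tsc_64(&before);
	was_free = (big_kernel_lock.val == 0);	/* racy read, estimate only */
	BKL_LOCK();
	read_tsc_64(&after);

	bkl_ticks[cpu] = add64(bkl_ticks[cpu], sub64(after, before)); /* wait time */
	bkl_tries[cpu]++;				/* total attempts */
	bkl_succ[cpu] += was_free;			/* uncontended attempts */
}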
Example no. 7
/*===========================================================================*
 *				do_update				     *
 *===========================================================================*/
int do_update(struct proc * caller, message * m_ptr)
{
/* Handle sys_update(). Update one process into another by swapping their
 * process slots.
 */
  endpoint_t src_e, dst_e;
  int src_p, dst_p, flags;
  struct proc *src_rp, *dst_rp;
  struct priv *src_privp, *dst_privp;
  struct proc orig_src_proc;
  struct proc orig_dst_proc;
  struct priv orig_src_priv;
  struct priv orig_dst_priv;
  int i, r;

  /* Lookup slots for source and destination process. */
  flags = m_ptr->SYS_UPD_FLAGS;
  src_e = m_ptr->SYS_UPD_SRC_ENDPT;
  if(!isokendpt(src_e, &src_p)) {
      return EINVAL;
  }
  src_rp = proc_addr(src_p);
  src_privp = priv(src_rp);
  if(!(src_privp->s_flags & SYS_PROC)) {
      return EPERM;
  }

  dst_e = m_ptr->SYS_UPD_DST_ENDPT;
  if(!isokendpt(dst_e, &dst_p)) {
      return EINVAL;
  }
  dst_rp = proc_addr(dst_p);
  dst_privp = priv(dst_rp);
  if(!(dst_privp->s_flags & SYS_PROC)) {
      return EPERM;
  }

  assert(!proc_is_runnable(src_rp) && !proc_is_runnable(dst_rp));

  /* Check if processes are updatable. */
  if(!proc_is_updatable(src_rp) || !proc_is_updatable(dst_rp)) {
      return EBUSY;
  }

#if DEBUG
  printf("do_update: updating %d (%s, %d, %d) into %d (%s, %d, %d)\n",
      src_rp->p_endpoint, src_rp->p_name, src_rp->p_nr, priv(src_rp)->s_proc_nr,
      dst_rp->p_endpoint, dst_rp->p_name, dst_rp->p_nr, priv(dst_rp)->s_proc_nr);

  proc_stacktrace(src_rp);
  proc_stacktrace(dst_rp);
  printf("do_update: curr ptproc %d\n", get_cpulocal_var(ptproc)->p_endpoint);
  printf("do_update: endpoint %d rts flags %x asyn tab %08x asyn endpoint %d grant tab %08x grant endpoint %d\n", src_rp->p_endpoint, src_rp->p_rts_flags, priv(src_rp)->s_asyntab, priv(src_rp)->s_asynendpoint, priv(src_rp)->s_grant_table, priv(src_rp)->s_grant_endpoint);
  printf("do_update: endpoint %d rts flags %x asyn tab %08x asyn endpoint %d grant tab %08x grant endpoint %d\n", dst_rp->p_endpoint, dst_rp->p_rts_flags, priv(dst_rp)->s_asyntab, priv(dst_rp)->s_asynendpoint, priv(dst_rp)->s_grant_table, priv(dst_rp)->s_grant_endpoint);
#endif

  /* Let destination inherit allowed IRQ, I/O ranges, and memory ranges. */
  r = inherit_priv_irq(src_rp, dst_rp);
  if(r != OK) {
      return r;
  }
  r = inherit_priv_io(src_rp, dst_rp);
  if(r != OK) {
      return r;
  }
  r = inherit_priv_mem(src_rp, dst_rp);
  if(r != OK) {
      return r;
  }

  /* Let destination inherit the target mask from source. */
  for (i=0; i < NR_SYS_PROCS; i++) {
      if (get_sys_bit(priv(src_rp)->s_ipc_to, i)) {
          set_sendto_bit(dst_rp, i);
      }
  }

  /* Save existing data. */
  orig_src_proc = *src_rp;
  orig_src_priv = *(priv(src_rp));
  orig_dst_proc = *dst_rp;
  orig_dst_priv = *(priv(dst_rp));

  /* Adjust asyn tables. */
  adjust_asyn_table(priv(src_rp), priv(dst_rp));
  adjust_asyn_table(priv(dst_rp), priv(src_rp));

  /* Abort any pending send() on rollback. */
  if(flags & SYS_UPD_ROLLBACK) {
      abort_proc_ipc_send(src_rp);
  }

  /* Swap slots. */
  *src_rp = orig_dst_proc;
  *src_privp = orig_dst_priv;
  *dst_rp = orig_src_proc;
  *dst_privp = orig_src_priv;

  /* Adjust process slots. */
  adjust_proc_slot(src_rp, &orig_src_proc);
  adjust_proc_slot(dst_rp, &orig_dst_proc);

  /* Adjust privilege slots. */
  adjust_priv_slot(priv(src_rp), &orig_src_priv);
  adjust_priv_slot(priv(dst_rp), &orig_dst_priv);

  /* Swap global process slot addresses. */
  swap_proc_slot_pointer(get_cpulocal_var_ptr(ptproc), src_rp, dst_rp);

  /* Swap VM request entries. */
  swap_memreq(src_rp, dst_rp);

#if DEBUG
  printf("do_update: updated %d (%s, %d, %d) into %d (%s, %d, %d)\n",
      src_rp->p_endpoint, src_rp->p_name, src_rp->p_nr, priv(src_rp)->s_proc_nr,
      dst_rp->p_endpoint, dst_rp->p_name, dst_rp->p_nr, priv(dst_rp)->s_proc_nr);

  proc_stacktrace(src_rp);
  proc_stacktrace(dst_rp);
  printf("do_update: curr ptproc %d\n", get_cpulocal_var(ptproc)->p_endpoint);
  printf("do_update: endpoint %d rts flags %x asyn tab %08x asyn endpoint %d grant tab %08x grant endpoint %d\n", src_rp->p_endpoint, src_rp->p_rts_flags, priv(src_rp)->s_asyntab, priv(src_rp)->s_asynendpoint, priv(src_rp)->s_grant_table, priv(src_rp)->s_grant_endpoint);
  printf("do_update: endpoint %d rts flags %x asyn tab %08x asyn endpoint %d grant tab %08x grant endpoint %d\n", dst_rp->p_endpoint, dst_rp->p_rts_flags, priv(dst_rp)->s_asyntab, priv(dst_rp)->s_asynendpoint, priv(dst_rp)->s_grant_table, priv(dst_rp)->s_grant_endpoint);
#endif

#ifdef CONFIG_SMP
  bits_fill(src_rp->p_stale_tlb, CONFIG_MAX_CPUS);
  bits_fill(dst_rp->p_stale_tlb, CONFIG_MAX_CPUS);
#endif

  return OK;
}
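The target-mask inheritance in do_update() walks every system-process slot and grants the destination a send permission for each bit set in the source's mask. The loop factored into a helper for clarity (inherit_sendto_mask is illustrative; get_sys_bit() and set_sendto_bit() are the primitives used above):

static void inherit_sendto_mask(struct proc *src_rp, struct proc *dst_rp)
{
	int i;

	for (i = 0; i < NR_SYS_PROCS; i++) {
		if (get_sys_bit(priv(src_rp)->s_ipc_to, i))
			set_sendto_bit(dst_rp, i);	/* grant send right */
	}
}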