Example #1
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

#ifdef CONFIG_X86_64
	/* Allocate node local memory for AP pdas */
	WARN_ON(cpu == 0);
	if (cpu > 0) {
		rc = get_local_pda(cpu);
		if (rc)
			return rc;
	}
#endif

#ifdef CONFIG_X86_32
	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	irq_ctx_init(cpu);
#else
	cpu_pda(cpu)->pcurrent = idle;
	clear_tsk_thread_flag(idle, TIF_FORK);
#endif
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
		barrier();
	}

	return 0;
}
Example #2
File: kernel.c Project: wuxx/sos
s32 os_main(u32 sp)
{
    struct __os_task__ *ptask;

    int_init();
    uart_init();
    dram_init();
    timer_init();
    mmc_init();

    PRINT_INFO("%s\n", sys_banner);

    coretimer_init();
    task_init();
    semaphore_init();

    PRINT_INFO("cpu_mode: %s; lr: 0x%x; sp: 0x%x; cpsr: 0x%x\n",
            get_cpu_mode(NULL), __get_lr(), sp, __get_cpsr());

    gpio_set_function(GPIO_16, OUTPUT);
    gpio_set_output(GPIO_16, 0);

    /* set_log_level(LOG_DEBUG); */

    /* create idle task */
    if ((ptask = tcb_alloc()) == NULL) {
        panic();
    }

    tcb_init(ptask, idle_task, 0, 256);

    /*os_ready_insert(ptask);*/

    current_task = &tcb[IDLE_TASK_ID];  /* assume that this is idle_task */

    /* create main task */
    if ((ptask = tcb_alloc()) == NULL) {
        panic();
    }

    tcb_init(ptask, main_task, 0, 100);

    os_ready_insert(ptask);

    /* 'slip into' the idle task, because os_main() is not a task (it is the god code of the system) */
    __set_sp(&(task_stack[0][TASK_STK_SIZE]));
    current_task->state = TASK_RUNNING;
    idle_task(0);

    kassert(0);
    return 0;
}
Example #3
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

	per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
	per_cpu(kernel_stack8k, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE - 8192;
#endif
	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
		barrier();
	}

	return 0;
}
Example #4
File: time.c Project: E-LLP/n900
static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 blocked, runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	snap = &__get_cpu_var(runstate_snapshot);

	/* work out how much time the VCPU has not been runn*ing*  */
	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	/* Add the appropriate number of ticks of stolen time,
	   including any left-overs from last time.  Passing NULL to
	   account_steal_time accounts the time as stolen. */
	stolen = runnable + offline + __get_cpu_var(residual_stolen);

	if (stolen < 0)
		stolen = 0;

	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	__get_cpu_var(residual_stolen) = stolen;
	account_steal_time(NULL, ticks);

	/* Add the appropriate number of ticks of blocked time,
	   including any left-overs from last time.  Passing idle to
	   account_steal_time accounts the time as idle/wait. */
	blocked += __get_cpu_var(residual_blocked);

	if (blocked < 0)
		blocked = 0;

	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
	__get_cpu_var(residual_blocked) = blocked;
	account_steal_time(idle_task(smp_processor_id()), ticks);
}
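The divide-and-carry step above (iter_div_u64_rem() plus the per-CPU residual_stolen / residual_blocked counters) is what keeps partial ticks from being lost between calls. Below is a minimal user-space sketch of that pattern only, assuming HZ = 100 and a single CPU; stolen_ns_to_ticks() and the bare global are stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define NS_PER_TICK (1000000000ULL / 100)   /* assumed HZ = 100 */

static uint64_t residual_stolen;            /* per-CPU variable in the real code */

static uint64_t stolen_ns_to_ticks(uint64_t delta_ns)
{
    uint64_t total = delta_ns + residual_stolen;   /* include left-overs from last time */
    uint64_t ticks = total / NS_PER_TICK;          /* whole ticks to account now */

    residual_stolen = total % NS_PER_TICK;         /* carry the remainder forward */
    return ticks;
}

int main(void)
{
    /* 25 ms -> 2 ticks, 5 ms carried; then 7 ms + 5 ms carried -> 1 tick, 2 ms carried */
    printf("%llu\n", (unsigned long long)stolen_ns_to_ticks(25000000));
    printf("%llu\n", (unsigned long long)stolen_ns_to_ticks(7000000));
    return 0;
}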
Example #5
/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
static void rcu_idle_exit_common(long long oldval)
{
	if (oldval) {
		RCU_TRACE(trace_rcu_dyntick(TPS("++="),
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
			  oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}
Example #6
/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
static void rcu_idle_exit_common(long long oldval)
{
	if (oldval) {
		RCU_TRACE(trace_rcu_dyntick("++=",
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
			  oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}
Example #7
File: smp.c Project: mobilipia/iods
int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

#if 0
	rc = cpu_up_check(cpu);
	if (rc)
		return rc;
#endif

	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	irq_ctx_init(cpu);
	xen_setup_timer(cpu);

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	smp_store_cpu_info(cpu);
	set_cpu_sibling_map(cpu);
	/* This must be done before setting cpu_online_map */
	wmb();

	cpu_set(cpu, cpu_online_map);

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	return 0;
}
Example #8
/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(long long oldval)
{
	if (rcu_dynticks_nesting) {
		RCU_TRACE(trace_rcu_dyntick("--=",
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
}
Example #9
static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
{
	smp_mb__before_atomic_inc();  /* order the increment after the prior idle sojourn */
	atomic_inc(&rdtp->dynticks);
	/* CPUs seeing atomic_inc() must also see later RCU read-side critical sections */
	smp_mb__after_atomic_inc();
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
	rcu_cleanup_after_idle(smp_processor_id());
	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		trace_rcu_dyntick("Error on exit: not idle task",
				  oldval, rdtp->dynticks_nesting);
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); 
	}
}
Example #10
static struct task_struct *getthread(struct pt_regs *regs, int tid)
{
	/*
	 * Non-positive TIDs are remapped to the cpu shadow information
	 */
	if (tid == 0 || tid == -1)
		tid = -atomic_read(&kgdb_active) - 2;
	if (tid < 0) {
		if (kgdb_info[-tid - 2].task)
			return kgdb_info[-tid - 2].task;
		else
			return idle_task(-tid - 2);
	}

	/*
	 * find_task_by_pid_ns() does not take the tasklist lock anymore
	 * but is nicely RCU locked - hence is a pretty resilient
	 * thing to use:
	 */
	return find_task_by_pid_ns(tid, &init_pid_ns);
}
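getthread() above remaps non-positive debugger TIDs onto per-CPU shadow entries: TID 0 or -1 means the CPU kgdb is currently active on, and any other negative TID encodes a CPU number as tid = -cpu - 2. A stand-alone sketch of just that encoding, with hypothetical helper names rather than kernel code:

#include <assert.h>
#include <stdio.h>

static int cpu_to_shadow_tid(int cpu) { return -cpu - 2; }   /* tid -2 is CPU 0, -3 is CPU 1, ... */
static int shadow_tid_to_cpu(int tid) { return -tid - 2; }

int main(void)
{
    for (int cpu = 0; cpu < 4; cpu++) {
        int tid = cpu_to_shadow_tid(cpu);
        assert(shadow_tid_to_cpu(tid) == cpu);                /* the mapping round-trips */
        printf("cpu %d <-> shadow tid %d\n", cpu, tid);
    }
    return 0;
}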
Example #11
/*
 * Account time for a transition between system, hard irq or soft irq state.
 * Note that this function is called with interrupts enabled.
 */
void account_system_vtime(struct task_struct *tsk)
{
    struct thread_info *ti = task_thread_info(tsk);
    unsigned long flags;
    cputime_t delta_stime;
    __u64 now;

    local_irq_save(flags);

    now = ia64_get_itc();

    delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
    if (irq_count() || idle_task(smp_processor_id()) != tsk)
        account_system_time(tsk, 0, delta_stime, delta_stime);
    else
        account_idle_time(delta_stime);
    ti->ac_stime = 0;

    ti->ac_stamp = now;

    local_irq_restore(flags);
}
Example #12
/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
{
    struct thread_info *pi = task_thread_info(prev);
    struct thread_info *ni = task_thread_info(next);
    cputime_t delta_stime, delta_utime;
    __u64 now;

    now = ia64_get_itc();

    delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
    if (idle_task(smp_processor_id()) != prev)
        account_system_time(prev, 0, delta_stime, delta_stime);
    else
        account_idle_time(delta_stime);

    if (pi->ac_utime) {
        delta_utime = cycle_to_cputime(pi->ac_utime);
        account_user_time(prev, delta_utime, delta_utime);
    }

    pi->ac_stamp = ni->ac_stamp = now;
    ni->ac_stime = ni->ac_utime = 0;
}
Example #13
/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
static void rcu_idle_enter_common(long long newval)
{
	if (newval) {
		RCU_TRACE(trace_rcu_dyntick(TPS("--="),
					    rcu_dynticks_nesting, newval));
		rcu_dynticks_nesting = newval;
		return;
	}
	RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
				    rcu_dynticks_nesting, newval));
	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
					    rcu_dynticks_nesting, newval));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_sched_qs(); /* implies rcu_bh_inc() */
	barrier();
	rcu_dynticks_nesting = newval;
}
Example #14
void linsched_yield(void)
{
	/* If the current task is not the idle task, yield. */
	if (current != idle_task(smp_processor_id()))
		yield();
}
Example #15
//----------------------------------------------------------------------------------------------------//
//  @func - xilkernel_start
//! @desc
//!   Start the kernel by enabling interrupts and starting to execute the idle task.
//! @return
//!   - Nothing.
//! @note
//!   - Routine does not return.
//! @desc
//----------------------------------------------------------------------------------------------------//
void xilkernel_start (void)
{
    DBG_PRINT("XMK: Process scheduling starts.\r\n");
    Xil_ExceptionEnable();
    idle_task ();                                                       // Does not return
}
Example #16
/*! \brief Entry point of the audio management interface.
 *
 */
void com_task(void)
{
  static struct state_machine_context state_m = {
    .state                = STATE_INITIALIZATION,
    .async_cmd            = false,
    .view                 = GUI_UPDATE_VIEW_NONE,
    .view_elt             = GUI_UPDATE_ELT_NONE,
    .elapsed_time_timer.timer_state   = CPU_TIMER_STATE_STOPPED
  };

  // Update the GUI
  gui_update(state_m.view, &state_m.view_elt, &state_m.display_list, &state_m.info, &state_m.player_status);

  // Ask the audio interface to execute pending tasks
  ai_async_cmd_task();

  if (state_m.async_cmd)
  {
    // If current command is not done
    if (!is_ai_async_cmd_finished())
    {
      // If it is a new command that is being processed
      if (state_m.in_progress_timer.timer_state == CPU_TIMER_STATE_STOPPED)
        cpu_set_timeout(cpu_ms_2_cy(500, FCPU_HZ), &state_m.in_progress_timer);
      // If the current command is not done and it is taking a long time
      else if (cpu_is_timeout(&state_m.in_progress_timer))
        state_m.view_elt |= GUI_UPDATE_ELT_IN_PROGRESS;
      return;
    }
    else
    {
      state_m.cmd_status = ai_async_cmd_out_status();
      cpu_stop_timeout(&state_m.in_progress_timer);
    }
  }

  // If a device is connected
  if (state_m.state != STATE_INITIALIZATION &&
      state_m.state != STATE_IDLE_ENTRY_POINT &&
      state_m.state != STATE_IDLE_WAIT_FOR_EVENT)
  {
    // If no device is connected, then jump to the disconnection state
    if (ai_is_none())
    {
      ai_command_abort();
      state_m.state = STATE_DEVICE_DISCONNECTED;
    }
  }

  switch (state_m.state)
  {
  case STATE_INITIALIZATION:
    state_m.state = STATE_IDLE_ENTRY_POINT;
    cpu_stop_timeout(&state_m.in_progress_timer);
    // Set default volume if specified
#if defined(DEFAULT_VOLUME)
    audio_mixer_dacs_set_volume(DEFAULT_VOLUME);
#endif
    break;
  case STATE_DEVICE_CONNECTED:
    controller_init(FCPU_HZ, FHSB_HZ, FPBB_HZ, FPBA_HZ);
    state_m.state = STATE_NAVIGATION_ENTRY_POINT;
    state_m.view_elt |= GUI_UPDATE_ELT_CONNECTED;
    break;
  case STATE_DEVICE_DISCONNECTED:
    controller_shutdown();
    state_m.state = STATE_IDLE_ENTRY_POINT;
    state_m.view_elt |= GUI_UPDATE_ELT_DISCONNECTED;
    break;
  case STATE_IDLE_ENTRY_POINT:
  case STATE_IDLE_WAIT_FOR_EVENT:
  case STATE_IDLE_DRIVE_LOAD:
    idle_task(&state_m);
    break;
  case STATE_NAVIGATION_ENTRY_POINT:
  case STATE_NAVIGATION_UPDATE_LIST:
  case STATE_NAVIGATION_UPDATE_LIST_GET_NAME:
  case STATE_NAVIGATION_UPDATE_LIST_STORE_NAME:
  case STATE_NAVIGATION_UPDATE_ISDIR:
  case STATE_NAVIGATION_WAIT_FOR_EVENT:
  case STATE_NAVIGATION_UPDATE_STATUS:
  case STATE_NAVIGATION_CD:
  case STATE_NAVIGATION_GOTOPARENT:
  case STATE_NAVIGATION_GOTOPARENT_ERROR_HANDLING:
  case STATE_NAVIGATION_PLAY_SELECTED_FILE:
  case STATE_NAVIGATION_WAIT_FOR_SELECTION:
  case STATE_NAVIGATION_UPDATE_METADATA_AND_PLAY:
    navigation_task(&state_m);
    break;
  case STATE_PLAYBACK_ENTRY_POINT:
  case STATE_PLAYBACK_WAIT_FOR_EVENT:
  case STATE_PLAYBACK_HANDLE_FAST_MODES:
  case STATE_PLAYBACK_UPDATE_TIME:
  case STATE_PLAYBACK_UPDATE_STATUS:
    playback_task(&state_m);
    break;
  case STATE_CONFIG_ENTRY_POINT:
  case STATE_CONFIG_WAIT_FOR_EVENT:
  case STATE_CONFIG_UPDATE_STATES:
  case STATE_CONFIG_READ_REPEAT_STATE:
  case STATE_CONFIG_READ_SHUFFLE_STATE:
    config_task(&state_m);
    break;
  case STATE_CHECK_DEVICE_ENTRY_POINT:
  case STATE_CHECK_DEVICE_UPDATE_STATUS:
    check_device_task(&state_m);
    break;
  case STATE_TRACK_CHANGED_ENTRY_POINT:
  case STATE_TRACK_CHANGED_TOTAL_TIME:
  case STATE_TRACK_CHANGED_FILE_NAME:
  case STATE_TRACK_CHANGED_ARTIST:
  case STATE_TRACK_CHANGED_TITLE:
  case STATE_TRACK_CHANGED_IMAGE:
  case STATE_TRACK_CHANGED_RESUME:
  case STATE_TRACK_CHECK_RESUME:
    track_changed_task(&state_m);
    break;
  case STATE_COMMAND_PLAY_ANY_SONG:
    command_task(&state_m);
    break;
  default:
    break;
  }
/*
  // Power sleep mode is managed here
  if( usb_device_get_state()==DEVICE_STATE_NOT_CONNECTED ) {
     if( cpu_is_timer_stopped(&sleep_timer) ) {
        cpu_set_timeout(cpu_ms_2_cy(SLEEP_MODE_MS, FCPU_HZ), &sleep_timer);
     }
     else if( cpu_is_timeout(&sleep_timer) ) {
        gui_enter_idle();
        SLEEP(AVR32_PM_SMODE_IDLE);
        gui_leave_idle();
     }
  } else {
     cpu_stop_timeout(&sleep_timer);
  }
*/
}
Example #17
static void fastcall reschedule_idle(struct task_struct * p)
{
#ifdef CONFIG_SMP
#if 0
  // not yet?
	int this_cpu = smp_processor_id();
	struct task_struct *tsk, *target_tsk;
	int cpu, best_cpu, i, max_prio;
	cycles_t oldest_idle;

	/*
	 * shortcut if the woken up task's last CPU is
	 * idle now.
	 */
	best_cpu = p->pcb$l_cpu_id;
	if (can_schedule(p, best_cpu)) {
		tsk = idle_task(best_cpu);
		if (cpu_curr(best_cpu) == tsk) {
			int need_resched;
send_now_idle:
			/*
			 * If need_resched == -1 then we can skip sending
			 * the IPI altogether, tsk->need_resched is
			 * actively watched by the idle thread.
			 */
			need_resched = tsk->need_resched;
			tsk->need_resched = 1;
			if ((best_cpu != this_cpu) && !need_resched)
				smp_send_reschedule(best_cpu);
			return;
		}
	}

	/*
	 * We know that the preferred CPU has a cache-affine current
	 * process, lets try to find a new idle CPU for the woken-up
	 * process. Select the least recently active idle CPU. (that
	 * one will have the least active cache context.) Also find
	 * the executing process which has the least priority.
	 */
	oldest_idle = (cycles_t) -1;
	target_tsk = NULL;
	max_prio = 0;

	for (i = 0; i < smp_num_cpus; i++) {
		cpu = cpu_logical_map(i);
		if (!can_schedule(p, cpu))
			continue;
		tsk = cpu_curr(cpu);
		/*
		 * We use the first available idle CPU. This creates
		 * a priority list between idle CPUs, but this is not
		 * a problem.
		 */
		if (tsk == idle_task(cpu)) {
			if (last_schedule(cpu) < oldest_idle) {
				oldest_idle = last_schedule(cpu);
				target_tsk = tsk;
			}
		} else {
			if (oldest_idle == -1ULL) {
				int prio = preemption_goodness(tsk, p, cpu);

				if (prio > max_prio) {
					max_prio = prio;
					target_tsk = tsk;
				}
			}
		}
	}
	tsk = target_tsk;
	if (tsk) {
		if (oldest_idle != -1ULL) {
			best_cpu = tsk->pcb$l_cpu_id;
			goto send_now_idle;
		}
		tsk->need_resched = 1;
		if (tsk->pcb$l_cpu_id != this_cpu)
			smp_send_reschedule(tsk->pcb$l_cpu_id);
	}
	return;
		
#endif
#else /* UP */
	int this_cpu = smp_processor_id();
	struct task_struct *tsk;

	tsk = ctl$gl_pcb;
	if (p->pcb$b_pri >= tsk->pcb$b_pri) /* previous was meaningless */
		tsk->need_resched = 1;
#endif
}
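The #if 0 block above spells out the wake-up heuristic: if several CPUs are idle, prefer the one that has been idle longest, since its cache holds the least useful context for the task being woken. A self-contained sketch of that selection loop, using made-up per-CPU data in place of last_schedule() and cpu_curr():

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical snapshot: when each CPU last scheduled, and whether it is idle now. */
static uint64_t last_schedule_ts[NR_CPUS] = { 100, 40, 90, 75 };
static int      cpu_is_idle[NR_CPUS]      = {   1,  1,  0,  1 };

static int pick_oldest_idle_cpu(void)
{
    uint64_t oldest = UINT64_MAX;
    int target = -1;

    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        if (cpu_is_idle[cpu] && last_schedule_ts[cpu] < oldest) {
            oldest = last_schedule_ts[cpu];    /* least recently active so far */
            target = cpu;
        }
    }
    return target;                             /* -1 if no CPU is idle */
}

int main(void)
{
    printf("wake target: cpu %d\n", pick_oldest_idle_cpu());   /* cpu 1 in this snapshot */
    return 0;
}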
Example #18
asmlinkage void sch$resched(void) {
  int cpuid = smp_processor_id();
  struct _cpu * cpu=smp$gl_cpu_data[cpuid]; 
  struct _pcb * curpcb;
  unsigned long curpri;
  unsigned long qhead;
  int before,after;

  // lock sched db, soon
  //if (spl(IPL$_SCHED)) return;
  //    old=spl(IPL$_SCHED);
  // svpctx, do not think we need to do this here

#ifdef __x86_64__
  if (intr_blocked(IPL$_RESCHED))
    return;
  regtrap(REG_INTR,IPL$_RESCHED);
#endif

  /** spinlock sched and set ipl */
  setipl(IPL$_SCHED);
  vmslock(&SPIN_SCHED,-1);

  spin_lock_irq(&runqueue_lock); /* eventually change to sched? */

  /** get current pcb and priority */

  curpcb=cpu->cpu$l_curpcb;

  release_kernel_lock(curpcb, cpuid);

  curpri=cpu->cpu$b_cur_pri;

  /** clear bit in cpu_priority table */

  sch$al_cpu_priority[curpri]=sch$al_cpu_priority[curpri] & (~ cpu->cpu$l_cpuid_mask );

  /** if no process with this pri on any cpu, clear bit in active_priority table */

  if (!sch$al_cpu_priority[curpri])
    sch$gl_active_priority=sch$gl_active_priority & (~ (1 << (31-curpri)));

  /** now some if's remaining from linux - TODO: check if still needed */

  if (curpcb == idle_task(curpcb->pcb$l_cpu_id))
    goto out;

  if (curpcb->state==TASK_INTERRUPTIBLE)
    if (signal_pending(curpcb)) {
      curpcb->state = TASK_RUNNING;
      curpcb->pcb$w_state = SCH$C_CUR;
    }

#if 0
  if (curpcb->state!=TASK_RUNNING) {
    curpcb->pcb$w_state=SCH$C_LEF; // use here temporarily
  }
#endif

#if 0
  if (curpcb->state==TASK_RUNNING) {
#endif
#ifdef DEBUG_SCHED
    before=numproc();
    //    printcom();
    //if (curpcb==0xa018c000 && qhead==0xa018c000)
    //  panic("aieeeeeh\n");
    mycheckaddr(0);
    //if (curpcb==qhead) panic(" a panic\n");
#endif

    /** set pri bit in comqs */

    sch$gl_comqs=sch$gl_comqs | (1 << curpri);
    //    curpcb->state=TASK_INTERRUPTIBLE; /* soon SCH$C_COM ? */

    /** set state of cur pcb to COM */

    curpcb->pcb$w_state=SCH$C_COM;

    /** insert pcb at tail of comqueue */

#ifdef __i386__    
    qhead=*(unsigned long *)&sch$aq_comt[curpri];
#else
    qhead=*(unsigned long *)&sch$aq_comh[curpri][1];
#endif
    if (!task_on_comqueue(curpcb)) {
      if (curpcb==qhead) panic(" a panic\n");
      insque(curpcb,qhead);
    } else {
      panic("something\n");
    }
#ifdef DEBUG_SCHED
    mycheckaddr(42);
#endif
    /** linux leftover */
    nr_running++;
#ifdef DEBUG_SCHED
    after=numproc();
    if(after-before!=1) {
      //printk("entry qhead %x %x\n",curpcb,qhead);
      printcom();
      panic("insq2 %x %x\n",before,after);
    }
#endif

  out:
    /** clear idle_cpus to signal all idle cpus to try to reschedule */
    sch$gl_idle_cpus=0;
#if 0
  }
#endif
    /** go on into sch$sched */
  sch$sched(1);
}
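sch$resched() publishes the preempted process by setting its priority bit in sch$gl_comqs and queuing the PCB on the matching COM queue; sch$sched() (Example #19) then runs ffs() over that bitmap to find the next queue to serve in O(1). A stand-alone sketch of the bitmap technique, assuming the convention used here that a lower bit index means a higher priority; the helpers are illustrative, not the VMS/Linux code above.

#include <strings.h>   /* ffs() */
#include <stdio.h>

static unsigned int comqs;                     /* bit q set => ready queue q is non-empty */

static void queue_ready(int q) { comqs |=  (1u << q); }
static void queue_empty(int q) { comqs &= ~(1u << q); }

static int next_queue(void)
{
    int bit = ffs((int)comqs);                 /* 1-based index of lowest set bit, 0 if none */
    return bit ? bit - 1 : -1;                 /* -1: nothing runnable, go idle */
}

int main(void)
{
    queue_ready(12);
    queue_ready(5);
    printf("serve queue %d\n", next_queue());  /* 5: highest-priority pending queue */
    queue_empty(5);
    printf("serve queue %d\n", next_queue());  /* 12 */
    queue_empty(12);
    printf("serve queue %d\n", next_queue());  /* -1: idle */
    return 0;
}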
Example #19
asmlinkage void sch$sched(int from_sch$resched) {
  int cpuid = smp_processor_id();
  struct _cpu * cpu=smp$gl_cpu_data[cpuid]; 
  struct _pcb *next = 0, *curpcb;
  int curpri, affinity;
  unsigned char tmppri;
  unsigned long qhead = 0;
  int after, before;

  curpcb=cpu->cpu$l_curpcb;
  curpri=cpu->cpu$b_cur_pri;

  //  if (!countme--) { countme=500; printk("."); }

  if (from_sch$resched == 1)
    goto skip_lock;

#if 0

  // NOT YET??? nope,not an interrupt. pushpsl+setipl/vmslock instead?

  if (intr_blocked(IPL$_SCHED))
    return;

  regtrap(REG_INTR,IPL$_SCHED);
#endif

  int ipl = getipl();
  if (ipl != 8 || SPIN_SCHED.spl$l_spinlock == 0)
    panic("schsch\n");

#if 0
  // temp workaround
  // must avoid nesting, since I do not know how to get out of it
  setipl(IPL$_SCHED);
  vmslock(&SPIN_SCHED,-1);
#endif

  /** clear cpu_priority for current pri bit - TODO: where did this come from? */

  sch$al_cpu_priority[curpri]=sch$al_cpu_priority[curpri] & (~ cpu->cpu$l_cpuid_mask );

  /** skip if ... TODO: from where? */

  if (sch$al_cpu_priority[curpri]) 
    goto skip_lock;

  /** clear active_priority for current pri bit - TODO: where did this come from? */

  sch$gl_active_priority=sch$gl_active_priority & (~ (1 << (31-curpri)));

  //if (spl(IPL$_SCHED)) return;
  //  old=spl(IPL$_SCHED);

  /** now 4 linux leftovers */

  spin_lock_prefetch(&runqueue_lock);

  if (!curpcb->active_mm) BUG();

  release_kernel_lock(curpcb, cpuid);

  spin_lock_irq(&runqueue_lock);

 skip_lock:

  /** reset cpu affinity TODO: from where? */

  affinity=0;
  struct _pcb * aff_next = 0;

  /** find highest pri comqueue */

  tmppri=ffs(sch$gl_comqs);
#ifdef DEBUG_SCHED
  if (mydebug5)
    printk("ffs %x %x\n",tmppri,sch$gl_comqs);
#endif

  if (!tmppri) {
    /** if none found, idle */
#if 0
    // spot for more vms sched
    goto sch$idle;
#endif
  go_idle:
    /** set bit in idle_cpus */
    sch$gl_idle_cpus=sch$gl_idle_cpus | (cpu->cpu$l_cpuid_mask);
    /** store null pcb and -1 pri: MISSING check why */
    /** necessary idle_task line from linux */
    next=idle_task(cpuid);
    goto skip_cap;
  } else {