static int __switches_init(void) { int i; int e; printk("\nWait for it ...\n"); rt_typed_sem_init(&sem, 1, SEM_TYPE); rt_linux_use_fpu(1); thread = (RT_TASK *)kmalloc(ntasks*sizeof(RT_TASK), GFP_KERNEL); for (i = 0; i < ntasks; i++) { #ifdef DISTRIBUTE e = rt_task_init_cpuid(thread + i, pend_task, i, stack_size, 0, use_fpu, 0, i%2); #else e = rt_task_init_cpuid(thread + i, pend_task, i, stack_size, 0, use_fpu, 0, rtai_cpuid()); #endif if (e < 0) { task_init_has_failed: rt_printk("switches: failed to initialize task %d, error=%d\n", i, e); while (--i >= 0) rt_task_delete(thread + i); return -1; } } e = rt_task_init_cpuid(&task, sched_task, i, stack_size, 1, use_fpu, 0, rtai_cpuid()); if (e < 0) goto task_init_has_failed; rt_task_resume(&task); return 0; }
/*
 * Syscall-visible helper: unconditionally pend IRQ "irq" to Linux on the
 * current CPU, with hard interrupts disabled around the pend operation.
 */
RTAI_SYSCALL_MODE void usr_rt_pend_linux_irq (unsigned irq)
{
	unsigned long hwflags;

	rtai_save_flags_and_cli(hwflags);
	hal_pend_uncond(irq, rtai_cpuid());
	rtai_restore_flags(hwflags);
}
/*
 * Block the current task on mailbox "mbx" until the availability counter
 * pointed to by "fravbs" is nonzero, or the absolute deadline "time" passes.
 *
 * Returns 0 when the counter is (or becomes) available; otherwise decodes
 * the abnormal wakeup cause: RTE_TIMOUT on timeout, RTE_UNBLKD if the task
 * was forcibly unblocked, RTE_OBJREM if the mailbox was removed.
 * Takes and releases the global scheduler lock internally.
 */
static int mbx_wait_until(MBX *mbx, int *fravbs, RTIME time, RT_TASK *rt_current)
{
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	if (!(*fravbs)) {
		void *retp;
		/* record what we block on so wakers/removers can find and clear it */
		rt_current->blocked_on = (void *)mbx;
		mbx->waiting_task = rt_current;
		if ((rt_current->resume_time = time) > rt_smp_time_h[rtai_cpuid()]) {
			/* deadline still in the future: suspend with a timeout */
			rt_current->state |= (RT_SCHED_MBXSUSP | RT_SCHED_DELAYED);
			rem_ready_current(rt_current);
			enq_timed_task(rt_current);
			rt_schedule();
		}
		/* blocked_on still set after resuming means we were not woken by a
		 * normal mailbox post; decode the reason from its value */
		if (unlikely((retp = rt_current->blocked_on) != NULL)) {
			mbx->waiting_task = NULL;
			rt_global_restore_flags(flags);
			return likely(retp > RTP_HIGERR) ? RTE_TIMOUT : (retp == RTP_UNBLKD ? RTE_UNBLKD : RTE_OBJREM);
		}
	}
	rt_global_restore_flags(flags);
	return 0;
}
/*
 * Mark system request "srq" as pending and kick the virtual IRQ that makes
 * Linux service it. Requests outside (0, RTAI_NR_SRQS) are silently ignored.
 */
void rt_pend_linux_srq (unsigned srq)
{
	unsigned long hwflags;

	if (srq == 0 || srq >= RTAI_NR_SRQS) {
		return;
	}
	set_bit(srq, &rtai_sysreq_pending);
	rtai_save_flags_and_cli(hwflags);
	hal_pend_uncond(rtai_sysreq_virq, rtai_cpuid());
	rtai_restore_flags(hwflags);
}
/*
 * Initialize tasklet-based timer "timer" and insert it into the per-CPU
 * timers list, propagating its priority/deadline to the timers manager task
 * when it becomes the most urgent entry.
 *
 * pid >= 0 denotes a kernel-space caller and requires a non-NULL handler;
 * pid < 0 denotes a user-space tasklet served through timer->usptasklet.
 * Returns 0 on success, -EINVAL if a required handler is missing.
 */
RTAI_SYSCALL_MODE int rt_insert_timer(struct rt_tasklet_struct *timer, int priority, RTIME firing_time, RTIME period, void (*handler)(unsigned long), unsigned long data, int pid)
{
	spinlock_t *lock;
	unsigned long flags, cpuid;
	RT_TASK *timer_manager;

	// timer initialization
	timer->uses_fpu = 0;
	if (pid >= 0) {
		if (!handler) {
			return -EINVAL;
		}
		timer->handler = handler;
		timer->data = data;
	} else {
		/* NOTE(review): the second disjunct is unreachable — any pointer
		 * equal to (void *)1 already satisfies != NULL; possibly "&&" or a
		 * different comparison was intended. Confirm against upstream. */
		if (timer->handler != NULL || timer->handler == (void *)1) {
			timer->handler = (void *)1;
			timer->data = data;
		}
	}
	timer->priority = priority;
	/* convert the absolute firing time to internal count units first */
	REALTIME2COUNT(firing_time)
	timer->firing_time = firing_time;
	timer->period = period;
	if (!pid) {
		timer->task = 0;
		timer->cpuid = cpuid = NUM_CPUS > 1 ? rtai_cpuid() : 0;
	} else {
		/* NOTE(review): assumes timer->task was set by the caller whenever
		 * pid != 0 — verify, it is dereferenced unconditionally here */
		timer->cpuid = cpuid = NUM_CPUS > 1 ? (timer->task)->runnable_on_cpus : 0;
		(timer->task)->priority = priority;
		rt_copy_to_user(timer->usptasklet, timer, sizeof(struct rt_usp_tasklet_struct));
	}
	// timer insertion in timers_list
	flags = rt_spin_lock_irqsave(lock = &timers_lock[LIST_CPUID]);
	enq_timer(timer);
	rt_spin_unlock_irqrestore(flags, lock);
	// timers_manager priority inheritance
	if (timer->priority < (timer_manager = &timers_manager[LIST_CPUID])->priority) {
		timer_manager->priority = timer->priority;
	}
	// timers_task deadline inheritance: if this timer is now the head of the
	// list and fires before the manager would resume, pull the manager in
	flags = rt_global_save_flags_and_cli();
	if (timers_list[LIST_CPUID].next == timer && (timer_manager->state & RT_SCHED_DELAYED) && firing_time < timer_manager->resume_time) {
		timer_manager->resume_time = firing_time;
		rem_timed_task(timer_manager);
		enq_timed_task(timer_manager);
		rt_schedule();
	}
	rt_global_restore_flags(flags);
	return 0;
}
static void rt_timer_handler(unsigned long none) { #if 0 // diagnose and see if interrupts are coming in static int cnt[NR_RT_CPUS]; int cpuid = rtai_cpuid(); rt_printk("TIMER TICK: CPU %d, %d\n", cpuid, ++cnt[cpuid]); #endif rt_pend_linux_srq(srq); mod_timer(&timer, jiffies + (HZ/TIMER_FREQ)); return; }
/*
 * Install "handler" as the RTAI decrementer timer handler (PowerPC).
 * tick > 0 selects periodic mode, with the tick clamped to one Linux jiffy
 * at most; tick == 0 selects one-shot mode, programming everything at the
 * Linux tick rate. "use_apic" is accepted for cross-arch API compatibility.
 * Returns 0.
 */
int rt_request_timer (void (*handler)(void), unsigned tick, int use_apic)
{
	unsigned long flags;

	rtai_save_flags_and_cli(flags);
	// read tick values: current time base register and linux tick
	rt_times.tick_time = rtai_rdtsc();
	rt_times.linux_tick = tb_ticks_per_jiffy;
	if (tick > 0) {
		// periodic mode
		// a tick longer than a jiffy would starve Linux timing: clamp it
		if (tick > tb_ticks_per_jiffy) {
			tick = tb_ticks_per_jiffy;
		}
		rt_times.intr_time = rt_times.tick_time + tick;
		rt_times.linux_time = rt_times.tick_time + rt_times.linux_tick;
		rt_times.periodic_tick = tick;
#ifdef CONFIG_40x
		/* Set the PIT auto-reload mode */
		mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE);
		/* Set the PIT reload value and just let it run. */
		mtspr(SPRN_PIT, tick);
#endif /* CONFIG_40x */
	} else {
		// one-shot mode: set everything to decay at the linux tick rate
		rt_times.intr_time = rt_times.tick_time + rt_times.linux_tick;
		rt_times.linux_time = rt_times.tick_time + rt_times.linux_tick;
		rt_times.periodic_tick = rt_times.linux_tick;
#ifdef CONFIG_40x
		/* Disable the PIT auto-reload mode */
		mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_ARE);
#endif /* CONFIG_40x */
	}
	// request an IRQ and register it
	rt_release_irq(RTAI_TIMER_DECR_IRQ);
	decr_timer_handler = handler;
	// go through the ipipe: register an immediate timer trap handler.
	// On i386 the periodic analogue is rt_set_timer_delay(tick) (8254 rate
	// generator), and one-shot uses LATCH — this is the same idea here.
	rtai_disarm_decr(rtai_cpuid(), 1);
	rt_set_timer_delay(rt_times.periodic_tick);
	rtai_set_gate_vector(DECR_VECTOR, rtai_decr_timer_handler, 0);
	rtai_request_tickdev();
	rtai_restore_flags(flags);
	return 0;
}
void rt_free_timer (void) { unsigned long flags; rtai_save_flags_and_cli(flags); rtai_release_tickdev(); #ifdef CONFIG_40x /* Re-enable the PIT auto-reload mode */ mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE); /* Set the PIT reload value and just let it run. */ mtspr(SPRN_PIT, tb_ticks_per_jiffy); #endif /* CONFIG_40x */ rtai_reset_gate_vector(DECR_VECTOR, 0, 0); rtai_disarm_decr(rtai_cpuid(), 0); rtai_restore_flags(flags); }
/*
 * Body of benchmark task "t": repeatedly block according to the global
 * "change" mode (0: plain suspend, 1: semaphore wait, 2: receive+reply),
 * bumping the per-CPU wakeup counter after every resume.
 */
static void pend_task (long t)
{
	unsigned long msg;

	for (;;) {
		if (change == 0) {
			rt_task_suspend(thread + t);
		} else if (change == 1) {
			rt_sem_wait(&sem);
		} else if (change == 2) {
			rt_return(rt_receive(NULL, &msg), 0);
		}
		cpu_used[rtai_cpuid()]++;
	}
}
/*
 * Wake the signal-server task "sigtask" so it can run a signal on behalf of
 * "task". The served task (when non-NULL) is removed from the ready queue
 * and parked in RT_SCHED_SIGSUSP state until the server releases it; the
 * server inherits the served task's priority. Takes the global lock.
 */
static inline void rt_exec_signal(RT_TASK *sigtask, RT_TASK *task)
{
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	/* only act when the server is suspended, and only on the transition of
	 * its suspend depth back to zero */
	if (sigtask->suspdepth > 0 && !(--sigtask->suspdepth)) {
		if (task) {
			sigtask->priority = task->priority;
			/* first pstate increment: actually take the task off the CPU */
			if (!task->pstate++) {
				rem_ready_task(task);
				task->state |= RT_SCHED_SIGSUSP;
			}
		}
		sigtask->state &= ~RT_SCHED_SIGSUSP;
		/* the server learns whom it serves through its return value */
		sigtask->retval = (long)task;
		enq_ready_task(sigtask);
		RT_SCHEDULE(sigtask, rtai_cpuid());
	}
	rt_global_restore_flags(flags);
}
/*
 * Initialize RTDM timer "timer" and insert it into the per-CPU timers list,
 * propagating its priority/deadline to the timers manager task when it
 * becomes the most urgent entry.
 * Returns 0 on success, -EINVAL when no handler is supplied.
 */
RTAI_SYSCALL_MODE int rt_timer_insert(struct rtdm_timer_struct *timer, int priority, RTIME firing_time, RTIME period, void (*handler)(unsigned long), unsigned long data)
{
	spinlock_t *lock;
	unsigned long flags, cpuid;
	RT_TASK *timer_manager;

	if (!handler) {
		return -EINVAL;
	}
	timer->handler = handler;
	timer->data = data;
	timer->priority = priority;
	/* fix: convert to internal count units BEFORE storing, as the sibling
	 * rt_insert_timer does; previously timer->firing_time kept the raw
	 * value while the deadline comparison below used the converted one */
	REALTIME2COUNT(firing_time)
	timer->firing_time = firing_time;
	timer->period = period;
	timer->cpuid = cpuid = NUM_CPUS > 1 ? rtai_cpuid() : 0;
	// timer insertion in timers_list
	flags = rt_spin_lock_irqsave(lock = &timers_lock[LIST_CPUID]);
	enq_timer(timer);
	rt_spin_unlock_irqrestore(flags, lock);
	// timers_manager priority inheritance
	if (timer->priority < (timer_manager = &timers_manager[LIST_CPUID])->priority) {
		timer_manager->priority = timer->priority;
	}
	// timers_task deadline inheritance: if this timer is now the head of the
	// list and fires before the manager would resume, pull the manager in
	flags = rt_global_save_flags_and_cli();
	if (timers_list[LIST_CPUID].next == timer && (timer_manager->state & RT_SCHED_DELAYED) && firing_time < timer_manager->resume_time) {
		timer_manager->resume_time = firing_time;
		rem_timed_task(timer_manager);
		enq_timed_task(timer_manager);
		rt_schedule();
	}
	rt_global_restore_flags(flags);
	return 0;
}
static inline RT_TASK* __task_init(unsigned long name, int prio, int stack_size, int max_msg_size, int cpus_allowed) { void *msg_buf0, *msg_buf1; RT_TASK *rt_task; if ((rt_task = current->rtai_tskext(TSKEXT0))) { if (num_online_cpus() > 1 && cpus_allowed) { cpus_allowed = hweight32(cpus_allowed) > 1 ? get_min_tasks_cpuid() : ffnz(cpus_allowed); } else { cpus_allowed = rtai_cpuid(); } put_current_on_cpu(cpus_allowed); return rt_task; } if (rt_get_adr(name)) { return 0; } if (prio > RT_SCHED_LOWEST_PRIORITY) { prio = RT_SCHED_LOWEST_PRIORITY; } if (!max_msg_size) { max_msg_size = USRLAND_MAX_MSG_SIZE; } if (!(msg_buf0 = rt_malloc(max_msg_size))) { return 0; } if (!(msg_buf1 = rt_malloc(max_msg_size))) { rt_free(msg_buf0); return 0; } rt_task = rt_malloc(sizeof(RT_TASK) + 3*sizeof(struct fun_args)); if (rt_task) { rt_task->magic = 0; if (num_online_cpus() > 1 && cpus_allowed) { cpus_allowed = hweight32(cpus_allowed) > 1 ? get_min_tasks_cpuid() : ffnz(cpus_allowed); } else { cpus_allowed = rtai_cpuid(); } if (!set_rtext(rt_task, prio, 0, 0, cpus_allowed, 0)) { rt_task->fun_args = (long *)((struct fun_args *)(rt_task + 1)); rt_task->msg_buf[0] = msg_buf0; rt_task->msg_buf[1] = msg_buf1; rt_task->max_msg_size[0] = rt_task->max_msg_size[1] = max_msg_size; if (rt_register(name, rt_task, IS_TASK, 0)) { rt_task->state = 0; #ifdef __IPIPE_FEATURE_ENABLE_NOTIFIER ipipe_enable_notifier(current); #else current->flags |= PF_EVNOTIFY; #endif #if (defined VM_PINNED) && (defined CONFIG_MMU) ipipe_disable_ondemand_mappings(current); #endif RTAI_OOM_DISABLE(); return rt_task; } else { clr_rtext(rt_task); } } rt_free(rt_task); } rt_free(msg_buf0); rt_free(msg_buf1); return 0; }
/*
 * Top-level hard interrupt dispatcher: fetch the IRQ from the platform,
 * acknowledge it, then either run the registered RTAI real-time handler or
 * pend the IRQ to Linux. Returns 1 when the root (Linux) stage of the
 * interrupt pipeline must be flushed, 0 otherwise.
 */
static int rtai_hirq_dispatcher(struct pt_regs *regs)
{
	unsigned long cpuid;
	int irq;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14)
	if ((irq = ppc_md.get_irq()) >= RTAI_NR_IRQS) {
#else
	if ((irq = ppc_md.get_irq(regs)) >= RTAI_NR_IRQS) {
#endif
		spurious_interrupts++;
		return 0;
	}
	if (rtai_realtime_irq[irq].handler) {
		unsigned long sflags;
		/* NOTE(review): cpuid and sflags look unset on this path; presumably
		 * HAL_LOCK_LINUX() is a macro that assigns both — confirm against
		 * its definition before touching this branch. */
		HAL_LOCK_LINUX();
		RTAI_IRQ_ACK(irq); // rtai_realtime_irq[irq].irq_ack(irq); mb();
		RTAI_SCHED_ISR_LOCK();
		rtai_realtime_irq[irq].handler(irq, rtai_realtime_irq[irq].cookie);
		RTAI_SCHED_ISR_UNLOCK();
		HAL_UNLOCK_LINUX();
		if (rtai_realtime_irq[irq].retmode || test_bit(IPIPE_STALL_FLAG, ROOT_STATUS_ADR(cpuid))) {
			return 0;
		}
	} else {
		unsigned long lflags;
		/* stall the root domain while the IRQ is pended to Linux, then put
		 * the previous status word back */
		lflags = xchg((unsigned long *)ROOT_STATUS_ADR(cpuid = rtai_cpuid()), (1 << IPIPE_STALL_FLAG));
		RTAI_IRQ_ACK(irq); // rtai_realtime_irq[irq].irq_ack(irq); mb();
		hal_pend_uncond(irq, cpuid);
		ROOT_STATUS_VAL(cpuid) = lflags;
		if (test_bit(IPIPE_STALL_FLAG, &lflags)) {
			return 0;
		}
	}
	rtai_sti();
	hal_fast_flush_pipeline(cpuid);
	return 1;
}

/*
 * rt_set_trap_handler
 *
 * Atomically install "handler" as the RTAI trap handler and return the one
 * it replaces.
 */
RT_TRAP_HANDLER rt_set_trap_handler (RT_TRAP_HANDLER handler)
{
	return (RT_TRAP_HANDLER)xchg(&rtai_trap_handler, handler);
}

/*
 * rtai_trap_fault
 *
 * NOTE(review): this definition continues beyond the visible chunk; only
 * the optional hard-interrupt diagnostic prologue is shown here.
 */
static int rtai_trap_fault (unsigned event, void *evdata)
{
#ifdef HINT_DIAG_TRAPS
	static unsigned long traps_in_hard_intr = 0;
	do {
		unsigned long flags;
		rtai_save_flags_and_cli(flags);
		/* diagnose traps taken with hard interrupts disabled, reporting
		 * each trap number at most once via the accumulated bitmask */
		if (!test_bit(RTAI_IFLAG, &flags)) {
			if (!test_and_set_bit(event, &traps_in_hard_intr)) {
				HINT_DIAG_MSG(rt_printk("TRAP %d HAS INTERRUPT DISABLED (TRAPS PICTURE %lx).\n", event, traps_in_hard_intr););
			}
		}
	} while (0);
static long long user_srq(unsigned long whatever) { extern int calibrate_8254(void); unsigned long args[MAXARGS]; copy_from_user(args, (unsigned long *)whatever, MAXARGS*sizeof(unsigned long)); switch (args[0]) { case CAL_8254: { return calibrate_8254(); //calibrate_apic() break; } case KTHREADS: case KLATENCY: { rt_set_oneshot_mode(); period = start_rt_timer(nano2count(args[1])); if (args[0] == KLATENCY) { rt_task_init_cpuid(&rtask, spv, args[2], STACKSIZE, 0, 0, 0, rtai_cpuid()); } else { rt_kthread_init_cpuid(&rtask, spv, args[2], STACKSIZE, 0, 0, 0, rtai_cpuid()); } expected = rt_get_time() + 100*period; rt_task_make_periodic(&rtask, expected, period); break; } case END_KLATENCY: { stop_rt_timer(); rt_task_delete(&rtask); break; } case FREQ_CAL: { times.intrs = -1; reset_count = args[1]*HZ; count = 0; rt_assign_irq_to_cpu(TIMER_8254_IRQ, 1 << hard_cpu_id()); rt_request_timer(just_ret, COUNT, 1); rt_request_global_irq(TIMER_8254_IRQ, calibrate); rt_set_irq_ack(TIMER_8254_IRQ, NULL); break; } case END_FREQ_CAL: { rt_free_timer(); rt_reset_irq_to_sym_mode(TIMER_8254_IRQ); rt_free_global_irq(TIMER_8254_IRQ); break; } case BUS_CHECK: { loops = maxj = 0; bus_period = imuldiv(args[1], CPU_FREQ, 1000000000); bus_threshold = imuldiv(args[2], CPU_FREQ, 1000000000); use_parport = args[3]; rt_assign_irq_to_cpu(TIMER_8254_IRQ, 1 << hard_cpu_id()); rt_request_timer((void *)rt_timer_tick_ext, imuldiv(args[1], FREQ_8254, 1000000000), 0); rt_set_global_irq_ext(TIMER_8254_IRQ, 1, 0); break; } case END_BUS_CHECK: { rt_free_timer(); rt_reset_irq_to_sym_mode(TIMER_8254_IRQ); break; } } return 0; }