unsigned long rt_reset_irq_to_sym_mode (int irq) { unsigned long oldmask, flags; if (irq >= IPIPE_NR_XIRQS) { return 0; } else { rtai_save_flags_and_cli(flags); spin_lock(&rtai_iset_lock); if (rtai_old_irq_affinity[irq] == 0) { spin_unlock(&rtai_iset_lock); rtai_restore_flags(flags); return -EINVAL; } cpumask_copy((void *)&oldmask, irq_to_desc(irq)->irq_data.affinity); if (rtai_old_irq_affinity[irq]) { hal_set_irq_affinity(irq, CPUMASK_T(rtai_old_irq_affinity[irq])); rtai_old_irq_affinity[irq] = 0; } spin_unlock(&rtai_iset_lock); rtai_restore_flags(flags); return oldmask; } }
/*
 * usr_rt_pend_linux_irq - syscall-mode entry to pend @irq to Linux
 * on the current CPU, with hard interrupts masked around the pend.
 */
RTAI_SYSCALL_MODE void usr_rt_pend_linux_irq (unsigned irq)
{
	unsigned long hard_flags;

	rtai_save_flags_and_cli(hard_flags);
	hal_pend_uncond(irq, rtai_cpuid());
	rtai_restore_flags(hard_flags);
}
/*
 * rt_pend_linux_srq - mark system request @srq pending and kick the
 * sysreq virtual irq on the current CPU. Out-of-range requests (srq 0
 * is reserved) are silently ignored.
 */
void rt_pend_linux_srq (unsigned srq)
{
	unsigned long hard_flags;

	if (srq == 0 || srq >= RTAI_NR_SRQS) {
		return;
	}
	/* set_bit is atomic, so it can safely precede the cli section. */
	set_bit(srq, &rtai_sysreq_pending);
	rtai_save_flags_and_cli(hard_flags);
	hal_pend_uncond(rtai_sysreq_virq, rtai_cpuid());
	rtai_restore_flags(hard_flags);
}
/*
 * rt_free_timer - hand the AT91 timer/counter back to Linux.
 *
 * Undoes rt_request_timer(): clears the periodic flag, lets ipipe
 * reprogram the timer again, puts the hardware back into its one-shot
 * Linux configuration, frees the timer irq and finally detaches the
 * external timer ISR inside a hard-cli section.
 */
void rt_free_timer (void)
{
	unsigned long flags;

	rt_periodic = 0;
	__ipipe_mach_timerstolen = 0;	/* ipipe can reprogram the timer for Linux again */
	at91_tc_write(AT91_TC_CMR, AT91_TC_TIMER_CLOCK3);	/* back to one-shot mode */
	rt_set_timer_delay(__ipipe_mach_ticks_per_jiffy);	/* restore the regular Linux timer delay */
	rt_release_irq(RTAI_TIMER_IRQ);	/* free the timer irq */
	rtai_save_flags_and_cli(flags);	/* begin critical section */
	extern_timer_isr = NULL;	/* let ipipe run normally again */
	rtai_restore_flags(flags);	/* end of critical section */
}
/*
 * rt_request_timer - take over the decrementer timer for real time.
 * @handler:  real-time handler invoked on each timer interrupt.
 * @tick:     period in time-base ticks; > 0 selects periodic mode,
 *            0 selects one-shot mode.
 * @use_apic: not referenced in this body — presumably kept for interface
 *            parity with the x86 variant; TODO confirm.
 *
 * Runs entirely with hard interrupts masked. Always returns 0.
 */
int rt_request_timer (void (*handler)(void), unsigned tick, int use_apic)
{
	unsigned long flags;

	rtai_save_flags_and_cli(flags);
	/* Read tick values: current time base register and the Linux tick. */
	rt_times.tick_time = rtai_rdtsc();
	rt_times.linux_tick = tb_ticks_per_jiffy;
	if (tick > 0) {
		/* Periodic mode. Clamp tick to tb_ticks_per_jiffy so a Linux
		 * timer tick is never skipped. */
		if (tick > tb_ticks_per_jiffy) {
			tick = tb_ticks_per_jiffy;
		}
		rt_times.intr_time = rt_times.tick_time + tick;
		rt_times.linux_time = rt_times.tick_time + rt_times.linux_tick;
		rt_times.periodic_tick = tick;
#ifdef CONFIG_40x
		/* Set the PIT auto-reload mode */
		mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE);
		/* Set the PIT reload value and just let it run. */
		mtspr(SPRN_PIT, tick);
#endif /* CONFIG_40x */
	} else {
		/* One-shot mode: set everything to expire at linux_tick. */
		rt_times.intr_time = rt_times.tick_time + rt_times.linux_tick;
		rt_times.linux_time = rt_times.tick_time + rt_times.linux_tick;
		rt_times.periodic_tick = rt_times.linux_tick;
#ifdef CONFIG_40x
		/* Disable the PIT auto-reload mode */
		mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_ARE);
#endif /* CONFIG_40x */
	}
	/* Claim the decrementer irq and register the handler. */
	rt_release_irq(RTAI_TIMER_DECR_IRQ);
	decr_timer_handler = handler;
	/* Pass through ipipe: register an immediate timer-trap handler.
	 * On i386, periodic mode does rt_set_timer_delay(tick) to program the
	 * rate generator, and one-shot mode sets LATCH on the 8254 timer;
	 * the same scheme applies here. */
	rtai_disarm_decr(rtai_cpuid(), 1);
	rt_set_timer_delay(rt_times.periodic_tick);
	rtai_set_gate_vector(DECR_VECTOR, rtai_decr_timer_handler, 0);
	rtai_request_tickdev();
	rtai_restore_flags(flags);
	return 0;
}
/*
 * rt_free_timer - give the decrementer back to Linux.
 *
 * Undoes rt_request_timer(): releases the tick device, restores the
 * Linux PIT configuration on 40x, resets the decrementer gate vector
 * and re-arms the decrementer for Linux. Runs with hard interrupts
 * masked throughout.
 */
void rt_free_timer (void)
{
	unsigned long flags;

	rtai_save_flags_and_cli(flags);
	rtai_release_tickdev();
#ifdef CONFIG_40x
	/* Re-enable the PIT auto-reload mode */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE);
	/* Set the PIT reload value and just let it run. */
	mtspr(SPRN_PIT, tb_ticks_per_jiffy);
#endif /* CONFIG_40x */
	rtai_reset_gate_vector(DECR_VECTOR, 0, 0);
	rtai_disarm_decr(rtai_cpuid(), 0);
	rtai_restore_flags(flags);
}
/*
 * rt_free_linux_irq - release a Linux-side handler installed through
 * rt_request_linux_irq(). When the last sharer goes away, the saved
 * original action flags are restored.
 *
 * Returns 0 on success, -EINVAL when @irq is out of range or was never
 * requested through this interface.
 */
int rt_free_linux_irq (unsigned irq, void *dev_id)
{
	unsigned long hard_flags;

	if (irq >= RTAI_NR_IRQS || rtai_linux_irq[irq].count == 0) {
		return -EINVAL;
	}
	rtai_save_flags_and_cli(hard_flags);
	free_irq(irq, dev_id);
	spin_lock(&rtai_irq_desc(irq).lock);
	rtai_linux_irq[irq].count--;
	if (rtai_linux_irq[irq].count == 0 && rtai_irq_desc(irq).action) {
		/* Last sharer gone: put back the pre-sharing action flags. */
		rtai_irq_desc(irq).action->flags = rtai_linux_irq[irq].flags;
	}
	spin_unlock(&rtai_irq_desc(irq).lock);
	rtai_restore_flags(hard_flags);
	return 0;
}
/*
 * rt_assign_irq_to_cpu - retarget @irq to the CPUs named in @cpumask.
 *
 * Saves the previous affinity in rtai_old_irq_affinity[] so that
 * rt_reset_irq_to_sym_mode() can restore it later.
 *
 * Returns the previous affinity mask, or 0 when @irq is out of range or
 * the irq chip cannot set affinity.
 */
unsigned long rt_assign_irq_to_cpu (int irq, unsigned long cpumask)
{
	/* NOTE(review): "&rtai_irq_desc(irq) == NULL" only makes sense if the
	 * rtai_irq_desc() macro dereferences irq_to_desc(irq), so taking its
	 * address recovers the (possibly NULL) descriptor pointer — confirm
	 * against the macro definition. */
	if (irq >= IPIPE_NR_XIRQS || &rtai_irq_desc(irq) == NULL || rtai_irq_desc_chip(irq) == NULL || rtai_irq_desc_chip(irq)->irq_set_affinity == NULL) {
		return 0;
	} else {
		unsigned long oldmask, flags;

		rtai_save_flags_and_cli(flags);
		spin_lock(&rtai_iset_lock);
		/* NOTE(review): oldmask is an unsigned long used as a one-word
		 * cpumask via cpumask_copy — assumes NR_CPUS fits in one word. */
		cpumask_copy((void *)&oldmask, irq_to_desc(irq)->irq_data.affinity);
		hal_set_irq_affinity(irq, CPUMASK_T(cpumask));
		if (oldmask) {
			/* Remember the old affinity for a later reset. */
			rtai_old_irq_affinity[irq] = oldmask;
		}
		spin_unlock(&rtai_iset_lock);
		rtai_restore_flags(flags);
		return oldmask;
	}
}
/*
 * rt_request_linux_irq - install a Linux-level shared handler on @irq.
 * @irq:     irq number, must be < RTAI_NR_IRQS.
 * @handler: Linux irq handler (non-NULL).
 * @name:    name shown in /proc/interrupts.
 * @dev_id:  cookie passed back to the handler and to free_irq().
 *
 * On the first request for an irq the original action flags are saved
 * and IRQF_SHARED is forced so RTAI can coexist with the existing
 * handler; rt_free_linux_irq() restores them.
 *
 * Returns 0 on success, -EINVAL on bad arguments, or the negative error
 * from request_irq().
 *
 * Fix: the original computed retval from request_irq() but returned 0
 * unconditionally, hiding registration failures and leaving the sharing
 * bookkeeping permanently skewed; the error is now propagated and the
 * bookkeeping rolled back on failure.
 */
int rt_request_linux_irq (unsigned irq, void *handler, char *name, void *dev_id)
{
	unsigned long flags;
	int retval;

	if (irq >= RTAI_NR_IRQS || !handler) {
		return -EINVAL;
	}

	rtai_save_flags_and_cli(flags);
	spin_lock(&irq_desc[irq].lock);
	if (rtai_linux_irq[irq].count++ == 0 && irq_desc[irq].action) {
		/* First sharer: save the original flags, force sharing. */
		rtai_linux_irq[irq].flags = irq_desc[irq].action->flags;
		irq_desc[irq].action->flags |= IRQF_SHARED;
	}
	spin_unlock(&irq_desc[irq].lock);
	rtai_restore_flags(flags);

	retval = request_irq(irq, handler, IRQF_SHARED, name, dev_id);
	if (retval < 0) {
		/* Roll back the sharing bookkeeping done above
		 * (mirrors rt_free_linux_irq). */
		rtai_save_flags_and_cli(flags);
		spin_lock(&irq_desc[irq].lock);
		if (--rtai_linux_irq[irq].count == 0 && irq_desc[irq].action) {
			irq_desc[irq].action->flags = rtai_linux_irq[irq].flags;
		}
		spin_unlock(&irq_desc[irq].lock);
		rtai_restore_flags(flags);
	}
	return retval;
}
/*
 * rt_request_srq - allocate a system-request slot.
 * @label:     caller-chosen identifier stored with the slot.
 * @k_handler: kernel-side handler, required (non-NULL).
 * @u_handler: user-side handler, may be NULL.
 *
 * Returns the allocated srq index, -EINVAL when k_handler is NULL, or
 * -EBUSY when every slot in rtai_sysreq_map is taken.
 */
int rt_request_srq (unsigned label, void (*k_handler)(void), long long (*u_handler)(unsigned long))
{
	unsigned long hard_flags;
	int srq = -EBUSY;

	if (k_handler == NULL) {
		return -EINVAL;
	}
	rtai_save_flags_and_cli(hard_flags);
	if (rtai_sysreq_map != ~0) {
		/* Grab the first free bit and fill in the slot. */
		srq = ffz(rtai_sysreq_map);
		set_bit(srq, &rtai_sysreq_map);
		rtai_sysreq_table[srq].k_handler = k_handler;
		rtai_sysreq_table[srq].u_handler = u_handler;
		rtai_sysreq_table[srq].label = label;
	}
	rtai_restore_flags(hard_flags);
	return srq;
}
/* Thin static wrapper exposing rtai_restore_flags() as a plain function
 * (presumably so it can be placed in a call/dispatch table — confirm
 * against the callers). */
static void usi_restore_flags(unsigned long flags)
{
	rtai_restore_flags(flags);
}