/*!
 * mxc_hrt_start_timer() - start a timer for otg state machine
 * Set or reset timer to interrupt in number of uS (micro-seconds).
 * A usec value of zero cancels any pending timer without re-arming it.
 *
 * XXX There may be a floor or minimum that can be effectively set.
 * XXX We have seen an occasional problem with US(25) for discharge for example.
 *
 * @param otg otg instance (not referenced here; kept for the ops signature)
 * @param usec timeout in microseconds; 0 cancels the timer
 * @return 0 always
 */
int mxc_hrt_start_timer(struct otg_instance *otg, int usec)
{
        TRACE_MSG1(OCD, "usec: %d", usec);
        mxc_hr_usec_set = usec;

        /* Mark inactive and remove any pending timer BEFORE re-arming, so
         * the callback cannot fire against stale settings. */
        mxc_hr_active = FALSE;
        TRACE_MSG1(OCD, "resetting active: %d", mxc_hr_active);
        del_timer(&hr_timer);

        /* usec == 0 means "cancel" - nothing left to do. */
        RETURN_ZERO_UNLESS(usec);

        mxc_hr_active = TRUE;
        TRACE_MSG1(OCD, "setting active: %d", mxc_hr_active);

        if (mxc_hr_usec_set >= 1000000) {
                /* Long timeout (>= 1 second): whole-jiffy granularity is
                 * sufficient, no sub-jiffy cycle offset needed. */
                hr_timer.expires = jiffies + ((mxc_hr_usec_set/1000000)*mxc_hr_jiffy_per_sec);
                hr_timer.arch_cycle_expires = get_arch_cycles(jiffies);
                TRACE_MSG4 (OCD, "usec: %u jiffies: %8u expires: %8u arch_cycle_expires: %8u LONG",
                                usec, jiffies, hr_timer.expires, hr_timer.arch_cycle_expires);
        }
        else {
                /* Short timeout (< 1 second): express it as sub-jiffy arch
                 * cycles relative to "now". */
                hr_timer.expires = jiffies;
                hr_timer.arch_cycle_expires = get_arch_cycles(jiffies);

                /* Enforce a 100 us floor (see XXX notes above). */
                if (mxc_hr_usec_set < 100) {
                        TRACE_MSG1(OCD, "usec: %d set to minimum 100", mxc_hr_usec_set);
                        mxc_hr_usec_set = 100;
                }
                hr_timer.arch_cycle_expires += nsec_to_arch_cycle(mxc_hr_usec_set * 1000);
                TRACE_MSG2(OCD, "arch_cycle_expires: %d arch_cycles_per_jiffy: %d",
                                hr_timer.arch_cycle_expires, arch_cycles_per_jiffy);

                /* Normalize: carry whole jiffies out of the cycle count so
                 * arch_cycle_expires stays below arch_cycles_per_jiffy. */
                while (hr_timer.arch_cycle_expires >= arch_cycles_per_jiffy) {
                        hr_timer.expires++;
                        hr_timer.arch_cycle_expires -= arch_cycles_per_jiffy;
                }
                TRACE_MSG4 (OCD, "usec: %u jiffies: %8u expires: %8u arch_cycle_expires: %8u SHORT",
                                usec, jiffies, hr_timer.expires, hr_timer.arch_cycle_expires);
        }
        add_timer(&hr_timer);
        return 0;
}
/*
 * _schedule_next_int() - program the timer chip for the next sub-jiffy
 * interrupt at jiffie_f + arch_cycle_in arch cycles.
 *
 * Returns 1 when the requested instant is already in the past (caller must
 * handle the expiry itself), 0 otherwise (timer reloaded, or HRT disabled).
 */
int _schedule_next_int(unsigned long jiffie_f, long arch_cycle_in)
{
        long arch_cycle_offset;
        unsigned long seq;
        /*
         * First figure where we are in time.
         * A note on locking.  We are under the timerlist_lock here.  This
         * means that interrupts are off already, so don't use irq versions.
         */
        if (unlikely(!hrtimer_use)) {
                return 0;
        }
        /* Seqlock read loop: retry until we get a consistent snapshot of
         * the cycle count relative to jiffie_f. */
        do {
                seq = read_seqbegin(&xtime_lock);
                arch_cycle_offset = arch_cycle_in - get_arch_cycles(jiffie_f);
        } while (read_seqretry(&xtime_lock, seq));
        /*
         * If time is already passed, just return saying so.
         */
        if (arch_cycle_offset <= 0)
                return 1;
        /* Record whether this reload covers a full jiffy, for the interrupt
         * handler's bookkeeping. */
        __last_was_long = arch_cycles_per_jiffy == arch_cycle_in;
        reload_timer_chip(arch_cycle_offset);
        return 0;
}
/*
 * High res timers changes: First we want to use full nsec for all
 * the math to avoid the double round off (on the offset and xtime).
 * Second, we want to allow a boot with HRT turned off at boot time.
 * This will cause hrtimer_use to be false, and we then fall back to
 * the old code.  We also shorten the xtime lock region and eliminate
 * the lost tick code as this kernel will never have lost ticks under
 * the lock (i.e. wall_jiffies will never differ from jiffies except
 * when the write xtime lock is held).
 */
/*
 * do_gettimeofday() - fill *tv with the current wall-clock time.
 *
 * Takes a consistent seqlock snapshot of xtime plus the sub-tick offset
 * (arch cycles when HRT is active, the legacy timer offset otherwise),
 * clamps the offset to the current NTP tick length, then normalizes
 * nanoseconds into whole seconds.
 */
void do_gettimeofday(struct timeval *tv)
{
        unsigned long seq;
        unsigned long sec, nsec, clk_nsec;
        unsigned long max_ntp_tick;

        /* Seqlock read loop: retry until xtime and the offset were read
         * without a concurrent writer. */
        do {
                seq = read_seqbegin(&xtime_lock);
#ifdef CONFIG_HIGH_RES_TIMERS
                if (hrtimer_use)
                        nsec = arch_cycle_to_nsec(get_arch_cycles(wall_jiffies));
                else
#endif
                        nsec = cur_timer->get_offset() * NSEC_PER_USEC;
                sec = xtime.tv_sec;
                clk_nsec = xtime.tv_nsec;
                max_ntp_tick = current_tick_length() >> (SHIFT_SCALE - 10);
        } while (read_seqretry(&xtime_lock, seq));

        /* ensure we don't advance beyond the current tick length */
        nsec = min(nsec, max_ntp_tick);
        nsec += clk_nsec;

        /* Carry whole seconds out of the nanosecond count. */
        while (nsec >= NSEC_PER_SEC) {
                nsec -= NSEC_PER_SEC;
                sec++;
        }
        tv->tv_sec = sec;
        tv->tv_usec = nsec / NSEC_PER_USEC;
}
/*
 * _schedule_jiffies_int() - has a full jiffy elapsed since jiffie_f?
 *
 * Returns non-zero when at least arch_cycles_per_jiffy arch cycles have
 * passed since jiffie_f; always returns 0 when HRT is disabled.
 */
int _schedule_jiffies_int(unsigned long jiffie_f)
{
        unsigned long snapshot;
        long elapsed;

        if (unlikely(!hrtimer_use))
                return 0;

        /* Seqlock read loop: retry until the cycle count is sampled
         * without a concurrent xtime writer. */
        do {
                snapshot = read_seqbegin(&xtime_lock);
                elapsed = get_arch_cycles(jiffie_f);
        } while (read_seqretry(&xtime_lock, snapshot));

        return elapsed >= arch_cycles_per_jiffy;
}
/*
 * mxc_hrt_callback() - hr_timer expiry handler.
 *
 * Recomputes the next expiry fields using the same long/short split as
 * mxc_hrt_start_timer(), then delivers a TMOUT event to the OTG state
 * machine.  The timer is NOT re-added here (add_timer is commented out),
 * so this fires once per mxc_hrt_start_timer() arming.
 *
 * @param arg unused timer argument
 */
void mxc_hrt_callback (unsigned long arg)
{
        TRACE_MSG1(OCD, "checking active: %d", mxc_hr_active);
        /* Ignore a late expiry if the timer was cancelled meanwhile. */
        RETURN_UNLESS(mxc_hr_active);
        mxc_hr_active = FALSE;
        TRACE_MSG1(OCD, "resetting active: %d", mxc_hr_active);
        if (mxc_hr_usec_set >= 1000000) {
                /* Requested period is 1 second or more: whole-jiffy math. */
                hr_timer.expires = jiffies + ((mxc_hr_usec_set/1000000)*mxc_hr_jiffy_per_sec);
                hr_timer.arch_cycle_expires = get_arch_cycles(jiffies);
                hr_timer.function = mxc_hrt_callback;
        }
        else {
                /* Sub-second period: express it in sub-jiffy arch cycles. */
                hr_timer.expires = jiffies;
                hr_timer.arch_cycle_expires = get_arch_cycles(jiffies);
                /* Enforce the same 100 us floor as mxc_hrt_start_timer(). */
                if (mxc_hr_usec_set < 100) {
                        TRACE_MSG1(OCD, "usec: %d set to minimum 100", mxc_hr_usec_set);
                        mxc_hr_usec_set = 100;
                }
                hr_timer.arch_cycle_expires += nsec_to_arch_cycle(mxc_hr_usec_set * 1000);
                /* Normalize: carry whole jiffies out of the cycle count. */
                while (hr_timer.arch_cycle_expires >= arch_cycles_per_jiffy) {
                        hr_timer.expires++;
                        hr_timer.arch_cycle_expires -= arch_cycles_per_jiffy;
                }
        } /* end of else */
        TRACE_MSG3 (OCD, "usec: %d expires: %8u arch_cycle_expires: %8u",
                        mxc_hr_usec_set, hr_timer.expires, hr_timer.arch_cycle_expires);
        /* Deliver the timeout to the OTG state machine. */
        otg_event(ocd_instance->otg, TMOUT, OCD, "TMOUT");
        // add_timer(&hr_timer);
}
/* * HRT stuff goes here. */ int schedule_hr_timer_int(unsigned ref_jiffies, int ref_cycles) { unsigned long temp_cycles; volatile TimerStruct_t * subhz_timer = (volatile TimerStruct_t *) TIMER2_VA_BASE; temp_cycles = (ref_jiffies - jiffies) * arch_cycles_per_jiffy + ref_cycles - get_arch_cycles(jiffies); if(unlikely(temp_cycles <= 0)) return -ETIME; subhz_timer->TimerLoad = temp_cycles; subhz_timer->TimerControl = TIMER_CTRL | TIMER_CTRL_IE; return 0; }
/*
 * linux/arch/arm/kernel/vst.c
 *
 * VST code for ARM.
 *
 * 2004            VST and IDLE code, by George Anzinger
 *
 * 2004 (c) MontaVista Software, Inc.
 * Copyright 2004 Sony Corporation.
 * Copyright 2004 Matsushita Electric Industrial Co., Ltd.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/vst.h>
#include <linux/hrtime.h>       /* get_arch_cycles, arch_cycles_per_jiffy */
#include <linux/time.h>         /* xtime_lock */
#include <asm/irq.h>            /* to get the disable/enable irq code */
#include <asm/mach/irq.h>

#define stop_timer()            /* just let it expire.... */

/*
 * do_vst_wakeup() - bring the system out of a VST (tickless) sleep.
 *
 * Called from interrupt context on wakeup.  Counts how many whole jiffies
 * elapsed while ticks were suppressed (using the HRT arch-cycle counter),
 * bumps jiffies by all but the last one, and lets the normal timer path
 * account for the final tick.  Statistics distinguish a timer-driven exit
 * (irq_flag set) from an exit caused by some other external interrupt.
 *
 * @param regs interrupt register state (unused here)
 * @param irq_flag non-zero when the wakeup came from the VST timer itself
 */
void do_vst_wakeup(struct pt_regs *regs, int irq_flag)
{
        unsigned long jiffies_delta, jiffies_f = jiffies;
        unsigned long flags;

        if (!in_vst_sleep())
                return;

        vst_waking();

        write_seqlock_irqsave(&xtime_lock, flags);
        if (irq_flag )
                vst_successful_exit++;
        else
                vst_external_intr_exit++;

        stop_timer();
        /*
         * OK, now we need to get jiffies up to the right value.  Here
         * we lean on the HRT patch to give us some notion of where we
         * are.
         */
        jiffies_delta = get_arch_cycles(jiffies_f) / arch_cycles_per_jiffy;
        if (jiffies_delta) {
                /*
                 * One or more jiffie has elapsed.  Do all but the last one
                 * here and then call do_timer() to get the last and update
                 * the wall clock.
                 */
                jiffies_delta--;
                vst_bump_jiffies_by(jiffies_delta);
                vst_skipped_interrupts += jiffies_delta;
                run_local_timers();
        } else {
                conditional_run_timers();
        }
        write_sequnlock_irqrestore(&xtime_lock, flags);
        return;
}
/*! * mxc_hrt_ocd_mod_init() - initial tcd setup * Allocate interrupts and setup hardware. */ int mxc_hrt_mod_init (void) { int res = 0; #if 0 //test timer for 10 sec init_timer (&hr_timer); hr_timer.expires = jiffies + 630; hr_timer.function = mxc_hrt_callback; add_timer(&hr_timer); #endif #if 1 init_timer (&hr_timer); hr_timer.expires = jiffies + 10; hr_timer.function = mxc_hrt_callback; hr_timer.arch_cycle_expires = get_arch_cycles(jiffies); hr_timer.arch_cycle_expires += nsec_to_arch_cycle(100 * 1000 * 1000); while (hr_timer.arch_cycle_expires >= arch_cycles_per_jiffy) { hr_timer.expires++; hr_timer.arch_cycle_expires -= arch_cycles_per_jiffy; } // add_timer(&hr_timer); #endif res = nsec_to_arch_cycle(100000); //printk(KERN_INFO"arch cycles for 100usec: %8X\n", res); res = arch_cycles_per_jiffy; mxc_hr_jiffy_per_sec = (nsec_to_arch_cycle(1000000000)/arch_cycles_per_jiffy); TRACE_MSG4(OCD, "arch cycles per jiffy: %8u Number of jiffy for 1 sec is %8u resolution: %8d nsec/cycle: %8u\n", arch_cycles_per_jiffy, mxc_hr_jiffy_per_sec, hr_time_resolution, nsec_to_arch_cycle(1) ); CATCH(error) { return -EINVAL; } return 0; }
int schedule_hr_timer_int(unsigned ref_jiffies, int ref_cycles) { int temp_cycles; volatile mputimer_regs_t * subhz_timer = mputimer_base(2); BUG_ON(ref_cycles < 0); /* * Get offset from last jiffy */ temp_cycles = (ref_jiffies - jiffies) * arch_cycles_per_jiffy + ref_cycles - get_arch_cycles(jiffies); if(unlikely(temp_cycles <= 0)) return -ETIME; subhz_timer->cntl = MPUTIM_CLOCK_ENABLE; subhz_timer->load_tim = temp_cycles; subhz_timer->cntl = MPUTIM_CLOCK_ENABLE | MPUTIM_ST; return 0; }
/*
 * schedule_hr_timer_int() - program IXP2000 timer 2 for an interrupt at
 * ref_jiffies + ref_cycles arch cycles.
 *
 * @param ref_jiffies reference jiffy the expiry is measured from
 * @param ref_cycles arch-cycle offset within that jiffy
 * @return 0 on success, -EIO on unsupported silicon, -ETIME if the
 *         requested instant already passed
 */
int schedule_hr_timer_int(unsigned ref_jiffies, int ref_cycles)
{
        extern unsigned long processor_id;
        int delta;

        /* NOTE(review): silicon revisions below 4 are rejected here -
         * presumably a timer erratum on early steppings; confirm. */
        if ((processor_id & 0xf) < 4)
                return -EIO;

        /* Distance from "now" to the requested expiry, in arch cycles. */
        delta = (ref_jiffies - jiffies) * arch_cycles_per_jiffy
                        + ref_cycles - get_arch_cycles(jiffies);
        if (unlikely(delta <= 0))
                return -ETIME;

        /* Disable, reload, then re-enable timer 2. */
        ixp2000_reg_write(IXP2000_T2_CTL, 0);
        ixp2000_reg_write(IXP2000_T2_CLD, delta);
        ixp2000_reg_write(IXP2000_T2_CTL, (1 << 7));
        return 0;
}
/*!
 * mxc_hrt_trace_ticks() - get current ticks
 *
 * Returns a tick value for trace timestamping with sub-jiffy resolution.
 *
 * NOTE(review): get_arch_cycles() is passed the constant 1 rather than
 * jiffies, so this yields the cycle count relative to jiffy 1, not the
 * current jiffy.  It may be intentional (a monotonically growing counter
 * for tracing) - confirm against the get_arch_cycles() semantics.
 */
u64 mxc_hrt_trace_ticks (void)
{
        /* ticks from jiffies alone are too coarse; use the arch cycle count */
        return (u64) get_arch_cycles(1);
}