Example #1
0
int
clock_intr(void *v)
{
	struct clockframe *frame = v;
	extern u_int cpu_hzticks;
	u_int itmr;

	/*
	 * Rearm the interval timer: read the current ITMR value and
	 * program the next compare point one tick interval later.
	 */
	mfctl(CR_ITMR, itmr);
	mtctl(itmr + cpu_hzticks, CR_ITMR);

	/* Suppress tick delivery while the system is still cold. */
	if (cold)
		return 1;

	hardclock(frame);
	return 1;
}
Example #2
0
int
clock_intr(void *arg)
{
	volatile struct timer_reg *timer;
	int spins = 0;

	/* Not our interrupt: let the next handler on the line look. */
	if (!INTR_OCCURRED(NEXT_I_TIMER))
		return (0);

	do {
		static int in_hardclock = 0;
		int s;

		/* Acknowledge the timer so it can raise the next tick. */
		timer = (volatile struct timer_reg *)IIOV(NEXT_P_TIMER);
		timer->csr |= TIMER_REG_UPDATE;

		/* Guard against re-entering hardclock() recursively. */
		if (in_hardclock == 0) {
			in_hardclock = 1;
			s = splclock ();
			hardclock(arg);
			splx(s);
			in_hardclock = 0;
		}

		/* Bail out rather than spin forever on a stuck line. */
		if (spins++ > 10)
			panic ("whilecount");
	} while (INTR_OCCURRED(NEXT_I_TIMER));

	return (1);
}
Example #3
0
File: clock.c Project: MarginC/kame
/*
 *  Interrupt handler for targets using the internal count register
 *  as interval clock. Normally the system is run with the clock
 *  interrupt always enabled. Masking is done here and if the clock
 *  can not be run the tick is just counted and handled later when
 *  the clock is unmasked again.
 */
intrmask_t
clock_int5( intrmask_t mask, struct trap_frame *tf)
{
    u_int32_t clkdiff;

    /*
     * If clock is started count the tick, else just arm for a new.
     */
    if (clock_started && cpu_counter_interval != 0) {
        /*
         * Catch up on ticks that elapsed while interrupts were held
         * off: every full interval between the last programmed point
         * and the current count becomes a pending tick.  Unsigned
         * subtraction handles counter wraparound.
         */
        clkdiff = cp0_get_count() - cpu_counter_last;
        while (clkdiff >= cpu_counter_interval) {
            cpu_counter_last += cpu_counter_interval;
            clkdiff = cp0_get_count() - cpu_counter_last;
            pendingticks++;
        }
        /* Account for the tick that raised this interrupt. */
        cpu_counter_last += cpu_counter_interval;
        pendingticks++;
    } else {
        /* Clock not running yet: just arm the next interrupt. */
        cpu_counter_last = cpu_counter_interval + cp0_get_count();
    }

    /* Program the compare register for the next tick. */
    cp0_set_compare(cpu_counter_last);

    /*
     * Deliver accumulated ticks only if clock interrupts are not
     * masked at the interrupted priority level; otherwise they stay
     * queued in pendingticks and drain on a later, unmasked tick.
     */
    if ((tf->cpl & SPL_CLOCKMASK) == 0) {
        while (pendingticks) {
            hardclock(tf);
            pendingticks--;
        }
    }

    return CR_INT_5;	/* Clock is always on 5 */
}
/*
 * CP0 count/compare clock interrupt handler.  Advances the compare
 * register by one tick interval, recovers from missed intervals, and
 * delivers the tick via hardclock().
 *
 * Note: runs in interrupt context on the local CPU, so the cached
 * `ci` is used throughout instead of re-calling curcpu() (the
 * original mixed both, which was redundant and inconsistent).
 */
void
mips3_clockintr(struct clockframe *cfp)
{
	struct cpu_info * const ci = curcpu();
	uint32_t new_cnt;

	ci->ci_ev_count_compare.ev_count++;

	/* ci_cycles_per_hz must fit in the 32-bit compare register. */
	KASSERT((ci->ci_cycles_per_hz & ~(0xffffffff)) == 0);
	ci->ci_next_cp0_clk_intr += (uint32_t)(ci->ci_cycles_per_hz & 0xffffffff);
	mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);

	/* Check for lost clock interrupts */
	new_cnt = mips3_cp0_count_read();

	/*
	 * If the newly programmed compare value is already behind the
	 * count (difference went negative in 32-bit arithmetic), we
	 * missed one or more intervals: restart counting from the
	 * current value and record the miss.
	 */
	if ((ci->ci_next_cp0_clk_intr - new_cnt) & 0x80000000) {
		ci->ci_next_cp0_clk_intr = new_cnt + ci->ci_cycles_per_hz;
		mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
		ci->ci_ev_count_compare_missed.ev_count++;
	}

	/*
	 * Since hardclock is at the end, we can invoke it by a tailcall.
	 */
	hardclock(cfp);
	/* caller should renable clock interrupts */
}
Example #5
0
void
mainbus_interrupt(struct trapframe *tf)
{
	uint32_t cause;

	/* interrupts should be off */
	KASSERT(curthread->t_curspl > 0);

	cause = tf->tf_cause;
	if (cause & LAMEBUS_IRQ_BIT) {
		lamebus_interrupt(lamebus);
	}
	else if (cause & LAMEBUS_IPI_BIT) {
		interprocessor_interrupt();
		lamebus_clear_ipi(lamebus, curcpu);
	}
	else if (cause & MIPS_TIMER_BIT) {
		/* Reset the timer (this clears the interrupt) */
		mips_timer_set(CPU_FREQUENCY / HZ);
		/* and call hardclock */
		hardclock();
	}
	else {
		panic("Unknown interrupt; cause register is %08x\n", cause);
	}
}
Example #6
0
static void
dec_maxine_intr(uint32_t status, vaddr_t pc, uint32_t ipending)
{
	/* Halt button is wired to interrupt line 4. */
	if (ipending & MIPS_INT_MASK_4)
		prom_haltbutton();

	/* handle clock interrupts ASAP */
	if (ipending & MIPS_INT_MASK_1) {
		struct clockframe cf;

		/*
		 * Dummy byte load from IOASIC slot 8 space; presumably
		 * this acknowledges the clock interrupt -- TODO confirm
		 * against MAXINE IOASIC documentation.
		 */
		__asm volatile("lbu $0,48(%0)" ::
			"r"(ioasic_base + IOASIC_SLOT_8_START));
		cf.pc = pc;
		cf.sr = status;
		/* Nonzero when we interrupted another interrupt handler. */
		cf.intr = (curcpu()->ci_idepth > 1);
		hardclock(&cf);
		pmax_clock_evcnt.ev_count++;
	}

	/* I/O ASIC device interrupts. */
	if (ipending & MIPS_INT_MASK_3) {
		dec_maxine_ioasic_intr();
	}
	/* Memory error interrupt. */
	if (ipending & MIPS_INT_MASK_2) {
		kn02ba_errintr();
		pmax_memerr_evcnt.ev_count++;
	}
}
Example #7
0
/*
 * Maskable IPIs.
 *
 * These IPIs are received as non maskable, but are not processed in
 * the NMI handler; instead, they are processed from the soft interrupt
 * handler.
 *
 * XXX This is grossly suboptimal.
 */
void
m197_soft_ipi()
{
	struct cpu_info *ci = curcpu();
	struct trapframe faketf;
	int s;

	__mp_lock(&kernel_lock);
	s = splclock();

	if (ci->ci_h_sxip != 0) {
		faketf.tf_cpu = ci;
		faketf.tf_sxip = ci->ci_h_sxip;
		faketf.tf_epsr = ci->ci_h_epsr;
		ci->ci_h_sxip = 0;
		hardclock((struct clockframe *)&faketf);
	}

	if (ci->ci_s_sxip != 0) {
		faketf.tf_cpu = ci;
		faketf.tf_sxip = ci->ci_s_sxip;
		faketf.tf_epsr = ci->ci_s_epsr;
		ci->ci_s_sxip = 0;
		statclock((struct clockframe *)&faketf);
	}

	splx(s);
	__mp_unlock(&kernel_lock);
}
Example #8
0
/*
 * at91st_intr:
 *
 *Handle the hardclock interrupt.
 */
static int
at91st_intr(void *arg)
{
	/*
	 * The interrupt line is shared with the other interval and PWM
	 * timers, so check that the kernel (PIT) timer really fired.
	 */
	if ((READ_ST(ST_SR) & ST_SR_PITS) == 0)
		return 0;	/* one of the other timers; pass it on */

	/* Kernel timer tick: run hardclock. */
	hardclock((struct clockframe *)arg);
	return 1;
}
/*
 * Handle SMP hardclock() calling for this CPU.
 */
static void
hardclock_ipi(void *cap)
{
	int spl;

	/* Block scheduler activity while the tick is delivered. */
	spl = splsched();
	hardclock((struct clockframe *)cap);
	splx(spl);
}
int
clockhandler(void *cookie)
{
	struct clockframe *cf = cookie;

	/* Advance the timecounter, then deliver the clock tick. */
	tickle_tc();
	hardclock(cf);

	return 0;	/* Pass the interrupt on down the chain */
}
Example #11
0
void
lapic_clockintr(void *arg)
{
	/* Deliver the tick, then bump the local APIC clock event count. */
	hardclock((struct clockframe *)arg);
	clk_count.ec_count++;
}
int
clockhandler(void *aframe)
{
	/* Acknowledge the timer interrupt, then deliver the tick. */
	bus_space_write_4(clock_sc->sc_iot, clock_sc->sc_ioh,
	    TIMER_1_CLEAR, 0);
	hardclock((struct clockframe *)aframe);

	return 0;	/* Pass the interrupt on down the chain */
}
Example #13
0
int
hardclockintr(struct trapframe *frame)
{
	/*
	 * The boot CPU (cpuid 0) drives the full system hardclock;
	 * every other CPU runs only the per-CPU portion.
	 */
	if (PCPU_GET(cpuid) != 0)
		hardclock_cpu(TRAPF_USERMODE(frame));
	else
		hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
	return (FILTER_HANDLED);
}
Example #14
0
static int
clock_intr(void *arg)
{
	struct trapframe *tf = arg;

	/* Advance the software timecounter base by one reload period. */
	atomic_add_32(&s3c24x0_base, timer4_reload_value);

	hardclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
	return (FILTER_HANDLED);
}
Example #15
0
/*
 * Level 10 (clock) interrupts from system counter.
 */
int
clockintr_4(void *cap)
{

	/* read the limit register to clear the interrupt */
	(void)*((volatile int *)&timerreg4->t_c10.t_limit);

	/* Advance the timecounter, then deliver the tick. */
	tickle_tc();
	hardclock((struct clockframe *)cap);
	return (1);
}
Example #16
0
/*
 * ixpclk_intr:
 *
 *	Handle the hardclock interrupt.
 */
static int
ixpclk_intr(void *arg)
{
	/* Clear the timer interrupt, then deliver the tick. */
	bus_space_write_4(ixpclk_sc->sc_iot, ixpclk_sc->sc_ioh,
	    IXPCLK_CLEAR, 1);
	hardclock((struct clockframe *)arg);

	return (1);
}
/*
 * ixpclk_intr:
 *
 *	Handle the hardclock interrupt.
 */
static int
ixpclk_intr(void *arg)
{
	/* Acknowledge the timer interrupt. */
	bus_space_write_4(ixpclk_sc->sc_iot, ixpclk_sc->sc_ioh,
	    IXPCLK_CLEAR, 1);

	/* Advance the timecounter base by one tick's worth of clocks. */
	atomic_add_32(&ixpclk_base, ixpclk_sc->sc_coreclock_freq);

	hardclock((struct clockframe *)arg);
	return (1);
}
Example #18
0
static int
clock_intr(void *arg)
{
	struct trapframe *tf = arg;

	/* The interrupt is shared, so we have to make sure it's for us. */
	if ((RD4(ST_SR) & ST_SR_PITS) == 0)
		return (FILTER_STRAY);

	hardclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
	return (FILTER_HANDLED);
}
Example #19
0
/* 
 * interrupt handler for clock interrupt (100Hz) 
 */
int
timer0_intr(void *arg)
{
	/* Writing EQUF back to the mode register acks the interrupt. */
	_reg_write_4(T0_MODE_REG, _reg_read_4(T0_MODE_REG) | T_MODE_EQUF);

	/* Count the event and deliver the tick. */
	_playstation2_evcnt.clock.ev_count++;
	hardclock(&playstation2_clockframe);

	return (1);
}
Example #20
0
/*
 * ixpclk_intr:
 *
 *	Handle the hardclock interrupt.
 */
int
ixpclk_intr(void *arg)
{
	struct ixpclk_softc *sc = ixpclk_sc;
	struct trapframe *tf = arg;

	/* Clear the timer 0 interrupt in the OST status register. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IXP425_OST_STATUS,
	    OST_TIM0_INT);

	hardclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
	return (FILTER_HANDLED);
}
Example #21
0
/* NOTE(review): definition is truncated here -- closing brace and any
 * trailing statements are outside this view. */
static void
handleclock(void* arg)
{
	/* Cycles elapsed on the processor cycle counter since last tick. */
	u_int32_t now = alpha_rpcc();
	u_int32_t delta = now - last_time;
	last_time = now;

	/*
	 * More than one tick interval elapsed: catch up by calling
	 * hardclock() once per missed tick.  scaled_ticks_per_cycle is
	 * a fixed-point ticks-per-cycle factor; FIX_SHIFT removes the
	 * scaling after the multiply.
	 */
	if (delta > max_cycles_per_tick) {
		int i, missed_ticks;
		missed_ticks = (delta * scaled_ticks_per_cycle) >> FIX_SHIFT;
		for (i = 0; i < missed_ticks; i++)
			hardclock(arg);
	}
Example #22
0
/*
 * ixpclk_intr:
 *
 *	Handle the hardclock interrupt.
 */
int
ixpclk_intr(void *arg)
{
	struct ixpclk_softc *sc = ixpclk_sc;

	/* Ack timer 0 in the OST status register, then run the tick. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IXP425_OST_STATUS,
	    OST_TIM0_INT);

	hardclock((struct clockframe *)arg);

	return (1);
}
Example #23
0
void
ingenic_clockintr(struct clockframe *cf)
{
	int s = splsched();
	struct cpu_info * const ci = curcpu();
#ifdef USE_OST
	uint32_t new_cnt;
#endif

	/* clear flags */
	writereg(JZ_TC_TFCR, TFR_OSTFLAG);

	/* Schedule the next tick one full hz interval later. */
	ci->ci_next_cp0_clk_intr += (uint32_t)(ci->ci_cycles_per_hz & 0xffffffff);
#ifdef USE_OST
	writereg(JZ_OST_DATA, ci->ci_next_cp0_clk_intr);

	/* Check for lost clock interrupts */
	new_cnt = readreg(JZ_OST_CNT_LO);

	/* 
	 * Missed one or more clock interrupts, so let's start 
	 * counting again from the current value.
	 * (The 0x80000000 test detects the compare value having fallen
	 * behind the counter in 32-bit arithmetic.)
	 */
	if ((ci->ci_next_cp0_clk_intr - new_cnt) & 0x80000000) {

		ci->ci_next_cp0_clk_intr = new_cnt + curcpu()->ci_cycles_per_hz;
		writereg(JZ_OST_DATA, ci->ci_next_cp0_clk_intr);
		curcpu()->ci_ev_count_compare_missed.ev_count++;
	}
	writereg(JZ_TC_TFCR, TFR_OSTFLAG);
#else
	writereg(JZ_TC_TFCR, TFR_FFLAG5);
#endif

#ifdef INGENIC_CLOCK_DEBUG
	/* Emit a '+' every 100 ticks as a cheap liveness indicator. */
	cnt++;
	if (cnt == 100) {
		cnt = 0;
		ingenic_puts("+");
	}
#endif
#ifdef MULTIPROCESSOR
	/*
	 * XXX
	 * needs to take the IPI lock and ping all online CPUs, not just core 1
	 */
	MTC0(1 << IPI_CLOCK, 20, 1);
#endif
	hardclock(cf);
	splx(s);
}
Example #24
0
static int
clockintr(void *arg)
{
	struct clockframe *frame = arg;
	unsigned int newref;
	int ticks, i, oldirqstate;

	/*
	 * Sample the (down-counting) reference timer with interrupts
	 * disabled so the read and the hardref update are atomic.
	 */
	oldirqstate = disable_interrupts(I32_bit);
	newref = bus_space_read_4(ref_sc->sc_iot, ref_sc->sc_ioh,
				  MPU_READ_TIMER);
	/* First interrupt (hardref still 0): assume exactly one tick. */
	ticks = hardref ? (hardref - newref) / counts_per_hz : 1;
	hardref = newref;
	restore_interrupts(oldirqstate);

	if (ticks == 0)
		ticks = 1;

#ifdef DEBUG
	if (ticks > 1)
		printf("Missed %d ticks.\n", ticks-1);
#endif


	/* Deliver one hardclock() per elapsed tick. */
	for (i = 0; i < ticks; i++)
		hardclock(frame);

	/*
	 * If we had to catch up, the catch-up itself may have consumed
	 * another full tick interval; re-sample and deliver one more
	 * tick if so.
	 */
	if (ticks > 1) {
		newref = bus_space_read_4(ref_sc->sc_iot, ref_sc->sc_ioh,
					  MPU_READ_TIMER);

		if ((hardref - newref) / counts_per_hz)
			hardclock(frame);
	}

	return(1);
}
Example #25
0
static int
clock_intr(void *arg)
{
	struct trapframe *tf = arg;

	/* The interrupt is shared, so we have to make sure it's for us. */
	if ((RD4(ST_SR) & ST_SR_PITS) == 0)
		return (FILTER_STRAY);

#ifdef SKYEYE_WORKAROUNDS
	tot_count += 32768 / hz;
#endif
	hardclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
	return (FILTER_HANDLED);
}
/*
 * Entropy poll callback: emits the raw hardclock() tick counter.
 * Reports 0 bytes when the output buffer is too small.
 */
int hardclock_poll( void *data,
                    unsigned char *output, size_t len, size_t *olen )
{
    unsigned long ticks = hardclock();

    ((void) data);

    *olen = 0;
    if( len >= sizeof(unsigned long) )
    {
        memcpy( output, &ticks, sizeof(unsigned long) );
        *olen = sizeof(unsigned long);
    }

    return( 0 );
}
Example #27
0
/* NOTE(review): definition is truncated here -- the non-PITS path and
 * closing brace are outside this view. */
static int
pit_intr(void *arg)
{
	struct trapframe *fp = arg;
	uint32_t icnt;

	/* Shared line: only act if the PIT has actually counted down. */
	if (RD4(sc, PIT_SR) & PIT_PITS_DONE) {
		/*
		 * Bits 31:20 of PIVR hold the number of elapsed periods;
		 * presumably reading PIVR also acknowledges the interrupt
		 * -- TODO confirm against the AT91 PIT documentation.
		 */
		icnt = RD4(sc, PIT_PIVR) >> 20;

		/* Just add in the overflows we just read */
		timecount +=  PIT_PIV(RD4(sc, PIT_MR)) * icnt;

		hardclock(TRAPF_USERMODE(fp), TRAPF_PC(fp));
		return (FILTER_HANDLED);
	}
void
mainbus_interrupt(struct trapframe *tf)
{
	uint32_t cause;
	bool handled = false;

	/* interrupts should be off */
	KASSERT(curthread->t_curspl > 0);

	cause = tf->tf_cause;

	/* Service every asserted cause; more than one may be pending. */
	if (cause & LAMEBUS_IRQ_BIT) {
		lamebus_interrupt(lamebus);
		handled = true;
	}
	if (cause & LAMEBUS_IPI_BIT) {
		interprocessor_interrupt();
		lamebus_clear_ipi(lamebus, curcpu);
		handled = true;
	}
	if (cause & MIPS_TIMER_BIT) {
		/* Reset the timer (this clears the interrupt) */
		mips_timer_set(CPU_FREQUENCY / HZ);
		/* and call hardclock */
		hardclock();
		handled = true;
	}

	if (handled)
		return;

	/*
	 * Nothing we recognize was pending.  An interrupt line can
	 * assert (very) briefly and drop again before we get as far
	 * as reading the cause register -- that was actually seen...
	 * once -- so stay quiet in that case.  But an interrupt on a
	 * line that's not supposed to be wired up deserves a panic.
	 */
	if ((cause & CCA_IRQS) != 0) {
		panic("Unknown interrupt; cause register is %08x\n",
		      cause);
	}
}
/*
 * mbedTLS hardware entropy poll callback.
 *
 * Fix: on TARGET_STM32F4 builds the original never initialized *olen
 * (the caller would read garbage) and left data/output/len unused.
 * *olen is now set to 0 unconditionally before any early return, and
 * the unused parameters are explicitly voided on that path.
 */
int mbedtls_hardware_poll( void *data,
                    unsigned char *output, size_t len, size_t *olen )
{
    ((void) data);
    *olen = 0;

#if !defined(TARGET_STM32F4)
    {
        unsigned long timer = hardclock();

        if( len < sizeof(unsigned long) )
            return( 0 );

        /* Emit the raw tick counter as the entropy sample. */
        memcpy( output, &timer, sizeof(unsigned long) );
        *olen = sizeof(unsigned long);
    }
#else
    ((void) output);
    ((void) len);
#endif
    return( 0 );
}
Example #30
0
static int
clockintr(void *arg)
{
	struct clockframe *frame = arg;		/* not strictly necessary */
	extern void isa_specific_eoi(int irq);
#ifdef TESTHAT
	static int ticks = 0;
#endif
	/* Ticks since the high-availability timer was last unwedged. */
	static int hatUnwedgeCtr = 0;

	/* Snapshot timer 0 so interval measurements start at this tick. */
	gettimer0count(&timer0_at_last_clockintr);

	mc146818_read(NULL, MC_REGC); /* clear the clock interrupt */

	/* check to see if the high-availability timer needs to be unwedged */
	if (++hatUnwedgeCtr >= (hz / HAT_MIN_FREQ)) {
		hatUnwedgeCtr = 0;
		hatUnwedge(); 
	}

#ifdef TESTHAT
	/*
	 * HAT exerciser (test scaffolding): periodically toggles the
	 * HAT clock on and off and prints its bookkeeping counters.
	 */
	++ticks;

	if (testHatOn && ((ticks & 0x3f) == 0)) {
		if (testHatOn == 1) {
			hatClkAdjust(hatCount2);
			testHatOn = 2;
		} else {
			testHatOn = 0;
			hatClkOff();
			printf("hat off status: %d %d %x\n", nHats,
			    nHatWedges, fiqReason);
		}
	} else if (!testHatOn && (ticks & 0x1ff) == 0) {
		printf("hat on status: %d %d %x\n",
		    nHats, nHatWedges, fiqReason);
		testHatOn = 1;
		nHats = 0;
		fiqReason = 0;
		hatClkOn(hatCount, hatTest, 0xfeedface,
		    hatStack + HATSTACKSIZE - sizeof(unsigned),
		    hatWedge);
	}
#endif
	/* Finally, deliver the clock tick. */
	hardclock(frame);
	return(1);
}