Example #1
void RealView_handle_interrupt(void* context)
{
    arm_saved_state_t* regs = (arm_saved_state_t*)context;
    uint32_t ack;

    /* Acknowledge the interrupt (offset 0xC matches the GIC CPU-interface
       acknowledge register, GICC_IAR). */
    ack = HARDWARE_REGISTER(gRealviewPicBase + 0xC);

    /* Update absolute time by the ticks elapsed since the timer was loaded. */
    clock_absolute_time += (clock_decrementer - RealView_timer_value());
    
    /* Clear the pending timer interrupt. */
    HARDWARE_REGISTER(gRealviewTimerBase + TIMER_INTCLR) = 1;

    rtclock_intr(regs);
    
    /* Restart timer. */
    HARDWARE_REGISTER(gRealviewTimerBase) = clock_decrementer;
    RealView_timer_enabled(TRUE);
    
    clock_had_irq = TRUE;
    
    /* Signal end of interrupt (offset 0x10 matches the GIC CPU-interface
       EOI register, GICC_EOIR). */
    HARDWARE_REGISTER(gRealviewPicBase + 0x10) = ack;
    
    return;
}
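The handler above assumes a HARDWARE_REGISTER accessor for memory-mapped I/O. A minimal sketch of such a macro, assuming it is just a volatile 32-bit dereference (the real definition lives in the platform headers):

#include <stdint.h>

/* Sketch only: volatile keeps the compiler from caching or reordering
 * the device access, so each use is a real load or store. */
#define HARDWARE_REGISTER(addr) (*(volatile uint32_t *)(uintptr_t)(addr))

Reads and writes through the macro then compile to single device accesses, which is what the acknowledge/EOI sequence relies on.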
Example #2
void S5L8900X_handle_interrupt(void *context)
{
    uint32_t current_irq = HwReg(gS5L8900XVic0Base + VICADDRESS);

    /*
     * IRQ 5 is the timer interrupt; we handle it here and hand
     * everything else to IOKit.
     */
    if (current_irq == 0x5) {
        /*
         * Disable timer 
         */
        S5L8900X_timer_enabled(FALSE);

        /*
         * Update absolute time 
         */
        clock_absolute_time += clock_decrementer;

        /*
         * Resynchronize deadlines. 
         */
        rtclock_intr((arm_saved_state_t *) context);

        /*
         * EOI. 
         */
        HwReg(gS5L8900XVic0Base + VICADDRESS) = 0;

        /*
         * Enable timer. 
         */
        S5L8900X_timer_enabled(TRUE);

        /*
         * We had an IRQ. 
         */
        clock_had_irq = TRUE;
    } else {
        irq_iokit_dispatch(current_irq);
    }

    return;
}
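The handler gates the timer with S5L8900X_timer_enabled() around the tick bookkeeping. A minimal sketch of what such a helper might look like, assuming a hypothetical enable bit in a timer control register (TIMER_CTRL, TIMER_CTRL_EN, and gS5L8900XTimerBase are illustrative names, not the SoC's real layout):

#include <stdint.h>

#define HwReg(addr) (*(volatile uint32_t *)(uintptr_t)(addr))

#define TIMER_CTRL    0x0        /* hypothetical control-register offset */
#define TIMER_CTRL_EN (1u << 0)  /* hypothetical enable bit */

extern uintptr_t gS5L8900XTimerBase;

static void S5L8900X_timer_enabled(int enable)
{
    uint32_t ctrl = HwReg(gS5L8900XTimerBase + TIMER_CTRL);

    if (enable)
        ctrl |= TIMER_CTRL_EN;   /* resume counting */
    else
        ctrl &= ~TIMER_CTRL_EN;  /* stop the timer while state is updated */

    HwReg(gS5L8900XTimerBase + TIMER_CTRL) = ctrl;
}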
Example #3
/*
 * Handle clock ticks that were delayed while the clock was masked:
 * report any lost ticks, run rtclock_intr() on the master cpu, and
 * deliver the deferred hertz_tick().
 */
void
delayed_clock(void)
{
    int	i;
    int	my_cpu;

    mp_disable_preemption();
    my_cpu = cpu_number();

    if (missed_clock[my_cpu] > 1 && detect_lost_tick)
        printf("hardclock: missed %d clock interrupt(s) at %x\n",
               missed_clock[my_cpu]-1, masked_pc[my_cpu]);
    if (my_cpu == master_cpu) {
        i = rtclock_intr();
        assert(i == 0);
    }
    hertz_tick(0, masked_pc[my_cpu]);
    missed_clock[my_cpu] = 0;

    mp_enable_preemption();
}
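The per-CPU state drained here is filled in by the interrupt path while the clock is masked (see the MACH_KPROF branch in the next example). A sketch of the assumed declarations, with NCPUS standing for the configured processor count; the real declarations live in the clock code:

int      missed_clock[NCPUS]; /* ticks that arrived while the clock was masked */
unsigned masked_pc[NCPUS];    /* pc observed when the tick was masked */
int      detect_lost_tick;    /* tunable: >0 report lost ticks, >1 drop into Debugger() */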
Example #4
void
hardclock(
    int				ivect,		/* interrupt number */
    int				old_ipl,	/* old interrupt level */
    char			*ret_addr,	/* return address in interrupt handler */
    struct i386_interrupt_state	*regs)		/* saved registers */
{
    int mycpu;
    register unsigned pc;
    register boolean_t usermode;

    mp_disable_preemption();
    mycpu = cpu_number();

#ifdef	PARANOID_KDB
    if (paranoid_cpu == mycpu &&
            paranoid_current++ >= paranoid_count) {
        paranoid_current = 0;
        if (paranoid_debugger)
            Debugger("hardclock");
    }
#endif	/* PARANOID_KDB */

#if	MACH_MP_DEBUG
    /*
     * Increment the counter of clock ticks handled while interrupts
     * are masked.  Debugger() is called if the masked state persists
     * for a full second.  The counter is reset by splx() when the ipl
     * mask is set back to SPL0, and by spl0().
     */
    if (SPL_CMP_GT((old_ipl & 0xFF), SPL0)) {
        if (masked_state_cnt[mycpu]++ >= masked_state_max) {
            int max_save = masked_state_max;

            masked_state_cnt[mycpu] = 0;
            masked_state_max = 0x7fffffff;

            if (ret_addr == return_to_iret) {
                usermode = (regs->efl & EFL_VM) ||
                           ((regs->cs & 0x03) != 0);
                pc = (unsigned)regs->eip;
            } else {
                usermode = FALSE;
                pc = (unsigned)
                     ((struct i386_interrupt_state *)&old_ipl)->eip;
            }
            printf("looping at high IPL, usermode=%d pc=0x%x\n",
                   usermode, pc);
            Debugger("");

            masked_state_cnt[mycpu] = 0;
            masked_state_max = max_save;
        }
    } else
        masked_state_cnt[mycpu] = 0;
#endif	/* MACH_MP_DEBUG */

#if	MACH_KPROF
    /*
     * If we were masked against the clock, skip the call
     * to rtclock_intr().  When MACH_KPROF is set, the
     * clock frequency of the master cpu is confined
     * to the HZ rate.
     */
    if (SPL_CMP_LT(old_ipl & 0xFF, SPL7))
#endif	/* MACH_KPROF */
        /*
         * The master processor executes the rtclock_intr() routine
         * on every clock tick. The rtclock_intr() routine returns
         * a zero value on a HZ tick boundary.
         */
        if (mycpu == master_cpu) {
            if (rtclock_intr() != 0) {
                mp_enable_preemption();
                return;
            }
        }

    /*
     * The following code is executed at HZ rate by all processors
     * in the system. This implies that the clock rate on slave
     * processors must be HZ rate.
     */

    time_stamp_stat();

    if (ret_addr == return_to_iret) {
        /*
         * A kernel-loaded task executing within itself will look like
         * "kernel mode", here.  This is correct with syscalls
         * implemented using migrating threads, because it means that
         * the time spent in the server by a client thread will be
         * treated as "system" time for the client thread (and nothing
         * for the server).  This conforms to the CPU reporting for an
         * integrated kernel.
         */
        usermode = (regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0);
        pc = (unsigned)regs->eip;
    } else {
        usermode = FALSE;
        pc = (unsigned)((struct i386_interrupt_state *)&old_ipl)->eip;
    }

#if	MACH_KPROF
    /*
     * If we were masked against the clock, just memorize the pc
     * and the fact that the clock interrupt was delayed.
     */
    if (SPL_CMP_GE((old_ipl & 0xFF), SPL7)) {
        assert(!usermode);
        if (missed_clock[mycpu]++ && detect_lost_tick > 1)
            Debugger("");
        masked_pc[mycpu] = pc;
    } else
#endif	/* MACH_KPROF */

        hertz_tick(usermode, pc);

#if	NCPUS > 1 && AT386
    /*
     * Instead of having the master processor interrupt
     * all active processors, each processor in turn interrupts
     * the next active one. This avoids all slave processors
     * accessing the same R/W data simultaneously.
     */
    slave_clock();
#endif	/* NCPUS > 1 && AT386 */

    mp_enable_preemption();
}
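hardclock() compares interrupt priority levels through the SPL_CMP_* macros rather than raw operators, so a port can supply its own ordering. A minimal sketch, assuming spl values are plain integer priorities (ports where they are mask words would map them through a table first):

/* Sketch only; the real definitions are machine-dependent. */
#define SPL_CMP_GT(a, b) ((unsigned)(a) >  (unsigned)(b))
#define SPL_CMP_GE(a, b) ((unsigned)(a) >= (unsigned)(b))
#define SPL_CMP_LT(a, b) ((unsigned)(a) <  (unsigned)(b))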