/*
 * Insert (or refresh) a data-TLB entry mapping @address with @pte.
 * The entry is written for the currently active ASID.
 */
static void update_dtlb(unsigned long address, pte_t pte)
{
	u32 tlbehi;
	u32 mmucr;

	/*
	 * Build TLBEHI: keep the current ASID, insert the VPN of the
	 * faulting address and set the valid bit.
	 */
	tlbehi = sysreg_read(TLBEHI);
	tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi));
	tlbehi |= address & MMU_VPN_MASK;
	tlbehi |= SYSREG_BIT(TLBEHI_V);
	sysreg_write(TLBEHI, tlbehi);

	/* Does this mapping already exist? (probe sets MMUCR_N on miss) */
	__builtin_tlbs();
	mmucr = sysreg_read(MMUCR);

	if (mmucr & SYSREG_BIT(MMUCR_N)) {
		/* Not found -- pick a not-recently-accessed entry */
		unsigned int rp;
		u32 tlbar = sysreg_read(TLBARLO);

		rp = 32 - fls(tlbar);
		if (rp == 32) {
			/* Every entry is marked accessed: reset the bits */
			rp = 0;
			sysreg_write(TLBARLO, -1L);
		}

		/* Point the replacement pointer at the chosen entry */
		mmucr = SYSREG_BFINS(DRP, rp, mmucr);
		sysreg_write(MMUCR, mmucr);
	}

	/* Only the hardware-defined PTE bits go into TLBELO */
	sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);

	/* Let's go */
	__builtin_tlbw();
}
/*
 * Invalidate the TLB entry for @page under @asid, if one exists.
 *
 * NOTE(review): a commented variant of this function elsewhere in the
 * file states that the caller must mask out non-PFN bits in @page and
 * switch ASID beforehand; that contract presumably applies here too.
 */
static void __flush_tlb_page(unsigned long asid, unsigned long page)
{
	u32 mmucr, tlbehi;

	tlbehi = page | asid;
	sysreg_write(TLBEHI, tlbehi);

	/* Probe; MMUCR_N is set when no matching entry was found */
	__builtin_tlbs();
	mmucr = sysreg_read(MMUCR);

	if (!(mmucr & SYSREG_BIT(MMUCR_N))) {
		unsigned int entry;
		u32 tlbarlo;

		/* Rewrite TLBEHI (valid bit clear) for the write-back */
		sysreg_write(TLBEHI, tlbehi);

		/* Mark the matched entry as "not accessed" */
		entry = SYSREG_BFEXT(DRP, mmucr);
		tlbarlo = sysreg_read(TLBARLO);
		tlbarlo |= (0x80000000UL >> entry);
		sysreg_write(TLBARLO, tlbarlo);

		/* Update the entry with valid bit clear */
		__builtin_tlbw();
	}
/* Invalidate the TLB entry for @page under @asid, if one exists. */
static void __flush_tlb_page(unsigned long asid, unsigned long page)
{
	u32 mmucr, tlbehi;

	/*
	 * Caller is responsible for masking out non-PFN bits in page
	 * and changing the current ASID if necessary. This means that
	 * we don't need to flush the pipeline after writing TLBEHI.
	 */
	tlbehi = page | asid;
	sysreg_write(TLBEHI, tlbehi);

	/* Probe; MMUCR_N is set when no matching entry was found */
	__builtin_tlbs();
	mmucr = sysreg_read(MMUCR);

	if (!(mmucr & SYSREG_BIT(MMUCR_N))) {
		unsigned int entry;
		u32 tlbarlo;

		/* Clear the "valid" bit */
		sysreg_write(TLBEHI, tlbehi);

		/* mark the entry as "not accessed" */
		entry = SYSREG_BFEXT(DRP, mmucr);
		tlbarlo = sysreg_read(TLBARLO);
		tlbarlo |= (0x80000000UL >> entry);
		sysreg_write(TLBARLO, tlbarlo);

		/* update the entry with valid bit clear */
		__builtin_tlbw();
	}
/*
 * Taken from MIPS c0_hpt_timer_init().
 *
 * The reason COUNT is written twice is probably to make sure we don't get any
 * timer interrupts while we are messing with the counter.
 */
int __weak avr32_hpt_start(void)
{
	/* Snapshot the free-running cycle counter */
	u32 count = sysreg_read(COUNT);

	/* First COMPARE match at the next jiffy boundary after "count" */
	expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy;

	/* Park COUNT a full jiffy before the match while programming... */
	sysreg_write(COUNT, expirelo - cycles_per_jiffy);
	sysreg_write(COMPARE, expirelo);
	/* ...then restore the real counter value */
	sysreg_write(COUNT, count);

	return 0;
}
/*
 * Boot-time timer setup: initialize the wall clock from the RTC,
 * register the cycle-counter clocksource and start the HPT.
 */
void __init time_init(void)
{
	int ret;

	/*
	 * Make sure we don't get any COMPARE interrupts before we can
	 * handle them.
	 */
	sysreg_write(COMPARE, 0);

	/* Seed xtime from the RTC; monotonic time starts at zero */
	xtime.tv_sec = rtc_get_time();
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	ret = avr32_hpt_init();
	if (ret) {
		pr_debug("timer: failed setup: %d\n", ret);
		return;
	}

	/* Clocksource failure is non-fatal; we still start the timer */
	ret = clocksource_register(&clocksource_avr32);
	if (ret)
		pr_debug("timer: could not register clocksource: %d\n", ret);

	ret = avr32_hpt_start();
	if (ret) {
		pr_debug("timer: failed starting: %d\n", ret);
		return;
	}
}
/*
 * Boot-time timer setup: register the cycle counter as a clocksource
 * and the COUNT/COMPARE pair as a per-CPU clockevent device.
 */
void __init time_init(void)
{
	unsigned long counter_hz;
	int ret;

	/* figure rate for counter */
	counter_hz = clk_get_rate(boot_cpu_data.clk);
	ret = clocksource_register_hz(&counter, counter_hz);
	if (ret)
		pr_debug("timer: could not register clocksource: %d\n", ret);

	/* setup COMPARE clockevent */
	comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift);
	comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator);
	/* +1 keeps min_delta strictly above the 50-cycle conversion */
	comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1;

	comparator.cpumask = cpumask_of(0);

	/* No COMPARE interrupts until the handler is in place */
	sysreg_write(COMPARE, 0);
	timer_irqaction.dev_id = &comparator;

	ret = setup_irq(0, &timer_irqaction);
	if (ret)
		pr_debug("timer: could not request IRQ 0: %d\n", ret);
	else {
		clockevents_register_device(&comparator);

		pr_info("%s: irq 0, %lu.%03lu MHz\n", comparator.name,
			((counter_hz + 500) / 1000) / 1000,
			((counter_hz + 500) / 1000) % 1000);
	}
}
/*
 * All interrupts go via intc at some point.
 */
asmlinkage void do_IRQ(int level, struct pt_regs *regs)
{
	struct irq_desc *desc;
	struct pt_regs *old_regs;
	unsigned int irq;
	unsigned long status_reg;

	local_irq_disable();

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* INTCAUSE0..3 report the pending IRQ at each priority level */
	irq = intc_readl(&intc0, INTCAUSE0 - 4 * level);
	desc = irq_desc + irq;
	desc->handle_irq(irq, desc);

	/*
	 * Clear all interrupt level masks so that we may handle
	 * interrupts during softirq processing. If this is a nested
	 * interrupt, interrupts must stay globally disabled until we
	 * return.
	 */
	status_reg = sysreg_read(SR);
	status_reg &= ~(SYSREG_BIT(I0M) | SYSREG_BIT(I1M)
			| SYSREG_BIT(I2M) | SYSREG_BIT(I3M));
	sysreg_write(SR, status_reg);

	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * U-Boot timer setup: zero the cycle counter, compute the
 * tick-to-cycle conversion factor and install the timer IRQ handler.
 */
void timer_init(void)
{
	extern void timer_interrupt_handler(void);
	u64 tmp;

	/* Start the cycle counter from zero */
	sysreg_write(COUNT, 0);

	/* tb_factor = (CFG_HZ << 32) / cpu_hz, rounded to nearest */
	tmp = (u64)CFG_HZ << 32;
	tmp += gd->cpu_hz / 2;
	do_div(tmp, gd->cpu_hz);
	tb_factor = (u32)tmp;

	/* Handler on line 0, priority 3; bail out if that fails */
	if (set_interrupt_handler(0, &timer_interrupt_handler, 3))
		return;

	/* For all practical purposes, this gives us an overflow interrupt */
	sysreg_write(COMPARE, 0xffffffff);
}
/*
 * Acknowledge a COUNT/COMPARE timer interrupt and program the next
 * expiry, recovering if we have fallen behind the counter.
 */
static void avr32_timer_ack(void)
{
	u32 count;

	/* Ack this timer interrupt and set the next one */
	expirelo += cycles_per_jiffy;

	/* setting COMPARE to 0 stops the COUNT-COMPARE */
	if (expirelo == 0) {
		/* Skip the 0 value so the compare stays armed */
		sysreg_write(COMPARE, expirelo + 1);
	} else {
		sysreg_write(COMPARE, expirelo);
	}

	/* Check to see if we have missed any timer interrupts */
	count = sysreg_read(COUNT);
	if ((count - expirelo) < 0x7fffffff) {
		/* COUNT already passed expirelo: push the deadline forward */
		expirelo = count + cycles_per_jiffy;
		sysreg_write(COMPARE, expirelo);
	}
}
/*
 * Map and initialize the internal interrupt controller (intc0),
 * route every internal IRQ to priority level 0 and unmask all
 * interrupt levels in the status register.
 */
void __init init_IRQ(void)
{
	extern void _evba(void);
	extern void irq_level0(void);
	struct resource *regs;
	struct clk *pclk;
	unsigned int i;
	u32 offset, readback;

	regs = platform_get_resource(&at32_intc0_device, IORESOURCE_MEM, 0);
	if (!regs) {
		printk(KERN_EMERG "intc: no mmio resource defined\n");
		goto fail;
	}
	pclk = clk_get(&at32_intc0_device.dev, "pclk");
	if (IS_ERR(pclk)) {
		printk(KERN_EMERG "intc: no clock defined\n");
		goto fail;
	}

	clk_enable(pclk);

	intc0.regs = ioremap(regs->start, regs->end - regs->start + 1);
	if (!intc0.regs) {
		printk(KERN_EMERG "intc: failed to map registers (0x%08lx)\n",
		       (unsigned long)regs->start);
		goto fail;
	}

	/*
	 * Initialize all interrupts to level 0 (lowest priority). The
	 * priority level may be changed by calling
	 * irq_set_priority().
	 */
	offset = (unsigned long)&irq_level0 - (unsigned long)&_evba;
	for (i = 0; i < NR_INTERNAL_IRQS; i++) {
		intc_writel(&intc0, INTPR0 + 4 * i, offset);

		/*
		 * NOTE(review): only lines whose INTPR register reads
		 * back intact get a handler — presumably this detects
		 * unimplemented IRQ lines; confirm against the
		 * controller datasheet.
		 */
		readback = intc_readl(&intc0, INTPR0 + 4 * i);
		if (readback == offset)
			set_irq_chip_and_handler(i, &intc0.chip,
						 handle_simple_irq);
	}

	/* Unmask all interrupt levels */
	sysreg_write(SR, (sysreg_read(SR)
			  & ~(SR_I3M | SR_I2M | SR_I1M | SR_I0M)));

	return;

fail:
	panic("Interrupt controller initialization failed!\n");
}
/*
 * COMPARE interrupt handler: hand the event off to the clockevent
 * framework.
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *clkevt = dev_id;

	/*
	 * Disable the interrupt until the clockevent subsystem
	 * reprograms it.
	 */
	sysreg_write(COMPARE, 0);

	clkevt->event_handler(clkevt);

	return IRQ_HANDLED;
}
/* sysfs store: write a new value into performance counter 1. */
static ssize_t store_pc1count(struct sys_device *dev, const char *buf,
			      size_t count)
{
	char *end;
	unsigned long value;

	value = simple_strtoul(buf, &end, 0);
	/* Reject input containing no digits at all */
	if (end == buf)
		return -EINVAL;

	sysreg_write(PCNT1, value);

	return count;
}
/*
 * Insert (or refresh) a data-TLB entry mapping @address with @pte
 * for the currently active ASID.
 */
static void update_dtlb(unsigned long address, pte_t pte)
{
	u32 tlbehi;
	u32 mmucr;

	/*
	 * We're not changing the ASID here, so no need to flush the
	 * pipeline.
	 */
	tlbehi = sysreg_read(TLBEHI);
	tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi));
	tlbehi |= address & MMU_VPN_MASK;
	tlbehi |= SYSREG_BIT(TLBEHI_V);
	sysreg_write(TLBEHI, tlbehi);

	/* Does this mapping already exist? */
	__builtin_tlbs();
	mmucr = sysreg_read(MMUCR);

	if (mmucr & SYSREG_BIT(MMUCR_N)) {
		/* Not found -- pick a not-recently-accessed entry */
		unsigned int rp;
		u32 tlbar = sysreg_read(TLBARLO);

		rp = 32 - fls(tlbar);
		if (rp == 32) {
			/* All entries accessed: reset the accessed bits */
			rp = 0;
			sysreg_write(TLBARLO, -1L);
		}

		mmucr = SYSREG_BFINS(DRP, rp, mmucr);
		sysreg_write(MMUCR, mmucr);
	}

	/* Only the hardware-defined PTE bits go into TLBELO */
	sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);

	/* Let's go */
	__builtin_tlbw();
}
/* sysfs store: select the event monitored by performance counter 1. */
static ssize_t store_pc1event(struct sys_device *dev, const char *buf,
			      size_t count)
{
	char *end;
	unsigned long event;

	event = simple_strtoul(buf, &end, 0);
	/* No digits, or value wider than the 6-bit event field */
	if (end == buf || event > 0x3f)
		return -EINVAL;

	/* Merge the new event code into bits 23:18 of PCCR */
	sysreg_write(PCCR, (event << 18) | (sysreg_read(PCCR) & 0xff03ffff));

	return count;
}
/* sysfs store: write a new value into the cycle counter. */
static ssize_t store_pccycles(struct sys_device *dev,
			      struct sysdev_attribute *attr,
			      const char *buf, size_t count)
{
	char *end;
	unsigned long value;

	value = simple_strtoul(buf, &end, 0);
	/* Reject input containing no digits at all */
	if (end == buf)
		return -EINVAL;

	sysreg_write(PCCNT, value);

	return count;
}
/*
 * Dump one data-TLB entry to the console.  Reads the entry via the
 * MMU registers, so the current MMUCR/TLBEHI contents are saved and
 * restored around the access, with interrupts disabled throughout.
 */
static void show_dtlb_entry(unsigned int index)
{
	u32 tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
	unsigned long flags;

	local_irq_save(flags);

	/* Save the registers we are about to clobber */
	mmucr_save = sysreg_read(MMUCR);
	tlbehi_save = sysreg_read(TLBEHI);

	/* Point the replacement pointer at @index and read the entry */
	mmucr = SYSREG_BFINS(DRP, index, mmucr_save);
	sysreg_write(MMUCR, mmucr);

	__builtin_tlbr();
	cpu_sync_pipeline();

	tlbehi = sysreg_read(TLBEHI);
	tlbelo = sysreg_read(TLBELO);

	/* index, valid, global, ASID, VPN, PFN, AP, SZ, C/B/W/D flags */
	printk("%2u: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
	       index,
	       SYSREG_BFEXT(TLBEHI_V, tlbehi) ? '1' : '0',
	       SYSREG_BFEXT(G, tlbelo) ? '1' : '0',
	       SYSREG_BFEXT(ASID, tlbehi),
	       SYSREG_BFEXT(VPN, tlbehi) >> 2,
	       SYSREG_BFEXT(PFN, tlbelo) >> 2,
	       SYSREG_BFEXT(AP, tlbelo),
	       SYSREG_BFEXT(SZ, tlbelo),
	       SYSREG_BFEXT(TLBELO_C, tlbelo) ? 'C' : ' ',
	       SYSREG_BFEXT(B, tlbelo) ? 'B' : ' ',
	       SYSREG_BFEXT(W, tlbelo) ? 'W' : ' ',
	       SYSREG_BFEXT(TLBELO_D, tlbelo) ? 'D' : ' ');

	/* Restore the clobbered registers */
	sysreg_write(MMUCR, mmucr_save);
	sysreg_write(TLBEHI, tlbehi_save);
	cpu_sync_pipeline();
	local_irq_restore(flags);
}
/*
 * Set the current timer value to @t ticks (CFG_HZ rate).  The 64-bit
 * tick count is split into the hardware COUNT register (low half) and
 * the software timer_overflow counter (high half).
 */
void set_timer(unsigned long t)
{
	unsigned long long ticks = t;
	unsigned long lo, hi, hi_new;

	/* Convert CFG_HZ ticks to timebase cycles */
	ticks = (ticks * get_tbclk()) / CFG_HZ;
	hi = ticks >> 32;
	lo = ticks & 0xffffffffUL;

	/*
	 * Write both halves, then re-read timer_overflow: if an
	 * overflow interrupt bumped it while we were writing COUNT,
	 * retry so the pair stays consistent.
	 */
	do {
		timer_overflow = hi;
		sysreg_write(COUNT, lo);
		hi_new = timer_overflow;
	} while (hi_new != hi);
}
/*
 * Clockevent shutdown callback: disarm the COMPARE interrupt and,
 * if we previously forced idle polling, undo that.
 */
static int comparator_shutdown(struct clock_event_device *evdev)
{
	pr_debug("%s: %s\n", __func__, evdev->name);

	/* A COMPARE value of 0 disarms the compare interrupt */
	sysreg_write(COMPARE, 0);

	if (disable_cpu_idle_poll) {
		/*
		 * Only disable idle poll if we have forced that
		 * in a previous call.
		 */
		disable_cpu_idle_poll = false;
		cpu_idle_poll_ctrl(false);
	}

	return 0;
}
/* sysfs store: enable (non-zero) or disable (zero) the perf counters. */
static ssize_t store_pcenable(struct sys_device *dev, const char *buf,
			      size_t count)
{
	char *end;
	unsigned long enable;

	enable = simple_strtoul(buf, &end, 0);
	/* Reject input containing no digits at all */
	if (end == buf)
		return -EINVAL;

	/* Normalize to 0/1 and update bit 0 of PCCR */
	enable = !!enable;
	sysreg_write(PCCR, (sysreg_read(PCCR) & ~1UL) | enable);

	return count;
}
/*
 * Clockevent mode callback for the COUNT/COMPARE comparator.
 */
static void comparator_mode(enum clock_event_mode mode,
			    struct clock_event_device *evdev)
{
	switch (mode) {
	case CLOCK_EVT_MODE_ONESHOT:
		pr_debug("%s: start\n", evdev->name);
		/* FALLTHROUGH */
	case CLOCK_EVT_MODE_RESUME:
		/*
		 * NOTE(review): presumably the counter stops in idle
		 * sleep, hence sleep is disabled while the comparator
		 * is active — confirm against the CPU idle code.
		 */
		cpu_disable_idle_sleep();
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* A COMPARE value of 0 disarms the compare interrupt */
		sysreg_write(COMPARE, 0);
		pr_debug("%s: stop\n", evdev->name);
		cpu_enable_idle_sleep();
		break;
	default:
		BUG();
	}
}
/*
 * Boot-time timer setup: seed xtime with a fixed epoch, register the
 * cycle-counter clocksource and the COMPARE clockevent device.
 */
void __init time_init(void)
{
	unsigned long counter_hz;
	int ret;

	/* No RTC here: start the wall clock at a fixed date */
	xtime.tv_sec = mktime(2007, 1, 1, 0, 0, 0);
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	/* figure rate for counter */
	counter_hz = clk_get_rate(boot_cpu_data.clk);
	counter.mult = clocksource_hz2mult(counter_hz, counter.shift);

	/* Clocksource failure is non-fatal; we still set up the timer */
	ret = clocksource_register(&counter);
	if (ret)
		pr_debug("timer: could not register clocksource: %d\n", ret);

	/* setup COMPARE clockevent */
	comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift);
	comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator);
	/* +1 keeps min_delta strictly above the 50-cycle conversion */
	comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1;

	/* No COMPARE interrupts until the handler is in place */
	sysreg_write(COMPARE, 0);
	timer_irqaction.dev_id = &comparator;

	ret = setup_irq(0, &timer_irqaction);
	if (ret)
		pr_debug("timer: could not request IRQ 0: %d\n", ret);
	else {
		clockevents_register_device(&comparator);

		pr_info("%s: irq 0, %lu.%03lu MHz\n", comparator.name,
			((counter_hz + 500) / 1000) / 1000,
			((counter_hz + 500) / 1000) % 1000);
	}
}
/*
 * Low-level interrupt entry: all interrupts go via intc at some point.
 */
asmlinkage void do_IRQ(int level, struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned int irq;
	unsigned long status_reg;

	local_irq_disable();

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* INTCAUSE0..3 report the pending IRQ at each priority level */
	irq = intc_readl(&intc0, INTCAUSE0 - 4 * level);
	generic_handle_irq(irq);

	/*
	 * Clear all interrupt level masks so that we may handle
	 * interrupts during softirq processing. If this is a nested
	 * interrupt, interrupts must stay globally disabled until we
	 * return.
	 */
	status_reg = sysreg_read(SR);
	status_reg &= ~(SYSREG_BIT(I0M) | SYSREG_BIT(I1M)
			| SYSREG_BIT(I2M) | SYSREG_BIT(I3M));
	sysreg_write(SR, status_reg);

	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * OCD debug exception handler.  Classifies the debug event (breakpoint
 * vs. single step, user vs. kernel mode), manages the TIF_BREAKPOINT /
 * TIF_SINGLE_STEP handshake used to step over exception and syscall
 * handlers, and delivers SIGTRAP when appropriate.  May return an
 * adjusted pt_regs pointer when the real user-mode registers live one
 * frame further up the stack.
 */
asmlinkage struct pt_regs *do_debug(struct pt_regs *regs)
{
	struct thread_info *ti;
	unsigned long trampoline_addr;
	u32 status;
	u32 ctrl;
	int code;

	status = ocd_read(DS);
	ti = current_thread_info();
	code = TRAP_BRKPT;

	pr_debug("do_debug: status=0x%08x PC=0x%08lx SR=0x%08lx tif=0x%08lx\n",
		 status, regs->pc, regs->sr, ti->flags);

	if (!user_mode(regs)) {
		unsigned long die_val = DIE_BREAKPOINT;

		/* Single-step status bit => report a single step instead */
		if (status & (1 << OCD_DS_SSS_BIT))
			die_val = DIE_SSTEP;

		if (notify_die(die_val, "ptrace", regs, 0, 0, SIGTRAP)
		    == NOTIFY_STOP)
			return regs;

		if ((status & (1 << OCD_DS_SWB_BIT))
		    && test_and_clear_ti_thread_flag(ti, TIF_BREAKPOINT)) {
			/*
			 * Explicit breakpoint from trampoline or
			 * exception/syscall/interrupt handler.
			 *
			 * The real saved regs are on the stack right
			 * after the ones we saved on entry.
			 */
			regs++;
			pr_debug(" -> TIF_BREAKPOINT done, adjusted regs:"
				 "PC=0x%08lx SR=0x%08lx\n",
				 regs->pc, regs->sr);
			BUG_ON(!user_mode(regs));

			if (test_thread_flag(TIF_SINGLE_STEP)) {
				pr_debug("Going to do single step...\n");
				return regs;
			}

			/*
			 * No TIF_SINGLE_STEP means we're done
			 * stepping over a syscall. Do the trap now.
			 */
			code = TRAP_TRACE;
		} else if ((status & (1 << OCD_DS_SSS_BIT))
			   && test_ti_thread_flag(ti, TIF_SINGLE_STEP)) {
			pr_debug("Stepped into something, "
				 "setting TIF_BREAKPOINT...\n");
			set_ti_thread_flag(ti, TIF_BREAKPOINT);

			/*
			 * We stepped into an exception, interrupt or
			 * syscall handler. Some exception handlers
			 * don't check for pending work, so we need to
			 * set up a trampoline just in case.
			 *
			 * The exception entry code will undo the
			 * trampoline stuff if it does a full context
			 * save (which also means that it'll check for
			 * pending work later.)
			 */
			if ((regs->sr & MODE_MASK) == MODE_EXCEPTION) {
				trampoline_addr
					= (unsigned long)&debug_trampoline;

				pr_debug("Setting up trampoline...\n");
				/* Redirect the exception return path */
				ti->rar_saved = sysreg_read(RAR_EX);
				ti->rsr_saved = sysreg_read(RSR_EX);
				sysreg_write(RAR_EX, trampoline_addr);
				sysreg_write(RSR_EX, (MODE_EXCEPTION
						      | SR_EM | SR_GM));
				BUG_ON(ti->rsr_saved & MODE_MASK);
			}

			/*
			 * If we stepped into a system call, we
			 * shouldn't do a single step after we return
			 * since the return address is right after the
			 * "scall" instruction we were told to step
			 * over.
			 */
			if ((regs->sr & MODE_MASK) == MODE_SUPERVISOR) {
				pr_debug("Supervisor; no single step\n");
				clear_ti_thread_flag(ti, TIF_SINGLE_STEP);
			}

			/* Turn off the hardware single-step control bit */
			ctrl = ocd_read(DC);
			ctrl &= ~(1 << OCD_DC_SS_BIT);
			ocd_write(DC, ctrl);

			return regs;
		} else {
			printk(KERN_ERR
			       "Unexpected OCD_DS value: 0x%08x\n", status);
			printk(KERN_ERR
			       "Thread flags: 0x%08lx\n", ti->flags);
			die("Unhandled debug trap in kernel mode",
			    regs, SIGTRAP);
		}
	} else if (status & (1 << OCD_DS_SSS_BIT)) {
		/* Single step in user mode */
		code = TRAP_TRACE;

		/* Turn off the hardware single-step control bit */
		ctrl = ocd_read(DC);
		ctrl &= ~(1 << OCD_DC_SS_BIT);
		ocd_write(DC, ctrl);
	}

	pr_debug("Sending SIGTRAP: code=%d PC=0x%08lx SR=0x%08lx\n",
		 code, regs->pc, regs->sr);

	clear_thread_flag(TIF_SINGLE_STEP);
	_exception(SIGTRAP, regs, code, instruction_pointer(regs));

	return regs;
}
/*
 * Reset the timer: zero the hardware cycle counter and the software
 * overflow count.
 */
void reset_timer(void)
{
	sysreg_write(COUNT, 0);
	/* process any pending interrupts */
	cpu_sync_pipeline();
	timer_overflow = 0;
}