static void __flush_tlb_page(unsigned long asid, unsigned long page)
{
    u32 mmucr, tlbehi;

    /*
     * Caller is responsible for masking out non-PFN bits in page
     * and changing the current ASID if necessary. This means that
     * we don't need to flush the pipeline after writing TLBEHI.
     */
    tlbehi = page | asid;
    sysreg_write(TLBEHI, tlbehi);

    __builtin_tlbs();
    mmucr = sysreg_read(MMUCR);

    if (!(mmucr & SYSREG_BIT(MMUCR_N))) {
        unsigned int entry;
        u32 tlbarlo;

        /* Clear the "valid" bit */
        sysreg_write(TLBEHI, tlbehi);

        /* mark the entry as "not accessed" */
        entry = SYSREG_BFEXT(DRP, mmucr);
        tlbarlo = sysreg_read(TLBARLO);
        tlbarlo |= (0x80000000UL >> entry);
        sysreg_write(TLBARLO, tlbarlo);

        /* update the entry with valid bit clear */
        __builtin_tlbw();
    }
}
/* * For short delays only. It will overflow after a few seconds. */ void __udelay(unsigned long usec) { unsigned long cycles; unsigned long base; unsigned long now; base = sysreg_read(COUNT); cycles = ((usec * (get_tbclk() / 10000)) + 50) / 100; do { now = sysreg_read(COUNT); } while ((now - base) < cycles); }
/* * For short delays only. It will overflow after a few seconds. */ void udelay(unsigned long usec) { unsigned long now, end; now = sysreg_read(COUNT); end = ((usec * (get_tbclk() / 10000)) + 50) / 100; end += now; while (now > end) now = sysreg_read(COUNT); while (now < end) now = sysreg_read(COUNT); }
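/*
 * Hedged example, not taken from the original sources: a millisecond delay
 * built on top of udelay() above. Looping in 1 ms chunks keeps the
 * usec * (get_tbclk() / 10000) product well inside 32 bits, which is what
 * the "short delays only" warning is about. The helper name is made up.
 */
static void mdelay_sketch(unsigned long msec)
{
    while (msec--)
        udelay(1000);
}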
/* * All interrupts go via intc at some point. */ asmlinkage void do_IRQ(int level, struct pt_regs *regs) { struct irq_desc *desc; struct pt_regs *old_regs; unsigned int irq; unsigned long status_reg; local_irq_disable(); old_regs = set_irq_regs(regs); irq_enter(); irq = intc_readl(&intc0, INTCAUSE0 - 4 * level); desc = irq_desc + irq; desc->handle_irq(irq, desc); /* * Clear all interrupt level masks so that we may handle * interrupts during softirq processing. If this is a nested * interrupt, interrupts must stay globally disabled until we * return. */ status_reg = sysreg_read(SR); status_reg &= ~(SYSREG_BIT(I0M) | SYSREG_BIT(I1M) | SYSREG_BIT(I2M) | SYSREG_BIT(I3M)); sysreg_write(SR, status_reg); irq_exit(); set_irq_regs(old_regs); }
static ssize_t show_pccycles(struct sys_device *dev, char *buf) { unsigned long pccnt; pccnt = sysreg_read(PCCNT); return sprintf(buf, "%lu\n", pccnt); }
static ssize_t show_pcenable(struct sys_device *dev, char *buf) { unsigned long pccr; pccr = sysreg_read(PCCR); return sprintf(buf, "%c\n", (pccr & 1)?'1':'0'); }
static ssize_t show_pc1event(struct sys_device *dev, char *buf) { unsigned long pccr; pccr = sysreg_read(PCCR); return sprintf(buf, "0x%lx\n", (pccr >> 18) & 0x3f); }
int twi_master_init(volatile avr32_twi_t *twi, const twi_options_t *opt)
{
    irqflags_t flags = sysreg_read(AVR32_SR);
    int status = TWI_SUCCESS;

    // Set pointer to TWIM instance for IT
    twi_inst = twi;

    // Disable TWI interrupts
    cpu_irq_disable();
    twi->idr = ~0UL;
    twi->sr;

    // Reset TWI
    twi->cr = AVR32_TWI_CR_SWRST_MASK;
    cpu_irq_restore(flags);

    // Dummy read in SR
    twi->sr;

    // Register twi_master_interrupt_handler interrupt on level
    // CONF_TWI_IRQ_LEVEL
    flags = cpu_irq_save();
    irq_register_handler(&twi_master_interrupt_handler, CONF_TWI_IRQ_LINE,
                         CONF_TWI_IRQ_LEVEL);
    cpu_irq_restore(flags);

    // Select the speed
    twi_set_speed(twi, opt->speed, opt->pba_hz);

    // Probe the component
    //status = twi_probe(twi, opt->chip);

    return status;
}
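/*
 * Hedged usage sketch for twi_master_init() above. The AVR32_TWI instance
 * macro, the 66 MHz PBA clock and the 0x50 chip address are illustrative
 * assumptions, not values from the original project.
 */
static void twi_master_setup_example(void)
{
    const twi_options_t opt = {
        .pba_hz = 66000000,   /* assumed PBA clock frequency */
        .speed  = 400000,     /* fast-mode TWI/I2C */
        .chip   = 0x50,       /* example slave (EEPROM-style) address */
    };

    if (twi_master_init(&AVR32_TWI, &opt) != TWI_SUCCESS) {
        /* handle initialization failure */
    }
}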
static ssize_t show_pc1count(struct sys_device *dev, char *buf) { unsigned long pcnt1; pcnt1 = sysreg_read(PCNT1); return sprintf(buf, "%lu\n", pcnt1); }
static ssize_t show_pc0count(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { unsigned long pcnt0; pcnt0 = sysreg_read(PCNT0); return sprintf(buf, "%lu\n", pcnt0); }
//int decode_frame(MPADecodeContext * s,
//                 void *data, int *data_size,
//                 uint8_t * buf, int buf_size)
int Audio_Dec_codec(COS_HANDLE codech, COS_BYTE *buf, COS_DWORD buf_size,
                    COS_BYTE *data, COS_DWORD *data_size, COS_SHORT *vu)
{
#ifdef DEBUG_TIME
    int begin, end;
#endif
    int audio_find_header_ret;
    MPADecodeContext *s = (MPADecodeContext *)codech;
    unsigned char *pPosFound;

    s->frame_size = 0;
    *data_size = 0;

    audio_find_header_ret = mp3_Inner_Find_MPEG_1_L2_Head_New(s, buf, buf_size,
                                                              &pPosFound);
    if (audio_find_header_ret == 0) {
        short *out_samples = (short *)data;

        s->inbuf_ptr = s->inbuf;
        memcpy(s->inbuf_ptr, pPosFound, s->frame_size);
        s->inbuf_ptr += s->frame_size;
        buf_size -= (s->cur_offset + s->frame_size);

#ifdef DEBUG_TIME
        begin = sysreg_read(reg_CYCLES);
#endif
        *data_size = mp_decode_frame(s, out_samples);
#ifdef DEBUG_TIME
        end = sysreg_read(reg_CYCLES);
        vu_count += (end - begin);
#endif

        s->inbuf_ptr = s->inbuf;
        return (s->cur_offset + s->frame_size); //s->frame_size;
    } else if (audio_find_header_ret == -1) {
        return s->cur_offset;
    } else if (audio_find_header_ret == -2) {
        // reserve 4 bytes for the header in the next time
        return (buf_size - 3); //0;
    }
}
/* * Taken from MIPS c0_hpt_timer_init(). * * The reason COUNT is written twice is probably to make sure we don't get any * timer interrupts while we are messing with the counter. */ int __weak avr32_hpt_start(void) { u32 count = sysreg_read(COUNT); expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy; sysreg_write(COUNT, expirelo - cycles_per_jiffy); sysreg_write(COMPARE, expirelo); sysreg_write(COUNT, count); return 0; }
uint32_t TProfiler<sum_shift>::time_interval()
{
    //return 1;
    static uint32_t Cycles;

    uint32_t Cyc = sysreg_read(reg_CYCLES);
    uint32_t Res = Cyc - Cycles;
    Cycles = Cyc;
    return Res;
}
void __init init_IRQ(void) { extern void _evba(void); extern void irq_level0(void); struct resource *regs; struct clk *pclk; unsigned int i; u32 offset, readback; regs = platform_get_resource(&at32_intc0_device, IORESOURCE_MEM, 0); if (!regs) { printk(KERN_EMERG "intc: no mmio resource defined\n"); goto fail; } pclk = clk_get(&at32_intc0_device.dev, "pclk"); if (IS_ERR(pclk)) { printk(KERN_EMERG "intc: no clock defined\n"); goto fail; } clk_enable(pclk); intc0.regs = ioremap(regs->start, regs->end - regs->start + 1); if (!intc0.regs) { printk(KERN_EMERG "intc: failed to map registers (0x%08lx)\n", (unsigned long)regs->start); goto fail; } /* * Initialize all interrupts to level 0 (lowest priority). The * priority level may be changed by calling * irq_set_priority(). * */ offset = (unsigned long)&irq_level0 - (unsigned long)&_evba; for (i = 0; i < NR_INTERNAL_IRQS; i++) { intc_writel(&intc0, INTPR0 + 4 * i, offset); readback = intc_readl(&intc0, INTPR0 + 4 * i); if (readback == offset) set_irq_chip_and_handler(i, &intc0.chip, handle_simple_irq); } /* Unmask all interrupt levels */ sysreg_write(SR, (sysreg_read(SR) & ~(SR_I3M | SR_I2M | SR_I1M | SR_I0M))); return; fail: panic("Interrupt controller initialization failed!\n"); }
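/*
 * Sketch of the irq_set_priority() helper mentioned in the comment in
 * init_IRQ() above; it is not part of the original file. The irq_levelN
 * autovector symbols and the INTLEVEL field sitting in the top two bits of
 * INTPR are assumptions in this sketch.
 */
static void irq_set_priority_sketch(unsigned int irq, unsigned int level)
{
    extern void _evba(void);
    extern void irq_level0(void), irq_level1(void),
                irq_level2(void), irq_level3(void);
    static void (* const autovector[4])(void) = {
        irq_level0, irq_level1, irq_level2, irq_level3,
    };
    u32 intpr;

    /* autovector offset for the chosen level, relative to EVBA */
    intpr = (unsigned long)autovector[level] - (unsigned long)&_evba;
    /* assumed INTLEVEL position: bits 31:30 of INTPR */
    intpr |= level << 30;

    intc_writel(&intc0, INTPR0 + 4 * irq, intpr);
}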
unsigned long long get_ticks(void) { unsigned long lo, hi_now, hi_prev; do { hi_prev = timer_overflow; lo = sysreg_read(COUNT); hi_now = timer_overflow; } while (hi_prev != hi_now); return ((unsigned long long)hi_now << 32) | lo; }
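/*
 * Sketch only: get_ticks() above assumes something increments timer_overflow
 * exactly once per 32-bit COUNT wrap, forming the high half of the 64-bit
 * tick value. A minimal version of that bookkeeping could look like this;
 * the hook name is an assumption, and how the wrap is detected (compare
 * interrupt or polling) is platform-specific.
 */
static volatile unsigned long timer_overflow;

void timer_overflow_hook(void)
{
    timer_overflow++;   /* high 32 bits of the 64-bit tick counter */
}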
static ssize_t store_pc1event(struct sys_device *dev, const char *buf, size_t count) { unsigned long val; char *endp; val = simple_strtoul(buf, &endp, 0); if (endp == buf || val > 0x3f) return -EINVAL; val = (val << 18) | (sysreg_read(PCCR) & 0xff03ffff); sysreg_write(PCCR, val); return count; }
static void update_dtlb(unsigned long address, pte_t pte) { u32 tlbehi; u32 mmucr; /* * We're not changing the ASID here, so no need to flush the * pipeline. */ tlbehi = sysreg_read(TLBEHI); tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi)); tlbehi |= address & MMU_VPN_MASK; tlbehi |= SYSREG_BIT(TLBEHI_V); sysreg_write(TLBEHI, tlbehi); /* Does this mapping already exist? */ __builtin_tlbs(); mmucr = sysreg_read(MMUCR); if (mmucr & SYSREG_BIT(MMUCR_N)) { /* Not found -- pick a not-recently-accessed entry */ unsigned int rp; u32 tlbar = sysreg_read(TLBARLO); rp = 32 - fls(tlbar); if (rp == 32) { rp = 0; sysreg_write(TLBARLO, -1L); } mmucr = SYSREG_BFINS(DRP, rp, mmucr); sysreg_write(MMUCR, mmucr); } sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK); /* Let's go */ __builtin_tlbw(); }
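/*
 * Hedged sketch of the typical caller of update_dtlb() above: the
 * update_mmu_cache() hook invoked by the generic MM code after a fault has
 * filled in a PTE. This is a plausible reconstruction, not code from the
 * original file; the ptrace check mirrors what such hooks commonly do.
 */
void update_mmu_cache(struct vm_area_struct *vma,
                      unsigned long address, pte_t pte)
{
    unsigned long flags;

    /* ptrace may call this routine for another process */
    if (vma && current->active_mm != vma->vm_mm)
        return;

    local_irq_save(flags);
    update_dtlb(address, pte);
    local_irq_restore(flags);
}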
asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
{
    int syscall = 0;

    if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR)
        syscall = 1;

    if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
        do_signal(regs, &current->blocked, syscall);

    if (ti->flags & _TIF_NOTIFY_RESUME) {
        clear_thread_flag(TIF_NOTIFY_RESUME);
        tracehook_notify_resume(regs);
    }
}
static void show_dtlb_entry(unsigned int index) { u32 tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save; unsigned long flags; local_irq_save(flags); mmucr_save = sysreg_read(MMUCR); tlbehi_save = sysreg_read(TLBEHI); mmucr = SYSREG_BFINS(DRP, index, mmucr_save); sysreg_write(MMUCR, mmucr); __builtin_tlbr(); cpu_sync_pipeline(); tlbehi = sysreg_read(TLBEHI); tlbelo = sysreg_read(TLBELO); printk("%2u: %c %c %02x %05x %05x %o %o %c %c %c %c\n", index, SYSREG_BFEXT(TLBEHI_V, tlbehi) ? '1' : '0', SYSREG_BFEXT(G, tlbelo) ? '1' : '0', SYSREG_BFEXT(ASID, tlbehi), SYSREG_BFEXT(VPN, tlbehi) >> 2, SYSREG_BFEXT(PFN, tlbelo) >> 2, SYSREG_BFEXT(AP, tlbelo), SYSREG_BFEXT(SZ, tlbelo), SYSREG_BFEXT(TLBELO_C, tlbelo) ? 'C' : ' ', SYSREG_BFEXT(B, tlbelo) ? 'B' : ' ', SYSREG_BFEXT(W, tlbelo) ? 'W' : ' ', SYSREG_BFEXT(TLBELO_D, tlbelo) ? 'D' : ' '); sysreg_write(MMUCR, mmucr_save); sysreg_write(TLBEHI, tlbehi_save); cpu_sync_pipeline(); local_irq_restore(flags); }
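/*
 * Sketch of how show_dtlb_entry() above can be driven to dump the whole
 * data TLB. The 32-entry count matches the AT32AP DTLB but is an assumption
 * here, as is the column header.
 */
void dump_dtlb_sketch(void)
{
    unsigned int i;

    printk("ID  V G ASID VPN   PFN   AP SZ C B W D\n");
    for (i = 0; i < 32; i++)
        show_dtlb_entry(i);
}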
asmlinkage void do_debug_priv(struct pt_regs *regs) { unsigned long dc, ds; unsigned long die_val; ds = __mfdr(DBGREG_DS); pr_debug("do_debug_priv: pc = %08lx, ds = %08lx\n", regs->pc, ds); if (ds & DS_SSS) die_val = DIE_SSTEP; else die_val = DIE_BREAKPOINT; if (notify_die(die_val, regs, 0, SIGTRAP) == NOTIFY_STOP) return; if (likely(ds & DS_SSS)) { extern void itlb_miss(void); extern void tlb_miss_common(void); struct thread_info *ti; dc = __mfdr(DBGREG_DC); dc &= ~DC_SS; __mtdr(DBGREG_DC, dc); ti = current_thread_info(); set_ti_thread_flag(ti, TIF_BREAKPOINT); /* The TLB miss handlers don't check thread flags */ if ((regs->pc >= (unsigned long)&itlb_miss) && (regs->pc <= (unsigned long)&tlb_miss_common)) { __mtdr(DBGREG_BWA2A, sysreg_read(RAR_EX)); __mtdr(DBGREG_BWC2A, 0x40000001 | (get_asid() << 1)); } /* * If we're running in supervisor mode, the breakpoint * will take us where we want directly, no need to * single step. */ if ((regs->sr & MODE_MASK) != MODE_SUPERVISOR) set_ti_thread_flag(ti, TIF_SINGLE_STEP); } else { panic("Unable to handle debug trap at pc = %08lx\n", regs->pc); } }
static int comparator_next_event(unsigned long delta, struct clock_event_device *evdev) { unsigned long flags; raw_local_irq_save(flags); /* The time to read COUNT then update COMPARE must be less * than the min_delta_ns value for this clockevent source. */ sysreg_write(COMPARE, (sysreg_read(COUNT) + delta) ? : 1); raw_local_irq_restore(flags); return 0; }
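/*
 * Hedged sketch of the clock_event_device that comparator_next_event()
 * above would back. The name, rating and shift are illustrative; mult and
 * the mode callback required by older clockevents code are omitted and
 * would be filled in at registration time.
 */
static struct clock_event_device comparator_sketch = {
    .name           = "avr32_comparator",
    .features       = CLOCK_EVT_FEAT_ONESHOT,
    .shift          = 16,
    .rating         = 50,
    .set_next_event = comparator_next_event,
};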
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) { unsigned long dc; pr_debug("preparing to singlestep over %p (PC=%08lx)\n", p->addr, regs->pc); BUG_ON(!(sysreg_read(SR) & SYSREG_BIT(SR_D))); dc = ocd_read(DC); dc |= 1 << OCD_DC_SS_BIT; ocd_write(DC, dc); *p->addr = p->opcode; flush_icache_range((unsigned long)p->addr, (unsigned long)p->addr + sizeof(kprobe_opcode_t)); }
int twi_slave_init(volatile avr32_twi_t *twi, const twi_options_t *opt,
                   const twi_slave_fct_t *slave_fct)
{
    irqflags_t flags = sysreg_read(AVR32_SR);

    // Set pointer to TWIM instance for IT
    twi_inst = twi;

    // Disable TWI interrupts
    cpu_irq_disable();
    twi->idr = ~0UL;
    twi->sr;

    // Reset TWI
    twi->cr = AVR32_TWI_CR_SWRST_MASK;
    cpu_irq_restore(flags);

    // Dummy read in SR
    twi->sr;

    // Register twi_slave_interrupt_handler interrupt on level
    // CONF_TWI_IRQ_LEVEL
    flags = cpu_irq_save();
    irq_register_handler(&twi_slave_interrupt_handler, CONF_TWI_IRQ_LINE,
                         CONF_TWI_IRQ_LEVEL);
    cpu_irq_restore(flags);

    // Set slave address
    twi->smr = (opt->chip << AVR32_TWI_SMR_SADR_OFFSET);

    // Disable master transfer
    twi->cr = AVR32_TWI_CR_MSDIS_MASK;

    // Enable slave
    twi->cr = AVR32_TWI_CR_SVEN_MASK;

    // Get a pointer to applicative routines
    twi_slave_fct = *slave_fct;

    // Slave Access Interrupt Enable
    twi_it_mask = AVR32_TWI_IER_SVACC_MASK;
    twi->ier = twi_it_mask;

    // Everything went ok
    return TWI_SUCCESS;
}
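/*
 * Hedged usage sketch for twi_slave_init() above. The twi_slave_fct_t
 * callback names (rx, tx, stop) follow common ASF conventions but are
 * assumptions here, as are the clock value and the 0x28 slave address.
 */
static void example_rx(uint8_t byte) { /* consume a byte written by the master */ }
static uint8_t example_tx(void) { return 0x00; /* next byte for the master */ }
static void example_stop(void) { /* transfer completed */ }

static void twi_slave_setup_example(void)
{
    const twi_options_t opt = {
        .pba_hz = 66000000,
        .speed  = 100000,
        .chip   = 0x28,   /* our own slave address */
    };
    static const twi_slave_fct_t fct = {
        .rx   = example_rx,
        .tx   = example_tx,
        .stop = example_stop,
    };

    twi_slave_init(&AVR32_TWI, &opt, &fct);
}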
static ssize_t store_pcenable(struct sys_device *dev, const char *buf, size_t count) { unsigned long pccr, val; char *endp; val = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EINVAL; if (val) val = 1; pccr = sysreg_read(PCCR); pccr = (pccr & ~1UL) | val; sysreg_write(PCCR, pccr); return count; }
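/*
 * Hedged sketch of how show/store pairs like show_pcenable()/store_pcenable()
 * above are typically exposed through the old sysdev interface. It assumes a
 * sys_device registered elsewhere (the boot CPU's here) and that the
 * attribute signatures match the kernel generation these handlers target.
 */
static SYSDEV_ATTR(pcenable, 0644, show_pcenable, store_pcenable);
static SYSDEV_ATTR(pc1event, 0644, show_pc1event, store_pc1event);

static int __init pccnt_sysfs_sketch_init(void)
{
    struct sys_device *sysdev = get_cpu_sysdev(0);
    int err;

    err = sysdev_create_file(sysdev, &attr_pcenable);
    if (!err)
        err = sysdev_create_file(sysdev, &attr_pc1event);
    return err;
}
device_initcall(pccnt_sysfs_sketch_init);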
static void avr32_timer_ack(void)
{
    u32 count;

    /* Ack this timer interrupt and set the next one */
    expirelo += cycles_per_jiffy;

    /* COMPARE == 0 disables the compare interrupt, so never write 0 */
    if (expirelo == 0) {
        sysreg_write(COMPARE, expirelo + 1);
    } else {
        sysreg_write(COMPARE, expirelo);
    }

    /* Check to see if we have missed any timer interrupts */
    count = sysreg_read(COUNT);
    if ((count - expirelo) < 0x7fffffff) {
        expirelo = count + cycles_per_jiffy;
        sysreg_write(COMPARE, expirelo);
    }
}
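/*
 * Hedged sketch of the per-tick interrupt handler that would drive
 * avr32_timer_ack() above in a pre-clockevents setup. Only the ack and the
 * jiffies/process-time bookkeeping are shown; the handler name and how it
 * is requested are assumptions.
 */
static irqreturn_t timer_tick_sketch(int irq, void *dev_id)
{
    avr32_timer_ack();  /* re-arm COMPARE and catch missed ticks */
    do_timer(1);        /* advance jiffies */
    update_process_times(user_mode(get_irq_regs()));
    return IRQ_HANDLED;
}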
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) { unsigned long dc; pr_debug("preparing to singlestep over %p (PC=%08lx)\n", p->addr, regs->pc); BUG_ON(!(sysreg_read(SR) & SYSREG_BIT(SR_D))); dc = __mfdr(DBGREG_DC); dc |= DC_SS; __mtdr(DBGREG_DC, dc); /* * We must run the instruction from its original location * since it may actually reference PC. * * TODO: Do the instruction replacement directly in icache. */ *p->addr = p->opcode; flush_icache_range((unsigned long)p->addr, (unsigned long)p->addr + sizeof(kprobe_opcode_t)); }
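/*
 * For reference, a sketch of __mfdr()/__mtdr()-style debug register
 * accessors built on the AVR32 "mfdr"/"mtdr" instructions, as used by
 * prepare_singlestep() and do_debug_priv() above. The exact asm constraints
 * are assumptions; the register number must be a compile-time constant.
 */
#define __mfdr_sketch(addr)                                             \
({                                                                      \
    unsigned long __value;                                              \
    asm volatile("mfdr %0, %1" : "=r"(__value) : "i"(addr));            \
    __value;                                                            \
})

#define __mtdr_sketch(addr, value)                                      \
    asm volatile("mtdr %0, %1" : : "i"(addr), "r"(value))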
asmlinkage void do_IRQ(int level, struct pt_regs *regs) { struct pt_regs *old_regs; unsigned int irq; unsigned long status_reg; local_irq_disable(); old_regs = set_irq_regs(regs); irq_enter(); irq = intc_readl(&intc0, INTCAUSE0 - 4 * level); generic_handle_irq(irq); status_reg = sysreg_read(SR); status_reg &= ~(SYSREG_BIT(I0M) | SYSREG_BIT(I1M) | SYSREG_BIT(I2M) | SYSREG_BIT(I3M)); sysreg_write(SR, status_reg); irq_exit(); set_irq_regs(old_regs); }
asmlinkage struct pt_regs *do_debug(struct pt_regs *regs) { struct thread_info *ti; unsigned long trampoline_addr; u32 status; u32 ctrl; int code; status = ocd_read(DS); ti = current_thread_info(); code = TRAP_BRKPT; pr_debug("do_debug: status=0x%08x PC=0x%08lx SR=0x%08lx tif=0x%08lx\n", status, regs->pc, regs->sr, ti->flags); if (!user_mode(regs)) { unsigned long die_val = DIE_BREAKPOINT; if (status & (1 << OCD_DS_SSS_BIT)) die_val = DIE_SSTEP; if (notify_die(die_val, "ptrace", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) return regs; if ((status & (1 << OCD_DS_SWB_BIT)) && test_and_clear_ti_thread_flag( ti, TIF_BREAKPOINT)) { /* * Explicit breakpoint from trampoline or * exception/syscall/interrupt handler. * * The real saved regs are on the stack right * after the ones we saved on entry. */ regs++; pr_debug(" -> TIF_BREAKPOINT done, adjusted regs:" "PC=0x%08lx SR=0x%08lx\n", regs->pc, regs->sr); BUG_ON(!user_mode(regs)); if (test_thread_flag(TIF_SINGLE_STEP)) { pr_debug("Going to do single step...\n"); return regs; } /* * No TIF_SINGLE_STEP means we're done * stepping over a syscall. Do the trap now. */ code = TRAP_TRACE; } else if ((status & (1 << OCD_DS_SSS_BIT)) && test_ti_thread_flag(ti, TIF_SINGLE_STEP)) { pr_debug("Stepped into something, " "setting TIF_BREAKPOINT...\n"); set_ti_thread_flag(ti, TIF_BREAKPOINT); /* * We stepped into an exception, interrupt or * syscall handler. Some exception handlers * don't check for pending work, so we need to * set up a trampoline just in case. * * The exception entry code will undo the * trampoline stuff if it does a full context * save (which also means that it'll check for * pending work later.) */ if ((regs->sr & MODE_MASK) == MODE_EXCEPTION) { trampoline_addr = (unsigned long)&debug_trampoline; pr_debug("Setting up trampoline...\n"); ti->rar_saved = sysreg_read(RAR_EX); ti->rsr_saved = sysreg_read(RSR_EX); sysreg_write(RAR_EX, trampoline_addr); sysreg_write(RSR_EX, (MODE_EXCEPTION | SR_EM | SR_GM)); BUG_ON(ti->rsr_saved & MODE_MASK); } /* * If we stepped into a system call, we * shouldn't do a single step after we return * since the return address is right after the * "scall" instruction we were told to step * over. */ if ((regs->sr & MODE_MASK) == MODE_SUPERVISOR) { pr_debug("Supervisor; no single step\n"); clear_ti_thread_flag(ti, TIF_SINGLE_STEP); } ctrl = ocd_read(DC); ctrl &= ~(1 << OCD_DC_SS_BIT); ocd_write(DC, ctrl); return regs; } else { printk(KERN_ERR "Unexpected OCD_DS value: 0x%08x\n", status); printk(KERN_ERR "Thread flags: 0x%08lx\n", ti->flags); die("Unhandled debug trap in kernel mode", regs, SIGTRAP); } } else if (status & (1 << OCD_DS_SSS_BIT)) { /* Single step in user mode */ code = TRAP_TRACE; ctrl = ocd_read(DC); ctrl &= ~(1 << OCD_DC_SS_BIT); ocd_write(DC, ctrl); } pr_debug("Sending SIGTRAP: code=%d PC=0x%08lx SR=0x%08lx\n", code, regs->pc, regs->sr); clear_thread_flag(TIF_SINGLE_STEP); _exception(SIGTRAP, regs, code, instruction_pointer(regs)); return regs; }