/* handle the perfmon overflow vector */ static void pa6t_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr) { unsigned long pc = mfspr(SPRN_PA6T_SIAR); int is_kernel = is_kernel_addr(pc); u64 val; int i; u64 mmcr0; /* disable perfmon counting until rfid */ mmcr0 = mfspr(SPRN_PA6T_MMCR0); mtspr(SPRN_PA6T_MMCR0, mmcr0 | PA6T_MMCR0_HANDDIS); /* Record samples. We've got one global bit for whether a sample * was taken, so add it for any counter that triggered overflow. */ for (i = 0; i < cur_cpu_spec->num_pmcs; i++) { val = ctr_read(i); if (val & (0x1UL << 39)) { /* Overflow bit set */ if (oprofile_running && ctr[i].enabled) { if (mmcr0 & PA6T_MMCR0_SIARLOG) oprofile_add_ext_sample(pc, regs, i, is_kernel); ctr_write(i, reset_value[i]); } else { ctr_write(i, 0UL); } } } /* Restore mmcr0 to a good known value since the PMI changes it */ mmcr0 = mmcr0_val | PA6T_MMCR0_HANDDIS; mtspr(SPRN_PA6T_MMCR0, mmcr0); }
static void fsl_emb_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr) { unsigned long pc; int is_kernel; int val; int i; /* set the PMM bit (see comment below) */ mtmsr(mfmsr() | MSR_PMM); pc = regs->nip; is_kernel = is_kernel_addr(pc); for (i = 0; i < num_counters; ++i) { val = ctr_read(i); if (val < 0) { if (oprofile_running && ctr[i].enabled) { oprofile_add_ext_sample(pc, regs, i, is_kernel); ctr_write(i, reset_value[i]); } else { ctr_write(i, 0); } } } /* The freeze bit was set by the interrupt. */ /* Clear the freeze bit, and reenable the interrupt. * The counters won't actually start until the rfi clears * the PMM bit */ pmc_start_ctrs(1); }
/* Setup a receive transfer. (non blocking) */ int usb_drv_recv(int endpoint, void* ptr, int length) { struct endpoint_t *ep; int ep_num = EP_NUM(endpoint); if (ep_num == 0) { ep = &ctrlep[DIR_OUT]; ctr_read(); } else { ep = &endpoints[ep_num]; /* clear NAK bit */ BOUT_RXCON(ep_num) &= ~(1<<3); BOUT_DMAOUTLMADDR(ep_num) = (uint32_t)ptr; BOUT_DMAOUTCTL(ep_num) = (1<<1); } ep->buf = ptr; ep->len = ep->cnt = length; return 0; }
/*
 * FSL embedded perfmon interrupt handler: add one oprofile sample per
 * overflowed counter, then restart counting.
 */
static void fsl_emb_handle_interrupt(struct pt_regs *regs,
				     struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;

	/* Sample address is the interrupted instruction pointer. */
	pc = regs->nip;
	is_kernel = is_kernel_addr(pc);

	for (i = 0; i < num_counters; ++i) {
		val = ctr_read(i);
		/* A negative counter value means it overflowed. */
		if (val < 0) {
			if (oprofile_running && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				ctr_write(i, reset_value[i]);
			} else {
				/* Counter not profiled: park it at zero. */
				ctr_write(i, 0);
			}
		}
	}

	/* The freeze bit was set by the interrupt. */
	/* Clear the freeze bit, and reenable the interrupt. The
	 * counters won't actually start until the rfi clears the PMM
	 * bit. The PMM bit should not be set until after the interrupt
	 * is cleared to avoid it getting lost in some hypervisor
	 * environments. */
	mtmsr(mfmsr() | MSR_PMM);
	pmc_start_ctrs(1);
}
static int bfin533_reg_setup(struct op_counter_config *ctr) { unsigned int pfctl = ctr_read(); unsigned int count[2]; /* set Blackfin perf monitor regs with ctr */ if (ctr[0].enabled) { pfctl |= (PM_CTL0_ENABLE | ((char)ctr[0].event << 5)); count[0] = 0xFFFFFFFF - ctr[0].count; curr_count[0] = count[0]; } if (ctr[1].enabled) { pfctl |= (PM_CTL1_ENABLE | ((char)ctr[1].event << 16)); count[1] = 0xFFFFFFFF - ctr[1].count; curr_count[1] = count[1]; } pr_debug("ctr[0].enabled=%d,ctr[1].enabled=%d,ctr[0].event<<5=0x%x,ctr[1].event<<16=0x%x\n", ctr[0].enabled, ctr[1].enabled, ctr[0].event << 5, ctr[1].event << 16); pfctl |= COUNT_EDGE_ONLY; curr_pfctl = pfctl; pr_debug("write 0x%x to pfctl\n", pfctl); ctr_write(pfctl); count_write(count); return 0; }
/*
 * Performance-monitor overflow interrupt handler: push one sample per
 * counter into the oprofile buffer, then rearm the counters from the
 * saved curr_pfctl/curr_count values.  Returns 0, or -1 if the interrupt
 * fires while oprofile is stopped.
 */
int pm_overflow_handler(int irq, struct pt_regs *regs)
{
	int is_kernel;
	int i, cpu;
	unsigned int pc, pfctl;
	unsigned int count[2];

	pr_debug("get interrupt in %s\n", __FUNCTION__);
	if (oprofile_running == 0) {
		pr_debug("error: entering interrupt when oprofile is stopped.\n\r");
		return -1;
	}

	/* NOTE(review): is_kernel, cpu, pc and pfctl are computed but never
	 * used below; the register reads may have hardware side effects —
	 * confirm before removing any of them. */
	is_kernel = get_kernel();
	cpu = smp_processor_id();
	pc = regs->pc;
	pfctl = ctr_read();

	/* read the two event counter regs */
	count_read(count);

	/* if the counter overflows, add sample to oprofile buffer */
	/* NOTE(review): a sample is added for both counters unconditionally;
	 * no per-counter overflow status is checked here — verify this is
	 * the intended behavior. */
	for (i = 0; i < 2; ++i) {
		if (oprofile_running) {
			oprofile_add_sample(regs, i);
		}
	}

	/* reset the perfmon counter */
	ctr_write(curr_pfctl);
	count_write(curr_count);

	return 0;
}
/*
 * PA6T perfmon overflow (PMI) handler: disable counting, sample every
 * counter whose overflow bit is set, then restore MMCR0 to a known value.
 */
static void pa6t_handle_interrupt(struct pt_regs *regs,
				  struct op_counter_config *ctr)
{
	unsigned long pc = mfspr(SPRN_PA6T_SIAR);
	int is_kernel = is_kernel_addr(pc);
	u64 val;
	int i;
	u64 mmcr0;

	/* Disable perfmon counting until the rfid at exception exit. */
	mmcr0 = mfspr(SPRN_PA6T_MMCR0);
	mtspr(SPRN_PA6T_MMCR0, mmcr0 | PA6T_MMCR0_HANDDIS);

	for (i = 0; i < cur_cpu_spec->num_pmcs; i++) {
		val = ctr_read(i);
		/* Bit 39 of the counter value is the overflow flag. */
		if (val & (0x1UL << 39)) {
			if (oprofile_running && ctr[i].enabled) {
				/* Only sample if SIAR logged a valid address. */
				if (mmcr0 & PA6T_MMCR0_SIARLOG)
					oprofile_add_ext_sample(pc, regs, i, is_kernel);
				ctr_write(i, reset_value[i]);
			} else {
				ctr_write(i, 0UL);
			}
		}
	}

	/* Restore MMCR0 to the configured value; the PMI changed it. */
	mmcr0 = mmcr0_val | PA6T_MMCR0_HANDDIS;
	mtspr(SPRN_PA6T_MMCR0, mmcr0);
}
static void power4_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr) { unsigned long pc; int is_kernel; int val; int i; unsigned int mmcr0; unsigned long mmcra; mmcra = mfspr(SPRN_MMCRA); pc = get_pc(regs); is_kernel = get_kernel(pc, mmcra); /* set the PMM bit (see comment below) */ mtmsrd(mfmsr() | MSR_PMM); for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) { val = ctr_read(i); if (val < 0) { if (oprofile_running && ctr[i].enabled) { oprofile_add_ext_sample(pc, regs, i, is_kernel); ctr_write(i, reset_value[i]); } else { ctr_write(i, 0); } } } mmcr0 = mfspr(SPRN_MMCR0); /* reset the perfmon trigger */ mmcr0 |= MMCR0_PMXE; /* * We must clear the PMAO bit on some (GQ) chips. Just do it * all the time */ mmcr0 &= ~MMCR0_PMAO; /* Clear the appropriate bits in the MMCRA */ mmcra &= ~cur_cpu_spec->oprofile_mmcra_clear; mtspr(SPRN_MMCRA, mmcra); /* * now clear the freeze bit, counting will not start until we * rfid from this exception, because only at that point will * the PMM bit be cleared */ mmcr0 &= ~MMCR0_FC; mtspr(SPRN_MMCR0, mmcr0); }
/* Freeze both performance counters and mark profiling as stopped. */
static void bfin533_stop(void)
{
	int ctl = ctr_read();

	ctl &= ~PM_ENABLE;
	ctr_write(ctl);	/* freeze counters */

	oprofile_running = 0;
	pr_debug("stop oprofile counter \n");
}
/* Unfreeze the performance counters and mark profiling as running. */
static int bfin533_start(struct op_counter_config *ctr)
{
	unsigned int ctl = ctr_read() | PM_ENABLE;

	curr_pfctl = ctl;
	ctr_write(ctl);

	oprofile_running = 1;
	pr_debug("start oprofile counter \n");

	return 0;
}
static void rs64_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr) { unsigned int mmcr0; int val; int i; unsigned long pc = mfspr(SPRN_SIAR); int is_kernel = (pc >= KERNELBASE); /* set the PMM bit (see comment below) */ mtmsrd(mfmsr() | MSR_PMM); for (i = 0; i < num_counters; ++i) { val = ctr_read(i); if (val < 0) { if (ctr[i].enabled) { oprofile_add_pc(pc, is_kernel, i); ctr_write(i, reset_value[i]); } else { ctr_write(i, 0); } } } mmcr0 = mfspr(SPRN_MMCR0); /* reset the perfmon trigger */ mmcr0 |= MMCR0_PMXE; /* * now clear the freeze bit, counting will not start until we * rfid from this exception, because only at that point will * the PMM bit be cleared */ mmcr0 &= ~MMCR0_FC; mtspr(SPRN_MMCR0, mmcr0); }
/* UDC ISR function */ void INT_UDC(void) { uint32_t txstat, rxstat; int tmp, ep_num; /* read what caused UDC irq */ uint32_t intsrc = INT2FLAG & 0x7fffff; if (intsrc & (1<<1)) /* setup interrupt */ { setup_received(); } else if (intsrc & (1<<2)) /* ep0 in interrupt */ { txstat = TX0STAT; /* read clears flags */ /* TODO handle errors */ if (txstat & (1<<18)) /* check TxACK flag */ { if (ctrlep[DIR_IN].cnt >= 0) { /* we still have data to send (or ZLP) */ ctr_write(); } else { /* final ack received */ usb_core_transfer_complete(0, /* ep */ USB_DIR_IN, /* dir */ 0, /* status */ ctrlep[DIR_IN].len); /* length */ /* release semaphore for blocking transfer */ if (ctrlep[DIR_IN].block) semaphore_release(&ctrlep[DIR_IN].complete); } } } else if (intsrc & (1<<3)) /* ep0 out interrupt */ { rxstat = RX0STAT; /* TODO handle errors */ if (rxstat & (1<<18)) /* RxACK */ { if (ctrlep[DIR_OUT].cnt > 0) ctr_read(); else usb_core_transfer_complete(0, /* ep */ USB_DIR_OUT, /* dir */ 0, /* status */ ctrlep[DIR_OUT].len); /* length */ } } else if (intsrc & (1<<4)) /* usb reset */ { usb_drv_init(); } else if (intsrc & (1<<5)) /* usb resume */ { TX0CON |= (1<<0); /* TxClr */ TX0CON &= ~(1<<0); RX0CON |= (1<<1); /* RxClr */ RX0CON &= (1<<1); } else if (intsrc & (1<<6)) /* usb suspend */ { } else if (intsrc & (1<<7)) /* usb connect */ { } else { /* lets figure out which ep generated irq */ tmp = intsrc >> 7; for (ep_num=1; ep_num < 15; ep_num++) { tmp >>= ep_num; if (tmp & 0x01) break; } if (intsrc & ((1<<8)|(1<<11)|(1<<14)|(1<<17)|(1<<20))) { /* bulk out */ rxstat = BOUT_RXSTAT(ep_num); /* TODO handle errors */ if (rxstat & (1<<18)) /* RxACK */ { if (endpoints[ep_num].cnt > 0) blk_read(ep_num); else usb_core_transfer_complete(ep_num, /* ep */ USB_DIR_OUT, /* dir */ 0, /* status */ endpoints[ep_num].len); /* length */ } } else if (intsrc & ((1<<9)|(1<<12)|(1<<15)|(1<<18)|(1<<21))) { /* bulk in */ txstat = BIN_TXSTAT(ep_num); /* TODO handle errors */ if (txstat & (1<<18)) /* check TxACK 
flag */ { if (endpoints[ep_num].cnt >= 0) { /* we still have data to send (or ZLP) */ blk_write(ep_num); } else { /* final ack received */ usb_core_transfer_complete(ep_num, /* ep */ USB_DIR_IN, /* dir */ 0, /* status */ endpoints[ep_num].len); /* length */ /* release semaphore for blocking transfer */ if (endpoints[ep_num].block) semaphore_release(&endpoints[ep_num].complete); } } } else if (intsrc & ((1<<10)|(1<13)|(1<<16)|(1<<19)|(1<<22))) { /* int in */ txstat = IIN_TXSTAT(ep_num); /* TODO handle errors */ if (txstat & (1<<18)) /* check TxACK flag */ { if (endpoints[ep_num].cnt >= 0) { /* we still have data to send (or ZLP) */ int_write(ep_num); } else { /* final ack received */ usb_core_transfer_complete(ep_num, /* ep */ USB_DIR_IN, /* dir */ 0, /* status */ endpoints[ep_num].len); /* length */ /* release semaphore for blocking transfer */ if (endpoints[ep_num].block) semaphore_release(&endpoints[ep_num].complete); } } } } }