static void etm_init_arch_data(void *info) { uint32_t val; struct etm_ctx *etmdata = info; ETM_UNLOCK(etmdata); etm_os_lock_init(etmdata); val = etm_readl(etmdata, TRCIDR1); etmdata->arch = BMVAL(val, 4, 11); /* number of resources trace unit supports */ val = etm_readl(etmdata, TRCIDR4); etmdata->nr_addr_cmp = BMVAL(val, 0, 3); etmdata->nr_data_cmp = BMVAL(val, 4, 7); etmdata->nr_resource = BMVAL(val, 16, 19); etmdata->nr_ss_cmp = BMVAL(val, 20, 23); etmdata->nr_ctxid_cmp = BMVAL(val, 24, 27); etmdata->nr_vmid_cmp = BMVAL(val, 28, 31); val = etm_readl(etmdata, TRCIDR5); etmdata->nr_seq_state = BMVAL(val, 25, 27); etmdata->nr_cntr = BMVAL(val, 28, 30); ETM_LOCK(etmdata); }
static int trace_start(struct tracectx *t) { u32 v; unsigned long timeout = TRACER_TIMEOUT; etb_unlock(t); etb_writel(t, 0, ETBR_FORMATTERCTRL); etb_writel(t, 1, ETBR_CTRL); etb_lock(t); /* configure etm */ v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz); if (t->flags & TRACER_CYCLE_ACC) v |= ETMCTRL_CYCLEACCURATE; etm_unlock(t); etm_writel(t, v, ETMR_CTRL); while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout) ; if (!timeout) { dev_dbg(t->dev, "Waiting for progbit to assert timed out\n"); etm_lock(t); return -EFAULT; } etm_setup_address_range(t, 1, (unsigned long)_stext, (unsigned long)_etext, 0, 0); etm_writel(t, 0, ETMR_TRACEENCTRL2); etm_writel(t, 0, ETMR_TRACESSCTRL); etm_writel(t, 0x6f, ETMR_TRACEENEVT); v &= ~ETMCTRL_PROGRAM; v |= ETMCTRL_PORTSEL; etm_writel(t, v, ETMR_CTRL); timeout = TRACER_TIMEOUT; while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout) ; if (!timeout) { dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n"); etm_lock(t); return -EFAULT; } etm_lock(t); t->flags |= TRACER_RUNNING; return 0; }
/*
 * Poll a status bit at @offset until bit @position reaches the state
 * requested by @value (non-zero: wait for 1, zero: wait for 0).
 * Returns 0 when the bit reaches the state, -EAGAIN after TIMEOUT_US tries.
 */
static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset, int position, int value)
{
	int remaining;
	u32 reg;

	for (remaining = TIMEOUT_US; remaining > 0; remaining--) {
		reg = etm_readl(drvdata, offset);
		/* Normalise both sides to 0/1 and compare */
		if (!!(reg & BIT(position)) == !!value)
			return 0;
		/*
		 * Delay is arbitrary - the specification doesn't say how long
		 * we are expected to wait. Extra check required to make sure
		 * we don't wait needlessly on the last iteration.
		 */
		if (remaining > 1)
			udelay(1);
	}

	return -EAGAIN;
}
int etm_get_trace_id(struct etm_drvdata *drvdata) { unsigned long flags; int trace_id = -1; if (!drvdata) goto out; if (!local_read(&drvdata->mode)) return drvdata->traceid; pm_runtime_get_sync(drvdata->dev); spin_lock_irqsave(&drvdata->spinlock, flags); CS_UNLOCK(drvdata->base); trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK); CS_LOCK(drvdata->base); spin_unlock_irqrestore(&drvdata->spinlock, flags); pm_runtime_put(drvdata->dev); out: return trace_id; }
/*
 * Discover this ETM's capabilities from its ID/configuration registers and
 * cache them in drvdata.  The unit is powered up only for the duration of
 * the reads and powered back down before returning.
 */
static void etm_init_arch_data(void *info)
{
	u32 etmidr;
	u32 etmccr;
	struct etm_drvdata *drvdata = info;

	/* Make sure all registers are accessible */
	etm_os_unlock(drvdata);
	CS_UNLOCK(drvdata->base);

	/* First dummy read */
	(void)etm_readl(drvdata, ETMPDSR);
	/* Provide power to ETM: ETMPDCR[3] == 1 */
	etm_set_pwrup(drvdata);
	/*
	 * Clear power down bit since when this bit is set writes to
	 * certain registers might be ignored.
	 */
	etm_clr_pwrdwn(drvdata);
	/*
	 * Set prog bit. It will be set from reset but this is included to
	 * ensure it is set
	 */
	etm_set_prog(drvdata);

	/* Find all capabilities */
	etmidr = etm_readl(drvdata, ETMIDR);
	/* ETM architecture version lives in ETMIDR[11:4] */
	drvdata->arch = BMVAL(etmidr, 4, 11);
	drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;

	drvdata->etmccer = etm_readl(drvdata, ETMCCER);
	etmccr = etm_readl(drvdata, ETMCCR);
	drvdata->etmccr = etmccr;
	/* ETMCCR[3:0] counts comparator *pairs*, hence the * 2 */
	drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
	drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
	drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
	drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
	drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);

	/* Power the unit back down until someone enables it */
	etm_set_pwrdwn(drvdata);
	etm_clr_pwrup(drvdata);
	CS_LOCK(drvdata->base);
}
/*
 * Record whether this trace unit implements the OS lock: TRCOSLSR with
 * bit 0 clear and bit 3 set indicates the lock is present.
 */
static void etm_os_lock_init(struct etm_ctx *etmdata)
{
	uint32_t oslsr;

	oslsr = etm_readl(etmdata, TRCOSLSR);
	etmdata->os_lock_present = !BVAL(oslsr, 0) && BVAL(oslsr, 3);
}
/* Clear the ETMCR power-down bit so register writes take effect. */
static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 ctl;

	ctl = etm_readl(drvdata, ETMCR);
	etm_writel(drvdata, ctl & ~ETMCR_PWD_DWN, ETMCR);
	/* Ensure pwrup completes before subsequent cp14 accesses */
	mb();
	isb();
}
/* Set the ETMCR power-down bit, draining pending accesses first. */
static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 ctl;

	/* Ensure pending cp14 accesses complete before setting pwrdwn */
	mb();
	isb();
	ctl = etm_readl(drvdata, ETMCR);
	etm_writel(drvdata, ctl | ETMCR_PWD_DWN, ETMCR);
}
/*
 * Stop this CPU's ETM: put it back into programming mode, snapshot the
 * sequencer state and counters into the config, then power it down.
 */
static void etm_disable_hw(void *info)
{
	struct etm_drvdata *drvdata = info;
	struct etm_config *config = &drvdata->config;
	int idx;

	CS_UNLOCK(drvdata->base);
	etm_set_prog(drvdata);

	/* Read back sequencer and counters for post trace analysis */
	config->seq_curr_state = etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK;
	for (idx = 0; idx < drvdata->nr_cntr; idx++)
		config->cntr_val[idx] = etm_readl(drvdata, ETMCNTVRn(idx));

	etm_set_pwrdwn(drvdata);
	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}
static void etm_disable_hw(void *info) { int i; struct etm_drvdata *drvdata = info; CS_UNLOCK(drvdata->base); etm_set_prog(drvdata); /* Program trace enable to low by using always false event */ etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR); /* Read back sequencer and counters for post trace analysis */ drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); for (i = 0; i < drvdata->nr_cntr; i++) drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i)); etm_set_pwrdwn(drvdata); CS_LOCK(drvdata->base); dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu); }
static ssize_t trace_info_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st; int datalen; etb_unlock(&tracer); datalen = etb_getdatalen(&tracer); etb_wa = etb_readl(&tracer, ETBR_WRITEADDR); etb_ra = etb_readl(&tracer, ETBR_READADDR); etb_st = etb_readl(&tracer, ETBR_STATUS); etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL); etb_lock(&tracer); etm_unlock(&tracer); etm_ctrl = etm_readl(&tracer, ETMR_CTRL); etm_st = etm_readl(&tracer, ETMR_STATUS); etm_lock(&tracer); return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n" "ETBR_WRITEADDR:\t%08x\n" "ETBR_READADDR:\t%08x\n" "ETBR_STATUS:\t%08x\n" "ETBR_FORMATTERCTRL:\t%08x\n" "ETMR_CTRL:\t%08x\n" "ETMR_STATUS:\t%08x\n", datalen, tracer.ncmppairs, etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st ); }
static ssize_t etmsr_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long flags, val; struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); pm_runtime_get_sync(drvdata->dev); spin_lock_irqsave(&drvdata->spinlock, flags); CS_UNLOCK(drvdata->base); val = etm_readl(drvdata, ETMSR); CS_LOCK(drvdata->base); spin_unlock_irqrestore(&drvdata->spinlock, flags); pm_runtime_put(drvdata->dev); return sprintf(buf, "%#lx\n", val); }
static void etm_clr_prog(struct etm_drvdata *drvdata) { u32 etmcr; etmcr = etm_readl(drvdata, ETMCR); etmcr &= ~ETMCR_ETM_PRG; etm_writel(drvdata, etmcr, ETMCR); /* * Recommended by spec for cp14 accesses to ensure etmcr write is * complete before polling etmsr */ isb(); if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) { dev_err(drvdata->dev, "%s: timeout observed when probing at offset %#x\n", __func__, ETMSR); } }
static int etm_trace_id(struct coresight_device *csdev) { struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); unsigned long flags; int trace_id = -1; if (!drvdata->enable) return drvdata->traceid; pm_runtime_get_sync(csdev->dev.parent); spin_lock_irqsave(&drvdata->spinlock, flags); CS_UNLOCK(drvdata->base); trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK); CS_LOCK(drvdata->base); spin_unlock_irqrestore(&drvdata->spinlock, flags); pm_runtime_put(csdev->dev.parent); return trace_id; }
static int trace_stop(struct tracectx *t) { unsigned long timeout = TRACER_TIMEOUT; etm_unlock(t); etm_writel(t, 0x440, ETMR_CTRL); while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout) ; if (!timeout) { dev_dbg(t->dev, "Waiting for progbit to assert timed out\n"); etm_lock(t); return -EFAULT; } etm_lock(t); etb_unlock(t); etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL); timeout = TRACER_TIMEOUT; while (etb_readl(t, ETBR_FORMATTERCTRL) & ETBFF_MANUAL_FLUSH && --timeout) ; if (!timeout) { dev_dbg(t->dev, "Waiting for formatter flush to commence " "timed out\n"); etb_lock(t); return -EFAULT; } etb_writel(t, 0, ETBR_CTRL); etb_lock(t); t->flags &= ~TRACER_RUNNING; return 0; }
static ssize_t traceid_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long val, flags; struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); if (!drvdata->enable) { val = drvdata->traceid; goto out; } pm_runtime_get_sync(drvdata->dev); spin_lock_irqsave(&drvdata->spinlock, flags); CS_UNLOCK(drvdata->base); val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK); CS_LOCK(drvdata->base); spin_unlock_irqrestore(&drvdata->spinlock, flags); pm_runtime_put(drvdata->dev); out: return sprintf(buf, "%#lx\n", val); }
/*
 * sysfs read: print one "counter %d: %x" line per implemented counter.
 * When the ETM is idle the cached values are reported under the spinlock;
 * when enabled the live ETMCNTVRn registers are read.
 *
 * Fix: both loops previously did sprintf(buf, ...), so every iteration
 * overwrote the previous line at the start of the buffer while the
 * returned length still accumulated all lines — the returned count
 * exceeded the actual string.  Write each line at buf + ret instead.
 */
static ssize_t cntr_val_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int i, ret = 0;
	u32 val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (!drvdata->enable) {
		spin_lock(&drvdata->spinlock);
		for (i = 0; i < drvdata->nr_cntr; i++)
			ret += sprintf(buf + ret, "counter %d: %x\n",
				       i, drvdata->cntr_val[i]);
		spin_unlock(&drvdata->spinlock);
		return ret;
	}

	for (i = 0; i < drvdata->nr_cntr; i++) {
		val = etm_readl(drvdata, ETMCNTVRn(i));
		ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
	}

	return ret;
}
static ssize_t seq_curr_state_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long val, flags; struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etm_config *config = &drvdata->config; if (!local_read(&drvdata->mode)) { val = config->seq_curr_state; goto out; } pm_runtime_get_sync(drvdata->dev); spin_lock_irqsave(&drvdata->spinlock, flags); CS_UNLOCK(drvdata->base); val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); CS_LOCK(drvdata->base); spin_unlock_irqrestore(&drvdata->spinlock, flags); pm_runtime_put(drvdata->dev); out: return sprintf(buf, "%#lx\n", val); }
/*
 * Program this CPU's ETM from the values cached in drvdata and let it run.
 * The register write order below is deliberate: power-up, unlock, enter
 * programming mode, program everything, then leave programming mode last.
 */
static void etm_enable_hw(void *info)
{
	int i;
	u32 etmcr;
	struct etm_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	/* Turn engine on */
	etm_clr_pwrdwn(drvdata);
	/* Apply power to trace registers */
	etm_set_pwrup(drvdata);
	/* Make sure all registers are accessible */
	etm_os_unlock(drvdata);

	/* Hold the ETM in programming mode while configuring it */
	etm_set_prog(drvdata);

	etmcr = etm_readl(drvdata, ETMCR);
	/* Preserve only the power-down and programming bits of ETMCR */
	etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
	etmcr |= drvdata->port_size;
	etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
	etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
	etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
	etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
	etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
	etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
	/* Address comparator values and their access types */
	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
		etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
		etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
	}
	/* Counter reload values, enable/reload events and current values */
	for (i = 0; i < drvdata->nr_cntr; i++) {
		etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
		etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
		etm_writel(drvdata, drvdata->cntr_rld_event[i],
			   ETMCNTRLDEVRn(i));
		etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
	}
	/* Sequencer transition events and current state */
	etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
	etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
	etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
	etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
	etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
	etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
	etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
	for (i = 0; i < drvdata->nr_ext_out; i++)
		etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
		etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i));
	etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
	etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
	/* No external input selected */
	etm_writel(drvdata, 0x0, ETMEXTINSELR);
	etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
	/* No auxiliary control selected */
	etm_writel(drvdata, 0x0, ETMAUXCR);
	etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
	/* No VMID comparator value selected */
	etm_writel(drvdata, 0x0, ETMVMIDCVR);

	/* Ensures trace output is enabled from this ETM */
	etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);

	/* Clear the programming bit last so the ETM starts with full config */
	etm_clr_prog(drvdata);

	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}
static inline void etm_save_state(struct etm_ctx *etmdata) { int i, j, count; i = 0; mb(); isb(); ETM_UNLOCK(etmdata); switch (etmdata->arch) { case ETM_ARCH_V4: etm_os_lock(etmdata); /* poll until programmers' model becomes stable */ for (count = TIMEOUT_US; (BVAL(etm_readl(etmdata, TRCSTATR), 1) != 1) && count > 0; count--) udelay(1); if (count == 0) pr_err_ratelimited("programmers model is not stable\n" ); /* main control and configuration registers */ etmdata->state[i++] = etm_readl(etmdata, TRCPROCSELR); etmdata->state[i++] = etm_readl(etmdata, TRCCONFIGR); etmdata->state[i++] = etm_readl(etmdata, TRCAUXCTLR); etmdata->state[i++] = etm_readl(etmdata, TRCEVENTCTL0R); etmdata->state[i++] = etm_readl(etmdata, TRCEVENTCTL1R); etmdata->state[i++] = etm_readl(etmdata, TRCSTALLCTLR); etmdata->state[i++] = etm_readl(etmdata, TRCTSCTLR); etmdata->state[i++] = etm_readl(etmdata, TRCSYNCPR); etmdata->state[i++] = etm_readl(etmdata, TRCCCCTLR); etmdata->state[i++] = etm_readl(etmdata, TRCBBCTLR); etmdata->state[i++] = etm_readl(etmdata, TRCTRACEIDR); etmdata->state[i++] = etm_readl(etmdata, TRCQCTLR); /* filtering control registers */ etmdata->state[i++] = etm_readl(etmdata, TRCVICTLR); etmdata->state[i++] = etm_readl(etmdata, TRCVIIECTLR); etmdata->state[i++] = etm_readl(etmdata, TRCVISSCTLR); etmdata->state[i++] = etm_readl(etmdata, TRCVIPCSSCTLR); etmdata->state[i++] = etm_readl(etmdata, TRCVDCTLR); etmdata->state[i++] = etm_readl(etmdata, TRCVDSACCTLR); etmdata->state[i++] = etm_readl(etmdata, TRCVDARCCTLR); /* derived resource registers */ for (j = 0; j < etmdata->nr_seq_state-1; j++) etmdata->state[i++] = etm_readl(etmdata, TRCSEQEVRn(j)); etmdata->state[i++] = etm_readl(etmdata, TRCSEQRSTEVR); etmdata->state[i++] = etm_readl(etmdata, TRCSEQSTR); etmdata->state[i++] = etm_readl(etmdata, TRCEXTINSELR); for (j = 0; j < etmdata->nr_cntr; j++) { etmdata->state[i++] = etm_readl(etmdata, TRCCNTRLDVRn(j)); etmdata->state[i++] = etm_readl(etmdata, TRCCNTCTLRn(j)); 
etmdata->state[i++] = etm_readl(etmdata, TRCCNTVRn(j)); } /* resource selection registers */ for (j = 0; j < etmdata->nr_resource; j++) etmdata->state[i++] = etm_readl(etmdata, TRCRSCTLRn(j)); /* comparator registers */ for (j = 0; j < etmdata->nr_addr_cmp * 2; j++) { etmdata->state[i++] = etm_readq(etmdata, TRCACVRn(j)); etmdata->state[i++] = etm_readq(etmdata, TRCACATRn(j)); } for (j = 0; j < etmdata->nr_data_cmp; j++) { etmdata->state[i++] = etm_readq(etmdata, TRCDVCVRn(j)); etmdata->state[i++] = etm_readq(etmdata, TRCDVCMRn(i)); } for (j = 0; j < etmdata->nr_ctxid_cmp; j++) etmdata->state[i++] = etm_readq(etmdata, TRCCIDCVRn(j)); etmdata->state[i++] = etm_readl(etmdata, TRCCIDCCTLR0); etmdata->state[i++] = etm_readl(etmdata, TRCCIDCCTLR1); for (j = 0; j < etmdata->nr_vmid_cmp; j++) etmdata->state[i++] = etm_readq(etmdata, TRCVMIDCVRn(j)); etmdata->state[i++] = etm_readl(etmdata, TRCVMIDCCTLR0); etmdata->state[i++] = etm_readl(etmdata, TRCVMIDCCTLR1); /* single-shot comparator registers */ for (j = 0; j < etmdata->nr_ss_cmp; j++) { etmdata->state[i++] = etm_readl(etmdata, TRCSSCCRn(j)); etmdata->state[i++] = etm_readl(etmdata, TRCSSCSRn(j)); etmdata->state[i++] = etm_readl(etmdata, TRCSSPCICRn(j)); } /* claim tag registers */ etmdata->state[i++] = etm_readl(etmdata, TRCCLAIMCLR); /* program ctrl register */ etmdata->state[i++] = etm_readl(etmdata, TRCPRGCTLR); /* ensure trace unit is idle to be powered down */ for (count = TIMEOUT_US; (BVAL(etm_readl(etmdata, TRCSTATR), 0) != 1) && count > 0; count--) udelay(1); if (count == 0) pr_err_ratelimited("timeout waiting for idle state\n"); atomic_notifier_call_chain(&etm_save_notifier_list, 0, NULL); break; default: pr_err_ratelimited("unsupported etm arch %d in %s\n", etmdata->arch, __func__); } ETM_LOCK(etmdata); }
/*
 * Platform probe: grow the global tracer's register/info arrays by one
 * slot for this ETM, register the misc device and sysfs files on the very
 * first probe, then record this instance's attributes.
 * All bookkeeping happens under tracer.mutex.
 */
static int etm_probe(struct platform_device *pdev)
{
	/* Set once the misc device/files exist; guards one-time setup */
	static int first_device = 0;
	struct etm_driver_data *data = dev_get_platdata(&pdev->dev);
	int ret = 0;
	void __iomem **new_regs;
	struct etm_info *new_info;
	int new_count;
	u32 id, config_code, config_code_extension, system_config;

	mutex_lock(&tracer.mutex);

	/* Grow both arrays; on failure the old arrays remain valid */
	new_count = tracer.nr_etm_regs + 1;
	new_regs = krealloc(tracer.etm_regs,
			    sizeof(tracer.etm_regs[0]) * new_count, GFP_KERNEL);
	if (!new_regs) {
		pr_err("Failed to allocate ETM register array\n");
		ret = -ENOMEM;
		goto out;
	}
	tracer.etm_regs = new_regs;

	new_info = krealloc(tracer.etm_info,
			    sizeof(tracer.etm_info[0]) * new_count, GFP_KERNEL);
	if (!new_info) {
		pr_err("Failed to allocate ETM info array\n");
		ret = -ENOMEM;
		goto out;
	}
	tracer.etm_info = new_info;

	tracer.etm_regs[tracer.nr_etm_regs] = data->etm_regs;

	/* One-time registration of the misc device and its sysfs files */
	if (!first_device) {
		first_device = 1;
		if (unlikely((ret = misc_register(&etm_device)) != 0)) {
			pr_err("Fail to register etm device\n");
			goto out;
		}
		if (unlikely((ret = create_files()) != 0)) {
			pr_err("Fail to create device files\n");
			goto deregister;
		}
	}

	/* Initialise the new slot from the platform data */
	memset(&(tracer.etm_info[tracer.nr_etm_regs]), 0,
	       sizeof(struct etm_info));
	tracer.etm_info[tracer.nr_etm_regs].enable = 1;
	tracer.etm_info[tracer.nr_etm_regs].is_ptm = data->is_ptm;
	tracer.etm_info[tracer.nr_etm_regs].pwr_down = data->pwr_down;
	/*
	 * NOTE(review): these four reads are never used afterwards —
	 * presumably kept for their read side effects or as leftovers;
	 * confirm before removing.
	 */
	id = etm_readl(&tracer, tracer.nr_etm_regs, ETMIDR);
	config_code = etm_readl(&tracer, tracer.nr_etm_regs, ETMCCR);
	config_code_extension = etm_readl(&tracer, tracer.nr_etm_regs, ETMCCER);
	system_config = etm_readl(&tracer, tracer.nr_etm_regs, ETMSCR);

	/* Commit the new slot only after everything above succeeded */
	tracer.nr_etm_regs = new_count;
out:
	mutex_unlock(&tracer.mutex);
	return ret;

deregister:
	misc_deregister(&etm_device);
	mutex_unlock(&tracer.mutex);
	return ret;
}
static void trace_start(void) { int i; int pwr_down; if (tracer.state == TRACE_STATE_TRACING) { pr_info("ETM trace is already running\n"); return; } get_online_cpus(); mutex_lock(&tracer.mutex); /* AHBAP_EN to enable master port, then ETR could write the trace to bus */ __raw_writel(DEM_UNLOCK_MAGIC, DEM_UNLOCK); mt65xx_reg_sync_writel(AHB_EN, AHBAP_EN); etb_unlock(&tracer); for (i = 0; i < tracer.nr_etm_regs; i++) { if (tracer.etm_info[i].pwr_down == NULL) { pwr_down = 0; } else { pwr_down = *(tracer.etm_info[i].pwr_down); } if (!pwr_down) { cs_cpu_unlock(tracer.etm_regs[i]); } } cs_cpu_unlock(tracer.tpiu_regs); cs_cpu_unlock(tracer.funnel_regs); cs_cpu_unlock(tracer.etb_regs); cs_cpu_funnel_setup(); cs_cpu_etb_setup(); /* Power-up TMs */ for (i = 0; i < tracer.nr_etm_regs; i++) { if (tracer.etm_info[i].pwr_down == NULL) { pwr_down = 0; } else { pwr_down = *(tracer.etm_info[i].pwr_down); } if (!pwr_down) { cs_cpu_ptm_powerup(tracer.etm_regs[i]); } } /* Disable TMs so that they can be set up safely */ for (i = 0; i < tracer.nr_etm_regs; i++) { if (tracer.etm_info[i].pwr_down == NULL) { pwr_down = 0; } else { pwr_down = *(tracer.etm_info[i].pwr_down); } if (!pwr_down) { cs_cpu_ptm_progbit(tracer.etm_regs[i]); } } /* Set up TMs */ for (i = 0; i < tracer.nr_etm_regs; i++) { if (tracer.etm_info[i].pwr_down == NULL) { pwr_down = 0; } else { pwr_down = *(tracer.etm_info[i].pwr_down); } if (!pwr_down) { cs_cpu_test_common_ptm_setup(tracer.etm_regs[i], i); } } /* Set up CoreSightTraceID */ for (i = 0; i < tracer.nr_etm_regs; i++) { if (tracer.etm_info[i].pwr_down == NULL) { pwr_down = 0; } else { pwr_down = *(tracer.etm_info[i].pwr_down); } if (!pwr_down) { cs_cpu_write(tracer.etm_regs[i], 0x200, i + 1); } } /* update the ETMCR and ETMCCER */ for (i = 0; i < tracer.nr_etm_regs; i++) { if (tracer.etm_info[i].pwr_down == NULL) { pwr_down = 0; } else { pwr_down = *(tracer.etm_info[i].pwr_down); } if (!pwr_down) { tracer.etm_info[i].etmcr = etm_readl(&tracer, i, ETMCR); 
tracer.etm_info[i].etmccer = etm_readl(&tracer, i, ETMCCER); } } /* Enable TMs now everything has been set up */ for (i = 0; i < tracer.nr_etm_regs; i++) { if (tracer.etm_info[i].pwr_down == NULL) { pwr_down = 0; } else { pwr_down = *(tracer.etm_info[i].pwr_down); } if (!pwr_down) { cs_cpu_ptm_clear_progbit(tracer.etm_regs[i]); } } /* Avoid DBG_sys being reset */ __raw_writel(DEM_UNLOCK_MAGIC, DEM_UNLOCK); __raw_writel(POWER_ON_RESET, DBGRST_ALL); __raw_writel(BUSCLK_EN, DBGBUSCLK_EN); mt65xx_reg_sync_writel(SYSCLK_EN, DBGSYSCLK_EN); tracer.state = TRACE_STATE_TRACING; etb_lock(&tracer); mutex_unlock(&tracer.mutex); put_online_cpus(); }
/*
 * Re-apply the ETMv4 programmers' model captured by etm_save_state().
 * The write order mirrors the save order exactly; the OS lock must still
 * be held (it is re-taken if something unlocked it) and is released only
 * after the final TRCPRGCTLR write.
 */
static inline void etm_restore_state(struct etm_ctx *etmdata)
{
	int i, j;

	i = 0;
	ETM_UNLOCK(etmdata);

	switch (etmdata->arch) {
	case ETM_ARCH_V4:
		atomic_notifier_call_chain(&etm_restore_notifier_list, 0, NULL);

		/* check OS lock is locked */
		if (BVAL(etm_readl(etmdata, TRCOSLSR), 1) != 1) {
			pr_err_ratelimited("OS lock is unlocked\n");
			etm_os_lock(etmdata);
		}

		/* main control and configuration registers */
		etm_writel(etmdata, etmdata->state[i++], TRCPROCSELR);
		etm_writel(etmdata, etmdata->state[i++], TRCCONFIGR);
		etm_writel(etmdata, etmdata->state[i++], TRCAUXCTLR);
		etm_writel(etmdata, etmdata->state[i++], TRCEVENTCTL0R);
		etm_writel(etmdata, etmdata->state[i++], TRCEVENTCTL1R);
		etm_writel(etmdata, etmdata->state[i++], TRCSTALLCTLR);
		etm_writel(etmdata, etmdata->state[i++], TRCTSCTLR);
		etm_writel(etmdata, etmdata->state[i++], TRCSYNCPR);
		etm_writel(etmdata, etmdata->state[i++], TRCCCCTLR);
		etm_writel(etmdata, etmdata->state[i++], TRCBBCTLR);
		etm_writel(etmdata, etmdata->state[i++], TRCTRACEIDR);
		etm_writel(etmdata, etmdata->state[i++], TRCQCTLR);
		/* filtering control registers */
		etm_writel(etmdata, etmdata->state[i++], TRCVICTLR);
		etm_writel(etmdata, etmdata->state[i++], TRCVIIECTLR);
		etm_writel(etmdata, etmdata->state[i++], TRCVISSCTLR);
		etm_writel(etmdata, etmdata->state[i++], TRCVIPCSSCTLR);
		etm_writel(etmdata, etmdata->state[i++], TRCVDCTLR);
		etm_writel(etmdata, etmdata->state[i++], TRCVDSACCTLR);
		etm_writel(etmdata, etmdata->state[i++], TRCVDARCCTLR);
		/* derived resources registers */
		for (j = 0; j < etmdata->nr_seq_state-1; j++)
			etm_writel(etmdata, etmdata->state[i++], TRCSEQEVRn(j));
		etm_writel(etmdata, etmdata->state[i++], TRCSEQRSTEVR);
		etm_writel(etmdata, etmdata->state[i++], TRCSEQSTR);
		etm_writel(etmdata, etmdata->state[i++], TRCEXTINSELR);
		for (j = 0; j < etmdata->nr_cntr; j++) {
			etm_writel(etmdata, etmdata->state[i++],
				   TRCCNTRLDVRn(j));
			etm_writel(etmdata, etmdata->state[i++],
				   TRCCNTCTLRn(j));
			etm_writel(etmdata, etmdata->state[i++],
				   TRCCNTVRn(j));
		}
		/* resource selection registers */
		for (j = 0; j < etmdata->nr_resource; j++)
			etm_writel(etmdata, etmdata->state[i++], TRCRSCTLRn(j));
		/* comparator registers */
		for (j = 0; j < etmdata->nr_addr_cmp * 2; j++) {
			etm_writeq(etmdata, etmdata->state[i++], TRCACVRn(j));
			etm_writeq(etmdata, etmdata->state[i++], TRCACATRn(j));
		}
		for (j = 0; j < etmdata->nr_data_cmp; j++) {
			etm_writeq(etmdata, etmdata->state[i++], TRCDVCVRn(j));
			etm_writeq(etmdata, etmdata->state[i++], TRCDVCMRn(j));
		}
		for (j = 0; j < etmdata->nr_ctxid_cmp; j++)
			etm_writeq(etmdata, etmdata->state[i++], TRCCIDCVRn(j));
		etm_writel(etmdata, etmdata->state[i++], TRCCIDCCTLR0);
		etm_writel(etmdata, etmdata->state[i++], TRCCIDCCTLR1);
		for (j = 0; j < etmdata->nr_vmid_cmp; j++)
			etm_writeq(etmdata, etmdata->state[i++],
				   TRCVMIDCVRn(j));
		etm_writel(etmdata, etmdata->state[i++], TRCVMIDCCTLR0);
		etm_writel(etmdata, etmdata->state[i++], TRCVMIDCCTLR1);
		/* single-shot comparator registers */
		for (j = 0; j < etmdata->nr_ss_cmp; j++) {
			etm_writel(etmdata, etmdata->state[i++], TRCSSCCRn(j));
			etm_writel(etmdata, etmdata->state[i++], TRCSSCSRn(j));
			etm_writel(etmdata, etmdata->state[i++],
				   TRCSSPCICRn(j));
		}
		/* claim tag registers */
		etm_writel(etmdata, etmdata->state[i++], TRCCLAIMSET);
		/* program ctrl register */
		etm_writel(etmdata, etmdata->state[i++], TRCPRGCTLR);

		etm_os_unlock(etmdata);
		break;
	default:
		pr_err_ratelimited("unsupported etm arch %d in %s\n",
				   etmdata->arch, __func__);
	}

	ETM_LOCK(etmdata);
}
/*
 * AMBA probe: map the single global ETM's registers, read its comparator
 * count, park it in programming mode and expose the sysfs controls.
 * Only one ETM instance is supported (tracer is a file-scope singleton).
 */
static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct tracectx *t = &tracer;
	int ret = 0;

	/* Singleton: refuse a second ETM */
	if (t->etm_regs) {
		dev_dbg(&dev->dev, "ETM already initialized\n");
		ret = -EBUSY;
		goto out;
	}

	ret = amba_request_regions(dev, NULL);
	if (ret)
		goto out;

	t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
	if (!t->etm_regs) {
		ret = -ENOMEM;
		goto out_release;
	}

	amba_set_drvdata(dev, t);

	mutex_init(&t->mutex);
	t->dev = &dev->dev;
	t->flags = TRACER_CYCLE_ACC;
	t->etm_portsz = 1;

	etm_unlock(t);
	(void)etm_readl(t, ETMMR_PDSR);	/* dummy first read */
	/* NOTE(review): &tracer is the same object as t — use of both is
	 * inconsistent but harmless; consider unifying on t. */
	(void)etm_readl(&tracer, ETMMR_OSSRR);

	/* Low nibble of the configuration code register = comparator pairs */
	t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf;
	/* Park the ETM in programming mode until tracing is started */
	etm_writel(t, 0x440, ETMR_CTRL);
	etm_lock(t);

	ret = sysfs_create_file(&dev->dev.kobj, &trace_running_attr.attr);
	if (ret)
		goto out_unmap;

	/* failing to create any of these two is not fatal */
	ret = sysfs_create_file(&dev->dev.kobj, &trace_info_attr.attr);
	if (ret)
		dev_dbg(&dev->dev, "Failed to create trace_info in sysfs\n");

	ret = sysfs_create_file(&dev->dev.kobj, &trace_mode_attr.attr);
	if (ret)
		dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");

	dev_dbg(t->dev, "ETM AMBA driver initialized.\n");

out:
	return ret;

out_unmap:
	amba_set_drvdata(dev, NULL);
	iounmap(t->etm_regs);
out_release:
	amba_release_regions(dev);
	return ret;
}
/*
 * Claim this ETM and program it from the cached config, then release it
 * from programming mode.  Returns 0 on success or the claim error.  The
 * register write order is deliberate: claim, power-up, program, unprog.
 */
static int etm_enable_hw(struct etm_drvdata *drvdata)
{
	int i, rc;
	u32 etmcr;
	struct etm_config *config = &drvdata->config;

	CS_UNLOCK(drvdata->base);

	rc = coresight_claim_device_unlocked(drvdata->base);
	if (rc)
		goto done;

	/* Turn engine on */
	etm_clr_pwrdwn(drvdata);
	/* Apply power to trace registers */
	etm_set_pwrup(drvdata);
	/* Make sure all registers are accessible */
	etm_os_unlock(drvdata);

	/* Hold the ETM in programming mode while configuring it */
	etm_set_prog(drvdata);

	etmcr = etm_readl(drvdata, ETMCR);
	/* Clear setting from a previous run if need be */
	etmcr &= ~ETM3X_SUPPORTED_OPTIONS;
	etmcr |= drvdata->port_size;
	etmcr |= ETMCR_ETM_EN;
	etm_writel(drvdata, config->ctrl | etmcr, ETMCR);
	etm_writel(drvdata, config->trigger_event, ETMTRIGGER);
	etm_writel(drvdata, config->startstop_ctrl, ETMTSSCR);
	etm_writel(drvdata, config->enable_event, ETMTEEVR);
	etm_writel(drvdata, config->enable_ctrl1, ETMTECR1);
	etm_writel(drvdata, config->fifofull_level, ETMFFLR);
	/* Address comparator values and their access types */
	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
		etm_writel(drvdata, config->addr_val[i], ETMACVRn(i));
		etm_writel(drvdata, config->addr_acctype[i], ETMACTRn(i));
	}
	/* Counter reload values, enable/reload events and current values */
	for (i = 0; i < drvdata->nr_cntr; i++) {
		etm_writel(drvdata, config->cntr_rld_val[i], ETMCNTRLDVRn(i));
		etm_writel(drvdata, config->cntr_event[i], ETMCNTENRn(i));
		etm_writel(drvdata, config->cntr_rld_event[i],
			   ETMCNTRLDEVRn(i));
		etm_writel(drvdata, config->cntr_val[i], ETMCNTVRn(i));
	}
	/* Sequencer transition events and current state */
	etm_writel(drvdata, config->seq_12_event, ETMSQ12EVR);
	etm_writel(drvdata, config->seq_21_event, ETMSQ21EVR);
	etm_writel(drvdata, config->seq_23_event, ETMSQ23EVR);
	etm_writel(drvdata, config->seq_31_event, ETMSQ31EVR);
	etm_writel(drvdata, config->seq_32_event, ETMSQ32EVR);
	etm_writel(drvdata, config->seq_13_event, ETMSQ13EVR);
	etm_writel(drvdata, config->seq_curr_state, ETMSQR);
	for (i = 0; i < drvdata->nr_ext_out; i++)
		etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
		etm_writel(drvdata, config->ctxid_pid[i], ETMCIDCVRn(i));
	etm_writel(drvdata, config->ctxid_mask, ETMCIDCMR);
	etm_writel(drvdata, config->sync_freq, ETMSYNCFR);
	/* No external input selected */
	etm_writel(drvdata, 0x0, ETMEXTINSELR);
	etm_writel(drvdata, config->timestamp_event, ETMTSEVR);
	/* No auxiliary control selected */
	etm_writel(drvdata, 0x0, ETMAUXCR);
	etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
	/* No VMID comparator value selected */
	etm_writel(drvdata, 0x0, ETMVMIDCVR);

	/* Clear the programming bit last so the ETM starts fully configured */
	etm_clr_prog(drvdata);

done:
	CS_LOCK(drvdata->base);

	dev_dbg(&drvdata->csdev->dev, "cpu: %d enable smp call done: %d\n",
		drvdata->cpu, rc);
	return rc;
}