/*
 * reset_store - sysfs write handler restoring the tracer's programmable
 * resources to their default state.
 *
 * Any non-zero hexadecimal value written by user space triggers the
 * reset; writing zero is a no-op.  Returns @size on success or the
 * kstrtoul() error code on malformed input.
 */
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	unsigned long flag;
	int rc, idx;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	rc = kstrtoul(buf, 16, &flag);
	if (rc)
		return rc;

	if (!flag)
		return size;

	spin_lock(&drvdata->spinlock);

	drvdata->mode = ETM_MODE_EXCLUDE;
	drvdata->ctrl = 0x0;
	drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
	drvdata->startstop_ctrl = 0x0;
	drvdata->addr_idx = 0x0;

	/* Invalidate every address comparator */
	for (idx = 0; idx < drvdata->nr_addr_cmp; idx++) {
		drvdata->addr_val[idx] = 0x0;
		drvdata->addr_acctype[idx] = 0x0;
		drvdata->addr_type[idx] = ETM_ADDR_TYPE_NONE;
	}

	drvdata->cntr_idx = 0x0;

	etm_set_default(drvdata);
	spin_unlock(&drvdata->spinlock);

	return size;
}
static void etm_init_default_data(struct etm_drvdata *drvdata) { /* * A trace ID of value 0 is invalid, so let's start at some * random value that fits in 7 bits and will be just as good. */ static int etm3x_traceid = 0x10; u32 flags = (1 << 0 | /* instruction execute*/ 3 << 3 | /* ARM instruction */ 0 << 5 | /* No data value comparison */ 0 << 7 | /* No exact mach */ 0 << 8 | /* Ignore context ID */ 0 << 10); /* Security ignored */ /* * Initial configuration only - guarantees sources handled by * this driver have a unique ID at startup time but not between * all other types of sources. For that we lean on the core * framework. */ drvdata->traceid = etm3x_traceid++; drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN); drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1; if (drvdata->nr_addr_cmp >= 2) { drvdata->addr_val[0] = (u32) _stext; drvdata->addr_val[1] = (u32) _etext; drvdata->addr_acctype[0] = flags; drvdata->addr_acctype[1] = flags; drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE; drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE; } etm_set_default(drvdata); }
/*
 * reset_store - sysfs write handler wiping the cached ETM configuration
 * and re-seeding it with driver defaults.
 *
 * A non-zero hexadecimal value from user space performs the reset; zero
 * is a no-op.  Returns @size, or the kstrtoul() error on bad input.
 */
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int rc, idx;
	unsigned long flag;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	rc = kstrtoul(buf, 16, &flag);
	if (rc)
		return rc;

	if (flag) {
		spin_lock(&drvdata->spinlock);

		/* Zero everything, then restore the non-zero defaults */
		memset(config, 0, sizeof(*config));
		config->mode = ETM_MODE_EXCLUDE;
		config->trigger_event = ETM_DEFAULT_EVENT_VAL;
		for (idx = 0; idx < drvdata->nr_addr_cmp; idx++)
			config->addr_type[idx] = ETM_ADDR_TYPE_NONE;

		etm_set_default(config);
		spin_unlock(&drvdata->spinlock);
	}

	return size;
}
static int etm_parse_event_config(struct etm_drvdata *drvdata, struct perf_event *event) { struct etm_config *config = &drvdata->config; struct perf_event_attr *attr = &event->attr; if (!attr) return -EINVAL; /* Clear configuration from previous run */ memset(config, 0, sizeof(struct etm_config)); if (attr->exclude_kernel) config->mode = ETM_MODE_EXCL_KERN; if (attr->exclude_user) config->mode = ETM_MODE_EXCL_USER; /* Always start from the default config */ etm_set_default(config); /* * By default the tracers are configured to trace the whole address * range. Narrow the field only if requested by user space. */ if (config->mode) etm_config_trace_mode(config); /* * At this time only cycle accurate, return stack and timestamp * options are available. */ if (attr->config & ~ETM3X_SUPPORTED_OPTIONS) return -EINVAL; config->ctrl = attr->config; /* * Possible to have cores with PTM (supports ret stack) and ETM * (never has ret stack) on the same SoC. So if we have a request * for return stack that can't be honoured on this core then * clear the bit - trace will still continue normally */ if ((config->ctrl & ETMCR_RETURN_STACK) && !(drvdata->etmccer & ETMCCER_RETSTACK)) config->ctrl &= ~ETMCR_RETURN_STACK; return 0; }
static int etm_parse_event_config(struct etm_drvdata *drvdata, struct perf_event_attr *attr) { struct etm_config *config = &drvdata->config; if (!attr) return -EINVAL; /* Clear configuration from previous run */ memset(config, 0, sizeof(struct etm_config)); if (attr->exclude_kernel) config->mode = ETM_MODE_EXCL_KERN; if (attr->exclude_user) config->mode = ETM_MODE_EXCL_USER; /* Always start from the default config */ etm_set_default(config); /* * By default the tracers are configured to trace the whole address * range. Narrow the field only if requested by user space. */ if (config->mode) etm_config_trace_mode(config); /* * At this time only cycle accurate and timestamp options are * available. */ if (attr->config & ~ETM3X_SUPPORTED_OPTIONS) return -EINVAL; config->ctrl = attr->config; return 0; }
static int etm_probe(struct amba_device *adev, const struct amba_id *id) { int ret; void __iomem *base; struct device *dev = &adev->dev; struct coresight_platform_data *pdata = NULL; struct etm_drvdata *drvdata; struct resource *res = &adev->res; struct coresight_desc *desc; struct device_node *np = adev->dev.of_node; desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); if (!desc) return -ENOMEM; drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) return -ENOMEM; if (np) { pdata = of_get_coresight_platform_data(dev, np); if (IS_ERR(pdata)) return PTR_ERR(pdata); adev->dev.platform_data = pdata; drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14"); } drvdata->dev = &adev->dev; dev_set_drvdata(dev, drvdata); /* Validity for the resource is already checked by the AMBA core */ base = devm_ioremap_resource(dev, res); if (IS_ERR(base)) return PTR_ERR(base); drvdata->base = base; spin_lock_init(&drvdata->spinlock); drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */ if (!IS_ERR(drvdata->atclk)) { ret = clk_prepare_enable(drvdata->atclk); if (ret) return ret; } drvdata->cpu = pdata ? 
pdata->cpu : 0; get_online_cpus(); etmdrvdata[drvdata->cpu] = drvdata; if (smp_call_function_single(drvdata->cpu, etm_init_arch_data, drvdata, 1)) dev_err(dev, "ETM arch init failed\n"); if (!etm_count++) { cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING, "AP_ARM_CORESIGHT_STARTING", etm_starting_cpu, etm_dying_cpu); ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "AP_ARM_CORESIGHT_ONLINE", etm_online_cpu, NULL); if (ret < 0) goto err_arch_supported; hp_online = ret; } put_online_cpus(); if (etm_arch_supported(drvdata->arch) == false) { ret = -EINVAL; goto err_arch_supported; } etm_init_trace_id(drvdata); etm_set_default(&drvdata->config); desc->type = CORESIGHT_DEV_TYPE_SOURCE; desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC; desc->ops = &etm_cs_ops; desc->pdata = pdata; desc->dev = dev; desc->groups = coresight_etm_groups; drvdata->csdev = coresight_register(desc); if (IS_ERR(drvdata->csdev)) { ret = PTR_ERR(drvdata->csdev); goto err_arch_supported; } ret = etm_perf_symlink(drvdata->csdev, true); if (ret) { coresight_unregister(drvdata->csdev); goto err_arch_supported; } pm_runtime_put(&adev->dev); dev_info(dev, "%s initialized\n", (char *)id->data); if (boot_enable) { coresight_enable(drvdata->csdev); drvdata->boot_enable = true; } return 0; err_arch_supported: if (--etm_count == 0) { cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); if (hp_online) cpuhp_remove_state_nocalls(hp_online); } return ret; }