static int etm_parse_event_config(struct etm_drvdata *drvdata, struct perf_event *event) { struct etm_config *config = &drvdata->config; struct perf_event_attr *attr = &event->attr; if (!attr) return -EINVAL; /* Clear configuration from previous run */ memset(config, 0, sizeof(struct etm_config)); if (attr->exclude_kernel) config->mode = ETM_MODE_EXCL_KERN; if (attr->exclude_user) config->mode = ETM_MODE_EXCL_USER; /* Always start from the default config */ etm_set_default(config); /* * By default the tracers are configured to trace the whole address * range. Narrow the field only if requested by user space. */ if (config->mode) etm_config_trace_mode(config); /* * At this time only cycle accurate, return stack and timestamp * options are available. */ if (attr->config & ~ETM3X_SUPPORTED_OPTIONS) return -EINVAL; config->ctrl = attr->config; /* * Possible to have cores with PTM (supports ret stack) and ETM * (never has ret stack) on the same SoC. So if we have a request * for return stack that can't be honoured on this core then * clear the bit - trace will still continue normally */ if ((config->ctrl & ETMCR_RETURN_STACK) && !(drvdata->etmccer & ETMCCER_RETSTACK)) config->ctrl &= ~ETMCR_RETURN_STACK; return 0; }
static int etm_parse_event_config(struct etm_drvdata *drvdata, struct perf_event_attr *attr) { struct etm_config *config = &drvdata->config; if (!attr) return -EINVAL; /* Clear configuration from previous run */ memset(config, 0, sizeof(struct etm_config)); if (attr->exclude_kernel) config->mode = ETM_MODE_EXCL_KERN; if (attr->exclude_user) config->mode = ETM_MODE_EXCL_USER; /* Always start from the default config */ etm_set_default(config); /* * By default the tracers are configured to trace the whole address * range. Narrow the field only if requested by user space. */ if (config->mode) etm_config_trace_mode(config); /* * At this time only cycle accurate and timestamp options are * available. */ if (attr->config & ~ETM3X_SUPPORTED_OPTIONS) return -EINVAL; config->ctrl = attr->config; return 0; }
/*
 * mode_store - sysfs write handler for the ETM "mode" attribute.
 *
 * Parses a hex bitmask of ETM_MODE_* flags from @buf and updates the
 * cached configuration under drvdata->spinlock.  Stall and timestamp
 * modes are rejected with -EINVAL when the hardware capability registers
 * (ETMCCR/ETMCCER) show the feature is absent.  Returns @size on
 * success, a negative errno on parse failure or unsupported mode.
 */
static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	/* Only the defined mode bits are kept */
	config->mode = val & ETM_MODE_ALL;

	if (config->mode & ETM_MODE_EXCLUDE)
		config->enable_ctrl1 |= ETMTECR1_INC_EXC;
	else
		config->enable_ctrl1 &= ~ETMTECR1_INC_EXC;

	if (config->mode & ETM_MODE_CYCACC)
		config->ctrl |= ETMCR_CYC_ACC;
	else
		config->ctrl &= ~ETMCR_CYC_ACC;

	if (config->mode & ETM_MODE_STALL) {
		/* Stall mode needs a FIFOFULL-capable ETM */
		if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
			dev_warn(drvdata->dev, "stall mode not supported\n");
			ret = -EINVAL;
			goto err_unlock;
		}
		config->ctrl |= ETMCR_STALL_MODE;
	} else {
		config->ctrl &= ~ETMCR_STALL_MODE;
	}

	if (config->mode & ETM_MODE_TIMESTAMP) {
		/* Timestamping must be advertised in ETMCCER */
		if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
			dev_warn(drvdata->dev, "timestamp not supported\n");
			ret = -EINVAL;
			goto err_unlock;
		}
		config->ctrl |= ETMCR_TIMESTAMP_EN;
	} else {
		config->ctrl &= ~ETMCR_TIMESTAMP_EN;
	}

	if (config->mode & ETM_MODE_CTXID)
		config->ctrl |= ETMCR_CTXID_SIZE;
	else
		config->ctrl &= ~ETMCR_CTXID_SIZE;

	/* Re-derive the address-range programming for exclusion modes */
	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm_config_trace_mode(config);

	spin_unlock(&drvdata->spinlock);

	return size;

err_unlock:
	spin_unlock(&drvdata->spinlock);
	return ret;
}