static int etm_enable(struct coresight_device *csdev,
		      struct perf_event_attr *attr, u32 mode)
{
	int ret;
	u32 val;
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);

	/* Someone is already using the tracer */
	if (val)
		return -EBUSY;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = etm_enable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = etm_enable_perf(csdev, attr);
		break;
	default:
		ret = -EINVAL;
	}

	/* The tracer didn't start */
	if (ret)
		local_set(&drvdata->mode, CS_MODE_DISABLED);

	return ret;
}
static int etb_enable(struct coresight_device *csdev, u32 mode)
{
	u32 val;
	unsigned long flags;
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);

	/*
	 * When accessing from Perf, a HW buffer can be handled
	 * by a single trace entity.  In sysFS mode many tracers
	 * can be logging to the same HW buffer.
	 */
	if (val == CS_MODE_PERF)
		return -EBUSY;

	/* Nothing to do, the tracer is already enabled. */
	if (val == CS_MODE_SYSFS)
		goto out;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	etb_enable_hw(drvdata);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

out:
	dev_info(drvdata->dev, "ETB enabled\n");
	return 0;
}
static int etb_open(struct inode *inode, struct file *file)
{
	struct etb_drvdata *drvdata = container_of(file->private_data,
						   struct etb_drvdata, miscdev);

	if (local_cmpxchg(&drvdata->reading, 0, 1))
		return -EBUSY;

	dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
	return 0;
}
static int stm_enable(struct coresight_device *csdev,
		      struct perf_event *event, u32 mode)
{
	u32 val;
	struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (mode != CS_MODE_SYSFS)
		return -EINVAL;

	val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);

	/* Someone is already using the tracer */
	if (val)
		return -EBUSY;

	pm_runtime_get_sync(drvdata->dev);

	spin_lock(&drvdata->spinlock);
	stm_enable_hw(drvdata);
	spin_unlock(&drvdata->spinlock);

	dev_dbg(drvdata->dev, "STM tracing enabled\n");
	return 0;
}
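The four CoreSight functions above share the same gating idiom: a local_t mode (or reading) word is moved from its idle value to the requested mode with local_cmpxchg(), and put back with local_set() if the hardware fails to start. The following is a minimal sketch of that idiom, not taken from the kernel sources; example_drvdata and example_enable_hw() are hypothetical stand-ins for the real driver state and hardware programming.

#include <linux/coresight.h>	/* CS_MODE_* definitions */
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/local.h>

/* Hypothetical driver state; only the local_t mode word matters here. */
struct example_drvdata {
	local_t mode;
};

static int example_enable_hw(struct example_drvdata *drvdata)
{
	/* Hypothetical hardware programming; pretend it always succeeds. */
	return 0;
}

static int example_enable(struct example_drvdata *drvdata, u32 mode)
{
	int ret;

	/* Claim the tracer: only one transition away from DISABLED wins. */
	if (local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode))
		return -EBUSY;

	ret = example_enable_hw(drvdata);

	/* The hardware didn't start; release the claim. */
	if (ret)
		local_set(&drvdata->mode, CS_MODE_DISABLED);

	return ret;
}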
int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost;
	struct perf_sample_data sample_data;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (!rb)
		goto out;

	handle->rb	= rb;
	handle->event	= event;

	if (!rb->nr_pages)
		goto out;

	have_lost = local_read(&rb->lost);
	if (have_lost) {
		lost_event.header.size = sizeof(lost_event);
		perf_event_header__init_id(&lost_event.header, &sample_data,
					   event);
		size += lost_event.header.size;
	}

	perf_output_get_handle(handle);

	do {
		/*
		 * Userspace could choose to issue a mb() before updating the
		 * tail pointer. So that all reads will be completed before the
		 * write is issued.
		 */
		tail = ACCESS_ONCE(rb->user_page->data_tail);
		smp_rmb();
		offset = head = local_read(&rb->head);
		head += size;
		if (unlikely(!perf_output_space(rb, tail, offset, head)))
			goto fail;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	if (head - local_read(&rb->wakeup) > rb->watermark)
		local_add(rb->watermark, &rb->wakeup);

	handle->page = offset >> (PAGE_SHIFT + page_order(rb));
	handle->page &= rb->nr_pages - 1;
	handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1);
	handle->addr = rb->data_pages[handle->page];
	handle->addr += handle->size;
	handle->size = (PAGE_SIZE << page_order(rb)) - handle->size;

	if (have_lost) {
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
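A handle returned by a successful perf_output_begin() is filled in with perf_output_put()/perf_output_sample() and then released with perf_output_end(), which makes the record visible to userspace and wakes up readers. The following condensed sketch is modelled on the kernel's perf_event_output() path from the same era; error handling and the surrounding RCU read-side section are omitted, and example_output_sample() is a hypothetical caller.

#include <linux/perf_event.h>

static void example_output_sample(struct perf_event *event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct perf_output_handle handle;
	struct perf_event_header header;

	perf_prepare_sample(&header, data, event, regs);

	/* Reserve header.size bytes in the event's ring buffer. */
	if (perf_output_begin(&handle, event, header.size))
		return;		/* no buffer attached or not enough space */

	/* Copy the header and sample payload into the reserved region. */
	perf_output_sample(&handle, &header, data, event);

	/* Publish the record to userspace and wake up readers if needed. */
	perf_output_end(&handle);
}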