/** * adreno_coresight_disable() - Generic function to disable coresight debugging * @csdev: Pointer to coresight's device struct * * This is a generic function to disable coresight debug bus on adreno * devices. This should be used in all cases of disabling * coresight debug bus for adreno devices. This function in turn calls * the adreno device specific function through the gpudev hook. * This function is registered as the coresight disable function * with coresight driver. It should only be called through coresight driver * as that would ensure that the necessary setup required to be done on * coresight driver's part is also done. */ static void adreno_coresight_disable(struct coresight_device *csdev) { struct kgsl_device *device = dev_get_drvdata(csdev->dev.parent); struct adreno_device *adreno_dev; struct adreno_gpudev *gpudev; struct adreno_coresight *coresight; int i; if (device == NULL) return; adreno_dev = ADRENO_DEVICE(device); gpudev = ADRENO_GPU_DEVICE(adreno_dev); coresight = gpudev->coresight; if (coresight == NULL) return; mutex_lock(&device->mutex); if (!kgsl_active_count_get(device)) { for (i = 0; i < coresight->count; i++) kgsl_regwrite(device, coresight->registers[i].offset, 0); kgsl_active_count_put(device); } clear_bit(ADRENO_DEVICE_CORESIGHT, &adreno_dev->priv); mutex_unlock(&device->mutex); }
static ssize_t coresight_read_reg(struct kgsl_device *device, unsigned int offset, char *buf) { unsigned int regval = 0; mutex_lock(&device->mutex); if (!kgsl_active_count_get(device)) { kgsl_regread(device, offset, ®val); kgsl_active_count_put(device); } mutex_unlock(&device->mutex); return snprintf(buf, PAGE_SIZE, "0x%X", regval); }
/*
 * adreno_compat_ioctl() - Handle 32-bit compat perfcounter ioctls
 * @dev_priv: Per-fd KGSL private data for the caller
 * @cmd: The compat ioctl command code
 * @data: Already-copied-in ioctl payload (compat layout)
 *
 * Translates the 32-bit compat perfcounter query/read structures into
 * their native forms (widening the user pointers via uintptr_t) and
 * dispatches to the regular perfcounter helpers.  Returns 0 on success,
 * a negative errno on failure, or -ENOIOCTLCMD for unknown commands.
 */
long adreno_compat_ioctl(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int result = 0;

	switch (cmd) {
	case IOCTL_KGSL_PERFCOUNTER_QUERY_COMPAT: {
		struct kgsl_perfcounter_query_compat *query32 = data;
		struct kgsl_perfcounter_query query;

		/* Widen the compat (32-bit) user pointer to a native one */
		query.groupid = query32->groupid;
		query.countables = (unsigned int __user *)(uintptr_t)
			query32->countables;
		query.count = query32->count;
		query.max_counters = query32->max_counters;

		result = adreno_perfcounter_query_group(adreno_dev,
			query.groupid, query.countables,
			query.count, &query.max_counters);

		/* Propagate the (possibly updated) counter total back out */
		query32->max_counters = query.max_counters;
		break;
	}
	case IOCTL_KGSL_PERFCOUNTER_READ_COMPAT: {
		struct kgsl_perfcounter_read_compat *read32 = data;
		struct kgsl_perfcounter_read read;

		read.reads = (struct kgsl_perfcounter_read_group __user *)
			(uintptr_t)read32->reads;
		read.count = read32->count;

		/* Hold an active count so the GPU stays up for the reads */
		result = kgsl_active_count_get(device);
		if (result)
			break;

		result = adreno_perfcounter_read_group(adreno_dev,
			read.reads, read.count);

		kgsl_active_count_put(device);
		break;
	}
	default:
		KGSL_DRV_INFO(dev_priv->device,
			"invalid ioctl code %08x\n", cmd);
		result = -ENOIOCTLCMD;
		break;
	}

	return result;
}
ssize_t adreno_coresight_show_register(struct device *dev, struct device_attribute *attr, char *buf) { unsigned int val = 0; struct kgsl_device *device = dev_get_drvdata(dev->parent); struct adreno_device *adreno_dev; struct adreno_coresight_attr *cattr = TO_ADRENO_CORESIGHT_ATTR(attr); if (device == NULL) return -EINVAL; adreno_dev = ADRENO_DEVICE(device); if (cattr->reg == NULL) return -EINVAL; /* * Return the current value of the register if coresight is enabled, * otherwise report 0 */ mutex_lock(&device->mutex); if (test_bit(ADRENO_DEVICE_CORESIGHT, &adreno_dev->priv)) { /* * If the device isn't power collapsed read the actual value * from the hardware - otherwise return the cached value */ if (device->state == KGSL_STATE_ACTIVE || device->state == KGSL_STATE_NAP) { if (!kgsl_active_count_get(device)) { kgsl_regread(device, cattr->reg->offset, &cattr->reg->value); kgsl_active_count_put(device); } } val = cattr->reg->value; } mutex_unlock(&device->mutex); return snprintf(buf, PAGE_SIZE, "0x%X", val); }
/*
 * adreno_coresight_store_register() - sysfs store handler for a debug register
 * @dev: sysfs device
 * @attr: Attribute being written (wraps the adreno_coresight_register)
 * @buf: User-supplied value string (any base accepted by kstrtoul)
 * @size: Length of @buf
 *
 * Caches the new value and, when the GPU is ACTIVE or in NAP, programs it
 * into hardware immediately.  Writes made while coresight is disabled are
 * silently ignored.  Returns @size on success or a negative errno.
 */
ssize_t adreno_coresight_store_register(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct kgsl_device *device = dev_get_drvdata(dev->parent);
	struct adreno_coresight_attr *cattr = TO_ADRENO_CORESIGHT_ATTR(attr);
	struct adreno_device *adreno_dev;
	unsigned long newval;
	int rc;

	if (device == NULL)
		return -EINVAL;

	adreno_dev = ADRENO_DEVICE(device);

	if (cattr->reg == NULL)
		return -EINVAL;

	rc = kstrtoul(buf, 0, &newval);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);

	/* Drop writes on the floor while coresight is off */
	if (!test_bit(ADRENO_DEVICE_CORESIGHT, &adreno_dev->priv))
		goto out;

	cattr->reg->value = newval;

	/* Push to hardware only if it isn't power collapsed */
	if (device->state == KGSL_STATE_ACTIVE ||
			device->state == KGSL_STATE_NAP) {
		if (!kgsl_active_count_get(device)) {
			kgsl_regwrite(device, cattr->reg->offset,
				cattr->reg->value);
			kgsl_active_count_put(device);
		}
	}

out:
	mutex_unlock(&device->mutex);
	return size;
}
/** * adreno_coresight_enable() - Generic function to enable coresight debugging * @csdev: Pointer to coresight's device struct * * This is a generic function to enable coresight debug bus on adreno * devices. This should be used in all cases of enabling * coresight debug bus for adreno devices. This function is registered as the * coresight enable function with coresight driver. It should only be called * through coresight driver as that would ensure that the necessary setup * required to be done on coresight driver's part is also done. */ static int adreno_coresight_enable(struct coresight_device *csdev) { struct kgsl_device *device = dev_get_drvdata(csdev->dev.parent); struct adreno_device *adreno_dev; struct adreno_gpudev *gpudev; struct adreno_coresight *coresight; int ret = 0; if (device == NULL) return -ENODEV; adreno_dev = ADRENO_DEVICE(device); gpudev = ADRENO_GPU_DEVICE(adreno_dev); coresight = gpudev->coresight; if (coresight == NULL) return -ENODEV; mutex_lock(&device->mutex); if (!test_and_set_bit(ADRENO_DEVICE_CORESIGHT, &adreno_dev->priv)) { int i; /* Reset all the debug registers to their default values */ for (i = 0; i < coresight->count; i++) coresight->registers[i].value = coresight->registers[i].initial; ret = kgsl_active_count_get(device); if (!ret) { ret = _adreno_coresight_set(adreno_dev); kgsl_active_count_put(device); } } mutex_unlock(&device->mutex); return ret; }
/*
 * adreno_perfcounter_read_group() - Read a batch of perfcounter values
 * @adreno_dev: Adreno device to read counters from
 * @reads: User-space array of kgsl_perfcounter_read_group entries
 * @count: Number of entries in @reads (1..100)
 *
 * Copies the request list in, reads each requested countable that is
 * currently assigned to a hardware counter (unassigned countables report
 * a value of 0), and copies the results back out.  Returns 0 on success
 * or a negative errno.
 *
 * Improvement: use kmalloc_array() instead of an open-coded
 * kmalloc(sizeof() * count) so the element-count multiplication is
 * overflow-checked by the allocator.
 */
int adreno_perfcounter_read_group(struct adreno_device *adreno_dev,
		struct kgsl_perfcounter_read_group __user *reads,
		unsigned int count)
{
	struct kgsl_device *device = &adreno_dev->dev;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_perfcounters *counters = gpudev->perfcounters;
	struct adreno_perfcount_group *group;
	struct kgsl_perfcounter_read_group *list = NULL;
	unsigned int i, j;
	int ret = 0;

	if (NULL == counters)
		return -EINVAL;

	/* sanity check for later */
	if (!gpudev->perfcounter_read)
		return -EINVAL;

	/*
	 * Sanity check params passed in; 100 is an arbitrary cap to bound
	 * the kernel allocation driven by user-controlled input.
	 */
	if (reads == NULL || count == 0 || count > 100)
		return -EINVAL;

	list = kmalloc_array(count, sizeof(struct kgsl_perfcounter_read_group),
			GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	if (copy_from_user(list, reads,
			sizeof(struct kgsl_perfcounter_read_group) * count)) {
		ret = -EFAULT;
		goto done;
	}

	mutex_lock(&device->mutex);

	/* Keep the GPU powered for the duration of the reads */
	ret = kgsl_active_count_get(device);
	if (ret) {
		mutex_unlock(&device->mutex);
		goto done;
	}

	/* list iterator */
	for (j = 0; j < count; j++) {
		list[j].value = 0;

		/* Verify that the group ID is within range */
		if (list[j].groupid >= counters->group_count) {
			ret = -EINVAL;
			break;
		}

		group = &(counters->groups[list[j].groupid]);

		/* group/counter iterator: find the assigned countable */
		for (i = 0; i < group->reg_count; i++) {
			if (group->regs[i].countable == list[j].countable) {
				list[j].value = gpudev->perfcounter_read(
					adreno_dev, list[j].groupid, i);
				break;
			}
		}
	}

	kgsl_active_count_put(device);
	mutex_unlock(&device->mutex);

	/* write the data */
	if (ret == 0)
		if (copy_to_user(reads, list,
			sizeof(struct kgsl_perfcounter_read_group) * count))
			ret = -EFAULT;

done:
	kfree(list);
	return ret;
}
/**
 * adreno_drawctxt_detach(): detach a context from the GPU
 * @context: Generic KGSL context container for the context
 *
 * Deactivates the context on its ringbuffer, destroys any queued command
 * batches, waits for the context's last submitted timestamp to retire,
 * and finalizes the memstore timestamps so waiters can make progress.
 */
void adreno_drawctxt_detach(struct kgsl_context *context)
{
	struct kgsl_device *device;
	struct adreno_device *adreno_dev;
	struct adreno_context *drawctxt;
	struct adreno_ringbuffer *rb;
	int ret;

	if (context == NULL)
		return;

	device = context->device;
	adreno_dev = ADRENO_DEVICE(device);
	drawctxt = ADRENO_CONTEXT(context);
	rb = drawctxt->rb;

	/* deactivate context */
	mutex_lock(&device->mutex);
	if (rb->drawctxt_active == drawctxt) {
		/*
		 * Only the current ringbuffer needs the GPU powered for the
		 * switch; a failure to get the active count here is fatal
		 * (BUG) since the context cannot be safely detached.
		 */
		if (adreno_dev->cur_rb == rb) {
			if (!kgsl_active_count_get(device)) {
				adreno_drawctxt_switch(adreno_dev, rb, NULL, 0);
				kgsl_active_count_put(device);
			} else
				BUG();
		} else
			adreno_drawctxt_switch(adreno_dev, rb, NULL, 0);
	}
	mutex_unlock(&device->mutex);

	/* Drain and destroy every command batch still queued on the context */
	spin_lock(&drawctxt->lock);
	while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
		struct kgsl_cmdbatch *cmdbatch =
			drawctxt->cmdqueue[drawctxt->cmdqueue_head];

		drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
			ADRENO_CONTEXT_CMDQUEUE_SIZE;

		spin_unlock(&drawctxt->lock);

		/*
		 * If the context is detached while we are waiting for
		 * the next command in GFT SKIP CMD, print the context
		 * detached status here.
		 */
		adreno_fault_skipcmd_detached(device, drawctxt, cmdbatch);

		/*
		 * Don't hold the drawctxt lock while the cmdbatch is being
		 * destroyed because the cmdbatch destroy takes the device
		 * mutex and the world falls in on itself
		 */

		kgsl_cmdbatch_destroy(cmdbatch);
		spin_lock(&drawctxt->lock);
	}
	spin_unlock(&drawctxt->lock);
	/*
	 * internal_timestamp is set in adreno_ringbuffer_addcmds,
	 * which holds the device mutex.
	 */
	mutex_lock(&device->mutex);

	/*
	 * Wait for the last global timestamp to pass before continuing.
	 * The maximum wait time is 30s, some large IB's can take longer
	 * than 10s and if hang happens then the time for the context's
	 * commands to retire will be greater than 10s. 30s should be
	 * sufficient time to wait for the commands even if a hang happens.
	 */
	ret = adreno_drawctxt_wait_rb(adreno_dev, context,
		drawctxt->internal_timestamp, 30 * 1000);

	/*
	 * If the wait for global fails due to timeout then nothing after this
	 * point is likely to work very well - BUG_ON() so we can take
	 * advantage of the debug tools to figure out what the h - e - double
	 * hockey sticks happened. If EAGAIN error is returned then recovery
	 * will kick in and there will be no more commands in the RB pipe from
	 * this context which is what we are waiting for, so ignore -EAGAIN
	 * error
	 */
	BUG_ON(ret && ret != -EAGAIN);

	/* Mark both start and end timestamps as retired in memstore */
	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
			drawctxt->timestamp);

	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
			drawctxt->timestamp);

	adreno_profile_process_results(adreno_dev);

	mutex_unlock(&device->mutex);

	/* wake threads waiting to submit commands from this context */
	wake_up_all(&drawctxt->waiting);
	wake_up_all(&drawctxt->wq);
}