/**
 * adreno_drawctxt_dump() - dump information about a draw context
 * @device: KGSL device that owns the context
 * @context: KGSL context to dump information about
 *
 * Dump specific information about the context to the kernel log. Used for
 * fence timeout callbacks
 */
void adreno_drawctxt_dump(struct kgsl_device *device,
		struct kgsl_context *context)
{
	unsigned int queue, start, retire;
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);

	/* Snapshot the queued/consumed/retired timestamps for the log line */
	queue = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED);
	start = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_CONSUMED);
	retire = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);

	/* Hold the context lock while inspecting the command queue */
	spin_lock(&drawctxt->lock);
	dev_err(device->dev,
		" context[%d]: queue=%d, submit=%d, start=%d, retire=%d\n",
		context->id, queue, drawctxt->submitted_timestamp,
		start, retire);

	if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
		/* Only the oldest pending cmdbatch (queue head) is examined */
		struct kgsl_cmdbatch *cmdbatch =
			drawctxt->cmdqueue[drawctxt->cmdqueue_head];

		/*
		 * If this batch is already inside the fence-log path, dumping
		 * its sync points from here could block on our own context --
		 * report the suspected self-deadlock and bail out instead.
		 */
		if (test_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv)) {
			dev_err(device->dev,
				" possible deadlock. Context %d might be blocked for itself\n",
				context->id);
			goto done;
		}

		/*
		 * We may have cmdbatch timer running, which also uses same
		 * lock, take a lock with software interrupt disabled (bh)
		 * to avoid spin lock recursion.
		 */
		spin_lock_bh(&cmdbatch->lock);
		if (!list_empty(&cmdbatch->synclist)) {
			dev_err(device->dev,
				" context[%d] (ts=%d) Active sync points:\n",
				context->id, cmdbatch->timestamp);
			kgsl_dump_syncpoints(device, cmdbatch);
		}
		spin_unlock_bh(&cmdbatch->lock);
	}
done:
	spin_unlock(&drawctxt->lock);
}
/** * adreno_drawctxt_dump() - dump information about a draw context * @device: KGSL device that owns the context * @context: KGSL context to dump information about * * Dump specific information about the context to the kernel log. Used for * fence timeout callbacks */ void adreno_drawctxt_dump(struct kgsl_device *device, struct kgsl_context *context) { unsigned int queue, start, retire; struct adreno_context *drawctxt = ADRENO_CONTEXT(context); int index, pos; char buf[120]; kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED, &queue); kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_CONSUMED, &start); kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &retire); spin_lock(&drawctxt->lock); dev_err(device->dev, " context[%d]: queue=%d, submit=%d, start=%d, retire=%d\n", context->id, queue, drawctxt->submitted_timestamp, start, retire); if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) { struct kgsl_cmdbatch *cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head]; if (test_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv)) { dev_err(device->dev, " possible deadlock. Context %d might be blocked for itself\n", context->id); goto stats; } /* * We may have cmdbatch timer running, which also uses same * lock, take a lock with software interrupt disabled (bh) * to avoid spin lock recursion. 
*/ spin_lock_bh(&cmdbatch->lock); if (!list_empty(&cmdbatch->synclist)) { dev_err(device->dev, " context[%d] (ts=%d) Active sync points:\n", context->id, cmdbatch->timestamp); kgsl_dump_syncpoints(device, cmdbatch); } spin_unlock_bh(&cmdbatch->lock); } stats: memset(buf, 0, sizeof(buf)); pos = 0; for (index = 0; index < SUBMIT_RETIRE_TICKS_SIZE; index++) { uint64_t msecs; unsigned int usecs; if (!drawctxt->submit_retire_ticks[index]) continue; msecs = drawctxt->submit_retire_ticks[index] * 10; usecs = do_div(msecs, 192); usecs = do_div(msecs, 1000); pos += snprintf(buf + pos, sizeof(buf) - pos, "%d.%0d ", (unsigned int)msecs, usecs); } dev_err(device->dev, " context[%d]: submit times: %s\n", context->id, buf); spin_unlock(&drawctxt->lock); }