/*
 * External entry point: report whether syncpoint @id has already reached
 * @thresh according to the cached state of the host that owns @dev.
 * No hardware access is performed here.
 */
int nvhost_syncpt_is_expired_ext(struct platform_device *dev, u32 id,
		u32 thresh)
{
	struct nvhost_master *host = nvhost_get_host(dev);

	return nvhost_syncpt_is_expired(&host->syncpt, id, thresh);
}
/**
 * Refresh the syncpoint's min value from hardware, then report whether
 * the (@id, @thresh) pair is satisfied: true when expired, false when
 * the caller may still need to wait.
 */
static bool syncpt_update_min_is_expired(
	struct nvhost_syncpt *sp,
	u32 id,
	u32 thresh)
{
	/* Pull the current HW value into the shadow copy first. */
	syncpt_op().update_min(sp, id);

	return nvhost_syncpt_is_expired(sp, id, thresh);
}
/*
 * check for old WAITs to be removed (avoiding a wrap)
 *
 * Refreshes the min value of every syncpoint selected by @waitchk_mask,
 * then walks the @wait array: any WAIT whose threshold is already
 * satisfied is patched in the command stream so the HW never compares
 * against a wrapped counter.
 *
 * NOTE(review): this excerpt ends inside the while loop — the loop
 * advance and function epilogue are outside this view.
 */
static int t20_syncpt_wait_check(struct nvhost_syncpt *sp,
		struct nvmap_client *nvmap,
		u32 waitchk_mask,
		struct nvhost_waitchk *wait,
		int num_waitchk)
{
	u32 idx;
	int err = 0;

	/* get current syncpt values */
	for (idx = 0; idx < NV_HOST1X_SYNCPT_NB_PTS; idx++) {
		if (BIT(idx) & waitchk_mask)
			nvhost_syncpt_update_min(sp, idx);
	}

	BUG_ON(!wait && !num_waitchk);

	/* compare syncpt vs wait threshold */
	while (num_waitchk) {
		u32 override;

		BUG_ON(wait->syncpt_id >= NV_HOST1X_SYNCPT_NB_PTS);
		trace_nvhost_syncpt_wait_check(wait->mem, wait->offset,
				wait->syncpt_id, wait->thresh);
		if (nvhost_syncpt_is_expired(sp,
					wait->syncpt_id, wait->thresh)) {
			/*
			 * NULL an already satisfied WAIT_SYNCPT host method,
			 * by patching its args in the command stream. The
			 * method data is changed to reference a reserved
			 * (never given out or incr) NVSYNCPT_GRAPHICS_HOST
			 * syncpt with a matching threshold value of 0, so
			 * is guaranteed to be popped by the host HW.
			 */
			/*
			 * NOTE(review): syncpt_op(sp) takes an argument here
			 * while the rest of this file uses syncpt_op() —
			 * verify which form this tree's macro expects.
			 */
			dev_dbg(&syncpt_to_dev(sp)->dev->dev,
			    "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
			    wait->syncpt_id,
			    syncpt_op(sp).name(sp, wait->syncpt_id),
			    wait->thresh,
			    nvhost_syncpt_read_min(sp, wait->syncpt_id));

			/* patch the wait */
			override = nvhost_class_host_wait_syncpt(
					NVSYNCPT_GRAPHICS_HOST, 0);
			err = nvmap_patch_word(nvmap,
					(struct nvmap_handle *)wait->mem,
					wait->offset, override);
			if (err)
				break;
		}
/*
 * Scan the job's waitchk entries that target @patch_mem and neutralize
 * any WAIT_SYNCPT whose threshold has already been satisfied, by
 * patching the mapped command stream at @patch_addr (avoids a wrap
 * condition in the HW comparison).  Always returns 0.
 */
static int do_waitchks(struct nvhost_job *job, struct nvhost_syncpt *sp,
		u32 patch_mem, void *patch_addr)
{
	struct nvhost_waitchk *chk = job->waitchk;
	struct nvhost_waitchk *const last = chk + job->num_waitchk;

	/* compare syncpt vs wait threshold */
	for (; chk < last; chk++) {
		/* skip all other gathers */
		if (chk->mem != patch_mem)
			continue;

		trace_nvhost_syncpt_wait_check(chk->mem, chk->offset,
				chk->syncpt_id, chk->thresh,
				nvhost_syncpt_read(sp, chk->syncpt_id));

		if (nvhost_syncpt_is_expired(sp,
					chk->syncpt_id, chk->thresh)) {
			/*
			 * NULL an already satisfied WAIT_SYNCPT host method,
			 * by patching its args in the command stream. The
			 * method data is changed to reference a reserved
			 * (never given out or incr) NVSYNCPT_GRAPHICS_HOST
			 * syncpt with a matching threshold value of 0, so
			 * is guaranteed to be popped by the host HW.
			 */
			dev_dbg(&syncpt_to_dev(sp)->dev->dev,
			    "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
			    chk->syncpt_id,
			    syncpt_op().name(sp, chk->syncpt_id),
			    chk->thresh,
			    nvhost_syncpt_read_min(sp, chk->syncpt_id));

			/* patch the wait */
			nvhost_syncpt_patch_wait(sp,
					(patch_addr + chk->offset));
		}

		/* mark this entry consumed */
		chk->mem = 0;
	}

	return 0;
}
/**
 * Main entrypoint for syncpoint value waits.
 *
 * Blocks until syncpoint @id reaches @thresh, the @timeout expires, or
 * a signal interrupts the wait.  A @timeout of 0 polls only; a value of
 * NVHOST_NO_TIMEOUT waits indefinitely (the loop never decrements it).
 * On success, *@value (when non-NULL) receives the syncpoint's min.
 *
 * Returns 0 when the threshold was reached, -EAGAIN on timeout (or when
 * polling and not yet expired), -ENOMEM if no waiter could be allocated,
 * or a negative error from the interruptible wait.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
			u32 thresh, u32 timeout, u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	void *waiter;
	int err = 0, check_count = 0, low_timeout = 0;
	u32 val;

	if (value)
		*value = 0;

	/* first check cache */
	if (nvhost_syncpt_is_expired(sp, id, thresh)) {
		if (value)
			*value = nvhost_syncpt_read_min(sp, id);
		return 0;
	}

	/* keep host alive */
	nvhost_module_busy(syncpt_to_dev(sp)->dev);

	/* try to read from register */
	val = syncpt_op().update_min(sp, id);
	if (nvhost_syncpt_is_expired(sp, id, thresh)) {
		if (value)
			*value = val;
		goto done;
	}

	/* zero timeout means poll-only: report would-block */
	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	waiter = nvhost_intr_alloc_waiter();
	if (!waiter) {
		err = -ENOMEM;
		goto done;
	}

	/*
	 * NOTE(review): presumably add_action takes ownership of waiter
	 * on success and on failure — confirm against nvhost_intr; no
	 * kfree of waiter happens here on the error path.
	 */
	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
				NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
				waiter,
				&ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* Caller-specified timeout may be impractically low */
	if (timeout < SYNCPT_CHECK_PERIOD)
		low_timeout = timeout;

	/* wait for the syncpoint, or timeout, or signal */
	while (timeout) {
		/* wait in bounded slices so a stuck HW can be reported */
		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
		int remain = wait_event_interruptible_timeout(wq,
				syncpt_update_min_is_expired(sp, id, thresh),
				check);
		if (remain > 0 || nvhost_syncpt_is_expired(sp, id, thresh)) {
			if (value)
				*value = nvhost_syncpt_read_min(sp, id);
			err = 0;
			break;
		}
		if (remain < 0) {
			/* interrupted by a signal */
			err = remain;
			break;
		}
		/* NVHOST_NO_TIMEOUT never decrements: wait forever */
		if (timeout != NVHOST_NO_TIMEOUT)
			timeout -= check;
		if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
			dev_warn(&syncpt_to_dev(sp)->dev->dev,
				"%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
				current->comm, id,
				syncpt_op().name(sp, id),
				thresh, timeout);
			syncpt_op().debug(sp);
			if (check_count == MAX_STUCK_CHECK_COUNT) {
				if (low_timeout) {
					dev_warn(&syncpt_to_dev(sp)->dev->dev,
						"is timeout %d too low?\n",
						low_timeout);
				}
				nvhost_debug_dump(syncpt_to_dev(sp));
			}
			check_count++;
		}
	}
	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), id, ref);

done:
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return err;
}
/*
 * Read a 3D unit register via the host1x indirect register interface
 * (Tegra2 path).
 *
 * Builds and submits a command stream that optionally saves/restores
 * hardware context, asks host1x to route register @offset into the
 * channel FIFO, and fences the operation with syncpoint increments.
 * The CPU then drains the FIFO into *@value.
 *
 * Fix: the mutex_lock_interruptible() failure path previously returned
 * directly, leaking the job reference and all three interrupt waiters;
 * it now releases the job and falls through to the common cleanup.
 *
 * Returns 0 on success or a negative errno.
 */
int nvhost_gr3d_t20_read_reg(
	struct nvhost_device *dev,
	struct nvhost_channel *channel,
	struct nvhost_hwctx *hwctx,
	u32 offset,
	u32 *value)
{
	struct host1x_hwctx *hwctx_to_save = NULL;
	struct nvhost_hwctx_handler *h = hwctx->h;
	struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
	bool need_restore = false;
	u32 syncpt_incrs = 4;
	unsigned int pending = 0;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	void *ctx_waiter, *read_waiter, *completed_waiter;
	struct nvhost_job *job;
	u32 syncval;
	int err;

	if (hwctx->has_timedout)
		return -ETIMEDOUT;

	ctx_waiter = nvhost_intr_alloc_waiter();
	read_waiter = nvhost_intr_alloc_waiter();
	completed_waiter = nvhost_intr_alloc_waiter();
	if (!ctx_waiter || !read_waiter || !completed_waiter) {
		err = -ENOMEM;
		goto done;
	}

	job = nvhost_job_alloc(channel, hwctx, NULL,
			nvhost_get_host(dev)->memmgr, 0, 0);
	if (!job) {
		err = -ENOMEM;
		goto done;
	}

	/* keep module powered */
	nvhost_module_busy(dev);

	/* get submit lock */
	err = mutex_lock_interruptible(&channel->submitlock);
	if (err) {
		/*
		 * Was "return err;" here, which leaked the job reference
		 * and the three waiters allocated above.
		 */
		nvhost_module_idle(dev);
		nvhost_job_put(job);
		goto done;
	}

	/* context switch */
	if (channel->cur_ctx != hwctx) {
		hwctx_to_save = channel->cur_ctx ?
			to_host1x_hwctx(channel->cur_ctx) : NULL;
		if (hwctx_to_save) {
			syncpt_incrs += hwctx_to_save->save_incrs;
			hwctx_to_save->hwctx.valid = true;
			nvhost_job_get_hwctx(job, &hwctx_to_save->hwctx);
		}
		channel->cur_ctx = hwctx;
		if (channel->cur_ctx && channel->cur_ctx->valid) {
			need_restore = true;
			syncpt_incrs += to_host1x_hwctx(channel->cur_ctx)
				->restore_incrs;
		}
	}

	/* reserve all syncpoint increments this submit will produce */
	syncval = nvhost_syncpt_incr_max(&nvhost_get_host(dev)->syncpt,
		p->syncpt, syncpt_incrs);

	job->syncpt_id = p->syncpt;
	job->syncpt_incrs = syncpt_incrs;
	job->syncpt_end = syncval;

	/* begin a CDMA submit */
	nvhost_cdma_begin(&channel->cdma, job);

	/* push save buffer (pre-gather setup depends on unit) */
	if (hwctx_to_save)
		h->save_push(&hwctx_to_save->hwctx, &channel->cdma);

	/* gather restore buffer */
	if (need_restore)
		nvhost_cdma_push(&channel->cdma,
			nvhost_opcode_gather(to_host1x_hwctx(channel->cur_ctx)
				->restore_size),
			to_host1x_hwctx(channel->cur_ctx)->restore_phys);

	/* Switch to 3D - wait for it to complete what it was doing */
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
		nvhost_opcode_imm_incr_syncpt(
			host1x_uclass_incr_syncpt_cond_op_done_v(),
			p->syncpt));
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			host1x_uclass_wait_syncpt_base_r(), 1),
		nvhost_class_host_wait_syncpt_base(p->syncpt,
			p->waitbase, 1));
	/* Tell 3D to send register value to FIFO */
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_nonincr(host1x_uclass_indoff_r(), 1),
		nvhost_class_host_indoff_reg_read(
			host1x_uclass_indoff_indmodid_gr3d_v(),
			offset, false));
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_imm(host1x_uclass_inddata_r(), 0),
		NVHOST_OPCODE_NOOP);
	/* Increment syncpt to indicate that FIFO can be read */
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_imm_incr_syncpt(
			host1x_uclass_incr_syncpt_cond_immediate_v(),
			p->syncpt),
		NVHOST_OPCODE_NOOP);
	/* Wait for value to be read from FIFO */
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_nonincr(host1x_uclass_wait_syncpt_base_r(), 1),
		nvhost_class_host_wait_syncpt_base(p->syncpt,
			p->waitbase, 3));
	/* Indicate submit complete */
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_nonincr(host1x_uclass_incr_syncpt_base_r(), 1),
		nvhost_class_host_incr_syncpt_base(p->waitbase, 4));
	nvhost_cdma_push(&channel->cdma,
		NVHOST_OPCODE_NOOP,
		nvhost_opcode_imm_incr_syncpt(
			host1x_uclass_incr_syncpt_cond_immediate_v(),
			p->syncpt));

	/* end CDMA submit */
	nvhost_cdma_end(&channel->cdma, job);
	nvhost_job_put(job);
	job = NULL;

	/*
	 * schedule a context save interrupt (to drain the host FIFO
	 * if necessary, and to release the restore buffer)
	 */
	if (hwctx_to_save) {
		err = nvhost_intr_add_action(
			&nvhost_get_host(dev)->intr,
			p->syncpt,
			syncval - syncpt_incrs
				+ hwctx_to_save->save_incrs
				- 1,
			NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save,
			ctx_waiter,
			NULL);
		ctx_waiter = NULL;
		WARN(err, "Failed to set context save interrupt");
	}

	/* Wait for FIFO to be ready */
	err = nvhost_intr_add_action(&nvhost_get_host(dev)->intr,
			p->syncpt, syncval - 2,
			NVHOST_INTR_ACTION_WAKEUP, &wq,
			read_waiter,
			&ref);
	read_waiter = NULL;
	WARN(err, "Failed to set wakeup interrupt");
	wait_event(wq,
		nvhost_syncpt_is_expired(&nvhost_get_host(dev)->syncpt,
				p->syncpt, syncval - 2));
	nvhost_intr_put_ref(&nvhost_get_host(dev)->intr, p->syncpt, ref);

	/* Read the register value from FIFO */
	err = nvhost_channel_drain_read_fifo(channel, value, 1, &pending);

	/* Indicate we've read the value */
	nvhost_syncpt_cpu_incr(&nvhost_get_host(dev)->syncpt, p->syncpt);

	/* Schedule a submit complete interrupt */
	err = nvhost_intr_add_action(&nvhost_get_host(dev)->intr,
			p->syncpt, syncval,
			NVHOST_INTR_ACTION_SUBMIT_COMPLETE, channel,
			completed_waiter, NULL);
	completed_waiter = NULL;
	WARN(err, "Failed to set submit complete interrupt");

	mutex_unlock(&channel->submitlock);

done:
	/* waiters not consumed by add_action are still non-NULL here */
	kfree(ctx_waiter);
	kfree(read_waiter);
	kfree(completed_waiter);
	return err;
}
/*
 * Read a 3D unit register via a memory-output gather (Tegra3 path).
 *
 * Allocates a small buffer whose first word receives the register
 * value; the remaining words hold a gather that programs 3D memory
 * output, performs the indirect register read, and increments the
 * syncpoint when the value has landed.  The CPU waits on the fence
 * and copies the word out to *@value.
 *
 * Fixes: @job is now initialized to NULL and released in the common
 * cleanup, so early "goto done" jumps no longer read an uninitialized
 * pointer and failures of nvhost_job_pin()/nvhost_channel_submit() no
 * longer leak the job reference.
 *
 * Returns 0 on success or a negative errno.
 */
int nvhost_gr3d_t30_read_reg(struct platform_device *dev,
	struct nvhost_channel *channel,
	struct nvhost_hwctx *hwctx,
	u32 offset,
	u32 *value)
{
	struct host1x_hwctx_handler *h = to_host1x_hwctx_handler(hwctx->h);
	u32 syncpt_incrs = 1;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	void *read_waiter = NULL;
	struct nvhost_job *job = NULL;	/* NULL so cleanup may test it */
	int err;
	struct mem_handle *mem = NULL;
	u32 *mem_ptr = NULL;
	u32 *cmdbuf_ptr = NULL;
	struct sg_table *mem_sgt = NULL;
	struct mem_mgr *memmgr = hwctx->memmgr;
	u32 opcodes[] = {
		/* Switch to 3D - set up output to memory */
		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1),
		nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
		/* placeholder, patched below with the DMA address */
		0xdeadbeef,
		/* Get host1x to request a register read */
		nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
				host1x_uclass_indoff_r(), 1),
		nvhost_class_host_indoff_reg_read(
				host1x_uclass_indoff_indmodid_gr3d_v(),
				offset, false),
		nvhost_opcode_imm(host1x_uclass_inddata_r(), 0),
		/* send reg reads back to host */
		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 0),
		/* Finalize with syncpt increment */
		nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
				host1x_uclass_incr_syncpt_base_r(), 1),
		nvhost_class_host_incr_syncpt_base(h->h.waitbase, 1),
		nvhost_opcode_imm_incr_syncpt(
				host1x_uclass_incr_syncpt_cond_immediate_v(),
				h->h.syncpt),
	};

	/* 12 slots for gather, and one slot for storing the result value */
	mem = nvhost_memmgr_alloc(memmgr, sizeof(opcodes)+4, 32,
			mem_mgr_flag_uncacheable);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	mem_ptr = nvhost_memmgr_mmap(mem);
	if (!mem_ptr) {
		err = -ENOMEM;
		goto done;
	}
	/* word 0 holds the result; the gather starts at word 1 */
	cmdbuf_ptr = mem_ptr + 1;

	mem_sgt = nvhost_memmgr_pin(memmgr, mem, &channel->dev->dev);
	if (IS_ERR(mem_sgt)) {
		err = -ENOMEM;
		mem_sgt = NULL;
		goto done;
	}
	/* Set address of target memory slot to the stream */
	opcodes[3] = sg_dma_address(mem_sgt->sgl);

	read_waiter = nvhost_intr_alloc_waiter();
	if (!read_waiter) {
		err = -ENOMEM;
		goto done;
	}

	job = nvhost_job_alloc(channel, hwctx, 1, 0, 0, 1, memmgr);
	if (!job) {
		err = -ENOMEM;
		goto done;
	}

	job->hwctx_syncpt_idx = 0;
	job->sp->id = h->h.syncpt;
	job->sp->waitbase = h->h.waitbase;
	job->sp->incrs = syncpt_incrs;
	job->num_syncpts = 1;
	job->serialize = 1;
	memcpy(cmdbuf_ptr, opcodes, sizeof(opcodes));

	/* Submit job */
	nvhost_job_add_gather(job, nvhost_memmgr_handle_to_id(mem),
			ARRAY_SIZE(opcodes), 4);

	err = nvhost_job_pin(job, &nvhost_get_host(dev)->syncpt);
	if (err)
		goto done;

	err = nvhost_channel_submit(job);
	if (err)
		goto done;

	/* Wait for read to be ready */
	err = nvhost_intr_add_action(&nvhost_get_host(dev)->intr,
			h->h.syncpt, job->sp->fence,
			NVHOST_INTR_ACTION_WAKEUP, &wq,
			read_waiter,
			&ref);
	read_waiter = NULL;
	WARN(err, "Failed to set wakeup interrupt");
	wait_event(wq,
		nvhost_syncpt_is_expired(&nvhost_get_host(dev)->syncpt,
				h->h.syncpt, job->sp->fence));

	nvhost_job_put(job);
	job = NULL;

	nvhost_intr_put_ref(&nvhost_get_host(dev)->intr, h->h.syncpt,
			ref);

	*value = *mem_ptr;

done:
	kfree(read_waiter);
	/* non-NULL only when pin/submit failed before the success path */
	if (job)
		nvhost_job_put(job);
	if (mem_ptr)
		nvhost_memmgr_munmap(mem, mem_ptr);
	if (mem_sgt)
		nvhost_memmgr_unpin(memmgr, mem, &channel->dev->dev,
				mem_sgt);
	if (mem)
		nvhost_memmgr_put(memmgr, mem);
	return err;
}