/**
 * Get the current syncpoint value
 */
u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
{
        u32 val;

        BUG_ON(!syncpt_op().update_min);
        nvhost_module_busy(syncpt_to_dev(sp)->dev);
        val = syncpt_op().update_min(sp, id);
        nvhost_module_idle(syncpt_to_dev(sp)->dev);
        return val;
}

/**
 * Resets syncpoint and waitbase values to sw shadows
 */
void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
{
        u32 i;

        for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++)
                syncpt_op().reset(sp, i);
        for (i = 0; i < nvhost_syncpt_nb_bases(sp); i++)
                syncpt_op().reset_wait_base(sp, i);
        wmb();
}

/**
 * Get the current syncpoint base
 */
u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
{
        u32 val;

        BUG_ON(!syncpt_op().read_wait_base);
        nvhost_module_busy(syncpt_to_dev(sp)->dev);
        syncpt_op().read_wait_base(sp, id);
        val = sp->base_val[id];
        nvhost_module_idle(syncpt_to_dev(sp)->dev);
        return val;
}

/**
 * Resets syncpoint and waitbase values to sw shadows
 */
void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
{
        u32 i;

        BUG_ON(!(syncpt_op(sp).reset && syncpt_op(sp).reset_wait_base));
        for (i = 0; i < sp->nb_pts; i++)
                syncpt_op(sp).reset(sp, i);
        for (i = 0; i < sp->nb_bases; i++)
                syncpt_op(sp).reset_wait_base(sp, i);
        wmb();
}

/**
 * Updates the last value read from hardware.
 */
u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
        u32 val;

        BUG_ON(!syncpt_op().update_min);
        val = syncpt_op().update_min(sp, id);
        trace_nvhost_syncpt_update_min(id, val);
        return val;
}

/**
 * Updates sw shadow state for client managed registers
 */
void nvhost_syncpt_save(struct nvhost_syncpt *sp)
{
        u32 i;

        for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
                if (nvhost_syncpt_client_managed(sp, i))
                        syncpt_op().update_min(sp, i);
                else
                        WARN_ON(!nvhost_syncpt_min_eq_max(sp, i));
        }

        for (i = 0; i < nvhost_syncpt_nb_bases(sp); i++)
                syncpt_op().read_wait_base(sp, i);
}

/**
 * Resets syncpoint and waitbase values of a
 * single client to sw shadows
 */
void nvhost_syncpt_reset_client(struct platform_device *pdev)
{
        struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
        struct nvhost_master *nvhost_master = nvhost_get_host(pdev);
        u32 id;

        BUG_ON(!(syncpt_op().reset && syncpt_op().reset_wait_base));
        for_each_set_bit(id, (unsigned long *)&pdata->syncpts, BITS_PER_LONG)
                syncpt_op().reset(&nvhost_master->syncpt, id);
        for_each_set_bit(id, (unsigned long *)&pdata->waitbases, BITS_PER_LONG)
                syncpt_op().reset_wait_base(&nvhost_master->syncpt, id);
        wmb();
}

/**
 * Updates sw shadow state for client managed registers
 */
void nvhost_syncpt_save(struct nvhost_syncpt *sp)
{
        u32 i;

        BUG_ON(!(syncpt_op(sp).update_min && syncpt_op(sp).read_wait_base));

        for (i = 0; i < sp->nb_pts; i++) {
                if (client_managed(i))
                        syncpt_op(sp).update_min(sp, i);
                else
                        BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
        }

        for (i = 0; i < sp->nb_bases; i++)
                syncpt_op(sp).read_wait_base(sp, i);
}

void nvhost_syncpt_set_min_eq_max_ext(struct platform_device *dev, u32 id)
{
        struct nvhost_master *master = nvhost_get_host(dev);
        struct nvhost_syncpt *sp = &master->syncpt;

        atomic_set(&sp->min_val[id], atomic_read(&sp->max_val[id]));
        syncpt_op().reset(sp, id);
}

/**
 * Updates the sync point from hardware, and returns true if the syncpoint
 * is expired, false if we may need to wait
 */
static bool syncpt_update_min_is_expired(
        struct nvhost_syncpt *sp,
        u32 id,
        u32 thresh)
{
        syncpt_op().update_min(sp, id);
        return nvhost_syncpt_is_expired(sp, id, thresh);
}

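/*
 * Illustrative sketch, not part of the driver: the expiry checks above rely
 * on a wrap-safe comparison of 32-bit syncpoint values, the same idiom as
 * the inline "(s32)(val - thresh) >= 0" test used elsewhere in this code.
 * The helper name below is hypothetical and only shows the idiom in
 * isolation.
 */
static inline bool syncpt_value_reached(u32 current_val, u32 thresh)
{
        /*
         * The signed view of the difference treats any value within half of
         * the 32-bit range past the threshold as "reached", so the check
         * keeps working after the counter wraps past 0xffffffff.
         */
        return (s32)(current_val - thresh) >= 0;
}
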
/**
 * Resets syncpoint and waitbase values of a
 * single client to sw shadows
 */
void nvhost_syncpt_reset_client(struct platform_device *pdev)
{
        struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
        struct nvhost_master *nvhost_master = nvhost_get_host(pdev);
        u32 id;

        BUG_ON(!(syncpt_op().reset && syncpt_op().reset_wait_base));
        /* check the index before dereferencing the array entry */
        for (id = 0; (id < NVHOST_MODULE_MAX_SYNCPTS) &&
                        pdata->syncpts[id]; ++id)
                syncpt_op().reset(&nvhost_master->syncpt, pdata->syncpts[id]);
        for (id = 0; (id < NVHOST_MODULE_MAX_WAITBASES) &&
                        pdata->waitbases[id]; ++id)
                syncpt_op().reset_wait_base(&nvhost_master->syncpt,
                                pdata->waitbases[id]);
        wmb();
}

void nvhost_syncpt_cpu_set_wait_base(struct platform_device *pdev, u32 id,
                u32 val)
{
        struct nvhost_syncpt *sp = &(nvhost_get_host(pdev)->syncpt);

        sp->base_val[id] = val;
        syncpt_op().reset_wait_base(sp, id);
        wmb();
}

/* check for old WAITs to be removed (avoiding a wrap) */
int nvhost_syncpt_wait_check(struct nvhost_syncpt *sp,
                             struct nvmap_client *nvmap,
                             u32 waitchk_mask,
                             struct nvhost_waitchk *wait,
                             int num_waitchk)
{
        return syncpt_op(sp).wait_check(sp, nvmap,
                        waitchk_mask, wait, num_waitchk);
}

/**
 * Return current syncpoint value on success
 */
int nvhost_syncpt_read_check(struct nvhost_syncpt *sp, u32 id, u32 *val)
{
        if (nvhost_module_busy(syncpt_to_dev(sp)->dev))
                return -EINVAL;

        *val = syncpt_op().update_min(sp, id);
        nvhost_module_idle(syncpt_to_dev(sp)->dev);

        return 0;
}

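/*
 * Illustrative usage sketch, not part of the driver: a caller is expected to
 * check the return value before trusting *val, since the read fails when the
 * host module cannot be powered on. The function and variable names in this
 * caller are hypothetical.
 */
static int example_report_syncpt(struct nvhost_syncpt *sp, u32 id)
{
        u32 val;
        int err;

        err = nvhost_syncpt_read_check(sp, id, &val);
        if (err)
                return err;     /* host not powered; no valid value read */

        pr_info("syncpt %u is currently at %u\n", id, val);
        return 0;
}
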
/**
 * Updates the last value read from hardware.
 */
u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
        u32 val;

        nvhost_module_busy(syncpt_to_dev(sp)->dev);
        val = syncpt_op().update_min(sp, id);
        nvhost_module_idle(syncpt_to_dev(sp)->dev);
        trace_nvhost_syncpt_update_min(id, val);
        return val;
}

/**
 * Get the current syncpoint value
 */
u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
{
        u32 val = 0xffffffff;
        int err;

        err = nvhost_module_busy(syncpt_to_dev(sp)->dev);
        if (err)
                return val;

        val = syncpt_op().update_min(sp, id);
        nvhost_module_idle(syncpt_to_dev(sp)->dev);
        return val;
}

/* check for old WAITs to be removed (avoiding a wrap) */
static int t20_syncpt_wait_check(struct nvhost_syncpt *sp,
                                 struct nvmap_client *nvmap,
                                 u32 waitchk_mask,
                                 struct nvhost_waitchk *wait,
                                 int num_waitchk)
{
        u32 idx;
        int err = 0;

        /* get current syncpt values */
        for (idx = 0; idx < NV_HOST1X_SYNCPT_NB_PTS; idx++) {
                if (BIT(idx) & waitchk_mask)
                        nvhost_syncpt_update_min(sp, idx);
        }

        BUG_ON(!wait && !num_waitchk);

        /* compare syncpt vs wait threshold */
        while (num_waitchk) {
                u32 override;

                BUG_ON(wait->syncpt_id >= NV_HOST1X_SYNCPT_NB_PTS);
                trace_nvhost_syncpt_wait_check(wait->mem, wait->offset,
                                wait->syncpt_id, wait->thresh);
                if (nvhost_syncpt_is_expired(sp,
                                        wait->syncpt_id, wait->thresh)) {
                        /*
                         * NULL an already satisfied WAIT_SYNCPT host method,
                         * by patching its args in the command stream. The
                         * method data is changed to reference a reserved
                         * (never given out or incr) NVSYNCPT_GRAPHICS_HOST
                         * syncpt with a matching threshold value of 0, so
                         * is guaranteed to be popped by the host HW.
                         */
                        dev_dbg(&syncpt_to_dev(sp)->dev->dev,
                                "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
                                wait->syncpt_id,
                                syncpt_op(sp).name(sp, wait->syncpt_id),
                                wait->thresh,
                                nvhost_syncpt_read_min(sp, wait->syncpt_id));

                        /* patch the wait */
                        override = nvhost_class_host_wait_syncpt(
                                        NVSYNCPT_GRAPHICS_HOST, 0);
                        err = nvmap_patch_word(nvmap,
                                        (struct nvmap_handle *)wait->mem,
                                        wait->offset, override);
                        if (err)
                                break;
                }

                /* advance to the next waitchk entry */
                wait++;
                num_waitchk--;
        }

        return err;
}

/**
 * Get the current syncpoint base
 */
u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
{
        u32 val = 0xffffffff;
        int err;

        err = nvhost_module_busy(syncpt_to_dev(sp)->dev);
        if (err)
                return val;

        syncpt_op().read_wait_base(sp, id);
        val = sp->base_val[id];
        nvhost_module_idle(syncpt_to_dev(sp)->dev);
        return val;
}

int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx)
{
        struct nvhost_master *host = syncpt_to_dev(sp);
        u32 reg;

        nvhost_module_busy(host->dev);
        reg = syncpt_op().mutex_try_lock(sp, idx);
        if (reg) {
                nvhost_module_idle(host->dev);
                return -EBUSY;
        }
        atomic_inc(&sp->lock_counts[idx]);
        return 0;
}

/*
 * Check driver supplied waitchk structs for syncpt thresholds
 * that have already been satisfied and NULL the comparison (to
 * avoid a wrap condition in the HW).
 */
static int do_waitchks(struct nvhost_job *job, struct nvhost_syncpt *sp,
                u32 patch_mem, void *patch_addr)
{
        int i;

        /* compare syncpt vs wait threshold */
        for (i = 0; i < job->num_waitchk; i++) {
                struct nvhost_waitchk *wait = &job->waitchk[i];

                /* skip all other gathers */
                if (patch_mem != wait->mem)
                        continue;

                trace_nvhost_syncpt_wait_check(wait->mem, wait->offset,
                                wait->syncpt_id, wait->thresh,
                                nvhost_syncpt_read(sp, wait->syncpt_id));
                if (nvhost_syncpt_is_expired(sp,
                                        wait->syncpt_id, wait->thresh)) {
                        /*
                         * NULL an already satisfied WAIT_SYNCPT host method,
                         * by patching its args in the command stream. The
                         * method data is changed to reference a reserved
                         * (never given out or incr) NVSYNCPT_GRAPHICS_HOST
                         * syncpt with a matching threshold value of 0, so
                         * is guaranteed to be popped by the host HW.
                         */
                        dev_dbg(&syncpt_to_dev(sp)->dev->dev,
                                "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
                                wait->syncpt_id,
                                syncpt_op().name(sp, wait->syncpt_id),
                                wait->thresh,
                                nvhost_syncpt_read_min(sp, wait->syncpt_id));

                        /* patch the wait */
                        nvhost_syncpt_patch_wait(sp,
                                        (patch_addr + wait->offset));
                }

                wait->mem = 0;
        }
        return 0;
}

void nvhost_syncpt_patch_check(struct nvhost_syncpt *sp)
{
        /* reset syncpoint value back to 0 */
        atomic_set(&sp->min_val[0], 0);
        syncpt_op().reset(sp, 0);
}

/* remove a wait pointed to by patch_addr */
int nvhost_syncpt_patch_wait(struct nvhost_syncpt *sp, void *patch_addr)
{
        return syncpt_op().patch_wait(sp, patch_addr);
}

void nvhost_mutex_unlock(struct nvhost_syncpt *sp, int idx)
{
        syncpt_op().mutex_unlock(sp, idx);
        nvhost_module_idle(syncpt_to_dev(sp)->dev);
        atomic_dec(&sp->lock_counts[idx]);
}

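/*
 * Illustrative usage sketch, not part of the driver: nvhost_mutex_try_lock()
 * keeps the host module busy only while the hardware mutex is held, so every
 * successful try_lock must be paired with nvhost_mutex_unlock(). The function
 * name below is hypothetical.
 */
static int example_with_hw_mutex(struct nvhost_syncpt *sp, int idx)
{
        int err;

        err = nvhost_mutex_try_lock(sp, idx);
        if (err)
                return err;     /* -EBUSY: mutex already held elsewhere */

        /* ... access the resource guarded by hardware mutex 'idx' ... */

        nvhost_mutex_unlock(sp, idx);
        return 0;
}
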
void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
{
        syncpt_op().debug(sp);
}

/**
 * Main entrypoint for syncpoint value waits.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
                        u32 thresh, u32 timeout, u32 *value)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
        void *ref;
        void *waiter;
        int err = 0, check_count = 0, low_timeout = 0;
        u32 val;

        if (value)
                *value = 0;

        /* first check cache */
        if (nvhost_syncpt_is_expired(sp, id, thresh)) {
                if (value)
                        *value = nvhost_syncpt_read_min(sp, id);
                return 0;
        }

        /* keep host alive */
        nvhost_module_busy(syncpt_to_dev(sp)->dev);

        /* try to read from register */
        val = syncpt_op().update_min(sp, id);
        if (nvhost_syncpt_is_expired(sp, id, thresh)) {
                if (value)
                        *value = val;
                goto done;
        }

        if (!timeout) {
                err = -EAGAIN;
                goto done;
        }

        /* schedule a wakeup when the syncpoint value is reached */
        waiter = nvhost_intr_alloc_waiter();
        if (!waiter) {
                err = -ENOMEM;
                goto done;
        }

        err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
                                NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
                                waiter,
                                &ref);
        if (err)
                goto done;

        err = -EAGAIN;
        /* Caller-specified timeout may be impractically low */
        if (timeout < SYNCPT_CHECK_PERIOD)
                low_timeout = timeout;

        /* wait for the syncpoint, or timeout, or signal */
        while (timeout) {
                u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
                int remain = wait_event_interruptible_timeout(wq,
                                syncpt_update_min_is_expired(sp, id, thresh),
                                check);
                if (remain > 0 || nvhost_syncpt_is_expired(sp, id, thresh)) {
                        if (value)
                                *value = nvhost_syncpt_read_min(sp, id);
                        err = 0;
                        break;
                }
                if (remain < 0) {
                        err = remain;
                        break;
                }
                if (timeout != NVHOST_NO_TIMEOUT)
                        timeout -= check;
                if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
                        dev_warn(&syncpt_to_dev(sp)->dev->dev,
                                "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
                                current->comm, id,
                                syncpt_op().name(sp, id),
                                thresh, timeout);
                        syncpt_op().debug(sp);
                        if (check_count == MAX_STUCK_CHECK_COUNT) {
                                if (low_timeout) {
                                        dev_warn(&syncpt_to_dev(sp)->dev->dev,
                                                "is timeout %d too low?\n",
                                                low_timeout);
                                }
                                nvhost_debug_dump(syncpt_to_dev(sp));
                        }
                        check_count++;
                }
        }
        nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), id, ref);

done:
        nvhost_module_idle(syncpt_to_dev(sp)->dev);
        return err;
}

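/*
 * Illustrative usage sketch, not part of the driver: a typical caller waits
 * for a fence (syncpoint id plus threshold) with a bounded timeout and treats
 * -EAGAIN as "not signalled yet". The function name and the assumption that
 * the timeout is expressed in jiffies (suggested by the SYNCPT_CHECK_PERIOD
 * comparisons above) are hypothetical.
 */
static int example_wait_for_fence(struct nvhost_syncpt *sp,
                                  u32 id, u32 thresh)
{
        u32 completed = 0;
        int err;

        /* wait up to 100 ms for the syncpoint to reach the threshold */
        err = nvhost_syncpt_wait_timeout(sp, id, thresh,
                                         msecs_to_jiffies(100), &completed);
        if (err == -EAGAIN)
                pr_warn("syncpt %u did not reach %u in time\n", id, thresh);
        else if (!err)
                pr_debug("syncpt %u reached %u (now %u)\n",
                         id, thresh, completed);
        return err;
}
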
/**
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
        BUG_ON(!syncpt_op().cpu_incr);
        syncpt_op().cpu_incr(sp, id);
}

void nvhost_syncpt_set_min_eq_max(struct nvhost_syncpt *sp, u32 id)
{
        atomic_set(&sp->min_val[id], atomic_read(&sp->max_val[id]));
        syncpt_op().reset(sp, id);
}

/**
 * Main entrypoint for syncpoint value waits.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
                        u32 thresh, u32 timeout, u32 *value)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
        void *ref;
        void *waiter;
        int err = 0, check_count = 0, low_timeout = 0;
        static int print_once = 0;

        if (value)
                *value = 0;

        BUG_ON(!syncpt_op(sp).update_min);

        if (!nvhost_syncpt_check_max(sp, id, thresh)) {
                dev_warn(&syncpt_to_dev(sp)->pdev->dev,
                        "wait %d (%s) for (%d) wouldn't be met (max %d)\n",
                        id, syncpt_op(sp).name(sp, id), thresh,
                        nvhost_syncpt_read_max(sp, id));
                nvhost_debug_dump(syncpt_to_dev(sp));
                return -EINVAL;
        }

        /* first check cache */
        if (nvhost_syncpt_min_cmp(sp, id, thresh)) {
                if (value)
                        *value = nvhost_syncpt_read_min(sp, id);
                return 0;
        }

        /* keep host alive */
        nvhost_module_busy(syncpt_to_dev(sp)->dev);

        if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
                /* try to read from register */
                u32 val = syncpt_op(sp).update_min(sp, id);
                if ((s32)(val - thresh) >= 0) {
                        if (value)
                                *value = val;
                        goto done;
                }
        }

        if (!timeout) {
                err = -EAGAIN;
                goto done;
        }

        /* schedule a wakeup when the syncpoint value is reached */
        waiter = nvhost_intr_alloc_waiter();
        if (!waiter) {
                err = -ENOMEM;
                goto done;
        }

        err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
                                NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
                                waiter,
                                &ref);
        if (err)
                goto done;

        err = -EAGAIN;
        /* wait for the syncpoint, or timeout, or signal */
        while (timeout) {
                u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
                int remain = wait_event_interruptible_timeout(wq,
                                nvhost_syncpt_min_cmp(sp, id, thresh),
                                check);
                if (remain > 0 || nvhost_syncpt_min_cmp(sp, id, thresh)) {
                        if (value)
                                *value = nvhost_syncpt_read_min(sp, id);
                        err = 0;
                        break;
                }
                if (remain < 0) {
                        err = remain;
                        break;
                }
                if (timeout != NVHOST_NO_TIMEOUT) {
                        if (timeout < SYNCPT_CHECK_PERIOD) {
                                /* Caller-specified timeout may be impractically low */
                                low_timeout = timeout;
                        }
                        timeout -= check;
                }
                if (timeout) {
                        dev_warn(&syncpt_to_dev(sp)->pdev->dev,
                                "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
                                current->comm, id,
                                syncpt_op(sp).name(sp, id),
                                thresh, timeout);
                        syncpt_op(sp).debug(sp);
                        print_once++;
                        if (print_once == 1) {
                                nvhost_debug_dump(syncpt_to_dev(sp));
                                debug_stuck_syncpoint();
                        }
                        if (check_count > MAX_STUCK_CHECK_COUNT) {
                                if (low_timeout) {
                                        dev_warn(&syncpt_to_dev(sp)->pdev->dev,
                                                "is timeout %d too low?\n",
                                                low_timeout);
                                }
                                nvhost_debug_dump(syncpt_to_dev(sp));
                                BUG();
                        }
                        check_count++;
                }
        }
        nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);

done:
        nvhost_module_idle(syncpt_to_dev(sp)->dev);
        return err;
}

/**
 * Updates the last value read from hardware.
 */
u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
        BUG_ON(!syncpt_op(sp).update_min);
        return syncpt_op(sp).update_min(sp, id);
}

/**
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
        syncpt_op().cpu_incr(sp, id);
}