/**
 * Increment syncpoint value from cpu, updating cache
 */
void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
	nvhost_syncpt_incr_max(sp, id, 1);
	nvhost_module_busy(&syncpt_to_dev(sp)->mod);
	nvhost_syncpt_cpu_incr(sp, id);
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
}
/**
 * Get the current syncpoint value
 */
u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
{
	u32 val;

	nvhost_module_busy(syncpt_to_dev(sp)->dev);
	val = syncpt_op().update_min(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return val;
}
/**
 * Increment syncpoint value from cpu, updating cache
 */
void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
	if (nvhost_syncpt_client_managed(sp, id))
		nvhost_syncpt_incr_max(sp, id, 1);
	nvhost_module_busy(syncpt_to_dev(sp)->dev);
	nvhost_syncpt_cpu_incr(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
}
/**
 * Get the current syncpoint base
 */
u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
{
	u32 val;

	nvhost_module_busy(syncpt_to_dev(sp)->dev);
	syncpt_op().read_wait_base(sp, id);
	val = sp->base_val[id];
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return val;
}
/**
 * Get the current syncpoint value
 */
u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
{
	u32 val;

	nvhost_module_busy(&syncpt_to_dev(sp)->mod);
	val = nvhost_syncpt_update_min(sp, id);
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
	return val;
}
/**
 * Return current syncpoint value on success
 */
int nvhost_syncpt_read_check(struct nvhost_syncpt *sp, u32 id, u32 *val)
{
	if (nvhost_module_busy(syncpt_to_dev(sp)->dev))
		return -EINVAL;

	*val = syncpt_op().update_min(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return 0;
}
/**
 * Get the current syncpoint base
 */
u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
{
	u32 val;

	BUG_ON(!syncpt_op(sp).read_wait_base);
	nvhost_module_busy(&syncpt_to_dev(sp)->mod);
	syncpt_op(sp).read_wait_base(sp, id);
	val = sp->base_val[id];
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
	return val;
}
/**
 * Get the current syncpoint value
 */
u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
{
	u32 val = 0xffffffff;
	int err;

	err = nvhost_module_busy(syncpt_to_dev(sp)->dev);
	if (err)
		return val;

	val = syncpt_op().update_min(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return val;
}
/**
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_dev *dev = syncpt_to_dev(sp);

	if (!client_managed(id) && nvhost_syncpt_min_eq_max(sp, id)) {
		dev_err(&syncpt_to_dev(sp)->pdev->dev,
			"Trying to increment syncpoint id %d beyond max\n",
			id);
		BUG();
	}
	writel(BIT(id), dev->sync_aperture + HOST1X_SYNC_SYNCPT_CPU_INCR);
	wmb();
}
/**
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
static void t20_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);

	BUG_ON(!nvhost_module_powered(dev->dev));
	if (!client_managed(id) && nvhost_syncpt_min_eq_max(sp, id)) {
		dev_err(&syncpt_to_dev(sp)->pdev->dev,
			"Trying to increment syncpoint id %d beyond max\n",
			id);
		nvhost_debug_dump(syncpt_to_dev(sp));
		return;
	}
	writel(BIT(id), dev->sync_aperture + HOST1X_SYNC_SYNCPT_CPU_INCR);
	wmb();
}
/**
 * Increment syncpoint value from cpu, updating cache
 */
int nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
	int err;

	err = nvhost_module_busy(syncpt_to_dev(sp)->dev);
	if (err)
		return err;

	if (nvhost_syncpt_client_managed(sp, id))
		nvhost_syncpt_incr_max(sp, id, 1);
	nvhost_syncpt_cpu_incr(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return 0;
}
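/*
 * A minimal caller-side sketch for the error-checked increment variant
 * above. example_signal_syncpt() is hypothetical; the point is only that
 * a failed nvhost_module_busy() means nothing was incremented, so the
 * error must be propagated rather than ignored.
 */
static int example_signal_syncpt(struct nvhost_syncpt *sp, u32 id)
{
	int err;

	/* powers the host, increments the syncpoint, then idles the host */
	err = nvhost_syncpt_incr(sp, id);
	if (err)
		return err;	/* host could not be powered; no increment */

	return 0;
}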
/**
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_dev *dev = syncpt_to_dev(sp);

	BUG_ON(!client_managed(id) && nvhost_syncpt_min_eq_max(sp, id));
	writel(BIT(id), dev->sync_aperture + HOST1X_SYNC_SYNCPT_CPU_INCR);
	wmb();
}
int nvhost_syncpt_nb_pts_ext(struct platform_device *dev)
{
	struct nvhost_master *master = nvhost_get_host(dev);
	struct nvhost_syncpt *sp = &master->syncpt;

	return syncpt_to_dev(sp)->info.nb_pts;
}
/**
 * Updates the last value read from hardware.
 */
u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_dev *dev = syncpt_to_dev(sp);
	void __iomem *sync_regs = dev->sync_aperture;
	u32 old, live, maxsp;

	do {
		smp_rmb();
		old = (u32)atomic_read(&sp->min_val[id]);
		live = readl(sync_regs + (HOST1X_SYNC_SYNCPT_0 + id * 4));
	} while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);

	if (!check_max(sp, id, live)) {
		smp_rmb();
		maxsp = (u32)atomic_read(&sp->max_val[id]);
		nvhost_sync_reg_dump(dev);
		printk(KERN_ERR "%s check_max failed: id=%lu max=%lu real=%lu\n",
		       __func__, (unsigned long)id, (unsigned long)maxsp,
		       (unsigned long)live);
		BUG();
	}
	return live;
}
/**
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
static void t20_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	u32 reg_offset = id / 32;

	if (!nvhost_syncpt_client_managed(sp, id) &&
	    nvhost_syncpt_min_eq_max(sp, id)) {
		dev_err(&syncpt_to_dev(sp)->dev->dev,
			"Trying to increment syncpoint id %d beyond max\n",
			id);
		nvhost_debug_dump(syncpt_to_dev(sp));
		return;
	}
	writel(bit_mask(id), dev->sync_aperture +
	       host1x_sync_syncpt_cpu_incr_r() + reg_offset * 4);
}
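/*
 * A sketch of the register banking used by the variant above. With more
 * than 32 syncpoints, the CPU_INCR interface becomes a row of 32-bit
 * registers: id / 32 selects the register, id % 32 the bit within it.
 * bit_mask() is not shown in this section; it is assumed here to be
 * equivalent to BIT(id % 32).
 */
static u32 example_cpu_incr_bit(u32 id, u32 *reg_offset)
{
	*reg_offset = id / 32;	/* which CPU_INCR register in the bank */
	return 1u << (id % 32);	/* assumed equivalent of bit_mask(id) */
}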
/**
 * Write the current syncpoint value back to hw.
 */
static void reset_syncpt(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	int min;

	smp_rmb();
	min = atomic_read(&sp->min_val[id]);
	writel(min, dev->sync_aperture + (HOST1X_SYNC_SYNCPT_0 + id * 4));
}
/**
 * Increment syncpoint value from cpu, updating cache
 */
void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
#ifdef CONFIG_MACH_N1
	u32 min, max;

	max = nvhost_syncpt_incr_max(sp, id, 1);
	min = nvhost_syncpt_incr_min(sp, id, 1);
	if (sp->restore_needed) {
		/* XXX restore_needed used only for logging
		 * (to be removed in final checkin) */
		dev_warn(&syncpt_to_dev(sp)->pdev->dev,
			 "syncpoint id %d (%s) incremented min = %d, max = %d while nvhost suspended\n",
			 id, nvhost_syncpt_name(id), min, max);
	}
#else
	nvhost_syncpt_incr_max(sp, id, 1);
#endif
	nvhost_module_busy(&syncpt_to_dev(sp)->mod);
	nvhost_syncpt_cpu_incr(sp, id);
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
}
/**
 * Updates the last value read from hardware.
 * (was nvhost_syncpt_update_min)
 */
static u32 t20_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	void __iomem *sync_regs = dev->sync_aperture;
	u32 old, live;

	do {
		old = nvhost_syncpt_read_min(sp, id);
		live = readl(sync_regs + (HOST1X_SYNC_SYNCPT_0 + id * 4));
	} while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);

	if (!nvhost_syncpt_check_max(sp, id, live))
		dev_err(&syncpt_to_dev(sp)->dev->dev,
			"%s failed: id=%u, min=%d, max=%d\n", __func__, id,
			nvhost_syncpt_read_min(sp, id),
			nvhost_syncpt_read_max(sp, id));

	return live;
}
/**
 * Updates the last value read from hardware.
 * (was nvhost_syncpt_update_min)
 */
static u32 t20_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	void __iomem *sync_regs = dev->sync_aperture;
	u32 old, live;

	do {
		old = nvhost_syncpt_read_min(sp, id);
		live = readl(sync_regs + (host1x_sync_syncpt_0_r() + id * 4));
	} while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);

	return live;
}
/* check for old WAITs to be removed (avoiding a wrap) */
static int t20_syncpt_wait_check(struct nvhost_syncpt *sp,
				 struct nvmap_client *nvmap,
				 u32 waitchk_mask,
				 struct nvhost_waitchk *wait,
				 int num_waitchk)
{
	u32 idx;
	int err = 0;

	/* get current syncpt values */
	for (idx = 0; idx < NV_HOST1X_SYNCPT_NB_PTS; idx++) {
		if (BIT(idx) & waitchk_mask)
			nvhost_syncpt_update_min(sp, idx);
	}

	BUG_ON(!wait && !num_waitchk);

	/* compare syncpt vs wait threshold */
	while (num_waitchk) {
		u32 override;

		BUG_ON(wait->syncpt_id >= NV_HOST1X_SYNCPT_NB_PTS);
		trace_nvhost_syncpt_wait_check(wait->mem, wait->offset,
					       wait->syncpt_id, wait->thresh);
		if (nvhost_syncpt_is_expired(sp, wait->syncpt_id,
					     wait->thresh)) {
			/*
			 * NULL an already satisfied WAIT_SYNCPT host method,
			 * by patching its args in the command stream. The
			 * method data is changed to reference a reserved
			 * (never given out or incr) NVSYNCPT_GRAPHICS_HOST
			 * syncpt with a matching threshold value of 0, so
			 * is guaranteed to be popped by the host HW.
			 */
			dev_dbg(&syncpt_to_dev(sp)->dev->dev,
				"drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
				wait->syncpt_id,
				syncpt_op(sp).name(sp, wait->syncpt_id),
				wait->thresh,
				nvhost_syncpt_read_min(sp, wait->syncpt_id));

			/* patch the wait */
			override = nvhost_class_host_wait_syncpt(
					NVSYNCPT_GRAPHICS_HOST, 0);
			err = nvmap_patch_word(nvmap,
					(struct nvmap_handle *)wait->mem,
					wait->offset, override);
			if (err)
				break;
		}
		wait++;
		num_waitchk--;
	}
	return err;
}
int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx)
{
	struct nvhost_master *host = syncpt_to_dev(sp);
	u32 reg;

	nvhost_module_busy(host->dev);
	reg = syncpt_op().mutex_try_lock(sp, idx);
	if (reg) {
		nvhost_module_idle(host->dev);
		return -EBUSY;
	}
	atomic_inc(&sp->lock_counts[idx]);
	return 0;
}
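/*
 * A usage sketch for the try-lock above. nvhost_mutex_unlock() is assumed
 * to be the matching release path (it is not shown in this section). The
 * pairing matters: a successful try_lock keeps the host powered via the
 * busy reference until the unlock drops it.
 */
static int example_with_hw_mutex(struct nvhost_syncpt *sp, int idx)
{
	int err = nvhost_mutex_try_lock(sp, idx);

	if (err)
		return err;	/* -EBUSY: another client holds the hw mutex */

	/* ... touch hardware protected by mutex idx ... */

	nvhost_mutex_unlock(sp, idx);	/* assumed counterpart to try_lock */
	return 0;
}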
void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
{
	u32 i;

	for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
		u32 max = nvhost_syncpt_read_max(sp, i);

		if (!max)
			continue;

		dev_info(&syncpt_to_dev(sp)->pdev->dev,
			 "id %d (%s) min %d max %d\n",
			 i, nvhost_syncpt_name(i),
			 nvhost_syncpt_update_min(sp, i), max);
	}
}
/**
 * Updates the last value read from hardware.
 */
u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	void __iomem *sync_regs = dev->sync_aperture;
	u32 old, live;

	do {
		smp_rmb();
		old = (u32)atomic_read(&sp->min_val[id]);
		live = readl(sync_regs + (HOST1X_SYNC_SYNCPT_0 + id * 4));
	} while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);

	BUG_ON(!check_max(sp, id, live));

	return live;
}
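/*
 * The update_min variants above all share one lock-free pattern: read the
 * cached value, read the live hardware value, and publish the live value
 * only if the cache has not moved underneath us. A distilled sketch of
 * that loop, assuming only an atomic cache word and a register address:
 */
static u32 example_refresh_cache(atomic_t *cache, void __iomem *reg)
{
	u32 old, live;

	do {
		old = (u32)atomic_read(cache);
		live = readl(reg);
		/* retry if another CPU updated the cache meanwhile */
	} while ((u32)atomic_cmpxchg(cache, old, live) != old);

	return live;
}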
/*
 * Check driver supplied waitchk structs for syncpt thresholds
 * that have already been satisfied and NULL the comparison (to
 * avoid a wrap condition in the HW).
 */
static int do_waitchks(struct nvhost_job *job, struct nvhost_syncpt *sp,
		       u32 patch_mem, void *patch_addr)
{
	int i;

	/* compare syncpt vs wait threshold */
	for (i = 0; i < job->num_waitchk; i++) {
		struct nvhost_waitchk *wait = &job->waitchk[i];

		/* skip all other gathers */
		if (patch_mem != wait->mem)
			continue;

		trace_nvhost_syncpt_wait_check(wait->mem, wait->offset,
				wait->syncpt_id, wait->thresh,
				nvhost_syncpt_read(sp, wait->syncpt_id));
		if (nvhost_syncpt_is_expired(sp, wait->syncpt_id,
					     wait->thresh)) {
			/*
			 * NULL an already satisfied WAIT_SYNCPT host method,
			 * by patching its args in the command stream. The
			 * method data is changed to reference a reserved
			 * (never given out or incr) NVSYNCPT_GRAPHICS_HOST
			 * syncpt with a matching threshold value of 0, so
			 * is guaranteed to be popped by the host HW.
			 */
			dev_dbg(&syncpt_to_dev(sp)->dev->dev,
				"drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
				wait->syncpt_id,
				syncpt_op().name(sp, wait->syncpt_id),
				wait->thresh,
				nvhost_syncpt_read_min(sp, wait->syncpt_id));

			/* patch the wait */
			nvhost_syncpt_patch_wait(sp,
					(patch_addr + wait->offset));
		}
		wait->mem = 0;
	}
	return 0;
}
/* check for old WAITs to be removed (avoiding a wrap) */
int nvhost_syncpt_wait_check(struct nvmap_client *nvmap,
			     struct nvhost_syncpt *sp, u32 waitchk_mask,
			     struct nvhost_waitchk *waitp, u32 waitchks)
{
	u32 idx;
	int err = 0;

	/* get current syncpt values */
	for (idx = 0; idx < NV_HOST1X_SYNCPT_NB_PTS; idx++) {
		if (BIT(idx) & waitchk_mask)
			nvhost_syncpt_update_min(sp, idx);
	}

	BUG_ON(!waitp);

	/* compare syncpt vs wait threshold */
	while (waitchks) {
		u32 syncpt, override;

		BUG_ON(waitp->syncpt_id >= NV_HOST1X_SYNCPT_NB_PTS);
		syncpt = atomic_read(&sp->min_val[waitp->syncpt_id]);
		if (nvhost_syncpt_wrapping_comparison(syncpt, waitp->thresh)) {
			/* wait has completed already, so can be removed */
			dev_dbg(&syncpt_to_dev(sp)->pdev->dev,
				"drop WAIT id %d (%s) thresh 0x%x, syncpt 0x%x\n",
				waitp->syncpt_id,
				nvhost_syncpt_name(waitp->syncpt_id),
				waitp->thresh, syncpt);

			/* move wait to a kernel reserved syncpt (that's always 0) */
			override = nvhost_class_host_wait_syncpt(
					NVSYNCPT_GRAPHICS_HOST, 0);

			/* patch the wait */
			err = nvmap_patch_wait(nvmap,
					(struct nvmap_handle *)waitp->mem,
					waitp->offset, override);
			if (err)
				break;
		}
		waitp++;
		waitchks--;
	}
	return err;
}
static u32 nvhost_get_syncpt(struct nvhost_syncpt *sp, bool client_managed,
			     const char *syncpt_name)
{
	u32 id;
	int err = 0;
	struct nvhost_master *host = syncpt_to_dev(sp);
	struct device *d = &host->dev->dev;

	mutex_lock(&sp->syncpt_mutex);

	/* find a syncpt which is free */
	id = nvhost_find_free_syncpt(sp);
	if (!id) {
		nvhost_err(d, "failed to get new free syncpt\n");
		mutex_unlock(&sp->syncpt_mutex);
		return 0;
	}

	/* if we get one, then reserve it */
	err = nvhost_reserve_syncpt(sp, id, client_managed);
	if (err) {
		nvhost_err(d, "syncpt reservation failed\n");
		mutex_unlock(&sp->syncpt_mutex);
		return 0;
	}

	/* assign a name for debugging purpose */
	err = nvhost_syncpt_assign_name(sp, id, syncpt_name);
	if (err) {
		nvhost_err(d, "syncpt name assignment failed\n");
		mutex_unlock(&sp->syncpt_mutex);
		return 0;
	}

	mutex_unlock(&sp->syncpt_mutex);
	return id;
}
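/*
 * A usage sketch for nvhost_get_syncpt() above, in a hypothetical client
 * probe path. The allocator treats id 0 as invalid, so a zero return
 * covers all three failure cases (no free syncpt, reservation, naming).
 */
static int example_client_probe(struct nvhost_syncpt *sp)
{
	u32 id = nvhost_get_syncpt(sp, true /* client managed */,
				   "example-client");

	if (!id)
		return -ENOSPC;	/* allocation, reservation, or naming failed */

	/* ... store id and use it when building submits ... */
	return 0;
}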
/**
 * Main entrypoint for syncpoint value waits.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
			       u32 thresh, u32 timeout, u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	int err = 0;

	if (value)
		*value = 0;

	BUG_ON(!check_max(sp, id, thresh));

	/* first check cache */
	if (nvhost_syncpt_min_cmp(sp, id, thresh)) {
		if (value)
			*value = nvhost_syncpt_read_min(sp, id);
		return 0;
	}

	/* keep host alive */
	nvhost_module_busy(&syncpt_to_dev(sp)->mod);

	if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
		/* try to read from register */
		u32 val = nvhost_syncpt_update_min(sp, id);
		if ((s32)(val - thresh) >= 0) {
			if (value)
				*value = val;
			goto done;
		}
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
				NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
				&wq, &ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* wait for the syncpoint, or timeout, or signal */
	while (timeout) {
		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
		int remain = wait_event_interruptible_timeout(wq,
				nvhost_syncpt_min_cmp(sp, id, thresh),
				check);
		if (remain > 0 || nvhost_syncpt_min_cmp(sp, id, thresh)) {
			if (value)
				*value = nvhost_syncpt_read_min(sp, id);
			err = 0;
			break;
		}
		if (remain < 0) {
			err = remain;
			break;
		}
		if (timeout != NVHOST_NO_TIMEOUT)
			timeout -= check;
		if (timeout) {
			dev_warn(&syncpt_to_dev(sp)->pdev->dev,
				 "syncpoint id %d (%s) stuck waiting %d\n",
				 id, nvhost_syncpt_name(id), thresh);
			nvhost_syncpt_debug(sp);
		}
	}
	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);

done:
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
	return err;
}
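/*
 * A caller-side sketch for the wait entrypoint above, assuming the fence
 * pair (id, thresh) came from an earlier submit. The timeout is in
 * jiffies, per the wait_event_interruptible_timeout() usage above;
 * NVHOST_NO_TIMEOUT makes the wait unbounded except for signals.
 */
static int example_wait_for_fence(struct nvhost_syncpt *sp, u32 id,
				  u32 thresh)
{
	u32 completed = 0;
	int err;

	err = nvhost_syncpt_wait_timeout(sp, id, thresh,
					 (u32)msecs_to_jiffies(5000),
					 &completed);
	if (err == -EAGAIN)
		return err;	/* timed out: syncpoint never reached thresh */

	/* on success, completed holds the observed syncpoint value */
	return err;
}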
/**
 * Write the current waitbase value back to hw.
 */
static void t20_syncpt_reset_wait_base(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);

	writel(sp->base_val[id],
	       dev->sync_aperture + (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
}
/**
 * Read waitbase value from hw.
 */
static void read_syncpt_wait_base(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);

	sp->base_val[id] = readl(dev->sync_aperture +
			(HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
}
/**
 * Write the current syncpoint value back to hw.
 */
static void t20_syncpt_reset(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	int min = nvhost_syncpt_read_min(sp, id);

	writel(min, dev->sync_aperture + (HOST1X_SYNC_SYNCPT_0 + id * 4));
}
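/*
 * A sketch of how the reset helpers above are typically used together on
 * resume: after the host loses power, hardware registers are restored
 * from the software cache. The nb_pts/nb_bases counts are assumed to come
 * from the info struct referenced earlier in this section.
 */
static void example_restore_syncpts(struct nvhost_syncpt *sp,
				    u32 nb_pts, u32 nb_bases)
{
	u32 i;

	for (i = 0; i < nb_pts; i++)
		t20_syncpt_reset(sp, i);	/* push cached min back to hw */

	for (i = 0; i < nb_bases; i++)
		t20_syncpt_reset_wait_base(sp, i);	/* push cached base */
}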