/*
 * Dump the cached min/max values of every syncpoint that has
 * outstanding work (max != 0) to the kernel log.
 */
void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
{
	u32 id;

	for (id = 0; id < NV_HOST1X_SYNCPT_NB_PTS; id++) {
		u32 max_val = nvhost_syncpt_read_max(sp, id);

		/* a zero max means nothing was ever submitted on this id */
		if (!max_val)
			continue;

		dev_info(&syncpt_to_dev(sp)->pdev->dev,
			"id %d (%s) min %d max %d\n",
			id, nvhost_syncpt_name(id),
			nvhost_syncpt_update_min(sp, id), max_val);
	}
}
/**
 * Increment syncpoint value from cpu, updating cache
 */
void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
#ifdef CONFIG_MACH_N1
	u32 new_min, new_max;

	new_max = nvhost_syncpt_incr_max(sp, id, 1);
	new_min = nvhost_syncpt_incr_min(sp, id, 1);
	if (sp->restore_needed) {
		/* XXX restore_needed used only for logging
		 * (to be removed in final checkin) */
		dev_warn(&syncpt_to_dev(sp)->pdev->dev,
			"syncpoint id %d (%s) incremented min = %d, max = %d while nvhost suspended\n",
			id, nvhost_syncpt_name(id), new_min, new_max);
	}
#else
	nvhost_syncpt_incr_max(sp, id, 1);
#endif
	/* hold the host powered across the hardware increment */
	nvhost_module_busy(&syncpt_to_dev(sp)->mod);
	nvhost_syncpt_cpu_incr(sp, id);
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
}
/* check for old WAITs to be removed (avoiding a wrap) */
/* NOTE(review): this function is truncated in the visible chunk — the
 * while-loop and function body do not close here; the remainder
 * (advancing waitp / decrementing waitchks and the return) is
 * presumably outside this view. */
int nvhost_syncpt_wait_check(struct nvmap_client *nvmap,
			struct nvhost_syncpt *sp,
			u32 waitchk_mask,
			struct nvhost_waitchk *waitp,
			u32 waitchks)
{
	u32 idx;
	int err = 0;

	/* get current syncpt values */
	/* refresh the cached min only for syncpoints named in the mask */
	for (idx = 0; idx < NV_HOST1X_SYNCPT_NB_PTS; idx++) {
		if (BIT(idx) & waitchk_mask) {
			nvhost_syncpt_update_min(sp, idx);
		}
	}

	BUG_ON(!waitp);

	/* compare syncpt vs wait threshold */
	while (waitchks) {
		u32 syncpt, override;

		BUG_ON(waitp->syncpt_id >= NV_HOST1X_SYNCPT_NB_PTS);

		/* cached min read just above; wrapping compare decides if
		 * the threshold has already been reached */
		syncpt = atomic_read(&sp->min_val[waitp->syncpt_id]);
		if (nvhost_syncpt_wrapping_comparison(syncpt, waitp->thresh)) {

			/* wait has completed already, so can be removed */
			dev_dbg(&syncpt_to_dev(sp)->pdev->dev,
				"drop WAIT id %d (%s) thresh 0x%x, syncpt 0x%x\n",
				waitp->syncpt_id,
				nvhost_syncpt_name(waitp->syncpt_id),
				waitp->thresh, syncpt);

			/* move wait to a kernel reserved syncpt (that's always 0) */
			override = nvhost_class_host_wait_syncpt(NVSYNCPT_GRAPHICS_HOST, 0);

			/* patch the wait */
			/* rewrites the wait opcode in the client's command
			 * buffer in place; on failure stop processing */
			err = nvmap_patch_wait(nvmap,
					(struct nvmap_handle *)waitp->mem,
					waitp->offset,
					override);
			if (err)
				break;
		}
/**
 * Main entrypoint for syncpoint value waits.
 *
 * Waits until syncpoint @id reaches @thresh (wrapping compare), up to
 * @timeout jiffies (NVHOST_NO_TIMEOUT waits forever).  If @value is
 * non-NULL it receives the syncpoint value observed on success, 0
 * otherwise.  Returns 0 on success, -EAGAIN on timeout, or the
 * negative error from an interrupted sleep / interrupt setup.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
			u32 thresh, u32 timeout, u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	int err = 0;

	if (value)
		*value = 0;

	BUG_ON(!check_max(sp, id, thresh));

	/* fast path: the cached min may already satisfy the threshold */
	if (nvhost_syncpt_min_cmp(sp, id, thresh)) {
		if (value)
			*value = nvhost_syncpt_read_min(sp, id);
		return 0;
	}

	/* keep host powered while we touch registers and sleep */
	nvhost_module_busy(&syncpt_to_dev(sp)->mod);

	if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
		/* the cache may be stale — re-read the live register */
		u32 live = nvhost_syncpt_update_min(sp, id);
		if ((s32)(live - thresh) >= 0) {
			if (value)
				*value = live;
			goto done;
		}
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr),
			id, thresh,
			NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
			&ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* sleep in bounded slices so a stuck syncpoint gets logged */
	while (timeout) {
		u32 slice = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
		int waited = wait_event_interruptible_timeout(wq,
				nvhost_syncpt_min_cmp(sp, id, thresh),
				slice);

		if (waited > 0 || nvhost_syncpt_min_cmp(sp, id, thresh)) {
			/* threshold reached (possibly right at timeout) */
			if (value)
				*value = nvhost_syncpt_read_min(sp, id);
			err = 0;
			break;
		}

		if (waited < 0) {
			/* interrupted by a signal */
			err = waited;
			break;
		}

		if (timeout != NVHOST_NO_TIMEOUT)
			timeout -= slice;
		if (timeout) {
			dev_warn(&syncpt_to_dev(sp)->pdev->dev,
				"syncpoint id %d (%s) stuck waiting %d\n",
				id, nvhost_syncpt_name(id), thresh);
			nvhost_syncpt_debug(sp);
		}
	}

	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);

done:
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
	return err;
}
/**
 * Main entrypoint for syncpoint value waits.
 *
 * Waits until syncpoint @id reaches @thresh (wrapping compare), up to
 * @timeout jiffies (NVHOST_NO_TIMEOUT waits forever).  Returns 0 on
 * success, -EAGAIN on timeout, or a negative error if the sleep was
 * interrupted or the interrupt action could not be armed.
 *
 * Fix: the countdown previously subtracted SYNCPT_CHECK_PERIOD even
 * when the final slice waited fewer jiffies (check < period).  For a
 * timeout that is not a multiple of the check period, the unsigned
 * subtraction wrapped @timeout to a huge value and the loop kept
 * waiting far past the requested timeout.  Subtract the actual
 * waited slice instead.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
			u32 thresh, u32 timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	int err = 0;

	BUG_ON(!check_max(sp, id, thresh));

	/* first check cache */
	if (nvhost_syncpt_min_cmp(sp, id, thresh))
		return 0;

	/* keep host alive */
	nvhost_module_busy(&syncpt_to_dev(sp)->mod);

	if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
		/* try to read from register */
		u32 val = nvhost_syncpt_update_min(sp, id);
		if ((s32)(val - thresh) >= 0)
			goto done;
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr),
			id, thresh,
			NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
			&ref);
	if (err)
		goto done;

	/* wait for the syncpoint, or timeout, or signal */
	while (timeout) {
		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
		err = wait_event_interruptible_timeout(wq,
				nvhost_syncpt_min_cmp(sp, id, thresh),
				check);
		if (err != 0)
			break;
		if (timeout != NVHOST_NO_TIMEOUT)
			timeout -= check;	/* was SYNCPT_CHECK_PERIOD: u32 underflow */
		if (timeout) {
			dev_warn(&syncpt_to_dev(sp)->pdev->dev,
				"syncpoint id %d (%s) stuck waiting %d timeout=%d\n",
				id, nvhost_syncpt_name(id), thresh, timeout);
			/* Debug dumps (nvhost_syncpt_debug / fifo / reg dump)
			 * were removed here: this path can fire repeatedly
			 * during early suspend / late resume and the log
			 * flood could stall suspend long enough to trip the
			 * suspend watchdog. */
		}
	}

	/* >0: condition met; 0: loop ran out of timeout; <0: signal */
	if (err > 0)
		err = 0;
	else if (err == 0)
		err = -EAGAIN;

	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);

done:
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
	return err;
}