static void show_syncpts(struct nvhost_master *m, struct output *o)
{
        int i;

        BUG_ON(!nvhost_get_chip_ops()->syncpt.name);

        nvhost_debug_output(o, "---- syncpts ----\n");

        for (i = 0; i < nvhost_syncpt_nb_pts(&m->syncpt); i++) {
                u32 max = nvhost_syncpt_read_max(&m->syncpt, i);
                u32 min = nvhost_syncpt_update_min(&m->syncpt, i);

                if (!min && !max)
                        continue;
                nvhost_debug_output(o, "id %d (%s) min %d max %d\n",
                        i, nvhost_get_chip_ops()->syncpt.name(&m->syncpt, i),
                        min, max);
        }

        for (i = 0; i < nvhost_syncpt_nb_pts(&m->syncpt); i++) {
                u32 base_val;

                base_val = nvhost_syncpt_read_wait_base(&m->syncpt, i);
                if (base_val)
                        nvhost_debug_output(o, "waitbase id %d val %d\n",
                                i, base_val);
        }

        nvhost_debug_output(o, "\n");
}
static int nvhost_ioctl_ctrl_syncpt_read_max(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_read_args *args)
{
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        args->value = nvhost_syncpt_read_max(&ctx->dev->syncpt, args->id);
        return 0;
}
static int nvhost_ioctl_ctrl_syncpt_incr(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_incr_args *args)
{
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        trace_nvhost_ioctl_ctrl_syncpt_incr(args->id);
        nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
        return 0;
}
/**
 * Resets syncpoint and waitbase values to sw shadows
 */
void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
{
        u32 i;

        for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++)
                syncpt_op().reset(sp, i);
        for (i = 0; i < nvhost_syncpt_nb_bases(sp); i++)
                syncpt_op().reset_wait_base(sp, i);

        /* order the restore writes before any subsequent writes */
        wmb();
}
/**
 * Performs a sequential search and returns the first free syncpt id
 */
static u32 nvhost_find_free_syncpt(struct nvhost_syncpt *sp)
{
        u32 i;

        for (i = NVHOST_FREE_SYNCPT_BASE; i < nvhost_syncpt_nb_pts(sp); ++i)
                if (!sp->assigned[i])
                        return i;

        /* 0 signals that no dynamically assignable syncpt is free */
        return 0;
}
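/*
 * Illustrative sketch (not part of the driver): how a caller might use
 * nvhost_find_free_syncpt() to reserve a dynamically assignable syncpt.
 * The helper name nvhost_reserve_free_syncpt() is hypothetical; it assumes
 * the sp->assigned[] array and sp->syncpt_mutex seen in the later
 * nvhost_syncpt_init() variant.
 */
static u32 nvhost_reserve_free_syncpt(struct nvhost_syncpt *sp)
{
        u32 id;

        mutex_lock(&sp->syncpt_mutex);
        id = nvhost_find_free_syncpt(sp);
        if (id)
                sp->assigned[id] = true;  /* claim it before dropping the lock */
        mutex_unlock(&sp->syncpt_mutex);

        return id;  /* 0 means no free syncpt was available */
}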
/**
 * Updates sw shadow state for client managed registers
 */
void nvhost_syncpt_save(struct nvhost_syncpt *sp)
{
        u32 i;

        for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
                if (nvhost_syncpt_client_managed(sp, i))
                        syncpt_op().update_min(sp, i);
                else
                        WARN_ON(!nvhost_syncpt_min_eq_max(sp, i));
        }

        for (i = 0; i < nvhost_syncpt_nb_bases(sp); i++)
                syncpt_op().read_wait_base(sp, i);
}
static void nvhost_syncpt_deinit_timeline(struct nvhost_syncpt *sp)
{
#ifdef CONFIG_TEGRA_GRHOST_SYNC
        int i;

        for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
                if (sp->timeline && sp->timeline[i]) {
                        sync_timeline_destroy(
                                (struct sync_timeline *)sp->timeline[i]);
                }
        }
        kfree(sp->timeline);
        sp->timeline = NULL;
        if (sp->timeline_invalid)
                sync_timeline_destroy(
                        (struct sync_timeline *)sp->timeline_invalid);
#endif
}
static int __devinit nvhost_alloc_resources(struct nvhost_master *host)
{
        int err;

        err = nvhost_init_chip_support(host);
        if (err)
                return err;

        host->intr.syncpt = kzalloc(sizeof(struct nvhost_intr_syncpt) *
                                    nvhost_syncpt_nb_pts(&host->syncpt),
                                    GFP_KERNEL);
        if (!host->intr.syncpt) {
                /* frees happen in the support removal phase */
                return -ENOMEM;
        }

        return 0;
}
static int nvhost_ioctl_ctrl_syncpt_waitex(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_waitex_args *args)
{
        u32 timeout;
        int err;

        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        if (args->timeout == NVHOST_NO_TIMEOUT)
                timeout = MAX_SCHEDULE_TIMEOUT;
        else
                timeout = (u32)msecs_to_jiffies(args->timeout);

        err = nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
                                         args->thresh, timeout, &args->value);
        trace_nvhost_ioctl_ctrl_syncpt_wait(args->id, args->thresh,
                                            args->timeout, args->value, err);
        return err;
}
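/*
 * Hypothetical user-space sketch (not part of the driver): exercising the
 * ctrl ioctl handlers above. The NVHOST_IOCTL_CTRL_SYNCPT_* request macros,
 * the uapi header path and the device node are assumptions; only the
 * argument structs and their fields are taken from the handlers in this file.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/nvhost_ioctl.h>         /* assumed uapi header */

/* fd is assumed to be an open handle to the nvhost ctrl device node */
static int bump_and_wait(int fd, unsigned int id)
{
        struct nvhost_ctrl_syncpt_read_args read_args = { .id = id };
        struct nvhost_ctrl_syncpt_incr_args incr_args = { .id = id };
        struct nvhost_ctrl_syncpt_waitex_args wait_args = { .id = id };

        /* read the current max, then increment the syncpoint once */
        if (ioctl(fd, NVHOST_IOCTL_CTRL_SYNCPT_READ_MAX, &read_args) < 0)
                return -1;
        if (ioctl(fd, NVHOST_IOCTL_CTRL_SYNCPT_INCR, &incr_args) < 0)
                return -1;

        /* wait (up to 1000 ms) for the value we just made reachable */
        wait_args.thresh = read_args.value + 1;
        wait_args.timeout = 1000;
        return ioctl(fd, NVHOST_IOCTL_CTRL_SYNCPT_WAITEX, &wait_args);
}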
int nvhost_syncpt_init(struct nvhost_device *dev, struct nvhost_syncpt *sp)
{
        int i;
        struct nvhost_master *host = syncpt_to_dev(sp);
        int err = 0;

        /* Allocate structs for min, max and base values */
        sp->min_val = kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_pts(sp),
                        GFP_KERNEL);
        sp->max_val = kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_pts(sp),
                        GFP_KERNEL);
        sp->base_val = kzalloc(sizeof(u32) * nvhost_syncpt_nb_bases(sp),
                        GFP_KERNEL);
        sp->lock_counts =
                kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_mlocks(sp),
                        GFP_KERNEL);

        if (!(sp->min_val && sp->max_val && sp->base_val && sp->lock_counts)) {
                /* frees happen in the deinit */
                err = -ENOMEM;
                goto fail;
        }

        sp->kobj = kobject_create_and_add("syncpt", &dev->dev.kobj);
        if (!sp->kobj) {
                err = -EIO;
                goto fail;
        }

        /* Allocate two attributes for each sync point: min and max */
        sp->syncpt_attrs = kzalloc(sizeof(*sp->syncpt_attrs)
                        * nvhost_syncpt_nb_pts(sp) * 2, GFP_KERNEL);
        if (!sp->syncpt_attrs) {
                err = -ENOMEM;
                goto fail;
        }

        /* Fill in the attributes */
        for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
                char name[MAX_SYNCPT_LENGTH];
                struct kobject *kobj;
                struct nvhost_syncpt_attr *min = &sp->syncpt_attrs[i*2];
                struct nvhost_syncpt_attr *max = &sp->syncpt_attrs[i*2+1];

                /* Create one directory per sync point */
                snprintf(name, sizeof(name), "%d", i);
                kobj = kobject_create_and_add(name, sp->kobj);
                if (!kobj) {
                        err = -EIO;
                        goto fail;
                }

                min->id = i;
                min->host = host;
                min->attr.attr.name = min_name;
                min->attr.attr.mode = S_IRUGO;
                min->attr.show = syncpt_min_show;
                if (sysfs_create_file(kobj, &min->attr.attr)) {
                        err = -EIO;
                        goto fail;
                }

                max->id = i;
                max->host = host;
                max->attr.attr.name = max_name;
                max->attr.attr.mode = S_IRUGO;
                max->attr.show = syncpt_max_show;
                if (sysfs_create_file(kobj, &max->attr.attr)) {
                        err = -EIO;
                        goto fail;
                }
        }

        return err;

fail:
        nvhost_syncpt_deinit(sp);
        return err;
}
int nvhost_syncpt_init(struct platform_device *dev, struct nvhost_syncpt *sp)
{
        int i;
        struct nvhost_master *host = syncpt_to_dev(sp);
        int err = 0;

        /* Allocate structs for min, max and base values */
        sp->min_val = kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_pts(sp),
                        GFP_KERNEL);
        sp->max_val = kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_pts(sp),
                        GFP_KERNEL);
        sp->base_val = kzalloc(sizeof(u32) * nvhost_syncpt_nb_bases(sp),
                        GFP_KERNEL);
        sp->lock_counts =
                kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_mlocks(sp),
                        GFP_KERNEL);
        sp->caps_nodes = kzalloc(sizeof(struct nvhost_capability_node) * 3,
                        GFP_KERNEL);
#ifdef CONFIG_TEGRA_GRHOST_SYNC
        sp->timeline = kzalloc(sizeof(struct nvhost_sync_timeline *) *
                        nvhost_syncpt_nb_pts(sp), GFP_KERNEL);
        if (!sp->timeline) {
                err = -ENOMEM;
                goto fail;
        }
#endif

        if (!(sp->min_val && sp->max_val && sp->base_val && sp->lock_counts &&
              sp->caps_nodes)) {
                /* frees happen in the deinit */
                err = -ENOMEM;
                goto fail;
        }

        sp->kobj = kobject_create_and_add("syncpt", &dev->dev.kobj);
        if (!sp->kobj) {
                err = -EIO;
                goto fail;
        }

        sp->caps_kobj = kobject_create_and_add("capabilities", &dev->dev.kobj);
        if (!sp->caps_kobj) {
                err = -EIO;
                goto fail;
        }

        if (nvhost_syncpt_set_sysfs_capability_node(sp, num_syncpts_name,
                sp->caps_nodes, &nvhost_syncpt_nb_pts)) {
                err = -EIO;
                goto fail;
        }

        if (nvhost_syncpt_set_sysfs_capability_node(sp, num_waitbases_name,
                sp->caps_nodes + 1, &nvhost_syncpt_nb_bases)) {
                err = -EIO;
                goto fail;
        }

        if (nvhost_syncpt_set_sysfs_capability_node(sp, num_mutexes_name,
                sp->caps_nodes + 2, &nvhost_syncpt_nb_mlocks)) {
                err = -EIO;
                goto fail;
        }

        /* Allocate two attributes for each sync point: min and max */
        sp->syncpt_attrs = kzalloc(sizeof(*sp->syncpt_attrs)
                        * nvhost_syncpt_nb_pts(sp) * 2, GFP_KERNEL);
        if (!sp->syncpt_attrs) {
                err = -ENOMEM;
                goto fail;
        }

        /* Fill in the attributes */
        for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
                struct nvhost_syncpt_attr *min = &sp->syncpt_attrs[i*2];
                struct nvhost_syncpt_attr *max = &sp->syncpt_attrs[i*2+1];

                err = nvhost_syncpt_timeline_attr(host, sp, min, max, i);
                if (err)
                        goto fail;

#ifdef CONFIG_TEGRA_GRHOST_SYNC
                sp->timeline[i] = nvhost_sync_timeline_create(sp, i);
                if (!sp->timeline[i]) {
                        err = -ENOMEM;
                        goto fail;
                }
#endif
        }

        err = nvhost_syncpt_timeline_attr(host, sp, &sp->invalid_min_attr,
                                          &sp->invalid_max_attr,
                                          NVSYNCPT_INVALID);
        if (err)
                goto fail;

        sp->timeline_invalid = nvhost_sync_timeline_create(sp,
                                                           NVSYNCPT_INVALID);
        if (!sp->timeline_invalid) {
                err = -ENOMEM;
                goto fail;
        }

        return err;

fail:
        nvhost_syncpt_deinit(sp);
        return err;
}
int nvhost_syncpt_init(struct platform_device *dev, struct nvhost_syncpt *sp)
{
        int i;
        struct nvhost_master *host = syncpt_to_dev(sp);
        int err = 0;

        /* Allocate structs for min, max and base values */
        sp->assigned = kzalloc(sizeof(bool) * nvhost_syncpt_nb_pts(sp),
                        GFP_KERNEL);
        sp->client_managed = kzalloc(sizeof(bool) * nvhost_syncpt_nb_pts(sp),
                        GFP_KERNEL);
        sp->syncpt_names = kzalloc(sizeof(char *) * nvhost_syncpt_nb_pts(sp),
                        GFP_KERNEL);
        sp->min_val = kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_pts(sp),
                        GFP_KERNEL);
        sp->max_val = kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_pts(sp),
                        GFP_KERNEL);
        sp->base_val = kzalloc(sizeof(u32) * nvhost_syncpt_nb_bases(sp),
                        GFP_KERNEL);
        sp->lock_counts =
                kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_mlocks(sp),
                        GFP_KERNEL);
#ifdef CONFIG_TEGRA_GRHOST_SYNC
        sp->timeline = kzalloc(sizeof(struct nvhost_sync_timeline *) *
                        nvhost_syncpt_nb_pts(sp), GFP_KERNEL);
        if (!sp->timeline) {
                err = -ENOMEM;
                goto fail;
        }
#endif

        if (!(sp->assigned && sp->client_managed && sp->syncpt_names &&
              sp->min_val && sp->max_val && sp->base_val &&
              sp->lock_counts)) {
                /* frees happen in the deinit */
                err = -ENOMEM;
                goto fail;
        }

        sp->kobj = kobject_create_and_add("syncpt", &dev->dev.kobj);
        if (!sp->kobj) {
                err = -EIO;
                goto fail;
        }

        mutex_init(&sp->syncpt_mutex);

        /* Allocate NUM_SYSFS_ENTRY attributes for each sync point */
        sp->syncpt_attrs = kzalloc(sizeof(*sp->syncpt_attrs)
                        * nvhost_syncpt_nb_pts(sp) * NUM_SYSFS_ENTRY,
                        GFP_KERNEL);
        if (!sp->syncpt_attrs) {
                err = -ENOMEM;
                goto fail;
        }

        /* Fill in the attributes */
        for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
                struct nvhost_syncpt_attr *min =
                        &sp->syncpt_attrs[i*NUM_SYSFS_ENTRY];
                struct nvhost_syncpt_attr *max =
                        &sp->syncpt_attrs[i*NUM_SYSFS_ENTRY+1];
                struct nvhost_syncpt_attr *name =
                        &sp->syncpt_attrs[i*NUM_SYSFS_ENTRY+2];
                struct nvhost_syncpt_attr *syncpt_type =
                        &sp->syncpt_attrs[i*NUM_SYSFS_ENTRY+3];
                struct nvhost_syncpt_attr *syncpt_assigned =
                        &sp->syncpt_attrs[i*NUM_SYSFS_ENTRY+4];

                err = nvhost_syncpt_timeline_attr(host, sp, min, max, name,
                                        syncpt_type, syncpt_assigned, i);
                if (err)
                        goto fail;

                /* initialize syncpt status */
                sp->assigned[i] = false;
                sp->client_managed[i] = false;

#ifdef CONFIG_TEGRA_GRHOST_SYNC
                sp->timeline[i] = nvhost_sync_timeline_create(sp, i);
                if (!sp->timeline[i]) {
                        err = -ENOMEM;
                        goto fail;
                }
#endif
        }

#ifdef CONFIG_TEGRA_GRHOST_SYNC
        err = nvhost_syncpt_timeline_attr(host, sp, &sp->invalid_min_attr,
                                          &sp->invalid_max_attr,
                                          &sp->invalid_name_attr,
                                          &sp->invalid_syncpt_type_attr,
                                          &sp->invalid_assigned_attr,
                                          NVSYNCPT_INVALID);
        if (err)
                goto fail;

        sp->timeline_invalid = nvhost_sync_timeline_create(sp,
                                                           NVSYNCPT_INVALID);
        if (!sp->timeline_invalid) {
                err = -ENOMEM;
                goto fail;
        }
#endif

        /*
         * some syncpts need to be reserved (hard-coded) because of
         * external dependencies / constraints
         */
        nvhost_reserve_syncpts(sp);

        return err;

fail:
        nvhost_syncpt_deinit(sp);
        return err;
}
/**
 * Main entrypoint for syncpoint value waits.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
                        u32 thresh, u32 timeout, u32 *value,
                        struct timespec *ts, bool interruptible)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
        void *ref;
        void *waiter;
        int err = 0, check_count = 0, low_timeout = 0;
        u32 val, old_val, new_val;

        if (!id || id >= nvhost_syncpt_nb_pts(sp))
                return -EINVAL;

        if (value)
                *value = 0;

        /* first check cache */
        if (nvhost_syncpt_is_expired(sp, id, thresh)) {
                if (value)
                        *value = nvhost_syncpt_read_min(sp, id);
                if (ts)
                        ktime_get_ts(ts);
                return 0;
        }

        /* keep host alive */
        err = nvhost_module_busy(syncpt_to_dev(sp)->dev);
        if (err)
                return err;

        /* try to read from register */
        val = syncpt_op().update_min(sp, id);
        if (nvhost_syncpt_is_expired(sp, id, thresh)) {
                if (value)
                        *value = val;
                if (ts)
                        ktime_get_ts(ts);
                goto done;
        }

        if (!timeout) {
                err = -EAGAIN;
                goto done;
        }

        old_val = val;

        /* schedule a wakeup when the syncpoint value is reached */
        waiter = nvhost_intr_alloc_waiter();
        if (!waiter) {
                err = -ENOMEM;
                goto done;
        }

        err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
                        interruptible ?
                                NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE :
                                NVHOST_INTR_ACTION_WAKEUP,
                        &wq, waiter, &ref);
        if (err)
                goto done;

        err = -EAGAIN;
        /* Caller-specified timeout may be impractically low */
        if (timeout < SYNCPT_CHECK_PERIOD)
                low_timeout = timeout;

        /* wait for the syncpoint, or timeout, or signal */
        while (timeout) {
                u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
                int remain;

                if (interruptible)
                        remain = wait_event_interruptible_timeout(wq,
                                syncpt_update_min_is_expired(sp, id, thresh),
                                check);
                else
                        remain = wait_event_timeout(wq,
                                syncpt_update_min_is_expired(sp, id, thresh),
                                check);
                if (remain > 0 || nvhost_syncpt_is_expired(sp, id, thresh)) {
                        if (value)
                                *value = nvhost_syncpt_read_min(sp, id);
                        if (ts) {
                                err = nvhost_intr_release_time(ref, ts);
                                if (err)
                                        ktime_get_ts(ts);
                        }
                        err = 0;
                        break;
                }
                if (remain < 0) {
                        err = remain;
                        break;
                }
                if (timeout != NVHOST_NO_TIMEOUT)
                        timeout -= check;
                if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
                        new_val = syncpt_op().update_min(sp, id);
                        if (old_val == new_val) {
                                dev_warn(&syncpt_to_dev(sp)->dev->dev,
                                        "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
                                        current->comm, id,
                                        syncpt_op().name(sp, id),
                                        thresh, timeout);
                                syncpt_op().debug(sp);
                        } else {
                                old_val = new_val;
                                dev_warn(&syncpt_to_dev(sp)->dev->dev,
                                        "%s: syncpoint id %d (%s) progressing slowly %d, timeout=%d\n",
                                        current->comm, id,
                                        syncpt_op().name(sp, id),
                                        thresh, timeout);
                        }
                        if (check_count == MAX_STUCK_CHECK_COUNT) {
                                if (low_timeout) {
                                        dev_warn(&syncpt_to_dev(sp)->dev->dev,
                                                "is timeout %d too low?\n",
                                                low_timeout);
                                }
                                nvhost_debug_dump(syncpt_to_dev(sp));
                        }
                        check_count++;
                }
        }

        nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), id, ref);

done:
        nvhost_module_idle(syncpt_to_dev(sp)->dev);
        return err;
}
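/*
 * Illustrative in-kernel caller sketch (not part of the driver): wait up to
 * 100 ms for a syncpoint to reach a threshold, mirroring the ms-to-jiffies
 * conversion done in the waitex ioctl handler above. The function and
 * variable names here are hypothetical; only nvhost_syncpt_wait_timeout()
 * comes from this file.
 */
static int example_wait_for_syncpt(struct nvhost_syncpt *sp, u32 id, u32 thresh)
{
        u32 completed = 0;
        u32 timeout = (u32)msecs_to_jiffies(100);       /* bounded wait */
        int err;

        /* non-interruptible wait; no completion timestamp requested */
        err = nvhost_syncpt_wait_timeout(sp, id, thresh, timeout,
                                         &completed, NULL, false);
        if (err == -EAGAIN)
                pr_warn("syncpt %u did not reach %u in time\n", id, thresh);

        return err;
}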