Example #1
void nvhost_msenc_deinit(struct nvhost_device *dev)
{
	struct msenc *m = get_msenc(dev);

	/* unpin, free ucode memory */
	if (m->mem_r) {
		mem_op().unpin(nvhost_get_host(dev)->memmgr, m->mem_r, m->pa);
		mem_op().put(nvhost_get_host(dev)->memmgr, m->mem_r);
		m->mem_r = NULL;
	}
}
Example #2
int nvhost_syncpt_nb_pts_ext(struct platform_device *dev)
{
	struct nvhost_master *master = nvhost_get_host(dev);
	struct nvhost_syncpt *sp = &master->syncpt;

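	/* report how many syncpoints this host1x instance implements */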
	return syncpt_to_dev(sp)->info.nb_pts;
}
Example #3
void nvhost_syncpt_set_min_eq_max_ext(struct platform_device *dev, u32 id)
{
	struct nvhost_master *master = nvhost_get_host(dev);
	struct nvhost_syncpt *sp = &master->syncpt;
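	/* advance the min shadow to max so every outstanding wait expires,
	   then write the shadow value out to the hardware syncpoint */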
	atomic_set(&sp->min_val[id], atomic_read(&sp->max_val[id]));
	syncpt_op().reset(sp, id);
}
Example #4
static int show_channels(struct device *dev, void *data)
{
	struct nvhost_channel *ch;
	struct nvhost_device *nvdev = to_nvhost_device(dev);
	struct output *o = data;
	struct nvhost_master *m;

	if (nvdev == NULL)
		return 0;

	m = nvhost_get_host(nvdev);
	ch = nvdev->channel;
	if (ch) {
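		/* dump FIFO and CDMA state only while the channel is in use */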
		mutex_lock(&ch->reflock);
		if (ch->refcount) {
			mutex_lock(&ch->cdma.lock);
			nvhost_get_chip_ops()->debug.show_channel_fifo(
				m, ch, o, nvdev->index);
			nvhost_get_chip_ops()->debug.show_channel_cdma(
				m, ch, o, nvdev->index);
			mutex_unlock(&ch->cdma.lock);
		}
		mutex_unlock(&ch->reflock);
	}

	return 0;
}
Example #5
int nvhost_syncpt_is_expired_ext(struct platform_device *dev,
				 u32 id, u32 thresh)
{
	struct nvhost_master *master = nvhost_get_host(dev);
	struct nvhost_syncpt *sp = &master->syncpt;
	return nvhost_syncpt_is_expired(sp, id, thresh);
}
Example #6
static struct nvhost_channel *t30_alloc_nvhost_channel(
		struct nvhost_device *dev)
{
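	/* the device index selects the channel, capped by the chip's channel count */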
	return nvhost_alloc_channel_internal(dev->index,
		nvhost_get_host(dev)->info.nb_channels,
		&t30_num_alloc_channels);
}
Example #7
static int set_submit(struct nvhost_channel_userctx *ctx)
{
	struct nvhost_device *ndev = ctx->ch->dev;
	struct nvhost_master *host = nvhost_get_host(ndev);

	/* submit should have at least 1 cmdbuf */
	if (!ctx->hdr.num_cmdbufs ||
			!nvhost_syncpt_is_valid(&host->syncpt,
				ctx->hdr.syncpt_id))
		return -EIO;

	if (!ctx->nvmap) {
		dev_err(&ndev->dev, "no nvmap context set\n");
		return -EFAULT;
	}

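	/* (re)allocate the job to match the size of this submit */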
	ctx->job = nvhost_job_realloc(ctx->job,
			ctx->hwctx,
			&ctx->hdr,
			ctx->nvmap,
			ctx->priority,
			ctx->clientid);
	if (!ctx->job)
		return -ENOMEM;
	ctx->job->timeout = ctx->timeout;

	if (ctx->hdr.submit_version >= NVHOST_SUBMIT_VERSION_V2)
		ctx->num_relocshifts = ctx->hdr.num_relocs;

	return 0;
}
Example #8
static int show_channels(struct platform_device *pdev, void *data)
{
	struct nvhost_channel *ch;
	struct output *o = data;
	struct nvhost_master *m;
	struct nvhost_device_data *pdata;

	if (pdev == NULL)
		return 0;

	pdata = platform_get_drvdata(pdev);
	m = nvhost_get_host(pdev);
	ch = pdata->channel;
	if (ch) {
		int locked = mutex_trylock(&ch->reflock);
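		/* best effort in this debug path: dump even when the lock is
		   contended, and only unlock below if the trylock succeeded */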
		if (ch->refcount) {
			mutex_lock(&ch->cdma.lock);
			nvhost_get_chip_ops()->debug.show_channel_fifo(
				m, ch, o, pdata->index);
			nvhost_get_chip_ops()->debug.show_channel_cdma(
				m, ch, o, pdata->index);
			mutex_unlock(&ch->cdma.lock);
		}
		if (locked)
			mutex_unlock(&ch->reflock);
	}

	return 0;
}
Example #9
void nvhost_gr2d_t30_finalize_poweron(struct nvhost_device *dev)
{
	void __iomem *sync_aperture = nvhost_get_host(dev)->sync_aperture;

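	/* assert module teardown for the EPP and 2D units in a single write */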
	writel(host1x_sync_mod_teardown_epp_teardown_f(1)
			+ host1x_sync_mod_teardown_gr2d_teardown_f(1),
			sync_aperture + host1x_sync_mod_teardown_r());
}
Example #10
static struct nvhost_channel *t114_alloc_nvhost_channel(
	struct platform_device *dev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);
	return nvhost_alloc_channel_internal(pdata->index,
		nvhost_get_host(dev)->info.nb_channels,
		&t114_num_alloc_channels);
}
Example #11
int nvhost_syncpt_wait_timeout_ext(struct platform_device *dev, u32 id,
	u32 thresh, u32 timeout, u32 *value, struct timespec *ts)
{
	struct nvhost_master *master = nvhost_get_host(dev);
	struct nvhost_syncpt *sp = &master->syncpt;
	return nvhost_syncpt_wait_timeout(sp, id, thresh, timeout, value, ts,
			false);
}
Example #12
void nvhost_syncpt_cpu_set_wait_base(struct platform_device *pdev, u32 id,
					u32 val)
{
	struct nvhost_syncpt *sp = &(nvhost_get_host(pdev)->syncpt);

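	/* update the shadow first, then propagate it to the hardware wait base */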
	sp->base_val[id] = val;
	syncpt_op().reset_wait_base(sp, id);
	wmb();
}
Example #13
const char *nvhost_syncpt_get_name(struct platform_device *pdev, int id)
{
	struct nvhost_master *host = nvhost_get_host(pdev);
	struct nvhost_syncpt *sp = &host->syncpt;
	const char *name = NULL;

	name = sp->syncpt_names[id];

	return name ? name : "";
}
Example #14
void nvhost_msenc_deinit(struct platform_device *dev)
{
	struct msenc *m = get_msenc(dev);

	/* unpin, free ucode memory */
	if (m->mapped) {
		mem_op().munmap(m->mem_r, m->mapped);
		m->mapped = NULL;
	}
	if (m->pa) {
		mem_op().unpin(nvhost_get_host(dev)->memmgr, m->mem_r,
			m->pa);
		m->pa = NULL;
	}
	if (m->mem_r) {
		mem_op().put(nvhost_get_host(dev)->memmgr, m->mem_r);
		m->mem_r = NULL;
	}
}
Example #15
static void save_push_v1(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma)
{
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx);

	/* wait for 3d idle */
	nvhost_cdma_push(cdma,
			nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
			nvhost_opcode_imm_incr_syncpt(
				host1x_uclass_incr_syncpt_cond_op_done_v(),
				p->syncpt));
	nvhost_cdma_push(cdma,
			nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					host1x_uclass_wait_syncpt_base_r(), 1),
			nvhost_class_host_wait_syncpt_base(p->syncpt,
							p->waitbase, 1));
	/* back to 3d */
	nvhost_cdma_push(cdma,
			nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
			NVHOST_OPCODE_NOOP);

	/* invalidate the FDC to prevent cache-coherency issues across GPUs
	   note that we assume FDC_CONTROL_0 is left in the reset state by all
	   contexts.  the invalidate bit will clear itself, so the register
	   should be unchanged after this */
	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(AR3D_FDC_CONTROL_0,
			AR3D_FDC_CONTROL_0_RESET_VAL
				| AR3D_FDC_CONTROL_0_INVALIDATE),
		NVHOST_OPCODE_NOOP);

	/* set register set 0 and 1 register read memory output addresses,
	   and send their reads to memory */

	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 2),
		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1));
	nvhost_cdma_push(cdma,
		nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
		ctx->restore_phys + restore_set1_offset * 4);

	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 1),
		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1));
	nvhost_cdma_push(cdma,
		nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
		ctx->restore_phys);
	/* gather the save buffer */
	nvhost_cdma_push_gather(cdma,
			nvhost_get_host(nctx->channel->dev)->memmgr,
			p->save_buf,
			0,
			nvhost_opcode_gather(p->save_size),
			p->save_phys);
}
Example #16
struct nvhost_hwctx_handler *nvhost_gr3d_t114_ctxhandler_init(
		u32 syncpt, u32 waitbase,
		struct nvhost_channel *ch)
{
	struct mem_mgr *memmgr;
	u32 *save_ptr;
	struct host1x_hwctx_handler *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;

	memmgr = nvhost_get_host(ch->dev)->memmgr;

	p->h.syncpt = syncpt;
	p->h.waitbase = waitbase;

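	/* the first pass with a NULL pointer only computes the save buffer size */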
	setup_save(p, NULL);

	p->save_buf = nvhost_memmgr_alloc(memmgr, p->save_size * 4, 32,
				mem_mgr_flag_write_combine);
	if (IS_ERR(p->save_buf))
		goto fail_alloc;

	save_ptr = nvhost_memmgr_mmap(p->save_buf);
	if (!save_ptr)
		goto fail_mmap;

	p->save_sgt = nvhost_memmgr_pin(memmgr, p->save_buf, &ch->dev->dev);
	if (IS_ERR(p->save_sgt))
		goto fail_pin;
	p->save_phys = sg_dma_address(p->save_sgt->sgl);

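	/* the second pass writes the actual save commands into the mapped buffer */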
	setup_save(p, save_ptr);

	nvhost_memmgr_munmap(p->save_buf, save_ptr);

	p->save_slots = 5;
	p->h.alloc = ctx3d_alloc_v1;
	p->h.save_push = save_push_v1;
	p->h.restore_push = nvhost_3dctx_restore_push;
	p->h.save_service = NULL;
	p->h.get = nvhost_3dctx_get;
	p->h.put = nvhost_3dctx_put;

	return &p->h;

fail_pin:
	nvhost_memmgr_munmap(p->save_buf, save_ptr);
fail_mmap:
	nvhost_memmgr_put(memmgr, p->save_buf);
fail_alloc:
	kfree(p);
	return NULL;
}
Example #17
struct nvhost_hwctx_handler *nvhost_gr3d_t20_ctxhandler_init(
		u32 syncpt, u32 waitbase,
		struct nvhost_channel *ch)
{
	struct mem_mgr *memmgr;
	u32 *save_ptr;
	struct host1x_hwctx_handler *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	memmgr = nvhost_get_host(ch->dev)->memmgr;

	p->syncpt = syncpt;
	p->waitbase = waitbase;

	setup_save(p, NULL);

	p->save_buf = mem_op().alloc(memmgr, p->save_size * sizeof(u32), 32,
				mem_mgr_flag_write_combine);
	if (IS_ERR_OR_NULL(p->save_buf))
		goto fail_alloc;

	save_ptr = mem_op().mmap(p->save_buf);
	if (IS_ERR_OR_NULL(save_ptr))
		goto fail_mmap;

	p->save_sgt = mem_op().pin(memmgr, p->save_buf);
	if (IS_ERR_OR_NULL(p->save_sgt))
		goto fail_pin;
	p->save_phys = sg_dma_address(p->save_sgt->sgl);

	setup_save(p, save_ptr);

	mem_op().munmap(p->save_buf, save_ptr);

	p->save_slots = 1;
	p->h.alloc = ctx3d_alloc_v0;
	p->h.save_push = save_push_v0;
	p->h.save_service = ctx3d_save_service;
	p->h.get = nvhost_3dctx_get;
	p->h.put = nvhost_3dctx_put;

	return &p->h;

fail_pin:
	mem_op().munmap(p->save_buf, save_ptr);
fail_mmap:
	mem_op().put(memmgr, p->save_buf);
fail_alloc:
	kfree(p);
	return NULL;
}
Example #18
int nvhost_vic03_init(struct platform_device *dev)
{
	int err = 0;
	struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
	struct vic03 *v = get_vic03(dev);
	char *fw_name;

	nvhost_dbg_fn("in dev:%p v:%p", dev, v);

	fw_name = vic_get_fw_name(dev);
	if (!fw_name) {
		dev_err(&dev->dev, "couldn't determine firmware name");
		return -EINVAL;
	}

	if (!v) {
		nvhost_dbg_fn("allocating vic03 support");
		v = kzalloc(sizeof(*v), GFP_KERNEL);
		if (!v) {
			dev_err(&dev->dev, "couldn't alloc vic03 support");
			err = -ENOMEM;
			goto clean_up;
		}
		set_vic03(dev, v);
		v->is_booted = false;
	}
	nvhost_dbg_fn("primed dev:%p v:%p", dev, v);

	v->host = nvhost_get_host(dev);

	if (!v->ucode.valid)
		err = vic03_read_ucode(dev, fw_name);
	if (err)
		goto clean_up;

	kfree(fw_name);
	fw_name = NULL;

	nvhost_module_busy(dev);
	err = vic03_boot(dev);
	nvhost_module_idle(dev);
	if (err)
		goto clean_up;

	if (pdata->scaling_init)
		nvhost_scale_hw_init(dev);

	return 0;

 clean_up:
	kfree(fw_name);
	nvhost_err(&dev->dev, "failed");

	return err;
}
Example #19
static void save_push_v0(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma)
{
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx);

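	/* one gather is enough to run the pre-built context save program */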
	nvhost_cdma_push_gather(cdma,
			nvhost_get_host(nctx->channel->dev)->memmgr,
			p->save_buf,
			0,
			nvhost_opcode_gather(p->save_size),
			p->save_phys);
}
Example #20
struct nvhost_hwctx_handler *nvhost_gr3d_t30_ctxhandler_init(
		u32 syncpt, u32 waitbase,
		struct nvhost_channel *ch)
{
	struct nvmap_client *nvmap;
	unsigned int register_sets;
	u32 *save_ptr;
	struct host1x_hwctx_handler *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;

	nvmap = nvhost_get_host(ch->dev)->nvmap;

	register_sets = tegra_gpu_register_sets();
	BUG_ON(register_sets == 0 || register_sets > 2);

	p->syncpt = syncpt;
	p->waitbase = waitbase;

	setup_save(p, NULL);

	p->save_buf = nvmap_alloc(nvmap, p->save_size * 4, 32,
				NVMAP_HANDLE_WRITE_COMBINE, 0);
	if (IS_ERR(p->save_buf)) {
		kfree(p);
		return NULL;
	}

	p->save_slots = 6;
	if (register_sets == 2)
		p->save_slots += 2;

	save_ptr = nvmap_mmap(p->save_buf);
	if (!save_ptr) {
		nvmap_free(nvmap, p->save_buf);
		kfree(p);
		return NULL;
	}

	p->save_phys = nvmap_pin(nvmap, p->save_buf);

	setup_save(p, save_ptr);

	p->h.alloc = ctx3d_alloc_v1;
	p->h.save_push = save_push_v1;
	p->h.save_service = NULL;
	p->h.get = nvhost_3dctx_get;
	p->h.put = nvhost_3dctx_put;

	return &p->h;
}
Example #21
void nvhost_syncpt_cpu_incr_ext(struct platform_device *dev, u32 id)
{
	struct platform_device *pdev;
	struct nvhost_syncpt *sp;

	BUG_ON(!nvhost_get_parent(dev));

	/* get the parent */
	pdev = to_platform_device(dev->dev.parent);
	sp = &(nvhost_get_host(pdev)->syncpt);

	nvhost_syncpt_cpu_incr(sp, id);
}
Example #22
void nvhost_3dctx_free(struct kref *ref)
{
	struct nvhost_hwctx *nctx = container_of(ref, struct nvhost_hwctx, ref);
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct mem_mgr *memmgr = nvhost_get_host(nctx->channel->dev)->memmgr;

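	/* release in reverse order of allocation: munmap, unpin, then put */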
	if (ctx->restore_virt)
		mem_op().munmap(ctx->restore, ctx->restore_virt);

	mem_op().unpin(memmgr, ctx->restore, ctx->restore_sgt);
	mem_op().put(memmgr, ctx->restore);
	kfree(ctx);
}
Example #23
u32 nvhost_syncpt_read_ext(struct platform_device *dev, u32 id)
{
	struct platform_device *pdev;
	struct nvhost_syncpt *sp;

	BUG_ON(!nvhost_get_parent(dev));

	/* get the parent */
	pdev = to_platform_device(dev->dev.parent);
	sp = &(nvhost_get_host(pdev)->syncpt);

	return nvhost_syncpt_read(sp, id);
}
Example #24
static void save_push_v1(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma)
{
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx);

	/* wait for 3d idle */
	nvhost_cdma_push(cdma,
			nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
			nvhost_opcode_imm_incr_syncpt(
				host1x_uclass_incr_syncpt_cond_op_done_v(),
				p->h.syncpt));
	nvhost_cdma_push(cdma,
			nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					host1x_uclass_wait_syncpt_base_r(), 1),
			nvhost_class_host_wait_syncpt_base(p->h.syncpt,
							p->h.waitbase, 1));
	/* back to 3d */
	nvhost_cdma_push(cdma,
			nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
			NVHOST_OPCODE_NOOP);

	/* invalidate the FDC to prevent cache-coherency issues across GPUs
	   note that we assume FDC_CONTROL_0 is left in the reset state by all
	   contexts.  the invalidate bit will clear itself, so the register
	   should be unchanged after this */
	/* bug 990395 T114 HW no longer can automatically clear the invalidate
	   bit. Luckily that the ctx switching always happens on the push
	   buffer boundary, and 3d driver inserts a FDC flush & invalidate &
	   clear the invalidate bit in the beginning of the each push buffer.
	   So we do not need to explicitly clear the invalidate bit here. */

	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(AR3D_FDC_CONTROL_0,
			AR3D_FDC_CONTROL_0_RESET_VAL
				| AR3D_FDC_CONTROL_0_INVALIDATE),
		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1));
	/* bug 972588 requires SW to clear the reg 0x403 and 0xe45 */
	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(0xe45, 0),
		nvhost_opcode_imm(0x403, 0));
	nvhost_cdma_push(cdma,
		nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
		ctx->restore_phys);
	/* gather the save buffer */
	nvhost_cdma_push_gather(cdma,
			nvhost_get_host(nctx->channel->dev)->memmgr,
			p->save_buf,
			0,
			nvhost_opcode_gather(p->save_size),
			p->save_phys);
}
Example #25
/**
 * Resets syncpoint and waitbase values of a
 * single client to sw shadows
 */
void nvhost_syncpt_reset_client(struct platform_device *pdev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
	struct nvhost_master *nvhost_master = nvhost_get_host(pdev);
	u32 id;

	BUG_ON(!(syncpt_op().reset && syncpt_op().reset_wait_base));

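	/* pdata->syncpts and pdata->waitbases are bitmasks of the IDs this client owns */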
	for_each_set_bit(id, (unsigned long *)&pdata->syncpts, BITS_PER_LONG)
		syncpt_op().reset(&nvhost_master->syncpt, id);
	for_each_set_bit(id, (unsigned long *)&pdata->waitbases, BITS_PER_LONG)
		syncpt_op().reset_wait_base(&nvhost_master->syncpt, id);
	wmb();
}
Example #26
int nvhost_syncpt_wait_timeout_ext(struct platform_device *dev, u32 id,
	u32 thresh, u32 timeout, u32 *value)
{
	struct platform_device *pdev;
	struct nvhost_syncpt *sp;

	BUG_ON(!nvhost_get_parent(dev));

	/* get the parent */
	pdev = to_platform_device(dev->dev.parent);
	sp = &(nvhost_get_host(pdev)->syncpt);

	return nvhost_syncpt_wait_timeout(sp, id, thresh, timeout, value);
}
Example #27
struct nvhost_hwctx_handler *nvhost_gr3d_t30_ctxhandler_init(
		u32 syncpt, u32 waitbase,
		struct nvhost_channel *ch)
{
	struct mem_mgr *memmgr;
	u32 *save_ptr;
	struct host1x_hwctx_handler *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;

	memmgr = nvhost_get_host(ch->dev)->memmgr;

	p->syncpt = syncpt;
	p->waitbase = waitbase;

	setup_save(p, NULL);

	p->save_buf = mem_op().alloc(memmgr, p->save_size * 4, 32,
				mem_mgr_flag_write_combine);
	if (IS_ERR_OR_NULL(p->save_buf)) {
		kfree(p);
		return NULL;
	}

	p->save_slots = 8;

	save_ptr = mem_op().mmap(p->save_buf);
	if (!save_ptr) {
		mem_op().put(memmgr, p->save_buf);
		kfree(p);
		return NULL;
	}

	p->save_phys = mem_op().pin(memmgr, p->save_buf);

	setup_save(p, save_ptr);

	mem_op().munmap(p->save_buf, save_ptr);

	p->h.alloc = ctx3d_alloc_v1;
	p->h.save_push = save_push_v1;
	p->h.save_service = NULL;
	p->h.get = nvhost_3dctx_get;
	p->h.put = nvhost_3dctx_put;

	return &p->h;
}
Example #28
static void ctx3d_save_service(struct nvhost_hwctx *nctx)
{
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);

	u32 *ptr = (u32 *)ctx->restore_virt + RESTORE_BEGIN_SIZE;
	unsigned int pending = 0;

	ptr = save_regs_v0(ptr, &pending, nctx->channel,
			ctxsave_regs_3d_global,
			ARRAY_SIZE(ctxsave_regs_3d_global));

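	/* make sure the saved registers have reached memory before signalling */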
	wmb();
	nvhost_syncpt_cpu_incr(&nvhost_get_host(nctx->channel->dev)->syncpt,
			host1x_hwctx_handler(ctx)->syncpt);
}
Example #29
/*** ctx3d ***/
struct host1x_hwctx *nvhost_3dctx_alloc_common(struct host1x_hwctx_handler *p,
		struct nvhost_channel *ch, bool map_restore)
{
	struct mem_mgr *memmgr = nvhost_get_host(ch->dev)->memmgr;
	struct host1x_hwctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;
	ctx->restore = mem_op().alloc(memmgr, p->restore_size * 4, 32,
		map_restore ? mem_mgr_flag_write_combine
			    : mem_mgr_flag_uncacheable);
	if (IS_ERR_OR_NULL(ctx->restore))
		goto fail_alloc;

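	/* map the restore buffer only when the caller needs to patch it on the CPU */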
	if (map_restore) {
		ctx->restore_virt = mem_op().mmap(ctx->restore);
		if (IS_ERR_OR_NULL(ctx->restore_virt))
			goto fail_mmap;
	} else
		ctx->restore_virt = NULL;

	ctx->restore_sgt = mem_op().pin(memmgr, ctx->restore);
	if (IS_ERR_OR_NULL(ctx->restore_sgt))
		goto fail_pin;
	ctx->restore_phys = sg_dma_address(ctx->restore_sgt->sgl);

	kref_init(&ctx->hwctx.ref);
	ctx->hwctx.h = &p->h;
	ctx->hwctx.channel = ch;
	ctx->hwctx.valid = false;
	ctx->save_incrs = p->save_incrs;
	ctx->save_thresh = p->save_thresh;
	ctx->save_slots = p->save_slots;

	ctx->restore_size = p->restore_size;
	ctx->restore_incrs = p->restore_incrs;
	return ctx;

fail_pin:
	if (map_restore)
		mem_op().munmap(ctx->restore, ctx->restore_virt);
fail_mmap:
	mem_op().put(memmgr, ctx->restore);
fail_alloc:
	kfree(ctx);
	return NULL;
}
Example #30
u32 nvhost_syncpt_read_ext(struct platform_device *dev, u32 id)
{
	struct platform_device *pdev;
	struct nvhost_syncpt *sp;

	if (!nvhost_get_parent(dev)) {
		dev_err(&dev->dev, "Read called with wrong dev\n");
		return 0;
	}

	/* get the parent */
	pdev = to_platform_device(dev->dev.parent);
	sp = &(nvhost_get_host(pdev)->syncpt);

	return nvhost_syncpt_read(sp, id);
}