/*
 * Build the v1 context-save command stream for a 3D hardware context.
 *
 * Pushes a fixed sequence of host1x opcode pairs into @cdma:
 *   1. wait for 3D idle (op-done syncpt increment + wait on its base),
 *   2. invalidate the FDC,
 *   3. program the register-set 0/1 memory-output addresses,
 *   4. gather the prebuilt save buffer that performs the actual save.
 * The opcode order is significant; do not reorder these pushes.
 */
static void save_push_v1(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma)
{
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx);

	/* wait for 3d idle */
	nvhost_cdma_push(cdma,
		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
		nvhost_opcode_imm_incr_syncpt(
			host1x_uclass_incr_syncpt_cond_op_done_v(),
			p->syncpt));
	nvhost_cdma_push(cdma,
		nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			host1x_uclass_wait_syncpt_base_r(), 1),
		nvhost_class_host_wait_syncpt_base(p->syncpt,
			p->waitbase, 1));
	/* back to 3d */
	nvhost_cdma_push(cdma,
		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
		NVHOST_OPCODE_NOOP);

	/*
	 * invalidate the FDC to prevent cache-coherency issues across
	 * GPUs.  Note that we assume FDC_CONTROL_0 is left in the reset
	 * state by all contexts.  The invalidate bit will clear itself,
	 * so the register should be unchanged after this.
	 */
	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(AR3D_FDC_CONTROL_0,
			AR3D_FDC_CONTROL_0_RESET_VAL
				| AR3D_FDC_CONTROL_0_INVALIDATE),
		NVHOST_OPCODE_NOOP);

	/*
	 * set register set 0 and 1 register read memory output addresses,
	 * and send their reads to memory
	 */
	/* register set 1 first (write mask bit 1) ... */
	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 2),
		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1));
	nvhost_cdma_push(cdma,
		nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
		ctx->restore_phys + restore_set1_offset * 4);
	/* ... then register set 0 (write mask bit 0) */
	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 1),
		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1));
	nvhost_cdma_push(cdma,
		nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
		ctx->restore_phys);

	/* gather the save buffer */
	nvhost_cdma_push_gather(cdma,
		nvhost_get_host(nctx->channel->dev)->memmgr,
		p->save_buf, 0,
		nvhost_opcode_gather(p->save_size),
		p->save_phys);
}
/*
 * Queue the userspace-supplied context-restore gather.
 *
 * Emits a single gather opcode into @cdma that replays the restore
 * buffer registered by userspace (buffer handle, offset, size and DMA
 * address all come from the user hwctx).
 */
static void user_hwctx_restore_push(struct nvhost_hwctx *nctx,
		struct nvhost_cdma *cdma)
{
	struct user_hwctx *uctx = to_user_hwctx(nctx);

	nvhost_cdma_push_gather(cdma,
		nctx->memmgr,
		uctx->restore,
		uctx->restore_offset,
		nvhost_opcode_gather(uctx->restore_size),
		nvhost_memmgr_dma_addr(uctx->restore_sgt));
}
/*
 * Queue the userspace-supplied context-save gather.
 *
 * Mirrors user_hwctx_restore_push(): one gather opcode into @cdma that
 * runs the save buffer registered by userspace, addressed via the DMA
 * address of the first scatterlist entry of its sg table.
 */
static void user_hwctx_save_push(struct nvhost_hwctx *nctx,
		struct nvhost_cdma *cdma)
{
	struct user_hwctx *uctx = to_user_hwctx(nctx);

	nvhost_cdma_push_gather(cdma,
		nctx->memmgr,
		uctx->save_buf,
		uctx->save_offset,
		nvhost_opcode_gather(uctx->save_size),
		sg_dma_address(uctx->save_sgt->sgl));
}
/*
 * Build the v0 context-save command stream.
 *
 * The v0 path needs no idle/FDC preamble: the whole save is a single
 * gather of the handler's prebuilt save buffer.
 */
static void save_push_v0(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma)
{
	struct host1x_hwctx *hwctx = to_host1x_hwctx(nctx);
	struct host1x_hwctx_handler *handler = host1x_hwctx_handler(hwctx);

	nvhost_cdma_push_gather(cdma,
		nvhost_get_host(nctx->channel->dev)->memmgr,
		handler->save_buf,
		0,
		nvhost_opcode_gather(handler->save_size),
		handler->save_phys);
}
/*
 * Build the v1 context-save command stream (T114 variant).
 *
 * Sequence pushed into @cdma:
 *   1. wait for 3D idle via op-done syncpt increment + base wait,
 *   2. invalidate the FDC and enable memory-output reads,
 *   3. clear registers 0x403 and 0xe45 (bug 972588 workaround),
 *   4. program the memory-output address and gather the save buffer.
 * Syncpt/waitbase come from the shared handler header (p->h).
 * The opcode order is significant; do not reorder these pushes.
 */
static void save_push_v1(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma)
{
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx);

	/* wait for 3d idle */
	nvhost_cdma_push(cdma,
		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
		nvhost_opcode_imm_incr_syncpt(
			host1x_uclass_incr_syncpt_cond_op_done_v(),
			p->h.syncpt));
	nvhost_cdma_push(cdma,
		nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			host1x_uclass_wait_syncpt_base_r(), 1),
		nvhost_class_host_wait_syncpt_base(p->h.syncpt,
			p->h.waitbase, 1));
	/* back to 3d */
	nvhost_cdma_push(cdma,
		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
		NVHOST_OPCODE_NOOP);

	/*
	 * invalidate the FDC to prevent cache-coherency issues across
	 * GPUs.  Note that we assume FDC_CONTROL_0 is left in the reset
	 * state by all contexts.  The invalidate bit will clear itself,
	 * so the register should be unchanged after this.
	 */
	/*
	 * bug 990395: T114 HW can no longer automatically clear the
	 * invalidate bit.  Luckily, context switching always happens on
	 * a push buffer boundary, and the 3d driver inserts an FDC flush
	 * & invalidate & clears the invalidate bit at the beginning of
	 * each push buffer, so we do not need to explicitly clear the
	 * invalidate bit here.
	 */
	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(AR3D_FDC_CONTROL_0,
			AR3D_FDC_CONTROL_0_RESET_VAL
				| AR3D_FDC_CONTROL_0_INVALIDATE),
		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1));

	/* bug 972588 requires SW to clear the reg 0x403 and 0xe45 */
	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(0xe45, 0),
		nvhost_opcode_imm(0x403, 0));

	nvhost_cdma_push(cdma,
		nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
		ctx->restore_phys);

	/* gather the save buffer */
	nvhost_cdma_push_gather(cdma,
		nvhost_get_host(nctx->channel->dev)->memmgr,
		p->save_buf, 0,
		nvhost_opcode_gather(p->save_size),
		p->save_phys);
}
/*
 * Queue the VIC context-restore command stream.
 *
 * First selects the VIC class (second opcode slot is a NOOP filler),
 * then gathers the context's restore buffer at its IOVA.
 */
static void ctxvic03_restore_push(struct nvhost_hwctx *nctx,
		struct nvhost_cdma *cdma)
{
	struct host1x_hwctx *hctx = to_host1x_hwctx(nctx);

	/* switch the channel to the VIC class before replaying state */
	nvhost_cdma_push(cdma,
		nvhost_opcode_setclass(NV_GRAPHICS_VIC_CLASS_ID, 0, 0),
		NVHOST_OPCODE_NOOP);

	nvhost_cdma_push_gather(cdma,
		hctx->cpuva,
		hctx->iova,
		0,
		nvhost_opcode_gather(hctx->restore_size),
		hctx->iova);
}
/*
 * Build the v1 context-save command stream (dual-register-set variant).
 *
 * Sequence pushed into @cdma:
 *   1. wait for 3D idle via op-done syncpt increment + base wait,
 *   2. if the chip has two register sets, program set 1's memory-output
 *      address,
 *   3. program set 0's memory-output address,
 *   4. gather the prebuilt save buffer.
 * The opcode order is significant; do not reorder these pushes.
 */
static void save_push_v1(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma)
{
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx);

	/* wait for 3d idle */
	nvhost_cdma_push(cdma,
		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
		nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE,
			p->syncpt));
	nvhost_cdma_push(cdma,
		nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1),
		nvhost_class_host_wait_syncpt_base(p->syncpt,
			p->waitbase, 1));
	/* back to 3d */
	nvhost_cdma_push(cdma,
		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
		NVHOST_OPCODE_NOOP);

	/*
	 * set register set 0 and 1 register read memory output addresses,
	 * and send their reads to memory
	 */
	if (register_sets == 2) {
		nvhost_cdma_push(cdma,
			nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 2),
			nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS,
				1));
		/*
		 * 0x904 is presumably the set-1 alias of
		 * AR3D_DW_MEMORY_OUTPUT_ADDRESS -- TODO confirm against
		 * the register headers
		 */
		nvhost_cdma_push(cdma,
			nvhost_opcode_nonincr(0x904, 1),
			ctx->restore_phys + restore_set1_offset * 4);
	}
	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 1),
		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1));
	nvhost_cdma_push(cdma,
		nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
		ctx->restore_phys);

	/* gather the save buffer */
	nvhost_cdma_push_gather(cdma,
		nvhost_get_host(nctx->channel->dev)->nvmap,
		p->save_buf->handle, 0,
		nvhost_opcode_gather(p->save_size),
		p->save_phys);
}
/*
 * Build the v1 context-save command stream (legacy signature: cdma
 * first, hwctx second; uses the global NVSYNCPT_3D/NVWAITBASE_3D
 * syncpoint IDs and the file-scope save_size/save_phys -- TODO confirm
 * those globals against the rest of this file).
 *
 * The final gather passes NVHOST_CDMA_PUSH_GATHER_CTXSAVE sentinels in
 * place of a memmgr/handle pair, marking this as a context-save gather
 * for the CDMA layer.
 * The opcode order is significant; do not reorder these pushes.
 */
static void save_push_v1(struct nvhost_cdma *cdma, struct nvhost_hwctx *ctx)
{
	/* wait for 3d idle */
	nvhost_cdma_push(cdma,
		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
		nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE,
			NVSYNCPT_3D));
	nvhost_cdma_push(cdma,
		nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1),
		nvhost_class_host_wait_syncpt_base(NVSYNCPT_3D,
			NVWAITBASE_3D, 1));
	/* back to 3d */
	nvhost_cdma_push(cdma,
		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
		NVHOST_OPCODE_NOOP);

	/*
	 * set register set 0 and 1 register read memory output addresses,
	 * and send their reads to memory
	 */
	if (register_sets == 2) {
		nvhost_cdma_push(cdma,
			nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 2),
			nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS,
				1));
		/*
		 * 0x904 is presumably the set-1 alias of
		 * AR3D_DW_MEMORY_OUTPUT_ADDRESS -- TODO confirm against
		 * the register headers
		 */
		nvhost_cdma_push(cdma,
			nvhost_opcode_nonincr(0x904, 1),
			ctx->restore_phys + restore_set1_offset * 4);
	}
	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 1),
		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1));
	nvhost_cdma_push(cdma,
		nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
		ctx->restore_phys);

	/* gather the save buffer */
	nvhost_cdma_push_gather(cdma,
		(void *)NVHOST_CDMA_PUSH_GATHER_CTXSAVE,
		(void *)NVHOST_CDMA_PUSH_GATHER_CTXSAVE,
		nvhost_opcode_gather(save_size),
		save_phys);
}
/**
 * Push two words into a push buffer slot.
 * Blocks as necessary if the push buffer is full.
 *
 * @cdma: channel's CDMA instance to push into
 * @op1:  first opcode word of the slot
 * @op2:  second opcode word of the slot
 *
 * Thin wrapper around nvhost_cdma_push_gather() with no gather buffer
 * (NULL).  NOTE(review): other call sites in this file pass
 * push_gather six arguments; this four-argument form belongs to a
 * different driver revision -- confirm against the matching
 * nvhost_cdma_push_gather() prototype.
 */
void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2)
{
	nvhost_cdma_push_gather(cdma, NULL, op1, op2);
}