/* create buffers for saving/restoring registers, constants, & GMEM */
/*
 * Allocate the per-context GMEM shadow buffer and build the indirect
 * command streams that copy GMEM out to the shadow (save) and back in
 * (restore).  Returns 0 on success or the kgsl_allocate() error code.
 * Side effects: sets tmp_ctx.gmem_base and advances tmp_ctx.cmd past
 * every command buffer built here.
 */
static int a2xx_ctxt_gmem_shadow(struct adreno_device *adreno_dev,
	struct adreno_context *drawctxt)
{
	int result;

	/* Size the shadow to cover the device's GMEM aperture. */
	calc_gmemsize(&drawctxt->context_gmem_shadow,
		adreno_dev->gmemspace.sizebytes);
	tmp_ctx.gmem_base = adreno_dev->gmemspace.gpu_base;

	result = kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow,
		drawctxt->pagetable, drawctxt->context_gmem_shadow.size);

	if (result)
		return result;

	/* we've allocated the shadow, when swapped out, GMEM must be saved. */
	drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW | CTXT_FLAGS_GMEM_SAVE;

	/* blank out gmem shadow. */
	kgsl_sharedmem_set(&drawctxt->context_gmem_shadow.gmemshadow, 0, 0,
			   drawctxt->context_gmem_shadow.size);

	/* build quad vertex buffer */
	build_quad_vtxbuff(drawctxt, &drawctxt->context_gmem_shadow,
		&tmp_ctx.cmd);

	/* build TP0_CHICKEN register restore command buffer */
	tmp_ctx.cmd = build_chicken_restore_cmds(drawctxt);

	/* build indirect command buffers to save & restore gmem */
	/* Idle because we are reading PM override registers */
	adreno_idle(&adreno_dev->dev, KGSL_TIMEOUT_DEFAULT);
	/*
	 * Each build_*() consumes command-stream space starting at the
	 * current tmp_ctx.cmd and returns the next free slot, so the
	 * save/restore command pointers must be recorded before each call.
	 */
	drawctxt->context_gmem_shadow.gmem_save_commands = tmp_ctx.cmd;
	tmp_ctx.cmd = build_gmem2sys_cmds(adreno_dev, drawctxt,
		&drawctxt->context_gmem_shadow);
	drawctxt->context_gmem_shadow.gmem_restore_commands = tmp_ctx.cmd;
	tmp_ctx.cmd = build_sys2gmem_cmds(adreno_dev, drawctxt,
		&drawctxt->context_gmem_shadow);

	/* Flush CPU caches so the GPU sees the freshly built commands. */
	kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
			    KGSL_CACHE_OP_FLUSH);

	kgsl_cffdump_syncmem(NULL, &drawctxt->context_gmem_shadow.gmemshadow,
			     drawctxt->context_gmem_shadow.gmemshadow.gpuaddr,
			     drawctxt->context_gmem_shadow.gmemshadow.size,
			     false);

	return 0;
}
/*
 * Allocate the per-context GMEM shadow buffer and build the indirect
 * command streams that save GMEM to the shadow and restore it back.
 * Returns 0 on success or the kgsl_allocate() error code.
 * Side effects: sets tmp_ctx.gmem_base and advances tmp_ctx.cmd past
 * every command buffer built here.
 */
static int a2xx_create_gmem_shadow(struct adreno_device *adreno_dev,
	struct adreno_context *drawctxt)
{
	int result;

	/* Size the shadow to cover the device's GMEM aperture. */
	calc_gmemsize(&drawctxt->context_gmem_shadow, adreno_dev->gmem_size);
	tmp_ctx.gmem_base = adreno_dev->gmem_base;

	result = kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow,
		drawctxt->pagetable, drawctxt->context_gmem_shadow.size);

	if (result)
		return result;

	drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW;

	/* Blank out the GMEM shadow. */
	kgsl_sharedmem_set(&drawctxt->context_gmem_shadow.gmemshadow, 0, 0,
			   drawctxt->context_gmem_shadow.size);

	/* Build the quad vertex buffer used by the GMEM save/restore blits. */
	build_quad_vtxbuff(drawctxt, &drawctxt->context_gmem_shadow,
		&tmp_ctx.cmd);

	/*
	 * The TP0_CHICKEN restore commands are only built for contexts
	 * without the preamble flag.
	 */
	if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE))
		tmp_ctx.cmd = build_chicken_restore_cmds(drawctxt);

	/*
	 * Build the indirect command buffers that save & restore GMEM.
	 * Each build_*() consumes space starting at tmp_ctx.cmd and
	 * returns the next free slot, so record the pointers first.
	 */
	drawctxt->context_gmem_shadow.gmem_save_commands = tmp_ctx.cmd;
	tmp_ctx.cmd = build_gmem2sys_cmds(adreno_dev, drawctxt,
		&drawctxt->context_gmem_shadow);
	drawctxt->context_gmem_shadow.gmem_restore_commands = tmp_ctx.cmd;
	tmp_ctx.cmd = build_sys2gmem_cmds(adreno_dev, drawctxt,
		&drawctxt->context_gmem_shadow);

	/* Flush CPU caches so the GPU sees the freshly built commands. */
	kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
			    KGSL_CACHE_OP_FLUSH);

	kgsl_cffdump_syncmem(NULL, &drawctxt->context_gmem_shadow.gmemshadow,
			     drawctxt->context_gmem_shadow.gmemshadow.gpuaddr,
			     drawctxt->context_gmem_shadow.gmemshadow.size,
			     false);

	return 0;
}
static int a2xx_drawctxt_create(struct adreno_device *adreno_dev, struct adreno_context *drawctxt) { int ret; ret = kgsl_allocate(&drawctxt->gpustate, drawctxt->pagetable, _context_size(adreno_dev)); if (ret) return ret; kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0, _context_size(adreno_dev)); tmp_ctx.cmd = tmp_ctx.start = (unsigned int *)((char *)drawctxt->gpustate.hostptr + CMD_OFFSET); if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) { ret = a2xx_create_gpustate_shadow(adreno_dev, drawctxt); if (ret) goto done; drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE; } if (!(drawctxt->flags & CTXT_FLAGS_NOGMEMALLOC)) { ret = a2xx_create_gmem_shadow(adreno_dev, drawctxt); if (ret) goto done; } kgsl_cache_range_op(&drawctxt->gpustate, KGSL_CACHE_OP_FLUSH); kgsl_cffdump_syncmem(NULL, &drawctxt->gpustate, drawctxt->gpustate.gpuaddr, drawctxt->gpustate.size, false); done: if (ret) kgsl_sharedmem_free(&drawctxt->gpustate); return ret; }
/* create buffers for saving/restoring registers, constants, & GMEM */
/*
 * Allocate the gpustate shadow and build the indirect command buffers
 * that save and restore h/w registers, constants, and shader state.
 * Returns 0 on success or the kgsl_allocate() error code.
 * Side effects: resets tmp_ctx.cmd/start to the command area inside the
 * freshly allocated gpustate buffer.
 */
static int a2xx_ctxt_gpustate_shadow(struct adreno_device *adreno_dev,
	struct adreno_context *drawctxt)
{
	int result;

	/* Allocate vmalloc memory to store the gpustate */
	result = kgsl_allocate(&drawctxt->gpustate,
		drawctxt->pagetable, _context_size(adreno_dev));

	if (result)
		return result;

	drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW;

	/* Blank out h/w register, constant, and command buffer shadows. */
	kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0,
			   _context_size(adreno_dev));

	/* set-up command and vertex buffer pointers */
	tmp_ctx.cmd = tmp_ctx.start = (unsigned int *)
		((char *)drawctxt->gpustate.hostptr + CMD_OFFSET);

	/* build indirect command buffers to save & restore regs/constants */
	/* Idle first: the builders read live GPU registers. */
	adreno_idle(&adreno_dev->dev, KGSL_TIMEOUT_DEFAULT);
	build_regrestore_cmds(adreno_dev, drawctxt);
	build_regsave_cmds(adreno_dev, drawctxt);
	build_shader_save_restore_cmds(adreno_dev, drawctxt);

	/* Flush CPU caches so the GPU sees the freshly built commands. */
	kgsl_cache_range_op(&drawctxt->gpustate, KGSL_CACHE_OP_FLUSH);
	kgsl_cffdump_syncmem(NULL, &drawctxt->gpustate,
			     drawctxt->gpustate.gpuaddr,
			     drawctxt->gpustate.size, false);

	return 0;
}
/* create buffers for saving/restoring registers, constants, & GMEM */
/*
 * Allocate the gpustate shadow and build the indirect command buffers
 * that save and restore h/w registers, constants, and shader state.
 * Unlike the a2xx_* variants, this version takes the scratch command
 * context explicitly via @ctx instead of using a file-scope tmp_ctx.
 * Returns 0 on success or the kgsl_allocate() error code.
 */
static int create_gpustate_shadow(struct kgsl_device *device,
	struct adreno_context *drawctxt, struct tmp_ctx *ctx)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int result;

	/* Allocate vmalloc memory to store the gpustate */
	result = kgsl_allocate(&drawctxt->gpustate,
		drawctxt->pagetable, CONTEXT_SIZE);

	if (result)
		return result;

	drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW;

	/* Blank out h/w register, constant, and command buffer shadows. */
	kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0, CONTEXT_SIZE);

	/* set-up command and vertex buffer pointers */
	ctx->cmd = ctx->start = (unsigned int *)
		((char *)drawctxt->gpustate.hostptr + CMD_OFFSET);

	/* build indirect command buffers to save & restore regs/constants */
	/* Idle first: the builders read live GPU registers. */
	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
	build_regrestore_cmds(adreno_dev, drawctxt, ctx);
	build_regsave_cmds(adreno_dev, drawctxt, ctx);
	build_shader_save_restore_cmds(drawctxt, ctx);

	/* Flush CPU caches so the GPU sees the freshly built commands. */
	kgsl_cache_range_op(&drawctxt->gpustate, KGSL_CACHE_OP_FLUSH);

	return 0;
}