void adreno_drawctxt_destroy(struct kgsl_device *device,
			  struct kgsl_context *context)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_context *drawctxt;

	if (context == NULL || context->devctxt == NULL)
		return;

	drawctxt = context->devctxt;

	if (adreno_dev->drawctxt_active == drawctxt) {
		drawctxt->flags &= ~(CTXT_FLAGS_GMEM_SAVE |
				     CTXT_FLAGS_SHADER_SAVE |
				     CTXT_FLAGS_GMEM_SHADOW |
				     CTXT_FLAGS_STATE_SHADOW);

#ifdef CONFIG_MSM_KGSL_GPU_USAGE
		device->current_process_priv = NULL;
#endif

		adreno_drawctxt_switch(adreno_dev, NULL, 0);
	}

	if (device->state != KGSL_STATE_HUNG)
		adreno_idle(device);

	if (adreno_is_a20x(adreno_dev) && adreno_dev->drawctxt_active)
		kgsl_setstate(&device->mmu, adreno_dev->drawctxt_active->id,
			      KGSL_MMUFLAGS_PTUPDATE);

	kgsl_sharedmem_free(&drawctxt->gpustate);
	kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);

	kfree(drawctxt);
	context->devctxt = NULL;
}
void adreno_drawctxt_destroy(struct kgsl_device *device,
			  struct kgsl_context *context)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_context *drawctxt;

	if (context == NULL || context->devctxt == NULL)
		return;

	drawctxt = context->devctxt;

	/* deactivate context */
	if (adreno_dev->drawctxt_active == drawctxt) {
		/* no need to save GMEM or shader, the context is
		 * being destroyed.
		 */
		drawctxt->flags &= ~(CTXT_FLAGS_GMEM_SAVE |
				     CTXT_FLAGS_SHADER_SAVE |
				     CTXT_FLAGS_GMEM_SHADOW |
				     CTXT_FLAGS_STATE_SHADOW);

		adreno_drawctxt_switch(adreno_dev, NULL, 0);
	}

	adreno_idle(device);

	if (adreno_is_a20x(adreno_dev) && adreno_dev->drawctxt_active)
		kgsl_setstate(&device->mmu, adreno_dev->drawctxt_active->id,
			      KGSL_MMUFLAGS_PTUPDATE);

	kgsl_sharedmem_free(&drawctxt->gpustate);
	kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);

	kfree(drawctxt);
	context->devctxt = NULL;
}
static void a2xx_ctxt_restore(struct adreno_device *adreno_dev,
			struct adreno_context *context)
{
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int cmds[5];

	if (context == NULL) {
		/* No context - set the default pagetable and that's it */
		kgsl_mmu_setstate(device, device->mmu.defaultpagetable);
		return;
	}

	KGSL_CTXT_INFO(device, "context flags %08x\n", context->flags);

	cmds[0] = cp_nop_packet(1);
	cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
	cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
	cmds[3] = device->memstore.gpuaddr +
		KGSL_DEVICE_MEMSTORE_OFFSET(current_context);
	cmds[4] = (unsigned int) context;
	adreno_ringbuffer_issuecmds(device, 0, cmds, 5);
	kgsl_mmu_setstate(device, context->pagetable);

#ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
	kgsl_cffdump_syncmem(NULL, &context->gpustate,
		context->gpustate.gpuaddr, LCC_SHADOW_SIZE +
		REG_SHADOW_SIZE + CMD_BUFFER_SIZE + TEX_SHADOW_SIZE, false);
#endif

	/* restore gmem.
	 * (note: changes shader. shader must not already be restored.)
	 */
	if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
			context->context_gmem_shadow.gmem_restore, 3);

		/* Restore TP0_CHICKEN */
		adreno_ringbuffer_issuecmds(device, 0,
			context->chicken_restore, 3);

		context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
	}

	/* restore registers and constants. */
	adreno_ringbuffer_issuecmds(device, 0, context->reg_restore, 3);

	/* restore shader instructions & partitioning. */
	if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
		adreno_ringbuffer_issuecmds(device, 0,
			context->shader_restore, 3);
	}

	if (adreno_is_a20x(adreno_dev)) {
		cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
		cmds[1] = context->bin_base_offset;
		adreno_ringbuffer_issuecmds(device, 0, cmds, 2);
	}
}
static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const char *fwfile;
	int i, ret = 0;

	if (adreno_is_a220(adreno_dev)) {
		fwfile = A220_PM4_470_FW;
	} else if (adreno_is_a225(adreno_dev)) {
		fwfile = A225_PM4_FW;
	} else if (adreno_is_a20x(adreno_dev)) {
		fwfile = A200_PM4_FW;
	} else {
		KGSL_DRV_ERR(device, "Could not load PM4 file\n");
		return -EINVAL;
	}

	if (adreno_dev->pm4_fw == NULL) {
		int len;
		unsigned int *ptr;

		ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
		if (ret)
			goto err;

		/* PM4 size is 3 dword aligned plus 1 dword of version */
		if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			goto err;
		}

		adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
		adreno_dev->pm4_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
		adreno_dev->pm4_fw[0]);

	adreno_regwrite(device, REG_CP_DEBUG, 0x02000000);
	adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < adreno_dev->pm4_fw_size; i++)
		adreno_regwrite(device, REG_CP_ME_RAM_DATA,
			adreno_dev->pm4_fw[i]);
err:
	return ret;
}
static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const char *fwfile;
	int i, ret = 0;

	if (adreno_is_a220(adreno_dev)) {
		fwfile = A220_PFP_470_FW;
	} else if (adreno_is_a225(adreno_dev)) {
		fwfile = A225_PFP_FW;
	} else if (adreno_is_a20x(adreno_dev)) {
		fwfile = A200_PFP_FW;
	} else {
		KGSL_DRV_ERR(device, "Could not load PFP firmware\n");
		return -EINVAL;
	}

	if (adreno_dev->pfp_fw == NULL) {
		int len;
		unsigned int *ptr;

		ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
		if (ret)
			goto err;

		/* PFP size should be dword aligned */
		if (len % sizeof(uint32_t) != 0) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			goto err;
		}

		adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
		adreno_dev->pfp_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
		adreno_dev->pfp_fw[0]);

	adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < adreno_dev->pfp_fw_size; i++)
		adreno_regwrite(device, REG_CP_PFP_UCODE_DATA,
			adreno_dev->pfp_fw[i]);
err:
	return ret;
}
void *a2xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
	int *remain, int hang)
{
	struct kgsl_device *device = &adreno_dev->dev;
	struct kgsl_snapshot_registers regs;
	unsigned int pmoverride;

	/* Choose the register set to dump */
	if (adreno_is_a20x(adreno_dev)) {
		regs.regs = (unsigned int *) a200_registers;
		regs.count = a200_registers_count;
	} else {
		regs.regs = (unsigned int *) a220_registers;
		regs.count = a220_registers_count;
	}

	/* Master set of (non debug) registers */
	snapshot = kgsl_snapshot_add_section(device,
		KGSL_SNAPSHOT_SECTION_REGS, snapshot, remain,
		kgsl_snapshot_dump_regs, &regs);

	/* CP_STATE_DEBUG indexed registers */
	snapshot = a2xx_snapshot_indexed_registers(device, snapshot,
			remain, REG_CP_STATE_DEBUG_INDEX,
			REG_CP_STATE_DEBUG_DATA, 0x0, 0x14);

	/* CP_ME indexed registers */
	snapshot = a2xx_snapshot_indexed_registers(device, snapshot,
			remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS, 64, 44);

	/*
	 * Need to temporarily turn off clock gating for the debug bus to
	 * work
	 */
	adreno_regread(device, REG_RBBM_PM_OVERRIDE2, &pmoverride);
	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xFF);

	/* SX debug registers */
	snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
			a2xx_snapshot_sxdebug, NULL);

	/* SU debug indexed registers (only for < 470) */
	if (!adreno_is_a22x(adreno_dev))
		snapshot = a2xx_snapshot_indexed_registers(device, snapshot,
				remain, REG_PA_SU_DEBUG_CNTL,
				REG_PA_SU_DEBUG_DATA, 0, 0x1B);

	/* CP debug registers */
	snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
			a2xx_snapshot_cpdebug, NULL);

	/* MH debug indexed registers */
	snapshot = a2xx_snapshot_indexed_registers(device, snapshot,
			remain, MH_DEBUG_CTRL, MH_DEBUG_DATA, 0x0, 0x40);

	/* Leia only register sets */
	if (adreno_is_a22x(adreno_dev)) {
		/* RB DEBUG indexed registers */
		snapshot = a2xx_snapshot_indexed_registers(device, snapshot,
			remain, REG_RB_DEBUG_CNTL, REG_RB_DEBUG_DATA, 0, 8);

		/* RB DEBUG indexed registers bank 2 */
		snapshot = a2xx_snapshot_indexed_registers(device, snapshot,
			remain, REG_RB_DEBUG_CNTL, REG_RB_DEBUG_DATA + 0x1000,
			0, 8);

		/* PC_DEBUG indexed registers */
		snapshot = a2xx_snapshot_indexed_registers(device, snapshot,
			remain, REG_PC_DEBUG_CNTL, REG_PC_DEBUG_DATA, 0, 8);

		/* GRAS_DEBUG indexed registers */
		snapshot = a2xx_snapshot_indexed_registers(device, snapshot,
			remain, REG_GRAS_DEBUG_CNTL, REG_GRAS_DEBUG_DATA,
			0, 4);

		/* MIU debug registers */
		snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
			a2xx_snapshot_miudebug, NULL);

		/* SQ DEBUG debug registers */
		snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
			a2xx_snapshot_sqdebug, NULL);

		/*
		 * Reading SQ THREAD causes bad things to happen on a running
		 * system, so only read it if the GPU is already hung
		 */
		if (hang) {
			/* SQ THREAD debug registers */
			snapshot = kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
				a2xx_snapshot_sqthreaddebug, NULL);
		}
	}

	/* Reset the clock gating */
	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, pmoverride);

	return snapshot;
}
static uint32_t
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
			  struct adreno_context *context,
			  unsigned int flags, unsigned int *cmds,
			  int sizedwords)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
	unsigned int *ringcmds;
	unsigned int timestamp;
	unsigned int total_sizedwords = sizedwords + 6;
	unsigned int i;
	unsigned int rcmd_gpu;

	/* reserve space to temporarily turn off protected mode
	 * error checking if needed
	 */
	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
	/* 2 dwords to store the start of command sequence */
	total_sizedwords += 2;

	if (adreno_is_a2xx(adreno_dev))
		total_sizedwords += 2; /* CP_WAIT_FOR_IDLE */

	if (adreno_is_a20x(adreno_dev))
		total_sizedwords += 2; /* CACHE_FLUSH */

	ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);

	/* GPU may hang during space allocation, if that's the case the
	 * current context may have hung the GPU
	 */
	if (context && context->flags & CTXT_FLAGS_GPU_HANG) {
		KGSL_CTXT_WARN(rb->device,
			"Context %p caused a gpu hang. Will not accept commands for context %d\n",
			context, context->id);
		return rb->timestamp;
	}

	rcmd_gpu = rb->buffer_desc.gpuaddr
		+ sizeof(uint)*(rb->wptr-total_sizedwords);

	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* disable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
	}

	for (i = 0; i < sizedwords; i++) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
		cmds++;
	}

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* re-enable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
	}

	rb->timestamp++;
	timestamp = rb->timestamp;

	/* HW Workaround for MMU Page fault
	 * due to memory getting freed early before
	 * GPU completes it.
	 */
	if (adreno_is_a2xx(adreno_dev)) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
	}

	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_EVENT_WRITE, 3));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
	GSL_RB_WRITE(ringcmds, rcmd_gpu,
		     (rb->device->memstore.gpuaddr +
		      KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);

	if (adreno_is_a20x(adreno_dev)) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_EVENT_WRITE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH);
	}

	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
		/* Conditional execution based on memory values */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_COND_EXEC, 4));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
		/* # of conditional command DWORDs */
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_INTERRUPT, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
	}

	/* submit the written commands and return the new timestamp */
	adreno_ringbuffer_submit(rb);

	return timestamp;
}
static void build_regrestore_cmds(struct adreno_device *adreno_dev,
				  struct adreno_context *drawctxt)
{
	unsigned int *start = tmp_ctx.cmd;
	unsigned int *cmd = start;
	unsigned int i = 0;
	unsigned int reg_array_size = 0;
	const unsigned int *ptr_register_ranges;

	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;

	/* H/W Registers */
	/* deferred cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, ???); */
	cmd++;
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	/* Force mismatch */
	*cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) | 1;
#else
	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
#endif

	/* Based on chip id choose the register ranges */
	if (adreno_is_a220(adreno_dev)) {
		ptr_register_ranges = register_ranges_a220;
		reg_array_size = ARRAY_SIZE(register_ranges_a220);
	} else if (adreno_is_a225(adreno_dev)) {
		ptr_register_ranges = register_ranges_a225;
		reg_array_size = ARRAY_SIZE(register_ranges_a225);
	} else {
		ptr_register_ranges = register_ranges_a20x;
		reg_array_size = ARRAY_SIZE(register_ranges_a20x);
	}

	for (i = 0; i < (reg_array_size/2); i++) {
		cmd = reg_range(cmd, ptr_register_ranges[i*2],
				ptr_register_ranges[i*2+1]);
	}

	/* Now we know how many register blocks we have, we can compute command
	 * length
	 */
	start[2] =
	    cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, (cmd - start) - 3);
	/* Enable shadowing for the entire register block. */
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	start[4] |= (0 << 24) | (4 << 16);	/* Disable shadowing. */
#else
	start[4] |= (1 << 24) | (4 << 16);
#endif

	/* Need to handle some of the registers separately */
	*cmd++ = cp_type0_packet(REG_SQ_GPR_MANAGEMENT, 1);
	tmp_ctx.reg_values[0] = virt2gpu(cmd, &drawctxt->gpustate);
	*cmd++ = 0x00040400;

	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;
	*cmd++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
	tmp_ctx.reg_values[1] = virt2gpu(cmd, &drawctxt->gpustate);
	*cmd++ = 0x00000000;

	if (adreno_is_a20x(adreno_dev)) {
		*cmd++ = cp_type0_packet(REG_RB_BC_CONTROL, 1);
		tmp_ctx.reg_values[2] = virt2gpu(cmd, &drawctxt->gpustate);
		*cmd++ = 0x00000000;
	}

	if (adreno_is_a22x(adreno_dev)) {
		unsigned int i;
		unsigned int j = 2;
		for (i = REG_A220_VSC_BIN_SIZE; i <=
				REG_A220_VSC_PIPE_DATA_LENGTH_7; i++) {
			*cmd++ = cp_type0_packet(i, 1);
			tmp_ctx.reg_values[j] = virt2gpu(cmd,
				&drawctxt->gpustate);
			*cmd++ = 0x00000000;
			j++;
		}
	}

	/* ALU Constants */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	*cmd++ = (0 << 24) | (0 << 16) | 0;	/* Disable shadowing */
#else
	*cmd++ = (1 << 24) | (0 << 16) | 0;
#endif
	*cmd++ = ALU_CONSTANTS;

	/* Texture Constants */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	*cmd++ = (0 << 24) | (1 << 16) | 0;	/* Disable shadowing */
#else
	*cmd++ = (1 << 24) | (1 << 16) | 0;
#endif
	*cmd++ = TEX_CONSTANTS;

	/* Boolean Constants */
	*cmd++ = cp_type3_packet(CP_SET_CONSTANT, 1 + BOOL_CONSTANTS);
	*cmd++ = (2 << 16) | 0;

	/* the next BOOL_CONSTANTS dwords are the shadow area for
	 * boolean constants.
	 */
	tmp_ctx.bool_shadow = virt2gpu(cmd, &drawctxt->gpustate);
	cmd += BOOL_CONSTANTS;

	/* Loop Constants */
	*cmd++ = cp_type3_packet(CP_SET_CONSTANT, 1 + LOOP_CONSTANTS);
	*cmd++ = (3 << 16) | 0;

	/* the next LOOP_CONSTANTS dwords are the shadow area for
	 * loop constants.
	 */
	tmp_ctx.loop_shadow = virt2gpu(cmd, &drawctxt->gpustate);
	cmd += LOOP_CONSTANTS;

	/* create indirect buffer command for above command sequence */
	create_ib1(drawctxt, drawctxt->reg_restore, start, cmd);

	tmp_ctx.cmd = cmd;
}
static void build_regsave_cmds(struct adreno_device *adreno_dev,
			       struct adreno_context *drawctxt)
{
	unsigned int *start = tmp_ctx.cmd;
	unsigned int *cmd = start;

	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;

#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	/* Make sure the HW context has the correct register values
	 * before reading them.
	 */
	*cmd++ = cp_type3_packet(CP_CONTEXT_UPDATE, 1);
	*cmd++ = 0;

	{
		unsigned int i = 0;
		unsigned int reg_array_size = 0;
		const unsigned int *ptr_register_ranges;

		/* Based on chip id choose the register ranges */
		if (adreno_is_a220(adreno_dev)) {
			ptr_register_ranges = register_ranges_a220;
			reg_array_size = ARRAY_SIZE(register_ranges_a220);
		} else if (adreno_is_a225(adreno_dev)) {
			ptr_register_ranges = register_ranges_a225;
			reg_array_size = ARRAY_SIZE(register_ranges_a225);
		} else {
			ptr_register_ranges = register_ranges_a20x;
			reg_array_size = ARRAY_SIZE(register_ranges_a20x);
		}

		/* Write HW registers into shadow */
		for (i = 0; i < (reg_array_size/2) ; i++) {
			build_reg_to_mem_range(ptr_register_ranges[i*2],
					ptr_register_ranges[i*2+1],
					&cmd, drawctxt);
		}
	}

	/* Copy ALU constants */
	cmd =
	    reg_to_mem(cmd, (drawctxt->gpustate.gpuaddr) & 0xFFFFE000,
		       REG_SQ_CONSTANT_0, ALU_CONSTANTS);

	/* Copy Tex constants */
	cmd =
	    reg_to_mem(cmd,
		       (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000,
		       REG_SQ_FETCH_0, TEX_CONSTANTS);
#else
	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;

	/* H/W registers are already shadowed; just need to disable shadowing
	 * to prevent corruption.
	 */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
	*cmd++ = 4 << 16;	/* regs, start=0 */
	*cmd++ = 0x0;		/* count = 0 */

	/* ALU constants are already shadowed; just need to disable shadowing
	 * to prevent corruption.
	 */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
	*cmd++ = 0 << 16;	/* ALU, start=0 */
	*cmd++ = 0x0;		/* count = 0 */

	/* Tex constants are already shadowed; just need to disable shadowing
	 * to prevent corruption.
	 */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
	*cmd++ = 1 << 16;	/* Tex, start=0 */
	*cmd++ = 0x0;		/* count = 0 */
#endif

	/* Need to handle some of the registers separately */
	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = REG_SQ_GPR_MANAGEMENT;
	*cmd++ = tmp_ctx.reg_values[0];

	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = REG_TP0_CHICKEN;
	*cmd++ = tmp_ctx.reg_values[1];

	if (adreno_is_a20x(adreno_dev)) {
		*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
		*cmd++ = REG_RB_BC_CONTROL;
		*cmd++ = tmp_ctx.reg_values[2];
	}

	if (adreno_is_a22x(adreno_dev)) {
		unsigned int i;
		unsigned int j = 2;
		for (i = REG_A220_VSC_BIN_SIZE; i <=
				REG_A220_VSC_PIPE_DATA_LENGTH_7; i++) {
			*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
			*cmd++ = i;
			*cmd++ = tmp_ctx.reg_values[j];
			j++;
		}
	}

	/* Copy Boolean constants */
	cmd = reg_to_mem(cmd, tmp_ctx.bool_shadow, REG_SQ_CF_BOOLEANS,
			 BOOL_CONSTANTS);

	/* Copy Loop constants */
	cmd = reg_to_mem(cmd, tmp_ctx.loop_shadow,
		REG_SQ_CF_LOOP, LOOP_CONSTANTS);

	/* create indirect buffer command for above command sequence */
	create_ib1(drawctxt, drawctxt->reg_save, start, cmd);

	tmp_ctx.cmd = cmd;
}
static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
			struct adreno_context *context)
{
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int cmds[5];

	if (context == NULL) {
		/* No context - set the default pagetable and that's it */
		kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
				  adreno_dev->drawctxt_active->id);
		return;
	}

	KGSL_CTXT_INFO(device, "context flags %08x\n", context->flags);

	cmds[0] = cp_nop_packet(1);
	cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
	cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
	cmds[3] = device->memstore.gpuaddr +
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
	cmds[4] = context->id;
	adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
					cmds, 5);
	kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);

#ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
	kgsl_cffdump_syncmem(NULL, &context->gpustate,
		context->gpustate.gpuaddr, LCC_SHADOW_SIZE +
		REG_SHADOW_SIZE + CMD_BUFFER_SIZE + TEX_SHADOW_SIZE, false);
#endif

	/* restore gmem.
	 * (note: changes shader. shader must not already be restored.)
	 */
	if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_PMODE,
			context->context_gmem_shadow.gmem_restore, 3);

		if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
			/* Restore TP0_CHICKEN */
			adreno_ringbuffer_issuecmds(device, context,
				KGSL_CMD_FLAGS_NONE,
				context->chicken_restore, 3);
		}

		context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
	}

	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
		/* restore registers and constants. */
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);

		/* restore shader instructions & partitioning. */
		if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
			adreno_ringbuffer_issuecmds(device, context,
				KGSL_CMD_FLAGS_NONE,
				context->shader_restore, 3);
		}
	}

	if (adreno_is_a20x(adreno_dev)) {
		cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
		cmds[1] = context->bin_base_offset;
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_NONE, cmds, 2);
	}
}