/*
 * Save h/w registers, ALU constants, texture constants, etc.
 * Requires: bool_shadow_gpuaddr, loop_shadow_gpuaddr
 */
static void build_regsave_cmds(struct adreno_device *adreno_dev,
			       struct adreno_context *drawctxt)
{
	unsigned int *start = tmp_ctx.cmd;
	unsigned int *cmd = start;

	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;

#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	/* Make sure the HW context has the correct register values
	 * before reading them. */
	*cmd++ = cp_type3_packet(CP_CONTEXT_UPDATE, 1);
	*cmd++ = 0;

	{
		unsigned int i = 0;
		unsigned int reg_array_size = 0;
		const unsigned int *ptr_register_ranges;

		/* Based on chip id choose the register ranges */
		if (adreno_is_a220(adreno_dev)) {
			ptr_register_ranges = register_ranges_a220;
			reg_array_size = ARRAY_SIZE(register_ranges_a220);
		} else if (adreno_is_a225(adreno_dev)) {
			ptr_register_ranges = register_ranges_a225;
			reg_array_size = ARRAY_SIZE(register_ranges_a225);
		} else {
			ptr_register_ranges = register_ranges_a20x;
			reg_array_size = ARRAY_SIZE(register_ranges_a20x);
		}

		/* Write HW registers into shadow */
		for (i = 0; i < (reg_array_size / 2); i++) {
			build_reg_to_mem_range(ptr_register_ranges[i * 2],
					ptr_register_ranges[i * 2 + 1],
					&cmd, drawctxt);
		}
	}

	/* Copy ALU constants */
	cmd = reg_to_mem(cmd, (drawctxt->gpustate.gpuaddr) & 0xFFFFE000,
			 REG_SQ_CONSTANT_0, ALU_CONSTANTS);

	/* Copy Tex constants */
	cmd = reg_to_mem(cmd,
			 (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000,
			 REG_SQ_FETCH_0, TEX_CONSTANTS);
#else
	/* Insert a wait for idle packet before reading the registers.
	 * This is to fix a hang/reset seen during stress testing.  In this
	 * hang, CP encountered a timeout reading SQ's boolean constant
	 * register.  There is logic in the HW that blocks reading of this
	 * register when the SQ block is not idle, which we believe is
	 * contributing to the hang. */
	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;

	/* H/w registers are already shadowed; just need to disable shadowing
	 * to prevent corruption. */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
	*cmd++ = 4 << 16;	/* regs, start=0 */
	*cmd++ = 0x0;		/* count = 0 */

	/* ALU constants are already shadowed; just need to disable shadowing
	 * to prevent corruption. */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
	*cmd++ = 0 << 16;	/* ALU, start=0 */
	*cmd++ = 0x0;		/* count = 0 */

	/* Tex constants are already shadowed; just need to disable shadowing
	 * to prevent corruption. */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
	*cmd++ = 1 << 16;	/* Tex, start=0 */
	*cmd++ = 0x0;		/* count = 0 */
#endif

	/* Need to handle some of the registers separately */
	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = REG_SQ_GPR_MANAGEMENT;
	*cmd++ = tmp_ctx.reg_values[0];

	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = REG_TP0_CHICKEN;
	*cmd++ = tmp_ctx.reg_values[1];

	/* Save RB_BC_CONTROL separately on a20x */
	if (adreno_is_a20x(adreno_dev)) {
		*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
		*cmd++ = REG_RB_BC_CONTROL;
		*cmd++ = tmp_ctx.reg_values[2];
	}

	if (adreno_is_a22x(adreno_dev)) {
		unsigned int i;
		unsigned int j = 2;

		for (i = REG_A220_VSC_BIN_SIZE; i <=
				REG_A220_VSC_PIPE_DATA_LENGTH_7; i++) {
			*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
			*cmd++ = i;
			*cmd++ = tmp_ctx.reg_values[j];
			j++;
		}
	}

	/* Copy Boolean constants */
	cmd = reg_to_mem(cmd, tmp_ctx.bool_shadow, REG_SQ_CF_BOOLEANS,
			 BOOL_CONSTANTS);

	/* Copy Loop constants */
	cmd = reg_to_mem(cmd, tmp_ctx.loop_shadow, REG_SQ_CF_LOOP,
			 LOOP_CONSTANTS);

	/* Create indirect buffer command for above command sequence */
	create_ib1(drawctxt, drawctxt->reg_save, start, cmd);

	tmp_ctx.cmd = cmd;
}
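/*
 * For reference: the reg_to_mem() calls above take a destination GPU
 * address in the shadow buffer, a starting source register offset and a
 * dword count, and return the advanced command pointer.  The sketch below
 * is illustrative only (an assumption about the helper's shape, not the
 * driver's actual implementation): it shows how a helper with that
 * signature could emit one CP_REG_TO_MEM packet per register, stepping the
 * destination address by one dword each time.
 */
static unsigned int *reg_to_mem_sketch(unsigned int *cmd, unsigned int dst,
				       unsigned int src, int dwords)
{
	/* Each CP_REG_TO_MEM packet means: "read register 'src' and write
	 * its value to GPU address 'dst'". */
	while (dwords-- > 0) {
		*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
		*cmd++ = src++;			/* source register offset */
		*cmd++ = dst;			/* destination in the shadow */
		dst += sizeof(unsigned int);	/* next dword in the shadow */
	}

	return cmd;
}

/*
 * Usage would mirror the boolean-constant copy in build_regsave_cmds(),
 * e.g. cmd = reg_to_mem_sketch(cmd, tmp_ctx.bool_shadow,
 * REG_SQ_CF_BOOLEANS, BOOL_CONSTANTS);
 */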