static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); const char *fwfile; int i, ret = 0; if (adreno_is_a220(adreno_dev)) { fwfile = A220_PM4_470_FW; } else if (adreno_is_a225(adreno_dev)) { fwfile = A225_PM4_FW; } else if (adreno_is_a20x(adreno_dev)) { fwfile = A200_PM4_FW; } else { KGSL_DRV_ERR(device, "Could not load PM4 file\n"); return -EINVAL; } if (adreno_dev->pm4_fw == NULL) { int len; unsigned int *ptr; ret = _load_firmware(device, fwfile, (void *) &ptr, &len); if (ret) goto err; /* PM4 size is 3 dword aligned plus 1 dword of version */ if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) { KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len); ret = -EINVAL; goto err; } adreno_dev->pm4_fw_size = len / sizeof(uint32_t); adreno_dev->pm4_fw = ptr; } KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n", adreno_dev->pm4_fw[0]); adreno_regwrite(device, REG_CP_DEBUG, 0x02000000); adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0); for (i = 1; i < adreno_dev->pm4_fw_size; i++) adreno_regwrite(device, REG_CP_ME_RAM_DATA, adreno_dev->pm4_fw[i]); err: return ret; }
static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); const char *fwfile; int i, ret = 0; if (adreno_is_a220(adreno_dev)) { fwfile = A220_PFP_470_FW; } else if (adreno_is_a225(adreno_dev)) { fwfile = A225_PFP_FW; } else if (adreno_is_a20x(adreno_dev)) { fwfile = A200_PFP_FW; } else { KGSL_DRV_ERR(device, "Could not load PFP firmware\n"); return -EINVAL; } if (adreno_dev->pfp_fw == NULL) { int len; unsigned int *ptr; ret = _load_firmware(device, fwfile, (void *) &ptr, &len); if (ret) goto err; /* PFP size shold be dword aligned */ if (len % sizeof(uint32_t) != 0) { KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len); ret = -EINVAL; goto err; } adreno_dev->pfp_fw_size = len / sizeof(uint32_t); adreno_dev->pfp_fw = ptr; } KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n", adreno_dev->pfp_fw[0]); adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0); for (i = 1; i < adreno_dev->pfp_fw_size; i++) adreno_regwrite(device, REG_CP_PFP_UCODE_DATA, adreno_dev->pfp_fw[i]); err: return ret; }
/*
 * build_regrestore_cmds() - build the per-context register restore IB
 * @adreno_dev: adreno device (used to choose the chip's register ranges)
 * @drawctxt: context whose shadow memory backs the restore
 *
 * Emits, into the scratch command buffer at tmp_ctx.cmd, the PM4 stream
 * that restores H/W registers and ALU/texture/boolean/loop constants from
 * the context's shadow memory.  The stream is wrapped into drawctxt->
 * reg_restore via create_ib1(), and tmp_ctx.cmd is advanced past it.
 *
 * Note the two-dword back-patch below: the CP_LOAD_CONSTANT_CONTEXT header
 * (start[2]) and its flags dword (start[4]) can only be written once the
 * number of register-range dwords is known, so slots are reserved first
 * and filled in after the ranges are emitted.
 */
static void build_regrestore_cmds(struct adreno_device *adreno_dev,
			struct adreno_context *drawctxt)
{
	unsigned int *start = tmp_ctx.cmd;
	unsigned int *cmd = start;
	unsigned int i = 0;
	unsigned int reg_array_size = 0;
	const unsigned int *ptr_register_ranges;

	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;

	/* H/W Registers */
	/* Reserve start[2] for the deferred
	 * cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, ...) header; the dword
	 * count is patched in after the register ranges are emitted. */
	cmd++;
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	/* Force mismatch so the CP always reloads instead of trusting the
	 * shadow context id. */
	*cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) | 1;
#else
	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
#endif

	/* Based on chip id choose the registers ranges */
	if (adreno_is_a220(adreno_dev)) {
		ptr_register_ranges = register_ranges_a220;
		reg_array_size = ARRAY_SIZE(register_ranges_a220);
	} else if (adreno_is_a225(adreno_dev)) {
		ptr_register_ranges = register_ranges_a225;
		reg_array_size = ARRAY_SIZE(register_ranges_a225);
	} else {
		ptr_register_ranges = register_ranges_a20x;
		reg_array_size = ARRAY_SIZE(register_ranges_a20x);
	}

	/* The range table is stored as (first, last) pairs */
	for (i = 0; i < (reg_array_size/2); i++) {
		cmd = reg_range(cmd, ptr_register_ranges[i*2],
				ptr_register_ranges[i*2+1]);
	}

	/* Now we know how many register blocks we have, we can compute
	 * command length; patch the reserved header and flags dwords. */
	start[2] =
	    cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, (cmd - start) - 3);
	/* Enable shadowing for the entire register block. */
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	start[4] |= (0 << 24) | (4 << 16);	/* Disable shadowing. */
#else
	start[4] |= (1 << 24) | (4 << 16);
#endif

	/* Need to handle some of the registers separately */
	*cmd++ = cp_type0_packet(REG_SQ_GPR_MANAGEMENT, 1);
	/* Record the GPU address of each value slot so the save path can
	 * write the live register back into it. */
	tmp_ctx.reg_values[0] = virt2gpu(cmd, &drawctxt->gpustate);
	*cmd++ = 0x00040400;

	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;
	*cmd++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
	tmp_ctx.reg_values[1] = virt2gpu(cmd, &drawctxt->gpustate);
	*cmd++ = 0x00000000;

	if (adreno_is_a22x(adreno_dev)) {
		unsigned int i;
		unsigned int j = 2;
		/* A22x VSC pipe registers get individual shadow slots,
		 * starting at reg_values[2]. */
		for (i = REG_A220_VSC_BIN_SIZE; i <=
				REG_A220_VSC_PIPE_DATA_LENGTH_7; i++) {
			*cmd++ = cp_type0_packet(i, 1);
			tmp_ctx.reg_values[j] = virt2gpu(cmd,
				&drawctxt->gpustate);
			*cmd++ = 0x00000000;
			j++;
		}
	}

	/* ALU Constants */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	*cmd++ = (0 << 24) | (0 << 16) | 0;	/* Disable shadowing */
#else
	*cmd++ = (1 << 24) | (0 << 16) | 0;
#endif
	*cmd++ = ALU_CONSTANTS;

	/* Texture Constants */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	/* Disable shadowing */
	*cmd++ = (0 << 24) | (1 << 16) | 0;
#else
	*cmd++ = (1 << 24) | (1 << 16) | 0;
#endif
	*cmd++ = TEX_CONSTANTS;

	/* Boolean Constants */
	*cmd++ = cp_type3_packet(CP_SET_CONSTANT, 1 + BOOL_CONSTANTS);
	*cmd++ = (2 << 16) | 0;

	/* the next BOOL_CONSTANTS dwords is the shadow area for
	 * boolean constants. */
	tmp_ctx.bool_shadow = virt2gpu(cmd, &drawctxt->gpustate);
	cmd += BOOL_CONSTANTS;

	/* Loop Constants */
	*cmd++ = cp_type3_packet(CP_SET_CONSTANT, 1 + LOOP_CONSTANTS);
	*cmd++ = (3 << 16) | 0;

	/* the next LOOP_CONSTANTS dwords is the shadow area for
	 * loop constants. */
	tmp_ctx.loop_shadow = virt2gpu(cmd, &drawctxt->gpustate);
	cmd += LOOP_CONSTANTS;

	/* create indirect buffer command for above command sequence */
	create_ib1(drawctxt, drawctxt->reg_restore, start, cmd);

	tmp_ctx.cmd = cmd;
}
/*
 * build_regsave_cmds() - build the per-context register save IB
 * @adreno_dev: adreno device (used to choose the chip's register ranges)
 * @drawctxt: context whose shadow memory receives the saved state
 *
 * Saves h/w regs, alu constants, texture constants, etc. into the
 * context's shadow memory.
 * Requires: tmp_ctx.bool_shadow and tmp_ctx.loop_shadow must already have
 * been assigned (by build_regrestore_cmds).
 * The stream is wrapped into drawctxt->reg_save via create_ib1() and
 * tmp_ctx.cmd is advanced past it.
 */
static void build_regsave_cmds(struct adreno_device *adreno_dev,
			       struct adreno_context *drawctxt)
{
	unsigned int *start = tmp_ctx.cmd;
	unsigned int *cmd = start;

	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;

#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	/* Shadow writes are disabled: explicitly copy every register into
	 * the shadow with CP_REG_TO_MEM packets. */
	/* Make sure the HW context has the correct register values
	 * before reading them. */
	*cmd++ = cp_type3_packet(CP_CONTEXT_UPDATE, 1);
	*cmd++ = 0;

	{
		unsigned int i = 0;
		unsigned int reg_array_size = 0;
		const unsigned int *ptr_register_ranges;

		/* Based on chip id choose the register ranges */
		if (adreno_is_a220(adreno_dev)) {
			ptr_register_ranges = register_ranges_a220;
			reg_array_size = ARRAY_SIZE(register_ranges_a220);
		} else if (adreno_is_a225(adreno_dev)) {
			ptr_register_ranges = register_ranges_a225;
			reg_array_size = ARRAY_SIZE(register_ranges_a225);
		} else {
			ptr_register_ranges = register_ranges_a20x;
			reg_array_size = ARRAY_SIZE(register_ranges_a20x);
		}

		/* Write HW registers into shadow; the table holds
		 * (first, last) pairs. */
		for (i = 0; i < (reg_array_size/2) ; i++) {
			build_reg_to_mem_range(ptr_register_ranges[i*2],
					ptr_register_ranges[i*2+1],
					&cmd, drawctxt);
		}
	}

	/* Copy ALU constants */
	cmd =
	    reg_to_mem(cmd, (drawctxt->gpustate.gpuaddr) & 0xFFFFE000,
		       REG_SQ_CONSTANT_0, ALU_CONSTANTS);

	/* Copy Tex constants */
	cmd =
	    reg_to_mem(cmd,
		       (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000,
		       REG_SQ_FETCH_0, TEX_CONSTANTS);
#else
	/* Insert a wait for idle packet before reading the registers.
	 * This is to fix a hang/reset seen during stress testing.  In this
	 * hang, CP encountered a timeout reading SQ's boolean constant
	 * register.  There is logic in the HW that blocks reading of this
	 * register when the SQ block is not idle, which we believe is
	 * contributing to the hang. */
	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;

	/* H/w registers are already shadowed; just need to disable shadowing
	 * to prevent corruption.
	 */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
	*cmd++ = 4 << 16;	/* regs, start=0 */
	*cmd++ = 0x0;		/* count = 0 */

	/* ALU constants are already shadowed; just need to disable shadowing
	 * to prevent corruption.
	 */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
	*cmd++ = 0 << 16;	/* ALU, start=0 */
	*cmd++ = 0x0;		/* count = 0 */

	/* Tex constants are already shadowed; just need to disable shadowing
	 * to prevent corruption.
	 */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
	*cmd++ = 1 << 16;	/* Tex, start=0 */
	*cmd++ = 0x0;		/* count = 0 */
#endif

	/* Need to handle some of the registers separately; destinations are
	 * the shadow slots recorded by build_regrestore_cmds. */
	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = REG_SQ_GPR_MANAGEMENT;
	*cmd++ = tmp_ctx.reg_values[0];

	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = REG_TP0_CHICKEN;
	*cmd++ = tmp_ctx.reg_values[1];

	if (adreno_is_a22x(adreno_dev)) {
		unsigned int i;
		unsigned int j = 2;
		for (i = REG_A220_VSC_BIN_SIZE; i <=
				REG_A220_VSC_PIPE_DATA_LENGTH_7; i++) {
			*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
			*cmd++ = i;
			*cmd++ = tmp_ctx.reg_values[j];
			j++;
		}
	}

	/* Copy Boolean constants */
	cmd = reg_to_mem(cmd, tmp_ctx.bool_shadow, REG_SQ_CF_BOOLEANS,
			 BOOL_CONSTANTS);

	/* Copy Loop constants */
	cmd = reg_to_mem(cmd, tmp_ctx.loop_shadow, REG_SQ_CF_LOOP,
			 LOOP_CONSTANTS);

	/* create indirect buffer command for above command sequence */
	create_ib1(drawctxt, drawctxt->reg_save, start, cmd);

	tmp_ctx.cmd = cmd;
}
/*
 * build_sys2gmem_cmds() - build the GMEM restore (blit) command stream
 * @adreno_dev: adreno device (chip checks pick a220-specific packets)
 * @drawctxt: context that owns the stream (for create_ib1)
 * @ctx: tmp context with chicken_restore/gmem_base; may be NULL, in which
 *       case the affected dword slots are skipped/merged instead of written
 * @shadow: GMEM shadow descriptor (buffers, format, pitch, size)
 *
 * Copies colour, depth & stencil buffers from system memory to graphics
 * memory by drawing a textured full-screen quad.  Emits the PM4 stream
 * into shadow->gmem_restore_commands, wraps it into shadow->gmem_restore
 * via create_ib1(), and returns the first dword past the stream.
 */
static unsigned int *build_sys2gmem_cmds(struct adreno_device *adreno_dev,
					 struct adreno_context *drawctxt,
					 struct tmp_ctx *ctx,
					 struct gmem_shadow_t *shadow)
{
	unsigned int *cmds = shadow->gmem_restore_commands;
	unsigned int *start = cmds;

	/* Store TP0_CHICKEN register so it can be put back afterwards */
	*cmds++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
	*cmds++ = REG_TP0_CHICKEN;
	if (ctx)
		*cmds++ = ctx->chicken_restore;
	else
		cmds++;	/* leave the destination slot to be patched later */

	*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
	*cmds++ = 0;

	/* Set TP0_CHICKEN to zero */
	*cmds++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
	*cmds++ = 0x00000000;

	/* Set PA_SC_AA_CONFIG to 0 */
	*cmds++ = pm4_type0_packet(REG_PA_SC_AA_CONFIG, 1);
	*cmds++ = 0x00000000;

	/* shader constants */

	/* vertex buffer constants */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 7);

	*cmds++ = (0x1 << 16) | (9 * 6);
	/* valid(?) vtx constant flag & addr */
	*cmds++ = shadow->quad_vertices.gpuaddr | 0x3;
	/* limit = 12 dwords */
	*cmds++ = 0x00000030;
	/* valid(?) vtx constant flag & addr */
	*cmds++ = shadow->quad_texcoords.gpuaddr | 0x3;
	/* limit = 8 dwords */
	*cmds++ = 0x00000020;
	*cmds++ = 0;
	*cmds++ = 0;

	/* Invalidate L2 cache to make sure vertices are updated */
	*cmds++ = pm4_type0_packet(REG_TC_CNTL_STATUS, 1);
	*cmds++ = 0x1;

	cmds = program_shader(cmds, 0, sys2gmem_vtx_pgm, SYS2GMEM_VTX_PGM_LEN);

	/* Load the patched fragment shader stream */
	cmds =
	    program_shader(cmds, 1, sys2gmem_frag_pgm, SYS2GMEM_FRAG_PGM_LEN);

	/* SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_SQ_PROGRAM_CNTL);
	*cmds++ = 0x10030002;
	*cmds++ = 0x00000008;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_SC_AA_MASK);
	*cmds++ = 0x0000ffff;	/* REG_PA_SC_AA_MASK */

	if (!adreno_is_a220(adreno_dev)) {
		/* PA_SC_VIZ_QUERY */
		*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
		*cmds++ = PM4_REG(REG_PA_SC_VIZ_QUERY);
		*cmds++ = 0x0;	/*REG_PA_SC_VIZ_QUERY */
	}

	/* RB_COLORCONTROL */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_COLORCONTROL);
	*cmds++ = 0x00000c20;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
	*cmds++ = PM4_REG(REG_VGT_MAX_VTX_INDX);
	*cmds++ = 0x00ffffff;	/* mmVGT_MAX_VTX_INDX */
	*cmds++ = 0x0;		/* mmVGT_MIN_VTX_INDX */
	*cmds++ = 0x00000000;	/* mmVGT_INDX_OFFSET */

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_VGT_VERTEX_REUSE_BLOCK_CNTL);
	*cmds++ = 0x00000002;	/* mmVGT_VERTEX_REUSE_BLOCK_CNTL */
	*cmds++ = 0x00000002;	/* mmVGT_OUT_DEALLOC_CNTL */

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_SQ_INTERPOLATOR_CNTL);
	*cmds++ = 0xffffffff;	/* mmSQ_INTERPOLATOR_CNTL */

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_SC_AA_CONFIG);
	*cmds++ = 0x00000000;	/* REG_PA_SC_AA_CONFIG */

	/* set REG_PA_SU_SC_MODE_CNTL
	 * Front_ptype = draw triangles
	 * Back_ptype = draw triangles
	 * Provoking vertex = last
	 */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_SU_SC_MODE_CNTL);
	*cmds++ = 0x00080240;

	/* texture constants: copy the template then patch in this shadow's
	 * pitch, base address/format and dimensions. */
	*cmds++ =
	    pm4_type3_packet(PM4_SET_CONSTANT, (SYS2GMEM_TEX_CONST_LEN + 1));
	*cmds++ = (0x1 << 16) | (0 * 6);
	memcpy(cmds, sys2gmem_tex_const, SYS2GMEM_TEX_CONST_LEN << 2);
	cmds[0] |= (shadow->pitch >> 5) << 22;
	cmds[1] |=
	    shadow->gmemshadow.gpuaddr | surface_format_table[shadow->format];
	cmds[2] |= (shadow->width - 1) | (shadow->height - 1) << 13;
	cmds += SYS2GMEM_TEX_CONST_LEN;

	/* program surface info */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_RB_SURFACE_INFO);
	*cmds++ = shadow->gmem_pitch;	/* pitch, MSAA = 1 */

	/* RB_COLOR_INFO Endian=none, Linear, Format=RGBA8888, Swap=0,
	 * Base=gmem_base
	 */
	if (ctx)
		*cmds++ =
		    (shadow->
		     format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | ctx->
		    gmem_base;
	else {
		/* no tmp context: merge the format into whatever value is
		 * already in the slot. */
		unsigned int temp = *cmds;
		*cmds++ = (temp & ~RB_COLOR_INFO__COLOR_FORMAT_MASK) |
			(shadow->format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT);
	}

	/* RB_DEPTHCONTROL */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_DEPTHCONTROL);

	if (adreno_is_a220(adreno_dev))
		*cmds++ = 8;	/* disable Z */
	else
		*cmds++ = 0;	/* disable Z */

	/* Use maximum scissor values -- quad vertices already
	 * have the correct bounds */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_PA_SC_SCREEN_SCISSOR_TL);
	*cmds++ = (0 << 16) | 0;
	*cmds++ = ((0x1fff) << 16) | 0x1fff;
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_PA_SC_WINDOW_SCISSOR_TL);
	*cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0);
	*cmds++ = ((0x1fff) << 16) | 0x1fff;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_CL_VTE_CNTL);
	/* disable X/Y/Z transforms, X/Y/Z are premultiplied by W */
	*cmds++ = 0x00000b00;

	/*load the viewport so that z scale = clear depth and z offset = 0.0f */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_PA_CL_VPORT_ZSCALE);
	*cmds++ = 0xbf800000;
	*cmds++ = 0x0;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_COLOR_MASK);
	*cmds++ = 0x0000000f;	/* R = G = B = 1:enabled */

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_COLOR_DEST_MASK);
	*cmds++ = 0xffffffff;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_SQ_WRAPPING_0);
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;

	/* load the stencil ref value
	 * $AAM - do this later
	 */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_MODECONTROL);
	/* draw pixels with color and depth/stencil component */
	*cmds++ = 0x4;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_CL_CLIP_CNTL);
	*cmds++ = 0x00010000;

	if (adreno_is_a220(adreno_dev)) {
		/* a220 needs extra init flags and a 3-dword draw packet */
		*cmds++ = pm4_type3_packet(PM4_SET_DRAW_INIT_FLAGS, 1);
		*cmds++ = 0;

		*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
		*cmds++ = PM4_REG(REG_LEIA_RB_LRZ_VSC_CONTROL);
		*cmds++ = 0x0000000;

		*cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 3);
		*cmds++ = 0;	/* viz query info. */
		/* PrimType=RectList, SrcSel=AutoIndex, VisCullMode=Ignore */
		*cmds++ = 0x00004088;
		*cmds++ = 3;	/* NumIndices=3 */
	} else {
		/* queue the draw packet */
		*cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 2);
		*cmds++ = 0;	/* viz query info. */
		/* PrimType=RectList, NumIndices=3, SrcSel=AutoIndex */
		*cmds++ = 0x00030088;
	}

	/* create indirect buffer command for above command sequence */
	create_ib1(drawctxt, shadow->gmem_restore, start, cmds);

	return cmds;
}
/*
 * build_gmem2sys_cmds() - build the GMEM save (resolve) command stream
 * @adreno_dev: adreno device (chip checks pick a220-specific packets)
 * @drawctxt: context that owns the stream (for create_ib1)
 * @ctx: tmp context with chicken_restore/gmem_base; may be NULL, in which
 *       case the affected dword slots are skipped/merged instead of written
 * @shadow: GMEM shadow descriptor (buffers, format, pitch, size)
 *
 * Copies colour, depth & stencil buffers from graphics memory to system
 * memory using the EDRAM copy (resolve) path.  Emits the PM4 stream into
 * shadow->gmem_save_commands, wraps it into shadow->gmem_save via
 * create_ib1(), and returns the first dword past the stream.
 */
static unsigned int *build_gmem2sys_cmds(struct adreno_device *adreno_dev,
					 struct adreno_context *drawctxt,
					 struct tmp_ctx *ctx,
					 struct gmem_shadow_t *shadow)
{
	unsigned int *cmds = shadow->gmem_save_commands;
	unsigned int *start = cmds;
	/* Calculate the new offset based on the adjusted base: the copy
	 * destination base must be 4K aligned, so the sub-page remainder
	 * is expressed as a pixel offset. */
	unsigned int bytesperpixel = format2bytesperpixel[shadow->format];
	unsigned int addr = shadow->gmemshadow.gpuaddr;
	unsigned int offset = (addr - (addr & 0xfffff000)) / bytesperpixel;

	/* Store TP0_CHICKEN register so it can be put back afterwards */
	*cmds++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
	*cmds++ = REG_TP0_CHICKEN;
	if (ctx)
		*cmds++ = ctx->chicken_restore;
	else
		cmds++;	/* leave the destination slot to be patched later */

	*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
	*cmds++ = 0;

	/* Set TP0_CHICKEN to zero */
	*cmds++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
	*cmds++ = 0x00000000;

	/* Set PA_SC_AA_CONFIG to 0 */
	*cmds++ = pm4_type0_packet(REG_PA_SC_AA_CONFIG, 1);
	*cmds++ = 0x00000000;

	/* program shader */

	/* load shader vtx constants ... 5 dwords */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
	*cmds++ = (0x1 << 16) | SHADER_CONST_ADDR;
	*cmds++ = 0;
	/* valid(?) vtx constant flag & addr */
	*cmds++ = shadow->quad_vertices.gpuaddr | 0x3;
	/* limit = 12 dwords */
	*cmds++ = 0x00000030;

	/* Invalidate L2 cache to make sure vertices are updated */
	*cmds++ = pm4_type0_packet(REG_TC_CNTL_STATUS, 1);
	*cmds++ = 0x1;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
	*cmds++ = PM4_REG(REG_VGT_MAX_VTX_INDX);
	*cmds++ = 0x00ffffff;	/* REG_VGT_MAX_VTX_INDX */
	*cmds++ = 0x0;		/* REG_VGT_MIN_VTX_INDX */
	*cmds++ = 0x00000000;	/* REG_VGT_INDX_OFFSET */

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_SC_AA_MASK);
	*cmds++ = 0x0000ffff;	/* REG_PA_SC_AA_MASK */

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_COLORCONTROL);
	*cmds++ = 0x00000c20;

	/* load the patched vertex shader stream */
	cmds = program_shader(cmds, 0, gmem2sys_vtx_pgm, GMEM2SYS_VTX_PGM_LEN);

	/* Load the patched fragment shader stream */
	cmds =
	    program_shader(cmds, 1, gmem2sys_frag_pgm, GMEM2SYS_FRAG_PGM_LEN);

	/* SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_SQ_PROGRAM_CNTL);
	if (adreno_is_a220(adreno_dev))
		*cmds++ = 0x10018001;
	else
		*cmds++ = 0x10010001;
	*cmds++ = 0x00000008;

	/* resolve */

	/* PA_CL_VTE_CNTL */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_CL_VTE_CNTL);
	/* disable X/Y/Z transforms, X/Y/Z are premultiplied by W */
	*cmds++ = 0x00000b00;

	/* program surface info */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_RB_SURFACE_INFO);
	*cmds++ = shadow->gmem_pitch;	/* pitch, MSAA = 1 */

	/* RB_COLOR_INFO Endian=none, Linear, Format=RGBA8888, Swap=0,
	 * Base=gmem_base
	 */
	/* gmem base assumed 4K aligned. */
	if (ctx) {
		BUG_ON(ctx->gmem_base & 0xFFF);
		*cmds++ =
		    (shadow->
		     format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | ctx->
		    gmem_base;
	} else {
		/* no tmp context: merge the format into whatever value is
		 * already in the slot. */
		unsigned int temp = *cmds;
		*cmds++ = (temp & ~RB_COLOR_INFO__COLOR_FORMAT_MASK) |
			(shadow->format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT);
	}

	/* disable Z */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_DEPTHCONTROL);
	if (adreno_is_a220(adreno_dev))
		*cmds++ = 0x08;
	else
		*cmds++ = 0;

	/* set REG_PA_SU_SC_MODE_CNTL
	 * Front_ptype = draw triangles
	 * Back_ptype = draw triangles
	 * Provoking vertex = last
	 */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_SU_SC_MODE_CNTL);
	*cmds++ = 0x00080240;

	/* Use maximum scissor values -- quad vertices already have the
	 * correct bounds */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_PA_SC_SCREEN_SCISSOR_TL);
	*cmds++ = (0 << 16) | 0;
	*cmds++ = (0x1fff << 16) | (0x1fff);
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_PA_SC_WINDOW_SCISSOR_TL);
	*cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0);
	*cmds++ = (0x1fff << 16) | (0x1fff);

	/* load the viewport so that z scale = clear depth and
	 * z offset = 0.0f */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_PA_CL_VPORT_ZSCALE);
	*cmds++ = 0xbf800000;	/* -1.0f */
	*cmds++ = 0x0;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_COLOR_MASK);
	*cmds++ = 0x0000000f;	/* R = G = B = 1:enabled */

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_COLOR_DEST_MASK);
	*cmds++ = 0xffffffff;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_SQ_WRAPPING_0);
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;

	/* load the stencil ref value
	 * $AAM - do this later
	 */

	/* load the COPY state */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 6);
	*cmds++ = PM4_REG(REG_RB_COPY_CONTROL);
	*cmds++ = 0;		/* RB_COPY_CONTROL */
	*cmds++ = addr & 0xfffff000;	/* RB_COPY_DEST_BASE */
	*cmds++ = shadow->pitch >> 5;	/* RB_COPY_DEST_PITCH */

	/* Endian=none, Linear, Format=RGBA8888,Swap=0,!Dither,
	 * MaskWrite:R=G=B=A=1
	 */
	*cmds++ = 0x0003c008 |
	    (shadow->format << RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT);
	/* Make sure we stay in offsetx field. */
	BUG_ON(offset & 0xfffff000);
	*cmds++ = offset;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_MODECONTROL);
	*cmds++ = 0x6;		/* EDRAM copy */

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_CL_CLIP_CNTL);
	*cmds++ = 0x00010000;

	if (adreno_is_a220(adreno_dev)) {
		/* a220 needs extra init flags and a 3-dword draw packet */
		*cmds++ = pm4_type3_packet(PM4_SET_DRAW_INIT_FLAGS, 1);
		*cmds++ = 0;

		*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
		*cmds++ = PM4_REG(REG_LEIA_RB_LRZ_VSC_CONTROL);
		*cmds++ = 0x0000000;

		*cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 3);
		*cmds++ = 0;	/* viz query info. */
		/* PrimType=RectList, SrcSel=AutoIndex, VisCullMode=Ignore */
		*cmds++ = 0x00004088;
		*cmds++ = 3;	/* NumIndices=3 */
	} else {
		/* queue the draw packet */
		*cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 2);
		*cmds++ = 0;	/* viz query info. */
		/* PrimType=RectList, NumIndices=3, SrcSel=AutoIndex */
		*cmds++ = 0x00030088;
	}

	/* create indirect buffer command for above command sequence */
	create_ib1(drawctxt, shadow->gmem_save, start, cmds);

	return cmds;
}
/*
 * adreno_drawctxt_switch() - switch the active drawing context
 * @adreno_dev: adreno device
 * @drawctxt: context to switch to; NULL means detach and fall back to the
 *            default pagetable
 * @flags: KGSL_CONTEXT_* flags; KGSL_CONTEXT_SAVE_GMEM marks the new
 *         context for a GMEM save on its next switch-out
 *
 * Queues the save IBs for the outgoing context (registers/constants,
 * optionally shader state and GMEM), then the restore IBs for the
 * incoming one.  Ordering is a hardware contract: GMEM save/restore
 * clobbers shader state, so shaders are saved before the GMEM save and
 * restored after the GMEM restore.
 */
void
adreno_drawctxt_switch(struct adreno_device *adreno_dev,
			struct adreno_context *drawctxt,
			unsigned int flags)
{
	struct adreno_context *active_ctxt = adreno_dev->drawctxt_active;
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int cmds[5];

	if (drawctxt) {
		if (flags & KGSL_CONTEXT_SAVE_GMEM)
			/* Set the flag in context so that the save is done
			 * when this context is switched out. */
			drawctxt->flags |= CTXT_FLAGS_GMEM_SAVE;
		else
			/* Remove GMEM saving flag from the context */
			drawctxt->flags &= ~CTXT_FLAGS_GMEM_SAVE;
	}
	/* already current? */
	if (active_ctxt == drawctxt)
		return;

	KGSL_CTXT_INFO(device, "from %p to %p flags %d\n",
			adreno_dev->drawctxt_active, drawctxt, flags);
	/* save old context */
	if (active_ctxt && active_ctxt->flags & CTXT_FLAGS_GPU_HANG)
		KGSL_CTXT_WARN(device,
			"Current active context has caused gpu hang\n");

	if (active_ctxt != NULL) {
		KGSL_CTXT_INFO(device,
			"active_ctxt flags %08x\n", active_ctxt->flags);
		/* save registers and constants. */
		adreno_ringbuffer_issuecmds(device, 0,
				active_ctxt->reg_save, 3);

		if (active_ctxt->flags & CTXT_FLAGS_SHADER_SAVE) {
			/* save shader partitioning and instructions. */
			adreno_ringbuffer_issuecmds(device,
					KGSL_CMD_FLAGS_PMODE,
					active_ctxt->shader_save, 3);

			/* fixup shader partitioning parameter for
			 *  SET_SHADER_BASES.
			 */
			adreno_ringbuffer_issuecmds(device, 0,
					active_ctxt->shader_fixup, 3);

			active_ctxt->flags |= CTXT_FLAGS_SHADER_RESTORE;
		}

		if (active_ctxt->flags & CTXT_FLAGS_GMEM_SAVE
			&& active_ctxt->flags & CTXT_FLAGS_GMEM_SHADOW) {
			/* save gmem.
			 * (note: changes shader. shader must already be saved.)
			 */
			adreno_ringbuffer_issuecmds(device,
				KGSL_CMD_FLAGS_PMODE,
				active_ctxt->context_gmem_shadow.gmem_save, 3);

			/* Restore TP0_CHICKEN */
			adreno_ringbuffer_issuecmds(device, 0,
				active_ctxt->chicken_restore, 3);

			active_ctxt->flags |= CTXT_FLAGS_GMEM_RESTORE;
		}
	}

	adreno_dev->drawctxt_active = drawctxt;

	/* restore new context */
	if (drawctxt != NULL) {
		KGSL_CTXT_INFO(device,
			"drawctxt flags %08x\n", drawctxt->flags);
		/* Write the new context's id into the memstore so the host
		 * can tell which context the GPU is running. */
		cmds[0] = pm4_nop_packet(1);
		cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
		cmds[2] = pm4_type3_packet(PM4_MEM_WRITE, 2);
		cmds[3] = device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(current_context);
		cmds[4] = (unsigned int)adreno_dev->drawctxt_active;
		adreno_ringbuffer_issuecmds(device, 0, cmds, 5);
		kgsl_mmu_setstate(device, drawctxt->pagetable);

#ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
		kgsl_cffdump_syncmem(NULL, &drawctxt->gpustate,
			drawctxt->gpustate.gpuaddr, LCC_SHADOW_SIZE +
			REG_SHADOW_SIZE + CMD_BUFFER_SIZE + TEX_SHADOW_SIZE,
			false);
#endif

		/* restore gmem.
		 *  (note: changes shader. shader must not already be restored.)
		 */
		if (drawctxt->flags & CTXT_FLAGS_GMEM_RESTORE) {
			adreno_ringbuffer_issuecmds(device,
				KGSL_CMD_FLAGS_PMODE,
				drawctxt->context_gmem_shadow.gmem_restore, 3);

			/* Restore TP0_CHICKEN */
			adreno_ringbuffer_issuecmds(device, 0,
				drawctxt->chicken_restore, 3);

			drawctxt->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
		}

		/* restore registers and constants. */
		adreno_ringbuffer_issuecmds(device, 0,
				drawctxt->reg_restore, 3);

		/* restore shader instructions & partitioning. */
		if (drawctxt->flags & CTXT_FLAGS_SHADER_RESTORE) {
			adreno_ringbuffer_issuecmds(device, 0,
					drawctxt->shader_restore, 3);
		}

		/* bin base offset is not restored via the shadow on a220 */
		cmds[0] = pm4_type3_packet(PM4_SET_BIN_BASE_OFFSET, 1);
		cmds[1] = drawctxt->bin_base_offset;
		if (!adreno_is_a220(adreno_dev))
			adreno_ringbuffer_issuecmds(device, 0, cmds, 2);

	} else
		kgsl_mmu_setstate(device, device->mmu.defaultpagetable);
}
void *a2xx_snapshot(struct adreno_device *adreno_dev, void *snapshot, int *remain, int hang) { struct kgsl_device *device = &adreno_dev->dev; struct kgsl_snapshot_registers_list list; struct kgsl_snapshot_registers regs; unsigned int pmoverride; /* Choose the register set to dump */ if (adreno_is_a20x(adreno_dev)) { regs.regs = (unsigned int *) a200_registers; regs.count = a200_registers_count; } else if (adreno_is_a220(adreno_dev)) { regs.regs = (unsigned int *) a220_registers; regs.count = a220_registers_count; } else if (adreno_is_a225(adreno_dev)) { regs.regs = (unsigned int *) a225_registers; regs.count = a225_registers_count; } list.registers = ®s; list.count = 1; /* Master set of (non debug) registers */ snapshot = kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot, remain, kgsl_snapshot_dump_regs, &list); /* CP_STATE_DEBUG indexed registers */ snapshot = kgsl_snapshot_indexed_registers(device, snapshot, remain, REG_CP_STATE_DEBUG_INDEX, REG_CP_STATE_DEBUG_DATA, 0x0, 0x14); /* CP_ME indexed registers */ snapshot = kgsl_snapshot_indexed_registers(device, snapshot, remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS, 64, 44); /* * Need to temporarily turn off clock gating for the debug bus to * work */ adreno_regread(device, REG_RBBM_PM_OVERRIDE2, &pmoverride); adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xFF); /* SX debug registers */ snapshot = kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain, a2xx_snapshot_sxdebug, NULL); /* SU debug indexed registers (only for < 470) */ if (!adreno_is_a22x(adreno_dev)) snapshot = kgsl_snapshot_indexed_registers(device, snapshot, remain, REG_PA_SU_DEBUG_CNTL, REG_PA_SU_DEBUG_DATA, 0, 0x1B); /* CP debug registers */ snapshot = kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain, a2xx_snapshot_cpdebug, NULL); /* MH debug indexed registers */ snapshot = kgsl_snapshot_indexed_registers(device, snapshot, remain, MH_DEBUG_CTRL, MH_DEBUG_DATA, 0x0, 0x40); /* 
Leia only register sets */ if (adreno_is_a22x(adreno_dev)) { /* RB DEBUG indexed regisers */ snapshot = kgsl_snapshot_indexed_registers(device, snapshot, remain, REG_RB_DEBUG_CNTL, REG_RB_DEBUG_DATA, 0, 8); /* RB DEBUG indexed registers bank 2 */ snapshot = kgsl_snapshot_indexed_registers(device, snapshot, remain, REG_RB_DEBUG_CNTL, REG_RB_DEBUG_DATA + 0x1000, 0, 8); /* PC_DEBUG indexed registers */ snapshot = kgsl_snapshot_indexed_registers(device, snapshot, remain, REG_PC_DEBUG_CNTL, REG_PC_DEBUG_DATA, 0, 8); /* GRAS_DEBUG indexed registers */ snapshot = kgsl_snapshot_indexed_registers(device, snapshot, remain, REG_GRAS_DEBUG_CNTL, REG_GRAS_DEBUG_DATA, 0, 4); /* MIU debug registers */ snapshot = kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain, a2xx_snapshot_miudebug, NULL); /* SQ DEBUG debug registers */ snapshot = kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain, a2xx_snapshot_sqdebug, NULL); /* * Reading SQ THREAD causes bad things to happen on a running * system, so only read it if the GPU is already hung */ if (hang) { /* SQ THREAD debug registers */ snapshot = kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain, a2xx_snapshot_sqthreaddebug, NULL); } } /* Reset the clock gating */ adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, pmoverride); return snapshot; }
/*
 * build_regrestore_cmds() - build the per-context register restore IB
 * @adreno_dev: adreno device (used to choose the chip's register ranges)
 * @drawctxt: context whose shadow memory backs the restore
 *
 * Variant that additionally shadows REG_RB_BC_CONTROL on a20x cores (in
 * reg_values[2]; a20x and a22x are mutually exclusive, so the slot does
 * not collide with the a22x VSC slots which also start at index 2).
 * The stream is wrapped into drawctxt->reg_restore via create_ib1() and
 * tmp_ctx.cmd is advanced past it.  start[2]/start[4] are back-patched
 * once the register-range dword count is known.
 */
static void build_regrestore_cmds(struct adreno_device *adreno_dev,
				  struct adreno_context *drawctxt)
{
	unsigned int *start = tmp_ctx.cmd;
	unsigned int *cmd = start;

	unsigned int i = 0;
	unsigned int reg_array_size = 0;
	const unsigned int *ptr_register_ranges;

	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;

	/* H/W Registers: reserve start[2] for the deferred
	 * CP_LOAD_CONSTANT_CONTEXT header, patched below. */
	cmd++;
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	/* Force mismatch so the CP always reloads the registers */
	*cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) | 1;
#else
	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
#endif

	/* Based on chip id choose the register ranges */
	if (adreno_is_a220(adreno_dev)) {
		ptr_register_ranges = register_ranges_a220;
		reg_array_size = ARRAY_SIZE(register_ranges_a220);
	} else if (adreno_is_a225(adreno_dev)) {
		ptr_register_ranges = register_ranges_a225;
		reg_array_size = ARRAY_SIZE(register_ranges_a225);
	} else {
		ptr_register_ranges = register_ranges_a20x;
		reg_array_size = ARRAY_SIZE(register_ranges_a20x);
	}

	/* The range table is stored as (first, last) pairs */
	for (i = 0; i < (reg_array_size/2); i++) {
		cmd = reg_range(cmd, ptr_register_ranges[i*2],
				ptr_register_ranges[i*2+1]);
	}

	/* Now that the number of range dwords is known, patch the reserved
	 * packet header and its flags dword. */
	start[2] =
	    cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, (cmd - start) - 3);
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	start[4] |= (0 << 24) | (4 << 16);	/* Disable shadowing */
#else
	start[4] |= (1 << 24) | (4 << 16);
#endif

	/* Need to handle some of the registers separately; record the GPU
	 * address of each value slot for the save path. */
	*cmd++ = cp_type0_packet(REG_SQ_GPR_MANAGEMENT, 1);
	tmp_ctx.reg_values[0] = virt2gpu(cmd, &drawctxt->gpustate);
	*cmd++ = 0x00040400;

	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;
	*cmd++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
	tmp_ctx.reg_values[1] = virt2gpu(cmd, &drawctxt->gpustate);
	*cmd++ = 0x00000000;

	if (adreno_is_a20x(adreno_dev)) {
		/* a20x also shadows RB_BC_CONTROL (reg_values[2]) */
		*cmd++ = cp_type0_packet(REG_RB_BC_CONTROL, 1);
		tmp_ctx.reg_values[2] = virt2gpu(cmd, &drawctxt->gpustate);
		*cmd++ = 0x00000000;
	}

	if (adreno_is_a22x(adreno_dev)) {
		unsigned int i;
		unsigned int j = 2;
		/* A22x VSC pipe registers get individual shadow slots */
		for (i = REG_A220_VSC_BIN_SIZE; i <=
				REG_A220_VSC_PIPE_DATA_LENGTH_7; i++) {
			*cmd++ = cp_type0_packet(i, 1);
			tmp_ctx.reg_values[j] = virt2gpu(cmd,
				&drawctxt->gpustate);
			*cmd++ = 0x00000000;
			j++;
		}
	}

	/* ALU Constants */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	*cmd++ = (0 << 24) | (0 << 16) | 0;	/* Disable shadowing */
#else
	*cmd++ = (1 << 24) | (0 << 16) | 0;
#endif
	*cmd++ = ALU_CONSTANTS;

	/* Texture Constants */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	*cmd++ = (0 << 24) | (1 << 16) | 0;	/* Disable shadowing */
#else
	*cmd++ = (1 << 24) | (1 << 16) | 0;
#endif
	*cmd++ = TEX_CONSTANTS;

	/* Boolean Constants: the next BOOL_CONSTANTS dwords are the shadow
	 * area for boolean constants. */
	*cmd++ = cp_type3_packet(CP_SET_CONSTANT, 1 + BOOL_CONSTANTS);
	*cmd++ = (2 << 16) | 0;

	tmp_ctx.bool_shadow = virt2gpu(cmd, &drawctxt->gpustate);
	cmd += BOOL_CONSTANTS;

	/* Loop Constants: the next LOOP_CONSTANTS dwords are the shadow
	 * area for loop constants. */
	*cmd++ = cp_type3_packet(CP_SET_CONSTANT, 1 + LOOP_CONSTANTS);
	*cmd++ = (3 << 16) | 0;

	tmp_ctx.loop_shadow = virt2gpu(cmd, &drawctxt->gpustate);
	cmd += LOOP_CONSTANTS;

	/* create indirect buffer command for above command sequence */
	create_ib1(drawctxt, drawctxt->reg_restore, start, cmd);

	tmp_ctx.cmd = cmd;
}
/*
 * build_regsave_cmds() - build the per-context register save IB
 * @adreno_dev: adreno device (used to choose the chip's register ranges)
 * @drawctxt: context whose shadow memory receives the saved state
 *
 * Variant that additionally saves REG_RB_BC_CONTROL on a20x cores into
 * the reg_values[2] slot recorded by the matching build_regrestore_cmds.
 * Requires tmp_ctx.bool_shadow and tmp_ctx.loop_shadow to be set.  The
 * stream is wrapped into drawctxt->reg_save via create_ib1() and
 * tmp_ctx.cmd is advanced past it.
 */
static void build_regsave_cmds(struct adreno_device *adreno_dev,
			       struct adreno_context *drawctxt)
{
	unsigned int *start = tmp_ctx.cmd;
	unsigned int *cmd = start;

	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;

#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	/* Shadow writes are disabled: make sure the HW context is current,
	 * then explicitly copy every register into the shadow. */
	*cmd++ = cp_type3_packet(CP_CONTEXT_UPDATE, 1);
	*cmd++ = 0;

	{
		unsigned int i = 0;
		unsigned int reg_array_size = 0;
		const unsigned int *ptr_register_ranges;

		/* Based on chip id choose the register ranges */
		if (adreno_is_a220(adreno_dev)) {
			ptr_register_ranges = register_ranges_a220;
			reg_array_size = ARRAY_SIZE(register_ranges_a220);
		} else if (adreno_is_a225(adreno_dev)) {
			ptr_register_ranges = register_ranges_a225;
			reg_array_size = ARRAY_SIZE(register_ranges_a225);
		} else {
			ptr_register_ranges = register_ranges_a20x;
			reg_array_size = ARRAY_SIZE(register_ranges_a20x);
		}

		/* Write HW registers into shadow; the table holds
		 * (first, last) pairs. */
		for (i = 0; i < (reg_array_size/2) ; i++) {
			build_reg_to_mem_range(ptr_register_ranges[i*2],
					ptr_register_ranges[i*2+1],
					&cmd, drawctxt);
		}
	}

	/* Copy ALU constants */
	cmd =
	    reg_to_mem(cmd, (drawctxt->gpustate.gpuaddr) & 0xFFFFE000,
		       REG_SQ_CONSTANT_0, ALU_CONSTANTS);

	/* Copy Tex constants */
	cmd =
	    reg_to_mem(cmd,
		       (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000,
		       REG_SQ_FETCH_0, TEX_CONSTANTS);
#else
	/* Wait for idle before reading registers (works around a CP hang
	 * reading SQ registers while the SQ block is busy). */
	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;

	/* H/w registers are already shadowed; just issue zero-count loads
	 * to disable shadowing and prevent corruption. */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
	*cmd++ = 4 << 16;	/* regs, start=0 */
	*cmd++ = 0x0;		/* count = 0 */

	/* ALU constants are already shadowed; disable shadowing */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
	*cmd++ = 0 << 16;	/* ALU, start=0 */
	*cmd++ = 0x0;		/* count = 0 */

	/* Tex constants are already shadowed; disable shadowing */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
	*cmd++ = 1 << 16;	/* Tex, start=0 */
	*cmd++ = 0x0;		/* count = 0 */
#endif

	/* Need to handle some of the registers separately; destinations are
	 * the shadow slots recorded by build_regrestore_cmds. */
	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = REG_SQ_GPR_MANAGEMENT;
	*cmd++ = tmp_ctx.reg_values[0];

	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = REG_TP0_CHICKEN;
	*cmd++ = tmp_ctx.reg_values[1];

	if (adreno_is_a20x(adreno_dev)) {
		/* a20x also saves RB_BC_CONTROL into reg_values[2] */
		*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
		*cmd++ = REG_RB_BC_CONTROL;
		*cmd++ = tmp_ctx.reg_values[2];
	}

	if (adreno_is_a22x(adreno_dev)) {
		unsigned int i;
		unsigned int j = 2;
		for (i = REG_A220_VSC_BIN_SIZE; i <=
				REG_A220_VSC_PIPE_DATA_LENGTH_7; i++) {
			*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
			*cmd++ = i;
			*cmd++ = tmp_ctx.reg_values[j];
			j++;
		}
	}

	/* Copy Boolean constants */
	cmd = reg_to_mem(cmd, tmp_ctx.bool_shadow, REG_SQ_CF_BOOLEANS,
			 BOOL_CONSTANTS);

	/* Copy Loop constants */
	cmd = reg_to_mem(cmd, tmp_ctx.loop_shadow, REG_SQ_CF_LOOP,
			 LOOP_CONSTANTS);

	/* create indirect buffer command for above command sequence */
	create_ib1(drawctxt, drawctxt->reg_save, start, cmd);

	tmp_ctx.cmd = cmd;
}