static void a2xx_drawctxt_save(struct adreno_device *adreno_dev,
			struct adreno_context *context)
{
	struct kgsl_device *device = &adreno_dev->dev;

	if (context == NULL)
		return;

	if (context->flags & CTXT_FLAGS_GPU_HANG)
		KGSL_CTXT_WARN(device,
			"Current active context has caused gpu hang\n");

	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
		/* save registers and constants. */
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_NONE, context->reg_save, 3);

		if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
			/* save shader partitioning and instructions. */
			adreno_ringbuffer_issuecmds(device, context,
				KGSL_CMD_FLAGS_PMODE,
				context->shader_save, 3);

			/*
			 * fixup shader partitioning parameter for
			 * SET_SHADER_BASES.
			 */
			adreno_ringbuffer_issuecmds(device, context,
				KGSL_CMD_FLAGS_NONE,
				context->shader_fixup, 3);

			context->flags |= CTXT_FLAGS_SHADER_RESTORE;
		}
	}

	if ((context->flags & CTXT_FLAGS_GMEM_SAVE) &&
	    (context->flags & CTXT_FLAGS_GMEM_SHADOW)) {
		/*
		 * save gmem.
		 * (note: changes shader. shader must already be saved.)
		 */
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_PMODE,
			context->context_gmem_shadow.gmem_save, 3);

		if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
			/* Restore TP0_CHICKEN */
			adreno_ringbuffer_issuecmds(device, context,
				KGSL_CMD_FLAGS_NONE,
				context->chicken_restore, 3);
		}
		adreno_dev->gpudev->ctx_switches_since_last_draw = 0;

		context->flags |= CTXT_FLAGS_GMEM_RESTORE;
	} else if (adreno_is_a2xx(adreno_dev))
		a2xx_drawctxt_draw_workaround(adreno_dev, context);
}
/**
 * adreno_context_restore() - generic context restore handler
 * @adreno_dev: the device
 * @context: the context
 *
 * Basic context restore handler that writes the context identifier
 * to the ringbuffer and issues pagetable switch commands if necessary.
 */
static int adreno_context_restore(struct adreno_device *adreno_dev,
				  struct adreno_context *context)
{
	struct kgsl_device *device;
	unsigned int cmds[8];

	if (adreno_dev == NULL || context == NULL)
		return -EINVAL;

	device = &adreno_dev->dev;

	/* write the context identifier to the ringbuffer */
	cmds[0] = cp_nop_packet(1);
	cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
	cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
	cmds[3] = device->memstore.gpuaddr +
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
	cmds[4] = context->base.id;

	/* Flush the UCHE for new context */
	cmds[5] = cp_type0_packet(
		adreno_getreg(adreno_dev, ADRENO_REG_UCHE_INVALIDATE0), 2);
	cmds[6] = 0;
	if (adreno_is_a4xx(adreno_dev))
		cmds[7] = 0x12;
	else if (adreno_is_a3xx(adreno_dev))
		cmds[7] = 0x90000000;

	return adreno_ringbuffer_issuecmds(device, context,
		KGSL_CMD_FLAGS_NONE, cmds, 8);
}
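/*
 * For reference, a minimal sketch of the CP packet helpers used by the
 * save/restore paths in this file. These mirror the definitions in the
 * KGSL header adreno_pm4types.h; the exact shift/mask values here are
 * assumptions based on the stock a2xx/a3xx encodings, not quoted from
 * this file. A type0 packet writes (cnt) consecutive registers starting
 * at (regindx); a type3 packet carries an opcode plus (cnt) payload
 * dwords; a NOP is simply a type3 packet the CP skips over, which is why
 * the context identifier can be parked behind cp_nop_packet(1) above.
 */
#define CP_TYPE0_PKT	((unsigned int)0 << 30)
#define CP_TYPE3_PKT	((unsigned int)3 << 30)

#define cp_type0_packet(regindx, cnt) \
	(CP_TYPE0_PKT | (((cnt) - 1) << 16) | ((regindx) & 0x7FFF))

#define cp_type3_packet(opcode, cnt) \
	(CP_TYPE3_PKT | (((cnt) - 1) << 16) | (((opcode) & 0xFF) << 8))

#define cp_nop_packet(cnt) \
	(CP_TYPE3_PKT | (((cnt) - 1) << 16) | ((CP_NOP & 0xFF) << 8))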
static void a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev,
			struct adreno_context *context)
{
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int cmd[11];
	unsigned int *cmds = &cmd[0];

	if (adreno_is_a225(adreno_dev)) {
		adreno_dev->gpudev->ctx_switches_since_last_draw++;
		/*
		 * Only run the workaround once more than
		 * ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW context
		 * switches have happened without an intervening draw.
		 */
		if (adreno_dev->gpudev->ctx_switches_since_last_draw >
				ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW)
			adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
		else
			return;
		/* Issue an empty draw call to keep the PC block flushed */
		*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
		*cmds++ = (0x4 << 16) | (REG_PA_SU_SC_MODE_CNTL - 0x2000);
		*cmds++ = 0;
		*cmds++ = cp_type3_packet(CP_DRAW_INDX, 5);
		*cmds++ = 0;
		*cmds++ = 1 << 14;
		*cmds++ = 0;
		*cmds++ = device->mmu.setstate_memory.gpuaddr;
		*cmds++ = 0;
		*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
		*cmds++ = 0x00000000;
	} else {
		/* On other a2xx parts, unblock the CP in case a shader
		 * space reuse event was dropped. */
		*cmds++ = cp_type3_packet(CP_SET_SHADER_BASES, 1);
		*cmds++ = adreno_encode_istore_size(adreno_dev) |
				adreno_dev->pix_shader_start;
	}

	adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_PMODE,
		&cmd[0], cmds - cmd);
}
static void a2xx_ctxt_save(struct adreno_device *adreno_dev,
			struct adreno_context *context)
{
	struct kgsl_device *device = &adreno_dev->dev;

	if (context == NULL)
		return;

	if (context->flags & CTXT_FLAGS_GPU_HANG)
		KGSL_CTXT_WARN(device,
			"Current active context has caused gpu hang\n");

	KGSL_CTXT_INFO(device,
		"active context flags %08x\n", context->flags);

	/* save registers and constants. */
	adreno_ringbuffer_issuecmds(device, 0, context->reg_save, 3);

	if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
		/* save shader partitioning and instructions. */
		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
			context->shader_save, 3);

		/*
		 * fixup shader partitioning parameter for
		 * SET_SHADER_BASES.
		 */
		adreno_ringbuffer_issuecmds(device, 0,
			context->shader_fixup, 3);

		context->flags |= CTXT_FLAGS_SHADER_RESTORE;
	}

	if ((context->flags & CTXT_FLAGS_GMEM_SAVE) &&
	    (context->flags & CTXT_FLAGS_GMEM_SHADOW)) {
		/*
		 * save gmem.
		 * (note: changes shader. shader must already be saved.)
		 */
		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
			context->context_gmem_shadow.gmem_save, 3);

		/* Restore TP0_CHICKEN */
		adreno_ringbuffer_issuecmds(device, 0,
			context->chicken_restore, 3);

		context->flags |= CTXT_FLAGS_GMEM_RESTORE;
	}
}
static void a2xx_ctxt_restore(struct adreno_device *adreno_dev,
			struct adreno_context *context)
{
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int cmds[5];

	if (context == NULL) {
		/* No context - set the default pagetable and that's it */
		kgsl_mmu_setstate(device, device->mmu.defaultpagetable);
		return;
	}

	KGSL_CTXT_INFO(device, "context flags %08x\n", context->flags);

	cmds[0] = cp_nop_packet(1);
	cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
	cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
	cmds[3] = device->memstore.gpuaddr +
		KGSL_DEVICE_MEMSTORE_OFFSET(current_context);
	cmds[4] = (unsigned int) context;
	adreno_ringbuffer_issuecmds(device, 0, cmds, 5);
	kgsl_mmu_setstate(device, context->pagetable);

#ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
	kgsl_cffdump_syncmem(NULL, &context->gpustate,
		context->gpustate.gpuaddr, LCC_SHADOW_SIZE +
		REG_SHADOW_SIZE + CMD_BUFFER_SIZE + TEX_SHADOW_SIZE, false);
#endif

	/*
	 * restore gmem.
	 * (note: changes shader. shader must not already be restored.)
	 */
	if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
			context->context_gmem_shadow.gmem_restore, 3);

		/* Restore TP0_CHICKEN */
		adreno_ringbuffer_issuecmds(device, 0,
			context->chicken_restore, 3);

		context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
	}

	/* restore registers and constants. */
	adreno_ringbuffer_issuecmds(device, 0, context->reg_restore, 3);

	/* restore shader instructions & partitioning. */
	if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
		adreno_ringbuffer_issuecmds(device, 0,
			context->shader_restore, 3);
	}

	if (adreno_is_a20x(adreno_dev)) {
		cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
		cmds[1] = context->bin_base_offset;
		adreno_ringbuffer_issuecmds(device, 0, cmds, 2);
	}
}
/**
 * adreno_context_restore() - generic context restore handler
 * @rb: The RB in which context is to be restored
 *
 * Basic context restore handler that writes the context identifier
 * to the ringbuffer and issues pagetable switch commands if necessary.
 */
static void adreno_context_restore(struct adreno_ringbuffer *rb)
{
	struct kgsl_device *device = rb->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_context *drawctxt = rb->drawctxt_active;
	unsigned int cmds[11];
	int ret;

	if (!drawctxt)
		return;

	/*
	 * write the context identifier to the ringbuffer, write to both
	 * the global index and the index of the RB in which the context
	 * operates. The global values will always be reliable since we
	 * could be in middle of RB switch in which case the RB value may
	 * not be accurate
	 */
	cmds[0] = cp_nop_packet(1);
	cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
	cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
	cmds[3] = device->memstore.gpuaddr +
		KGSL_MEMSTORE_RB_OFFSET(rb, current_context);
	cmds[4] = drawctxt->base.id;
	cmds[5] = cp_type3_packet(CP_MEM_WRITE, 2);
	cmds[6] = device->memstore.gpuaddr +
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
	cmds[7] = drawctxt->base.id;
	/* Flush the UCHE for new context */
	cmds[8] = cp_type0_packet(
		adreno_getreg(adreno_dev, ADRENO_REG_UCHE_INVALIDATE0), 2);
	cmds[9] = 0;
	if (adreno_is_a4xx(adreno_dev))
		cmds[10] = 0x12;
	else if (adreno_is_a3xx(adreno_dev))
		cmds[10] = 0x90000000;

	ret = adreno_ringbuffer_issuecmds(rb, KGSL_CMD_FLAGS_NONE, cmds, 11);
	if (ret) {
		/*
		 * A failure to submit commands to ringbuffer means RB may
		 * be full, in this case wait for idle and use CPU
		 */
		ret = adreno_idle(device);
		BUG_ON(ret);
		_adreno_context_restore_cpu(rb, drawctxt);
	}
}
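/*
 * A plausible sketch (not quoted from this file) of the CPU fallback
 * named above: when the RB has no room, the same current_context
 * bookkeeping that the CP_MEM_WRITE packets perform is done from the
 * CPU once the GPU is idle. The kgsl_sharedmem_writel() signature used
 * here is an assumption based on this kernel era.
 */
static void _adreno_context_restore_cpu(struct adreno_ringbuffer *rb,
			struct adreno_context *drawctxt)
{
	struct kgsl_device *device = rb->device;

	/* per-RB slot of the memstore */
	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_RB_OFFSET(rb, current_context),
		drawctxt ? drawctxt->base.id : 0);
	/* global slot of the memstore */
	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context),
		drawctxt ? drawctxt->base.id : 0);
}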
int adreno_context_restore(struct adreno_device *adreno_dev,
			   struct adreno_context *context)
{
	struct kgsl_device *device;
	unsigned int cmds[5];

	if (adreno_dev == NULL || context == NULL)
		return -EINVAL;

	device = &adreno_dev->dev;

	/* write the context identifier to the ringbuffer */
	cmds[0] = cp_nop_packet(1);
	cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
	cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
	cmds[3] = device->memstore.gpuaddr +
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
	cmds[4] = context->base.id;
	return adreno_ringbuffer_issuecmds(device, context,
		KGSL_CMD_FLAGS_NONE, cmds, 5);
}
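/*
 * Sketch of the memstore addressing used by the restore variants above.
 * This mirrors the KGSL definitions in kgsl.h; treat the exact layout
 * as an assumption rather than a quotation. The memstore is one shared
 * buffer holding a struct kgsl_devmemstore per context, with slot
 * KGSL_MEMSTORE_GLOBAL reserved for device-global values such as
 * current_context; the CP_MEM_WRITE packets above target that slot.
 */
#define KGSL_MEMSTORE_GLOBAL	(0)

#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
	(((ctxt_id) * sizeof(struct kgsl_devmemstore)) + \
	 offsetof(struct kgsl_devmemstore, field))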
/* switch drawing contexts */
void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
			struct adreno_context *drawctxt,
			unsigned int flags)
{
	struct adreno_context *active_ctxt = adreno_dev->drawctxt_active;
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int cmds[5];

	if (drawctxt) {
		if (flags & KGSL_CONTEXT_SAVE_GMEM)
			/* Set the flag in context so that the save is done
			 * when this context is switched out. */
			drawctxt->flags |= CTXT_FLAGS_GMEM_SAVE;
		else
			/* Remove GMEM saving flag from the context */
			drawctxt->flags &= ~CTXT_FLAGS_GMEM_SAVE;
	}

	/* already current? */
	if (active_ctxt == drawctxt)
		return;

	KGSL_CTXT_INFO(device, "from %p to %p flags %d\n",
		adreno_dev->drawctxt_active, drawctxt, flags);

	/* save old context */
	if (active_ctxt && active_ctxt->flags & CTXT_FLAGS_GPU_HANG)
		KGSL_CTXT_WARN(device,
			"Current active context has caused gpu hang\n");

	if (active_ctxt != NULL) {
		KGSL_CTXT_INFO(device,
			"active_ctxt flags %08x\n", active_ctxt->flags);

		/* save registers and constants. */
		adreno_ringbuffer_issuecmds(device, 0,
			active_ctxt->reg_save, 3);

		if (active_ctxt->flags & CTXT_FLAGS_SHADER_SAVE) {
			/* save shader partitioning and instructions. */
			adreno_ringbuffer_issuecmds(device,
				KGSL_CMD_FLAGS_PMODE,
				active_ctxt->shader_save, 3);

			/*
			 * fixup shader partitioning parameter for
			 * SET_SHADER_BASES.
			 */
			adreno_ringbuffer_issuecmds(device, 0,
				active_ctxt->shader_fixup, 3);

			active_ctxt->flags |= CTXT_FLAGS_SHADER_RESTORE;
		}

		if (active_ctxt->flags & CTXT_FLAGS_GMEM_SAVE &&
		    active_ctxt->flags & CTXT_FLAGS_GMEM_SHADOW) {
			/*
			 * save gmem.
			 * (note: changes shader. shader must already be
			 * saved.)
			 */
			adreno_ringbuffer_issuecmds(device,
				KGSL_CMD_FLAGS_PMODE,
				active_ctxt->context_gmem_shadow.gmem_save, 3);

			/* Restore TP0_CHICKEN */
			adreno_ringbuffer_issuecmds(device, 0,
				active_ctxt->chicken_restore, 3);

			active_ctxt->flags |= CTXT_FLAGS_GMEM_RESTORE;
		}
	}

	adreno_dev->drawctxt_active = drawctxt;

	/* restore new context */
	if (drawctxt != NULL) {
		KGSL_CTXT_INFO(device,
			"drawctxt flags %08x\n", drawctxt->flags);

		cmds[0] = pm4_nop_packet(1);
		cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
		cmds[2] = pm4_type3_packet(PM4_MEM_WRITE, 2);
		cmds[3] = device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(current_context);
		cmds[4] = (unsigned int)adreno_dev->drawctxt_active;
		adreno_ringbuffer_issuecmds(device, 0, cmds, 5);
		kgsl_mmu_setstate(device, drawctxt->pagetable);

#ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
		kgsl_cffdump_syncmem(NULL, &drawctxt->gpustate,
			drawctxt->gpustate.gpuaddr, LCC_SHADOW_SIZE +
			REG_SHADOW_SIZE + CMD_BUFFER_SIZE + TEX_SHADOW_SIZE,
			false);
#endif

		/*
		 * restore gmem.
		 * (note: changes shader. shader must not already be
		 * restored.)
		 */
		if (drawctxt->flags & CTXT_FLAGS_GMEM_RESTORE) {
			adreno_ringbuffer_issuecmds(device,
				KGSL_CMD_FLAGS_PMODE,
				drawctxt->context_gmem_shadow.gmem_restore, 3);

			/* Restore TP0_CHICKEN */
			adreno_ringbuffer_issuecmds(device, 0,
				drawctxt->chicken_restore, 3);

			drawctxt->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
		}

		/* restore registers and constants. */
		adreno_ringbuffer_issuecmds(device, 0,
			drawctxt->reg_restore, 3);

		/* restore shader instructions & partitioning. */
		if (drawctxt->flags & CTXT_FLAGS_SHADER_RESTORE) {
			adreno_ringbuffer_issuecmds(device, 0,
				drawctxt->shader_restore, 3);
		}

		cmds[0] = pm4_type3_packet(PM4_SET_BIN_BASE_OFFSET, 1);
		cmds[1] = drawctxt->bin_base_offset;
		if (!adreno_is_a220(adreno_dev))
			adreno_ringbuffer_issuecmds(device, 0, cmds, 2);
	} else
		kgsl_mmu_setstate(device, device->mmu.defaultpagetable);
}
static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
			struct adreno_context *context)
{
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int cmds[5];

	if (context == NULL) {
		/* No context - set the default pagetable and that's it */
		kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
			adreno_dev->drawctxt_active->id);
		return;
	}

	KGSL_CTXT_INFO(device, "context flags %08x\n", context->flags);

	/* write the context identifier to the memstore */
	cmds[0] = cp_nop_packet(1);
	cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
	cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
	cmds[3] = device->memstore.gpuaddr +
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
	cmds[4] = context->id;
	adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
		cmds, 5);
	kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);

#ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
	kgsl_cffdump_syncmem(NULL, &context->gpustate,
		context->gpustate.gpuaddr, LCC_SHADOW_SIZE +
		REG_SHADOW_SIZE + CMD_BUFFER_SIZE + TEX_SHADOW_SIZE, false);
#endif

	/*
	 * restore gmem.
	 * (note: changes shader. shader must not already be restored.)
	 */
	if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_PMODE,
			context->context_gmem_shadow.gmem_restore, 3);

		if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
			/* Restore TP0_CHICKEN */
			adreno_ringbuffer_issuecmds(device, context,
				KGSL_CMD_FLAGS_NONE,
				context->chicken_restore, 3);
		}

		context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
	}

	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
		/* restore registers and constants. */
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);

		/* restore shader instructions & partitioning. */
		if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
			adreno_ringbuffer_issuecmds(device, context,
				KGSL_CMD_FLAGS_NONE,
				context->shader_restore, 3);
		}
	}

	if (adreno_is_a20x(adreno_dev)) {
		cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
		cmds[1] = context->bin_base_offset;
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_NONE, cmds, 2);
	}
}
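/*
 * Usage sketch: the a2xx handlers above are not called directly; they
 * are wired into the per-target GPU ops table and dispatched from the
 * generic context-switch path. A minimal sketch, assuming the stock
 * struct adreno_gpudev layout from adreno.h (field names vary between
 * kernel versions, and the remaining ops are elided):
 */
struct adreno_gpudev adreno_a2xx_gpudev = {
	/* ... register tables, ctxt_create, IRQ handlers elided ... */
	.ctxt_save = a2xx_drawctxt_save,
	.ctxt_restore = a2xx_drawctxt_restore,
	.ctxt_draw_workaround = a2xx_drawctxt_draw_workaround,
};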