/**
 * _adreno_mmu_set_pt_update_condition() - Generate commands to setup a
 * flag to indicate whether pt switch is required or not by comparing
 * current pt id and incoming pt id
 * @rb: The RB on which the commands will execute
 * @cmds: The pointer to memory where the commands are placed.
 * @ptname: Incoming pt id to set to
 *
 * Returns number of commands added.
 */
static unsigned int _adreno_mmu_set_pt_update_condition(
			struct adreno_ringbuffer *rb,
			unsigned int *cmds, unsigned int ptname)
{
	struct kgsl_device *device = rb->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	/*
	 * Remember where the packet stream starts; presumably used at the
	 * end of the function to compute the dword count returned to the
	 * caller (epilogue not visible in this chunk — TODO confirm).
	 */
	unsigned int *cmds_orig = cmds;
	/*
	 * write 1 to switch pt flag indicating that we need to execute the
	 * pt switch commands
	 */
	*cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
	*cmds++ = rb->pagetable_desc.gpuaddr +
		offsetof(struct adreno_ringbuffer_pagetable_info,
			switch_pt_enable);
	*cmds++ = 1;
	/* Make sure the memory write lands before proceeding */
	*cmds++ = cp_type3_packet(CP_WAIT_MEM_WRITES, 1);
	*cmds++ = 0;
	*cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
	*cmds++ = 0;
	if (ADRENO_FEATURE(adreno_dev, ADRENO_HAS_REG_TO_REG_CMDS)) {
		/* copy current ptid value to register SCRATCH_REG7 */
		*cmds++ = cp_type3_packet(CP_MEM_TO_REG, 2);
		*cmds++ = adreno_getreg(adreno_dev,
			ADRENO_REG_CP_SCRATCH_REG7);
		/*
		 * The current global pagetable name always lives in RB 0's
		 * pagetable descriptor, regardless of which RB is switching.
		 */
		*cmds++ = adreno_dev->ringbuffers[0].pagetable_desc.gpuaddr +
			offsetof(struct adreno_ringbuffer_pagetable_info,
				current_global_ptname);
		/* copy the incoming ptid to SCRATCH_REG6 */
		*cmds++ = cp_type3_packet(CP_MEM_TO_REG, 2);
		*cmds++ = adreno_getreg(adreno_dev,
			ADRENO_REG_CP_SCRATCH_REG6);
		*cmds++ = rb->pagetable_desc.gpuaddr +
			offsetof(struct adreno_ringbuffer_pagetable_info,
				incoming_ptname);
		/*
		 * compare the incoming ptid to current ptid and make the
		 * the pt switch commands optional based on condition
		 * that current_global_ptname(SCRATCH_REG7) ==
		 * incoming_ptid(SCRATCH_REG6)
		 */
		*cmds++ = cp_type3_packet(CP_COND_REG_EXEC, 3);
		*cmds++ = (2 << 28) | adreno_getreg(adreno_dev,
			ADRENO_REG_CP_SCRATCH_REG6);
		*cmds++ = adreno_getreg(adreno_dev,
			ADRENO_REG_CP_SCRATCH_REG7);
		/* Number of following dwords to execute conditionally */
		*cmds++ = 7;
		/*
		 * if the incoming and current pt are equal then set the pt
		 * switch flag to 0 so that the pt switch commands will be
		 * skipped
		 */
		*cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
		*cmds++ = rb->pagetable_desc.gpuaddr +
			offsetof(struct adreno_ringbuffer_pagetable_info,
				switch_pt_enable);
		*cmds++ = 0;
	} else {
	/*
	 * NOTE(review): this definition is truncated in the visible chunk —
	 * the else branch (which presumably uses @ptname) and the function's
	 * return continue beyond this point. Do not assume behavior here.
	 */
/** * adreno_context_restore() - generic context restore handler * @adreno_dev: the device * @context: the context * * Basic context restore handler that writes the context identifier * to the ringbuffer and issues pagetable switch commands if necessary. */ static int adreno_context_restore(struct adreno_device *adreno_dev, struct adreno_context *context) { struct kgsl_device *device; unsigned int cmds[8]; if (adreno_dev == NULL || context == NULL) return -EINVAL; device = &adreno_dev->dev; /* write the context identifier to the ringbuffer */ cmds[0] = cp_nop_packet(1); cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER; cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2); cmds[3] = device->memstore.gpuaddr + KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context); cmds[4] = context->base.id; /* Flush the UCHE for new context */ cmds[5] = cp_type0_packet( adreno_getreg(adreno_dev, ADRENO_REG_UCHE_INVALIDATE0), 2); cmds[6] = 0; if (adreno_is_a4xx(adreno_dev)) cmds[7] = 0x12; else if (adreno_is_a3xx(adreno_dev)) cmds[7] = 0x90000000; return adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE, cmds, 8); }
/** * adreno_context_restore() - generic context restore handler * @rb: The RB in which context is to be restored * * Basic context restore handler that writes the context identifier * to the ringbuffer and issues pagetable switch commands if necessary. */ static void adreno_context_restore(struct adreno_ringbuffer *rb) { struct kgsl_device *device = rb->device; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct adreno_context *drawctxt = rb->drawctxt_active; unsigned int cmds[11]; int ret; if (!drawctxt) return; /* * write the context identifier to the ringbuffer, write to both * the global index and the index of the RB in which the context * operates. The global values will always be reliable since we * could be in middle of RB switch in which case the RB value may * not be accurate */ cmds[0] = cp_nop_packet(1); cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER; cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2); cmds[3] = device->memstore.gpuaddr + KGSL_MEMSTORE_RB_OFFSET(rb, current_context); cmds[4] = drawctxt->base.id; cmds[5] = cp_type3_packet(CP_MEM_WRITE, 2); cmds[6] = device->memstore.gpuaddr + KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context); cmds[7] = drawctxt->base.id; /* Flush the UCHE for new context */ cmds[8] = cp_type0_packet( adreno_getreg(adreno_dev, ADRENO_REG_UCHE_INVALIDATE0), 2); cmds[9] = 0; if (adreno_is_a4xx(adreno_dev)) cmds[10] = 0x12; else if (adreno_is_a3xx(adreno_dev)) cmds[10] = 0x90000000; ret = adreno_ringbuffer_issuecmds(rb, KGSL_CMD_FLAGS_NONE, cmds, 11); if (ret) { /* * A failure to submit commands to ringbuffer means RB may * be full, in this case wait for idle and use CPU */ ret = adreno_idle(device); BUG_ON(ret); _adreno_context_restore_cpu(rb, drawctxt); } }