Example #1
static void a2xx_ctxt_restore(struct adreno_device *adreno_dev,
			struct adreno_context *context)
{
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int cmds[5];

	if (context == NULL) {
		/* No context - set the default pagetable and that's it */
		kgsl_mmu_setstate(device, device->mmu.defaultpagetable);
		return;
	}

	KGSL_CTXT_INFO(device, "context flags %08x\n", context->flags);

	cmds[0] = cp_nop_packet(1);
	cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
	cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
	cmds[3] = device->memstore.gpuaddr +
		KGSL_DEVICE_MEMSTORE_OFFSET(current_context);
	cmds[4] = (unsigned int) context;
	adreno_ringbuffer_issuecmds(device, 0, cmds, 5);
	kgsl_mmu_setstate(device, context->pagetable);

#ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
	kgsl_cffdump_syncmem(NULL, &context->gpustate,
		context->gpustate.gpuaddr, LCC_SHADOW_SIZE +
		REG_SHADOW_SIZE + CMD_BUFFER_SIZE + TEX_SHADOW_SIZE, false);
#endif

	/* restore gmem.
	 *  (note: changes shader. shader must not already be restored.)
	 */
	if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
			context->context_gmem_shadow.gmem_restore, 3);

		/* Restore TP0_CHICKEN */
		adreno_ringbuffer_issuecmds(device, 0,
			context->chicken_restore, 3);

		context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
	}

	/* restore registers and constants. */
	adreno_ringbuffer_issuecmds(device, 0,
		context->reg_restore, 3);

	/* restore shader instructions & partitioning. */
	if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
		adreno_ringbuffer_issuecmds(device, 0,
			context->shader_restore, 3);
	}

	if (adreno_is_a20x(adreno_dev)) {
		cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
		cmds[1] = context->bin_base_offset;
		adreno_ringbuffer_issuecmds(device, 0, cmds, 2);
	}
}
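The five dwords built above form two PM4 type-3 packets: a NOP that carries the KGSL_CONTEXT_TO_MEM_IDENTIFIER marker, followed by a CP_MEM_WRITE that stores the context pointer into the memstore so the host can tell which context the GPU is currently running. As a rough sketch of the packet-header helpers (the bit layout and opcode values are assumptions typical of this GPU family; check a2xx_reg.h in your tree for the authoritative definitions):

/* Assumed PM4 type-3 header encoding: packet type in bits 31:30,
 * (dword count - 1) in bits 29:16, opcode in bits 15:8. The opcode
 * values below are illustrative assumptions. */
#define CP_TYPE3_PKT	(3u << 30)
#define CP_NOP		0x10
#define CP_MEM_WRITE	0x3d

static inline unsigned int cp_type3_packet(unsigned int opcode,
					   unsigned int cnt)
{
	return CP_TYPE3_PKT | ((cnt - 1) << 16) | ((opcode & 0xff) << 8);
}

static inline unsigned int cp_nop_packet(unsigned int cnt)
{
	return cp_type3_packet(CP_NOP, cnt);
}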
Example #2
/* create buffers for saving/restoring registers, constants, & GMEM */
static int a2xx_ctxt_gmem_shadow(struct adreno_device *adreno_dev,
			struct adreno_context *drawctxt)
{
	int result;

	calc_gmemsize(&drawctxt->context_gmem_shadow,
		adreno_dev->gmemspace.sizebytes);
	tmp_ctx.gmem_base = adreno_dev->gmemspace.gpu_base;

	result = kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow,
		drawctxt->pagetable, drawctxt->context_gmem_shadow.size);

	if (result)
		return result;

	/* we've allocated the shadow, when swapped out, GMEM must be saved. */
	drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW | CTXT_FLAGS_GMEM_SAVE;

	/* blank out gmem shadow. */
	kgsl_sharedmem_set(&drawctxt->context_gmem_shadow.gmemshadow, 0, 0,
			   drawctxt->context_gmem_shadow.size);

	/* build quad vertex buffer */
	build_quad_vtxbuff(drawctxt, &drawctxt->context_gmem_shadow,
		&tmp_ctx.cmd);

	/* build TP0_CHICKEN register restore command buffer */
	tmp_ctx.cmd = build_chicken_restore_cmds(drawctxt);

	/* build indirect command buffers to save & restore gmem */
	/* Idle because we are reading PM override registers */
	adreno_idle(&adreno_dev->dev, KGSL_TIMEOUT_DEFAULT);
	drawctxt->context_gmem_shadow.gmem_save_commands = tmp_ctx.cmd;
	tmp_ctx.cmd =
	    build_gmem2sys_cmds(adreno_dev, drawctxt,
				&drawctxt->context_gmem_shadow);
	drawctxt->context_gmem_shadow.gmem_restore_commands = tmp_ctx.cmd;
	tmp_ctx.cmd =
	    build_sys2gmem_cmds(adreno_dev, drawctxt,
				&drawctxt->context_gmem_shadow);

	kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
			    KGSL_CACHE_OP_FLUSH);

	kgsl_cffdump_syncmem(NULL,
			&drawctxt->context_gmem_shadow.gmemshadow,
			drawctxt->context_gmem_shadow.gmemshadow.gpuaddr,
			drawctxt->context_gmem_shadow.gmemshadow.size, false);

	return 0;
}
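Note that every adreno_ringbuffer_issuecmds() call in these examples passes exactly three dwords: reg_restore, gmem_save, gmem_restore, and friends are not the command streams themselves but small IB1 headers pointing into the gpustate allocation, where builders such as build_gmem2sys_cmds() and build_sys2gmem_cmds() wrote the real packets through tmp_ctx.cmd. A hedged sketch of such a header builder (the helper name, the CP_INDIRECT_BUFFER_PFD opcode, and virt2gpu() are assumptions modeled on this driver family):

/* Build a 3-dword indirect-buffer header referencing the command
 * stream between start and end inside the gpustate allocation.
 * create_ib1, CP_INDIRECT_BUFFER_PFD and virt2gpu are assumed names. */
static void create_ib1(struct adreno_context *drawctxt, unsigned int *cmd,
		       unsigned int *start, unsigned int *end)
{
	cmd[0] = cp_type3_packet(CP_INDIRECT_BUFFER_PFD, 2);
	cmd[1] = virt2gpu(start, &drawctxt->gpustate);	/* GPU address */
	cmd[2] = end - start;				/* length in dwords */
}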
Example #3
static int a2xx_create_gmem_shadow(struct adreno_device *adreno_dev,
			struct adreno_context *drawctxt)
{
	int result;

	calc_gmemsize(&drawctxt->context_gmem_shadow, adreno_dev->gmem_size);
	tmp_ctx.gmem_base = adreno_dev->gmem_base;

	result = kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow,
		drawctxt->pagetable, drawctxt->context_gmem_shadow.size);

	if (result)
		return result;

	
	/* set the gmem shadow flag for the context */
	drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW;

	
	/* blank out gmem shadow. */
	kgsl_sharedmem_set(&drawctxt->context_gmem_shadow.gmemshadow, 0, 0,
			   drawctxt->context_gmem_shadow.size);

	
	/* build quad vertex buffer */
	build_quad_vtxbuff(drawctxt, &drawctxt->context_gmem_shadow,
		&tmp_ctx.cmd);

	
	/* build TP0_CHICKEN register restore command buffer */
	if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE))
		tmp_ctx.cmd = build_chicken_restore_cmds(drawctxt);

	
	/* build indirect command buffers to save & restore gmem */
	drawctxt->context_gmem_shadow.gmem_save_commands = tmp_ctx.cmd;
	tmp_ctx.cmd =
	    build_gmem2sys_cmds(adreno_dev, drawctxt,
				&drawctxt->context_gmem_shadow);
	drawctxt->context_gmem_shadow.gmem_restore_commands = tmp_ctx.cmd;
	tmp_ctx.cmd =
	    build_sys2gmem_cmds(adreno_dev, drawctxt,
				&drawctxt->context_gmem_shadow);

	kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
			    KGSL_CACHE_OP_FLUSH);

	kgsl_cffdump_syncmem(NULL,
			&drawctxt->context_gmem_shadow.gmemshadow,
			drawctxt->context_gmem_shadow.gmemshadow.gpuaddr,
			drawctxt->context_gmem_shadow.gmemshadow.size, false);

	return 0;
}
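Compared with a2xx_ctxt_gmem_shadow() above, this newer variant only marks the shadow as allocated (CTXT_FLAGS_GMEM_SHADOW) and leaves the decision to save GMEM to the switch path, and it skips the TP0_CHICKEN restore buffer entirely for preamble contexts, which carry their own state-setup commands. An illustrative sketch of the flag bits that drive these paths (bit positions are assumptions; see adreno_drawctxt.h for the real definitions):

/* Assumed context-flag layout; bit positions are illustrative only. */
#define CTXT_FLAGS_STATE_SHADOW		BIT(1)	/* reg/constant shadow allocated */
#define CTXT_FLAGS_GMEM_SHADOW		BIT(2)	/* gmem shadow allocated */
#define CTXT_FLAGS_GMEM_SAVE		BIT(3)	/* save gmem on switch-out */
#define CTXT_FLAGS_GMEM_RESTORE		BIT(4)	/* restore gmem on switch-in */
#define CTXT_FLAGS_PREAMBLE		BIT(5)	/* userspace preamble restores state */
#define CTXT_FLAGS_SHADER_SAVE		BIT(6)	/* save shader on switch-out */
#define CTXT_FLAGS_SHADER_RESTORE	BIT(7)	/* restore shader on switch-in */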
Example #4
static int a2xx_drawctxt_create(struct adreno_device *adreno_dev,
	struct adreno_context *drawctxt)
{
	int ret;


	/* Allocate memory for the GPU state and the context commands */
	ret = kgsl_allocate(&drawctxt->gpustate,
		drawctxt->pagetable, _context_size(adreno_dev));

	if (ret)
		return ret;

	kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0,
		_context_size(adreno_dev));

	tmp_ctx.cmd = tmp_ctx.start
	    = (unsigned int *)((char *)drawctxt->gpustate.hostptr + CMD_OFFSET);

	if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
		ret = a2xx_create_gpustate_shadow(adreno_dev, drawctxt);
		if (ret)
			goto done;

		drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE;
	}

	if (!(drawctxt->flags & CTXT_FLAGS_NOGMEMALLOC)) {
		ret = a2xx_create_gmem_shadow(adreno_dev, drawctxt);
		if (ret)
			goto done;
	}

	

	/* Flush and sync the gpustate memory */
	kgsl_cache_range_op(&drawctxt->gpustate,
			    KGSL_CACHE_OP_FLUSH);

	kgsl_cffdump_syncmem(NULL, &drawctxt->gpustate,
			drawctxt->gpustate.gpuaddr,
			drawctxt->gpustate.size, false);

done:
	if (ret)
		kgsl_sharedmem_free(&drawctxt->gpustate);

	return ret;
}
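These a2xx callbacks are not called directly; they are reached through the per-GPU dispatch table, so the core context code picks the right implementation for the detected chip. A hedged sketch of the wiring (the struct and field names are assumptions modeled on this driver family):

/* Assumed dispatch-table wiring; field names are illustrative. */
struct adreno_gpudev adreno_a2xx_gpudev = {
	.ctxt_create	= a2xx_drawctxt_create,
	.ctxt_save	= a2xx_drawctxt_save,
	.ctxt_restore	= a2xx_drawctxt_restore,
	/* ... irq handling, ringbuffer init, etc. ... */
};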
Example #5
/* create buffers for saving/restoring registers, constants, & GMEM */
static int a2xx_ctxt_gpustate_shadow(struct adreno_device *adreno_dev,
			struct adreno_context *drawctxt)
{
	int result;

	/* Allocate vmalloc memory to store the gpustate */
	result = kgsl_allocate(&drawctxt->gpustate,
		drawctxt->pagetable, _context_size(adreno_dev));

	if (result)
		return result;

	drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW;

	/* Blank out h/w register, constant, and command buffer shadows. */
	kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0,
			   _context_size(adreno_dev));

	/* set-up command and vertex buffer pointers */
	tmp_ctx.cmd = tmp_ctx.start
	    = (unsigned int *)((char *)drawctxt->gpustate.hostptr + CMD_OFFSET);

	/* build indirect command buffers to save & restore regs/constants */
	adreno_idle(&adreno_dev->dev, KGSL_TIMEOUT_DEFAULT);
	build_regrestore_cmds(adreno_dev, drawctxt);
	build_regsave_cmds(adreno_dev, drawctxt);

	build_shader_save_restore_cmds(adreno_dev, drawctxt);

	kgsl_cache_range_op(&drawctxt->gpustate,
			    KGSL_CACHE_OP_FLUSH);

	kgsl_cffdump_syncmem(NULL, &drawctxt->gpustate,
			drawctxt->gpustate.gpuaddr,
			drawctxt->gpustate.size, false);
	return 0;
}
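The single gpustate allocation is carved into fixed regions: the register, constant, and texture shadows that the save/restore packets copy through, plus command space starting at CMD_OFFSET where tmp_ctx.cmd writes the built packets. The cffdump calls in Examples #1 and #6 sync exactly the sum of those shadow regions. A purely illustrative layout (the sizes and offsets below are placeholders, not the driver's real values):

/* Placeholder layout sketch of the gpustate allocation. */
#define LCC_SHADOW_SIZE		0x2000	/* loop/bool/float constant shadows */
#define REG_SHADOW_SIZE		0x1000	/* h/w register shadow */
#define TEX_SHADOW_SIZE		0x1000	/* texture constant shadow */
#define CMD_BUFFER_SIZE		0x3000	/* space for built save/restore packets */
#define CMD_OFFSET		(LCC_SHADOW_SIZE + REG_SHADOW_SIZE + TEX_SHADOW_SIZE)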
Example #6
/* switch drawing contexts */
void
adreno_drawctxt_switch(struct adreno_device *adreno_dev,
			struct adreno_context *drawctxt,
			unsigned int flags)
{
	struct adreno_context *active_ctxt =
	  adreno_dev->drawctxt_active;
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int cmds[5];

	if (drawctxt) {
		if (flags & KGSL_CONTEXT_SAVE_GMEM)
			/* Set the flag in context so that the save is done
			 * when this context is switched out. */
			drawctxt->flags |= CTXT_FLAGS_GMEM_SAVE;
		else
			/* Remove GMEM saving flag from the context */
			drawctxt->flags &= ~CTXT_FLAGS_GMEM_SAVE;
	}
	/* already current? */
	if (active_ctxt == drawctxt)
		return;

	KGSL_CTXT_INFO(device, "from %p to %p flags %d\n",
			adreno_dev->drawctxt_active, drawctxt, flags);
	/* save old context */
	if (active_ctxt && active_ctxt->flags & CTXT_FLAGS_GPU_HANG)
		KGSL_CTXT_WARN(device,
			"Current active context has caused gpu hang\n");

	if (active_ctxt != NULL) {
		KGSL_CTXT_INFO(device,
			"active_ctxt flags %08x\n", active_ctxt->flags);
		/* save registers and constants. */
		adreno_ringbuffer_issuecmds(device, 0,
				active_ctxt->reg_save, 3);

		if (active_ctxt->flags & CTXT_FLAGS_SHADER_SAVE) {
			/* save shader partitioning and instructions. */
			adreno_ringbuffer_issuecmds(device,
					KGSL_CMD_FLAGS_PMODE,
					active_ctxt->shader_save, 3);

			/* fixup shader partitioning parameter for
			 *  SET_SHADER_BASES.
			 */
			adreno_ringbuffer_issuecmds(device, 0,
					active_ctxt->shader_fixup, 3);

			active_ctxt->flags |= CTXT_FLAGS_SHADER_RESTORE;
		}

		if (active_ctxt->flags & CTXT_FLAGS_GMEM_SAVE
			&& active_ctxt->flags & CTXT_FLAGS_GMEM_SHADOW) {
			/* save gmem.
			 * (note: changes shader. shader must already be saved.)
			 */
			adreno_ringbuffer_issuecmds(device,
				KGSL_CMD_FLAGS_PMODE,
				active_ctxt->context_gmem_shadow.gmem_save, 3);

			/* Restore TP0_CHICKEN */
			adreno_ringbuffer_issuecmds(device, 0,
				active_ctxt->chicken_restore, 3);

			active_ctxt->flags |= CTXT_FLAGS_GMEM_RESTORE;
		}
	}

	adreno_dev->drawctxt_active = drawctxt;

	/* restore new context */
	if (drawctxt != NULL) {

		KGSL_CTXT_INFO(device,
			"drawctxt flags %08x\n", drawctxt->flags);
		cmds[0] = pm4_nop_packet(1);
		cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
		cmds[2] = pm4_type3_packet(PM4_MEM_WRITE, 2);
		cmds[3] = device->memstore.gpuaddr +
				KGSL_DEVICE_MEMSTORE_OFFSET(current_context);
		cmds[4] = (unsigned int)adreno_dev->drawctxt_active;
		adreno_ringbuffer_issuecmds(device, 0, cmds, 5);
		kgsl_mmu_setstate(device, drawctxt->pagetable);

#ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
		kgsl_cffdump_syncmem(NULL, &drawctxt->gpustate,
			drawctxt->gpustate.gpuaddr, LCC_SHADOW_SIZE +
			REG_SHADOW_SIZE + CMD_BUFFER_SIZE + TEX_SHADOW_SIZE,
			false);
#endif

		/* restore gmem.
		 *  (note: changes shader. shader must not already be restored.)
		 */
		if (drawctxt->flags & CTXT_FLAGS_GMEM_RESTORE) {
			adreno_ringbuffer_issuecmds(device,
				KGSL_CMD_FLAGS_PMODE,
				drawctxt->context_gmem_shadow.gmem_restore, 3);

			/* Restore TP0_CHICKEN */
			adreno_ringbuffer_issuecmds(device, 0,
				drawctxt->chicken_restore, 3);

			drawctxt->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
		}

		/* restore registers and constants. */
		adreno_ringbuffer_issuecmds(device, 0,
					  drawctxt->reg_restore, 3);

		/* restore shader instructions & partitioning. */
		if (drawctxt->flags & CTXT_FLAGS_SHADER_RESTORE) {
			adreno_ringbuffer_issuecmds(device, 0,
					  drawctxt->shader_restore, 3);
		}

		cmds[0] = pm4_type3_packet(PM4_SET_BIN_BASE_OFFSET, 1);
		cmds[1] = drawctxt->bin_base_offset;
		if (!adreno_is_a220(adreno_dev))
			adreno_ringbuffer_issuecmds(device, 0, cmds, 2);

	} else
		kgsl_mmu_setstate(device, device->mmu.defaultpagetable);
}
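The MEM_WRITE issued at switch time is what later lets the host recover which context the GPU last ran, e.g. during hang recovery. A hedged sketch of the read side (the use of kgsl_sharedmem_readl() is modeled on this driver; treat the exact signature as an assumption):

/* Read back the current-context marker written by the GPU switch
 * sequence above. The signature of kgsl_sharedmem_readl is assumed. */
static unsigned int current_context_ptr(struct kgsl_device *device)
{
	unsigned int ptr;

	kgsl_sharedmem_readl(&device->memstore, &ptr,
		KGSL_DEVICE_MEMSTORE_OFFSET(current_context));
	return ptr;
}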
Example #7
static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
			struct adreno_context *context)
{
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int cmds[5];

	if (context == NULL) {
		/* No context - set the default pagetable and that's it */
		kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
				adreno_dev->drawctxt_active->id);
		return;
	}

	KGSL_CTXT_INFO(device, "context flags %08x\n", context->flags);

	cmds[0] = cp_nop_packet(1);
	cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
	cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
	cmds[3] = device->memstore.gpuaddr +
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
	cmds[4] = context->id;
	adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
					cmds, 5);
	kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);

#ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
	kgsl_cffdump_syncmem(NULL, &context->gpustate,
		context->gpustate.gpuaddr, LCC_SHADOW_SIZE +
		REG_SHADOW_SIZE + CMD_BUFFER_SIZE + TEX_SHADOW_SIZE, false);
#endif

	/* restore gmem.
	 *  (note: changes shader. shader must not already be restored.)
	 */
	if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_PMODE,
			context->context_gmem_shadow.gmem_restore, 3);

		if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
			/* Restore TP0_CHICKEN */
			adreno_ringbuffer_issuecmds(device, context,
				KGSL_CMD_FLAGS_NONE,
				context->chicken_restore, 3);
		}

		context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
	}

	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {

		/* restore registers and constants. */
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);

		/* restore shader instructions & partitioning. */
		if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
			adreno_ringbuffer_issuecmds(device, context,
				KGSL_CMD_FLAGS_NONE,
				context->shader_restore, 3);
		}
	}

	if (adreno_is_a20x(adreno_dev)) {
		cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
		cmds[1] = context->bin_base_offset;
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_NONE, cmds, 2);
	}
}
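Note the evolution from Example #1: the restore path now records the small context->id in the memstore rather than a raw kernel pointer, and the memstore is indexed per context, with KGSL_MEMSTORE_GLOBAL as a reserved device-wide slot. A hedged sketch of the indexing this implies (the struct layout below is an abbreviated assumption):

/* Assumed per-context memstore slot layout; field list abbreviated. */
struct kgsl_devmemstore {
	unsigned int soptimestamp;	/* start-of-pipeline timestamp */
	unsigned int sbz;
	unsigned int eoptimestamp;	/* end-of-pipeline timestamp */
	unsigned int sbz2;
	unsigned int current_context;
	/* ... */
};

#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
	((ctxt_id) * sizeof(struct kgsl_devmemstore) + \
	 offsetof(struct kgsl_devmemstore, field))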
Example #8
static void a2xx_drawctxt_save(struct adreno_device *adreno_dev,
			struct adreno_context *context)
{
	struct kgsl_device *device = &adreno_dev->dev;

	if (context == NULL || (context->flags & CTXT_FLAGS_BEING_DESTROYED))
		return;

	if (context->flags & CTXT_FLAGS_GPU_HANG)
		KGSL_CTXT_WARN(device,
			"Current active context has caused gpu hang\n");

	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
		kgsl_cffdump_syncmem(NULL, &context->gpustate,
			context->reg_save[1],
			context->reg_save[2] << 2, true);

		/* save registers and constants. */
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_NONE,
			context->reg_save, 3);

		if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
			kgsl_cffdump_syncmem(NULL, &context->gpustate,
				context->shader_save[1],
				context->shader_save[2] << 2, true);

			/* save shader partitioning and instructions. */
			adreno_ringbuffer_issuecmds(device, context,
				KGSL_CMD_FLAGS_PMODE,
				context->shader_save, 3);

			kgsl_cffdump_syncmem(NULL, &context->gpustate,
				context->shader_fixup[1],
				context->shader_fixup[2] << 2, true);
			/* fixup shader partitioning parameter for
			 *  SET_SHADER_BASES.
			 */
			adreno_ringbuffer_issuecmds(device, context,
				KGSL_CMD_FLAGS_NONE,
				context->shader_fixup, 3);

			context->flags |= CTXT_FLAGS_SHADER_RESTORE;
		}
	}

	if ((context->flags & CTXT_FLAGS_GMEM_SAVE) &&
	    (context->flags & CTXT_FLAGS_GMEM_SHADOW)) {
		kgsl_cffdump_syncmem(NULL, &context->gpustate,
			context->context_gmem_shadow.gmem_save[1],
			context->context_gmem_shadow.gmem_save[2] << 2, true);
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_PMODE,
			context->context_gmem_shadow.gmem_save, 3);

		kgsl_cffdump_syncmem(NULL, &context->gpustate,
			context->chicken_restore[1],
			context->chicken_restore[2] << 2, true);

		/* Restore TP0_CHICKEN */
		if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
			adreno_ringbuffer_issuecmds(device, context,
				KGSL_CMD_FLAGS_NONE,
				context->chicken_restore, 3);
		}
		adreno_dev->gpudev->ctx_switches_since_last_draw = 0;

		context->flags |= CTXT_FLAGS_GMEM_RESTORE;
	} else if (adreno_is_a2xx(adreno_dev))
		a2xx_drawctxt_draw_workaround(adreno_dev, context);
}