Example #1
static int
kgsl_ringbuffer_waitspace(struct kgsl_ringbuffer *rb, unsigned int numcmds,
			  int wptr_ahead)
{
	int nopcount;
	unsigned int freecmds;
	unsigned int *cmds;
	uint cmds_gpu;

	KGSL_CMD_VDBG("enter (rb=%p, numcmds=%d, wptr_ahead=%d)\n",
		      rb, numcmds, wptr_ahead);

	/* if wptr ahead, fill the remaining with NOPs */
	if (wptr_ahead) {
		/* -1 for header */
		nopcount = rb->sizedwords - rb->wptr - 1;

		cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;

		GSL_RB_WRITE(cmds, cmds_gpu, pm4_nop_packet(nopcount));

		/* Make sure that rptr is not 0 before submitting
		 * commands at the end of ringbuffer. We do not
		 * want the rptr and wptr to become equal when
		 * the ringbuffer is not empty */
		do {
			GSL_RB_GET_READPTR(rb, &rb->rptr);
		} while (!rb->rptr);

		rb->wptr++;

		kgsl_ringbuffer_submit(rb);

		rb->wptr = 0;
	}

	/* wait for space in ringbuffer */
	do {
		GSL_RB_GET_READPTR(rb, &rb->rptr);

		freecmds = rb->rptr - rb->wptr;

	} while ((freecmds != 0) && (freecmds <= numcmds));

	KGSL_CMD_VDBG("return %d\n", 0);

	return 0;
}
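
For context, kgsl_ringbuffer_waitspace() is only reached from the ring-buffer allocator when a request does not fit. Below is a minimal sketch of that caller, assuming the same driver generation as Example #1; GSL_RB_NOP_SIZEDWORDS and the exact branch structure are assumptions and may differ between branches.

static unsigned int *kgsl_ringbuffer_allocspace(struct kgsl_ringbuffer *rb,
						unsigned int numcmds)
{
	unsigned int *ptr = NULL;
	int status = 0;

	BUG_ON(numcmds >= rb->sizedwords);

	GSL_RB_GET_READPTR(rb, &rb->rptr);

	if (rb->wptr >= rb->rptr) {
		/* wptr at or ahead of rptr: a wrap may be needed, so keep
		 * room at the tail for the NOP packet (assumed constant) */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			status = kgsl_ringbuffer_waitspace(rb, numcmds, 1);
	} else {
		/* wptr behind rptr: writing must not catch up to rptr */
		if ((rb->wptr + numcmds) >= rb->rptr)
			status = kgsl_ringbuffer_waitspace(rb, numcmds, 0);
	}

	if (status == 0) {
		/* claim the space: return the host pointer, advance wptr */
		ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		rb->wptr += numcmds;
	}

	return ptr;
}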
Example #2
static int
kgsl_ringbuffer_waitspace(struct kgsl_ringbuffer *rb, unsigned int numcmds,
			  int wptr_ahead)
{
	int nopcount;
	unsigned int freecmds;
	unsigned int *cmds;

	KGSL_CMD_VDBG("enter (rb=%p, numcmds=%d, wptr_ahead=%d)\n",
		      rb, numcmds, wptr_ahead);

	/* if wptr ahead, fill the remaining with NOPs */
	if (wptr_ahead) {
		/* -1 for header */
		nopcount = rb->sizedwords - rb->wptr - 1;

		cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		GSL_RB_WRITE(cmds, pm4_nop_packet(nopcount));
		rb->wptr++;

		kgsl_ringbuffer_submit(rb);

		rb->wptr = 0;
	}

	/* wait for space in ringbuffer */
	do {
		GSL_RB_GET_READPTR(rb, &rb->rptr);

		freecmds = rb->rptr - rb->wptr;

	} while ((freecmds != 0) && (freecmds < numcmds));

	KGSL_CMD_VDBG("return %d\n", 0);

	return 0;
}
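
Example #2 is an earlier revision of the same function: it waits while freecmds < numcmds (the later revision above uses <= so wptr can never land exactly on rptr), it omits the rptr != 0 guard before wrapping, and it uses a two-argument GSL_RB_WRITE(). The extra gpuaddr argument in the newer form exists so every ring write can also be mirrored into the CFF dump for offline replay. A hedged sketch of the two macro generations follows (bodies assumed, not verbatim; both are really named GSL_RB_WRITE in their own branches and are renamed here only so the sketch is self-consistent):

/* older form: write one dword and advance the host-side cursor */
#define GSL_RB_WRITE_V1(ring, data) \
	do { \
		writel(data, ring); \
		wmb();	/* order the dword write before any wptr update */ \
		(ring)++; \
	} while (0)

/* newer form: additionally track the GPU address for CFF dumping */
#define GSL_RB_WRITE_V2(ring, gpuaddr, data) \
	do { \
		writel(data, ring); \
		wmb(); \
		kgsl_cffdump_setmem(gpuaddr, data, sizeof(uint)); \
		(ring)++; \
		(gpuaddr) += sizeof(uint); \
	} while (0)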
Example #3
static uint32_t
kgsl_ringbuffer_addcmds(struct kgsl_ringbuffer *rb,
				unsigned int flags, unsigned int *cmds,
				int sizedwords)
{
	unsigned int *ringcmds;
	unsigned int timestamp;
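	/* the fixed +6 below covers the start/end-of-pipeline timestamp
	 * packets (2 + 4 dwords) written unconditionally further down */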
	unsigned int total_sizedwords = sizedwords + 6;
	unsigned int i;
	unsigned int rcmd_gpu;

	/* reserve space to temporarily turn off protected mode
	 * error checking if needed
	 */
	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD) ? 2 : 0;

	ringcmds = kgsl_ringbuffer_allocspace(rb, total_sizedwords);
	rcmd_gpu = rb->buffer_desc.gpuaddr
		+ sizeof(uint)*(rb->wptr-total_sizedwords);

	if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_nop_packet(1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
	}
	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* disable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
	}

	for (i = 0; i < sizedwords; i++) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
		cmds++;
	}

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* re-enable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
	}

	rb->timestamp++;
	timestamp = rb->timestamp;

	/* start-of-pipeline and end-of-pipeline timestamps */
	GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type0_packet(REG_CP_TIMESTAMP, 1));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
	GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type3_packet(PM4_EVENT_WRITE, 3));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
	GSL_RB_WRITE(ringcmds, rcmd_gpu,
		     (rb->device->memstore.gpuaddr +
		      KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);

	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
		/* Conditional execution based on memory values */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_COND_EXEC, 4));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
		/* # of conditional command DWORDs */
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_INTERRUPT, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
	}

	kgsl_ringbuffer_submit(rb);

	/* return timestamp of issued commands */
	return timestamp;
}
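
The pm4_*_packet() helpers used throughout these examples pack a packet type, a dword count, and a register index or opcode into a single PM4 header dword. A hedged reconstruction is below (field layout as in the open Adreno a200 headers; treat the exact masks and shifts as assumptions):

/* sketch of the PM4 header helpers; masks/shifts assumed */
#define PM4_TYPE0_PKT	((unsigned int)0 << 30)	/* register write packet */
#define PM4_TYPE3_PKT	((unsigned int)3 << 30)	/* opcode packet */

/* type-0: write 'cnt' consecutive registers starting at 'regindx' */
#define pm4_type0_packet(regindx, cnt) \
	(PM4_TYPE0_PKT | (((cnt)-1) << 16) | ((regindx) & 0x7FFF))

/* type-3: 'opcode' followed by 'cnt' payload dwords */
#define pm4_type3_packet(opcode, cnt) \
	(PM4_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8))

/* NOP header telling the CP to skip the next 'cnt' payload dwords,
 * which is why waitspace() subtracts 1 for the header itself */
#define pm4_nop_packet(cnt) \
	(PM4_TYPE3_PKT | (((cnt)-1) << 16) | ((PM4_NOP & 0xFF) << 8))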
Example #4
/* switch drawing contexts */
void
adreno_drawctxt_switch(struct adreno_device *adreno_dev,
			struct adreno_context *drawctxt,
			unsigned int flags)
{
	struct adreno_context *active_ctxt =
	  adreno_dev->drawctxt_active;
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int cmds[5];

	if (drawctxt) {
		if (flags & KGSL_CONTEXT_SAVE_GMEM)
			/* Set the flag in context so that the save is done
			 * when this context is switched out. */
			drawctxt->flags |= CTXT_FLAGS_GMEM_SAVE;
		else
			/* Remove GMEM saving flag from the context */
			drawctxt->flags &= ~CTXT_FLAGS_GMEM_SAVE;
	}
	/* already current? */
	if (active_ctxt == drawctxt)
		return;

	KGSL_CTXT_INFO(device, "from %p to %p flags %d\n",
			adreno_dev->drawctxt_active, drawctxt, flags);
	/* save old context*/
	if (active_ctxt && active_ctxt->flags & CTXT_FLAGS_GPU_HANG)
		KGSL_CTXT_WARN(device,
			"Current active context has caused gpu hang\n");

	if (active_ctxt != NULL) {
		KGSL_CTXT_INFO(device,
			"active_ctxt flags %08x\n", active_ctxt->flags);
		/* save registers and constants. */
		adreno_ringbuffer_issuecmds(device, 0,
				active_ctxt->reg_save, 3);

		if (active_ctxt->flags & CTXT_FLAGS_SHADER_SAVE) {
			/* save shader partitioning and instructions. */
			adreno_ringbuffer_issuecmds(device,
					KGSL_CMD_FLAGS_PMODE,
					active_ctxt->shader_save, 3);

			/* fixup shader partitioning parameter for
			 *  SET_SHADER_BASES.
			 */
			adreno_ringbuffer_issuecmds(device, 0,
					active_ctxt->shader_fixup, 3);

			active_ctxt->flags |= CTXT_FLAGS_SHADER_RESTORE;
		}

		if (active_ctxt->flags & CTXT_FLAGS_GMEM_SAVE
			&& active_ctxt->flags & CTXT_FLAGS_GMEM_SHADOW) {
			/* save gmem.
			 * (note: changes shader. shader must already be saved.)
			 */
			adreno_ringbuffer_issuecmds(device,
				KGSL_CMD_FLAGS_PMODE,
				active_ctxt->context_gmem_shadow.gmem_save, 3);

			/* Restore TP0_CHICKEN */
			adreno_ringbuffer_issuecmds(device, 0,
				active_ctxt->chicken_restore, 3);

			active_ctxt->flags |= CTXT_FLAGS_GMEM_RESTORE;
		}
	}

	adreno_dev->drawctxt_active = drawctxt;

	/* restore new context */
	if (drawctxt != NULL) {

		KGSL_CTXT_INFO(device,
			"drawctxt flags %08x\n", drawctxt->flags);
		cmds[0] = pm4_nop_packet(1);
		cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
		cmds[2] = pm4_type3_packet(PM4_MEM_WRITE, 2);
		cmds[3] = device->memstore.gpuaddr +
				KGSL_DEVICE_MEMSTORE_OFFSET(current_context);
		cmds[4] = (unsigned int)adreno_dev->drawctxt_active;
		adreno_ringbuffer_issuecmds(device, 0, cmds, 5);
		kgsl_mmu_setstate(device, drawctxt->pagetable);

#ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
		kgsl_cffdump_syncmem(NULL, &drawctxt->gpustate,
			drawctxt->gpustate.gpuaddr, LCC_SHADOW_SIZE +
			REG_SHADOW_SIZE + CMD_BUFFER_SIZE + TEX_SHADOW_SIZE,
			false);
#endif

		/* restore gmem.
		 *  (note: changes shader. shader must not already be restored.)
		 */
		if (drawctxt->flags & CTXT_FLAGS_GMEM_RESTORE) {
			adreno_ringbuffer_issuecmds(device,
				KGSL_CMD_FLAGS_PMODE,
				drawctxt->context_gmem_shadow.gmem_restore, 3);

			/* Restore TP0_CHICKEN */
			adreno_ringbuffer_issuecmds(device, 0,
				drawctxt->chicken_restore, 3);

			drawctxt->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
		}

		/* restore registers and constants. */
		adreno_ringbuffer_issuecmds(device, 0,
					  drawctxt->reg_restore, 3);

		/* restore shader instructions & partitioning. */
		if (drawctxt->flags & CTXT_FLAGS_SHADER_RESTORE) {
			adreno_ringbuffer_issuecmds(device, 0,
					  drawctxt->shader_restore, 3);
		}

		cmds[0] = pm4_type3_packet(PM4_SET_BIN_BASE_OFFSET, 1);
		cmds[1] = drawctxt->bin_base_offset;
		if (!adreno_is_a220(adreno_dev))
			adreno_ringbuffer_issuecmds(device, 0, cmds, 2);

	} else
		kgsl_mmu_setstate(device, device->mmu.defaultpagetable);
}
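
Every save/restore buffer handed to adreno_ringbuffer_issuecmds() above (reg_save, shader_save, gmem_save, chicken_restore, and so on) is exactly 3 dwords because each is an indirect-buffer call: a PM4 header, the GPU address of a prebuilt command buffer, and that buffer's length. A hedged sketch of how such a triple is assembled (PM4_INDIRECT_BUFFER_PFD is a real opcode in this driver; the helper itself is hypothetical):

/* hypothetical helper: build the 3-dword IB call consumed by
 * adreno_ringbuffer_issuecmds(); the opcode exists in the driver,
 * this function is illustrative only */
static void build_ib1_call(unsigned int cmds[3], unsigned int ib_gpuaddr,
			   unsigned int ib_sizedwords)
{
	cmds[0] = pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2);
	cmds[1] = ib_gpuaddr;		/* GPU address of the prebuilt IB */
	cmds[2] = ib_sizedwords;	/* its length in dwords */
}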