Example #1
0
/* create buffers for saving/restoring registers, constants, & GMEM */
static int a2xx_ctxt_gmem_shadow(struct adreno_device *adreno_dev,
			struct adreno_context *drawctxt)
{
	int result;

	calc_gmemsize(&drawctxt->context_gmem_shadow,
		adreno_dev->gmemspace.sizebytes);
	tmp_ctx.gmem_base = adreno_dev->gmemspace.gpu_base;

	result = kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow,
		drawctxt->pagetable, drawctxt->context_gmem_shadow.size);

	if (result)
		return result;

	/* we've allocated the shadow; when swapped out, GMEM must be saved. */
	drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW | CTXT_FLAGS_GMEM_SAVE;

	/* blank out gmem shadow. */
	kgsl_sharedmem_set(&drawctxt->context_gmem_shadow.gmemshadow, 0, 0,
			   drawctxt->context_gmem_shadow.size);

	/* build quad vertex buffer */
	build_quad_vtxbuff(drawctxt, &drawctxt->context_gmem_shadow,
		&tmp_ctx.cmd);

	/* build TP0_CHICKEN register restore command buffer */
	tmp_ctx.cmd = build_chicken_restore_cmds(drawctxt);

	/* build indirect command buffers to save & restore gmem */
	/* Idle because we are reading PM override registers */
	adreno_idle(&adreno_dev->dev, KGSL_TIMEOUT_DEFAULT);
	drawctxt->context_gmem_shadow.gmem_save_commands = tmp_ctx.cmd;
	tmp_ctx.cmd =
	    build_gmem2sys_cmds(adreno_dev, drawctxt,
				&drawctxt->context_gmem_shadow);
	drawctxt->context_gmem_shadow.gmem_restore_commands = tmp_ctx.cmd;
	tmp_ctx.cmd =
	    build_sys2gmem_cmds(adreno_dev, drawctxt,
				&drawctxt->context_gmem_shadow);

	kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
			    KGSL_CACHE_OP_FLUSH);

	kgsl_cffdump_syncmem(NULL,
			&drawctxt->context_gmem_shadow.gmemshadow,
			drawctxt->context_gmem_shadow.gmemshadow.gpuaddr,
			drawctxt->context_gmem_shadow.gmemshadow.size, false);

	return 0;
}
static int a2xx_create_gmem_shadow(struct adreno_device *adreno_dev,
			struct adreno_context *drawctxt)
{
	int result;

	calc_gmemsize(&drawctxt->context_gmem_shadow, adreno_dev->gmem_size);
	tmp_ctx.gmem_base = adreno_dev->gmem_base;

	result = kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow,
		drawctxt->pagetable, drawctxt->context_gmem_shadow.size);

	if (result)
		return result;

	
	/* mark the context as having a GMEM shadow */
	drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW;

	
	/* blank out gmem shadow. */
	kgsl_sharedmem_set(&drawctxt->context_gmem_shadow.gmemshadow, 0, 0,
			   drawctxt->context_gmem_shadow.size);

	
	/* build quad vertex buffer */
	build_quad_vtxbuff(drawctxt, &drawctxt->context_gmem_shadow,
		&tmp_ctx.cmd);

	
	/* build TP0_CHICKEN register restore command buffer */
	if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE))
		tmp_ctx.cmd = build_chicken_restore_cmds(drawctxt);

	
	/* build indirect command buffers to save & restore gmem */
	drawctxt->context_gmem_shadow.gmem_save_commands = tmp_ctx.cmd;
	tmp_ctx.cmd =
	    build_gmem2sys_cmds(adreno_dev, drawctxt,
				&drawctxt->context_gmem_shadow);
	drawctxt->context_gmem_shadow.gmem_restore_commands = tmp_ctx.cmd;
	tmp_ctx.cmd =
	    build_sys2gmem_cmds(adreno_dev, drawctxt,
				&drawctxt->context_gmem_shadow);

	kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
			    KGSL_CACHE_OP_FLUSH);

	kgsl_cffdump_syncmem(NULL,
			&drawctxt->context_gmem_shadow.gmemshadow,
			drawctxt->context_gmem_shadow.gmemshadow.gpuaddr,
			drawctxt->context_gmem_shadow.gmemshadow.size, false);

	return 0;
}
Example #3
0
/**
 * _ringbuffer_setup_common() - Ringbuffer start
 * @rb: Pointer to adreno ringbuffer
 *
 * Setup ringbuffer for GPU.
 */
static void _ringbuffer_setup_common(struct adreno_ringbuffer *rb)
{
	struct kgsl_device *device = rb->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb_temp;
	int i;

	FOR_EACH_RINGBUFFER(adreno_dev, rb_temp, i) {
		kgsl_sharedmem_set(rb_temp->device,
			&(rb_temp->buffer_desc), 0,
			0xAA, KGSL_RB_SIZE);
		rb_temp->wptr = 0;
		rb_temp->rptr = 0;
		adreno_iommu_set_pt_generate_rb_cmds(rb_temp,
					device->mmu.defaultpagetable);
	}
static int a2xx_drawctxt_create(struct adreno_device *adreno_dev,
	struct adreno_context *drawctxt)
{
	int ret;


	/* allocate memory for the GPU state and the context commands */
	ret = kgsl_allocate(&drawctxt->gpustate,
		drawctxt->pagetable, _context_size(adreno_dev));

	if (ret)
		return ret;

	kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0,
		_context_size(adreno_dev));

	tmp_ctx.cmd = tmp_ctx.start
	    = (unsigned int *)((char *)drawctxt->gpustate.hostptr + CMD_OFFSET);

	if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
		ret = a2xx_create_gpustate_shadow(adreno_dev, drawctxt);
		if (ret)
			goto done;

		drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE;
	}

	if (!(drawctxt->flags & CTXT_FLAGS_NOGMEMALLOC)) {
		ret = a2xx_create_gmem_shadow(adreno_dev, drawctxt);
		if (ret)
			goto done;
	}

	

	/* flush and sync the gpustate memory */
	kgsl_cache_range_op(&drawctxt->gpustate,
			    KGSL_CACHE_OP_FLUSH);

	kgsl_cffdump_syncmem(NULL, &drawctxt->gpustate,
			drawctxt->gpustate.gpuaddr,
			drawctxt->gpustate.size, false);

done:
	if (ret)
		kgsl_sharedmem_free(&drawctxt->gpustate);

	return ret;
}
Example #5
0
/* create buffers for saving/restoring registers, constants, & GMEM */
static int a2xx_ctxt_gpustate_shadow(struct adreno_device *adreno_dev,
			struct adreno_context *drawctxt)
{
	int result;

	/* Allocate vmalloc memory to store the gpustate */
	result = kgsl_allocate(&drawctxt->gpustate,
		drawctxt->pagetable, _context_size(adreno_dev));

	if (result)
		return result;

	drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW;

	/* Blank out h/w register, constant, and command buffer shadows. */
	kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0,
			   _context_size(adreno_dev));

	/* set-up command and vertex buffer pointers */
	tmp_ctx.cmd = tmp_ctx.start
	    = (unsigned int *)((char *)drawctxt->gpustate.hostptr + CMD_OFFSET);

	/* build indirect command buffers to save & restore regs/constants */
	adreno_idle(&adreno_dev->dev, KGSL_TIMEOUT_DEFAULT);
	build_regrestore_cmds(adreno_dev, drawctxt);
	build_regsave_cmds(adreno_dev, drawctxt);

	build_shader_save_restore_cmds(adreno_dev, drawctxt);

	kgsl_cache_range_op(&drawctxt->gpustate,
			    KGSL_CACHE_OP_FLUSH);

	kgsl_cffdump_syncmem(NULL, &drawctxt->gpustate,
			drawctxt->gpustate.gpuaddr,
			drawctxt->gpustate.size, false);
	return 0;
}
Example #6
0
/* create buffers for saving/restoring registers, constants, & GMEM */
static int
create_gpustate_shadow(struct kgsl_device *device,
		       struct adreno_context *drawctxt,
		       struct tmp_ctx *ctx)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int result;

	/* Allocate vmalloc memory to store the gpustate */
	result = kgsl_allocate(&drawctxt->gpustate,
		drawctxt->pagetable, CONTEXT_SIZE);

	if (result)
		return result;

	drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW;

	/* Blank out h/w register, constant, and command buffer shadows. */
	kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0, CONTEXT_SIZE);

	/* set-up command and vertex buffer pointers */
	ctx->cmd = ctx->start
	    = (unsigned int *)((char *)drawctxt->gpustate.hostptr + CMD_OFFSET);

	/* build indirect command buffers to save & restore regs/constants */
	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
	build_regrestore_cmds(adreno_dev, drawctxt, ctx);
	build_regsave_cmds(adreno_dev, drawctxt, ctx);

	build_shader_save_restore_cmds(drawctxt, ctx);

	kgsl_cache_range_op(&drawctxt->gpustate,
			    KGSL_CACHE_OP_FLUSH);

	return 0;
}
Example #7
0
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
    int status;
    /*cp_rb_cntl_u cp_rb_cntl; */
    union reg_cp_rb_cntl cp_rb_cntl;
    unsigned int rb_cntl;
    struct kgsl_device *device = rb->device;
    struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

    if (rb->flags & KGSL_FLAGS_STARTED)
        return 0;

    if (init_ram)
        rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0;

    kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
                       sizeof(struct kgsl_rbmemptrs));

    kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
                       (rb->sizedwords << 2));

    if (adreno_is_a2xx(adreno_dev)) {
        adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
                        (rb->memptrs_desc.gpuaddr
                         + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

        /* setup WPTR delay */
        adreno_regwrite(device, REG_CP_RB_WPTR_DELAY,
                        0 /*0x70000010 */);
    }

    /*setup REG_CP_RB_CNTL */
    adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
    cp_rb_cntl.val = rb_cntl;

    /*
     * The size of the ringbuffer in the hardware is the log2
     * representation of the size in quadwords (sizedwords / 2)
     */
    cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);

    /*
     * Specify the quadwords to read before updating mem RPTR.
     * Like above, pass the log2 representation of the blocksize
     * in quadwords.
    */
    cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);

    if (adreno_is_a2xx(adreno_dev)) {
        /* WPTR polling */
        cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
    }

    /* mem RPTR writebacks */
    cp_rb_cntl.f.rb_no_update =  GSL_RB_CNTL_NO_UPDATE;

    adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

    adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

    adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
                    rb->memptrs_desc.gpuaddr +
                    GSL_RB_MEMPTRS_RPTR_OFFSET);

    if (adreno_is_a3xx(adreno_dev)) {
        /* enable access protection to privileged registers */
        adreno_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);

        /* RBBM registers */
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_0, 0x63000040);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_1, 0x62000080);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_2, 0x600000CC);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_3, 0x60000108);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_4, 0x64000140);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_5, 0x66000400);

        /* CP registers */
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_6, 0x65000700);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_7, 0x610007D8);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_8, 0x620007E0);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_9, 0x61001178);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_A, 0x64001180);

        /* RB registers */
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_B, 0x60003300);

        /* VBIF registers */
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_C, 0x6B00C000);
    }

    if (adreno_is_a2xx(adreno_dev)) {
        /* explicitly clear all cp interrupts */
        adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
    }

    /* setup scratch/timestamp */
    adreno_regwrite(device, REG_SCRATCH_ADDR, device->memstore.gpuaddr +
                    KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
                                         soptimestamp));

    adreno_regwrite(device, REG_SCRATCH_UMSK,
                    GSL_RB_MEMPTRS_SCRATCH_MASK);

    /* load the CP ucode */

    status = adreno_ringbuffer_load_pm4_ucode(device);
    if (status != 0)
        return status;

    /* load the prefetch parser ucode */
    status = adreno_ringbuffer_load_pfp_ucode(device);
    if (status != 0)
        return status;

    if (adreno_is_a305(adreno_dev) || adreno_is_a320(adreno_dev))
        adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000F0602);

    rb->rptr = 0;
    rb->wptr = 0;

    /* clear ME_HALT to start micro engine */
    adreno_regwrite(device, REG_CP_ME_CNTL, 0);

    /* ME init is GPU specific, so jump into the sub-function */
    adreno_dev->gpudev->rb_init(adreno_dev, rb);

    /* idle device to validate ME INIT */
    status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);

    if (status == 0)
        rb->flags |= KGSL_FLAGS_STARTED;

    return status;
}
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
	int status;
	/*cp_rb_cntl_u cp_rb_cntl; */
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int rb_cntl;
	struct kgsl_device *device = rb->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (rb->flags & KGSL_FLAGS_STARTED)
		return 0;

	if (init_ram)
		rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0;

	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			   sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			   (rb->sizedwords << 2));

	if (adreno_is_a2xx(adreno_dev)) {
		adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
			(rb->memptrs_desc.gpuaddr
			+ GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

		/* setup WPTR delay */
		adreno_regwrite(device, REG_CP_RB_WPTR_DELAY,
			0 /*0x70000010 */);
	}

	/*setup REG_CP_RB_CNTL */
	adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2)
	 */
	cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);

	/*
	 * Specify the quadwords to read before updating mem RPTR.
	 * Like above, pass the log2 representation of the blocksize
	 * in quadwords.
	*/
	cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);

	if (adreno_is_a2xx(adreno_dev)) {
		/* WPTR polling */
		cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
	}

	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update =  GSL_RB_CNTL_NO_UPDATE;

	adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
			     rb->memptrs_desc.gpuaddr +
			     GSL_RB_MEMPTRS_RPTR_OFFSET);

	if (adreno_is_a2xx(adreno_dev)) {
		/* explicitly clear all cp interrupts */
		adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
	}

	/* setup scratch/timestamp */
	adreno_regwrite(device, REG_SCRATCH_ADDR, device->memstore.gpuaddr +
			     KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
				     soptimestamp));

	adreno_regwrite(device, REG_SCRATCH_UMSK,
			     GSL_RB_MEMPTRS_SCRATCH_MASK);

	/* load the CP ucode */

	status = adreno_ringbuffer_load_pm4_ucode(device);
	if (status != 0)
		return status;

	/* load the prefetch parser ucode */
	status = adreno_ringbuffer_load_pfp_ucode(device);
	if (status != 0)
		return status;

	rb->rptr = 0;
	rb->wptr = 0;

	/* clear ME_HALT to start micro engine */
	adreno_regwrite(device, REG_CP_ME_CNTL, 0);

	/* ME init is GPU specific, so jump into the sub-function */
	adreno_dev->gpudev->rb_init(adreno_dev, rb);

	/* idle device to validate ME INIT */
	status = adreno_idle(device);

	if (status == 0)
		rb->flags |= KGSL_FLAGS_STARTED;

	return status;
}
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
	int status;
	/*cp_rb_cntl_u cp_rb_cntl; */
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int *cmds, rb_cntl;
	struct kgsl_device *device = rb->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	uint cmds_gpu;

	if (rb->flags & KGSL_FLAGS_STARTED)
		return 0;

	if (init_ram) {
		rb->timestamp = 0;
		GSL_RB_INIT_TIMESTAMP(rb);
	}

	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			   sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			   (rb->sizedwords << 2));

	adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
			     (rb->memptrs_desc.gpuaddr
			      + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

	/* setup WPTR delay */
	adreno_regwrite(device, REG_CP_RB_WPTR_DELAY, 0 /*0x70000010 */);

	/*setup REG_CP_RB_CNTL */
	adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2)
	 */
	cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);

	/*
	 * Specify the quadwords to read before updating mem RPTR.
	 * Like above, pass the log2 representation of the blocksize
	 * in quadwords.
	*/
	cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);

	cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; /* WPTR polling */
	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update =  GSL_RB_CNTL_NO_UPDATE;

	adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
			     rb->memptrs_desc.gpuaddr +
			     GSL_RB_MEMPTRS_RPTR_OFFSET);

	/* explicitly clear all cp interrupts */
	adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);

	/* setup scratch/timestamp */
	adreno_regwrite(device, REG_SCRATCH_ADDR,
			     device->memstore.gpuaddr +
			     KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));

	adreno_regwrite(device, REG_SCRATCH_UMSK,
			     GSL_RB_MEMPTRS_SCRATCH_MASK);

	/* update the eoptimestamp field with the last retired timestamp */
	kgsl_sharedmem_writel(&device->memstore,
			     KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp),
			     rb->timestamp);

	/* load the CP ucode */

	status = adreno_ringbuffer_load_pm4_ucode(device);
	if (status != 0)
		return status;

	/* load the prefetch parser ucode */
	status = adreno_ringbuffer_load_pfp_ucode(device);
	if (status != 0)
		return status;

	adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804);

	rb->rptr = 0;
	rb->wptr = 0;

	/* clear ME_HALT to start micro engine */
	adreno_regwrite(device, REG_CP_ME_CNTL, 0);

	/* ME_INIT */
	cmds = adreno_ringbuffer_allocspace(rb, 19);
	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);

	GSL_RB_WRITE(cmds, cmds_gpu, CP_HDR_ME_INIT);
	/* All fields present (bits 9:0) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
	/* Disable/Enable Real-Time Stream processing (present but ignored) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	GSL_RB_WRITE(cmds, cmds_gpu,
		SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
	GSL_RB_WRITE(cmds, cmds_gpu,
		SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
	GSL_RB_WRITE(cmds, cmds_gpu,
		SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
	GSL_RB_WRITE(cmds, cmds_gpu,
		SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
	GSL_RB_WRITE(cmds, cmds_gpu,
		SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));

	/* Instruction memory size: */
	GSL_RB_WRITE(cmds, cmds_gpu,
		     (adreno_encode_istore_size(adreno_dev)
		      | adreno_dev->pix_shader_start));
	/* Maximum Contexts */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
	/* Write Confirm Interval: the CP will wait wait_interval * 16
	 * clocks between polls */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	/* NQ and External Memory Swap */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Protected mode error checking */
	GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
	/* Disable header dumping and Header dump address */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Header dump size */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	adreno_ringbuffer_submit(rb);

	/* idle device to validate ME INIT */
	status = adreno_idle(device);

	if (status == 0)
		rb->flags |= KGSL_FLAGS_STARTED;

	return status;
}
Example #10
0
int kgsl_ringbuffer_start(struct kgsl_ringbuffer *rb, unsigned int init_ram)
{
	int status;
	/*cp_rb_cntl_u cp_rb_cntl; */
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int *cmds, rb_cntl;
	struct kgsl_device *device = rb->device;
	uint cmds_gpu;

	KGSL_CMD_VDBG("enter (rb=%p)\n", rb);

	if (rb->flags & KGSL_FLAGS_STARTED) {
		KGSL_CMD_VDBG("already started return %d\n", 0);
		return 0;
	}
	if (init_ram) {
		rb->timestamp = 0;
		GSL_RB_INIT_TIMESTAMP(rb);
	}

	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			   sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			   (rb->sizedwords << 2));

	kgsl_yamato_regwrite(device, REG_CP_RB_WPTR_BASE,
			     (rb->memptrs_desc.gpuaddr
			      + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

	/* setup WPTR delay */
	kgsl_yamato_regwrite(device, REG_CP_RB_WPTR_DELAY, 0 /*0x70000010 */);

	/*setup REG_CP_RB_CNTL */
	kgsl_yamato_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;
	/* size of ringbuffer */
	cp_rb_cntl.f.rb_bufsz =
		kgsl_ringbuffer_sizelog2quadwords(rb->sizedwords);
	/* quadwords to read before updating mem RPTR */
	cp_rb_cntl.f.rb_blksz = rb->blksizequadwords;
	cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; /* WPTR polling */
	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update =  GSL_RB_CNTL_NO_UPDATE;

	kgsl_yamato_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	kgsl_yamato_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	kgsl_yamato_regwrite(device, REG_CP_RB_RPTR_ADDR,
			     rb->memptrs_desc.gpuaddr +
			     GSL_RB_MEMPTRS_RPTR_OFFSET);

	/* explicitly clear all cp interrupts */
	kgsl_yamato_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);

	/* setup scratch/timestamp */
	kgsl_yamato_regwrite(device, REG_SCRATCH_ADDR,
			     device->memstore.gpuaddr +
			     KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));

	kgsl_yamato_regwrite(device, REG_SCRATCH_UMSK,
			     GSL_RB_MEMPTRS_SCRATCH_MASK);

	/* load the CP ucode */

	status = kgsl_ringbuffer_load_pm4_ucode(device);
	if (status != 0) {
		KGSL_DRV_ERR("kgsl_ringbuffer_load_pm4_ucode failed  %d\n",
				status);
		return status;
	}


	/* load the prefetch parser ucode */
	status = kgsl_ringbuffer_load_pfp_ucode(device);
	if (status != 0) {
		KGSL_DRV_ERR("kgsl_ringbuffer_load_pfp_ucode failed %d\n",
				status);
		return status;
	}

	kgsl_yamato_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804);

	rb->rptr = 0;
	rb->wptr = 0;

	/* clear ME_HALT to start micro engine */
	kgsl_yamato_regwrite(device, REG_CP_ME_CNTL, 0);

	/* ME_INIT */
	cmds = kgsl_ringbuffer_allocspace(rb, 19);
	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);

	GSL_RB_WRITE(cmds, cmds_gpu, PM4_HDR_ME_INIT);
	/* All fields present (bits 9:0) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
	/* Disable/Enable Real-Time Stream processing (present but ignored) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	GSL_RB_WRITE(cmds, cmds_gpu,
		GSL_HAL_SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
	GSL_RB_WRITE(cmds, cmds_gpu,
		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
	GSL_RB_WRITE(cmds, cmds_gpu,
		GSL_HAL_SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
	GSL_RB_WRITE(cmds, cmds_gpu,
		GSL_HAL_SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		GSL_HAL_SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
	GSL_RB_WRITE(cmds, cmds_gpu,
		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));

	/* Vertex and Pixel Shader Start Addresses in instructions
	* (3 DWORDS per instruction) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x80000180);
	/* Maximum Contexts */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
	/* Write Confirm Interval: the CP will wait wait_interval * 16
	* clocks between polls */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	/* NQ and External Memory Swap */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Protected mode error checking */
	GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
	/* Disable header dumping and Header dump address */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Header dump size */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	kgsl_ringbuffer_submit(rb);

	/* idle device to validate ME INIT */
	status = kgsl_yamato_idle(device, KGSL_TIMEOUT_DEFAULT);

	KGSL_CMD_DBG("enabling CP interrupts: mask %08lx\n", GSL_CP_INT_MASK);
	kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, GSL_CP_INT_MASK);
	if (status == 0)
		rb->flags |= KGSL_FLAGS_STARTED;

	KGSL_CMD_VDBG("return %d\n", status);

	return status;
}
Example #11
0
static int kgsl_ringbuffer_start(struct kgsl_ringbuffer *rb)
{
	int status;
	
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int *cmds, rb_cntl;
	struct kgsl_device *device = rb->device;

	KGSL_CMD_VDBG("enter (rb=%p)\n", rb);

	if (rb->flags & KGSL_FLAGS_STARTED) {
		KGSL_CMD_VDBG("return %d\n", 0);
		return 0;
	}
	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
				sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
				(rb->sizedwords << 2));

	kgsl_yamato_regwrite(device, REG_CP_RB_WPTR_BASE,
			     (rb->memptrs_desc.gpuaddr
			      + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

	
	/* setup WPTR delay */
	kgsl_yamato_regwrite(device, REG_CP_RB_WPTR_DELAY, 0);

	
	/* setup REG_CP_RB_CNTL */
	kgsl_yamato_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;
	/* size of ringbuffer */
	cp_rb_cntl.f.rb_bufsz =
		kgsl_ringbuffer_sizelog2quadwords(rb->sizedwords);
	/* quadwords to read before updating mem RPTR */
	cp_rb_cntl.f.rb_blksz = rb->blksizequadwords;
	cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; /* WPTR polling */
	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update =  GSL_RB_CNTL_NO_UPDATE;

	kgsl_yamato_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	kgsl_yamato_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	kgsl_yamato_regwrite(device, REG_CP_RB_RPTR_ADDR,
			     rb->memptrs_desc.gpuaddr +
			     GSL_RB_MEMPTRS_RPTR_OFFSET);

	
	/* explicitly clear all cp interrupts */
	kgsl_yamato_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);

	
	/* setup scratch/timestamp */
	kgsl_yamato_regwrite(device, REG_SCRATCH_ADDR,
			     device->memstore.gpuaddr +
			     KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));

	kgsl_yamato_regwrite(device, REG_SCRATCH_UMSK,
			     GSL_RB_MEMPTRS_SCRATCH_MASK);

	

	/* load the CP ucode */
	status = kgsl_ringbuffer_load_pm4_ucode(device);
	if (status != 0) {
		KGSL_DRV_ERR("kgsl_ringbuffer_load_pm4_ucode failed  %d\n",
				status);
		return status;
	}


	
	/* load the prefetch parser ucode */
	status = kgsl_ringbuffer_load_pfp_ucode(device);
	if (status != 0) {
		KGSL_DRV_ERR("kgsl_ringbuffer_load_pm4_ucode failed %d\n",
				status);
		return status;
	}

	kgsl_yamato_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804);

	rb->rptr = 0;
	rb->wptr = 0;

	rb->timestamp = 0;
	GSL_RB_INIT_TIMESTAMP(rb);

	INIT_LIST_HEAD(&rb->memqueue);

	
	/* clear ME_HALT to start micro engine */
	kgsl_yamato_regwrite(device, REG_CP_ME_CNTL, 0);

	/* ME_INIT */
	cmds = kgsl_ringbuffer_allocspace(rb, 19);

	GSL_RB_WRITE(cmds, PM4_HDR_ME_INIT);
	/* All fields present (bits 9:0) */
	GSL_RB_WRITE(cmds, 0x000003ff);
	/* Disable/Enable Real-Time Stream processing (present but ignored) */
	GSL_RB_WRITE(cmds, 0x00000000);
	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
	GSL_RB_WRITE(cmds, 0x00000000);

	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
	GSL_RB_WRITE(cmds,
	     GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));

	
	/* Vertex and Pixel Shader Start Addresses in instructions
	* (3 DWORDS per instruction) */
	GSL_RB_WRITE(cmds, 0x80000180);
	/* Maximum Contexts */
	GSL_RB_WRITE(cmds, 0x00000001);
	/* Write Confirm Interval: the CP will wait wait_interval * 16
	* clocks between polls */
	GSL_RB_WRITE(cmds, 0x00000000);

	/* NQ and External Memory Swap */
	GSL_RB_WRITE(cmds, 0x00000000);
	/* Protected mode error checking */
	GSL_RB_WRITE(cmds, GSL_RB_PROTECTED_MODE_CONTROL);
	/* Disable header dumping and Header dump address */
	GSL_RB_WRITE(cmds, 0x00000000);
	/* Header dump size */
	GSL_RB_WRITE(cmds, 0x00000000);

	kgsl_ringbuffer_submit(rb);

	
	/* idle device to validate ME INIT */
	status = kgsl_yamato_idle(device, KGSL_TIMEOUT_DEFAULT);

	KGSL_CMD_DBG("enabling CP interrupts: mask %08lx\n", GSL_CP_INT_MASK);
	kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, GSL_CP_INT_MASK);
	if (status == 0)
		rb->flags |= KGSL_FLAGS_STARTED;

	KGSL_CMD_VDBG("return %d\n", status);

	return status;
}
Example #12
0
static long gsl_kmod_ioctl(struct file *fd, unsigned int cmd, unsigned long arg)
{
    int kgslStatus = GSL_FAILURE;

    switch (cmd) {
    case IOCTL_KGSL_DEVICE_START:
        {
            kgsl_device_start_t param;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_start_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_device_start(param.device_id, param.flags);
            break;
        }
    case IOCTL_KGSL_DEVICE_STOP:
        {
            kgsl_device_stop_t param;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_stop_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_device_stop(param.device_id);
            break;
        }
    case IOCTL_KGSL_DEVICE_IDLE:
        {
            kgsl_device_idle_t param;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_idle_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_device_idle(param.device_id, param.timeout);
            break;
        }
    case IOCTL_KGSL_DEVICE_ISIDLE:
        {
            kgsl_device_isidle_t param;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_isidle_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_device_isidle(param.device_id);
            break;
        }
    case IOCTL_KGSL_DEVICE_GETPROPERTY:
        {
            kgsl_device_getproperty_t param;
            void *tmp;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_getproperty_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            tmp = kmalloc(param.sizebytes, GFP_KERNEL);
            if (!tmp)
            {
                printk(KERN_ERR "%s:kmalloc error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_device_getproperty(param.device_id, param.type, tmp, param.sizebytes);
            if (kgslStatus == GSL_SUCCESS)
            {
                if (copy_to_user(param.value, tmp, param.sizebytes))
                {
                    printk(KERN_ERR "%s: copy_to_user error\n", __func__);
                    kgslStatus = GSL_FAILURE;
                    kfree(tmp);
                    break;
                }
            }
            else
            {
                printk(KERN_ERR "%s: kgsl_device_getproperty error\n", __func__);
            }
            kfree(tmp);
            break;
        }
    case IOCTL_KGSL_DEVICE_SETPROPERTY:
        {
            kgsl_device_setproperty_t param;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_setproperty_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_device_setproperty(param.device_id, param.type, param.value, param.sizebytes);
            if (kgslStatus != GSL_SUCCESS)
            {
                printk(KERN_ERR "%s: kgsl_device_setproperty error\n", __func__);
            }
            break;
        }
    case IOCTL_KGSL_DEVICE_REGREAD:
        {
            kgsl_device_regread_t param;
            unsigned int tmp;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_regread_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_device_regread(param.device_id, param.offsetwords, &tmp);
            if (kgslStatus == GSL_SUCCESS)
            {
                if (copy_to_user(param.value, &tmp, sizeof(unsigned int)))
                {
                    printk(KERN_ERR "%s: copy_to_user error\n", __func__);
                    kgslStatus = GSL_FAILURE;
                    break;
                }
            }
            break;
        }
    case IOCTL_KGSL_DEVICE_REGWRITE:
        {
            kgsl_device_regwrite_t param;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_regwrite_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_device_regwrite(param.device_id, param.offsetwords, param.value);
            break;
        }
    case IOCTL_KGSL_DEVICE_WAITIRQ:
        {
            kgsl_device_waitirq_t param;
            unsigned int count;

            printk(KERN_ERR "IOCTL_KGSL_DEVICE_WAITIRQ obsoleted!\n");
//          kgslStatus = -ENOTTY; break;

            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_waitirq_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_device_waitirq(param.device_id, param.intr_id, &count, param.timeout);
            if (kgslStatus == GSL_SUCCESS)
            {
                if (copy_to_user(param.count, &count, sizeof(unsigned int)))
                {
                    printk(KERN_ERR "%s: copy_to_user error\n", __func__);
                    kgslStatus = GSL_FAILURE;
                    break;
                }
            }
            break;
        }
    case IOCTL_KGSL_CMDSTREAM_ISSUEIBCMDS:
        {
            kgsl_cmdstream_issueibcmds_t param;
            gsl_timestamp_t tmp;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_cmdstream_issueibcmds_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_cmdstream_issueibcmds(param.device_id, param.drawctxt_index, param.ibaddr, param.sizedwords, &tmp, param.flags);
            if (kgslStatus == GSL_SUCCESS)
            {
                if (copy_to_user(param.timestamp, &tmp, sizeof(gsl_timestamp_t)))
                {
                    printk(KERN_ERR "%s: copy_to_user error\n", __func__);
                    kgslStatus = GSL_FAILURE;
                    break;
                }
            }
            break;
        }
    case IOCTL_KGSL_CMDSTREAM_READTIMESTAMP:
        {
            kgsl_cmdstream_readtimestamp_t param;
            gsl_timestamp_t tmp;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_cmdstream_readtimestamp_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            tmp = kgsl_cmdstream_readtimestamp(param.device_id, param.type);
            if (copy_to_user(param.timestamp, &tmp, sizeof(gsl_timestamp_t)))
            {
                    printk(KERN_ERR "%s: copy_to_user error\n", __func__);
                    kgslStatus = GSL_FAILURE;
                    break;
            }
            kgslStatus = GSL_SUCCESS;
            break;
        }
    case IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP:
        {
            int err;
            kgsl_cmdstream_freememontimestamp_t param;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_cmdstream_freememontimestamp_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            err = del_memblock_from_allocated_list(fd, param.memdesc);
            if(err)
            {
                /* tried to remove a block of memory that is not allocated!
                 * NOTE that -EINVAL is a Linux kernel error code; the
                 * driver's error codes could get mixed up with the kernel's. */
                kgslStatus = -EINVAL;
            }
            else
            {
                kgslStatus = kgsl_cmdstream_freememontimestamp(param.device_id,
                                                               param.memdesc,
                                                               param.timestamp,
                                                               param.type);
            }
            break;
        }
    case IOCTL_KGSL_CMDSTREAM_WAITTIMESTAMP:
        {
            kgsl_cmdstream_waittimestamp_t param;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_cmdstream_waittimestamp_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_cmdstream_waittimestamp(param.device_id, param.timestamp, param.timeout);
            break;
        }
    case IOCTL_KGSL_CMDWINDOW_WRITE:
        {
            kgsl_cmdwindow_write_t param;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_cmdwindow_write_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_cmdwindow_write(param.device_id, param.target, param.addr, param.data);
            break;
        }
    case IOCTL_KGSL_CONTEXT_CREATE:
        {
            kgsl_context_create_t param;
            unsigned int tmp;
            int tmpStatus;

            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_context_create_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_context_create(param.device_id, param.type, &tmp, param.flags);
            if (kgslStatus == GSL_SUCCESS)
            {
                if (copy_to_user(param.drawctxt_id, &tmp, sizeof(unsigned int)))
                {
                    tmpStatus = kgsl_context_destroy(param.device_id, tmp);
                    /* Is asserting OK? Basically we should return the error
                     * from copy_to_user, but will user space interpret it
                     * correctly? User space may only check against GSL_SUCCESS
                     * or GSL_FAILURE, which are not the only return values.
                     */
                    KOS_ASSERT(tmpStatus == GSL_SUCCESS);
                    printk(KERN_ERR "%s: copy_to_user error\n", __func__);
                    kgslStatus = GSL_FAILURE;
                    break;
                }
                else
                {
                    add_device_context_to_array(fd, param.device_id, tmp);
                }
            }
            break;
        }
    case IOCTL_KGSL_CONTEXT_DESTROY:
        {
            kgsl_context_destroy_t param;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_context_destroy_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_context_destroy(param.device_id, param.drawctxt_id);
            del_device_context_from_array(fd, param.device_id, param.drawctxt_id);
            break;
        }
    case IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW:
        {
            kgsl_drawctxt_bind_gmem_shadow_t param;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_drawctxt_bind_gmem_shadow_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_drawctxt_bind_gmem_shadow(param.device_id, param.drawctxt_id, param.gmem_rect, param.shadow_x, param.shadow_y, param.shadow_buffer, param.buffer_id);
            break;
        }
    case IOCTL_KGSL_SHAREDMEM_ALLOC:
        {
            kgsl_sharedmem_alloc_t param;
            gsl_memdesc_t tmp;
            int tmpStatus;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_sharedmem_alloc_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_sharedmem_alloc(param.device_id, param.flags, param.sizebytes, &tmp);
            if (kgslStatus == GSL_SUCCESS)
            {
                if (copy_to_user(param.memdesc, &tmp, sizeof(gsl_memdesc_t)))
                {
                    tmpStatus = kgsl_sharedmem_free(&tmp);
                    KOS_ASSERT(tmpStatus == GSL_SUCCESS);
                    printk(KERN_ERR "%s: copy_to_user error\n", __func__);
                    kgslStatus = GSL_FAILURE;
                    break;
                }
                else
                {
                    add_memblock_to_allocated_list(fd, &tmp);
                }
            }
            break;
        }
    case IOCTL_KGSL_SHAREDMEM_FREE:
        {
            kgsl_sharedmem_free_t param;
            gsl_memdesc_t tmp;
            int err;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_sharedmem_free_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            if (copy_from_user(&tmp, (void __user *)param.memdesc, sizeof(gsl_memdesc_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            err = del_memblock_from_allocated_list(fd, &tmp);
            if(err)
            {
                printk(KERN_ERR "%s: tried to free memdesc that was not allocated!\n", __func__);
                kgslStatus = err;
                break;
            }
            kgslStatus = kgsl_sharedmem_free(&tmp);
            if (kgslStatus == GSL_SUCCESS)
            {
                if (copy_to_user(param.memdesc, &tmp, sizeof(gsl_memdesc_t)))
                {
                    printk(KERN_ERR "%s: copy_to_user error\n", __func__);
                    kgslStatus = GSL_FAILURE;
                    break;
                }
            }
            break;
        }
    case IOCTL_KGSL_SHAREDMEM_READ:
        {
            kgsl_sharedmem_read_t param;
            gsl_memdesc_t memdesc;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_sharedmem_read_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            if (copy_from_user(&memdesc, (void __user *)param.memdesc, sizeof(gsl_memdesc_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_sharedmem_read(&memdesc, param.dst, param.offsetbytes, param.sizebytes, true);
            if (kgslStatus != GSL_SUCCESS)
            {
                printk(KERN_ERR "%s: kgsl_sharedmem_read failed\n", __func__);
            }
            break;
        }
    case IOCTL_KGSL_SHAREDMEM_WRITE:
        {
            kgsl_sharedmem_write_t param;
            gsl_memdesc_t memdesc;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_sharedmem_write_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            if (copy_from_user(&memdesc, (void __user *)param.memdesc, sizeof(gsl_memdesc_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_sharedmem_write(&memdesc, param.offsetbytes, param.src, param.sizebytes, true);
            if (kgslStatus != GSL_SUCCESS)
            {
                printk(KERN_ERR "%s: kgsl_sharedmem_write failed\n", __func__);
            }
            
            break;
        }
    case IOCTL_KGSL_SHAREDMEM_SET:
        {
            kgsl_sharedmem_set_t param;
            gsl_memdesc_t memdesc;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_sharedmem_set_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            if (copy_from_user(&memdesc, (void __user *)param.memdesc, sizeof(gsl_memdesc_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_sharedmem_set(&memdesc, param.offsetbytes, param.value, param.sizebytes);
            break;
        }
    case IOCTL_KGSL_SHAREDMEM_LARGESTFREEBLOCK:
        {
            kgsl_sharedmem_largestfreeblock_t param;
            unsigned int largestfreeblock;

            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_sharedmem_largestfreeblock_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            largestfreeblock = kgsl_sharedmem_largestfreeblock(param.device_id, param.flags);
            if (copy_to_user(param.largestfreeblock, &largestfreeblock, sizeof(unsigned int)))
            {
                printk(KERN_ERR "%s: copy_to_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = GSL_SUCCESS;
            break;
        }
    case IOCTL_KGSL_SHAREDMEM_CACHEOPERATION:
        {
            kgsl_sharedmem_cacheoperation_t param;
            gsl_memdesc_t memdesc;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_sharedmem_cacheoperation_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            if (copy_from_user(&memdesc, (void __user *)param.memdesc, sizeof(gsl_memdesc_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_sharedmem_cacheoperation(&memdesc, param.offsetbytes, param.sizebytes, param.operation);
            break;
        }
    case IOCTL_KGSL_SHAREDMEM_FROMHOSTPOINTER:
        {
            kgsl_sharedmem_fromhostpointer_t param;
            gsl_memdesc_t memdesc;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_sharedmem_fromhostpointer_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            if (copy_from_user(&memdesc, (void __user *)param.memdesc, sizeof(gsl_memdesc_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_sharedmem_fromhostpointer(param.device_id, &memdesc, param.hostptr);
            break;
        }
    case IOCTL_KGSL_ADD_TIMESTAMP:
        {
            kgsl_add_timestamp_t param;
            gsl_timestamp_t tmp;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_add_timestamp_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_add_timestamp(param.device_id, &tmp);
            if (copy_to_user(param.timestamp, &tmp, sizeof(gsl_timestamp_t)))
            {
                    printk(KERN_ERR "%s: copy_to_user error\n", __func__);
                    kgslStatus = GSL_FAILURE;
                    break;
            }
            kgslStatus = GSL_SUCCESS;
            break;
        }
    
    case IOCTL_KGSL_DEVICE_CLOCK:
        {
            kgsl_device_clock_t param;
            if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_clock_t)))
            {
                printk(KERN_ERR "%s: copy_from_user error\n", __func__);
                kgslStatus = GSL_FAILURE;
                break;
            }
            kgslStatus = kgsl_device_clock(param.device, param.enable);
            break;
        }
    default:
        kgslStatus = -ENOTTY;
        break;
    }

    return kgslStatus;
}
Example #13
0
int kgsl_mmu_init(struct kgsl_device *device)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */
	int status;
	uint32_t flags;
	struct kgsl_mmu *mmu = &device->mmu;
#ifdef _DEBUG
	struct kgsl_mmu_debug regs;
#endif /* _DEBUG */

	KGSL_MEM_VDBG("enter (device=%p)\n", device);

	if (mmu->flags & KGSL_FLAGS_INITIALIZED0) {
		KGSL_MEM_INFO("MMU already initialized.\n");
		return 0;
	}

	mmu->device = device;

#ifndef CONFIG_MSM_KGSL_MMU
	mmu->config = 0x00000000;
#endif

	/* setup MMU and sub-client behavior */
	kgsl_yamato_regwrite(device, REG_MH_MMU_CONFIG, mmu->config);

	/* enable axi interrupts */
	KGSL_MEM_DBG("enabling mmu interrupts mask=0x%08lx\n",
		     GSL_MMU_INT_MASK);
	kgsl_yamato_regwrite(device, REG_MH_INTERRUPT_MASK, GSL_MMU_INT_MASK);

	mmu->flags |= KGSL_FLAGS_INITIALIZED0;

	/* MMU not enabled */
	if ((mmu->config & 0x1) == 0) {
		KGSL_MEM_VDBG("return %d\n", 0);
		return 0;
	}

	/* idle device */
	kgsl_yamato_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* make sure aligned to pagesize */
	BUG_ON(mmu->mpu_base & (KGSL_PAGESIZE - 1));
	BUG_ON((mmu->mpu_base + mmu->mpu_range) & (KGSL_PAGESIZE - 1));

	/* define physical memory range accessible by the core */
	kgsl_yamato_regwrite(device, REG_MH_MMU_MPU_BASE,
				mmu->mpu_base);
	kgsl_yamato_regwrite(device, REG_MH_MMU_MPU_END,
				mmu->mpu_base + mmu->mpu_range);

	/* enable axi interrupts */
	KGSL_MEM_DBG("enabling mmu interrupts mask=0x%08lx\n",
		     GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);
	kgsl_yamato_regwrite(device, REG_MH_INTERRUPT_MASK,
			GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);

	mmu->flags |= KGSL_FLAGS_INITIALIZED;

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {
		/*make sure virtual address range is a multiple of 64Kb */
		BUG_ON(mmu->va_range & ((1 << 16) - 1));

		/* allocate memory used for completing r/w operations that
		 * cannot be mapped by the MMU
		 */
		flags = (KGSL_MEMFLAGS_ALIGN4K | KGSL_MEMFLAGS_CONPHYS
			 | KGSL_MEMFLAGS_STRICTREQUEST);
		status = kgsl_sharedmem_alloc(flags, 64, &mmu->dummyspace);
		if (status != 0) {
			KGSL_MEM_ERR
			    ("Unable to allocate dummy space memory.\n");
			kgsl_mmu_close(device);
			return status;
		}

		kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
				   mmu->dummyspace.size);
		/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
		 * to complete transactions in case of an MMU fault. Note that
		 * we'll leave the bottom 32 bytes of the dummyspace for other
		 * purposes (e.g. use it when dummy read cycles are needed
		 * for other blocks) */
		kgsl_yamato_regwrite(device,
				     REG_MH_MMU_TRAN_ERROR,
				     mmu->dummyspace.physaddr + 32);

		mmu->defaultpagetable = kgsl_mmu_createpagetableobject(mmu);
		if (!mmu->defaultpagetable) {
			KGSL_MEM_ERR("Failed to create global page table\n");
			kgsl_mmu_close(device);
			return -ENOMEM;
		}
		mmu->hwpagetable = mmu->defaultpagetable;
		kgsl_yamato_regwrite(device, REG_MH_MMU_PT_BASE,
					mmu->hwpagetable->base.gpuaddr);
		kgsl_yamato_regwrite(device, REG_MH_MMU_VA_RANGE,
				(mmu->hwpagetable->va_base |
				(mmu->hwpagetable->va_range >> 16)));
		status = kgsl_yamato_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
		if (status) {
			kgsl_mmu_close(device);
			return status;
		}

		mmu->flags |= KGSL_FLAGS_STARTED;
	}
Example #14
0
struct kgsl_pagetable *kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	uint32_t flags;

	KGSL_MEM_VDBG("enter (mmu=%p)\n", mmu);

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL) {
		KGSL_MEM_ERR("Unable to allocate pagetable object.\n");
		return NULL;
	}

	pagetable->mmu = mmu;
	pagetable->va_base = mmu->va_base;
	pagetable->va_range = mmu->va_range;
	pagetable->last_superpte = 0;
	pagetable->max_entries = (mmu->va_range >> KGSL_PAGESIZE_SHIFT)
				 + GSL_PT_EXTRA_ENTRIES;

	pagetable->pool = gen_pool_create(KGSL_PAGESIZE_SHIFT, -1);
	if (pagetable->pool == NULL) {
		KGSL_MEM_ERR("Unable to allocate virtualaddr pool.\n");
		goto err_gen_pool_create;
	}

	if (gen_pool_add(pagetable->pool, pagetable->va_base,
				pagetable->va_range, -1)) {
		KGSL_MEM_ERR("gen_pool_create failed for pagetable %p\n",
				pagetable);
		goto err_gen_pool_add;
	}

	/* allocate page table memory */
	flags = (KGSL_MEMFLAGS_ALIGN4K | KGSL_MEMFLAGS_CONPHYS
		 | KGSL_MEMFLAGS_STRICTREQUEST);
	status = kgsl_sharedmem_alloc(flags,
				      pagetable->max_entries * GSL_PTE_SIZE,
				      &pagetable->base);

	if (status) {
		KGSL_MEM_ERR("cannot alloc page tables\n");
		goto err_kgsl_sharedmem_alloc;
	}

	/* reset page table entries
	 * -- all pte's are marked as not dirty initially
	 */
	kgsl_sharedmem_set(&pagetable->base, 0, 0, pagetable->base.size);
	pagetable->base.gpuaddr = pagetable->base.physaddr;

	KGSL_MEM_VDBG("return %p\n", pagetable);

	return pagetable;

err_kgsl_sharedmem_alloc:
err_gen_pool_add:
	gen_pool_destroy(pagetable->pool);
err_gen_pool_create:
	kfree(pagetable);
	return NULL;
}
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
	int status;
	
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int rb_cntl;
	struct kgsl_device *device = rb->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (rb->flags & KGSL_FLAGS_STARTED)
		return 0;

	if (init_ram)
		rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0;

	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			   sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			   (rb->sizedwords << 2));

	if (adreno_is_a2xx(adreno_dev)) {
		adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
			(rb->memptrs_desc.gpuaddr
			+ GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

		
		/* setup WPTR delay */
		adreno_regwrite(device, REG_CP_RB_WPTR_DELAY,
			0);
	}

	
	/* setup REG_CP_RB_CNTL */
	adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;

	/* ringbuffer size: log2 of the size in quadwords (sizedwords / 2) */
	cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);

	/* quadwords to read before updating mem RPTR (log2 of the blocksize) */
	cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);

	if (adreno_is_a2xx(adreno_dev)) {
		
		/* WPTR polling */
		cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
	}

	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update =  GSL_RB_CNTL_NO_UPDATE;

	adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
			     rb->memptrs_desc.gpuaddr +
			     GSL_RB_MEMPTRS_RPTR_OFFSET);

	if (adreno_is_a3xx(adreno_dev)) {
		
		/* enable access protection to privileged registers */
		adreno_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);

		/* RBBM registers */
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_0, 0x63000040);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_1, 0x62000080);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_2, 0x600000CC);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_3, 0x60000108);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_4, 0x64000140);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_5, 0x66000400);

		/* CP registers */
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_6, 0x65000700);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_7, 0x610007D8);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_8, 0x620007E0);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_9, 0x61001178);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_A, 0x64001180);

		/* RB registers */
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_B, 0x60003300);

		/* VBIF registers */
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_C, 0x6B00C000);
	}

	if (adreno_is_a2xx(adreno_dev)) {
		
		/* explicitly clear all cp interrupts */
		adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
	}

	
	/* setup scratch/timestamp */
	adreno_regwrite(device, REG_SCRATCH_ADDR, device->memstore.gpuaddr +
			     KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
				     soptimestamp));

	adreno_regwrite(device, REG_SCRATCH_UMSK,
			     GSL_RB_MEMPTRS_SCRATCH_MASK);

	

	/* load the CP ucode */
	status = adreno_ringbuffer_load_pm4_ucode(device);
	if (status != 0)
		return status;

	
	/* load the prefetch parser ucode */
	status = adreno_ringbuffer_load_pfp_ucode(device);
	if (status != 0)
		return status;

	
	if (adreno_is_a305(adreno_dev) || adreno_is_a320(adreno_dev))
		adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000E0602);

	rb->rptr = 0;
	rb->wptr = 0;

	
	/* clear ME_HALT to start micro engine */
	adreno_regwrite(device, REG_CP_ME_CNTL, 0);

	
	/* ME init is GPU specific, so jump into the sub-function */
	adreno_dev->gpudev->rb_init(adreno_dev, rb);

	
	/* idle device to validate ME INIT */
	status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);

	if (status == 0)
		rb->flags |= KGSL_FLAGS_STARTED;

	return status;
}