Example #1
static inline void
set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr)
{
	u32 cb_color_info;
	int pitch, slice;
	RING_LOCALS;
	DRM_DEBUG("\n");

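	/* Round the render-target height up to a multiple of 8 lines. */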
	h = (h + 7) & ~7;
	if (h < 8)
		h = 8;

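	/* Pitch is stored in units of 8 pixels and slice in units of 64
	 * pixels, each minus one; the format lands in CB_COLOR_INFO. */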
	cb_color_info = ((format << 2) | (1 << 27));
	pitch = (w / 8) - 1;
	slice = ((w * h) / 64) - 1;

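	/* RV6xx parts (post-R600, pre-RV770) also need a SURFACE_BASE_UPDATE
	 * packet after the color base register is written. */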
	if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600) &&
	    ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)) {
		BEGIN_RING(21 + 2);
		OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
		OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
		OUT_RING(gpu_addr >> 8);
		OUT_RING(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
		OUT_RING(2 << 0);
	} else {
		/* ... remainder of this function truncated in the original excerpt ... */
	}
}
Example #2
static void r700SendVTXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t         *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    unsigned int i, j = 0;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.tcl.aos_count == 0)
        return;

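    /* Zero the base-vertex and start-instance control constants before the
     * per-attribute fetch resources are set up. */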
    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);
    END_BATCH();
    COMMIT_BATCH();

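    /* Emit one vertex fetch constant for each enabled vertex attribute. */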
    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vp->mesa_program->Base.InputsRead & (1 << i)) {
            r700SetupVTXConstants(ctx,
                                  (void *)(&context->radeon.tcl.aos[j]),
                                  &(context->stream_desc[j]));
            j++;
        }
    }
}
Example #3
static void r300FireEB(r300ContextPtr rmesa, unsigned long addr,
		       int vertex_count, int type)
{
	int cmd_reserved = 0;
	int cmd_written = 0;
	drm_radeon_cmd_header_t *cmd = NULL;

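	/* Fire an indexed draw: vertex count and primitive type go into
	 * VAP_VF_CNTL, with 32-bit indices walked from the index buffer. */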
	start_packet3(CP_PACKET3(R300_PACKET3_3D_DRAW_INDX_2, 0), 0);
	e32(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (vertex_count << 16) | type | R300_VAP_VF_CNTL__INDEX_SIZE_32bit);

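	/* Then hand the CP the index buffer address and its length. */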
	start_packet3(CP_PACKET3(R300_PACKET3_INDX_BUFFER, 2), 2);
	e32(R300_EB_UNK1 | (0 << 16) | R300_EB_UNK2);
	e32(addr);
	e32(vertex_count);
}
Example #4
static void r300EmitAOS(r300ContextPtr rmesa, GLuint nr, GLuint offset)
{
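	/* Packet size: 1 dword for the array count, 3 dwords per pair of
	 * arrays (packed size/stride word plus two offsets), and 2 dwords
	 * for a trailing unpaired array. */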
	int sz = 1 + (nr >> 1) * 3 + (nr & 1) * 2;
	int i;
	int cmd_reserved = 0;
	int cmd_written = 0;
	drm_radeon_cmd_header_t *cmd = NULL;

	if (RADEON_DEBUG & DEBUG_VERTS)
		fprintf(stderr, "%s: nr=%d, ofs=0x%08x\n", __FUNCTION__, nr,
			offset);

	start_packet3(CP_PACKET3(R300_PACKET3_3D_LOAD_VBPNTR, sz - 1), sz - 1);
	e32(nr);

	for (i = 0; i + 1 < nr; i += 2) {
		e32((rmesa->state.aos[i].aos_size << 0) |
		    (rmesa->state.aos[i].aos_stride << 8) |
		    (rmesa->state.aos[i + 1].aos_size << 16) |
		    (rmesa->state.aos[i + 1].aos_stride << 24));

		e32(rmesa->state.aos[i].aos_offset + offset * 4 * rmesa->state.aos[i].aos_stride);
		e32(rmesa->state.aos[i + 1].aos_offset + offset * 4 * rmesa->state.aos[i + 1].aos_stride);
	}

	if (nr & 1) {
		e32((rmesa->state.aos[nr - 1].aos_size << 0) |
		    (rmesa->state.aos[nr - 1].aos_stride << 8));
		e32(rmesa->state.aos[nr - 1].aos_offset + offset * 4 * rmesa->state.aos[nr - 1].aos_stride);
	}
}
Example #5
static void r700SendQueryBegin(GLcontext *ctx, struct radeon_state_atom *atom)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_query_object *query = radeon->query.current;
	BATCH_LOCALS(radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	/* clear the buffer */
	radeon_bo_map(query->bo, GL_FALSE);
	memset(query->bo->ptr, 0, 4 * 2 * sizeof(uint64_t)); /* 4 DBs, 2 qwords each */
	radeon_bo_unmap(query->bo);

	radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
				      query->bo,
				      0, RADEON_GEM_DOMAIN_GTT);

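	/* ZPASS_DONE makes the hardware write the Z-pass counts for each DB
	 * to the query buffer at the given offset. */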
	BEGIN_BATCH_NO_AUTOSTATE(4 + 2);
	R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 2));
	R600_OUT_BATCH(ZPASS_DONE);
	R600_OUT_BATCH(query->curr_offset); /* hw writes qwords */
	R600_OUT_BATCH(0x00000000);
	R600_OUT_BATCH_RELOC(VGT_EVENT_INITIATOR, query->bo, 0, 0, RADEON_GEM_DOMAIN_GTT, 0);
	END_BATCH();
	query->emitted_begin = GL_TRUE;
}
Example #6
static void r300FireAOS(r300ContextPtr rmesa, int vertex_count, int type)
{
	int cmd_reserved = 0;
	int cmd_written = 0;
	drm_radeon_cmd_header_t *cmd = NULL;

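	/* Fire a vertex-list draw that walks the arrays previously loaded
	 * with 3D_LOAD_VBPNTR. */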
	start_packet3(CP_PACKET3(R300_PACKET3_3D_DRAW_VBUF_2, 0), 0);
	e32(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (vertex_count << 16) | type);
}
Example #7
/**
 * Emit the sequence to pacify R300.
 */
static __inline__ void r300_pacify(drm_radeon_private_t* dev_priv)
{
	RING_LOCALS;

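	/* Flush the destination cache, then 0x4f18 (presumably the Z-cache
	 * control/status register, unnamed in this tree), and finish with a
	 * CP NOP. */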
	BEGIN_RING(6);
	OUT_RING( CP_PACKET0( R300_RB3D_DSTCACHE_CTLSTAT, 0 ) );
	OUT_RING( 0xa );
	OUT_RING( CP_PACKET0( 0x4f18, 0 ) );
	OUT_RING( 0x3 );
	OUT_RING( CP_PACKET3( RADEON_CP_NOP, 0 ) );
	OUT_RING( 0x0 );
	ADVANCE_RING();
}
Example #8
static void evergreen_emit_query_finish(radeonContextPtr radeon)
{
	context_t *context = (context_t*) radeon;
	BATCH_LOCALS(&context->radeon);

	struct radeon_query_object *query = radeon->query.current;

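	/* The end-of-query sample lands one qword after the begin sample,
	 * hence the +8 byte offset below. */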
	BEGIN_BATCH_NO_AUTOSTATE(4 + 2);
	R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 2));
	R600_OUT_BATCH(R600_EVENT_TYPE(ZPASS_DONE) | R600_EVENT_INDEX(1));
	R600_OUT_BATCH(query->curr_offset + 8); /* hw writes qwords */
	R600_OUT_BATCH(0x00000000);
	R600_OUT_BATCH_RELOC(VGT_EVENT_INITIATOR, query->bo, 0, 0, RADEON_GEM_DOMAIN_GTT, 0);
	END_BATCH();
	assert(query->curr_offset < RADEON_QUERY_PAGE_SIZE);
	query->emitted_begin = GL_FALSE;
}
Example #9
/**
 * Emit a clear packet from userspace.
 * Called by r300_emit_packet3.
 */
static __inline__ int r300_emit_clear(drm_radeon_private_t* dev_priv,
				      drm_radeon_cmd_buffer_t* cmdbuf)
{
	RING_LOCALS;

	if (8*4 > cmdbuf->bufsz)
		return DRM_ERR(EINVAL);

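	/* Draw a single point in immediate mode; the 8 dwords of vertex data
	 * come straight from the userspace command buffer. */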
	BEGIN_RING(10);
	OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 8 ) );
	OUT_RING( R300_PRIM_TYPE_POINT|R300_PRIM_WALK_RING|
	          (1<<R300_PRIM_NUM_VERTICES_SHIFT) );
	OUT_RING_TABLE( (int __user*)cmdbuf->buf, 8 );
	ADVANCE_RING();

	cmdbuf->buf += 8*4;
	cmdbuf->bufsz -= 8*4;

	return 0;
}
Example #10
static void r700SendDepthTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	struct radeon_renderbuffer *rrb;
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	rrb = radeon_get_depthbuffer(&context->radeon);
	if (!rrb || !rrb->bo) {
		return;
	}

	r700SetDepthTarget(context);

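	/* Emit the DB_DEPTH_SIZE/VIEW and DB_DEPTH_BASE/INFO register pairs,
	 * relocating the base address against the depth buffer BO. */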
	BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
	R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
	R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
	R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
	R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 2);
	R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
	R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
	R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
			     rrb->bo,
			     r700->DB_DEPTH_BASE.u32All,
			     0, RADEON_GEM_DOMAIN_VRAM, 0);
	END_BATCH();

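	/* RV6xx parts need a SURFACE_BASE_UPDATE after the depth base
	 * changes; bit 0 selects the depth surface here. */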
	if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
	    (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
		BEGIN_BATCH_NO_AUTOSTATE(2);
		R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
		R600_OUT_BATCH(1 << 0);
		END_BATCH();
	}

	COMMIT_BATCH();

}
Example #11
static void r700SendPSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	int i;
	BATCH_LOCALS(&context->radeon);

	if (r700->ps.num_consts == 0)
		return;

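	/* Each constant is a 4-dword vector; a single SET_ALU_CONST packet
	 * carries the whole PS constant file, starting at the PS offset in
	 * the ALU constant store. */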
	BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->ps.num_consts * 4));
	R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->ps.num_consts * 4)));
	/* assembler map const from very beginning. */
	R600_OUT_BATCH(SQ_ALU_CONSTANT_PS_OFFSET * 4);
	for (i = 0; i < r700->ps.num_consts; i++) {
		R600_OUT_BATCH(r700->ps.consts[i][0].u32All);
		R600_OUT_BATCH(r700->ps.consts[i][1].u32All);
		R600_OUT_BATCH(r700->ps.consts[i][2].u32All);
		R600_OUT_BATCH(r700->ps.consts[i][3].u32All);
	}
	END_BATCH();
	COMMIT_BATCH();
}
Example #12
static void r700SendTexSamplerState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t         *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
	unsigned int i;

	struct r700_vertex_program *vp = context->selected_vp;

	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

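	/* Each sampler takes three state dwords; vertex-shader samplers live
	 * at SQ_TEX_SAMPLER_VS_OFFSET in the sampler file. */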
	for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
		if (ctx->Texture.Unit[i]._ReallyEnabled) {            
			radeonTexObj *t = r700->textures[i];
			if (t) {
				BEGIN_BATCH_NO_AUTOSTATE(5);
				R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_SAMPLER, 3));

				if ((1 << i) & vp->r700AsmCode.unVetTexBits) {
					/* vs texture */
					R600_OUT_BATCH((i + SQ_TEX_SAMPLER_VS_OFFSET) * SAMPLER_STRIDE);
				} else {
					R600_OUT_BATCH(i * 3);
				}

				R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER0);
				R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER1);
				R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER2);
				END_BATCH();
				COMMIT_BATCH();
			}
		}
	}
}
Example #13
static void evergreenRunRenderPrimitive(GLcontext * ctx, int start, int end, int prim,
					GLint basevertex) //same
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, total_emit;
    int num_indices;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type     = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices    = 0;

    type = evergreenPrimitiveType(prim);
    num_indices = evergreenNumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
		 "%s type %x num_indices %d\n",
		 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
	    return;

    SETfield(vgt_primitive_type, type,
	     VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    if (context->ind_buf.is_32bit)
        SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    else
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_SRC_SEL_DMA, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    total_emit =   3  /* VGT_PRIMITIVE_TYPE */
	         + 2  /* VGT_INDEX_TYPE */
	         + 2  /* NUM_INSTANCES */
	         + 4  /* VTX_BASE_VTX_LOC + VTX_START_INST_LOC */
	         + 5 + 2; /* DRAW_INDEX */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    // prim
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    // index type
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    // num instances
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* offset */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 2));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(basevertex); //VTX_BASE_VTX_LOC
    R600_OUT_BATCH(0); //VTX_START_INST_LOC
    // draw packet
    R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX, 3));
    R600_OUT_BATCH(context->ind_buf.bo_offset);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(vgt_num_indices);
    R600_OUT_BATCH(vgt_draw_initiator);
    R600_OUT_BATCH_RELOC(context->ind_buf.bo_offset,
			 context->ind_buf.bo,
			 context->ind_buf.bo_offset,
			 RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}
Example #14
static void evergreenRunRenderPrimitiveImmediate(GLcontext * ctx, int start, int end, int prim) //same
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, i;
    uint32_t num_indices, total_emit = 0;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type     = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices    = 0;

    type = evergreenPrimitiveType(prim);
    num_indices = evergreenNumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
		 "%s type %x num_indices %d\n",
		 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
	    return;

    SETfield(vgt_primitive_type, type,
	     VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    if (num_indices > 0xffff)
        SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    else
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

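    /* A draw starting at 0 can use hardware auto-generated indices;
     * otherwise the indices are emitted inline, packed two per dword
     * when they fit in 16 bits. */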
    if (start == 0)
    {
	SETfield(vgt_draw_initiator, DI_SRC_SEL_AUTO_INDEX, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }
    else
    {
	if (num_indices > 0xffff)
	{
		total_emit += num_indices;
	}
	else
	{
		total_emit += (num_indices + 1) / 2;
	}
	SETfield(vgt_draw_initiator, DI_SRC_SEL_IMMEDIATE, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }

    total_emit +=   3 /* VGT_PRIMITIVE_TYPE */
	          + 2 /* VGT_INDEX_TYPE */
	          + 2 /* NUM_INSTANCES */
	          + 4 /* VTX_BASE_VTX_LOC + VTX_START_INST_LOC */
	          + 3; /* DRAW */              

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    // prim
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    // index type
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    // num instances
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* offset */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 2));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0); //VTX_BASE_VTX_LOC
    R600_OUT_BATCH(0); //VTX_START_INST_LOC
    // draw packet
    if(start == 0)
    {
        R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
        R600_OUT_BATCH(vgt_num_indices);
        R600_OUT_BATCH(vgt_draw_initiator);
    }
    else
    {
	if (num_indices > 0xffff)
        {
	    R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (num_indices + 1)));
	    R600_OUT_BATCH(vgt_num_indices);
	    R600_OUT_BATCH(vgt_draw_initiator);
	    for (i = start; i < (start + num_indices); i++)
	    {
		R600_OUT_BATCH(i);
	    }
	}
	else
        {
	    R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (((num_indices + 1) / 2) + 1)));
	    R600_OUT_BATCH(vgt_num_indices);
	    R600_OUT_BATCH(vgt_draw_initiator);
	    for (i = start; i < (start + num_indices); i += 2)
	    {
		if ((i + 1) == (start + num_indices))
		{
		    R600_OUT_BATCH(i);
		}
		else
		{
		    R600_OUT_BATCH(((i + 1) << 16) | (i));
		}
	    }
	}
    }

    END_BATCH();
    COMMIT_BATCH();
}
Example #15
static void r700SendRenderTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	struct radeon_renderbuffer *rrb;
	BATCH_LOCALS(&context->radeon);
	int id = 0;
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	rrb = radeon_get_colorbuffer(&context->radeon);
	if (!rrb || !rrb->bo) {
		return;
	}

	r700SetRenderTarget(context, 0);

	if (id >= R700_MAX_RENDER_TARGETS)
		return;

	if (!r700->render_target[id].enabled)
		return;

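	/* Emit CB_COLOR0_BASE with a relocation against the color buffer BO. */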
	BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
	R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
	R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
	R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
			     rrb->bo,
			     r700->render_target[id].CB_COLOR0_BASE.u32All,
			     0, RADEON_GEM_DOMAIN_VRAM, 0);
	END_BATCH();

	if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
	    (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
		BEGIN_BATCH_NO_AUTOSTATE(2);
		R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
		R600_OUT_BATCH((2 << id));
		END_BATCH();
	}
	/* Point the CMASK & TILE buffers at the color buffer offset; since
	 * we don't use them, this is harmless and keeps the command stream
	 * valid. */
	BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
	R600_OUT_BATCH_REGSEQ(CB_COLOR0_TILE + (4 * id), 1);
	R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_TILE.u32All);
	R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
			     rrb->bo,
			     r700->render_target[id].CB_COLOR0_BASE.u32All,
			     0, RADEON_GEM_DOMAIN_VRAM, 0);
	END_BATCH();
	BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
	R600_OUT_BATCH_REGSEQ(CB_COLOR0_FRAG + (4 * id), 1);
	R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_FRAG.u32All);
	R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
			     rrb->bo,
			     r700->render_target[id].CB_COLOR0_BASE.u32All,
			     0, RADEON_GEM_DOMAIN_VRAM, 0);
	END_BATCH();

	BEGIN_BATCH_NO_AUTOSTATE(12);
	R600_OUT_BATCH_REGVAL(CB_COLOR0_SIZE + (4 * id), r700->render_target[id].CB_COLOR0_SIZE.u32All);
	R600_OUT_BATCH_REGVAL(CB_COLOR0_VIEW + (4 * id), r700->render_target[id].CB_COLOR0_VIEW.u32All);
	R600_OUT_BATCH_REGVAL(CB_COLOR0_INFO + (4 * id), r700->render_target[id].CB_COLOR0_INFO.u32All);
	R600_OUT_BATCH_REGVAL(CB_COLOR0_MASK + (4 * id), r700->render_target[id].CB_COLOR0_MASK.u32All);
	END_BATCH();

	COMMIT_BATCH();

}
Example #16
static void r700SendTexState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t         *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

	struct r700_vertex_program *vp = context->selected_vp;

	struct radeon_bo *bo = NULL;
	unsigned int i;
	BATCH_LOCALS(&context->radeon);

	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
		if (ctx->Texture.Unit[i]._ReallyEnabled) {            
			radeonTexObj *t = r700->textures[i];
			if (t) {
				if (!t->image_override) {
					bo = t->mt->bo;
				} else {
					bo = t->bo;
				}
				if (bo) {

					r700SyncSurf(context, bo,
						     RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM,
						     0, TC_ACTION_ENA_bit);

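					/* The two relocations below patch the texture address
					 * words (SQ_TEX_RESOURCE2/3) when the CS is submitted. */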
					BEGIN_BATCH_NO_AUTOSTATE(9 + 4);
					R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));

					if ((1 << i) & vp->r700AsmCode.unVetTexBits) {
						/* vs texture */
						R600_OUT_BATCH((i + VERT_ATTRIB_MAX + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
					} else {
						R600_OUT_BATCH(i * 7);
					}

					R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE0);
					R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE1);
					R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE2);
					R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE3);
					R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE4);
					R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE5);
					R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE6);
					R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE2,
							     bo,
							     r700->textures[i]->SQ_TEX_RESOURCE2,
							     RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
					R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE3,
							     bo,
							     r700->textures[i]->SQ_TEX_RESOURCE3,
							     RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
					END_BATCH();
					COMMIT_BATCH();
				}
			}
		}
	}
}
Example #17
static void r700SetupVTXConstants(GLcontext  * ctx,
				  void *       pAos,
				  StreamDesc * pStreamDesc)
{
    context_t *context = R700_CONTEXT(ctx);
    struct radeon_aos * paos = (struct radeon_aos *)pAos;
    unsigned int nVBsize;
    BATCH_LOCALS(&context->radeon);

    unsigned int uSQ_VTX_CONSTANT_WORD0_0;
    unsigned int uSQ_VTX_CONSTANT_WORD1_0;
    unsigned int uSQ_VTX_CONSTANT_WORD2_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD3_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD6_0 = 0;

    if (!paos->bo)
	    return;

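    /* Parts without a vertex cache fetch vertex data through the texture
     * cache, so sync the TC instead of the VC on those. */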
    if ((context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV610) ||
	(context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV620) ||
	(context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS780) ||
	(context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS880) ||
	(context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV710))
	    r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, TC_ACTION_ENA_bit);
    else
	    r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, VC_ACTION_ENA_bit);

    if(0 == pStreamDesc->stride)
    {
        nVBsize = paos->count * pStreamDesc->size * getTypeSize(pStreamDesc->type);
    }
    else
    {
        nVBsize = (paos->count - 1) * pStreamDesc->stride
                  + pStreamDesc->size * getTypeSize(pStreamDesc->type);
    }

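    /* WORD0 holds the buffer offset, WORD1 the size of the vertex data in
     * bytes minus one. */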
    uSQ_VTX_CONSTANT_WORD0_0 = paos->offset;
    uSQ_VTX_CONSTANT_WORD1_0 = nVBsize - 1;

    SETfield(uSQ_VTX_CONSTANT_WORD2_0, 0, BASE_ADDRESS_HI_shift, BASE_ADDRESS_HI_mask); /* TODO */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, pStreamDesc->stride, SQ_VTX_CONSTANT_WORD2_0__STRIDE_shift,
	     SQ_VTX_CONSTANT_WORD2_0__STRIDE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, GetSurfaceFormat(pStreamDesc->type, pStreamDesc->size, NULL),
	     SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_shift,
	     SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_mask); /* TODO : trace back api for initial data type, not only GL_FLOAT */
    
    if(GL_TRUE == pStreamDesc->normalize)
    {
        SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_NORM,
	             SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    }
    else
    {
        SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_SCALED,
	             SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    }

    if(1 == pStreamDesc->_signed)
    {
        SETbit(uSQ_VTX_CONSTANT_WORD2_0, SQ_VTX_CONSTANT_WORD2_0__FORMAT_COMP_ALL_bit);
    }

    SETfield(uSQ_VTX_CONSTANT_WORD3_0, 1, MEM_REQUEST_SIZE_shift, MEM_REQUEST_SIZE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD6_0, SQ_TEX_VTX_VALID_BUFFER,
	     SQ_TEX_RESOURCE_WORD6_0__TYPE_shift, SQ_TEX_RESOURCE_WORD6_0__TYPE_mask);

    BEGIN_BATCH_NO_AUTOSTATE(9 + 2);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
    R600_OUT_BATCH((pStreamDesc->element + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD0_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
    R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
                         paos->bo,
                         uSQ_VTX_CONSTANT_WORD0_0,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();

}
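A note on the packet headers: every excerpt above builds type-3 command-processor packets with CP_PACKET3, whose definition is not shown in these excerpts. As a rough sketch of the R600-era encoding (based on the public R6xx/R7xx documentation, not copied from either driver), the header packs a 2-bit packet type, a 14-bit count field holding the payload size in dwords minus one, and an 8-bit opcode; that is why, for instance, SET_RESOURCE above passes 7 for its 8-dword payload:

#include <stdint.h>

/* Hypothetical stand-in for the drivers' CP_PACKET3 macro; field layout:
 * [31:30] packet type (3), [29:16] payload dword count minus one,
 * [15:8] opcode. */
static inline uint32_t cp_packet3(uint32_t opcode, uint32_t count)
{
	return (3u << 30) | ((count & 0x3fffu) << 16) | ((opcode & 0xffu) << 8);
}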