Example #1
static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	BEGIN_BATCH_NO_AUTOSTATE(17);

	R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
	R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
	R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);

	R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
	R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);

	R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
	R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
	R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);

	R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);

	END_BATCH();
	COMMIT_BATCH();
}
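The batch length passed to BEGIN_BATCH_NO_AUTOSTATE follows directly from the emit macros. Below is a minimal sketch of the accounting for this example, assuming (as in Mesa's r600_cmdbuf.h) that R600_OUT_BATCH_REGSEQ(reg, n) emits a two-dword SET_*_REG packet header followed by n register values, and that R600_OUT_BATCH_REGVAL is the n = 1 case; the enum name is illustrative only. The same arithmetic explains the sizes used in the other state atoms on this page.

/* Dword accounting for the batch in r700SendDBState above. */
enum {
	DB_STATE_DWORDS = (2 + 2)	/* REGSEQ(DB_STENCIL_CLEAR, 2) */
			+ 3		/* REGVAL(DB_DEPTH_CONTROL) */
			+ 3		/* REGVAL(DB_SHADER_CONTROL) */
			+ (2 + 2)	/* REGSEQ(DB_RENDER_CONTROL, 2) */
			+ 3		/* REGVAL(DB_ALPHA_TO_MASK) */
};	/* == 17, matching BEGIN_BATCH_NO_AUTOSTATE(17) */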
Example #2
static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
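		/* CB_CLEAR_* and CB_FOG_* exist only on r6xx parts (note the
		 * _R6XX field suffix); rv770 and newer dropped them. */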
		BEGIN_BATCH_NO_AUTOSTATE(11);
		R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
		R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
		R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
		R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
		R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
		R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
		R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
		R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
		R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
		END_BATCH();
	}

	BEGIN_BATCH_NO_AUTOSTATE(7);
	R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
	R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
	R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);
	R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
	END_BATCH();
	COMMIT_BATCH();
}
Example #3
static void r700SendScissorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	BEGIN_BATCH_NO_AUTOSTATE(22);
	R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
	R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

	R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 12);
	R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
	R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);

	R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
	R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);
	END_BATCH();
	COMMIT_BATCH();
}
Example #4
static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	int id = 0;
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	if (id >= R700_MAX_VIEWPORTS)
		return;

	if (!r700->viewport[id].enabled)
		return;

	BEGIN_BATCH_NO_AUTOSTATE(16);
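	/* Per-viewport register strides: the TL/BR scissor pair and the
	 * ZMIN/ZMAX pair are 2 regs x 4 bytes = 8 bytes apart, the six
	 * PA_CL_VPORT_* transform regs 24 bytes, hence 8 * id and 24 * id. */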
	R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
	R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);
	R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
	R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);
	R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
	END_BATCH();

	COMMIT_BATCH();

}
Example #5
static void r700SendVGTState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	BEGIN_BATCH_NO_AUTOSTATE(41);

	R600_OUT_BATCH_REGSEQ(VGT_MAX_VTX_INDX, 4);
	R600_OUT_BATCH(r700->VGT_MAX_VTX_INDX.u32All);
	R600_OUT_BATCH(r700->VGT_MIN_VTX_INDX.u32All);
	R600_OUT_BATCH(r700->VGT_INDX_OFFSET.u32All);
	R600_OUT_BATCH(r700->VGT_MULTI_PRIM_IB_RESET_INDX.u32All);

	R600_OUT_BATCH_REGSEQ(VGT_OUTPUT_PATH_CNTL, 13);
	R600_OUT_BATCH(r700->VGT_OUTPUT_PATH_CNTL.u32All);
	R600_OUT_BATCH(r700->VGT_HOS_CNTL.u32All);
	R600_OUT_BATCH(r700->VGT_HOS_MAX_TESS_LEVEL.u32All);
	R600_OUT_BATCH(r700->VGT_HOS_MIN_TESS_LEVEL.u32All);
	R600_OUT_BATCH(r700->VGT_HOS_REUSE_DEPTH.u32All);
	R600_OUT_BATCH(r700->VGT_GROUP_PRIM_TYPE.u32All);
	R600_OUT_BATCH(r700->VGT_GROUP_FIRST_DECR.u32All);
	R600_OUT_BATCH(r700->VGT_GROUP_DECR.u32All);
	R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_CNTL.u32All);
	R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_CNTL.u32All);
	R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_FMT_CNTL.u32All);
	R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_FMT_CNTL.u32All);
	R600_OUT_BATCH(r700->VGT_GS_MODE.u32All);

	R600_OUT_BATCH_REGVAL(VGT_PRIMITIVEID_EN, r700->VGT_PRIMITIVEID_EN.u32All);
	R600_OUT_BATCH_REGVAL(VGT_MULTI_PRIM_IB_RESET_EN, r700->VGT_MULTI_PRIM_IB_RESET_EN.u32All);
	R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_0, r700->VGT_INSTANCE_STEP_RATE_0.u32All);
	R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_1, r700->VGT_INSTANCE_STEP_RATE_1.u32All);

	R600_OUT_BATCH_REGSEQ(VGT_STRMOUT_EN, 3);
	R600_OUT_BATCH(r700->VGT_STRMOUT_EN.u32All);
	R600_OUT_BATCH(r700->VGT_REUSE_OFF.u32All);
	R600_OUT_BATCH(r700->VGT_VTX_CNT_EN.u32All);

	R600_OUT_BATCH_REGVAL(VGT_STRMOUT_BUFFER_EN, r700->VGT_STRMOUT_BUFFER_EN.u32All);

	END_BATCH();
	COMMIT_BATCH();
}
Example #6
static void r700SendPolyState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);

	BEGIN_BATCH_NO_AUTOSTATE(10);
	R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 2);
	R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_DB_FMT_CNTL.u32All);
	R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_CLAMP.u32All);
	R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
	R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_SCALE.u32All);
	R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.u32All);
	R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_SCALE.u32All);
	R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_OFFSET.u32All);
	END_BATCH();
	COMMIT_BATCH();

}
Example #7
static void r700SendStencilState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);

	BEGIN_BATCH_NO_AUTOSTATE(4);
	R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
	R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
	R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);
	END_BATCH();
	COMMIT_BATCH();
}
Example #8
static void r700SendDepthTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	struct radeon_renderbuffer *rrb;
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	rrb = radeon_get_depthbuffer(&context->radeon);
	if (!rrb || !rrb->bo) {
		return;
	}

	r700SetDepthTarget(context);

	BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
	R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
	R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
	R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
	R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 2);
	R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
	R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
	R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
			     rrb->bo,
			     r700->DB_DEPTH_BASE.u32All,
			     0, RADEON_GEM_DOMAIN_VRAM, 0);
	END_BATCH();

	if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
	    (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
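		/* r6xx revisions after R600 but before RV770 need a
		 * SURFACE_BASE_UPDATE packet whenever a surface base address
		 * is reprogrammed; bit 0 flags the depth surface. */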
		BEGIN_BATCH_NO_AUTOSTATE(2);
		R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
		R600_OUT_BATCH(1 << 0);
		END_BATCH();
	}

	COMMIT_BATCH();

}
Example #9
static void r700SendGBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);

	BEGIN_BATCH_NO_AUTOSTATE(6);
	R600_OUT_BATCH_REGSEQ(PA_CL_GB_VERT_CLIP_ADJ, 4);
	R600_OUT_BATCH(r700->PA_CL_GB_VERT_CLIP_ADJ.u32All);
	R600_OUT_BATCH(r700->PA_CL_GB_VERT_DISC_ADJ.u32All);
	R600_OUT_BATCH(r700->PA_CL_GB_HORZ_CLIP_ADJ.u32All);
	R600_OUT_BATCH(r700->PA_CL_GB_HORZ_DISC_ADJ.u32All);
	END_BATCH();
	COMMIT_BATCH();
}
Example #10
static void r700SendSQConfig(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	BEGIN_BATCH_NO_AUTOSTATE(34);
	R600_OUT_BATCH_REGSEQ(SQ_CONFIG, 6);
	R600_OUT_BATCH(r700->sq_config.SQ_CONFIG.u32All);
	R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_1.u32All);
	R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_2.u32All);
	R600_OUT_BATCH(r700->sq_config.SQ_THREAD_RESOURCE_MGMT.u32All);
	R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_1.u32All);
	R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_2.u32All);

	R600_OUT_BATCH_REGVAL(TA_CNTL_AUX, r700->TA_CNTL_AUX.u32All);
	R600_OUT_BATCH_REGVAL(VC_ENHANCE, r700->VC_ENHANCE.u32All);
	R600_OUT_BATCH_REGVAL(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, r700->SQ_DYN_GPR_CNTL_PS_FLUSH_REQ.u32All);
	R600_OUT_BATCH_REGVAL(DB_DEBUG, r700->DB_DEBUG.u32All);
	R600_OUT_BATCH_REGVAL(DB_WATERMARKS, r700->DB_WATERMARKS.u32All);

	R600_OUT_BATCH_REGSEQ(SQ_ESGS_RING_ITEMSIZE, 9);
	R600_OUT_BATCH(r700->SQ_ESGS_RING_ITEMSIZE.u32All);
	R600_OUT_BATCH(r700->SQ_GSVS_RING_ITEMSIZE.u32All);
	R600_OUT_BATCH(r700->SQ_ESTMP_RING_ITEMSIZE.u32All);
	R600_OUT_BATCH(r700->SQ_GSTMP_RING_ITEMSIZE.u32All);
	R600_OUT_BATCH(r700->SQ_VSTMP_RING_ITEMSIZE.u32All);
	R600_OUT_BATCH(r700->SQ_PSTMP_RING_ITEMSIZE.u32All);
	R600_OUT_BATCH(r700->SQ_FBUF_RING_ITEMSIZE.u32All);
	R600_OUT_BATCH(r700->SQ_REDUC_RING_ITEMSIZE.u32All);
	R600_OUT_BATCH(r700->SQ_GS_VERT_ITEMSIZE.u32All);
	END_BATCH();

	COMMIT_BATCH();
}
Example #11
static void r700SendCBCLRCMPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);

	BEGIN_BATCH_NO_AUTOSTATE(6);
	R600_OUT_BATCH_REGSEQ(CB_CLRCMP_CONTROL, 4);
	R600_OUT_BATCH(r700->CB_CLRCMP_CONTROL.u32All);
	R600_OUT_BATCH(r700->CB_CLRCMP_SRC.u32All);
	R600_OUT_BATCH(r700->CB_CLRCMP_DST.u32All);
	R600_OUT_BATCH(r700->CB_CLRCMP_MSK.u32All);
	END_BATCH();
	COMMIT_BATCH();
}
Example #12
static void r700SendCBBlendColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	BEGIN_BATCH_NO_AUTOSTATE(6);
	R600_OUT_BATCH_REGSEQ(CB_BLEND_RED, 4);
	R600_OUT_BATCH(r700->CB_BLEND_RED.u32All);
	R600_OUT_BATCH(r700->CB_BLEND_GREEN.u32All);
	R600_OUT_BATCH(r700->CB_BLEND_BLUE.u32All);
	R600_OUT_BATCH(r700->CB_BLEND_ALPHA.u32All);
	END_BATCH();
	COMMIT_BATCH();
}
Example #13
static void r700SendSUState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);

	BEGIN_BATCH_NO_AUTOSTATE(9);
	R600_OUT_BATCH_REGVAL(PA_SU_SC_MODE_CNTL, r700->PA_SU_SC_MODE_CNTL.u32All);
	R600_OUT_BATCH_REGSEQ(PA_SU_POINT_SIZE, 4);
	R600_OUT_BATCH(r700->PA_SU_POINT_SIZE.u32All);
	R600_OUT_BATCH(r700->PA_SU_POINT_MINMAX.u32All);
	R600_OUT_BATCH(r700->PA_SU_LINE_CNTL.u32All);
	R600_OUT_BATCH(r700->PA_SU_VTX_CNTL.u32All);
	END_BATCH();
	COMMIT_BATCH();

}
Example #14
static void r700SendFSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	struct radeon_bo * pbo;
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	/* XXX fixme
	 * R6xx chips require a FS be emitted, even if it's not used.
	 * since we aren't using FS yet, just send the VS address to make
	 * the kernel command checker happy
	 */
	pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
	r700->fs.SQ_PGM_START_FS.u32All = r700->vs.SQ_PGM_START_VS.u32All;
	r700->fs.SQ_PGM_RESOURCES_FS.u32All = 0;
	r700->fs.SQ_PGM_CF_OFFSET_FS.u32All = 0;
	/* XXX */

	if (!pbo)
		return;

	r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

	BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
	R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
	R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
	R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
			     pbo,
			     r700->fs.SQ_PGM_START_FS.u32All,
			     RADEON_GEM_DOMAIN_GTT, 0, 0);
	END_BATCH();

	BEGIN_BATCH_NO_AUTOSTATE(6);
	R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_FS, r700->fs.SQ_PGM_RESOURCES_FS.u32All);
	R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_FS, r700->fs.SQ_PGM_CF_OFFSET_FS.u32All);
	END_BATCH();

	COMMIT_BATCH();

}
Example #15
static void r700SendUCPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	int i;
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	for (i = 0; i < R700_MAX_UCP; i++) {
		if (r700->ucp[i].enabled) {
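			/* Each user clip plane is four consecutive registers
			 * (X/Y/Z/W), i.e. 16 bytes, hence the 16 * i offset. */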
			BEGIN_BATCH_NO_AUTOSTATE(6);
			R600_OUT_BATCH_REGSEQ(PA_CL_UCP_0_X + (16 * i), 4);
			R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_X.u32All);
			R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Y.u32All);
			R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Z.u32All);
			R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_W.u32All);
			END_BATCH();
			COMMIT_BATCH();
		}
	}
}
Example #16
static void r700SendPSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveFpShaderBo(GL_CONTEXT(context));

    if (!pbo)
	    return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_PS, 1);
    R600_OUT_BATCH(r700->ps.SQ_PGM_START_PS.u32All);
    R600_OUT_BATCH_RELOC(r700->ps.SQ_PGM_START_PS.u32All,
		         pbo,
		         r700->ps.SQ_PGM_START_PS.u32All,
		         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_PS, r700->ps.SQ_PGM_RESOURCES_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_EXPORTS_PS, r700->ps.SQ_PGM_EXPORTS_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_PS, r700->ps.SQ_PGM_CF_OFFSET_PS.u32All);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(SQ_LOOP_CONST_0, 0x01000FFF);
    END_BATCH();

    COMMIT_BATCH();

}
Example #17
static void r700SendTexBorderColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t         *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
	unsigned int i;
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
		if (ctx->Texture.Unit[i]._ReallyEnabled) {
			radeonTexObj *t = r700->textures[i];
			if (t) {
				BEGIN_BATCH_NO_AUTOSTATE(2 + 4);
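				/* Four border-color regs (R/G/B/A) per sampler,
				 * so consecutive units sit 16 bytes apart. */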
				R600_OUT_BATCH_REGSEQ((TD_PS_SAMPLER0_BORDER_RED + (i * 16)), 4);
				R600_OUT_BATCH(t->TD_PS_SAMPLER0_BORDER_RED);
				R600_OUT_BATCH(t->TD_PS_SAMPLER0_BORDER_GREEN);
				R600_OUT_BATCH(t->TD_PS_SAMPLER0_BORDER_BLUE);
				R600_OUT_BATCH(t->TD_PS_SAMPLER0_BORDER_ALPHA);
				END_BATCH();
				COMMIT_BATCH();
			}
		}
	}
}
Example #18
static void evergreenRunRenderPrimitive(GLcontext * ctx, int start, int end, int prim,
					GLint basevertex) //same
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, total_emit;
    int num_indices;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type     = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices    = 0;

    type = evergreenPrimitiveType(prim);
    num_indices = evergreenNumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
		 "%s type %x num_indices %d\n",
		 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
	    return;

    SETfield(vgt_primitive_type, type,
	     VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);

    if (!context->ind_buf.is_32bit)
    {
	    SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_SRC_SEL_DMA, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    total_emit =   3  /* VGT_PRIMITIVE_TYPE */
	         + 2  /* VGT_INDEX_TYPE */
	         + 2  /* NUM_INSTANCES */
	         + 4  /* VTX_BASE_VTX_LOC + VTX_START_INST_LOC */
	         + 5 + 2; /* DRAW_INDEX */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    // prim
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    // index type
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    // num instances
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* offset */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 2));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(basevertex); //VTX_BASE_VTX_LOC
    R600_OUT_BATCH(0); //VTX_START_INST_LOC
    // draw packet
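    /* DRAW_INDEX body: 64-bit index-buffer address (low dword = BO offset,
     * high dword = 0; fixed up via the relocation emitted below), index
     * count, then the draw initiator. */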
    R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX, 3));
    R600_OUT_BATCH(context->ind_buf.bo_offset);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(vgt_num_indices);
    R600_OUT_BATCH(vgt_draw_initiator);
    R600_OUT_BATCH_RELOC(context->ind_buf.bo_offset,
			 context->ind_buf.bo,
			 context->ind_buf.bo_offset,
			 RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}
Example #19
static void r700SendRenderTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	struct radeon_renderbuffer *rrb;
	BATCH_LOCALS(&context->radeon);
	int id = 0;
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	rrb = radeon_get_colorbuffer(&context->radeon);
	if (!rrb || !rrb->bo) {
		return;
	}

	r700SetRenderTarget(context, 0);

	if (id >= R700_MAX_RENDER_TARGETS)
		return;

	if (!r700->render_target[id].enabled)
		return;

	BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
	R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
	R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
	R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
			     rrb->bo,
			     r700->render_target[id].CB_COLOR0_BASE.u32All,
			     0, RADEON_GEM_DOMAIN_VRAM, 0);
	END_BATCH();

	if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
	    (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
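		/* Same SURFACE_BASE_UPDATE requirement as for the depth
		 * target above; (2 << id) flags color surface id. */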
		BEGIN_BATCH_NO_AUTOSTATE(2);
		R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
		R600_OUT_BATCH((2 << id));
		END_BATCH();
	}
	/* Set CMASK & TILE buffer to the offset of color buffer as
	 * we don't use those this shouldn't cause any issue and we
	 * then have a valid cmd stream
	 */
	BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
	R600_OUT_BATCH_REGSEQ(CB_COLOR0_TILE + (4 * id), 1);
	R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_TILE.u32All);
	R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
			     rrb->bo,
			     r700->render_target[id].CB_COLOR0_BASE.u32All,
			     0, RADEON_GEM_DOMAIN_VRAM, 0);
	END_BATCH();
	BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
	R600_OUT_BATCH_REGSEQ(CB_COLOR0_FRAG + (4 * id), 1);
	R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_FRAG.u32All);
	R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
			     rrb->bo,
			     r700->render_target[id].CB_COLOR0_BASE.u32All,
			     0, RADEON_GEM_DOMAIN_VRAM, 0);
	END_BATCH();

	BEGIN_BATCH_NO_AUTOSTATE(12);
	R600_OUT_BATCH_REGVAL(CB_COLOR0_SIZE + (4 * id), r700->render_target[id].CB_COLOR0_SIZE.u32All);
	R600_OUT_BATCH_REGVAL(CB_COLOR0_VIEW + (4 * id), r700->render_target[id].CB_COLOR0_VIEW.u32All);
	R600_OUT_BATCH_REGVAL(CB_COLOR0_INFO + (4 * id), r700->render_target[id].CB_COLOR0_INFO.u32All);
	R600_OUT_BATCH_REGVAL(CB_COLOR0_MASK + (4 * id), r700->render_target[id].CB_COLOR0_MASK.u32All);
	END_BATCH();

	COMMIT_BATCH();

}
Example #20
static void r700SendSPIState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	unsigned int ui;
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	BEGIN_BATCH_NO_AUTOSTATE(59 + R700_MAX_SHADER_EXPORTS);

	R600_OUT_BATCH_REGSEQ(SQ_VTX_SEMANTIC_0, 32);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_0.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_1.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_2.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_3.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_4.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_5.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_6.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_7.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_8.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_9.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_10.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_11.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_12.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_13.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_14.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_15.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_16.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_17.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_18.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_19.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_20.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_21.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_22.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_23.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_24.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_25.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_26.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_27.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_28.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_29.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_30.u32All);
	R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_31.u32All);

	R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_ID_0, 10);
	R600_OUT_BATCH(r700->SPI_VS_OUT_ID_0.u32All);
	R600_OUT_BATCH(r700->SPI_VS_OUT_ID_1.u32All);
	R600_OUT_BATCH(r700->SPI_VS_OUT_ID_2.u32All);
	R600_OUT_BATCH(r700->SPI_VS_OUT_ID_3.u32All);
	R600_OUT_BATCH(r700->SPI_VS_OUT_ID_4.u32All);
	R600_OUT_BATCH(r700->SPI_VS_OUT_ID_5.u32All);
	R600_OUT_BATCH(r700->SPI_VS_OUT_ID_6.u32All);
	R600_OUT_BATCH(r700->SPI_VS_OUT_ID_7.u32All);
	R600_OUT_BATCH(r700->SPI_VS_OUT_ID_8.u32All);
	R600_OUT_BATCH(r700->SPI_VS_OUT_ID_9.u32All);

	R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_CONFIG, 9);
	R600_OUT_BATCH(r700->SPI_VS_OUT_CONFIG.u32All);
	R600_OUT_BATCH(r700->SPI_THREAD_GROUPING.u32All);
	R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_0.u32All);
	R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_1.u32All);
	R600_OUT_BATCH(r700->SPI_INTERP_CONTROL_0.u32All);
	R600_OUT_BATCH(r700->SPI_INPUT_Z.u32All);
	R600_OUT_BATCH(r700->SPI_FOG_CNTL.u32All);
	R600_OUT_BATCH(r700->SPI_FOG_FUNC_SCALE.u32All);
	R600_OUT_BATCH(r700->SPI_FOG_FUNC_BIAS.u32All);

	R600_OUT_BATCH_REGSEQ(SPI_PS_INPUT_CNTL_0, R700_MAX_SHADER_EXPORTS);
	for(ui = 0; ui < R700_MAX_SHADER_EXPORTS; ui++)
		R600_OUT_BATCH(r700->SPI_PS_INPUT_CNTL[ui].u32All);

	END_BATCH();
	COMMIT_BATCH();
}
Example #21
static void evergreenRunRenderPrimitiveImmediate(GLcontext * ctx, int start, int end, int prim) //same
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, i;
    uint32_t num_indices, total_emit = 0;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type     = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices    = 0;

    type = evergreenPrimitiveType(prim);
    num_indices = evergreenNumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
		 "%s type %x num_indices %d\n",
		 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
	    return;

    SETfield(vgt_primitive_type, type,
	     VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    if (num_indices > 0xffff)
    {
	    SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }
    else
    {
            SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    if (start == 0)
    {
	SETfield(vgt_draw_initiator, DI_SRC_SEL_AUTO_INDEX, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }
    else
    {
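	/* Immediate draws embed the indices in the packet itself: one dword
	 * per 32-bit index, or two 16-bit indices packed per dword. */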
	if (num_indices > 0xffff)
	{
		total_emit += num_indices;
	}
	else
	{
		total_emit += (num_indices + 1) / 2;
	}
	SETfield(vgt_draw_initiator, DI_SRC_SEL_IMMEDIATE, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }

    total_emit +=   3 /* VGT_PRIMITIVE_TYPE */
	          + 2 /* VGT_INDEX_TYPE */
	          + 2 /* NUM_INSTANCES */
	          + 4 /* VTX_BASE_VTX_LOC + VTX_START_INST_LOC */
	          + 3; /* DRAW */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    // prim
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    // index type
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    // num instances
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* offset */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 2));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0); //VTX_BASE_VTX_LOC
    R600_OUT_BATCH(0); //VTX_START_INST_LOC
    // draw packet
    if (start == 0)
    {
        R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
        R600_OUT_BATCH(vgt_num_indices);
        R600_OUT_BATCH(vgt_draw_initiator);
    }
    else
    {
	if (num_indices > 0xffff)
        {
	    R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (num_indices + 1)));
	    R600_OUT_BATCH(vgt_num_indices);
	    R600_OUT_BATCH(vgt_draw_initiator);
	    for (i = start; i < (start + num_indices); i++)
	    {
		R600_OUT_BATCH(i);
	    }
	}
	else
        {
	    R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (((num_indices + 1) / 2) + 1)));
	    R600_OUT_BATCH(vgt_num_indices);
	    R600_OUT_BATCH(vgt_draw_initiator);
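	    /* Pack two 16-bit indices per dword, low half first; a trailing
	     * odd index occupies the final dword alone. */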
	    for (i = start; i < (start + num_indices); i += 2)
	    {
		if ((i + 1) == (start + num_indices))
		{
		    R600_OUT_BATCH(i);
		}
		else
		{
		    R600_OUT_BATCH(((i + 1) << 16) | (i));
		}
	    }
	}
    }

    END_BATCH();
    COMMIT_BATCH();
}