static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(11);
        R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
        R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
        R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
        R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(7);
    R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
    R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
    R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}
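/* Note on batch sizing, here and in the other Send*State functions:
 * R600_OUT_BATCH_REGSEQ(reg, n) emits a 2-dword SET_*_REG packet header
 * followed by n data dwords, and R600_OUT_BATCH_REGVAL is the one-register
 * case (3 dwords total).  The BEGIN_BATCH_NO_AUTOSTATE counts follow from
 * that; e.g. the r6xx-only block above is (2 + 4) + (2 + 3) = 11 dwords
 * and the common block is (2 + 2) + 3 = 7, matching check_cb() below.
 */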
static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(17);
    R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
    R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);
    R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);
    R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
    R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
    R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);
    R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}
static void r700SendScissorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(22);
    R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 12);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);
    END_BATCH();
    COMMIT_BATCH();
}
static void r700SendCBBlendState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(3);
        R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);
    END_BATCH();

    /* the per-render-target blend control registers (CB_BLEND0_CONTROL and
     * up, 4 bytes apart) are not available on the original R600 */
    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled) {
                BEGIN_BATCH_NO_AUTOSTATE(3);
                R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
                                      r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
                END_BATCH();
            }
        }
    }
    COMMIT_BATCH();
}
GLboolean Process_Vertex_Program_Vfetch_Instructions2(GLcontext *ctx,
                                                      struct r700_vertex_program *vp,
                                                      struct gl_vertex_program *mesa_vp)
{
    int i;
    context_t *context = R700_CONTEXT(ctx);
    VTX_FETCH_METHOD vtxFetchMethod;

    vtxFetchMethod.bEnableMini = GL_FALSE;
    vtxFetchMethod.mega_fetch_remainder = 0;

    for (i = 0; i < context->nNumActiveAos; i++) {
        assemble_vfetch_instruction2(&vp->r700AsmCode,
                                     vp->r700AsmCode.ucVP_AttributeMap[context->stream_desc[i].element],
                                     context->stream_desc[i].type,
                                     context->stream_desc[i].size,
                                     context->stream_desc[i].element,
                                     context->stream_desc[i]._signed,
                                     context->stream_desc[i].normalize,
                                     context->stream_desc[i].format,
                                     &vtxFetchMethod);
    }

    return GL_TRUE;
}
void r700SetVertexFormat(GLcontext *ctx, const struct gl_client_array *arrays[], int count)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vpc =
        (struct r700_vertex_program *)ctx->VertexProgram._Current;
    struct gl_vertex_program *mesa_vp = (struct gl_vertex_program *)&(vpc->mesa_program);
    unsigned int unLoc = 0;
    unsigned int unBit = mesa_vp->Base.InputsRead;

    context->nNumActiveAos = 0;

    if (mesa_vp->IsPositionInvariant) {
        unBit |= VERT_BIT_POS;
    }

    while (unBit) {
        if (unBit & 1) {
            r700TranslateAttrib(ctx, unLoc, count, arrays[unLoc]);
        }
        unBit >>= 1;
        ++unLoc;
    }
    context->radeon.tcl.aos_count = context->nNumActiveAos;
}
static void r700SendVTXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    unsigned int i, j = 0;
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.tcl.aos_count == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);
    END_BATCH();
    COMMIT_BATCH();

    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vp->mesa_program->Base.InputsRead & (1 << i)) {
            r700SetupVTXConstants(ctx,
                                  (void *)(&context->radeon.tcl.aos[j]),
                                  &(context->stream_desc[j]));
            j++;
        }
    }
}
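/* The fixed cost here is 6 dwords: two 3-dword SET_CTL_CONST packets that
 * zero SQ_VTX_BASE_VTX_LOC and SQ_VTX_START_INST_LOC.  Each enabled vertex
 * stream then adds the 18 dwords accounted for in check_vtx() below,
 * emitted by r700SetupVTXConstants().
 */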
static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int id = 0;

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (id >= R700_MAX_VIEWPORTS)
        return;

    if (!r700->viewport[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(16);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);

    R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
    END_BATCH();
    COMMIT_BATCH();
}
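/* Each viewport owns two scissor registers, a ZMIN/ZMAX pair and a
 * six-register transform block, hence the per-viewport strides of 8, 8
 * and 24 bytes in the register offsets above.  With id hard-coded to 0
 * only the first viewport is ever emitted; the bounds check is presumably
 * there for the day more than one viewport gets wired up.
 */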
struct r700_vertex_program *r700TranslateVertexShader(GLcontext *ctx,
                                                      struct gl_vertex_program *mesa_vp)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp;
    unsigned int i;

    vp = _mesa_calloc(sizeof(*vp));
    vp->mesa_program = (struct gl_vertex_program *)_mesa_clone_program(ctx, &mesa_vp->Base);

    if (mesa_vp->IsPositionInvariant) {
        _mesa_insert_mvp_code(ctx, vp->mesa_program);
    }

    for (i = 0; i < context->nNumActiveAos; i++) {
        vp->aos_desc[i].size   = context->stream_desc[i].size;
        vp->aos_desc[i].stride = context->stream_desc[i].stride;
        vp->aos_desc[i].type   = context->stream_desc[i].type;
    }

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        vp->r700AsmCode.bR6xx = 1;
    }

    //Init_Program
    Init_r700_AssemblerBase(SPT_VP, &(vp->r700AsmCode), &(vp->r700Shader));
    Map_Vertex_Program(ctx, vp, vp->mesa_program);

    /* XXX: vp and the cloned mesa program are leaked on the error paths below */
    if (GL_FALSE == Find_Instruction_Dependencies_vp(vp, vp->mesa_program)) {
        return NULL;
    }

    if (GL_FALSE == AssembleInstr(vp->mesa_program->Base.NumInstructions,
                                  &(vp->mesa_program->Base.Instructions[0]),
                                  &(vp->r700AsmCode))) {
        return NULL;
    }

    if (GL_FALSE == Process_Vertex_Exports(&(vp->r700AsmCode),
                                           vp->mesa_program->Base.OutputsWritten)) {
        return NULL;
    }

    vp->r700Shader.nRegs = (vp->r700AsmCode.number_used_registers == 0)
                               ? 0
                               : (vp->r700AsmCode.number_used_registers - 1);
    vp->r700Shader.nParamExports = vp->r700AsmCode.number_of_exports;

    vp->translated = GL_TRUE;
    return vp;
}
void *r700GetActiveVpShaderBo(GLcontext *ctx)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;

    if (vp)
        return vp->shaderbo;
    else
        return NULL;
}
void *r700GetActiveVpShaderConstBo(GLcontext *ctx)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;

    if (vp)
        return vp->constbo0;
    else
        return NULL;
}
static int check_vtx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = context->radeon.tcl.aos_count * 18;

    if (count)
        count += 6;

    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}
static int check_cb(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = 7;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 11;

    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}
static int check_vs_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->vs.num_consts * 4;

    if (count)
        count += 2;

    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}
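/* Each vertex-shader constant is a four-component vector, so it costs
 * 4 data dwords; the extra 2 dwords presumably cover the header of the
 * SET_ALU_CONST packet emitted once for the whole range.
 */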
static void r700SendStencilState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(4);
    R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}
void r700SelectFragmentShader(GLcontext *ctx)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_fragment_program *fp =
        (struct r700_fragment_program *)(ctx->FragmentProgram._Current);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        fp->r700AsmCode.bR6xx = 1;
    }

    if (GL_FALSE == fp->translated)
        r700TranslateFragmentShader(fp, &(fp->mesa_program), ctx);
}
static void r700SendSXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SX_MISC, r700->SX_MISC.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_TEST_CONTROL, r700->SX_ALPHA_TEST_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}
static int check_ucp(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int i;
    int count = 0;

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled)
            count += 6;
    }

    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}
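/* A user clip plane is four floats, so each enabled plane costs a
 * 2-dword register-sequence header plus 4 data dwords = 6.
 */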
static void r700SendAAState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_CONFIG, r700->PA_SC_AA_CONFIG.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_MASK, r700->PA_SC_AA_MASK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}
static void r700SendCLState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_CL_CLIP_CNTL, r700->PA_CL_CLIP_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VTE_CNTL, r700->PA_CL_VTE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VS_OUT_CNTL, r700->PA_CL_VS_OUT_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_NANINF_CNTL, r700->PA_CL_NANINF_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}
static void r700SendGBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(PA_CL_GB_VERT_CLIP_ADJ, 4);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_DISC_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_DISC_ADJ.u32All);
    END_BATCH();
    COMMIT_BATCH();
}
static void r700SendCBCLRCMPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_CLRCMP_CONTROL, 4);
    R600_OUT_BATCH(r700->CB_CLRCMP_CONTROL.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_SRC.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_DST.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_MSK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}
static int check_tx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    unsigned int i, count = 0;
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t)
                count++;
        }
    }

    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count * 31;
}
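/* 31 dwords per enabled texture unit: presumably the resource, sampler
 * and border-color register groups emitted elsewhere for each unit.
 * Units without a texture object bound are skipped here, so the estimate
 * stays in step with the emit side.
 */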
static void r700SendCBBlendColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_BLEND_RED, 4);
    R600_OUT_BATCH(r700->CB_BLEND_RED.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_GREEN.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_BLUE.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_ALPHA.u32All);
    END_BATCH();
    COMMIT_BATCH();
}
static void r700SendSCState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(15);
    R600_OUT_BATCH_REGVAL(R7xx_PA_SC_EDGERULE, r700->PA_SC_EDGERULE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_STIPPLE, r700->PA_SC_LINE_STIPPLE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MPASS_PS_CNTL, r700->PA_SC_MPASS_PS_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MODE_CNTL, r700->PA_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_CNTL, r700->PA_SC_LINE_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}
void r700SelectVertexShader(GLcontext *ctx)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program_cont *vpc;
    struct r700_vertex_program *vp;
    unsigned int i;
    GLboolean match;
    GLbitfield InputsRead;

    vpc = (struct r700_vertex_program_cont *)ctx->VertexProgram._Current;

    InputsRead = vpc->mesa_program.Base.InputsRead;
    if (vpc->mesa_program.IsPositionInvariant) {
        InputsRead |= VERT_BIT_POS;
    }

    for (vp = vpc->progs; vp; vp = vp->next) {
        match = GL_TRUE;
        for (i = 0; i < context->nNumActiveAos; i++) {
            if (vp->aos_desc[i].size != context->stream_desc[i].size ||
                vp->aos_desc[i].format != context->stream_desc[i].format) {
                match = GL_FALSE;
                break;
            }
        }
        if (match) {
            context->selected_vp = vp;
            return;
        }
    }

    vp = r700TranslateVertexShader(ctx, &(vpc->mesa_program));
    if (!vp) {
        radeon_error("Failed to translate vertex shader.\n");
        return;
    }
    vp->next = vpc->progs;
    vpc->progs = vp;
    context->selected_vp = vp;
}
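/* Vertex shaders are cached per attribute layout: the list hanging off
 * the program container is searched for a variant whose per-stream size
 * and format match the currently bound arrays, and a new variant is
 * translated only on a miss.  This relies on r700SetVertexFormat() having
 * already filled in stream_desc[] and nNumActiveAos for the current draw.
 */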
static void r700SendSUState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(PA_SU_SC_MODE_CNTL, r700->PA_SU_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POINT_SIZE, 4);
    R600_OUT_BATCH(r700->PA_SU_POINT_SIZE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POINT_MINMAX.u32All);
    R600_OUT_BATCH(r700->PA_SU_LINE_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_VTX_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}
static void r700SendVGTState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(41);
    R600_OUT_BATCH_REGSEQ(VGT_MAX_VTX_INDX, 4);
    R600_OUT_BATCH(r700->VGT_MAX_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_MIN_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_INDX_OFFSET.u32All);
    R600_OUT_BATCH(r700->VGT_MULTI_PRIM_IB_RESET_INDX.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_OUTPUT_PATH_CNTL, 13);
    R600_OUT_BATCH(r700->VGT_OUTPUT_PATH_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MAX_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MIN_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_REUSE_DEPTH.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_PRIM_TYPE.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_FIRST_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GS_MODE.u32All);

    R600_OUT_BATCH_REGVAL(VGT_PRIMITIVEID_EN, r700->VGT_PRIMITIVEID_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_MULTI_PRIM_IB_RESET_EN, r700->VGT_MULTI_PRIM_IB_RESET_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_0, r700->VGT_INSTANCE_STEP_RATE_0.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_1, r700->VGT_INSTANCE_STEP_RATE_1.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_STRMOUT_EN, 3);
    R600_OUT_BATCH(r700->VGT_STRMOUT_EN.u32All);
    R600_OUT_BATCH(r700->VGT_REUSE_OFF.u32All);
    R600_OUT_BATCH(r700->VGT_VTX_CNT_EN.u32All);

    R600_OUT_BATCH_REGVAL(VGT_STRMOUT_BUFFER_EN, r700->VGT_STRMOUT_BUFFER_EN.u32All);
    END_BATCH();
    COMMIT_BATCH();
}
static void r700SendPolyState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(10);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 2);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_DB_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_CLAMP.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_OFFSET.u32All);
    END_BATCH();
    COMMIT_BATCH();
}
static void r700SendFSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* XXX fixme
     * R6xx chips require a FS be emitted, even if it's not used.
     * since we aren't using FS yet, just send the VS address to make
     * the kernel command checker happy
     */
    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
    r700->fs.SQ_PGM_START_FS.u32All = r700->vs.SQ_PGM_START_VS.u32All;
    r700->fs.SQ_PGM_RESOURCES_FS.u32All = 0;
    r700->fs.SQ_PGM_CF_OFFSET_FS.u32All = 0;
    /* XXX */

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
    R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
    R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
                         pbo,
                         r700->fs.SQ_PGM_START_FS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_FS, r700->fs.SQ_PGM_RESOURCES_FS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_FS, r700->fs.SQ_PGM_CF_OFFSET_FS.u32All);
    END_BATCH();
    COMMIT_BATCH();
}