Example #1
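/* Emit color buffer blend state.  Chips below RV770 get the global
 * CB_BLEND_CONTROL, chips above R600 get per-render-target
 * CB_BLENDn_CONTROL registers for each enabled target, and
 * CB_COLOR_CONTROL is emitted for every family.
 */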
static void r700SendCBBlendState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	unsigned int ui;
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
		BEGIN_BATCH_NO_AUTOSTATE(3);
		R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
		END_BATCH();
	}

	BEGIN_BATCH_NO_AUTOSTATE(3);
	R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);
	END_BATCH();

	if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
		for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
			if (r700->render_target[ui].enabled) {
				BEGIN_BATCH_NO_AUTOSTATE(3);
				R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
						      r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
				END_BATCH();
			}
		}
	}

	COMMIT_BATCH();
}
Example #2
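/* Emit shared color buffer state: the R6xx-only clear and fog color
 * registers on pre-RV770 chips, then the target mask, shader mask and
 * shader control.
 */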
static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
		BEGIN_BATCH_NO_AUTOSTATE(11);
		R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
		R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
		R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
		R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
		R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
		R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
		R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
		R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
		R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
		END_BATCH();
	}

	BEGIN_BATCH_NO_AUTOSTATE(7);
	R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
	R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
	R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);
	R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
	END_BATCH();
	COMMIT_BATCH();
}
Example #3
/* Fire a section of the retained (indexed_verts) buffer as a regular
 * primitive.
 */
extern void radeonEmitVbufPrim( r100ContextPtr rmesa,
				GLuint vertex_format,
				GLuint primitive,
				GLuint vertex_nr )
{
   BATCH_LOCALS(&rmesa->radeon);

   assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState(&rmesa->radeon);
   radeonEmitScissor(rmesa);

#if RADEON_OLD_PACKETS
   BEGIN_BATCH(8);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 3);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
     OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo, rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   } else {
     OUT_BATCH(rmesa->ioctl.vertex_offset);
   }

   OUT_BATCH(vertex_nr);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |  RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
	     RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	     RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
	     (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (rmesa->radeon.radeonScreen->kernel_mm) {
     radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
			   rmesa->ioctl.bo,
			   RADEON_GEM_DOMAIN_GTT,
			   0, 0);
   }

   END_BATCH();

#else
   BEGIN_BATCH(4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_DRAW_VBUF, 1);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
	     RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
	     RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	     RADEON_CP_VC_CNTL_MAOS_ENABLE |
	     RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
	     (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
   END_BATCH();
#endif
}
Example #4
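/* Emit viewport state for viewport 'id' (only viewport 0 is used):
 * its scissor, depth range (ZMIN/ZMAX) and clip scale/offset transform.
 */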
static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	int id = 0;
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	if (id >= R700_MAX_VIEWPORTS)
		return;

	if (!r700->viewport[id].enabled)
		return;

	BEGIN_BATCH_NO_AUTOSTATE(16);
	R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
	R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);
	R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
	R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);
	R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
	END_BATCH();

	COMMIT_BATCH();

}
Example #5
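/* Reset the vertex base and start-instance CTL constants to 0, then set
 * up a fetch constant for each vertex attribute read by the current
 * vertex program.
 */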
static void r700SendVTXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t         *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    unsigned int i, j = 0;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.tcl.aos_count == 0)
	    return;

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);
    END_BATCH();
    COMMIT_BATCH();

    for(i=0; i<VERT_ATTRIB_MAX; i++) {
	    if(vp->mesa_program->Base.InputsRead & (1 << i))
	    {
                r700SetupVTXConstants(ctx,
				      (void*)(&context->radeon.tcl.aos[j]),
				      &(context->stream_desc[j]));
		j++;
	    }
    }
}
Example #6
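/* Start an occlusion query: clear the result buffer, then emit a
 * ZPASS_DONE event so the hardware writes its Z-pass counts into it.
 */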
static void r700SendQueryBegin(GLcontext *ctx, struct radeon_state_atom *atom)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_query_object *query = radeon->query.current;
	BATCH_LOCALS(radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	/* clear the buffer */
	radeon_bo_map(query->bo, GL_FALSE);
	memset(query->bo->ptr, 0, 4 * 2 * sizeof(uint64_t)); /* 4 DBs, 2 qwords each */
	radeon_bo_unmap(query->bo);

	radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
				      query->bo,
				      0, RADEON_GEM_DOMAIN_GTT);

	BEGIN_BATCH_NO_AUTOSTATE(4 + 2);
	R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 2));
	R600_OUT_BATCH(ZPASS_DONE);
	R600_OUT_BATCH(query->curr_offset); /* hw writes qwords */
	R600_OUT_BATCH(0x00000000);
	R600_OUT_BATCH_RELOC(VGT_EVENT_INITIATOR, query->bo, 0, 0, RADEON_GEM_DOMAIN_GTT, 0);
	END_BATCH();
	query->emitted_begin = GL_TRUE;
}
Example #7
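/* Emit the whole scissor stack: screen scissor, window offset/scissor
 * with the four clip rectangles and their rule, and the generic scissor.
 */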
static void r700SendScissorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	BEGIN_BATCH_NO_AUTOSTATE(22);
	R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
	R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

	R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 12);
	R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
	R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);

	R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
	R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);
	END_BATCH();
	COMMIT_BATCH();
}
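Example #8
/* Emit texture state for unit 'i'.  With a miptree the texture offset
 * is relocated against its BO (face 5 for cube maps); with no texture
 * bound, fall back to the legacy texture heap offset.
 */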
static void tex_emit(GLcontext *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   uint32_t dwords = atom->cmd_size;
   int i = atom->idx;
   radeonTexObj *t = r100->state.texture.unit[i].texobj;
   radeon_mipmap_level *lvl;

   if (t && t->mt && !t->image_override)
     dwords += 2;
   BEGIN_BATCH_NO_AUTOSTATE(dwords);

   OUT_BATCH_TABLE(atom->cmd, 3);
   if (t && t->mt && !t->image_override) {
     if ((ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_CUBE_BIT)) {
	lvl = &t->mt->levels[0];
	OUT_BATCH_RELOC(lvl->faces[5].offset, t->mt->bo, lvl->faces[5].offset,
			RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
     } else {
        OUT_BATCH_RELOC(t->tile_bits, t->mt->bo, 0,
		     RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
     }
   } else if (!t) {
     /* workaround for old CS mechanism */
     OUT_BATCH(r100->radeon.radeonScreen->texOffset[RADEON_LOCAL_TEX_HEAP]);
   } else {
     OUT_BATCH(t->override_offset);
   }

   OUT_BATCH_TABLE((atom->cmd+4), 5);
   END_BATCH();
}
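Example #9
/* Emit cube map face offsets for unit 'i': faces 0-4 of the base level
 * are relocated against the miptree BO (face 5 goes out with the
 * regular texture offset, see tex_emit).
 */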
static void cube_emit(GLcontext *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   uint32_t dwords = atom->check(ctx, atom);
   int i = atom->idx, j;
   radeonTexObj *t = r100->state.texture.unit[i].texobj;
   radeon_mipmap_level *lvl;

   if (!(ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_CUBE_BIT))
	return;

   if (!t)
	return;

   if (!t->mt)
	return;

   BEGIN_BATCH_NO_AUTOSTATE(dwords);
   OUT_BATCH_TABLE(atom->cmd, 3);
   lvl = &t->mt->levels[0];
   for (j = 0; j < 5; j++) {
	OUT_BATCH_RELOC(lvl->faces[j].offset, t->mt->bo, lvl->faces[j].offset,
			RADEON_GEM_DOMAIN_VRAM, 0, 0);
   }
   END_BATCH();
}
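Example #10
/* Kernel-CS variant of cube map emission: pick the PP_CUBIC_OFFSET_Tn
 * register block for unit 'i' and write each face offset with its own
 * packet and reloc.
 */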
static void cube_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   uint32_t dwords = atom->check(ctx, atom);
   int i = atom->idx, j;
   radeonTexObj *t = r100->state.texture.unit[i].texobj;
   radeon_mipmap_level *lvl;
   uint32_t base_reg;

   if (!(ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_CUBE_BIT))
	return;

   if (!t)
	return;

   if (!t->mt)
	return;

   switch(i) {
	case 1: base_reg = RADEON_PP_CUBIC_OFFSET_T1_0; break;
	case 2: base_reg = RADEON_PP_CUBIC_OFFSET_T2_0; break;
	default:
	case 0: base_reg = RADEON_PP_CUBIC_OFFSET_T0_0; break;
   }
   BEGIN_BATCH_NO_AUTOSTATE(dwords);
   OUT_BATCH_TABLE(atom->cmd, 2);
   lvl = &t->mt->levels[0];
   for (j = 0; j < 5; j++) {
	OUT_BATCH(CP_PACKET0(base_reg + (4 * j), 0));
	OUT_BATCH_RELOC(lvl->faces[j].offset, t->mt->bo, lvl->faces[j].offset,
			RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
   }
   END_BATCH();
}
Example #11
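/* Fire an indexed draw from the element DMA buffer: a DRAW_INDX_2
 * header followed by an INDX_BUFFER packet.  The legacy path embeds the
 * buffer reloc in the packet; the kernel-mm path appends it to the CS.
 */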
static void r200FireEB(r200ContextPtr rmesa, int vertex_count, int type)
{
	BATCH_LOCALS(&rmesa->radeon);

	if (vertex_count > 0) {
		BEGIN_BATCH(8+2);
		OUT_BATCH_PACKET3_CLIP(R200_CP_CMD_3D_DRAW_INDX_2, 0);
		OUT_BATCH(R200_VF_PRIM_WALK_IND |
			  R200_VF_COLOR_ORDER_RGBA | 
			  (vertex_count << 16) |
			  type);
		
		if (!rmesa->radeon.radeonScreen->kernel_mm) {
			OUT_BATCH_PACKET3(R200_CP_CMD_INDX_BUFFER, 2);
			OUT_BATCH((0x80 << 24) | (0 << 16) | 0x810);
			OUT_BATCH_RELOC(rmesa->radeon.tcl.elt_dma_offset,
					rmesa->radeon.tcl.elt_dma_bo,
					rmesa->radeon.tcl.elt_dma_offset,
					RADEON_GEM_DOMAIN_GTT, 0, 0);
			OUT_BATCH((vertex_count + 1)/2);
		} else {
			OUT_BATCH_PACKET3(R200_CP_CMD_INDX_BUFFER, 2);
			OUT_BATCH((0x80 << 24) | (0 << 16) | 0x810);
			OUT_BATCH(rmesa->radeon.tcl.elt_dma_offset);
			OUT_BATCH((vertex_count + 1)/2);
			radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
					      rmesa->radeon.tcl.elt_dma_bo,
					      RADEON_GEM_DOMAIN_GTT, 0, 0);
		}
		END_BATCH();
	}
}
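Example #12
/* Program the color buffer as a blit destination: translate the Mesa
 * format to a hardware color format, fold the tiling flags into the
 * pitch, then emit size, planemask and blend state plus the color
 * offset/pitch relocs.
 */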
static inline void emit_cb_setup(struct r200_context *r200,
				 struct radeon_bo *bo,
				 intptr_t offset,
				 gl_format mesa_format,
				 unsigned pitch,
				 unsigned width,
				 unsigned height)
{
    uint32_t dst_pitch = pitch;
    uint32_t dst_format = 0;
    BATCH_LOCALS(&r200->radeon);

    /* XXX others?  BE/LE? */
    switch (mesa_format) {
    case MESA_FORMAT_ARGB8888:
    case MESA_FORMAT_XRGB8888:
    case MESA_FORMAT_RGBA8888:
    case MESA_FORMAT_RGBA8888_REV:
	    dst_format = RADEON_COLOR_FORMAT_ARGB8888;
	    break;
    case MESA_FORMAT_RGB565:
	    dst_format = RADEON_COLOR_FORMAT_RGB565;
	    break;
    case MESA_FORMAT_ARGB4444:
	    dst_format = RADEON_COLOR_FORMAT_ARGB4444;
	    break;
    case MESA_FORMAT_ARGB1555:
	    dst_format = RADEON_COLOR_FORMAT_ARGB1555;
	    break;
    case MESA_FORMAT_A8:
    case MESA_FORMAT_L8:
    case MESA_FORMAT_I8:
	    dst_format = RADEON_COLOR_FORMAT_RGB8;
	    break;
    default:
	    break;
    }

    if (bo->flags & RADEON_BO_FLAGS_MACRO_TILE)
	dst_pitch |= R200_COLOR_TILE_ENABLE;
    if (bo->flags & RADEON_BO_FLAGS_MICRO_TILE)
	dst_pitch |= R200_COLOR_MICROTILE_ENABLE;

    BEGIN_BATCH_NO_AUTOSTATE(22);
    OUT_BATCH_REGVAL(R200_RE_AUX_SCISSOR_CNTL, 0);
    OUT_BATCH_REGVAL(R200_RE_CNTL, 0);
    OUT_BATCH_REGVAL(RADEON_RE_TOP_LEFT, 0);
    OUT_BATCH_REGVAL(RADEON_RE_WIDTH_HEIGHT, (((width - 1) << RADEON_RE_WIDTH_SHIFT) |
					      ((height - 1) << RADEON_RE_HEIGHT_SHIFT)));
    OUT_BATCH_REGVAL(RADEON_RB3D_PLANEMASK, 0xffffffff);
    OUT_BATCH_REGVAL(RADEON_RB3D_BLENDCNTL, RADEON_SRC_BLEND_GL_ONE | RADEON_DST_BLEND_GL_ZERO);
    OUT_BATCH_REGVAL(RADEON_RB3D_CNTL, dst_format);

    OUT_BATCH_REGSEQ(RADEON_RB3D_COLOROFFSET, 1);
    OUT_BATCH_RELOC(offset, bo, offset, 0, RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0);
    OUT_BATCH_REGSEQ(RADEON_RB3D_COLORPITCH, 1);
    OUT_BATCH_RELOC(dst_pitch, bo, dst_pitch, 0, RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0);

    END_BATCH();
}
Example #13
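/* Emit depth buffer state: stencil/depth clear values, depth and shader
 * control, render control/override and alpha-to-mask.
 */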
static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	BEGIN_BATCH_NO_AUTOSTATE(17);

	R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
	R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
	R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);

	R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
	R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);

	R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
	R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
	R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);

	R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);

	END_BATCH();
	COMMIT_BATCH();
}
Example #14
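/* Program SE_VF_MAX_VTX_INDX, presumably the highest vertex index the
 * vertex fetcher should accept for subsequent draws.
 */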
void r200EmitMaxVtxIndex(r200ContextPtr rmesa, int count)
{
   BATCH_LOCALS(&rmesa->radeon);

   BEGIN_BATCH_NO_AUTOSTATE(2);
   OUT_BATCH(CP_PACKET0(R200_SE_VF_MAX_VTX_INDX, 0));
   OUT_BATCH(count);
   END_BATCH();
}
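Example #15
/* Emit a vector state atom: cmd[0] holds the vector register header and
 * the payload follows it.
 */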
static void vec_emit(struct gl_context *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   uint32_t dwords = atom->check(ctx, atom);

   BEGIN_BATCH_NO_AUTOSTATE(dwords);
   OUT_VEC(atom->cmd[0], atom->cmd+1);
   END_BATCH();
}
Example #16
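/* Emit fetch shader state; no fetch shader is generated yet, so the VS
 * address is reused (see the XXX note below).
 */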
static void r700SendFSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	struct radeon_bo * pbo;
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	/* XXX fixme
	 * R6xx chips require an FS to be emitted, even if it's not used.
	 * Since we aren't using an FS yet, just send the VS address to keep
	 * the kernel command checker happy.
	 */
	pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
	r700->fs.SQ_PGM_START_FS.u32All = r700->vs.SQ_PGM_START_VS.u32All;
	r700->fs.SQ_PGM_RESOURCES_FS.u32All = 0;
	r700->fs.SQ_PGM_CF_OFFSET_FS.u32All = 0;
	/* XXX */

	if (!pbo)
		return;

	r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

	BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
	R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
	R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
	R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
			     pbo,
			     r700->fs.SQ_PGM_START_FS.u32All,
			     RADEON_GEM_DOMAIN_GTT, 0, 0);
	END_BATCH();

	BEGIN_BATCH_NO_AUTOSTATE(6);
	R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_FS, r700->fs.SQ_PGM_RESOURCES_FS.u32All);
	R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_FS, r700->fs.SQ_PGM_CF_OFFSET_FS.u32All);
	END_BATCH();

	COMMIT_BATCH();

}
Example #17
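/* Emit the scissor rectangle and toggle RADEON_SCISSOR_ENABLE in
 * PP_CNTL to match the current scissor state.
 */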
static void radeonEmitScissor(r100ContextPtr rmesa)
{
    BATCH_LOCALS(&rmesa->radeon);
    if (rmesa->radeon.state.scissor.enabled) {
        BEGIN_BATCH(6);
        OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
        OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] | RADEON_SCISSOR_ENABLE);
        OUT_BATCH(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
        OUT_BATCH((rmesa->radeon.state.scissor.rect.y1 << 16) |
                  rmesa->radeon.state.scissor.rect.x1);
        OUT_BATCH(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
        OUT_BATCH(((rmesa->radeon.state.scissor.rect.y2) << 16) |
                  (rmesa->radeon.state.scissor.rect.x2));
        END_BATCH();
    } else {
        BEGIN_BATCH(2);
        OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
        OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] & ~RADEON_SCISSOR_ENABLE);
        END_BATCH();
    }
}
Example #18
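/* Emit the depth target: size/view and base/info, with the base address
 * relocated against the depth renderbuffer's BO.  Chips between R600
 * and RV770 also need a SURFACE_BASE_UPDATE event for the depth surface.
 */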
static void r700SendDepthTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	struct radeon_renderbuffer *rrb;
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	rrb = radeon_get_depthbuffer(&context->radeon);
	if (!rrb || !rrb->bo) {
		return;
	}

	r700SetDepthTarget(context);

	BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
	R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
	R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
	R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
	R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 2);
	R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
	R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
	R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
			     rrb->bo,
			     r700->DB_DEPTH_BASE.u32All,
			     0, RADEON_GEM_DOMAIN_VRAM, 0);
	END_BATCH();

	if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
	    (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
		BEGIN_BATCH_NO_AUTOSTATE(2);
		R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
		R600_OUT_BATCH(1 << 0);
		END_BATCH();
	}

	COMMIT_BATCH();

}
Example #19
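/* Finish a query sample: point RB3D_ZPASS_ADDR at the current slot in
 * the query BO, advance the slot, and force the begin state to be
 * re-emitted for the next sample.
 */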
static void r100_emit_query_finish(radeonContextPtr radeon)
{
   BATCH_LOCALS(radeon);
   struct radeon_query_object *query = radeon->query.current;

   BEGIN_BATCH_NO_AUTOSTATE(4);
   OUT_BATCH(CP_PACKET0(RADEON_RB3D_ZPASS_ADDR, 0));
   OUT_BATCH_RELOC(0, query->bo, query->curr_offset, 0, RADEON_GEM_DOMAIN_GTT, 0);
   END_BATCH();
   query->curr_offset += sizeof(uint32_t);
   assert(query->curr_offset < RADEON_QUERY_PAGE_SIZE);
   query->emitted_begin = GL_FALSE;
}
Example #20
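/* Emit front- and back-face stencil ref/mask state. */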
static void r700SendStencilState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);

	BEGIN_BATCH_NO_AUTOSTATE(4);
	R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
	R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
	R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);
	END_BATCH();
	COMMIT_BATCH();
}
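Example #21
/* Kernel-CS variant of texture state emission for unit 'i': explicit
 * register packets for the filter/format words, the relocated texture
 * offset when a miptree or override BO exists, then combiner setup and
 * border color.
 */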
static void tex_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   uint32_t dwords = atom->cmd_size;
   int i = atom->idx;
   radeonTexObj *t = r100->state.texture.unit[i].texobj;
   radeon_mipmap_level *lvl;
   int hastexture = 1;

   if (!t)
	hastexture = 0;
   else {
	if (!t->mt && !t->bo)
		hastexture = 0;
   }
   dwords += 1;
   if (hastexture)
     dwords += 2;
   else
     dwords -= 2;
   BEGIN_BATCH_NO_AUTOSTATE(dwords);

   OUT_BATCH(CP_PACKET0(RADEON_PP_TXFILTER_0 + (24 * i), 1));
   OUT_BATCH_TABLE((atom->cmd + 1), 2);

   if (hastexture) {
     OUT_BATCH(CP_PACKET0(RADEON_PP_TXOFFSET_0 + (24 * i), 0));
     if (t->mt && !t->image_override) {
        if ((ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_CUBE_BIT)) {
            lvl = &t->mt->levels[t->minLod];
	    OUT_BATCH_RELOC(lvl->faces[5].offset, t->mt->bo, lvl->faces[5].offset,
			RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
        } else {
           OUT_BATCH_RELOC(t->tile_bits, t->mt->bo, get_base_teximage_offset(t),
		     RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
        }
      } else {
	if (t->bo)
            OUT_BATCH_RELOC(t->tile_bits, t->bo, 0,
                            RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
      }
   }

   OUT_BATCH(CP_PACKET0(RADEON_PP_TXCBLEND_0 + (i * 24), 1));
   OUT_BATCH_TABLE((atom->cmd+4), 2);
   OUT_BATCH(CP_PACKET0(RADEON_PP_BORDER_COLOR_0 + (i * 4), 0));
   OUT_BATCH((atom->cmd[TEX_PP_BORDER_COLOR]));
   END_BATCH();
}
Example #22
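/* Re-emit a query object's saved command table and mark its begin as
 * emitted.
 */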
void radeon_emit_queryobj(struct gl_context *ctx, struct radeon_state_atom *atom)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	BATCH_LOCALS(radeon);
	int dwords;

	dwords = (*atom->check) (ctx, atom);

	BEGIN_BATCH_NO_AUTOSTATE(dwords);
	OUT_BATCH_TABLE(atom->cmd, dwords);
	END_BATCH();

	radeon->query.current->emitted_begin = GL_TRUE;
}
Example #23
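/* Emit antialiasing state: AA config, sample locations and the AA mask. */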
static void r700SendAAState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);

	BEGIN_BATCH_NO_AUTOSTATE(12);
	R600_OUT_BATCH_REGVAL(PA_SC_AA_CONFIG, r700->PA_SC_AA_CONFIG.u32All);
	R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_MCTX.u32All);
	R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX.u32All);
	R600_OUT_BATCH_REGVAL(PA_SC_AA_MASK, r700->PA_SC_AA_MASK.u32All);
	END_BATCH();
	COMMIT_BATCH();
}
Example #24
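/* Flush and free the destination color cache and the Z cache. */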
void r300EmitCacheFlush(r300ContextPtr rmesa)
{
	BATCH_LOCALS(&rmesa->radeon);

	BEGIN_BATCH_NO_AUTOSTATE(4);
	OUT_BATCH_REGVAL(R300_RB3D_DSTCACHE_CTLSTAT,
		R300_RB3D_DSTCACHE_CTLSTAT_DC_FREE_FREE_3D_TAGS |
		R300_RB3D_DSTCACHE_CTLSTAT_DC_FLUSH_FLUSH_DIRTY_3D);
	OUT_BATCH_REGVAL(R300_ZB_ZCACHE_CTLSTAT,
		R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE |
		R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);
	END_BATCH();
	COMMIT_BATCH();
}
Example #25
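/* Emit shader export state: SX_MISC plus the alpha test control and
 * reference value.
 */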
static void r700SendSXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	BEGIN_BATCH_NO_AUTOSTATE(9);
	R600_OUT_BATCH_REGVAL(SX_MISC, r700->SX_MISC.u32All);
	R600_OUT_BATCH_REGVAL(SX_ALPHA_TEST_CONTROL, r700->SX_ALPHA_TEST_CONTROL.u32All);
	R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
	END_BATCH();
	COMMIT_BATCH();
}
Example #26
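/* Close the open element packet: patch the real dword and element
 * counts into its header, account for the emitted dwords, and on
 * kernel-mm with old packets append the vertex BO reloc.
 */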
void radeonFlushElts( GLcontext *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   BATCH_LOCALS(&rmesa->radeon);
   int nr;
   uint32_t *cmd = (uint32_t *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_start);
   int dwords = (rmesa->radeon.cmdbuf.cs->section_ndw - rmesa->radeon.cmdbuf.cs->section_cdw);

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->radeon.dma.flush == radeonFlushElts );
   rmesa->radeon.dma.flush = NULL;

   nr = rmesa->tcl.elt_used;

#if RADEON_OLD_PACKETS
   if (rmesa->radeon.radeonScreen->kernel_mm) {
     dwords -= 2;
   }
#endif

#if RADEON_OLD_PACKETS
   cmd[1] |= (dwords + 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords + 2) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif

   rmesa->radeon.cmdbuf.cs->cdw += dwords;
   rmesa->radeon.cmdbuf.cs->section_cdw += dwords;

#if RADEON_OLD_PACKETS
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
			    rmesa->ioctl.bo,
			    RADEON_GEM_DOMAIN_GTT,
			    0, 0);
   }
#endif

   END_BATCH();

   if (RADEON_DEBUG & RADEON_SYNC) {
      fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->radeon.glCtx );
   }

}
Example #27
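/* Emit the pixel shader: sync its BO, program the start address with a
 * reloc, then the resource/export/CF-offset registers and a default
 * loop constant.
 */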
static void r700SendPSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveFpShaderBo(GL_CONTEXT(context));

    if (!pbo)
	    return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_PS, 1);
    R600_OUT_BATCH(r700->ps.SQ_PGM_START_PS.u32All);
    R600_OUT_BATCH_RELOC(r700->ps.SQ_PGM_START_PS.u32All,
		         pbo,
		         r700->ps.SQ_PGM_START_PS.u32All,
		         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_PS, r700->ps.SQ_PGM_RESOURCES_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_EXPORTS_PS, r700->ps.SQ_PGM_EXPORTS_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_PS, r700->ps.SQ_PGM_CF_OFFSET_PS.u32All);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(SQ_LOOP_CONST_0, 0x01000FFF);
    END_BATCH();

    COMMIT_BATCH();

}
Example #28
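/* r100 version of the blit color buffer setup: translate the Mesa
 * format to a hardware color format and emit size, planemask and blend
 * state plus the color offset/pitch relocs.
 */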
static inline void emit_cb_setup(struct r100_context *r100,
				 struct radeon_bo *bo,
				 intptr_t offset,
				 gl_format mesa_format,
				 unsigned pitch,
				 unsigned width,
				 unsigned height)
{
    uint32_t dst_pitch = pitch;
    uint32_t dst_format = 0;
    BATCH_LOCALS(&r100->radeon);

    /* XXX others?  BE/LE? */
    switch (mesa_format) {
    case MESA_FORMAT_ARGB8888:
    case MESA_FORMAT_XRGB8888:
	    dst_format = RADEON_COLOR_FORMAT_ARGB8888;
	    break;
    case MESA_FORMAT_RGB565:
	    dst_format = RADEON_COLOR_FORMAT_RGB565;
	    break;
    case MESA_FORMAT_ARGB4444:
	    dst_format = RADEON_COLOR_FORMAT_ARGB4444;
	    break;
    case MESA_FORMAT_ARGB1555:
	    dst_format = RADEON_COLOR_FORMAT_ARGB1555;
	    break;
    case MESA_FORMAT_A8:
	    dst_format = RADEON_COLOR_FORMAT_RGB8;
	    break;
    default:
	    break;
    }

    BEGIN_BATCH_NO_AUTOSTATE(18);
    OUT_BATCH_REGVAL(RADEON_RE_TOP_LEFT, 0);
    OUT_BATCH_REGVAL(RADEON_RE_WIDTH_HEIGHT, ((width << RADEON_RE_WIDTH_SHIFT) |
					      (height << RADEON_RE_HEIGHT_SHIFT)));
    OUT_BATCH_REGVAL(RADEON_RB3D_PLANEMASK, 0xffffffff);
    OUT_BATCH_REGVAL(RADEON_RB3D_BLENDCNTL, RADEON_SRC_BLEND_GL_ONE | RADEON_DST_BLEND_GL_ZERO);
    OUT_BATCH_REGVAL(RADEON_RB3D_CNTL, dst_format);

    OUT_BATCH_REGSEQ(RADEON_RB3D_COLOROFFSET, 1);
    OUT_BATCH_RELOC(0, bo, 0, 0, RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0);
    OUT_BATCH_REGSEQ(RADEON_RB3D_COLORPITCH, 1);
    OUT_BATCH_RELOC(dst_pitch, bo, dst_pitch, 0, RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0);

    END_BATCH();
}
Example #29
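/* Emit a 3D_DRAW_VBUF_2 packet that draws the mapped primitive type
 * from the bound vertex arrays.
 */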
static void r300EmitVbufPrim(r300ContextPtr rmesa, GLuint primitive, GLuint vertex_nr)
{
	BATCH_LOCALS(&rmesa->radeon);
	int type, num_verts;
	if (RADEON_DEBUG & RADEON_VERTS)
		fprintf(stderr, "%s\n", __func__);

	type = r300PrimitiveType(rmesa, primitive);
	num_verts = r300NumVerts(rmesa, vertex_nr, primitive);

	BEGIN_BATCH(3);
	OUT_BATCH_PACKET3(R300_PACKET3_3D_DRAW_VBUF_2, 0);
	OUT_BATCH(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (num_verts << 16) | type);
	END_BATCH();
}
Example #30
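/* Point the vertex array pointer (LOAD_VBPNTR) at a single interleaved
 * vertex stream of 'vertex_size' dwords per vertex at 'offset' in 'bo'.
 */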
static void r300EmitVertexAOS(r300ContextPtr rmesa, GLuint vertex_size, struct radeon_bo *bo, GLuint offset)
{
	BATCH_LOCALS(&rmesa->radeon);

	radeon_print(RADEON_SWRENDER, RADEON_TRACE,
		"%s:  vertex_size %d, offset 0x%x \n",
			__FUNCTION__, vertex_size, offset);

	BEGIN_BATCH(7);
	OUT_BATCH_PACKET3(R300_PACKET3_3D_LOAD_VBPNTR, 2);
	OUT_BATCH(1);
	OUT_BATCH(vertex_size | (vertex_size << 8));
	OUT_BATCH_RELOC(offset, bo, offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
	END_BATCH();
}