Example 1
/**
 * Ensure all enabled and complete textures are uploaded along with any buffers being used.
 */
GLboolean r300ValidateBuffers(GLcontext * ctx)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	struct radeon_renderbuffer *rrb;
	int i;
	int ret;

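	/* start from an empty BO list for this command stream's space check */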
	radeon_cs_space_reset_bos(rmesa->radeon.cmdbuf.cs);

	/* color buffer */
	rrb = radeon_get_colorbuffer(&rmesa->radeon);
	if (rrb && rrb->bo) {
		radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
						  rrb->bo, 0,
						  RADEON_GEM_DOMAIN_VRAM);
	}

	/* depth buffer */
	rrb = radeon_get_depthbuffer(&rmesa->radeon);
	if (rrb && rrb->bo) {
		radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
						  rrb->bo, 0,
						  RADEON_GEM_DOMAIN_VRAM);
	}
	
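	/* texture buffers: add each enabled unit's override or miptree BO */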
	for (i = 0; i < ctx->Const.MaxTextureImageUnits; ++i) {
		radeonTexObj *t;

		if (!ctx->Texture.Unit[i]._ReallyEnabled)
			continue;

		if (!r300_validate_texture(ctx, ctx->Texture.Unit[i]._Current)) {
			_mesa_warning(ctx,
				      "failed to validate texture for unit %d.\n",
				      i);
		}
		t = radeon_tex_obj(ctx->Texture.Unit[i]._Current);
		if (t->image_override && t->bo)
			radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
							  t->bo,
							  RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
		else if (t->mt && t->mt->bo)
			radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
							  t->mt->bo,
							  RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
	}

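	/* check that everything, plus the reserved DMA buffer, actually fits */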
	ret = radeon_cs_space_check_with_bo(rmesa->radeon.cmdbuf.cs,
					    first_elem(&rmesa->radeon.dma.reserved)->bo,
					    RADEON_GEM_DOMAIN_GTT, 0);
	if (ret)
		return GL_FALSE;
	return GL_TRUE;
}
Example 2
static void r700SetRenderTarget(context_t *context, int id)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
	    return;
    }

    R600_STATECHANGE(context, cb_target);

    /* color buffer */
    r700->render_target[id].CB_COLOR0_BASE.u32All = context->radeon.state.color.draw_offset / 256;

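    /* pitch is programmed in units of 8 pixels and slice in units of 64 pixels, both minus one */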
    nPitchInPixel = rrb->pitch/rrb->cpp;
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All, (nPitchInPixel/8)-1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All, ( (nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight)/64 )-1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ENDIAN_NONE, ENDIAN_shift, ENDIAN_mask);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ARRAY_LINEAR_GENERAL,
             CB_COLOR0_INFO__ARRAY_MODE_shift, CB_COLOR0_INFO__ARRAY_MODE_mask);
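    /* pick the color format and component swap based on bytes per pixel */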
    if(4 == rrb->cpp)
    {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_8_8_8_8,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT, COMP_SWAP_shift, COMP_SWAP_mask);
    }
    else
    {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_5_6_5,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT_REV,
                 COMP_SWAP_shift, COMP_SWAP_mask);
    }
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_CLAMP_bit);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, NUMBER_UNORM, NUMBER_TYPE_shift, NUMBER_TYPE_mask);

    r700->render_target[id].enabled = GL_TRUE;
}
static int check_always_ctx( struct gl_context *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   struct radeon_renderbuffer *rrb, *drb;
   uint32_t dwords;

   rrb = radeon_get_colorbuffer(&r100->radeon);
   if (!rrb || !rrb->bo) {
      return 0;
   }

   drb = radeon_get_depthbuffer(&r100->radeon);

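   /* 10 dwords for the base state, plus extra for the depth and color buffer packets */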
   dwords = 10;
   if (drb)
     dwords += 6;
   if (rrb)
     dwords += 8;

   return dwords;
}
static void ctx_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   struct radeon_renderbuffer *rrb, *drb;
   uint32_t cbpitch = 0;
   uint32_t zbpitch = 0;
   uint32_t dwords = atom->check(ctx, atom);
   uint32_t depth_fmt;

   rrb = radeon_get_colorbuffer(&r100->radeon);
   if (!rrb || !rrb->bo) {
      fprintf(stderr, "no rrb\n");
      return;
   }

   atom->cmd[CTX_RB3D_CNTL] &= ~(0xf << 10);
   if (rrb->cpp == 4)
	atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_ARGB8888;
   else switch (rrb->base.Base.Format) {
   case MESA_FORMAT_RGB565:
	atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_RGB565;
	break;
   case MESA_FORMAT_ARGB4444:
	atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_ARGB4444;
	break;
   case MESA_FORMAT_ARGB1555:
	atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_ARGB1555;
	break;
   default:
	_mesa_problem(ctx, "unexpected format in ctx_emit_cs()");
   }

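   /* color pitch is in pixels; OR in the tiling flags from the BO */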
   cbpitch = (rrb->pitch / rrb->cpp);
   if (rrb->bo->flags & RADEON_BO_FLAGS_MACRO_TILE)
       cbpitch |= R200_COLOR_TILE_ENABLE;
   if (rrb->bo->flags & RADEON_BO_FLAGS_MICRO_TILE)
       cbpitch |= RADEON_COLOR_MICROTILE_ENABLE;

   drb = radeon_get_depthbuffer(&r100->radeon);
   if (drb) {
     zbpitch = (drb->pitch / drb->cpp);
     if (drb->cpp == 4)
        depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
     else
        depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
     atom->cmd[CTX_RB3D_ZSTENCILCNTL] &= ~RADEON_DEPTH_FORMAT_MASK;
     atom->cmd[CTX_RB3D_ZSTENCILCNTL] |= depth_fmt;
     
   }

   BEGIN_BATCH_NO_AUTOSTATE(dwords);

   /* In the CS case we need to split this up */
   OUT_BATCH(CP_PACKET0(packet[0].start, 3));
   OUT_BATCH_TABLE((atom->cmd + 1), 4);

   if (drb) {
     OUT_BATCH(CP_PACKET0(RADEON_RB3D_DEPTHOFFSET, 0));
     OUT_BATCH_RELOC(0, drb->bo, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0);

     OUT_BATCH(CP_PACKET0(RADEON_RB3D_DEPTHPITCH, 0));
     OUT_BATCH(zbpitch);
   }

   OUT_BATCH(CP_PACKET0(RADEON_RB3D_ZSTENCILCNTL, 0));
   OUT_BATCH(atom->cmd[CTX_RB3D_ZSTENCILCNTL]);
   OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 1));
   OUT_BATCH(atom->cmd[CTX_PP_CNTL]);
   OUT_BATCH(atom->cmd[CTX_RB3D_CNTL]);

   if (rrb) {
     OUT_BATCH(CP_PACKET0(RADEON_RB3D_COLOROFFSET, 0));
     OUT_BATCH_RELOC(rrb->draw_offset, rrb->bo, rrb->draw_offset, 0, RADEON_GEM_DOMAIN_VRAM, 0);

     OUT_BATCH(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0));
     OUT_BATCH_RELOC(cbpitch, rrb->bo, cbpitch, 0, RADEON_GEM_DOMAIN_VRAM, 0);
   }

   // if (atom->cmd_size == CTX_STATE_SIZE_NEWDRM) {
   //   OUT_BATCH_TABLE((atom->cmd + 14), 4);
   // }

   END_BATCH();
   BEGIN_BATCH_NO_AUTOSTATE(4);
   OUT_BATCH(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
   OUT_BATCH(0);
   OUT_BATCH(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
   if (rrb) {
       OUT_BATCH(((rrb->base.Base.Width - 1) << RADEON_RE_WIDTH_SHIFT) |
                 ((rrb->base.Base.Height - 1) << RADEON_RE_HEIGHT_SHIFT));
   } else {
       OUT_BATCH(0);
   }
   END_BATCH();
}
Example 5
static void r700SendRenderTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	struct radeon_renderbuffer *rrb;
	BATCH_LOCALS(&context->radeon);
	int id = 0;
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	rrb = radeon_get_colorbuffer(&context->radeon);
	if (!rrb || !rrb->bo) {
		return;
	}

	r700SetRenderTarget(context, 0);

	if (id >= R700_MAX_RENDER_TARGETS)
		return;

	if (!r700->render_target[id].enabled)
		return;

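	/* CB_COLOR0_BASE is emitted with a relocation against the color buffer BO */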
        BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
	R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
	R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
	R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
			     rrb->bo,
			     r700->render_target[id].CB_COLOR0_BASE.u32All,
			     0, RADEON_GEM_DOMAIN_VRAM, 0);
        END_BATCH();

	if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
	    (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
		BEGIN_BATCH_NO_AUTOSTATE(2);
		R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
		R600_OUT_BATCH((2 << id));
		END_BATCH();
	}
	/* Point the CMASK & TILE buffers at the color buffer offset; since we
	 * don't use them this shouldn't cause any issue, and it keeps the
	 * command stream valid.
	 */
	BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
	R600_OUT_BATCH_REGSEQ(CB_COLOR0_TILE + (4 * id), 1);
	R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_TILE.u32All);
	R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
			     rrb->bo,
			     r700->render_target[id].CB_COLOR0_BASE.u32All,
			     0, RADEON_GEM_DOMAIN_VRAM, 0);
	END_BATCH();
	BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
	R600_OUT_BATCH_REGSEQ(CB_COLOR0_FRAG + (4 * id), 1);
	R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_FRAG.u32All);
	R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
			     rrb->bo,
			     r700->render_target[id].CB_COLOR0_BASE.u32All,
			     0, RADEON_GEM_DOMAIN_VRAM, 0);
        END_BATCH();

        BEGIN_BATCH_NO_AUTOSTATE(12);
	R600_OUT_BATCH_REGVAL(CB_COLOR0_SIZE + (4 * id), r700->render_target[id].CB_COLOR0_SIZE.u32All);
	R600_OUT_BATCH_REGVAL(CB_COLOR0_VIEW + (4 * id), r700->render_target[id].CB_COLOR0_VIEW.u32All);
	R600_OUT_BATCH_REGVAL(CB_COLOR0_INFO + (4 * id), r700->render_target[id].CB_COLOR0_INFO.u32All);
	R600_OUT_BATCH_REGVAL(CB_COLOR0_MASK + (4 * id), r700->render_target[id].CB_COLOR0_MASK.u32All);
        END_BATCH();

	COMMIT_BATCH();

}
Example 6
static void ctx_emit(GLcontext *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   struct radeon_renderbuffer *rrb;
   uint32_t cbpitch;
   uint32_t zbpitch, depth_fmt;
   uint32_t dwords = atom->check(ctx, atom);

   /* output the first 5 dwords of the context state */
   BEGIN_BATCH_NO_AUTOSTATE(dwords);
   OUT_BATCH_TABLE(atom->cmd, 5);

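   /* depth buffer offset and pitch (zeros if absent), plus the depth format */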
   rrb = radeon_get_depthbuffer(&r100->radeon);
   if (!rrb) {
     OUT_BATCH(0);
     OUT_BATCH(0);
   } else {
     zbpitch = (rrb->pitch / rrb->cpp);
     if (r100->using_hyperz)
       zbpitch |= RADEON_DEPTH_HYPERZ;

     OUT_BATCH_RELOC(0, rrb->bo, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0);
     OUT_BATCH(zbpitch);
     if (rrb->cpp == 4)
        depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
     else
        depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
     atom->cmd[CTX_RB3D_ZSTENCILCNTL] &= ~RADEON_DEPTH_FORMAT_MASK;
     atom->cmd[CTX_RB3D_ZSTENCILCNTL] |= depth_fmt;
   }
     
   OUT_BATCH(atom->cmd[CTX_RB3D_ZSTENCILCNTL]);
   OUT_BATCH(atom->cmd[CTX_CMD_1]);
   OUT_BATCH(atom->cmd[CTX_PP_CNTL]);

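   /* color buffer: pick ARGB8888 or RGB565 from cpp and relocate the color offset */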
   rrb = radeon_get_colorbuffer(&r100->radeon);
   if (!rrb || !rrb->bo) {
      OUT_BATCH(atom->cmd[CTX_RB3D_CNTL]);
      OUT_BATCH(atom->cmd[CTX_RB3D_COLOROFFSET]);
   } else {
      atom->cmd[CTX_RB3D_CNTL] &= ~(0xf << 10);
      if (rrb->cpp == 4)
         atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_ARGB8888;
      else
         atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_RGB565;

      OUT_BATCH(atom->cmd[CTX_RB3D_CNTL]);
      OUT_BATCH_RELOC(0, rrb->bo, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0);
   }

   OUT_BATCH(atom->cmd[CTX_CMD_2]);

   if (!rrb || !rrb->bo) {
     OUT_BATCH(atom->cmd[CTX_RB3D_COLORPITCH]);
   } else {
     cbpitch = (rrb->pitch / rrb->cpp);
     if (rrb->bo->flags & RADEON_BO_FLAGS_MACRO_TILE)
       cbpitch |= RADEON_COLOR_TILE_ENABLE;
     OUT_BATCH(cbpitch);
   }

   END_BATCH();
}
Example 7
static GLboolean evergreenTryDrawPrims(GLcontext *ctx,
				  const struct gl_client_array *arrays[],
				  const struct _mesa_prim *prim,
				  GLuint nr_prims,
				  const struct _mesa_index_buffer *ib,
				  GLuint min_index,
				  GLuint max_index )
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    GLuint i, id = 0;
    struct radeon_renderbuffer *rrb;

    if (ctx->NewState)
        _mesa_update_state( ctx );

    if (evergreen_check_fallbacks(ctx))
	    return GL_FALSE;

    _tnl_UpdateFixedFunctionProgram(ctx);
    evergreenSetVertexFormat(ctx, arrays, max_index + 1);


    /* shaders need to be updated before buffers are validated */
    evergreenUpdateShaders(ctx);
    if (!evergreenValidateBuffers(ctx))
	    return GL_FALSE;

    /* always emit CB base to prevent
     * lock ups on some chips.
     */
    EVERGREEN_STATECHANGE(context, cb);
    /* mark vtx as dirty since it changes per-draw */
    EVERGREEN_STATECHANGE(context, vtx);

    evergreenSetScissor(context);

    evergreenSetupVertexProgram(ctx);
    evergreenSetupFragmentProgram(ctx);
    evergreenUpdateShaderStates(ctx);

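    /* remember where the command stream should end so overflow can be detected below */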
    GLuint emit_end = evergreenPredictRenderSize(ctx, prim, ib, nr_prims)
                    + context->radeon.cmdbuf.cs->cdw;

    /* evergreenPredictRenderSize will call radeonReleaseDmaRegions, so update VP/FP const buf after it. */
    evergreenSetupVPconstants(ctx);
    evergreenSetupFPconstants(ctx);

    evergreenSetupIndexBuffer(ctx, ib);

    evergreenSetupStreams(ctx, arrays, max_index + 1);

    radeonEmitState(radeon);

    radeon_debug_add_indent();

    for (i = 0; i < nr_prims; ++i)
    {
	    if (context->ind_buf.bo)
		    evergreenRunRenderPrimitive(ctx,
					   prim[i].start,
					   prim[i].start + prim[i].count,
					   prim[i].mode,
					   prim[i].basevertex);
	    else
		    evergreenRunRenderPrimitiveImmediate(ctx,
						    prim[i].start,
						    prim[i].start + prim[i].count,
						    prim[i].mode);
    }

    radeon_debug_remove_indent();

    /* Flush render ops cached for the last several quads. */
    /* XXX drm should handle this in fence submit */

    //evergreeWaitForIdleClean(context);

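    /* flush the color and depth caches for the surfaces just rendered to */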
    rrb = radeon_get_colorbuffer(&context->radeon);
    if (rrb && rrb->bo)
	    r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
			 CB_ACTION_ENA_bit | (1 << (id + 6)));

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (rrb && rrb->bo)
	    r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
			 DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);

    evergreenFreeData(ctx);

    if (emit_end < context->radeon.cmdbuf.cs->cdw)
    {
        WARN_ONCE("Rendering was %d commands larger than predicted size."
            " We might overflow  command buffer.\n", context->radeon.cmdbuf.cs->cdw - emit_end);
    }

    return GL_TRUE;
}