/* Emit the texture state atom for one texture unit, relocating the texture
 * offset against the miptree BO when the unit has a (non-overridden) miptree.
 */
static void tex_emit(GLcontext *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   uint32_t dwords = atom->cmd_size;
   int i = atom->idx;
   radeonTexObj *t = r100->state.texture.unit[i].texobj;
   radeon_mipmap_level *lvl;

   if (t && t->mt && !t->image_override)
     dwords += 2;
   BEGIN_BATCH_NO_AUTOSTATE(dwords);

   OUT_BATCH_TABLE(atom->cmd, 3);
   if (t && t->mt && !t->image_override) {
     if ((ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_CUBE_BIT)) {
   	lvl = &t->mt->levels[0];
	OUT_BATCH_RELOC(lvl->faces[5].offset, t->mt->bo, lvl->faces[5].offset,
			RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
     } else {
        OUT_BATCH_RELOC(t->tile_bits, t->mt->bo, 0,
		     RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
     }
   } else if (!t) {
     /* workaround for old CS mechanism */
     OUT_BATCH(r100->radeon.radeonScreen->texOffset[RADEON_LOCAL_TEX_HEAP]);
     //     OUT_BATCH(r100->radeon.radeonScreen);
   } else {
     OUT_BATCH(t->override_offset);
   }

   OUT_BATCH_TABLE((atom->cmd+4), 5);
   END_BATCH();
}
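/* Command-stream variant of the texture atom emit: the texture filter/format
 * dwords, the (relocated) texture offset, the combiner dwords and the border
 * colour are written as separate register packets.
 */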
static void tex_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   uint32_t dwords = atom->cmd_size;
   int i = atom->idx;
   radeonTexObj *t = r100->state.texture.unit[i].texobj;
   radeon_mipmap_level *lvl;
   int hastexture = (t && (t->mt || t->bo));

   dwords += 1;
   if (hastexture)
     dwords += 2;
   else
     dwords -= 2;
   BEGIN_BATCH_NO_AUTOSTATE(dwords);

   OUT_BATCH(CP_PACKET0(RADEON_PP_TXFILTER_0 + (24 * i), 1));
   OUT_BATCH_TABLE((atom->cmd + 1), 2);

   if (hastexture) {
     OUT_BATCH(CP_PACKET0(RADEON_PP_TXOFFSET_0 + (24 * i), 0));
     if (t->mt && !t->image_override) {
        if ((ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_CUBE_BIT)) {
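            /* Cube maps: the base TXOFFSET register carries face 5; faces 0-4
             * are emitted separately by the cubic-offset atom (cube_emit_cs).
             */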
            lvl = &t->mt->levels[t->minLod];
	    OUT_BATCH_RELOC(lvl->faces[5].offset, t->mt->bo, lvl->faces[5].offset,
			RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
        } else {
           OUT_BATCH_RELOC(t->tile_bits, t->mt->bo, get_base_teximage_offset(t),
		     RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
        }
      } else {
	if (t->bo)
            OUT_BATCH_RELOC(t->tile_bits, t->bo, 0,
                            RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
      }
   }

   OUT_BATCH(CP_PACKET0(RADEON_PP_TXCBLEND_0 + (i * 24), 1));
   OUT_BATCH_TABLE((atom->cmd+4), 2);
   OUT_BATCH(CP_PACKET0(RADEON_PP_BORDER_COLOR_0 + (i * 4), 0));
   OUT_BATCH((atom->cmd[TEX_PP_BORDER_COLOR]));
   END_BATCH();
}
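/* Emit the cube-map face offsets (faces 0-4) for one texture unit through the
 * PP_CUBIC_OFFSET_Tn registers; face 5 is programmed via the unit's main
 * texture offset.
 */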
static void cube_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   uint32_t dwords = atom->check(ctx, atom);
   int i = atom->idx, j;
   radeonTexObj *t = r100->state.texture.unit[i].texobj;
   radeon_mipmap_level *lvl;
   uint32_t base_reg;

   if (!(ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_CUBE_BIT))
	return;

   if (!t)
	return;

   if (!t->mt)
	return;

   switch (i) {
	case 1: base_reg = RADEON_PP_CUBIC_OFFSET_T1_0; break;
	case 2: base_reg = RADEON_PP_CUBIC_OFFSET_T2_0; break;
	default:
	case 0: base_reg = RADEON_PP_CUBIC_OFFSET_T0_0; break;
   }
   BEGIN_BATCH_NO_AUTOSTATE(dwords);
   OUT_BATCH_TABLE(atom->cmd, 2);
   lvl = &t->mt->levels[0];
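   /* each face offset is relocated individually against the miptree BO */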
   for (j = 0; j < 5; j++) {
	OUT_BATCH(CP_PACKET0(base_reg + (4 * j), 0));
	OUT_BATCH_RELOC(lvl->faces[j].offset, t->mt->bo, lvl->faces[j].offset,
			RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
   }
   END_BATCH();
}
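/* Legacy-path equivalent of cube_emit_cs(): relocate and emit the offsets of
 * cube faces 0-4 from the base mipmap level.
 */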
static void cube_emit(GLcontext *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   uint32_t dwords = atom->check(ctx, atom);
   int i = atom->idx, j;
   radeonTexObj *t = r100->state.texture.unit[i].texobj;
   radeon_mipmap_level *lvl;

   if (!(ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_CUBE_BIT))
	return;

   if (!t)
	return;

   if (!t->mt)
	return;

   BEGIN_BATCH_NO_AUTOSTATE(dwords);
   OUT_BATCH_TABLE(atom->cmd, 3);
   lvl = &t->mt->levels[0];
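   /* legacy relocation path: the face offsets are restricted to VRAM */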
   for (j = 0; j < 5; j++) {
	OUT_BATCH_RELOC(lvl->faces[j].offset, t->mt->bo, lvl->faces[j].offset,
			RADEON_GEM_DOMAIN_VRAM, 0, 0);
   }
   END_BATCH();
}
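/* Emit the query-object state atom verbatim and mark the current query's
 * begin as emitted.
 */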
void radeon_emit_queryobj(struct gl_context *ctx, struct radeon_state_atom *atom)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	BATCH_LOCALS(radeon);
	int dwords;

	dwords = (*atom->check) (ctx, atom);

	BEGIN_BATCH_NO_AUTOSTATE(dwords);
	OUT_BATCH_TABLE(atom->cmd, dwords);
	END_BATCH();

	radeon->query.current->emitted_begin = GL_TRUE;
}
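/* Emit a single textured rectangle (RECT_LIST) with immediate vertex data:
 * texture coordinates are derived from the source rectangle, the vertex
 * positions from the destination rectangle.
 */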
static inline void emit_draw_packet(struct r100_context *r100,
				    unsigned src_width, unsigned src_height,
				    unsigned src_x_offset, unsigned src_y_offset,
				    unsigned dst_x_offset, unsigned dst_y_offset,
				    unsigned reg_width, unsigned reg_height,
				    unsigned flip_y)
{
    float texcoords[4];
    float verts[12];
    BATCH_LOCALS(&r100->radeon);

    calc_tex_coords(src_width, src_height,
                    src_x_offset, src_y_offset,
                    reg_width, reg_height,
                    flip_y, texcoords);

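    /* Three corners of the rectangle, each as x, y followed by the s, t
     * texture coordinates (a RECT_LIST needs only three vertices).
     */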
    verts[0] = dst_x_offset;
    verts[1] = dst_y_offset + reg_height;
    verts[2] = texcoords[0];
    verts[3] = texcoords[3];

    verts[4] = dst_x_offset + reg_width;
    verts[5] = dst_y_offset + reg_height;
    verts[6] = texcoords[1];
    verts[7] = texcoords[3];

    verts[8] = dst_x_offset + reg_width;
    verts[9] = dst_y_offset;
    verts[10] = texcoords[1];
    verts[11] = texcoords[2];

    BEGIN_BATCH(15);
    OUT_BATCH(RADEON_CP_PACKET3_3D_DRAW_IMMD | (13 << 16));
    OUT_BATCH(RADEON_CP_VC_FRMT_XY | RADEON_CP_VC_FRMT_ST0);
    OUT_BATCH(RADEON_CP_VC_CNTL_PRIM_WALK_RING |
	      RADEON_CP_VC_CNTL_PRIM_TYPE_RECT_LIST |
	      RADEON_CP_VC_CNTL_MAOS_ENABLE |
	      RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
              (3 << 16));
    OUT_BATCH_TABLE(verts, 12);
    END_BATCH();
}
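/* Command-stream variant of the context atom: patch the colour and depth
 * formats into the saved state, compute the pitches, then emit the registers
 * with relocations for the colour and depth buffer objects.
 */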
static void ctx_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   struct radeon_renderbuffer *rrb, *drb;
   uint32_t cbpitch = 0;
   uint32_t zbpitch = 0;
   uint32_t dwords = atom->check(ctx, atom);
   uint32_t depth_fmt;

   rrb = radeon_get_colorbuffer(&r100->radeon);
   if (!rrb || !rrb->bo) {
      fprintf(stderr, "no rrb\n");
      return;
   }

   atom->cmd[CTX_RB3D_CNTL] &= ~(0xf << 10);
   if (rrb->cpp == 4)
	atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_ARGB8888;
   else switch (rrb->base.Base.Format) {
   case MESA_FORMAT_RGB565:
	atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_RGB565;
	break;
   case MESA_FORMAT_ARGB4444:
	atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_ARGB4444;
	break;
   case MESA_FORMAT_ARGB1555:
	atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_ARGB1555;
	break;
   default:
	_mesa_problem(ctx, "unexpected format in ctx_emit_cs()");
   }

   cbpitch = (rrb->pitch / rrb->cpp);
   if (rrb->bo->flags & RADEON_BO_FLAGS_MACRO_TILE)
       cbpitch |= R200_COLOR_TILE_ENABLE;
   if (rrb->bo->flags & RADEON_BO_FLAGS_MICRO_TILE)
       cbpitch |= RADEON_COLOR_MICROTILE_ENABLE;

   drb = radeon_get_depthbuffer(&r100->radeon);
   if (drb) {
     zbpitch = (drb->pitch / drb->cpp);
     if (drb->cpp == 4)
        depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
     else
        depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
     atom->cmd[CTX_RB3D_ZSTENCILCNTL] &= ~RADEON_DEPTH_FORMAT_MASK;
     atom->cmd[CTX_RB3D_ZSTENCILCNTL] |= depth_fmt;
     
   }

   BEGIN_BATCH_NO_AUTOSTATE(dwords);

   /* In the CS case we need to split this up */
   OUT_BATCH(CP_PACKET0(packet[0].start, 3));
   OUT_BATCH_TABLE((atom->cmd + 1), 4);

   if (drb) {
     OUT_BATCH(CP_PACKET0(RADEON_RB3D_DEPTHOFFSET, 0));
     OUT_BATCH_RELOC(0, drb->bo, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0);

     OUT_BATCH(CP_PACKET0(RADEON_RB3D_DEPTHPITCH, 0));
     OUT_BATCH(zbpitch);
   }

   OUT_BATCH(CP_PACKET0(RADEON_RB3D_ZSTENCILCNTL, 0));
   OUT_BATCH(atom->cmd[CTX_RB3D_ZSTENCILCNTL]);
   OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 1));
   OUT_BATCH(atom->cmd[CTX_PP_CNTL]);
   OUT_BATCH(atom->cmd[CTX_RB3D_CNTL]);

   if (rrb) {
     OUT_BATCH(CP_PACKET0(RADEON_RB3D_COLOROFFSET, 0));
     OUT_BATCH_RELOC(rrb->draw_offset, rrb->bo, rrb->draw_offset, 0, RADEON_GEM_DOMAIN_VRAM, 0);

     OUT_BATCH(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0));
     OUT_BATCH_RELOC(cbpitch, rrb->bo, cbpitch, 0, RADEON_GEM_DOMAIN_VRAM, 0);
   }

   // if (atom->cmd_size == CTX_STATE_SIZE_NEWDRM) {
   //   OUT_BATCH_TABLE((atom->cmd + 14), 4);
   // }

   END_BATCH();
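   /* Emit the render extents: top-left at (0,0) plus the draw buffer's
    * width/height.
    */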
   BEGIN_BATCH_NO_AUTOSTATE(4);
   OUT_BATCH(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
   OUT_BATCH(0);
   OUT_BATCH(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
   if (rrb) {
       OUT_BATCH(((rrb->base.Base.Width - 1) << RADEON_RE_WIDTH_SHIFT) |
                 ((rrb->base.Base.Height - 1) << RADEON_RE_HEIGHT_SHIFT));
   } else {
       OUT_BATCH(0);
   }
   END_BATCH();
}
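/* Legacy-path context atom emit: the atom's dword table is sent mostly as-is,
 * with the depth/colour offsets relocated and the formats and pitches patched
 * in on the fly.
 */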
static void ctx_emit(GLcontext *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   struct radeon_renderbuffer *rrb;
   uint32_t cbpitch;
   uint32_t zbpitch, depth_fmt;
   uint32_t dwords = atom->check(ctx, atom);

   /* output the first 5 dwords of the context state */
   BEGIN_BATCH_NO_AUTOSTATE(dwords);
   OUT_BATCH_TABLE(atom->cmd, 5);

   rrb = radeon_get_depthbuffer(&r100->radeon);
   if (!rrb) {
     OUT_BATCH(0);
     OUT_BATCH(0);
   } else {
     zbpitch = (rrb->pitch / rrb->cpp);
     if (r100->using_hyperz)
       zbpitch |= RADEON_DEPTH_HYPERZ;

     OUT_BATCH_RELOC(0, rrb->bo, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0);
     OUT_BATCH(zbpitch);
     if (rrb->cpp == 4)
        depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
     else
        depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
     atom->cmd[CTX_RB3D_ZSTENCILCNTL] &= ~RADEON_DEPTH_FORMAT_MASK;
     atom->cmd[CTX_RB3D_ZSTENCILCNTL] |= depth_fmt;
   }
     
   OUT_BATCH(atom->cmd[CTX_RB3D_ZSTENCILCNTL]);
   OUT_BATCH(atom->cmd[CTX_CMD_1]);
   OUT_BATCH(atom->cmd[CTX_PP_CNTL]);

   rrb = radeon_get_colorbuffer(&r100->radeon);
   if (!rrb || !rrb->bo) {
      OUT_BATCH(atom->cmd[CTX_RB3D_CNTL]);
      OUT_BATCH(atom->cmd[CTX_RB3D_COLOROFFSET]);
   } else {
      atom->cmd[CTX_RB3D_CNTL] &= ~(0xf << 10);
      if (rrb->cpp == 4)
         atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_ARGB8888;
      else
         atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_RGB565;

      OUT_BATCH(atom->cmd[CTX_RB3D_CNTL]);
      OUT_BATCH_RELOC(0, rrb->bo, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0);
   }

   OUT_BATCH(atom->cmd[CTX_CMD_2]);

   if (!rrb || !rrb->bo) {
     OUT_BATCH(atom->cmd[CTX_RB3D_COLORPITCH]);
   } else {
     cbpitch = (rrb->pitch / rrb->cpp);
     if (rrb->bo->flags & RADEON_BO_FLAGS_MACRO_TILE)
       cbpitch |= RADEON_COLOR_TILE_ENABLE;
     OUT_BATCH(cbpitch);
   }

   END_BATCH();
}