Example #1
static void
nv50_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
                         struct pipe_constant_buffer *cb)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct pipe_resource *res = cb ? cb->buffer : NULL;
   const unsigned s = nv50_context_shader_stage(shader);
   const unsigned i = index;

   if (shader == PIPE_SHADER_COMPUTE)
      return;

   if (nv50->constbuf[s][i].user)
      nv50->constbuf[s][i].u.buf = NULL;
   else
   if (nv50->constbuf[s][i].u.buf)
      nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_CB(s, i));

   pipe_resource_reference(&nv50->constbuf[s][i].u.buf, res);

   nv50->constbuf[s][i].user = (cb && cb->user_buffer) ? TRUE : FALSE;
   if (nv50->constbuf[s][i].user) {
      nv50->constbuf[s][i].u.data = cb->user_buffer;
      nv50->constbuf[s][i].size = cb->buffer_size;
      nv50->constbuf_valid[s] |= 1 << i;
   } else
   if (res) {
      nv50->constbuf[s][i].offset = cb->buffer_offset;
      nv50->constbuf[s][i].size = align(cb->buffer_size, 0x100);
      nv50->constbuf_valid[s] |= 1 << i;
   } else {
      nv50->constbuf_valid[s] &= ~(1 << i);
   }
   nv50->constbuf_dirty[s] |= 1 << i;

   nv50->dirty |= NV50_NEW_CONSTBUF;
}
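
The function above tracks bindings with per-slot valid/dirty bitmasks so that
state validation only re-emits what changed. A standalone sketch of that
idiom; the struct and names are illustrative, not the driver's own:

#include <stdbool.h>
#include <stdint.h>

struct cb_state {
   uint16_t valid; /* one bit per slot that currently holds a buffer */
   uint16_t dirty; /* slots that must be re-emitted on next validation */
};

static void
cb_bind(struct cb_state *st, unsigned i, bool present)
{
   if (present)
      st->valid |= 1u << i;
   else
      st->valid &= ~(1u << i);
   st->dirty |= 1u << i; /* binding and unbinding both need re-emission */
}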
Example #2
static void
nv50_query_end(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q = nv50_query(pq);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      nv50_query_get(push, q, 0, 0x0100f002);
      PUSH_SPACE(push, 2);
      BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
      PUSH_DATA (push, 0);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      nv50_query_get(push, q, 0, 0x06805002);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      nv50_query_get(push, q, 0, 0x05805002);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      nv50_query_get(push, q, 0x00, 0x05805002);
      nv50_query_get(push, q, 0x10, 0x06805002);
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
   case PIPE_QUERY_TIME_ELAPSED:
      nv50_query_get(push, q, 0, 0x00005002);
      break;
   case PIPE_QUERY_GPU_FINISHED:
      nv50_query_get(push, q, 0, 0x1000f010);
      break;
   default:
      assert(0);
      break;
   }
   q->flushed = FALSE;
}
Example #3
static void
nv50_set_vertex_buffers(struct pipe_context *pipe,
                        unsigned start_slot, unsigned count,
                        const struct pipe_vertex_buffer *vb)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   unsigned i;

   util_set_vertex_buffers_count(nv50->vtxbuf, &nv50->num_vtxbufs, vb,
                                 start_slot, count);

   if (!vb) {
      nv50->vbo_user &= ~(((1ull << count) - 1) << start_slot);
      nv50->vbo_constant &= ~(((1ull << count) - 1) << start_slot);
      return;
   }

   for (i = 0; i < count; ++i) {
      unsigned dst_index = start_slot + i;

      if (!vb[i].buffer && vb[i].user_buffer) {
         nv50->vbo_user |= 1 << dst_index;
         if (!vb[i].stride)
            nv50->vbo_constant |= 1 << dst_index;
         else
            nv50->vbo_constant &= ~(1 << dst_index);
      } else {
         nv50->vbo_user &= ~(1 << dst_index);
         nv50->vbo_constant &= ~(1 << dst_index);
      }
   }

   nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_VERTEX);

   nv50->dirty |= NV50_NEW_ARRAYS;
}
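
The mask arithmetic above clears a contiguous range of slot bits in one step.
A minimal sketch of that idiom, assuming count stays below 64 (shifting a
64-bit 1 by 64 or more is undefined):

#include <stdint.h>

/* Select `count` consecutive bits starting at `start_slot`;
 * e.g. range_mask(2, 3) == 0x1c (bits 2, 3 and 4 set). */
static uint64_t
range_mask(unsigned start_slot, unsigned count)
{
   return ((1ull << count) - 1) << start_slot;
}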
Example #4
void
nv50_push_elements_instanced(struct pipe_context *pipe,
                             struct pipe_resource *idxbuf,
                             unsigned idxsize, int idxbias,
                             unsigned mode, unsigned start, unsigned count,
                             unsigned i_start, unsigned i_count)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_grobj *tesla = nv50->screen->tesla;
   struct nouveau_channel *chan = tesla->channel;
   struct push_context ctx;
   const unsigned p_overhead = 4 + /* begin/end */
                               4; /* potential edgeflag enable/disable */
   const unsigned v_overhead = 1 + /* VERTEX_DATA packet header */
                               2; /* potential edgeflag modification */
   struct util_split_prim s;
   unsigned vtx_size;
   boolean nzi = FALSE;
   int i;

   ctx.nv50 = nv50;
   ctx.attr_nr = 0;
   ctx.idxbuf = NULL;
   ctx.vtx_size = 0;
   ctx.edgeflag = 0.5f;
   ctx.edgeflag_attr = nv50->vertprog->vp.edgeflag;

   /* map vertex buffers, determine vertex size */
   for (i = 0; i < nv50->vtxelt->num_elements; i++) {
      struct pipe_vertex_element *ve = &nv50->vtxelt->pipe[i];
      struct pipe_vertex_buffer *vb = &nv50->vtxbuf[ve->vertex_buffer_index];
      struct nouveau_bo *bo = nv50_resource(vb->buffer)->bo;
      unsigned size, nr_components, n;

      if (!(nv50->vbo_fifo & (1 << i)))
         continue;
      n = ctx.attr_nr++;

      if (nouveau_bo_map(bo, NOUVEAU_BO_RD)) {
         assert(bo->map);
         return;
      }
      ctx.attr[n].map = (uint8_t *)bo->map + vb->buffer_offset + ve->src_offset;
      nouveau_bo_unmap(bo);

      ctx.attr[n].stride = vb->stride;
      ctx.attr[n].divisor = ve->instance_divisor;
      if (ctx.attr[n].divisor) {
         ctx.attr[n].step = i_start % ve->instance_divisor;
         ctx.attr[n].map = (uint8_t *)ctx.attr[n].map + i_start * vb->stride;
      }

      size = util_format_get_component_bits(ve->src_format,
                                            UTIL_FORMAT_COLORSPACE_RGB, 0);
      nr_components = util_format_get_nr_components(ve->src_format);
      switch (size) {
      case 8:
         switch (nr_components) {
         case 1: ctx.attr[n].push = emit_b08_1; break;
         case 2: ctx.attr[n].push = emit_b16_1; break;
         case 3: ctx.attr[n].push = emit_b08_3; break;
         case 4: ctx.attr[n].push = emit_b32_1; break;
         }
         ctx.vtx_size++;
         break;
      case 16:
         switch (nr_components) {
         case 1: ctx.attr[n].push = emit_b16_1; break;
         case 2: ctx.attr[n].push = emit_b32_1; break;
         case 3: ctx.attr[n].push = emit_b16_3; break;
         case 4: ctx.attr[n].push = emit_b32_2; break;
         }
         ctx.vtx_size += (nr_components + 1) >> 1;
         break;
      case 32:
         switch (nr_components) {
         case 1: ctx.attr[n].push = emit_b32_1; break;
         case 2: ctx.attr[n].push = emit_b32_2; break;
         case 3: ctx.attr[n].push = emit_b32_3; break;
         case 4: ctx.attr[n].push = emit_b32_4; break;
         }
         ctx.vtx_size += nr_components;
         break;
      default:
         assert(0);
         return;
      }
   }
   vtx_size = ctx.vtx_size + v_overhead;

   /* map index buffer, if present */
   if (idxbuf) {
      struct nouveau_bo *bo = nv50_resource(idxbuf)->bo;

      if (nouveau_bo_map(bo, NOUVEAU_BO_RD)) {
         assert(bo->map);
         return;
      }
      ctx.idxbuf = bo->map;
      ctx.idxbias = idxbias;
      ctx.idxsize = idxsize;
      nouveau_bo_unmap(bo);
   }

   s.priv = &ctx;
   s.edge = emit_edgeflag;
   if (idxbuf) {
      if (idxsize == 1)
         s.emit = idxbias ? emit_elt08_biased : emit_elt08;
      else
      if (idxsize == 2)
         s.emit = idxbias ? emit_elt16_biased : emit_elt16;
      else
         s.emit = idxbias ? emit_elt32_biased : emit_elt32;
   } else
      s.emit = emit_verts;

   /* per-instance loop */
   BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 2);
   OUT_RING  (chan, NV50_CB_AUX | (24 << 8));
   OUT_RING  (chan, i_start);
   while (i_count--) {
      unsigned max_verts;
      boolean done;

      for (i = 0; i < ctx.attr_nr; i++) {
         if (!ctx.attr[i].divisor ||
              ctx.attr[i].divisor != ++ctx.attr[i].step)
            continue;
         ctx.attr[i].step = 0;
         ctx.attr[i].map = (uint8_t *)ctx.attr[i].map + ctx.attr[i].stride;
      }

      util_split_prim_init(&s, mode, start, count);
      do {
         if (AVAIL_RING(chan) < p_overhead + (6 * vtx_size)) {
            FIRE_RING(chan);
            if (!nv50_state_validate(nv50, p_overhead + (6 * vtx_size))) {
               assert(0);
               return;
            }
         }

         max_verts  = AVAIL_RING(chan);
         max_verts -= p_overhead;
         max_verts /= vtx_size;

         BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
         OUT_RING  (chan, nv50_prim(s.mode) | (nzi ? (1 << 28) : 0));
         done = util_split_prim_next(&s, max_verts);
         BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
         OUT_RING  (chan, 0);
      } while (!done);

      nzi = TRUE;
   }
}
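
The per-packet budgeting above works entirely in 32-bit pushbuf words:
p_overhead reserves room for the begin/end methods and a possible edgeflag
toggle, and the remaining space is divided by the per-vertex word count.
A plain-C sketch of that arithmetic (names illustrative):

/* How many vertices fit into `avail` words once the fixed packet
 * overhead is reserved? Mirrors the max_verts computation above. */
static unsigned
max_verts_for(unsigned avail, unsigned p_overhead, unsigned vtx_size)
{
   if (avail <= p_overhead)
      return 0;
   return (avail - p_overhead) / vtx_size;
}
/* e.g. 100 words available, 8 words of overhead, 4 words per vertex
 * -> at most 23 vertices per VERTEX_BEGIN/VERTEX_END packet. */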
Example #5
static void
nv50_resource_resolve(struct pipe_context *pipe,
                      const struct pipe_resolve_info *info)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nv50_screen *screen = nv50->screen;
   struct nv50_blitctx *blit = screen->blitctx;
   struct nouveau_channel *chan = screen->base.channel;
   struct pipe_resource *src = info->src.res;
   struct pipe_resource *dst = info->dst.res;
   float x0, x1, y0, y1, z;
   float x_range, y_range;

   nv50_blitctx_get_color_mask_and_fp(blit, dst->format, info->mask);

   blit->filter = util_format_is_depth_or_stencil(dst->format) ? 0 : 1;

   nv50_blitctx_pre_blit(blit, nv50);

   nv50_blit_set_dst(nv50, dst, info->dst.level, info->dst.layer);
   nv50_blit_set_src(nv50, src, 0,               info->src.layer);

   nv50_blitctx_prepare_state(blit);

   nv50_state_validate(nv50, 36);

   x_range =
      (float)(info->src.x1 - info->src.x0) /
      (float)(info->dst.x1 - info->dst.x0);
   y_range =
      (float)(info->src.y1 - info->src.y0) /
      (float)(info->dst.y1 - info->dst.y0);

   x0 = (float)info->src.x0 - x_range * (float)info->dst.x0;
   y0 = (float)info->src.y0 - y_range * (float)info->dst.y0;

   x1 = x0 + 16384.0f * x_range;
   y1 = y0 + 16384.0f * y_range;

   x0 *= (float)(1 << nv50_miptree(src)->ms_x);
   x1 *= (float)(1 << nv50_miptree(src)->ms_x);
   y0 *= (float)(1 << nv50_miptree(src)->ms_y);
   y1 *= (float)(1 << nv50_miptree(src)->ms_y);

   z = (float)info->src.layer;

   BEGIN_RING(chan, RING_3D(FP_START_ID), 1);
   OUT_RING  (chan,
              blit->fp.code_base + blit->fp_offset);

   BEGIN_RING(chan, RING_3D(VIEWPORT_TRANSFORM_EN), 1);
   OUT_RING  (chan, 0);

   /* Draw a large triangle in screen coordinates covering the whole
    * render target, with scissors defining the destination region.
    * The vertex is supplied with non-normalized texture coordinates
    * arranged in a way to yield the desired offset and scale.
    */

   BEGIN_RING(chan, RING_3D(SCISSOR_HORIZ(0)), 2);
   OUT_RING  (chan, (info->dst.x1 << 16) | info->dst.x0);
   OUT_RING  (chan, (info->dst.y1 << 16) | info->dst.y0);

   BEGIN_RING(chan, RING_3D(VERTEX_BEGIN_GL), 1);
   OUT_RING  (chan, NV50_3D_VERTEX_BEGIN_GL_PRIMITIVE_TRIANGLES);
   BEGIN_RING(chan, RING_3D(VTX_ATTR_3F_X(1)), 3);
   OUT_RINGf (chan, x0);
   OUT_RINGf (chan, y0);
   OUT_RINGf (chan, z);
   BEGIN_RING(chan, RING_3D(VTX_ATTR_2F_X(0)), 2);
   OUT_RINGf (chan, 0.0f);
   OUT_RINGf (chan, 0.0f);
   BEGIN_RING(chan, RING_3D(VTX_ATTR_3F_X(1)), 3);
   OUT_RINGf (chan, x1);
   OUT_RINGf (chan, y0);
   OUT_RINGf (chan, z);
   BEGIN_RING(chan, RING_3D(VTX_ATTR_2F_X(0)), 2);
   OUT_RINGf (chan, 16384 << nv50_miptree(dst)->ms_x);
   OUT_RINGf (chan, 0.0f);
   BEGIN_RING(chan, RING_3D(VTX_ATTR_3F_X(1)), 3);
   OUT_RINGf (chan, x0);
   OUT_RINGf (chan, y1);
   OUT_RINGf (chan, z);
   BEGIN_RING(chan, RING_3D(VTX_ATTR_2F_X(0)), 2);
   OUT_RINGf (chan, 0.0f);
   OUT_RINGf (chan, 16384 << nv50_miptree(dst)->ms_y);
   BEGIN_RING(chan, RING_3D(VERTEX_END_GL), 1);
   OUT_RING  (chan, 0);

   /* re-enable normally constant state */

   BEGIN_RING(chan, RING_3D(VIEWPORT_TRANSFORM_EN), 1);
   OUT_RING  (chan, 1);

   nv50_blitctx_post_blit(nv50, blit);
}
Example #6
static void
nv50_resource_copy_region(struct pipe_context *pipe,
                          struct pipe_resource *dst, unsigned dst_level,
                          unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *src, unsigned src_level,
                          const struct pipe_box *src_box)
{
   struct nv50_screen *screen = nv50_context(pipe)->screen;
   int ret;
   boolean m2mf;
   unsigned dst_layer = dstz, src_layer = src_box->z;

   /* Fallback for buffers. */
   if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
      util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz,
                                src, src_level, src_box);
      return;
   }

   assert(src->nr_samples == dst->nr_samples);

   m2mf = (src->format == dst->format) ||
      (util_format_get_blocksizebits(src->format) ==
       util_format_get_blocksizebits(dst->format));

   nv04_resource(dst)->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;

   if (m2mf) {
      struct nv50_m2mf_rect drect, srect;
      unsigned i;
      unsigned nx = util_format_get_nblocksx(src->format, src_box->width);
      unsigned ny = util_format_get_nblocksy(src->format, src_box->height);

      nv50_m2mf_rect_setup(&drect, dst, dst_level, dstx, dsty, dstz);
      nv50_m2mf_rect_setup(&srect, src, src_level,
                           src_box->x, src_box->y, src_box->z);

      for (i = 0; i < src_box->depth; ++i) {
         nv50_m2mf_transfer_rect(&screen->base.base, &drect, &srect, nx, ny);

         if (nv50_miptree(dst)->layout_3d)
            drect.z++;
         else
            drect.base += nv50_miptree(dst)->layer_stride;

         if (nv50_miptree(src)->layout_3d)
            srect.z++;
         else
            srect.base += nv50_miptree(src)->layer_stride;
      }
      return;
   }

   assert((src->format == dst->format) ||
          (nv50_2d_format_faithful(src->format) &&
           nv50_2d_format_faithful(dst->format)));

   for (; dst_layer < dstz + src_box->depth; ++dst_layer, ++src_layer) {
      ret = nv50_2d_texture_do_copy(screen->base.channel,
                                    nv50_miptree(dst), dst_level,
                                    dstx, dsty, dst_layer,
                                    nv50_miptree(src), src_level,
                                    src_box->x, src_box->y, src_layer,
                                    src_box->width, src_box->height);
      if (ret)
         return;
   }
}
Example #7
static void
nv50_gp_sampler_states_bind(struct pipe_context *pipe, unsigned nr, void **s)
{
   nv50_stage_sampler_states_bind(nv50_context(pipe), 1, nr, s);
}
Example #8
static void
nv50_query_destroy(struct pipe_context *pipe, struct pipe_query *pq)
{
   nv50_query_allocate(nv50_context(pipe), nv50_query(pq), 0);
   FREE(nv50_query(pq));
}
Example #9
static boolean
nv50_query_result(struct pipe_context *pipe, struct pipe_query *pq,
                  boolean wait, union pipe_query_result *result)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nv50_query *q = nv50_query(pq);
   uint64_t *res64 = (uint64_t *)result;
   uint32_t *res32 = (uint32_t *)result;
   boolean *res8 = (boolean *)result;
   uint64_t *data64 = (uint64_t *)q->data;
   int i;

   if (!q->ready) /* update ? */
      q->ready = nv50_query_ready(q);
   if (!q->ready) {
      if (!wait) {
         /* for broken apps that spin on GL_QUERY_RESULT_AVAILABLE */
         if (!q->flushed) {
            q->flushed = TRUE;
            PUSH_KICK(nv50->base.pushbuf);
         }
         return FALSE;
      }
      if (nouveau_bo_wait(q->bo, NOUVEAU_BO_RD, nv50->screen->base.client))
         return FALSE;
   }
   q->ready = TRUE;

   switch (q->type) {
   case PIPE_QUERY_GPU_FINISHED:
      res8[0] = TRUE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER: /* u32 sequence, u32 count, u64 time */
      res64[0] = q->data[1];
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED: /* u64 count, u64 time */
   case PIPE_QUERY_PRIMITIVES_EMITTED: /* u64 count, u64 time */
      res64[0] = data64[0] - data64[2];
      break;
   case PIPE_QUERY_SO_STATISTICS:
      res64[0] = data64[0] - data64[4];
      res64[1] = data64[2] - data64[6];
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      for (i = 0; i < 8; ++i)
         res64[i] = data64[i * 2] - data64[16 + i * 2];
      break;
   case PIPE_QUERY_TIMESTAMP:
      res64[0] = data64[1];
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      res64[0] = 1000000000;
      res8[8] = FALSE;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      res64[0] = data64[1] - data64[3];
      break;
   case NVA0_QUERY_STREAM_OUTPUT_BUFFER_OFFSET:
      res32[0] = q->data[1];
      break;
   default:
      return FALSE;
   }

   return TRUE;
}
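
The result decoding above relies on the report layouts noted in the case
comments. A sketch of the occlusion layout as a struct, following the
"u32 sequence, u32 count, u64 time" comment; this type is illustrative, not
part of the driver:

#include <stdint.h>

struct occlusion_report {
   uint32_t sequence; /* q->data[0], matched against q->sequence */
   uint32_t count;    /* q->data[1], returned as res64[0] above */
   uint64_t time;     /* written by the GPU, unused by this query */
};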
Example #10
static void *
nv50_rasterizer_state_create(struct pipe_context *pipe,
                             const struct pipe_rasterizer_state *cso)
{
    struct nouveau_stateobj *so = so_new(15, 21, 0);
    struct nouveau_grobj *tesla = nv50_context(pipe)->screen->tesla;
    struct nv50_rasterizer_stateobj *rso =
        CALLOC_STRUCT(nv50_rasterizer_stateobj);

    /*XXX: ignored
     *  - light_twoside
     * 	- point_smooth
     * 	- multisample
     * 	- point_sprite / sprite_coord_mode
     */

    so_method(so, tesla, NV50TCL_SHADE_MODEL, 1);
    so_data  (so, cso->flatshade ? NV50TCL_SHADE_MODEL_FLAT :
              NV50TCL_SHADE_MODEL_SMOOTH);
    so_method(so, tesla, NV50TCL_PROVOKING_VERTEX_LAST, 1);
    so_data  (so, cso->flatshade_first ? 0 : 1);

    so_method(so, tesla, NV50TCL_VERTEX_TWO_SIDE_ENABLE, 1);
    so_data  (so, cso->light_twoside);

    so_method(so, tesla, NV50TCL_LINE_WIDTH, 1);
    so_data  (so, fui(cso->line_width));
    so_method(so, tesla, NV50TCL_LINE_SMOOTH_ENABLE, 1);
    so_data  (so, cso->line_smooth ? 1 : 0);
    if (cso->line_stipple_enable) {
        so_method(so, tesla, NV50TCL_LINE_STIPPLE_ENABLE, 1);
        so_data  (so, 1);
        so_method(so, tesla, NV50TCL_LINE_STIPPLE_PATTERN, 1);
        so_data  (so, (cso->line_stipple_pattern << 8) |
                  cso->line_stipple_factor);
    } else {
        so_method(so, tesla, NV50TCL_LINE_STIPPLE_ENABLE, 1);
        so_data  (so, 0);
    }

    so_method(so, tesla, NV50TCL_POINT_SIZE, 1);
    so_data  (so, fui(cso->point_size));

    so_method(so, tesla, NV50TCL_POINT_SPRITE_ENABLE, 1);
    so_data  (so, cso->point_quad_rasterization ? 1 : 0);

    so_method(so, tesla, NV50TCL_POLYGON_MODE_FRONT, 3);
    if (cso->front_winding == PIPE_WINDING_CCW) {
        so_data(so, nvgl_polygon_mode(cso->fill_ccw));
        so_data(so, nvgl_polygon_mode(cso->fill_cw));
    } else {
        so_data(so, nvgl_polygon_mode(cso->fill_cw));
        so_data(so, nvgl_polygon_mode(cso->fill_ccw));
    }
    so_data(so, cso->poly_smooth ? 1 : 0);

    so_method(so, tesla, NV50TCL_CULL_FACE_ENABLE, 3);
    so_data  (so, cso->cull_mode != PIPE_WINDING_NONE);
    if (cso->front_winding == PIPE_WINDING_CCW) {
        so_data(so, NV50TCL_FRONT_FACE_CCW);
        switch (cso->cull_mode) {
        case PIPE_WINDING_CCW:
            so_data(so, NV50TCL_CULL_FACE_FRONT);
            break;
        case PIPE_WINDING_CW:
            so_data(so, NV50TCL_CULL_FACE_BACK);
            break;
        case PIPE_WINDING_BOTH:
            so_data(so, NV50TCL_CULL_FACE_FRONT_AND_BACK);
            break;
        default:
            so_data(so, NV50TCL_CULL_FACE_BACK);
            break;
        }
    } else {
        so_data(so, NV50TCL_FRONT_FACE_CW);
        switch (cso->cull_mode) {
        case PIPE_WINDING_CCW:
            so_data(so, NV50TCL_CULL_FACE_BACK);
            break;
        case PIPE_WINDING_CW:
            so_data(so, NV50TCL_CULL_FACE_FRONT);
            break;
        case PIPE_WINDING_BOTH:
            so_data(so, NV50TCL_CULL_FACE_FRONT_AND_BACK);
            break;
        default:
            so_data(so, NV50TCL_CULL_FACE_BACK);
            break;
        }
    }

    so_method(so, tesla, NV50TCL_POLYGON_STIPPLE_ENABLE, 1);
    so_data  (so, cso->poly_stipple_enable ? 1 : 0);

    so_method(so, tesla, NV50TCL_POLYGON_OFFSET_POINT_ENABLE, 3);
    if ((cso->offset_cw && cso->fill_cw == PIPE_POLYGON_MODE_POINT) ||
            (cso->offset_ccw && cso->fill_ccw == PIPE_POLYGON_MODE_POINT))
        so_data(so, 1);
    else
        so_data(so, 0);
    if ((cso->offset_cw && cso->fill_cw == PIPE_POLYGON_MODE_LINE) ||
            (cso->offset_ccw && cso->fill_ccw == PIPE_POLYGON_MODE_LINE))
        so_data(so, 1);
    else
        so_data(so, 0);
    if ((cso->offset_cw && cso->fill_cw == PIPE_POLYGON_MODE_FILL) ||
            (cso->offset_ccw && cso->fill_ccw == PIPE_POLYGON_MODE_FILL))
        so_data(so, 1);
    else
        so_data(so, 0);

    if (cso->offset_cw || cso->offset_ccw) {
        so_method(so, tesla, NV50TCL_POLYGON_OFFSET_FACTOR, 1);
        so_data  (so, fui(cso->offset_scale));
        so_method(so, tesla, NV50TCL_POLYGON_OFFSET_UNITS, 1);
        so_data  (so, fui(cso->offset_units * 2.0f));
    }

    rso->pipe = *cso;
    so_ref(so, &rso->so);
    so_ref(NULL, &so);
    return (void *)rso;
}
Example #11
static int
nv50_invalidate_resource_storage(struct nouveau_context *ctx,
                                 struct pipe_resource *res,
                                 int ref)
{
   struct nv50_context *nv50 = nv50_context(&ctx->pipe);
   unsigned bind = res->bind ? res->bind : PIPE_BIND_VERTEX_BUFFER;
   unsigned s, i;

   if (bind & PIPE_BIND_RENDER_TARGET) {
      assert(nv50->framebuffer.nr_cbufs <= PIPE_MAX_COLOR_BUFS);
      for (i = 0; i < nv50->framebuffer.nr_cbufs; ++i) {
         if (nv50->framebuffer.cbufs[i] &&
             nv50->framebuffer.cbufs[i]->texture == res) {
            nv50->dirty_3d |= NV50_NEW_3D_FRAMEBUFFER;
            nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_FB);
            if (!--ref)
               return ref;
         }
      }
   }
   if (bind & PIPE_BIND_DEPTH_STENCIL) {
      if (nv50->framebuffer.zsbuf &&
          nv50->framebuffer.zsbuf->texture == res) {
         nv50->dirty_3d |= NV50_NEW_3D_FRAMEBUFFER;
         nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_FB);
         if (!--ref)
            return ref;
      }
   }

   if (bind & (PIPE_BIND_VERTEX_BUFFER |
               PIPE_BIND_INDEX_BUFFER |
               PIPE_BIND_CONSTANT_BUFFER |
               PIPE_BIND_STREAM_OUTPUT |
               PIPE_BIND_SAMPLER_VIEW)) {

      assert(nv50->num_vtxbufs <= PIPE_MAX_ATTRIBS);
      for (i = 0; i < nv50->num_vtxbufs; ++i) {
         if (nv50->vtxbuf[i].buffer == res) {
            nv50->dirty_3d |= NV50_NEW_3D_ARRAYS;
            nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_VERTEX);
            if (!--ref)
               return ref;
         }
      }

      if (nv50->idxbuf.buffer == res) {
         /* Just rebind to the bufctx as there is no separate dirty bit */
         nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_INDEX);
         BCTX_REFN(nv50->bufctx_3d, 3D_INDEX, nv04_resource(res), RD);
         if (!--ref)
            return ref;
      }

      for (s = 0; s < 3; ++s) {
         assert(nv50->num_textures[s] <= PIPE_MAX_SAMPLERS);
         for (i = 0; i < nv50->num_textures[s]; ++i) {
            if (nv50->textures[s][i] &&
                nv50->textures[s][i]->texture == res) {
               nv50->dirty_3d |= NV50_NEW_3D_TEXTURES;
               nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TEXTURES);
               if (!--ref)
                  return ref;
            }
         }
      }

      for (s = 0; s < 3; ++s) {
         for (i = 0; i < NV50_MAX_PIPE_CONSTBUFS; ++i) {
            if (!(nv50->constbuf_valid[s] & (1 << i)))
               continue;
            if (!nv50->constbuf[s][i].user &&
                nv50->constbuf[s][i].u.buf == res) {
               nv50->dirty_3d |= NV50_NEW_3D_CONSTBUF;
               nv50->constbuf_dirty[s] |= 1 << i;
               nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_CB(s, i));
               if (!--ref)
                  return ref;
            }
         }
      }
   }

   return ref;
}
Example #12
static void *
nv50_depth_stencil_alpha_state_create(struct pipe_context *pipe,
			const struct pipe_depth_stencil_alpha_state *cso)
{
	struct nouveau_grobj *tesla = nv50_context(pipe)->screen->tesla;
	struct nv50_zsa_stateobj *zsa = CALLOC_STRUCT(nv50_zsa_stateobj);
	struct nouveau_stateobj *so = so_new(64, 0);

	so_method(so, tesla, NV50TCL_DEPTH_WRITE_ENABLE, 1);
	so_data  (so, cso->depth.writemask ? 1 : 0);
	if (cso->depth.enabled) {
		so_method(so, tesla, NV50TCL_DEPTH_TEST_ENABLE, 1);
		so_data  (so, 1);
		so_method(so, tesla, NV50TCL_DEPTH_TEST_FUNC, 1);
		so_data  (so, nvgl_comparison_op(cso->depth.func));
	} else {
		so_method(so, tesla, NV50TCL_DEPTH_TEST_ENABLE, 1);
		so_data  (so, 0);
	}

	/* XXX: keep hex values until header is updated (names reversed) */
	if (cso->stencil[0].enabled) {
		so_method(so, tesla, 0x1380, 8);
		so_data  (so, 1);
		so_data  (so, nvgl_stencil_op(cso->stencil[0].fail_op));
		so_data  (so, nvgl_stencil_op(cso->stencil[0].zfail_op));
		so_data  (so, nvgl_stencil_op(cso->stencil[0].zpass_op));
		so_data  (so, nvgl_comparison_op(cso->stencil[0].func));
		so_data  (so, cso->stencil[0].ref_value);
		so_data  (so, cso->stencil[0].writemask);
		so_data  (so, cso->stencil[0].valuemask);
	} else {
		so_method(so, tesla, 0x1380, 1);
		so_data  (so, 0);
	}

	if (cso->stencil[1].enabled) {
		so_method(so, tesla, 0x1594, 5);
		so_data  (so, 1);
		so_data  (so, nvgl_stencil_op(cso->stencil[1].fail_op));
		so_data  (so, nvgl_stencil_op(cso->stencil[1].zfail_op));
		so_data  (so, nvgl_stencil_op(cso->stencil[1].zpass_op));
		so_data  (so, nvgl_comparison_op(cso->stencil[1].func));
		so_method(so, tesla, 0x0f54, 3);
		so_data  (so, cso->stencil[1].ref_value);
		so_data  (so, cso->stencil[1].writemask);
		so_data  (so, cso->stencil[1].valuemask);
	} else {
		so_method(so, tesla, 0x1594, 1);
		so_data  (so, 0);
	}

	if (cso->alpha.enabled) {
		so_method(so, tesla, NV50TCL_ALPHA_TEST_ENABLE, 1);
		so_data  (so, 1);
		so_method(so, tesla, NV50TCL_ALPHA_TEST_REF, 2);
		so_data  (so, fui(cso->alpha.ref_value));
		so_data  (so, nvgl_comparison_op(cso->alpha.func));
	} else {
		so_method(so, tesla, NV50TCL_ALPHA_TEST_ENABLE, 1);
		so_data  (so, 0);
	}

	zsa->pipe = *cso;
	so_ref(so, &zsa->so);
	so_ref(NULL, &so);
	return (void *)zsa;
}
Example #13
static void
nv50_render_condition(struct pipe_context *pipe,
                      struct pipe_query *pq,
                      boolean condition, enum pipe_render_cond_flag mode)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q = nv50_query(pq);
   struct nv50_hw_query *hq = nv50_hw_query(q);
   uint32_t cond;
   bool wait =
      mode != PIPE_RENDER_COND_NO_WAIT &&
      mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

   if (!pq) {
      cond = NV50_3D_COND_MODE_ALWAYS;
   }
   else {
      /* NOTE: comparison of 2 queries only works if both have completed */
      switch (q->type) {
      case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
         cond = condition ? NV50_3D_COND_MODE_EQUAL :
                            NV50_3D_COND_MODE_NOT_EQUAL;
         wait = true;
         break;
      case PIPE_QUERY_OCCLUSION_COUNTER:
      case PIPE_QUERY_OCCLUSION_PREDICATE:
      case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
         if (hq->state == NV50_HW_QUERY_STATE_READY)
            wait = true;
         if (likely(!condition)) {
            cond = wait ? NV50_3D_COND_MODE_NOT_EQUAL : NV50_3D_COND_MODE_ALWAYS;
         } else {
            cond = wait ? NV50_3D_COND_MODE_EQUAL : NV50_3D_COND_MODE_ALWAYS;
         }
         break;
      default:
         assert(!"render condition query not a predicate");
         cond = NV50_3D_COND_MODE_ALWAYS;
         break;
      }
   }

   nv50->cond_query = pq;
   nv50->cond_cond = condition;
   nv50->cond_condmode = cond;
   nv50->cond_mode = mode;

   if (!pq) {
      PUSH_SPACE(push, 2);
      BEGIN_NV04(push, NV50_3D(COND_MODE), 1);
      PUSH_DATA (push, cond);
      return;
   }

   PUSH_SPACE(push, 9);

   if (wait && hq->state != NV50_HW_QUERY_STATE_READY) {
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   PUSH_REFN (push, hq->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
   BEGIN_NV04(push, NV50_3D(COND_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, hq->bo->offset + hq->offset);
   PUSH_DATA (push, hq->bo->offset + hq->offset);
   PUSH_DATA (push, cond);

   BEGIN_NV04(push, NV50_2D(COND_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, hq->bo->offset + hq->offset);
   PUSH_DATA (push, hq->bo->offset + hq->offset);
}
Example #14
static boolean
nv50_begin_query(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nv50_query *q = nv50_query(pq);
   return q->funcs->begin_query(nv50_context(pipe), q);
}
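
This wrapper is one hook in the standard Gallium query lifecycle. A minimal
consumer-side sketch, assuming a struct pipe_context *pipe and entry-point
signatures of roughly this era (they vary across Gallium revisions):

struct pipe_query *q =
   pipe->create_query(pipe, PIPE_QUERY_OCCLUSION_COUNTER, 0);
pipe->begin_query(pipe, q);
/* ... issue the draws to be counted ... */
pipe->end_query(pipe, q);

union pipe_query_result result;
if (pipe->get_query_result(pipe, q, TRUE /* wait */, &result)) {
   /* result.u64 now holds the sample count */
}
pipe->destroy_query(pipe, q);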
Example #15
static void
nv50_destroy_query(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nv50_query *q = nv50_query(pq);
   q->funcs->destroy_query(nv50_context(pipe), q);
}
Example #16
static int
nv50_invalidate_resource_storage(struct nouveau_context *ctx,
                                 struct pipe_resource *res,
                                 int ref)
{
   struct nv50_context *nv50 = nv50_context(&ctx->pipe);
   unsigned s, i;

   if (res->bind & PIPE_BIND_RENDER_TARGET) {
      for (i = 0; i < nv50->framebuffer.nr_cbufs; ++i) {
         if (nv50->framebuffer.cbufs[i] &&
             nv50->framebuffer.cbufs[i]->texture == res) {
            nv50->dirty |= NV50_NEW_FRAMEBUFFER;
            nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_FB);
            if (!--ref)
               return ref;
         }
      }
   }
   if (res->bind & PIPE_BIND_DEPTH_STENCIL) {
      if (nv50->framebuffer.zsbuf &&
          nv50->framebuffer.zsbuf->texture == res) {
         nv50->dirty |= NV50_NEW_FRAMEBUFFER;
         nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_FB);
         if (!--ref)
            return ref;
      }
   }

   if (res->bind & PIPE_BIND_VERTEX_BUFFER) {
      for (i = 0; i < nv50->num_vtxbufs; ++i) {
         if (nv50->vtxbuf[i].buffer == res) {
            nv50->dirty |= NV50_NEW_ARRAYS;
            nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_VERTEX);
            if (!--ref)
               return ref;
         }
      }
   }
   if (res->bind & PIPE_BIND_INDEX_BUFFER) {
      if (nv50->idxbuf.buffer == res)
         if (!--ref)
            return ref;
   }

   if (res->bind & PIPE_BIND_SAMPLER_VIEW) {
      for (s = 0; s < 5; ++s) {
         for (i = 0; i < nv50->num_textures[s]; ++i) {
            if (nv50->textures[s][i] &&
                nv50->textures[s][i]->texture == res) {
               nv50->dirty |= NV50_NEW_TEXTURES;
               nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TEXTURES);
               if (!--ref)
                  return ref;
            }
         }
      }
   }

   if (res->bind & PIPE_BIND_CONSTANT_BUFFER) {
      for (s = 0; s < 5; ++s) {
         /* scan bound constant buffers, not vertex buffers */
         for (i = 0; i < NV50_MAX_PIPE_CONSTBUFS; ++i) {
            if (!nv50->constbuf[s][i].user &&
                nv50->constbuf[s][i].u.buf == res) {
               nv50->dirty |= NV50_NEW_CONSTBUF;
               nv50->constbuf_dirty[s] |= 1 << i;
               nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_CB(s, i));
               if (!--ref)
                  return ref;
            }
         }
      }
   }

   return ref;
}
Example #17
static boolean
nv50_query_begin(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q = nv50_query(pq);

   /* For occlusion queries we have to change the storage, because a previous
    * query might set the initial render condition to FALSE even *after* we re-
    * initialized it to TRUE.
    */
   if (q->type == PIPE_QUERY_OCCLUSION_COUNTER) {
      q->offset += 32;
      q->data += 32 / sizeof(*q->data);
      if (q->offset - q->base == NV50_QUERY_ALLOC_SPACE)
         nv50_query_allocate(nv50, q, NV50_QUERY_ALLOC_SPACE);

      /* XXX: can we do this with the GPU, and sync with respect to a previous
       *  query ?
       */
      q->data[0] = q->sequence; /* initialize sequence */
      q->data[1] = 1; /* initial render condition = TRUE */
      q->data[4] = q->sequence + 1; /* for comparison COND_MODE */
      q->data[5] = 0;
   }
   if (!q->is64bit)
      q->data[0] = q->sequence++; /* the previously used one */

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      PUSH_SPACE(push, 4);
      BEGIN_NV04(push, NV50_3D(COUNTER_RESET), 1);
      PUSH_DATA (push, NV50_3D_COUNTER_RESET_SAMPLECNT);
      BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
      PUSH_DATA (push, 1);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      nv50_query_get(push, q, 0x10, 0x06805002);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      nv50_query_get(push, q, 0x10, 0x05805002);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      nv50_query_get(push, q, 0x20, 0x05805002);
      nv50_query_get(push, q, 0x30, 0x06805002);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      nv50_query_get(push, q, 0x80, 0x00801002); /* VFETCH, VERTICES */
      nv50_query_get(push, q, 0x90, 0x01801002); /* VFETCH, PRIMS */
      nv50_query_get(push, q, 0xa0, 0x02802002); /* VP, LAUNCHES */
      nv50_query_get(push, q, 0xb0, 0x03806002); /* GP, LAUNCHES */
      nv50_query_get(push, q, 0xc0, 0x04806002); /* GP, PRIMS_OUT */
      nv50_query_get(push, q, 0xd0, 0x07804002); /* RAST, PRIMS_IN */
      nv50_query_get(push, q, 0xe0, 0x08804002); /* RAST, PRIMS_OUT */
      nv50_query_get(push, q, 0xf0, 0x0980a002); /* ROP, PIXELS */
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      nv50_query_get(push, q, 0x10, 0x00005002);
      break;
   default:
      break;
   }
   q->ready = FALSE;
   return TRUE;
}
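
Each begin of an occlusion query advances the query's slot by 32 bytes inside
its buffer allocation, so a still-pending earlier result is never overwritten;
once the allocation is exhausted a fresh one is requested. A sketch of that
rotation, with a stand-in constant for NV50_QUERY_ALLOC_SPACE:

#include <stdbool.h>

#define SLOT_SIZE   32
#define ALLOC_SPACE 4096 /* stand-in; the driver uses its own constant */

/* Advance the write cursor; returns true when the caller must grab
 * fresh backing storage (as nv50_query_allocate does above). */
static bool
advance_slot(unsigned *offset_from_base)
{
   *offset_from_base += SLOT_SIZE;
   return *offset_from_base == ALLOC_SPACE;
}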
Example #18
static void *
nv50_depth_stencil_alpha_state_create(struct pipe_context *pipe,
                                      const struct pipe_depth_stencil_alpha_state *cso)
{
    struct nouveau_grobj *tesla = nv50_context(pipe)->screen->tesla;
    struct nv50_zsa_stateobj *zsa = CALLOC_STRUCT(nv50_zsa_stateobj);
    struct nouveau_stateobj *so = so_new(9, 21, 0);

    so_method(so, tesla, NV50TCL_DEPTH_WRITE_ENABLE, 1);
    so_data  (so, cso->depth.writemask ? 1 : 0);
    if (cso->depth.enabled) {
        so_method(so, tesla, NV50TCL_DEPTH_TEST_ENABLE, 1);
        so_data  (so, 1);
        so_method(so, tesla, NV50TCL_DEPTH_TEST_FUNC, 1);
        so_data  (so, nvgl_comparison_op(cso->depth.func));
    } else {
        so_method(so, tesla, NV50TCL_DEPTH_TEST_ENABLE, 1);
        so_data  (so, 0);
    }

    if (cso->stencil[0].enabled) {
        so_method(so, tesla, NV50TCL_STENCIL_FRONT_ENABLE, 5);
        so_data  (so, 1);
        so_data  (so, nvgl_stencil_op(cso->stencil[0].fail_op));
        so_data  (so, nvgl_stencil_op(cso->stencil[0].zfail_op));
        so_data  (so, nvgl_stencil_op(cso->stencil[0].zpass_op));
        so_data  (so, nvgl_comparison_op(cso->stencil[0].func));
        so_method(so, tesla, NV50TCL_STENCIL_FRONT_MASK, 2);
        so_data  (so, cso->stencil[0].writemask);
        so_data  (so, cso->stencil[0].valuemask);
    } else {
        so_method(so, tesla, NV50TCL_STENCIL_FRONT_ENABLE, 1);
        so_data  (so, 0);
    }

    if (cso->stencil[1].enabled) {
        so_method(so, tesla, NV50TCL_STENCIL_BACK_ENABLE, 5);
        so_data  (so, 1);
        so_data  (so, nvgl_stencil_op(cso->stencil[1].fail_op));
        so_data  (so, nvgl_stencil_op(cso->stencil[1].zfail_op));
        so_data  (so, nvgl_stencil_op(cso->stencil[1].zpass_op));
        so_data  (so, nvgl_comparison_op(cso->stencil[1].func));
        so_method(so, tesla, NV50TCL_STENCIL_BACK_MASK, 2);
        so_data  (so, cso->stencil[1].writemask);
        so_data  (so, cso->stencil[1].valuemask);
    } else {
        so_method(so, tesla, NV50TCL_STENCIL_BACK_ENABLE, 1);
        so_data  (so, 0);
    }

    if (cso->alpha.enabled) {
        so_method(so, tesla, NV50TCL_ALPHA_TEST_ENABLE, 1);
        so_data  (so, 1);
        so_method(so, tesla, NV50TCL_ALPHA_TEST_REF, 2);
        so_data  (so, fui(cso->alpha.ref_value));
        so_data  (so, nvgl_comparison_op(cso->alpha.func));
    } else {
        so_method(so, tesla, NV50TCL_ALPHA_TEST_ENABLE, 1);
        so_data  (so, 0);
    }

    zsa->pipe = *cso;
    so_ref(so, &zsa->so);
    so_ref(NULL, &so);
    return (void *)zsa;
}
Example #19
static void
nv50_render_condition(struct pipe_context *pipe,
                      struct pipe_query *pq,
                      boolean condition, uint mode)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q;
   uint32_t cond;
   boolean wait =
      mode != PIPE_RENDER_COND_NO_WAIT &&
      mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

   if (!pq) {
      cond = NV50_3D_COND_MODE_ALWAYS;
   }
   else {
      q = nv50_query(pq);
      /* NOTE: comparison of 2 queries only works if both have completed */
      switch (q->type) {
      case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
         cond = condition ? NV50_3D_COND_MODE_EQUAL :
                            NV50_3D_COND_MODE_NOT_EQUAL;
         wait = TRUE;
         break;
      case PIPE_QUERY_OCCLUSION_COUNTER:
      case PIPE_QUERY_OCCLUSION_PREDICATE:
         if (likely(!condition)) {
            /* XXX: Placeholder, handle nesting here if available */
            if (unlikely(false))
               cond = wait ? NV50_3D_COND_MODE_NOT_EQUAL :
                             NV50_3D_COND_MODE_ALWAYS;
            else
               cond = NV50_3D_COND_MODE_RES_NON_ZERO;
         } else {
            cond = wait ? NV50_3D_COND_MODE_EQUAL : NV50_3D_COND_MODE_ALWAYS;
         }
         break;
      default:
         assert(!"render condition query not a predicate");
         cond = NV50_3D_COND_MODE_ALWAYS;
         break;
      }
   }

   nv50->cond_query = pq;
   nv50->cond_cond = condition;
   nv50->cond_condmode = cond;
   nv50->cond_mode = mode;

   if (!pq) {
      PUSH_SPACE(push, 2);
      BEGIN_NV04(push, NV50_3D(COND_MODE), 1);
      PUSH_DATA (push, cond);
      return;
   }

   PUSH_SPACE(push, 9);

   if (wait) {
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
   BEGIN_NV04(push, NV50_3D(COND_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, q->bo->offset + q->offset);
   PUSH_DATA (push, q->bo->offset + q->offset);
   PUSH_DATA (push, cond);

   BEGIN_NV04(push, NV50_2D(COND_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, q->bo->offset + q->offset);
   PUSH_DATA (push, q->bo->offset + q->offset);
}
Example #20
static void *
nv50_blend_state_create(struct pipe_context *pipe,
                        const struct pipe_blend_state *cso)
{
    struct nouveau_stateobj *so = so_new(5, 24, 0);
    struct nouveau_grobj *tesla = nv50_context(pipe)->screen->tesla;
    struct nv50_blend_stateobj *bso = CALLOC_STRUCT(nv50_blend_stateobj);
    unsigned i, blend_enabled = 0;

    /*XXX ignored:
     * 	- dither
     */

    so_method(so, tesla, NV50TCL_BLEND_ENABLE(0), 8);
    if (cso->independent_blend_enable) {
        for (i = 0; i < 8; ++i) {
            so_data(so, cso->rt[i].blend_enable);
            if (cso->rt[i].blend_enable)
                blend_enabled = 1;
        }
    } else if (cso->rt[0].blend_enable) {
        blend_enabled = 1;
        for (i = 0; i < 8; i++)
            so_data(so, 1);
    } else {
        for (i = 0; i < 8; i++)
            so_data(so, 0);
    }
    if (blend_enabled) {
        so_method(so, tesla, NV50TCL_BLEND_EQUATION_RGB, 5);
        so_data  (so, nvgl_blend_eqn(cso->rt[0].rgb_func));
        so_data  (so, 0x4000 | nvgl_blend_func(cso->rt[0].rgb_src_factor));
        so_data  (so, 0x4000 | nvgl_blend_func(cso->rt[0].rgb_dst_factor));
        so_data  (so, nvgl_blend_eqn(cso->rt[0].alpha_func));
        so_data  (so, 0x4000 | nvgl_blend_func(cso->rt[0].alpha_src_factor));
        so_method(so, tesla, NV50TCL_BLEND_FUNC_DST_ALPHA, 1);
        so_data  (so, 0x4000 | nvgl_blend_func(cso->rt[0].alpha_dst_factor));
    }

    if (!cso->logicop_enable) {
        so_method(so, tesla, NV50TCL_LOGIC_OP_ENABLE, 1);
        so_data  (so, 0);
    } else {
        so_method(so, tesla, NV50TCL_LOGIC_OP_ENABLE, 2);
        so_data  (so, 1);
        so_data  (so, nvgl_logicop_func(cso->logicop_func));
    }

    so_method(so, tesla, NV50TCL_COLOR_MASK(0), 8);
    if (cso->independent_blend_enable)
        for (i = 0; i < 8; ++i)
            so_data(so, nv50_colormask(cso->rt[i].colormask));
    else {
        uint32_t cmask = nv50_colormask(cso->rt[0].colormask);
        for (i = 0; i < 8; i++)
            so_data(so, cmask);
    }

    bso->pipe = *cso;
    so_ref(so, &bso->so);
    so_ref(NULL, &so);
    return (void *)bso;
}
Example #21
static void *
nv50_blend_state_create(struct pipe_context *pipe,
                        const struct pipe_blend_state *cso)
{
   struct nv50_blend_stateobj *so = CALLOC_STRUCT(nv50_blend_stateobj);
   int i;
   boolean emit_common_func = cso->rt[0].blend_enable;
   uint32_t ms;

   if (nv50_context(pipe)->screen->tesla->oclass >= NVA3_3D_CLASS) {
      SB_BEGIN_3D(so, BLEND_INDEPENDENT, 1);
      SB_DATA    (so, cso->independent_blend_enable);
   }

   so->pipe = *cso;

   SB_BEGIN_3D(so, COLOR_MASK_COMMON, 1);
   SB_DATA    (so, !cso->independent_blend_enable);

   SB_BEGIN_3D(so, BLEND_ENABLE_COMMON, 1);
   SB_DATA    (so, !cso->independent_blend_enable);

   if (cso->independent_blend_enable) {
      SB_BEGIN_3D(so, BLEND_ENABLE(0), 8);
      for (i = 0; i < 8; ++i) {
         SB_DATA(so, cso->rt[i].blend_enable);
         if (cso->rt[i].blend_enable)
            emit_common_func = TRUE;
      }

      if (nv50_context(pipe)->screen->tesla->oclass >= NVA3_3D_CLASS) {
         emit_common_func = FALSE;

         for (i = 0; i < 8; ++i) {
            if (!cso->rt[i].blend_enable)
               continue;
            SB_BEGIN_3D_(so, NVA3_3D_IBLEND_EQUATION_RGB(i), 6);
            SB_DATA     (so, nvgl_blend_eqn(cso->rt[i].rgb_func));
            SB_DATA     (so, nv50_blend_fac(cso->rt[i].rgb_src_factor));
            SB_DATA     (so, nv50_blend_fac(cso->rt[i].rgb_dst_factor));
            SB_DATA     (so, nvgl_blend_eqn(cso->rt[i].alpha_func));
            SB_DATA     (so, nv50_blend_fac(cso->rt[i].alpha_src_factor));
            SB_DATA     (so, nv50_blend_fac(cso->rt[i].alpha_dst_factor));
         }
      }
   } else {
      SB_BEGIN_3D(so, BLEND_ENABLE(0), 1);
      SB_DATA    (so, cso->rt[0].blend_enable);
   }

   if (emit_common_func) {
      SB_BEGIN_3D(so, BLEND_EQUATION_RGB, 5);
      SB_DATA    (so, nvgl_blend_eqn(cso->rt[0].rgb_func));
      SB_DATA    (so, nv50_blend_fac(cso->rt[0].rgb_src_factor));
      SB_DATA    (so, nv50_blend_fac(cso->rt[0].rgb_dst_factor));
      SB_DATA    (so, nvgl_blend_eqn(cso->rt[0].alpha_func));
      SB_DATA    (so, nv50_blend_fac(cso->rt[0].alpha_src_factor));
      SB_BEGIN_3D(so, BLEND_FUNC_DST_ALPHA, 1);
      SB_DATA    (so, nv50_blend_fac(cso->rt[0].alpha_dst_factor));
   }

   if (cso->logicop_enable) {
      SB_BEGIN_3D(so, LOGIC_OP_ENABLE, 2);
      SB_DATA    (so, 1);
      SB_DATA    (so, nvgl_logicop_func(cso->logicop_func));
   } else {
      SB_BEGIN_3D(so, LOGIC_OP_ENABLE, 1);
      SB_DATA    (so, 0);
   }

   if (cso->independent_blend_enable) {
      SB_BEGIN_3D(so, COLOR_MASK(0), 8);
      for (i = 0; i < 8; ++i)
         SB_DATA(so, nv50_colormask(cso->rt[i].colormask));
   } else {
      SB_BEGIN_3D(so, COLOR_MASK(0), 1);
      SB_DATA    (so, nv50_colormask(cso->rt[0].colormask));
   }

   ms = 0;
   if (cso->alpha_to_coverage)
      ms |= NV50_3D_MULTISAMPLE_CTRL_ALPHA_TO_COVERAGE;
   if (cso->alpha_to_one)
      ms |= NV50_3D_MULTISAMPLE_CTRL_ALPHA_TO_ONE;

   SB_BEGIN_3D(so, MULTISAMPLE_CTRL, 1);
   SB_DATA    (so, ms);

   assert(so->size <= (sizeof(so->state) / sizeof(so->state[0])));
   return so;
}
Example #22
static void *
nv50_blend_state_create(struct pipe_context *pipe,
                        const struct pipe_blend_state *cso)
{
   struct nv50_blend_stateobj *so = CALLOC_STRUCT(nv50_blend_stateobj);
   int i;
   bool emit_common_func = cso->rt[0].blend_enable;

   if (nv50_context(pipe)->screen->tesla->oclass >= NVA3_3D_CLASS) {
      SB_BEGIN_3D(so, BLEND_INDEPENDENT, 1);
      SB_DATA    (so, cso->independent_blend_enable);
   }

   so->pipe = *cso;

   SB_BEGIN_3D(so, COLOR_MASK_COMMON, 1);
   SB_DATA    (so, !cso->independent_blend_enable);

   SB_BEGIN_3D(so, BLEND_ENABLE_COMMON, 1);
   SB_DATA    (so, !cso->independent_blend_enable);

   if (cso->independent_blend_enable) {
      SB_BEGIN_3D(so, BLEND_ENABLE(0), 8);
      for (i = 0; i < 8; ++i) {
         SB_DATA(so, cso->rt[i].blend_enable);
         if (cso->rt[i].blend_enable)
            emit_common_func = true;
      }

      if (nv50_context(pipe)->screen->tesla->oclass >= NVA3_3D_CLASS) {
         emit_common_func = false;

         for (i = 0; i < 8; ++i) {
            if (!cso->rt[i].blend_enable)
               continue;
            SB_BEGIN_3D_(so, NVA3_3D_IBLEND_EQUATION_RGB(i), 6);
            SB_DATA     (so, nvgl_blend_eqn(cso->rt[i].rgb_func));
            SB_DATA     (so, nv50_blend_fac(cso->rt[i].rgb_src_factor));
            SB_DATA     (so, nv50_blend_fac(cso->rt[i].rgb_dst_factor));
            SB_DATA     (so, nvgl_blend_eqn(cso->rt[i].alpha_func));
            SB_DATA     (so, nv50_blend_fac(cso->rt[i].alpha_src_factor));
            SB_DATA     (so, nv50_blend_fac(cso->rt[i].alpha_dst_factor));
         }
      }
   } else {
      SB_BEGIN_3D(so, BLEND_ENABLE(0), 1);
      SB_DATA    (so, cso->rt[0].blend_enable);
   }

   if (emit_common_func) {
      SB_BEGIN_3D(so, BLEND_EQUATION_RGB, 5);
      SB_DATA    (so, nvgl_blend_eqn(cso->rt[0].rgb_func));
      SB_DATA    (so, nv50_blend_fac(cso->rt[0].rgb_src_factor));
      SB_DATA    (so, nv50_blend_fac(cso->rt[0].rgb_dst_factor));
      SB_DATA    (so, nvgl_blend_eqn(cso->rt[0].alpha_func));
      SB_DATA    (so, nv50_blend_fac(cso->rt[0].alpha_src_factor));
      SB_BEGIN_3D(so, BLEND_FUNC_DST_ALPHA, 1);
      SB_DATA    (so, nv50_blend_fac(cso->rt[0].alpha_dst_factor));
   }

   if (cso->logicop_enable) {
      SB_BEGIN_3D(so, LOGIC_OP_ENABLE, 2);
      SB_DATA    (so, 1);
      SB_DATA    (so, nvgl_logicop_func(cso->logicop_func));
   } else {
      SB_BEGIN_3D(so, LOGIC_OP_ENABLE, 1);
      SB_DATA    (so, 0);
   }

   if (cso->independent_blend_enable) {
      SB_BEGIN_3D(so, COLOR_MASK(0), 8);
      for (i = 0; i < 8; ++i)
         SB_DATA(so, nv50_colormask(cso->rt[i].colormask));
   } else {
      SB_BEGIN_3D(so, COLOR_MASK(0), 1);
      SB_DATA    (so, nv50_colormask(cso->rt[0].colormask));
   }

   assert(so->size <= ARRAY_SIZE(so->state));
   return so;
}
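
All the blend variants above funnel the Gallium per-component write mask
through nv50_colormask(). Given the hardware's nibble-per-component
COLOR_MASK layout, a plausible definition looks like the sketch below; it is
my reading, not necessarily the driver's exact helper:

/* Pack PIPE_MASK_R/G/B/A bits into the NV50 COLOR_MASK word, one
 * nibble per component: R at bit 0, G at 4, B at 8, A at 12. */
static uint32_t
colormask_to_nv50(unsigned mask)
{
   uint32_t rgba = 0;
   if (mask & PIPE_MASK_R) rgba |= 0x0001;
   if (mask & PIPE_MASK_G) rgba |= 0x0010;
   if (mask & PIPE_MASK_B) rgba |= 0x0100;
   if (mask & PIPE_MASK_A) rgba |= 0x1000;
   return rgba;
}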
Example #23
static void
nv50_resource_copy_region(struct pipe_context *pipe,
                          struct pipe_resource *dst, unsigned dst_level,
                          unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *src, unsigned src_level,
                          const struct pipe_box *src_box)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   int ret;
   boolean m2mf;
   unsigned dst_layer = dstz, src_layer = src_box->z;

   /* Fallback for buffers. */
   if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
      util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz,
                                src, src_level, src_box);
      return;
   }

   /* 0 and 1 are equal, only supporting 0/1, 2, 4 and 8 */
   assert((src->nr_samples | 1) == (dst->nr_samples | 1));

   m2mf = (src->format == dst->format) ||
      (util_format_get_blocksizebits(src->format) ==
       util_format_get_blocksizebits(dst->format));

   nv04_resource(dst)->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;

   if (m2mf) {
      struct nv50_m2mf_rect drect, srect;
      unsigned i;
      unsigned nx = util_format_get_nblocksx(src->format, src_box->width);
      unsigned ny = util_format_get_nblocksy(src->format, src_box->height);

      nv50_m2mf_rect_setup(&drect, dst, dst_level, dstx, dsty, dstz);
      nv50_m2mf_rect_setup(&srect, src, src_level,
                           src_box->x, src_box->y, src_box->z);

      for (i = 0; i < src_box->depth; ++i) {
         nv50_m2mf_transfer_rect(nv50, &drect, &srect, nx, ny);

         if (nv50_miptree(dst)->layout_3d)
            drect.z++;
         else
            drect.base += nv50_miptree(dst)->layer_stride;

         if (nv50_miptree(src)->layout_3d)
            srect.z++;
         else
            srect.base += nv50_miptree(src)->layer_stride;
      }
      return;
   }

   assert((src->format == dst->format) ||
          (nv50_2d_format_faithful(src->format) &&
           nv50_2d_format_faithful(dst->format)));

   BCTX_REFN(nv50->bufctx, 2D, nv04_resource(src), RD);
   BCTX_REFN(nv50->bufctx, 2D, nv04_resource(dst), WR);
   nouveau_pushbuf_bufctx(nv50->base.pushbuf, nv50->bufctx);
   nouveau_pushbuf_validate(nv50->base.pushbuf);

   for (; dst_layer < dstz + src_box->depth; ++dst_layer, ++src_layer) {
      ret = nv50_2d_texture_do_copy(nv50->base.pushbuf,
                                    nv50_miptree(dst), dst_level,
                                    dstx, dsty, dst_layer,
                                    nv50_miptree(src), src_level,
                                    src_box->x, src_box->y, src_layer,
                                    src_box->width, src_box->height);
      if (ret)
         break;
   }
   nouveau_bufctx_reset(nv50->bufctx, NV50_BIND_2D);
}
Example #24
static void
nv50_query_end(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q = nv50_query(pq);

   q->state = NV50_QUERY_STATE_ENDED;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      nv50_query_get(push, q, 0, 0x0100f002);
      if (--nv50->screen->num_occlusion_queries_active == 0) {
         PUSH_SPACE(push, 2);
         BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
         PUSH_DATA (push, 0);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      nv50_query_get(push, q, 0, 0x06805002);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      nv50_query_get(push, q, 0, 0x05805002);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      nv50_query_get(push, q, 0x00, 0x05805002);
      nv50_query_get(push, q, 0x10, 0x06805002);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      nv50_query_get(push, q, 0x00, 0x00801002); /* VFETCH, VERTICES */
      nv50_query_get(push, q, 0x10, 0x01801002); /* VFETCH, PRIMS */
      nv50_query_get(push, q, 0x20, 0x02802002); /* VP, LAUNCHES */
      nv50_query_get(push, q, 0x30, 0x03806002); /* GP, LAUNCHES */
      nv50_query_get(push, q, 0x40, 0x04806002); /* GP, PRIMS_OUT */
      nv50_query_get(push, q, 0x50, 0x07804002); /* RAST, PRIMS_IN */
      nv50_query_get(push, q, 0x60, 0x08804002); /* RAST, PRIMS_OUT */
      nv50_query_get(push, q, 0x70, 0x0980a002); /* ROP, PIXELS */
      break;
   case PIPE_QUERY_TIMESTAMP:
      q->sequence++;
      /* fall through */
   case PIPE_QUERY_TIME_ELAPSED:
      nv50_query_get(push, q, 0, 0x00005002);
      break;
   case PIPE_QUERY_GPU_FINISHED:
      q->sequence++;
      nv50_query_get(push, q, 0, 0x1000f010);
      break;
   case NVA0_QUERY_STREAM_OUTPUT_BUFFER_OFFSET:
      q->sequence++;
      nv50_query_get(push, q, 0, 0x0d005002 | (q->index << 5));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      /* This query is not issued on GPU because disjoint is forced to false */
      q->state = NV50_QUERY_STATE_READY;
      break;
   default:
      assert(0);
      break;
   }

   if (q->is64bit)
      nouveau_fence_ref(nv50->screen->base.fence.current, &q->fence);
}