Example #1
/* When the primitive changes, set a state bit and re-validate.  Not
 * the nicest approach; it would be better to make all the programs
 * immune to the active primitive (i.e. cope with all possibilities),
 * but that may not be realistic.
 */
static void brw_set_prim(struct brw_context *brw, int prim)
{
   PRINT("PRIM: %d\n", prim);

   /* Slight optimization to avoid the GS program when not needed:
    */
   if (prim == PIPE_PRIM_QUAD_STRIP &&
       brw->attribs.Raster->flatshade &&
       brw->attribs.Raster->fill_cw == PIPE_POLYGON_MODE_FILL &&
       brw->attribs.Raster->fill_ccw == PIPE_POLYGON_MODE_FILL)
      prim = PIPE_PRIM_TRIANGLE_STRIP;

   if (prim != brw->primitive) {
      brw->primitive = prim;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;

      if (u_reduced_prim(prim) != brw->reduced_primitive) {
         brw->reduced_primitive = u_reduced_prim(prim);
         brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
      }

      brw_validate_state(brw);
   }
}
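For context: every snippet on this page funnels the API primitive through
u_reduced_prim(), which collapses any PIPE_PRIM_* topology into one of three
base classes (points, lines, triangles). A minimal sketch of that mapping,
assuming Gallium's PIPE_PRIM_* enum (the real helper lives in util/u_prim.h):

/* Sketch: collapse a pipe primitive into its reduced class.
 * Assumes the PIPE_PRIM_* values from Gallium's p_defines.h.
 */
static unsigned
u_reduced_prim_sketch(unsigned prim)
{
   switch (prim) {
   case PIPE_PRIM_POINTS:
      return PIPE_PRIM_POINTS;
   case PIPE_PRIM_LINES:
   case PIPE_PRIM_LINE_LOOP:
   case PIPE_PRIM_LINE_STRIP:
   case PIPE_PRIM_LINES_ADJACENCY:
   case PIPE_PRIM_LINE_STRIP_ADJACENCY:
      return PIPE_PRIM_LINES;
   default:
      /* all triangle, quad and polygon topologies */
      return PIPE_PRIM_TRIANGLES;
   }
}

This is also why the QUAD_STRIP-to-TRIANGLE_STRIP substitution in
brw_set_prim() can never change brw->reduced_primitive: both topologies
reduce to triangles.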
Example #2
static void 
vcache_prepare( struct draw_pt_front_end *frontend,
                unsigned prim,
                struct draw_pt_middle_end *middle,
                unsigned opt )
{
   struct vcache_frontend *vcache = (struct vcache_frontend *)frontend;

   if (opt & PT_PIPELINE)
   {
      vcache->base.run = vcache_run_extras;
   }
   else 
   {
      vcache->base.run = vcache_check_run;
   }

   vcache->input_prim = prim;
   vcache->output_prim = u_reduced_prim(prim);

   vcache->middle = middle;
   vcache->opt = opt;

   /* Have to run prepare here, but try to guess a good prim for
    * doing so:
    */
   vcache->middle_prim = (opt & PT_PIPELINE) ? vcache->output_prim : vcache->input_prim;
   middle->prepare( middle, vcache->middle_prim, opt, &vcache->fetch_max );
}
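A worked instance of the guess above, assuming PIPE_PRIM_* semantics:

/* Preparing for PIPE_PRIM_LINE_LOOP with PT_PIPELINE set uses
 * u_reduced_prim(PIPE_PRIM_LINE_LOOP) == PIPE_PRIM_LINES, because the
 * pipeline stages decompose everything to the reduced topology before
 * the middle end sees it; without PT_PIPELINE the middle end consumes
 * the line loop unchanged.
 */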
Example #3
static void
draw_session_prepare(struct ilo_render *render,
                     const struct ilo_state_vector *vec,
                     struct ilo_render_draw_session *session)
{
   memset(session, 0, sizeof(*session));
   session->pipe_dirty = vec->dirty;
   session->reduced_prim = u_reduced_prim(vec->draw->mode);

   if (render->hw_ctx_changed) {
      /* these should be enough to force everything to be uploaded again */
      render->batch_bo_changed = true;
      render->state_bo_changed = true;
      render->instruction_bo_changed = true;

      session->prim_changed = true;

      ilo_state_urb_full_delta(&vec->urb, render->dev, &session->urb_delta);
      ilo_state_vf_full_delta(&vec->ve->vf, render->dev, &session->vf_delta);

      ilo_state_raster_full_delta(&vec->rasterizer->rs, render->dev,
            &session->rs_delta);

      ilo_state_viewport_full_delta(&vec->viewport.vp, render->dev,
            &session->vp_delta);

      ilo_state_cc_full_delta(&vec->blend->cc, render->dev,
            &session->cc_delta);
   } else {
      session->prim_changed =
         (render->state.reduced_prim != session->reduced_prim);

      ilo_state_urb_get_delta(&vec->urb, render->dev,
            &render->state.urb, &session->urb_delta);

      if (vec->dirty & ILO_DIRTY_VE) {
         ilo_state_vf_full_delta(&vec->ve->vf, render->dev,
               &session->vf_delta);
      }

      if (vec->dirty & ILO_DIRTY_RASTERIZER) {
         ilo_state_raster_get_delta(&vec->rasterizer->rs, render->dev,
               &render->state.rs, &session->rs_delta);
      }

      if (vec->dirty & ILO_DIRTY_VIEWPORT) {
         ilo_state_viewport_full_delta(&vec->viewport.vp, render->dev,
               &session->vp_delta);
      }

      if (vec->dirty & ILO_DIRTY_BLEND) {
         ilo_state_cc_get_delta(&vec->blend->cc, render->dev,
               &render->state.cc, &session->cc_delta);
      }
   }
}
Example #4
/*
 * The primitive assembler breaks up adjacency primitives and assembles
 * the base primitives they represent, e.g. vertices forming
 * PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY become vertices forming
 * PIPE_PRIM_TRIANGLES.
 * This is needed because the specification says adjacency primitives
 * are only visible in the geometry shader, so we have to get rid of
 * them so that the rest of the pipeline can process the inputs.
 */
void
draw_prim_assembler_run(struct draw_context *draw,
                        const struct draw_prim_info *input_prims,
                        const struct draw_vertex_info *input_verts,
                        struct draw_prim_info *output_prims,
                        struct draw_vertex_info *output_verts)
{
   struct draw_assembler *asmblr = draw->ia;
   unsigned start, i;
   unsigned assembled_prim = u_reduced_prim(input_prims->prim);
   unsigned max_primitives = u_decomposed_prims_for_vertices(
      input_prims->prim, input_prims->count);
   unsigned max_verts = u_vertices_per_prim(assembled_prim) * max_primitives;

   asmblr->output_prims = output_prims;
   asmblr->output_verts = output_verts;
   asmblr->input_prims = input_prims;
   asmblr->input_verts = input_verts;
   asmblr->needs_primid = needs_primid(asmblr->draw);
   asmblr->primid = 0;
   asmblr->num_prims = 0;

   output_prims->linear = TRUE;
   output_prims->elts = NULL;
   output_prims->start = 0;
   output_prims->prim = assembled_prim;
   output_prims->flags = 0x0;
   output_prims->primitive_lengths = MALLOC(sizeof(unsigned));
   output_prims->primitive_lengths[0] = 0;
   output_prims->primitive_count = 1;

   output_verts->vertex_size = input_verts->vertex_size;
   output_verts->stride = input_verts->stride;
   output_verts->verts = (struct vertex_header*)MALLOC(
      input_verts->vertex_size * max_verts);
   output_verts->count = 0;

   for (start = i = 0; i < input_prims->primitive_count;
        start += input_prims->primitive_lengths[i], i++)
   {
      unsigned count = input_prims->primitive_lengths[i];
      if (input_prims->linear) {
         assembler_run_linear(asmblr, input_prims, input_verts,
                              start, count);
      } else {
         assembler_run_elts(asmblr, input_prims, input_verts,
                            start, count);
      }
   }

   output_prims->primitive_lengths[0] = output_verts->count;
   output_prims->count = output_verts->count;
}
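To make the buffer sizing in draw_prim_assembler_run() concrete, here is a
hypothetical worked instance, assuming the semantics of the u_prim.h helpers:

/* A 10-vertex PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY draw decomposes into
 * (10 - 4) / 2 = 3 base triangles, and each reduced triangle takes 3
 * vertices, so 3 * 3 = 9 output vertices are reserved up front.
 */
unsigned max_prims = u_decomposed_prims_for_vertices(
   PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY, 10);                  /* 3 */
unsigned max_verts = u_vertices_per_prim(PIPE_PRIM_TRIANGLES)
                   * max_prims;                              /* 9 */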
Example #5
static void
sp_vbuf_set_primitive(struct vbuf_render *vbr, unsigned prim)
{
   struct softpipe_vbuf_render *cvbr = softpipe_vbuf_render(vbr);
   struct setup_context *setup_ctx = cvbr->setup;
   
   sp_setup_prepare( setup_ctx );

   cvbr->softpipe->reduced_prim = u_reduced_prim(prim);
   cvbr->prim = prim;
}
Example #6
static boolean
lp_vbuf_set_primitive(struct vbuf_render *vbr, unsigned prim)
{
   struct llvmpipe_vbuf_render *cvbr = llvmpipe_vbuf_render(vbr);
   struct setup_context *setup_ctx = cvbr->setup;
   
   llvmpipe_setup_prepare( setup_ctx );

   cvbr->llvmpipe->reduced_prim = u_reduced_prim(prim);
   cvbr->prim = prim;
   return TRUE;
}
Example #7
/**
 * Returns true if the draw module will inject the frontface
 * info into the outputs.
 *
 * Given the specified primitive and rasterizer state
 * the function will figure out if the draw module
 * will inject the front-face information into shader
 * outputs. This is done to preserve the front-facing
 * info when decomposing primitives into wireframes.
 */
boolean
draw_will_inject_frontface(const struct draw_context *draw)
{
   unsigned reduced_prim = u_reduced_prim(draw->pt.prim);
   const struct pipe_rasterizer_state *rast = draw->rasterizer;

   if (reduced_prim != PIPE_PRIM_TRIANGLES) {
      return FALSE;
   }

   return (rast &&
           (rast->fill_front != PIPE_POLYGON_MODE_FILL ||
            rast->fill_back != PIPE_POLYGON_MODE_FILL));
}
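An illustrative case (the values are hypothetical, not from the draw module's
tests): a PIPE_PRIM_TRIANGLE_FAN drawn with fill_front set to
PIPE_POLYGON_MODE_LINE reduces to PIPE_PRIM_TRIANGLES and is unfilled on one
side, so the function returns TRUE; any point or line topology returns FALSE
regardless of the fill modes, since facing is only defined for triangles.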
Example #8
/**
 * Emit context states and 3DPRIMITIVE.
 */
bool
ilo_3d_pipeline_emit_draw(struct ilo_3d_pipeline *p,
                          const struct ilo_context *ilo,
                          int *prim_generated, int *prim_emitted)
{
   bool success;

   if (ilo->dirty & ILO_DIRTY_SO &&
       ilo->so.enabled && !ilo->so.append_bitmask) {
      /*
       * We keep track of the SVBI in the driver, so that we can restore it
       * when the HW context is invalidated (by another process).  The value
       * needs to be reset when stream output is enabled and the targets are
       * changed.
       */
      p->state.so_num_vertices = 0;

      /* on GEN7+, we need SOL_RESET to reset the SO write offsets */
      if (p->dev->gen >= ILO_GEN(7))
         ilo_cp_set_one_off_flags(p->cp, INTEL_EXEC_GEN7_SOL_RESET);
   }

   while (true) {
      struct ilo_builder_snapshot snapshot;

      /* we will rewind if aperture check below fails */
      ilo_builder_batch_snapshot(&p->cp->builder, &snapshot);

      handle_invalid_batch_bo(p, false);

      /* draw! */
      p->emit_draw(p, ilo);

      if (ilo_builder_validate(&ilo->cp->builder, 0, NULL)) {
         success = true;
         break;
      }

      /* rewind */
      ilo_builder_batch_restore(&p->cp->builder, &snapshot);

      if (ilo_cp_empty(p->cp)) {
         success = false;
         break;
      }
      else {
         /* flush and try again */
         ilo_cp_flush(p->cp, "out of aperture");
      }
   }

   if (success) {
      const int num_verts =
         u_vertices_per_prim(u_reduced_prim(ilo->draw->mode));
      const int max_emit =
         (p->state.so_max_vertices - p->state.so_num_vertices) / num_verts;
      const int generated =
         u_reduced_prims_for_vertices(ilo->draw->mode, ilo->draw->count);
      const int emitted = MIN2(generated, max_emit);

      p->state.so_num_vertices += emitted * num_verts;

      if (prim_generated)
         *prim_generated = generated;

      if (prim_emitted)
         *prim_emitted = emitted;
   }

   p->invalidate_flags = 0x0;

   return success;
}
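The stream-output bookkeeping at the end is easier to follow with numbers
(hypothetical values, assuming the u_prim.h helper semantics):

/* A 10-vertex PIPE_PRIM_TRIANGLE_STRIP reduces to TRIANGLES, so
 * num_verts = 3 and generated = 10 - 2 = 8 triangles.  If the SO
 * buffers only have room for 12 more vertices, then
 * max_emit = 12 / 3 = 4 and emitted = MIN2(8, 4) = 4, and
 * so_num_vertices advances by 4 * 3 = 12: exactly the overflow
 * clamping the MIN2 above encodes.
 */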
Example #9
static void
svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct svga_context *svga = svga_context( pipe );
   unsigned reduced_prim = u_reduced_prim( info->mode );
   unsigned count = info->count;
   enum pipe_error ret = 0;
   boolean needed_swtnl;

   if (!u_trim_pipe_prim( info->mode, &count ))
      return;

   /*
    * Mark currently bound target surfaces as dirty; it doesn't
    * really matter that this is done before drawing.
    *
    * TODO: If we ever normally return something other than true,
    * we should not mark the surfaces dirty in that case.
    */
   svga_mark_surfaces_dirty(svga_context(pipe));

   if (svga->curr.reduced_prim != reduced_prim) {
      svga->curr.reduced_prim = reduced_prim;
      svga->dirty |= SVGA_NEW_REDUCED_PRIMITIVE;
   }
   
   needed_swtnl = svga->state.sw.need_swtnl;

   svga_update_state_retry( svga, SVGA_STATE_NEED_SWTNL );

#ifdef DEBUG
   if (svga->curr.vs->base.id == svga->debug.disable_shader ||
       svga->curr.fs->base.id == svga->debug.disable_shader)
      return;
#endif

   if (svga->state.sw.need_swtnl) {
      if (!needed_swtnl) {
         /*
          * We're switching from HW to SW TNL.  SW TNL will require mapping all
          * currently bound vertex buffers, some of which may already be
          * referenced in the current command buffer as a result of previous HW
          * TNL.  So flush now, to prevent the context from flushing while a
          * referenced vertex buffer is mapped.
          */

         svga_context_flush(svga, NULL);
      }

      /* Avoid leaking the previous hwtnl bias to swtnl */
      svga_hwtnl_set_index_bias( svga->hwtnl, 0 );
      ret = svga_swtnl_draw_vbo( svga, info );
   }
   else {
      if (info->indexed && svga->curr.ib.buffer) {
         unsigned offset;

         assert(svga->curr.ib.offset % svga->curr.ib.index_size == 0);
         offset = svga->curr.ib.offset / svga->curr.ib.index_size;

         ret = retry_draw_range_elements( svga,
                                          svga->curr.ib.buffer,
                                          svga->curr.ib.index_size,
                                          info->index_bias,
                                          info->min_index,
                                          info->max_index,
                                          info->mode,
                                          info->start + offset,
                                          info->count,
                                          info->instance_count,
                                          TRUE );
      }
      else {
         ret = retry_draw_arrays( svga,
                                  info->mode,
                                  info->start,
                                  info->count,
                                  info->instance_count,
                                  TRUE );
      }
   }

   /* XXX: Silence warnings, do something sensible here? */
   (void)ret;

   svga_release_user_upl_buffers( svga );

   if (SVGA_DEBUG & DEBUG_FLUSH) {
      svga_hwtnl_flush_retry( svga );
      svga_context_flush(svga, NULL);
   }
}
Example #10
static void
svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct svga_context *svga = svga_context( pipe );
   unsigned reduced_prim = u_reduced_prim( info->mode );
   unsigned count = info->count;
   enum pipe_error ret = 0;
   boolean needed_swtnl;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DRAWVBO);

   svga->hud.num_draw_calls++;  /* for SVGA_QUERY_NUM_DRAW_CALLS */

   if (u_reduced_prim(info->mode) == PIPE_PRIM_TRIANGLES &&
       svga->curr.rast->templ.cull_face == PIPE_FACE_FRONT_AND_BACK)
      goto done;

   /*
    * Mark currently bound target surfaces as dirty; it doesn't
    * really matter that this is done before drawing.
    *
    * TODO: If we ever normally return something other than true,
    * we should not mark the surfaces dirty in that case.
    */
   svga_mark_surfaces_dirty(svga_context(pipe));

   if (svga->curr.reduced_prim != reduced_prim) {
      svga->curr.reduced_prim = reduced_prim;
      svga->dirty |= SVGA_NEW_REDUCED_PRIMITIVE;
   }

   if (need_fallback_prim_restart(svga, info)) {
      enum pipe_error r;
      r = util_draw_vbo_without_prim_restart(pipe, &svga->curr.ib, info);
      assert(r == PIPE_OK);
      (void) r;
      goto done;
   }

   if (!u_trim_pipe_prim( info->mode, &count ))
      goto done;

   needed_swtnl = svga->state.sw.need_swtnl;

   svga_update_state_retry( svga, SVGA_STATE_NEED_SWTNL );

   if (svga->state.sw.need_swtnl) {
      svga->hud.num_fallbacks++;  /* for SVGA_QUERY_NUM_FALLBACKS */
      if (!needed_swtnl) {
         /*
          * We're switching from HW to SW TNL.  SW TNL will require mapping all
          * currently bound vertex buffers, some of which may already be
          * referenced in the current command buffer as a result of previous HW
          * TNL.  So flush now, to prevent the context from flushing while a
          * referenced vertex buffer is mapped.
          */

         svga_context_flush(svga, NULL);
      }

      /* Avoid leaking the previous hwtnl bias to swtnl */
      svga_hwtnl_set_index_bias( svga->hwtnl, 0 );
      ret = svga_swtnl_draw_vbo( svga, info );
   }
   else {
      if (info->indexed && svga->curr.ib.buffer) {
         unsigned offset;

         assert(svga->curr.ib.offset % svga->curr.ib.index_size == 0);
         offset = svga->curr.ib.offset / svga->curr.ib.index_size;

         ret = retry_draw_range_elements( svga,
                                          svga->curr.ib.buffer,
                                          svga->curr.ib.index_size,
                                          info->index_bias,
                                          info->min_index,
                                          info->max_index,
                                          info->mode,
                                          info->start + offset,
                                          count,
                                          info->start_instance,
                                          info->instance_count,
                                          TRUE );
      }
      else {
         ret = retry_draw_arrays(svga, info->mode, info->start, count,
                                 info->start_instance, info->instance_count,
                                 TRUE);
      }
   }

   /* XXX: Silence warnings, do something sensible here? */
   (void)ret;

   if (SVGA_DEBUG & DEBUG_FLUSH) {
      svga_hwtnl_flush_retry( svga );
      svga_context_flush(svga, NULL);
   }

done:
   SVGA_STATS_TIME_POP(svga_sws(svga));
}
Example #11
/**
 * Draw vertex arrays, with optional indexing.
 * Basically, map the vertex buffers (and drawing surfaces), then hand off
 * the drawing to the 'draw' module.
 */
boolean
llvmpipe_draw_range_elements(struct pipe_context *pipe,
                             struct pipe_buffer *indexBuffer,
                             unsigned indexSize,
                             unsigned min_index,
                             unsigned max_index,
                             unsigned mode, unsigned start, unsigned count)
{
   struct llvmpipe_context *lp = llvmpipe_context(pipe);
   struct draw_context *draw = lp->draw;
   unsigned i;

   lp->reduced_api_prim = u_reduced_prim(mode);

   if (lp->dirty)
      llvmpipe_update_derived( lp );

   llvmpipe_map_transfers(lp);

   /*
    * Map vertex buffers
    */
   for (i = 0; i < lp->num_vertex_buffers; i++) {
      void *buf = llvmpipe_buffer(lp->vertex_buffer[i].buffer)->data;
      draw_set_mapped_vertex_buffer(draw, i, buf);
   }

   /* Map index buffer, if present */
   if (indexBuffer) {
      void *mapped_indexes = llvmpipe_buffer(indexBuffer)->data;
      draw_set_mapped_element_buffer_range(draw, indexSize,
                                           min_index,
                                           max_index,
                                           mapped_indexes);
   }
   else {
      /* no index/element buffer */
      draw_set_mapped_element_buffer_range(draw, 0, start,
                                           start + count - 1, NULL);
   }

   /* draw! */
   draw_arrays(draw, mode, start, count);

   /*
    * unmap vertex/index buffers
    */
   for (i = 0; i < lp->num_vertex_buffers; i++) {
      draw_set_mapped_vertex_buffer(draw, i, NULL);
   }
   if (indexBuffer) {
      draw_set_mapped_element_buffer(draw, 0, NULL);
   }

   /*
    * TODO: Flush only when a user vertex/index buffer is present
    * (or even better, modify draw module to do this
    * internally when this condition is seen?)
    */
   draw_flush(draw);

   /* Note: leave drawing surfaces mapped */

   lp->dirty_render_cache = TRUE;
   
   return TRUE;
}
Example #12
static boolean
svga_draw_range_elements( struct pipe_context *pipe,
                          struct pipe_buffer *index_buffer,
                          unsigned index_size,
                          unsigned min_index,
                          unsigned max_index,
                          unsigned prim, unsigned start, unsigned count)
{
    struct svga_context *svga = svga_context( pipe );
    unsigned reduced_prim = u_reduced_prim(prim);
    enum pipe_error ret = 0;

    if (!u_trim_pipe_prim( prim, &count ))
        return TRUE;

    /*
     * Mark currently bound target surfaces as dirty; it doesn't
     * really matter that this is done before drawing.
     *
     * TODO: If we ever normally return something other than true,
     * we should not mark the surfaces dirty in that case.
     */
    svga_mark_surfaces_dirty(svga_context(pipe));

    if (svga->curr.reduced_prim != reduced_prim) {
        svga->curr.reduced_prim = reduced_prim;
        svga->dirty |= SVGA_NEW_REDUCED_PRIMITIVE;
    }

    svga_update_state_retry( svga, SVGA_STATE_NEED_SWTNL );

#ifdef DEBUG
    if (svga->curr.vs->base.id == svga->debug.disable_shader ||
            svga->curr.fs->base.id == svga->debug.disable_shader)
        return 0;
#endif

    if (svga->state.sw.need_swtnl)
    {
        ret = svga_swtnl_draw_range_elements( svga,
                                              index_buffer,
                                              index_size,
                                              min_index, max_index,
                                              prim,
                                              start, count );
    }
    else {
        if (index_buffer) {
            ret = retry_draw_range_elements( svga,
                                             index_buffer,
                                             index_size,
                                             min_index,
                                             max_index,
                                             prim,
                                             start,
                                             count,
                                             TRUE );
        }
        else {
            ret = retry_draw_arrays( svga,
                                     prim,
                                     start,
                                     count,
                                     TRUE );
        }
    }

    if (SVGA_DEBUG & DEBUG_FLUSH) {
        static unsigned id;
        debug_printf("%s %d\n", __FUNCTION__, id++);
        svga_hwtnl_flush_retry( svga );
        svga_context_flush(svga, NULL);
    }

    return ret == PIPE_OK;
}