Example No. 1
static void
ilo_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   int vs_scratch_size, gs_scratch_size, fs_scratch_size;

   if (ilo_debug & ILO_DEBUG_DRAW) {
      if (info->indexed) {
         ilo_printf("indexed draw %s: "
               "index start %d, count %d, vertex range [%d, %d]\n",
               u_prim_name(info->mode), info->start, info->count,
               info->min_index, info->max_index);
      }
      else {
         ilo_printf("draw %s: vertex start %d, count %d\n",
               u_prim_name(info->mode), info->start, info->count);
      }

      ilo_state_vector_dump_dirty(&ilo->state_vector);
   }

   if (ilo_skip_rendering(ilo))
      return;

   if (info->primitive_restart && info->indexed &&
       draw_vbo_need_sw_restart(ilo, info)) {
      draw_vbo_with_sw_restart(ilo, info);
      return;
   }

   /* finalize the state vector for this draw before emitting any commands */
   ilo_finalize_3d_states(ilo, info);

   /* upload kernels */
   ilo_shader_cache_upload(ilo->shader_cache, &ilo->cp->builder);

   /* prepare scratch spaces */
   ilo_shader_cache_get_max_scratch_sizes(ilo->shader_cache,
         &vs_scratch_size, &gs_scratch_size, &fs_scratch_size);
   ilo_render_prepare_scratch_spaces(ilo->render,
         vs_scratch_size, gs_scratch_size, fs_scratch_size);

   /* resolve the framebuffer resources before rendering to them */
   ilo_blit_resolve_framebuffer(ilo);

   /* If draw_vbo ever fails, return immediately. */
   if (!draw_vbo(ilo, &ilo->state_vector))
      return;

   /* clear dirty status */
   ilo->state_vector.dirty = 0x0;

   /* avoid dangling pointer reference */
   ilo->state_vector.draw = NULL;

   if (ilo_debug & ILO_DEBUG_NOCACHE)
      ilo_render_emit_flush(ilo->render);
}
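The scratch-space step above sizes one buffer per shader stage to the maximum requirement across all cached kernels, so no kernel that runs later in the batch forces a reallocation. A minimal sketch of that per-stage reduction, in which every type, field, and name is assumed for illustration rather than taken from the driver:

/* Hypothetical per-stage scratch-size reduction; the types and
 * fields are illustrative stand-ins for the ilo shader cache. */
struct kernel_sketch {
   int stage;          /* vertex, geometry, fragment, ... */
   int scratch_size;   /* bytes of scratch the kernel needs */
};

static int
max_scratch_for_stage(const struct kernel_sketch *kernels, int count,
                      int stage)
{
   int max = 0, i;

   for (i = 0; i < count; i++) {
      if (kernels[i].stage == stage && kernels[i].scratch_size > max)
         max = kernels[i].scratch_size;
   }

   return max;
}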
Example No. 2
static void
ilo_texture_barrier(struct pipe_context *pipe, unsigned flags)
{
   struct ilo_context *ilo = ilo_context(pipe);

   if (ilo->cp->ring != INTEL_RING_RENDER)
      return;

   ilo_render_emit_flush(ilo->render);

   /* don't know why */
   if (ilo_dev_gen(ilo->dev) >= ILO_GEN(7))
      ilo_cp_submit(ilo->cp, "texture barrier");
}
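Example No. 2 flushes the render pipeline so that texture fetches issued afterwards observe earlier rendering. From the state-tracker side the hook is reached through pipe_context; a hedged usage sketch, in which the header paths and the PIPE_TEXTURE_BARRIER_SAMPLER flag follow common Gallium conventions and are assumptions here:

/* Hedged sketch: after drawing into a texture, issue a barrier
 * before sampling from it.  The flag value is an assumption. */
#include "pipe/p_context.h"
#include "pipe/p_defines.h"

static void
sample_after_render(struct pipe_context *pipe)
{
   /* ... draw calls that render into the texture ... */

   pipe->texture_barrier(pipe, PIPE_TEXTURE_BARRIER_SAMPLER);

   /* ... the texture can now be bound for sampling ... */
}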
Example No. 3
void
ilo_render_emit_rectlist_commands_gen6(struct ilo_render *r,
                                       const struct ilo_blitter *blitter,
                                       const struct ilo_render_rectlist_session *session)
{
   ILO_DEV_ASSERT(r->dev, 6, 6);

   gen6_wa_pre_non_pipelined(r);

   gen6_rectlist_wm_multisample(r, blitter);

   gen6_state_base_address(r->builder, true);

   gen6_user_3DSTATE_VERTEX_BUFFERS(r->builder,
         session->vb_start, session->vb_end,
         sizeof(blitter->vertices[0]));

   gen6_3DSTATE_VERTEX_ELEMENTS(r->builder, &blitter->ve);

   gen6_3DSTATE_URB(r->builder, r->dev->urb_size, 0,
         (blitter->ve.count + blitter->ve.prepend_nosrc_cso) * 4 * sizeof(float),
         0);

   /* 3DSTATE_URB workaround */
   if (r->state.gs.active) {
      ilo_render_emit_flush(r);
      r->state.gs.active = false;
   }

   if (blitter->uses &
       (ILO_BLITTER_USE_DSA | ILO_BLITTER_USE_CC)) {
      gen6_3DSTATE_CC_STATE_POINTERS(r->builder, 0,
            r->state.DEPTH_STENCIL_STATE, r->state.COLOR_CALC_STATE);
   }

   gen6_rectlist_vs_to_sf(r, blitter);
   gen6_rectlist_wm(r, blitter);

   if (blitter->uses & ILO_BLITTER_USE_VIEWPORT) {
      gen6_3DSTATE_VIEWPORT_STATE_POINTERS(r->builder,
            0, 0, r->state.CC_VIEWPORT);
   }

   gen6_rectlist_wm_depth(r, blitter);

   gen6_3DSTATE_DRAWING_RECTANGLE(r->builder, 0, 0,
         blitter->fb.width, blitter->fb.height);

   gen6_3DPRIMITIVE(r->builder, &blitter->draw, NULL);
}
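The 3DSTATE_URB call above sizes each VUE to hold one 4-float row per vertex element, including any prepended no-source element. A worked instance of that arithmetic, with both counts assumed purely for illustration:

/* Worked example of the VUE entry size; the two counts are assumed. */
#include <stdio.h>

int main(void)
{
   const int ve_count = 2;           /* vertex elements */
   const int prepend_nosrc_cso = 1;  /* prepended no-source element */
   const int entry_bytes =
      (ve_count + prepend_nosrc_cso) * 4 * sizeof(float);

   printf("VUE entry size: %d bytes\n", entry_bytes);  /* prints 48 */

   return 0;
}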
Example No. 4
void
ilo_draw_rectlist(struct ilo_context *ilo)
{
   int max_len, before_space;
   bool need_flush;

   /* a flush can be needed only when this batch already contains commands */
   need_flush = ilo_builder_batch_used(&ilo->cp->builder);

   ilo_draw_set_owner(ilo);

   max_len = ilo_render_get_rectlist_len(ilo->render, ilo->blitter);
   max_len += ilo_render_get_flush_len(ilo->render) * 2;

   /* submit first if the worst-case length cannot fit in the remaining space */
   if (max_len > ilo_cp_space(ilo->cp)) {
      ilo_cp_submit(ilo->cp, "out of space");
      need_flush = false;
      assert(max_len <= ilo_cp_space(ilo->cp));
   }

   before_space = ilo_cp_space(ilo->cp);

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 313:
    *
    *     "If other rendering operations have preceded this clear, a
    *      PIPE_CONTROL with write cache flush enabled and Z-inhibit
    *      disabled must be issued before the rectangle primitive used for
    *      the depth buffer clear operation."
    *
    * From the Sandy Bridge PRM, volume 2 part 1, page 314:
    *
    *     "Depth buffer clear pass must be followed by a PIPE_CONTROL
    *      command with DEPTH_STALL bit set and Then followed by Depth
    *      FLUSH"
    *
    * But the pipeline has to be flushed both before and after not only
    * because of these workarounds.  We need them for reasons such as
    *
    *  - we may sample from a texture that was rendered to
    *  - we may sample from the fb shortly after
    *
    * Skip checking blitter->op and do the flushes.
    */
   if (need_flush)
      ilo_render_emit_flush(ilo->render);

   while (true) {
      struct ilo_builder_snapshot snapshot;

      ilo_builder_batch_snapshot(&ilo->cp->builder, &snapshot);

      ilo_render_emit_rectlist(ilo->render, ilo->blitter);

      if (!ilo_builder_validate(&ilo->cp->builder, 0, NULL)) {
         ilo_builder_batch_restore(&ilo->cp->builder, &snapshot);

         /* flush and try again */
         if (ilo_builder_batch_used(&ilo->cp->builder)) {
            ilo_cp_submit(ilo->cp, "out of aperture");
            continue;
         }
      }

      break;
   }

   ilo_render_invalidate_hw(ilo->render);

   ilo_render_emit_flush(ilo->render);

   /* sanity check size estimation */
   assert(before_space - ilo_cp_space(ilo->cp) <= max_len);
}
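This example and Example No. 5 below share the same snapshot/validate/retry idiom: snapshot the batch, emit, and if validation fails, roll back, submit whatever was already queued, and emit again into the fresh batch. A distilled, self-contained sketch of the idiom, in which every type and helper is a trivial hypothetical stand-in for the ilo_builder/ilo_cp machinery:

#include <stdbool.h>

/* All names below are illustrative stand-ins, kept trivial so that
 * only the shape of the retry loop matters. */
struct batch { unsigned used; };

static void take_snapshot(const struct batch *b, unsigned *snap) { *snap = b->used; }
static void restore_snapshot(struct batch *b, unsigned snap) { b->used = snap; }
static void emit_commands(struct batch *b) { b->used += 16; }
static bool validate(const struct batch *b) { return b->used <= 4096; }
static void submit(struct batch *b) { b->used = 0; }

static bool
emit_with_retry(struct batch *b)
{
   bool success = true;

   while (true) {
      unsigned snap;

      take_snapshot(b, &snap);
      emit_commands(b);

      if (!validate(b)) {
         restore_snapshot(b, snap);   /* undo the failed emission */

         if (b->used) {               /* other commands are queued: */
            submit(b);                /* flush them and try again   */
            continue;
         }

         success = false;             /* even an empty batch fails */
      }

      break;
   }

   return success;
}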
Example No. 5
static bool
draw_vbo(struct ilo_context *ilo, const struct ilo_state_vector *vec)
{
   bool need_flush = false;
   bool success = true;
   int max_len, before_space;

   /* on Gen7 and Gen7.5, we need SOL_RESET to reset the SO write offsets */
   if (ilo_dev_gen(ilo->dev) >= ILO_GEN(7) &&
       ilo_dev_gen(ilo->dev) <= ILO_GEN(7.5) &&
       (vec->dirty & ILO_DIRTY_SO) && vec->so.enabled &&
       !vec->so.append_bitmask) {
      ilo_cp_submit(ilo->cp, "SOL_RESET");
      ilo_cp_set_one_off_flags(ilo->cp, INTEL_EXEC_GEN7_SOL_RESET);
   }

   if (ilo_builder_batch_used(&ilo->cp->builder)) {
      /*
       * Without a better tracking mechanism, when the framebuffer changes, we
       * have to assume that the old framebuffer may be sampled from.  If that
       * happens in the middle of a batch buffer, we need to insert manual
       * flushes.
       */
      need_flush = (vec->dirty & ILO_DIRTY_FB);

      /* the same applies to SO target changes */
      need_flush |= (vec->dirty & ILO_DIRTY_SO);
   }

   ilo_draw_set_owner(ilo);

   /* make sure there is enough room first */
   max_len = ilo_render_get_draw_len(ilo->render, vec);
   if (need_flush)
      max_len += ilo_render_get_flush_len(ilo->render);

   if (max_len > ilo_cp_space(ilo->cp)) {
      ilo_cp_submit(ilo->cp, "out of space");
      need_flush = false;
      assert(max_len <= ilo_cp_space(ilo->cp));
   }

   /* space available before emission */
   before_space = ilo_cp_space(ilo->cp);

   if (need_flush)
      ilo_render_emit_flush(ilo->render);

   while (true) {
      struct ilo_builder_snapshot snapshot;

      ilo_builder_batch_snapshot(&ilo->cp->builder, &snapshot);

      ilo_render_emit_draw(ilo->render, vec);

      if (!ilo_builder_validate(&ilo->cp->builder, 0, NULL)) {
         ilo_builder_batch_restore(&ilo->cp->builder, &snapshot);

         /* flush and try again */
         if (ilo_builder_batch_used(&ilo->cp->builder)) {
            ilo_cp_submit(ilo->cp, "out of aperture");
            continue;
         }

         success = false;
      }

      break;
   }

   /* sanity check size estimation */
   assert(before_space - ilo_cp_space(ilo->cp) <= max_len);

   return success;
}
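Examples 4 and 5 also bracket emission with the same space-accounting discipline: estimate the worst-case command length up front, submit early if it cannot fit, and assert afterwards that the estimate was never exceeded, which keeps the "out of space" path trustworthy. A minimal sketch of that bracket, with every type and helper a hypothetical stand-in:

#include <assert.h>

/* Illustrative stand-ins for the ilo_cp command parser. */
struct cp_sketch { int space; };

static int  cp_space(const struct cp_sketch *cp)  { return cp->space; }
static void cp_submit(struct cp_sketch *cp)       { cp->space = 4096; }
static void cp_emit(struct cp_sketch *cp)         { cp->space -= 32; }

static void
emit_checked(struct cp_sketch *cp, int max_len)
{
   int before;

   if (max_len > cp_space(cp))
      cp_submit(cp);   /* make room for the worst case first */

   before = cp_space(cp);
   cp_emit(cp);

   /* sanity check the size estimation, as both examples do */
   assert(before - cp_space(cp) <= max_len);
}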
Example No. 6
static void
gen6_draw_common_urb(struct ilo_render *r,
                     const struct ilo_state_vector *vec,
                     struct ilo_render_draw_session *session)
{
   /* 3DSTATE_URB */
   if (DIRTY(VE) || DIRTY(VS) || DIRTY(GS)) {
      const bool gs_active = (vec->gs || (vec->vs &&
               ilo_shader_get_kernel_param(vec->vs, ILO_KERNEL_VS_GEN6_SO)));
      int vs_entry_size, gs_entry_size;
      int vs_total_size, gs_total_size;

      vs_entry_size = (vec->vs) ?
         ilo_shader_get_kernel_param(vec->vs, ILO_KERNEL_OUTPUT_COUNT) : 0;

      /*
       * As indicated by 2e712e41db0c0676e9f30fc73172c0e8de8d84d4, VF and VS
       * share VUE handles.  The VUE allocation size must be large enough to
       * store either VF outputs (number of VERTEX_ELEMENTs) or VS outputs.
       *
       * I am not sure if the PRM explicitly states that VF and VS share VUE
       * handles.  But here is a citation that implies so:
       *
       * From the Sandy Bridge PRM, volume 2 part 1, page 44:
       *
       *     "Once a FF stage that spawn threads has sufficient input to
       *      initiate a thread, it must guarantee that it is safe to request
       *      the thread initiation. For all these FF stages, this check is
       *      based on :
       *
       *      - The availability of output URB entries:
       *        - VS: As the input URB entries are overwritten with the
       *          VS-generated output data, output URB availability isn't a
       *          factor."
       */
      if (vs_entry_size < vec->ve->count + vec->ve->prepend_nosrc_cso)
         vs_entry_size = vec->ve->count + vec->ve->prepend_nosrc_cso;

      gs_entry_size = (vec->gs) ?
         ilo_shader_get_kernel_param(vec->gs, ILO_KERNEL_OUTPUT_COUNT) :
         (gs_active) ? vs_entry_size : 0;

      /* in bytes */
      vs_entry_size *= sizeof(float) * 4;
      gs_entry_size *= sizeof(float) * 4;
      vs_total_size = r->dev->urb_size;

      if (gs_active) {
         vs_total_size /= 2;
         gs_total_size = vs_total_size;
      }
      else {
         gs_total_size = 0;
      }

      gen6_3DSTATE_URB(r->builder, vs_total_size, gs_total_size,
            vs_entry_size, gs_entry_size);

      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 27:
       *
       *     "Because of a urb corruption caused by allocating a previous
       *      gsunit's urb entry to vsunit software is required to send a
       *      "GS NULL Fence" (Send URB fence with VS URB size == 1 and GS URB
       *      size == 0) plus a dummy DRAW call before any case where VS will
       *      be taking over GS URB space."
       */
      if (r->state.gs.active && !gs_active)
         ilo_render_emit_flush(r);

      r->state.gs.active = gs_active;
   }
}
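A worked instance of the sizing above, with all counts assumed for illustration: a VS writing 10 VUE rows while the VF provides 6 vertex elements yields an entry size of max(10, 6) * 4 floats * 4 bytes = 160 bytes, and with a GS active a 64 KB URB is split evenly, 32 KB each:

#include <stdio.h>

int main(void)
{
   /* Assumed inputs, for illustration only */
   const int vs_output_count = 10;             /* VUE rows the VS writes */
   const int ve_count = 5, prepend_nosrc_cso = 1;
   const int urb_size = 64 * 1024;             /* bytes */
   const int gs_active = 1;

   int vs_entry_size = vs_output_count;

   /* VF and VS share VUE handles, so the entry must also fit VF outputs */
   if (vs_entry_size < ve_count + prepend_nosrc_cso)
      vs_entry_size = ve_count + prepend_nosrc_cso;

   printf("entry %d bytes, VS URB %d bytes\n",
          (int) (vs_entry_size * 4 * sizeof(float)),  /* 160   */
          gs_active ? urb_size / 2 : urb_size);       /* 32768 */

   return 0;
}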
Example No. 7
/**
 * Emit PIPE_CONTROLs or MI_STORE_REGISTER_MEMs to store register values.
 */
void
ilo_render_emit_query(struct ilo_render *render,
                      struct ilo_query *q, uint32_t offset)
{
   const uint32_t pipeline_statistics_regs[11] = {
      GEN6_REG_IA_VERTICES_COUNT,
      GEN6_REG_IA_PRIMITIVES_COUNT,
      GEN6_REG_VS_INVOCATION_COUNT,
      GEN6_REG_GS_INVOCATION_COUNT,
      GEN6_REG_GS_PRIMITIVES_COUNT,
      GEN6_REG_CL_INVOCATION_COUNT,
      GEN6_REG_CL_PRIMITIVES_COUNT,
      GEN6_REG_PS_INVOCATION_COUNT,
      (ilo_dev_gen(render->dev) >= ILO_GEN(7)) ?
         GEN7_REG_HS_INVOCATION_COUNT : 0,
      (ilo_dev_gen(render->dev) >= ILO_GEN(7)) ?
         GEN7_REG_DS_INVOCATION_COUNT : 0,
      0,
   };
   const uint32_t primitives_generated_reg =
      (ilo_dev_gen(render->dev) >= ILO_GEN(7) && q->index > 0) ?
      GEN7_REG_SO_PRIM_STORAGE_NEEDED(q->index) :
      GEN6_REG_CL_INVOCATION_COUNT;
   const uint32_t primitives_emitted_reg =
      (ilo_dev_gen(render->dev) >= ILO_GEN(7)) ?
      GEN7_REG_SO_NUM_PRIMS_WRITTEN(q->index) :
      GEN6_REG_SO_NUM_PRIMS_WRITTEN;
   const unsigned batch_used = ilo_builder_batch_used(render->builder);
   const uint32_t *regs;
   int reg_count = 0, i;
   uint32_t pipe_control_dw1 = 0;

   ILO_DEV_ASSERT(render->dev, 6, 8);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      pipe_control_dw1 = GEN6_PIPE_CONTROL_DEPTH_STALL |
                         GEN6_PIPE_CONTROL_WRITE_PS_DEPTH_COUNT;
      break;
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
      pipe_control_dw1 = GEN6_PIPE_CONTROL_WRITE_TIMESTAMP;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      regs = &primitives_generated_reg;
      reg_count = 1;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      regs = &primitives_emitted_reg;
      reg_count = 1;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      regs = pipeline_statistics_regs;
      reg_count = ARRAY_SIZE(pipeline_statistics_regs);
      break;
   default:
      break;
   }

   if (pipe_control_dw1) {
      assert(!reg_count);

      if (ilo_dev_gen(render->dev) == ILO_GEN(6))
         gen6_wa_pre_pipe_control(render, pipe_control_dw1);

      gen6_PIPE_CONTROL(render->builder, pipe_control_dw1, q->bo, offset, 0);

      render->state.current_pipe_control_dw1 |= pipe_control_dw1;
      render->state.deferred_pipe_control_dw1 &= ~pipe_control_dw1;
   } else if (reg_count) {
      ilo_render_emit_flush(render);
   }

   for (i = 0; i < reg_count; i++) {
      if (regs[i]) {
         /* store lower 32 bits */
         gen6_MI_STORE_REGISTER_MEM(render->builder, regs[i], q->bo, offset);
         /* store upper 32 bits */
         gen6_MI_STORE_REGISTER_MEM(render->builder, regs[i] + 4,
               q->bo, offset + 4);
      } else {
         gen6_MI_STORE_DATA_IMM(render->builder, q->bo, offset, 0);
      }

      offset += 8;
   }

   assert(ilo_builder_batch_used(render->builder) <= batch_used +
         ilo_render_get_query_len(render, q->type));
}
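Each MI_STORE_REGISTER_MEM pair above writes one free-running 64-bit counter as two 32-bit halves, 8 bytes apart per register, so a query result is the difference between the values stored at begin and at end. A hedged sketch of the CPU-side readback, assuming (purely for illustration) that the begin snapshot occupies the first reg_count slots of the mapped query buffer and the end snapshot the next reg_count:

#include <stdint.h>

/* Hypothetical readback; the begin/end layout is an assumption. */
static uint64_t
query_delta(const uint64_t *map, int reg_count, int reg_index)
{
   const uint64_t begin = map[reg_index];
   const uint64_t end   = map[reg_count + reg_index];

   return end - begin;   /* counters are free-running: take the delta */
}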