Example #1
static void
ilo_draw_own_cp(struct ilo_cp *cp, void *data)
{
   struct ilo_context *ilo = data;

   /* multiply by 2 for both resuming and pausing */
   if (ilo_cp_space(ilo->cp) < ilo->draw.cp_owner.reserve * 2) {
      ilo_cp_submit(ilo->cp, "out of space");
      assert(ilo_cp_space(ilo->cp) >= ilo->draw.cp_owner.reserve * 2);
   }

   while (true) {
      struct ilo_builder_snapshot snapshot;
      struct ilo_query *q;

      ilo_builder_batch_snapshot(&ilo->cp->builder, &snapshot);

      /* resume queries */
      LIST_FOR_EACH_ENTRY(q, &ilo->draw.queries, list)
         query_begin_bo(ilo, q);

      if (!ilo_builder_validate(&ilo->cp->builder, 0, NULL)) {
         ilo_builder_batch_restore(&ilo->cp->builder, &snapshot);

         if (ilo_builder_batch_used(&ilo->cp->builder)) {
            ilo_cp_submit(ilo->cp, "out of aperture");
            continue;
         }
      }

      break;
   }

   assert(ilo_cp_space(ilo->cp) >= ilo->draw.cp_owner.reserve);
}
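
The loop above is an instance of a snapshot/validate/retry pattern that
recurs in examples #6 and #7 below.  Distilled into a minimal,
self-contained sketch, where every type and function is a hypothetical
stand-in for the ilo builder API rather than the driver's actual code:

#include <stdbool.h>

/* hypothetical stand-in for the builder's batch state */
struct batch {
   int used;     /* commands currently in the batch */
   int saved;    /* snapshot of 'used' */
   int capacity; /* stand-in for the aperture/space limit */
};

static void snapshot(struct batch *b) { b->saved = b->used; }
static void restore(struct batch *b)  { b->used = b->saved; }
static void emit(struct batch *b)     { b->used += 16; }
static bool validate(const struct batch *b) { return b->used <= b->capacity; }
static void submit(struct batch *b)   { b->used = 0; }

static bool
emit_with_retry(struct batch *b)
{
   while (true) {
      snapshot(b);
      emit(b);

      if (!validate(b)) {
         /* roll back the partial emission */
         restore(b);

         /* if the batch already holds other commands, submit them and
          * retry with an empty batch */
         if (b->used) {
            submit(b);
            continue;
         }

         /* validation failed even with an empty batch */
         return false;
      }

      return true;
   }
}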
Example #2
static void
ilo_texture_barrier(struct pipe_context *pipe, unsigned flags)
{
   struct ilo_context *ilo = ilo_context(pipe);

   if (ilo->cp->ring != INTEL_RING_RENDER)
      return;

   ilo_render_emit_flush(ilo->render);

   /* the reason this submit is needed on Gen7+ is unknown */
   if (ilo_dev_gen(ilo->dev) >= ILO_GEN(7))
      ilo_cp_submit(ilo->cp, "texture barrier");
}
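
For context, a hedged sketch of how a state tracker reaches this hook.
The PIPE_TEXTURE_BARRIER_SAMPLER flag is assumed from the Gallium headers
of the same era and is not taken from the example above:

/* hypothetical caller: make a just-rendered texture safe to sample */
static void
barrier_before_sampling(struct pipe_context *pipe)
{
   pipe->texture_barrier(pipe, PIPE_TEXTURE_BARRIER_SAMPLER);
}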
Example #3
File: ilo_context.c  Project: Gnurou/mesa
static void
ilo_flush(struct pipe_context *pipe,
          struct pipe_fence_handle **f,
          unsigned flags)
{
   struct ilo_context *ilo = ilo_context(pipe);

   ilo_cp_submit(ilo->cp,
         (flags & PIPE_FLUSH_END_OF_FRAME) ? "frame end" : "user request");

   if (f) {
      *f = ilo_screen_fence_create(pipe->screen, ilo->cp->last_submitted_bo);
   }
}
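
As a hedged usage sketch, a caller receiving this fence would typically
wait on it and then drop its reference.  The fence_finish() and
fence_reference() calls below follow the pipe_screen interface of roughly
this era; exact signatures vary across Mesa versions, so treat this as an
assumption:

/* hypothetical helper: flush the context and wait for completion */
static void
flush_and_wait(struct pipe_context *pipe)
{
   struct pipe_screen *screen = pipe->screen;
   struct pipe_fence_handle *fence = NULL;

   /* reaches ilo_flush() above and returns a fence for the submission */
   pipe->flush(pipe, &fence, PIPE_FLUSH_END_OF_FRAME);

   if (fence) {
      /* block until the submitted batch retires, then drop the reference */
      screen->fence_finish(screen, fence, PIPE_TIMEOUT_INFINITE);
      screen->fence_reference(screen, &fence, NULL);
   }
}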
Example #4
File: ilo_cp.c  Project: Distrotech/Mesa
/**
 * Set the parser owner.  If this is a new owner or a new ring, the old owner
 * is released and the new owner's own() is called.  The parser may implicitly
 * submit if there is a ring change.
 *
 * own() is called before \p owner owns the parser.  It must make sure there
 * is more space than \p owner->reserve when it returns.  Calling
 * ilo_cp_submit() is allowed.
 *
 * release() will be called after \p owner loses the parser.  That may
 * happen just before the parser submits, so calling ilo_cp_submit() from
 * release() is not allowed.
 */
void
ilo_cp_set_owner(struct ilo_cp *cp, enum intel_ring_type ring,
                 const struct ilo_cp_owner *owner)
{
   if (!owner)
      owner = &ilo_cp_default_owner;

   if (cp->ring != ring) {
      ilo_cp_submit(cp, "ring change");
      cp->ring = ring;
   }

   if (cp->owner != owner) {
      ilo_cp_release_owner(cp);

      owner->own(cp, owner->data);

      assert(ilo_cp_space(cp) >= owner->reserve);
      cp->owner = owner;
   }
}
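
From the contract documented above, a hypothetical owner can be sketched.
The struct ilo_cp_owner layout (own, release, data, reserve) is inferred
from the calls in these examples, not copied from the driver's header, so
treat the field names and the reserve value as assumptions:

#define MY_RESERVE 64 /* hypothetical DWORD estimate */

/* own() runs before we own the parser; it must leave at least 'reserve'
 * space behind and may call ilo_cp_submit() */
static void
my_own(struct ilo_cp *cp, void *data)
{
   if (ilo_cp_space(cp) < MY_RESERVE)
      ilo_cp_submit(cp, "out of space");
}

/* release() runs after we lose the parser, possibly right before a
 * submit, so calling ilo_cp_submit() here is not allowed */
static void
my_release(struct ilo_cp *cp, void *data)
{
}

static const struct ilo_cp_owner my_owner = {
   .own = my_own,
   .release = my_release,
   .data = NULL,
   .reserve = MY_RESERVE,
};

/* later, e.g. at draw time: claim the render ring for this owner */
/*   ilo_cp_set_owner(cp, INTEL_RING_RENDER, &my_owner); */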
Example #5
void
ilo_begin_draw_query(struct ilo_context *ilo, struct ilo_query *q)
{
   ilo_draw_set_owner(ilo);

   /* need to submit first */
   if (!ilo_builder_validate(&ilo->cp->builder, 1, &q->bo) ||
         ilo_cp_space(ilo->cp) < q->cmd_len) {
      ilo_cp_submit(ilo->cp, "out of aperture or space");

      assert(ilo_builder_validate(&ilo->cp->builder, 1, &q->bo));
      assert(ilo_cp_space(ilo->cp) >= q->cmd_len);

      ilo_draw_set_owner(ilo);
   }

   /* reserve the space for ending/pausing the query */
   ilo->draw.cp_owner.reserve += q->cmd_len >> q->in_pairs;

   query_begin_bo(ilo, q);

   if (q->in_pairs)
      list_add(&q->list, &ilo->draw.queries);
}
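
A hedged sketch of the matching end-of-query path, which this listing
does not show.  The body is modeled on ilo_begin_draw_query() above;
query_end_bo() is assumed to exist as the counterpart of
query_begin_bo():

static void
my_end_draw_query(struct ilo_context *ilo, struct ilo_query *q)
{
   /* stop tracking the query across submits */
   if (q->in_pairs)
      list_del(&q->list);

   /* give back the space reserved in ilo_begin_draw_query() */
   ilo->draw.cp_owner.reserve -= q->cmd_len >> q->in_pairs;

   /* assumed counterpart of query_begin_bo() */
   query_end_bo(ilo, q);
}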
Example #6
void
ilo_draw_rectlist(struct ilo_context *ilo)
{
   int max_len, before_space;
   bool need_flush;

   need_flush = ilo_builder_batch_used(&ilo->cp->builder);

   ilo_draw_set_owner(ilo);

   max_len = ilo_render_get_rectlist_len(ilo->render, ilo->blitter);
   max_len += ilo_render_get_flush_len(ilo->render) * 2;

   if (max_len > ilo_cp_space(ilo->cp)) {
      ilo_cp_submit(ilo->cp, "out of space");
      need_flush = false;
      assert(max_len <= ilo_cp_space(ilo->cp));
   }

   before_space = ilo_cp_space(ilo->cp);

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 313:
    *
    *     "If other rendering operations have preceded this clear, a
    *      PIPE_CONTROL with write cache flush enabled and Z-inhibit
    *      disabled must be issued before the rectangle primitive used for
    *      the depth buffer clear operation."
    *
    * From the Sandy Bridge PRM, volume 2 part 1, page 314:
    *
    *     "Depth buffer clear pass must be followed by a PIPE_CONTROL
    *      command with DEPTH_STALL bit set and Then followed by Depth
    *      FLUSH"
    *
    * But the pipeline has to be flushed both before and after, and not
    * only because of these workarounds.  We also need the flushes for
    * reasons such as
    *
    *  - we may sample from a texture that was rendered to
    *  - we may sample from the fb shortly after
    *
    * Skip checking blitter->op and do the flushes.
    */
   if (need_flush)
      ilo_render_emit_flush(ilo->render);

   while (true) {
      struct ilo_builder_snapshot snapshot;

      ilo_builder_batch_snapshot(&ilo->cp->builder, &snapshot);

      ilo_render_emit_rectlist(ilo->render, ilo->blitter);

      if (!ilo_builder_validate(&ilo->cp->builder, 0, NULL)) {
         ilo_builder_batch_restore(&ilo->cp->builder, &snapshot);

         /* flush and try again */
         if (ilo_builder_batch_used(&ilo->cp->builder)) {
            ilo_cp_submit(ilo->cp, "out of aperture");
            continue;
         }
      }

      break;
   }

   ilo_render_invalidate_hw(ilo->render);

   ilo_render_emit_flush(ilo->render);

   /* sanity check size estimation */
   assert(before_space - ilo_cp_space(ilo->cp) <= max_len);
}
Example #7
static bool
draw_vbo(struct ilo_context *ilo, const struct ilo_state_vector *vec)
{
   bool need_flush = false;
   bool success = true;
   int max_len, before_space;

   /* on Gen7 and Gen7.5, we need SOL_RESET to reset the SO write offsets */
   if (ilo_dev_gen(ilo->dev) >= ILO_GEN(7) &&
       ilo_dev_gen(ilo->dev) <= ILO_GEN(7.5) &&
       (vec->dirty & ILO_DIRTY_SO) && vec->so.enabled &&
       !vec->so.append_bitmask) {
      ilo_cp_submit(ilo->cp, "SOL_RESET");
      ilo_cp_set_one_off_flags(ilo->cp, INTEL_EXEC_GEN7_SOL_RESET);
   }

   if (ilo_builder_batch_used(&ilo->cp->builder)) {
      /*
       * Without a better tracking mechanism, when the framebuffer changes, we
       * have to assume that the old framebuffer may be sampled from.  If that
       * happens in the middle of a batch buffer, we need to insert manual
       * flushes.
       */
      need_flush = (vec->dirty & ILO_DIRTY_FB);

      /* the same applies to SO target changes */
      need_flush |= (vec->dirty & ILO_DIRTY_SO);
   }

   ilo_draw_set_owner(ilo);

   /* make sure there is enough room first */
   max_len = ilo_render_get_draw_len(ilo->render, vec);
   if (need_flush)
      max_len += ilo_render_get_flush_len(ilo->render);

   if (max_len > ilo_cp_space(ilo->cp)) {
      ilo_cp_submit(ilo->cp, "out of space");
      need_flush = false;
      assert(max_len <= ilo_cp_space(ilo->cp));
   }

   /* space available before emission */
   before_space = ilo_cp_space(ilo->cp);

   if (need_flush)
      ilo_render_emit_flush(ilo->render);

   while (true) {
      struct ilo_builder_snapshot snapshot;

      ilo_builder_batch_snapshot(&ilo->cp->builder, &snapshot);

      ilo_render_emit_draw(ilo->render, vec);

      if (!ilo_builder_validate(&ilo->cp->builder, 0, NULL)) {
         ilo_builder_batch_restore(&ilo->cp->builder, &snapshot);

         /* flush and try again */
         if (ilo_builder_batch_used(&ilo->cp->builder)) {
            ilo_cp_submit(ilo->cp, "out of aperture");
            continue;
         }

         success = false;
      }

      break;
   }

   /* sanity check size estimation */
   assert(before_space - ilo_cp_space(ilo->cp) <= max_len);

   return success;
}