/**
 * Request a transfer map for the texture resource
 */
static void *
svga_texture_transfer_map(struct pipe_context *pipe,
                          struct pipe_resource *texture,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_winsys_screen *sws = svga_screen(pipe->screen)->sws;
   struct svga_texture *tex = svga_texture(texture);
   struct svga_transfer *st;
   struct svga_winsys_surface *surf = tex->handle;
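   /* With guest-backed (GB) objects but no GB DMA support, the only way
    * to access the texture contents is to map the guest-backed storage
    * directly.
    */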
   boolean use_direct_map = svga_have_gb_objects(svga) &&
                            !svga_have_gb_dma(svga);
   void *map = NULL;
   int64_t begin = svga_get_time(svga);

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERMAP);

   if (!surf)
      goto done;

   /* We can't map texture storage directly unless we have GB objects */
   if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
      if (svga_have_gb_objects(svga))
         use_direct_map = TRUE;
      else
         goto done;
   }

   st = CALLOC_STRUCT(svga_transfer);
   if (!st)
      goto done;

   st->base.level = level;
   st->base.usage = usage;
   st->base.box = *box;

   /* The transfer box, with any array index removed from z.
    * The array index is instead stored in st->slice.
    */
   st->box.x = box->x;
   st->box.y = box->y;
   st->box.z = box->z;
   st->box.w = box->width;
   st->box.h = box->height;
   st->box.d = box->depth;

   switch (tex->b.b.target) {
   case PIPE_TEXTURE_CUBE:
      st->slice = st->base.box.z;
      st->box.z = 0;   /* so we don't apply double offsets below */
      break;
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      st->slice = st->base.box.z;
      st->box.z = 0;   /* so we don't apply double offsets below */

      /* Force direct map for transferring multiple slices */
      if (st->base.box.depth > 1)
         use_direct_map = svga_have_gb_objects(svga);

      break;
   default:
      st->slice = 0;
      break;
   }

   /* Force direct map for multisample surfaces */
   if (texture->nr_samples > 1) {
      assert(svga_have_gb_objects(svga));
      assert(sws->have_sm4_1);
      use_direct_map = TRUE;
   }

   st->use_direct_map = use_direct_map;
   pipe_resource_reference(&st->base.resource, texture);

   /* If this is the first time mapping to the surface in this
    * command buffer, clear the dirty masks of this surface.
    */
   if (sws->surface_is_flushed(sws, surf)) {
      svga_clear_texture_dirty(tex);
   }

   if (!use_direct_map) {
      /* upload to the DMA buffer */
      map = svga_texture_transfer_map_dma(svga, st);
   }
   else {
      boolean can_use_upload = tex->can_use_upload &&
                               !(st->base.usage & PIPE_TRANSFER_READ);
      boolean was_rendered_to =
         svga_was_texture_rendered_to(tex, st->slice, st->base.level);

      /* If the texture was already rendered to and the upload buffer
       * is supported, use the upload buffer to avoid reading back the
       * texture content.  Otherwise, first try to map the GB surface
       * directly; if that would block, fall back to the upload buffer.
       */
      if (was_rendered_to && can_use_upload) {
         map = svga_texture_transfer_map_upload(svga, st);
      }
      else {
         unsigned orig_usage = st->base.usage;

         /* First try to map directly to the GB surface */
         if (can_use_upload)
            st->base.usage |= PIPE_TRANSFER_DONTBLOCK;
         map = svga_texture_transfer_map_direct(svga, st);
         st->base.usage = orig_usage;

         if (!map && can_use_upload) {
            /* If the direct map with DONTBLOCK failed, fall back to the
             * texture upload buffer.
             */
            map = svga_texture_transfer_map_upload(svga, st);
         }
      }

      /* If nothing else worked, fall back to a direct map, this time
       * allowing it to block.
       */
      if (!map) {
         map = svga_texture_transfer_map_direct(svga, st);
      }
   }

   if (!map) {
      FREE(st);
   }
   else {
      *ptransfer = &st->base;
      svga->hud.num_textures_mapped++;
      if (usage & PIPE_TRANSFER_WRITE) {
         /* record texture upload for HUD */
         svga->hud.num_bytes_uploaded +=
            st->base.layer_stride * st->box.d;

         /* mark this texture level as dirty */
         svga_set_texture_dirty(tex, st->slice, level);
      }
   }

done:
   svga->hud.map_buffer_time += (svga_get_time(svga) - begin);
   SVGA_STATS_TIME_POP(sws);
   (void) sws;

   return map;
}
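
/*
 * Illustrative usage (not from the driver): state trackers reach the
 * function above through the pipe_context vtable.  A minimal sketch for
 * a 2-D texture, with hypothetical variable names:
 *
 *    struct pipe_transfer *xfer;
 *    struct pipe_box box;
 *    u_box_2d(0, 0, width, height, &box);
 *    void *ptr = pipe->transfer_map(pipe, texture, 0,
 *                                   PIPE_TRANSFER_WRITE, &box, &xfer);
 *    if (ptr) {
 *       ... write pixel data through ptr, using xfer->stride ...
 *       pipe->transfer_unmap(pipe, xfer);
 *    }
 */
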
/**
 * Create a buffer transfer.
 *
 * Unlike texture DMAs (which are written immediately to the command buffer and
 * therefore inherently serialized with other context operations), for buffers
 * we try to coalesce multiple range mappings (i.e., multiple calls to this
 * function) into a single DMA command, for better efficiency in command
 * processing.  This means we need to exercise extra care here to ensure that
 * the end result is exactly the same as if one DMA was used for every mapped
 * range.
 */
static void *
svga_buffer_transfer_map(struct pipe_context *pipe,
                         struct pipe_resource *resource,
                         unsigned level,
                         unsigned usage,
                         const struct pipe_box *box,
                         struct pipe_transfer **ptransfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_buffer *sbuf = svga_buffer(resource);
   struct pipe_transfer *transfer;
   uint8_t *map = NULL;
   int64_t begin = svga_get_time(svga);

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERTRANSFERMAP);

   /* Buffers are one-dimensional: only box->x and box->width are used */
   assert(box->y == 0);
   assert(box->z == 0);
   assert(box->height == 1);
   assert(box->depth == 1);

   transfer = MALLOC_STRUCT(pipe_transfer);
   if (!transfer) {
      goto done;
   }
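
   /* Note: MALLOC_STRUCT() does not zero the allocation, so every field
    * of the transfer that is read later must be initialized here.
    */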

   transfer->resource = resource;
   transfer->level = level;
   transfer->usage = usage;
   transfer->box = *box;
   transfer->stride = 0;
   transfer->layer_stride = 0;

   if (usage & PIPE_TRANSFER_WRITE) {
      /* If we write to the buffer for any reason, free any saved translated
       * vertices.
       */
      pipe_resource_reference(&sbuf->translated_indices.buffer, NULL);
   }

   if ((usage & PIPE_TRANSFER_READ) && sbuf->dirty) {
      enum pipe_error ret;

      /* Host-side buffers can only be dirtied with vgpu10 features
       * (streamout and buffer copy).
       */
      assert(svga_have_vgpu10(svga));

      if (!sbuf->user) {
         (void) svga_buffer_handle(svga, resource, sbuf->bind_flags);
      }

      if (sbuf->dma.pending) {
         /* Flush and wait for any pending DMA upload, so the readback
          * below sees the latest guest-written data.
          */
         svga_buffer_upload_flush(svga, sbuf);
         svga_context_finish(svga);
      }

      assert(sbuf->handle);

      /* Like any SVGA3D command, the readback can fail if the command
       * buffer is full; flush to make room and retry once.
       */
      ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, sbuf->handle, 0);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, sbuf->handle, 0);
         assert(ret == PIPE_OK);
      }

      svga->hud.num_readbacks++;

      svga_context_finish(svga);

      sbuf->dirty = FALSE;
   }

   if (usage & PIPE_TRANSFER_WRITE) {
      if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         /*
          * Flush any pending primitives, finish writing any pending DMA
          * commands, and tell the host to discard the buffer contents on
          * the next DMA operation.
          */

         svga_hwtnl_flush_buffer(svga, resource);

         if (sbuf->dma.pending) {
            svga_buffer_upload_flush(svga, sbuf);

            /*
             * Instead of flushing the context command buffer, simply
             * discard the current hwbuf and start a new one.
             * With GB objects, the map operation takes care of this when
             * it is passed the PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE flag
             * and the old backing store is busy.
             */

            if (!svga_have_gb_objects(svga))
               svga_buffer_destroy_hw_storage(ss, sbuf);
         }

         sbuf->map.num_ranges = 0;
         sbuf->dma.flags.discard = TRUE;
      }

      if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
         if (!sbuf->map.num_ranges) {
            /*
             * No pending ranges to upload so far, so we can tell the host to
             * not synchronize on the next DMA command.
             */

            sbuf->dma.flags.unsynchronized = TRUE;
         }
      } else {
         /*
          * Synchronizing, so flush any pending primitives, finish writing any
          * pending DMA command, and ensure the next DMA will be done in order.
          */

         svga_hwtnl_flush_buffer(svga, resource);

         if (sbuf->dma.pending) {
            svga_buffer_upload_flush(svga, sbuf);

            if (svga_buffer_has_hw_storage(sbuf)) {
               /*
                * We have a pending DMA upload from a hardware buffer, so
                * we need to ensure that the host finishes processing that
                * DMA command before the state tracker starts overwriting
                * the hardware buffer.
                *
                * XXX: This could be avoided by tying the hardware buffer to
                * the transfer (just as done with textures), which would allow
                * overlapping DMA commands to be queued on the same context
                * buffer.  However, due to the likelihood of software vertex
                * processing, it is more convenient to hold on to the hardware
                * buffer, allowing the CPU to quickly access the contents
                * without having to do a DMA download from the host.
                */

               if (usage & PIPE_TRANSFER_DONTBLOCK) {
                  /*
                   * Flushing the command buffer here would most likely
                   * cause the hwbuf map below to block, so if DONTBLOCK
                   * is set, preemptively return NULL and avoid an
                   * unnecessary command buffer flush.
                   */

                  FREE(transfer);
                  goto done;
               }

               svga_context_flush(svga, NULL);
            }
         }

         sbuf->dma.flags.unsynchronized = FALSE;
      }
   }

   if (!sbuf->swbuf && !svga_buffer_has_hw_storage(sbuf)) {
      if (svga_buffer_create_hw_storage(ss, sbuf, sbuf->bind_flags) != PIPE_OK) {
         /*
          * We can't create a hardware buffer big enough, so create a malloc
          * buffer instead.
          */
         if (0) {   /* flip to 1 to log this fallback */
            debug_printf("%s: failed to allocate %u KB of DMA, "
                         "splitting DMA transfers\n",
                         __FUNCTION__,
                         (sbuf->b.b.width0 + 1023)/1024);
         }

         sbuf->swbuf = align_malloc(sbuf->b.b.width0, 16);
         if (!sbuf->swbuf) {
            FREE(transfer);
            goto done;
         }
      }
   }

   if (sbuf->swbuf) {
      /* User/malloc buffer */
      map = sbuf->swbuf;
   }
   else if (svga_buffer_has_hw_storage(sbuf)) {
      boolean retry;

      map = svga_buffer_hw_storage_map(svga, sbuf, transfer->usage, &retry);
      if (map == NULL && retry) {
         /*
          * At this point the DISCARD_WHOLE_RESOURCE handling earlier in
          * this function has already flushed HWTNL for this buffer, so
          * it is safe to flush the context and retry the map.
          */
         svga_context_flush(svga, NULL);
         map = svga_buffer_hw_storage_map(svga, sbuf, transfer->usage, &retry);
      }
   }
   else {
      map = NULL;
   }

   if (map) {
      ++sbuf->map.count;
      map += transfer->box.x;   /* point at the start of the requested range */
      *ptransfer = transfer;
   } else {
      FREE(transfer);
   }

   svga->hud.map_buffer_time += (svga_get_time(svga) - begin);

done:
   SVGA_STATS_TIME_POP(svga_sws(svga));
   return map;
}
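
/*
 * Illustrative usage (not from the driver): a minimal sketch of mapping a
 * buffer through the pipe_buffer_map() helper from u_inlines.h, with
 * hypothetical variable names:
 *
 *    struct pipe_transfer *xfer;
 *    void *ptr = pipe_buffer_map(pipe, buf,
 *                                PIPE_TRANSFER_WRITE |
 *                                PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
 *                                &xfer);
 *    if (ptr) {
 *       memcpy(ptr, data, buf->width0);
 *       pipe_buffer_unmap(pipe, xfer);
 *    }
 */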