Example #1
static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_metadata metadata = {0};
   uint32_t tiling_flags = 0;

   if (md->macrotile == RADEON_LAYOUT_TILED)
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
   else if (md->microtile == RADEON_LAYOUT_TILED)
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
   else
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */

   tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->pipe_config);
   tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->bankw));
   tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->bankh));
   if (md->tile_split)
      tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(md->tile_split));
   tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->mtilea));
   tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->num_banks)-1);

   if (md->scanout)
      tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
   else
      tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */

   metadata.tiling_info = tiling_flags;
   metadata.size_metadata = md->size_metadata;
   memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));

   amdgpu_bo_set_metadata(bo->bo, &metadata);
}
Example #2
static boolean amdgpu_bo_get_handle(struct pb_buffer *buffer,
                                    unsigned stride,
                                    struct winsys_handle *whandle)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
   enum amdgpu_bo_handle_type type;
   int r;

   bo->use_reusable_pool = false;

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   case DRM_API_HANDLE_TYPE_KMS:
      type = amdgpu_bo_handle_type_kms;
      break;
   default:
      return FALSE;
   }

   r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
   if (r)
      return FALSE;

   whandle->stride = stride;
   bo->is_shared = true;
   return TRUE;
}
Example #3
static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_info info = {0};
   uint32_t tiling_flags;
   int r;

   r = amdgpu_bo_query_info(bo->bo, &info);
   if (r)
      return;

   tiling_flags = info.metadata.tiling_info;

   md->microtile = RADEON_LAYOUT_LINEAR;
   md->macrotile = RADEON_LAYOUT_LINEAR;

   if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4)  /* 2D_TILED_THIN1 */
      md->macrotile = RADEON_LAYOUT_TILED;
   else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
      md->microtile = RADEON_LAYOUT_TILED;

   md->pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
   md->bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
   md->bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
   md->tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
   md->mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
   md->num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
   md->scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */

   md->size_metadata = info.metadata.size_metadata;
   memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
}
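
The getter above is the inverse of the setter in Example #1: bank width, bank height and the macro-tile aspect are stored as log2 of the value and decoded with 1 <<, while NUM_BANKS is stored as log2(num_banks) - 1 and decoded with 2 <<. Below is a minimal, self-contained check of that arithmetic; the helper names are illustrative and are not the real AMDGPU_TILING_* macros or util_logbase2.

/* Sketch of the encode/decode symmetry assumed by Examples #1 and #3. */
#include <assert.h>

static unsigned sketch_log2(unsigned x)
{
   unsigned r = 0;
   while (x > 1) {
      x >>= 1;
      r++;
   }
   return r;
}

int main(void)
{
   /* BANK_WIDTH / BANK_HEIGHT / MACRO_TILE_ASPECT: stored as log2, decoded with 1 <<. */
   for (unsigned bankw = 1; bankw <= 8; bankw *= 2)
      assert((1u << sketch_log2(bankw)) == bankw);

   /* NUM_BANKS: stored as log2(num_banks) - 1, decoded with 2 <<. */
   for (unsigned num_banks = 2; num_banks <= 16; num_banks *= 2)
      assert((2u << (sketch_log2(num_banks) - 1)) == num_banks);

   return 0;
}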
Example #4
static struct amdgpu_winsys_bo *get_amdgpu_winsys_bo(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = NULL;

   if (_buf->vtbl == &amdgpu_winsys_bo_vtbl) {
      bo = amdgpu_winsys_bo(_buf);
   } else {
      struct pb_buffer *base_buf;
      pb_size offset;
      pb_get_base_buffer(_buf, &base_buf, &offset);

      if (base_buf->vtbl == &amdgpu_winsys_bo_vtbl)
         bo = amdgpu_winsys_bo(base_buf);
   }

   return bo;
}
Example #5
static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (bo->use_reusable_pool)
      pb_cache_add_buffer(&bo->cache_entry);
   else
      amdgpu_bo_destroy(_buf);
}
Example #6
bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
      return false;
   }

   return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
}
Example #7
/* This is for the cache bufmgr. */
static boolean amdgpu_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
      return TRUE;
   }

   if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0, RADEON_USAGE_READWRITE)) {
      return TRUE;
   }

   return FALSE;
}
Example #8
static void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   int i;

   amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->va_handle);
   amdgpu_bo_free(bo->bo);

   for (i = 0; i < RING_LAST; i++)
      amdgpu_fence_reference(&bo->fence[i], NULL);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->rws->allocated_vram -= align(bo->base.size, bo->rws->gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      bo->rws->allocated_gtt -= align(bo->base.size, bo->rws->gart_page_size);
   FREE(bo);
}
Example #9
static void amdgpu_bo_set_tiling(struct pb_buffer *_buf,
                                 struct radeon_winsys_cs *rcs,
                                 enum radeon_bo_layout microtiled,
                                 enum radeon_bo_layout macrotiled,
                                 unsigned pipe_config,
                                 unsigned bankw, unsigned bankh,
                                 unsigned tile_split,
                                 unsigned stencil_tile_split,
                                 unsigned mtilea, unsigned num_banks,
                                 uint32_t pitch,
                                 bool scanout)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_metadata metadata = {0};
   uint32_t tiling_flags = 0;

   if (macrotiled == RADEON_LAYOUT_TILED)
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
   else if (microtiled == RADEON_LAYOUT_TILED)
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
   else
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */

   tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, pipe_config);
   tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(bankw));
   tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(bankh));
   if (tile_split)
      tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(tile_split));
   tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(mtilea));
   tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(num_banks)-1);

   if (scanout)
      tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
   else
      tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */

   metadata.tiling_info = tiling_flags;

   amdgpu_bo_set_metadata(bo->bo, &metadata);
}
Example #10
void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   int i;

   pipe_mutex_lock(bo->ws->global_bo_list_lock);
   LIST_DEL(&bo->global_list_item);
   bo->ws->num_buffers--;
   pipe_mutex_unlock(bo->ws->global_bo_list_lock);

   amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->va_handle);
   amdgpu_bo_free(bo->bo);

   for (i = 0; i < RING_LAST; i++)
      amdgpu_fence_reference(&bo->fence[i], NULL);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);
   FREE(bo);
}
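
The VRAM/GTT accounting in both destroy variants (Examples #8 and #10) subtracts the buffer size rounded up to the GART page size. The sketch below illustrates the rounding this assumes, namely that align/align64 round a size up to the next multiple of a power-of-two alignment; the helper name is hypothetical.

/* Minimal sketch of the page-size rounding assumed by the accounting above. */
#include <assert.h>
#include <stdint.h>

static uint64_t sketch_align64(uint64_t value, uint64_t alignment)
{
   /* alignment is assumed to be a power of two (e.g. the 4 KiB GART page). */
   return (value + alignment - 1) & ~(alignment - 1);
}

int main(void)
{
   /* With 4 KiB GART pages, a 5000-byte buffer is accounted as 8192 bytes. */
   assert(sketch_align64(5000, 4096) == 8192);
   assert(sketch_align64(4096, 4096) == 4096);
   return 0;
}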
Example #11
static void amdgpu_bo_get_tiling(struct pb_buffer *_buf,
                                 enum radeon_bo_layout *microtiled,
                                 enum radeon_bo_layout *macrotiled,
                                 unsigned *bankw, unsigned *bankh,
                                 unsigned *tile_split,
                                 unsigned *stencil_tile_split,
                                 unsigned *mtilea,
                                 bool *scanout)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_info info = {0};
   uint32_t tiling_flags;
   int r;

   r = amdgpu_bo_query_info(bo->bo, &info);
   if (r)
      return;

   tiling_flags = info.metadata.tiling_info;

   *microtiled = RADEON_LAYOUT_LINEAR;
   *macrotiled = RADEON_LAYOUT_LINEAR;

   if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4)  /* 2D_TILED_THIN1 */
      *macrotiled = RADEON_LAYOUT_TILED;
   else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
      *microtiled = RADEON_LAYOUT_TILED;

   if (bankw && bankh && tile_split && mtilea) {
      *bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
      *bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
      *tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
      *mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
   }
   if (scanout)
      *scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
}
Example #12
static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_winsys *ws = bo->ws;
   int i;

   if (bo->is_shared) {
      /* We can't use user fences for shared buffers, because user fences
       * are local to this process only. If we want to wait for all buffer
       * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
       */
      bool buffer_busy = true;
      int r;

      r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
      if (r)
         fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
                 r);
      return !buffer_busy;
   }

   if (timeout == 0) {
      /* Timeout == 0 is quite simple. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++)
         if (bo->fence[i]) {
            if (amdgpu_fence_wait(bo->fence[i], 0, false)) {
               /* Release the idle fence to avoid checking it again later. */
               amdgpu_fence_reference(&bo->fence[i], NULL);
            } else {
               pipe_mutex_unlock(ws->bo_fence_lock);
               return false;
            }
         }
      pipe_mutex_unlock(ws->bo_fence_lock);
      return true;

   } else {
      struct pipe_fence_handle *fence[RING_LAST] = {};
      bool fence_idle[RING_LAST] = {};
      bool buffer_idle = true;
      int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

      /* Take references to all fences, so that we can wait for them
       * without the lock. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++)
         amdgpu_fence_reference(&fence[i], bo->fence[i]);
      pipe_mutex_unlock(ws->bo_fence_lock);

      /* Now wait for the fences. */
      for (i = 0; i < RING_LAST; i++) {
         if (fence[i]) {
            if (amdgpu_fence_wait(fence[i], abs_timeout, true))
               fence_idle[i] = true;
            else
               buffer_idle = false;
         }
      }

      /* Release idle fences to avoid checking them again later. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++) {
         if (fence[i] == bo->fence[i] && fence_idle[i])
            amdgpu_fence_reference(&bo->fence[i], NULL);

         amdgpu_fence_reference(&fence[i], NULL);
      }
      pipe_mutex_unlock(ws->bo_fence_lock);

      return buffer_idle;
   }
}
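
In the non-zero-timeout branch, the relative timeout is converted once into an absolute deadline before the per-ring fence waits, so that waiting on several fences in sequence stays within the caller's total budget rather than multiplying it. The sketch below shows the conversion this assumes, with os_time_get_absolute_timeout taken to add the relative timeout to a monotonic clock; the helper names are hypothetical.

/* Hedged sketch of the relative-to-absolute timeout conversion. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t sketch_now_ns(void)
{
   struct timespec ts;
   clock_gettime(CLOCK_MONOTONIC, &ts);
   return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

int main(void)
{
   uint64_t relative_ns = 2000000; /* 2 ms budget for the whole wait */
   int64_t deadline = sketch_now_ns() + (int64_t)relative_ns;

   /* Each per-ring fence wait is then bounded by this same deadline, so the
    * loop over RING_LAST fences cannot exceed the caller's timeout. */
   printf("deadline: %" PRId64 " ns\n", deadline);
   return 0;
}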