Example 1
static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 unsigned size,
                 unsigned alignment,
                 boolean use_reusable_pool,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   unsigned usage = 0;

   /* Don't use VRAM if the GPU doesn't have much. This is only the initial
    * domain. The kernel is free to move the buffer if it wants to.
    *
    * 64MB means no VRAM by today's standards.
    */
   if (domain & RADEON_DOMAIN_VRAM && ws->info.vram_size <= 64*1024*1024) {
      domain = RADEON_DOMAIN_GTT;
      flags = RADEON_FLAG_GTT_WC;
   }

   /* Align size to page size. This is the minimum alignment for normal
    * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
    * like constant/uniform buffers, can benefit from better and more reuse.
    */
   size = align(size, ws->gart_page_size);

   /* Only set one usage bit each for domains and flags, or the cache manager
    * might consider different sets of domains / flags compatible
    */
   if (domain == RADEON_DOMAIN_VRAM_GTT)
      usage = 1 << 2;
   else
      usage = domain >> 1;
   assert(flags < sizeof(usage) * 8 - 3);
   usage |= 1 << (flags + 3);

   /* Get a buffer from the cache. */
   if (use_reusable_pool) {
       bo = (struct amdgpu_winsys_bo*)
            pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment,
                                    usage);
       if (bo)
          return &bo->base;
   }

   /* Create a new one. */
   bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags);
   if (!bo) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&ws->bo_cache);
      bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags);
      if (!bo)
         return NULL;
   }

   bo->use_reusable_pool = use_reusable_pool;
   return &bo->base;
}
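
The usage value computed above acts as a bucketing key for the buffer cache: each initial domain and each flags combination gets its own bit, so pb_cache_reclaim_buffer() can only return a buffer that was created with exactly the same domain and flags. Below is a minimal, self-contained sketch of that mapping; the demo_* names and numeric enum values are assumptions restated locally (they mirror the usual radeon_winsys definitions) so the snippet compiles on its own.

#include <assert.h>
#include <stdio.h>

/* Assumed domain values, stated locally for illustration only. */
enum demo_domain {
   DEMO_DOMAIN_GTT      = 0x2,
   DEMO_DOMAIN_VRAM     = 0x4,
   DEMO_DOMAIN_VRAM_GTT = DEMO_DOMAIN_GTT | DEMO_DOMAIN_VRAM
};

static unsigned demo_usage_key(enum demo_domain domain, unsigned flags)
{
   unsigned usage;

   /* One bit per domain: GTT -> bit 0, VRAM -> bit 1, VRAM_GTT -> bit 2. */
   if (domain == DEMO_DOMAIN_VRAM_GTT)
      usage = 1 << 2;
   else
      usage = domain >> 1;

   /* One bit per flags combination, starting at bit 3. */
   assert(flags < sizeof(usage) * 8 - 3);
   usage |= 1 << (flags + 3);
   return usage;
}

int main(void)
{
   /* Distinct domains or flags never share a key, so the cache cannot
    * hand a GTT buffer to a request that asked for VRAM, and so on. */
   printf("GTT/0: 0x%x  VRAM/0: 0x%x  VRAM_GTT/1: 0x%x\n",
          demo_usage_key(DEMO_DOMAIN_GTT, 0),
          demo_usage_key(DEMO_DOMAIN_VRAM, 0),
          demo_usage_key(DEMO_DOMAIN_VRAM_GTT, 1));
   return 0;
}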
Example 2
static void
pb_cache_manager_flush(struct pb_manager *_mgr)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);

   pb_cache_release_all_buffers(&mgr->cache);
   
   assert(mgr->provider->flush);
   if (mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}
Example 3
static struct pb_buffer *
pb_cache_manager_create_buffer(struct pb_manager *_mgr, 
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct pb_cache_buffer *buf;

   /* get a buffer from the cache */
   buf = (struct pb_cache_buffer *)
         pb_cache_reclaim_buffer(&mgr->cache, size, desc->alignment,
                                 desc->usage, 0);
   if (buf)
      return &buf->base;

   /* create a new one */
   buf = CALLOC_STRUCT(pb_cache_buffer);
   if (!buf)
      return NULL;
   
   buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);

   /* Empty the cache and try again. */
   if (!buf->buffer) {
      pb_cache_release_all_buffers(&mgr->cache);
      buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
   }

   if (!buf->buffer) {
      FREE(buf);
      return NULL;
   }
   
   assert(pipe_is_referenced(&buf->buffer->reference));
   assert(pb_check_alignment(desc->alignment, buf->buffer->alignment));
   assert(buf->buffer->size >= size);
   
   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = buf->buffer->alignment;
   buf->base.usage = buf->buffer->usage;
   buf->base.size = buf->buffer->size;
   
   buf->base.vtbl = &pb_cache_buffer_vtbl;
   buf->mgr = mgr;
   pb_cache_init_entry(&mgr->cache, &buf->cache_entry, &buf->base, 0);
   
   return &buf->base;
}
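
Both pb_cache_manager_create_buffer() and amdgpu_bo_create() use the same fallback when allocation fails: release every idle buffer held by the cache, then retry once before giving up. A minimal sketch of that pattern follows; demo_cache and demo_alloc() are hypothetical stand-ins for struct pb_cache and provider->create_buffer() (or amdgpu_create_bo()), so the snippet runs on its own.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for pb_cache: it just "holds" some bytes. */
struct demo_cache { size_t held_bytes; };

/* Stand-in for pb_cache_release_all_buffers(): give everything back. */
static void demo_cache_release_all(struct demo_cache *cache)
{
   cache->held_bytes = 0;
}

/* Stand-in for the real allocation; it fails while the cache is "full". */
static void *demo_alloc(struct demo_cache *cache, size_t size)
{
   if (cache->held_bytes > 0)
      return NULL;               /* simulate an out-of-memory condition */
   return malloc(size);
}

/* The fallback pattern from the examples: try once, empty the cache,
 * then try again before reporting failure. */
static void *alloc_with_fallback(struct demo_cache *cache, size_t size)
{
   void *ptr = demo_alloc(cache, size);
   if (!ptr) {
      demo_cache_release_all(cache);
      ptr = demo_alloc(cache, size);
   }
   return ptr;                   /* NULL only if the retry also failed */
}

int main(void)
{
   struct demo_cache cache = { .held_bytes = 1024 };
   void *p = alloc_with_fallback(&cache, 64);
   printf("allocation %s\n",
          p ? "succeeded after releasing the cache" : "failed");
   free(p);
   return 0;
}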
Example 4
static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 uint64_t size,
                 unsigned alignment,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   unsigned usage = 0;

   /* Align size to page size. This is the minimum alignment for normal
    * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
    * like constant/uniform buffers, can benefit from better and more reuse.
    */
   size = align64(size, ws->info.gart_page_size);
   alignment = align(alignment, ws->info.gart_page_size);

   /* Only set one usage bit each for domains and flags, or the cache manager
    * might consider different sets of domains / flags compatible
    */
   if (domain == RADEON_DOMAIN_VRAM_GTT)
      usage = 1 << 2;
   else
      usage = domain >> 1;
   assert(flags < sizeof(usage) * 8 - 3);
   usage |= 1 << (flags + 3);

   /* Get a buffer from the cache. */
   bo = (struct amdgpu_winsys_bo*)
        pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage);
   if (bo)
      return &bo->base;

   /* Create a new one. */
   bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags);
   if (!bo) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&ws->bo_cache);
      bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags);
      if (!bo)
         return NULL;
   }

   bo->use_reusable_pool = true;
   return &bo->base;
}
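
Compared to Example 1, this newer version also rounds the requested alignment up to the GART page size and switches to a 64-bit size with align64(). Both helpers simply round a value up to the next multiple of a power-of-two alignment; the sketch below restates that rounding with local demo_* helpers, since the Gallium util macros themselves are not shown in these snippets.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Round up to a power-of-two alignment (local stand-in for align()). */
static unsigned demo_align(unsigned value, unsigned alignment)
{
   assert(alignment && (alignment & (alignment - 1)) == 0);
   return (value + alignment - 1) & ~(alignment - 1);
}

/* 64-bit variant (local stand-in for align64()). */
static uint64_t demo_align64(uint64_t value, uint64_t alignment)
{
   assert(alignment && (alignment & (alignment - 1)) == 0);
   return (value + alignment - 1) & ~(alignment - 1);
}

int main(void)
{
   /* With a 4 KiB GART page size, a 100-byte constant buffer is padded to
    * one full page, which is what lets the cache reuse it later. */
   printf("%u\n", demo_align(100, 4096));                          /* 4096 */
   printf("%llu\n", (unsigned long long)demo_align64(4097, 4096)); /* 8192 */
   return 0;
}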
Example 5
/**
 * Deinitialize the manager completely.
 */
void
pb_cache_deinit(struct pb_cache *mgr)
{
   pb_cache_release_all_buffers(mgr);
   pipe_mutex_destroy(mgr->mutex);
}
Example 6
static void *amdgpu_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   int r;
   void *cpu = NULL;

   /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_WRITE)) {
               return NULL;
            }
         } else {
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_READWRITE)) {
               return NULL;
            }
         }
      } else {
         uint64_t time = os_time_get_nano();

         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, 0, NULL);
            }
            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_WRITE);
         } else {
            /* Mapping for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo))
               cs->flush_cs(cs->flush_data, 0, NULL);

            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_READWRITE);
         }

         bo->ws->buffer_wait_time += os_time_get_nano() - time;
      }
   }

   /* If the buffer is created from user memory, return the user pointer. */
   if (bo->user_ptr)
       return bo->user_ptr;

   r = amdgpu_bo_cpu_map(bo->bo, &cpu);
   if (r) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&bo->ws->bo_cache);
      r = amdgpu_bo_cpu_map(bo->bo, &cpu);
   }
   return r ? NULL : cpu;
}
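
The synchronization logic above reduces to one rule: a CPU map for read only has to wait for pending GPU writes, while a map for write has to wait for any pending GPU access, and PIPE_TRANSFER_DONTBLOCK turns those waits into a flush-and-return-NULL. A condensed sketch of that rule, with a hypothetical demo_usage enum standing in for RADEON_USAGE_WRITE / RADEON_USAGE_READWRITE:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for RADEON_USAGE_WRITE / RADEON_USAGE_READWRITE. */
enum demo_usage { DEMO_USAGE_WRITE, DEMO_USAGE_READWRITE };

/* Which pending GPU work a CPU mapping has to wait for. */
static enum demo_usage demo_wait_usage(bool mapping_for_write)
{
   /* Mapping for read: concurrent GPU reads are harmless, so only block
    * on writers.  Mapping for write: block on readers and writers alike. */
   return mapping_for_write ? DEMO_USAGE_READWRITE : DEMO_USAGE_WRITE;
}

int main(void)
{
   printf("map for read  -> wait on %s\n",
          demo_wait_usage(false) == DEMO_USAGE_WRITE ? "WRITE" : "READWRITE");
   printf("map for write -> wait on %s\n",
          demo_wait_usage(true) == DEMO_USAGE_WRITE ? "WRITE" : "READWRITE");
   return 0;
}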