Example #1
static void
mm_buffer_destroy(struct pb_buffer *buf)
{
   struct mm_buffer *mm_buf = mm_buffer(buf);
   struct mm_pb_manager *mm = mm_buf->mgr;
   
   assert(!pipe_is_referenced(&mm_buf->base.reference));
   
   mtx_lock(&mm->mutex);
   u_mmFreeMem(mm_buf->block);
   FREE(mm_buf);
   mtx_unlock(&mm->mutex);
}
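
Example #1 (and Example #2 below) destroys a buffer in the same shape: assert that the reference count has already dropped to zero, then take the manager's mutex only around the bookkeeping that touches shared state. A minimal standalone sketch of that pattern, assuming hypothetical my_buffer/my_manager types and pthreads in place of the Mesa mutex wrappers (in Example #1 the heap itself is shared, which is why u_mmFreeMem also runs under the lock):

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

struct my_manager {
   pthread_mutex_t mutex;
   size_t num_live;              /* shared bookkeeping, needs the lock */
};

struct my_buffer {
   int refcount;                 /* must already be 0 on destroy */
   struct my_manager *mgr;
   void *storage;
};

static void
my_buffer_destroy(struct my_buffer *buf)
{
   struct my_manager *mgr = buf->mgr;

   /* The last reference is gone, so no other thread can see buf. */
   assert(buf->refcount == 0);

   pthread_mutex_lock(&mgr->mutex);
   mgr->num_live--;
   pthread_mutex_unlock(&mgr->mutex);

   free(buf->storage);           /* private to buf: no lock needed */
   free(buf);
}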
Example #2
static void
pool_buffer_destroy(struct pb_buffer *buf)
{
   struct pool_buffer *pool_buf = pool_buffer(buf);
   struct pool_pb_manager *pool = pool_buf->mgr;
   
   assert(!pipe_is_referenced(&pool_buf->base.reference));

   pipe_mutex_lock(pool->mutex);
   LIST_ADD(&pool_buf->head, &pool->free);
   pool->numFree++;
   pipe_mutex_unlock(pool->mutex);
}
Example #3
/**
 * Actually destroy the buffer.
 */
static INLINE void
_pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
{
   struct pb_cache_manager *mgr = buf->mgr;

   LIST_DEL(&buf->head);
   assert(mgr->numDelayed);
   --mgr->numDelayed;
   mgr->cache_size -= buf->base.size;
   assert(!pipe_is_referenced(&buf->base.reference));
   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}
Example #4
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   pipe_mutex_lock(fenced_mgr->mutex);

   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);

   pipe_mutex_unlock(fenced_mgr->mutex);
}
Example #5
/**
 * Actually destroy the buffer.
 */
static void
destroy_buffer_locked(struct pb_cache_entry *entry)
{
   struct pb_cache *mgr = entry->mgr;

   assert(!pipe_is_referenced(&entry->buffer->reference));
   if (entry->head.next) {
      LIST_DEL(&entry->head);
      assert(mgr->num_buffers);
      --mgr->num_buffers;
      mgr->cache_size -= entry->buffer->size;
   }
   entry->mgr->destroy_buffer(entry->buffer);
}
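
Example #5 only unlinks when entry->head.next is non-NULL: the convention is that a detached intrusive-list node has NULL links, so the next pointer doubles as a membership flag. A standalone sketch of that convention (the list type and helpers are hypothetical stand-ins, not Mesa's LIST_* macros):

#include <assert.h>
#include <stddef.h>

struct list_head {
   struct list_head *prev, *next;
};

static void
list_del(struct list_head *item)
{
   item->prev->next = item->next;
   item->next->prev = item->prev;
   item->prev = item->next = NULL;   /* mark the node as detached */
}

struct entry {
   struct list_head head;
};

static void
entry_unlink_if_listed(struct entry *e, size_t *num_listed)
{
   if (e->head.next) {               /* non-NULL next == still listed */
      list_del(&e->head);
      assert(*num_listed > 0);
      --*num_listed;
   }
}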
Example #6
File: st_texture.c  Project: ndesh26/Mesa
/**
 * Allocate a new pipe_resource object
 * width0, height0, depth0 are the dimensions of the level 0 image
 * (the highest resolution).  last_level indicates how many mipmap levels
 * to allocate storage for.  For non-mipmapped textures, this will be zero.
 */
struct pipe_resource *
st_texture_create(struct st_context *st,
                  enum pipe_texture_target target,
                  enum pipe_format format,
                  GLuint last_level,
                  GLuint width0,
                  GLuint height0,
                  GLuint depth0,
                  GLuint layers,
                  GLuint nr_samples,
                  GLuint bind)
{
   struct pipe_resource pt, *newtex;
   struct pipe_screen *screen = st->pipe->screen;

   assert(target < PIPE_MAX_TEXTURE_TYPES);
   assert(width0 > 0);
   assert(height0 > 0);
   assert(depth0 > 0);
   if (target == PIPE_TEXTURE_CUBE)
      assert(layers == 6);

   DBG("%s target %d format %s last_level %d\n", __func__,
       (int) target, util_format_name(format), last_level);

   assert(format);
   assert(screen->is_format_supported(screen, format, target, 0,
                                      PIPE_BIND_SAMPLER_VIEW));

   memset(&pt, 0, sizeof(pt));
   pt.target = target;
   pt.format = format;
   pt.last_level = last_level;
   pt.width0 = width0;
   pt.height0 = height0;
   pt.depth0 = depth0;
   pt.array_size = layers;
   pt.usage = PIPE_USAGE_DEFAULT;
   pt.bind = bind;
   /* only set this for OpenGL textures, not renderbuffers */
   pt.flags = PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY;
   pt.nr_samples = nr_samples;

   newtex = screen->resource_create(screen, &pt);

   assert(!newtex || pipe_is_referenced(&newtex->reference));

   return newtex;
}
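
The comment above defines last_level as the number of mipmap levels to allocate below the base image, zero for a non-mipmapped texture. For a full chain down to 1x1 that is floor(log2(max(width0, height0, depth0))); a small sketch of the calculation (compute_last_level is a hypothetical helper, not part of the st_texture API):

/* Levels below the base for a full mipmap chain: a 64x16 texture
 * yields 6, i.e. levels 64, 32, 16, 8, 4, 2, 1. */
static unsigned
compute_last_level(unsigned width0, unsigned height0, unsigned depth0)
{
   unsigned size = width0;
   unsigned last_level = 0;

   if (height0 > size)
      size = height0;
   if (depth0 > size)
      size = depth0;

   while (size > 1) {
      size >>= 1;
      ++last_level;
   }
   return last_level;
}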
Example #7
static struct pb_buffer *
pb_cache_manager_create_buffer(struct pb_manager *_mgr, 
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct pb_cache_buffer *buf;

   /* get a buffer from the cache */
   buf = (struct pb_cache_buffer *)
         pb_cache_reclaim_buffer(&mgr->cache, size, desc->alignment,
                                 desc->usage, 0);
   if (buf)
      return &buf->base;

   /* create a new one */
   buf = CALLOC_STRUCT(pb_cache_buffer);
   if (!buf)
      return NULL;
   
   buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);

   /* Empty the cache and try again. */
   if (!buf->buffer) {
      pb_cache_release_all_buffers(&mgr->cache);
      buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
   }

   if (!buf->buffer) {
      FREE(buf);
      return NULL;
   }
   
   assert(pipe_is_referenced(&buf->buffer->reference));
   assert(pb_check_alignment(desc->alignment, buf->buffer->alignment));
   assert(buf->buffer->size >= size);
   
   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = buf->buffer->alignment;
   buf->base.usage = buf->buffer->usage;
   buf->base.size = buf->buffer->size;
   
   buf->base.vtbl = &pb_cache_buffer_vtbl;
   buf->mgr = mgr;
   pb_cache_init_entry(&mgr->cache, &buf->cache_entry, &buf->base, 0);
   
   return &buf->base;
}
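
The fallback in Example #7 is worth isolating: when the provider cannot satisfy the request, the cache is emptied once and the allocation retried before giving up. A standalone sketch of that retry shape, with malloc standing in for the provider and a toy one-slot cache (all names are assumptions):

#include <stdlib.h>

/* Toy stand-in for a buffer cache: it hoards at most one spare block. */
struct cache {
   void *spare;
};

static void
flush_cache(struct cache *c)
{
   free(c->spare);
   c->spare = NULL;
}

static void *
alloc_with_cache_flush(struct cache *c, size_t size)
{
   void *ptr = malloc(size);

   /* A first failure may only mean the cache is hoarding memory:
    * empty it and retry exactly once, as Example #7 does. */
   if (!ptr) {
      flush_cache(c);
      ptr = malloc(size);
   }
   return ptr;                   /* NULL only if both attempts failed */
}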
Example #8
/**
 * Add the buffer to the fenced list.
 *
 * Reference count should be incremented before calling this function.
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.base.reference.count);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}
static INLINE void
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
                             struct fenced_buffer *fenced_buf)
{
   assert(!pipe_is_referenced(&fenced_buf->base.reference));

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);

   FREE(fenced_buf);
}
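
The contract in Example #8 is that the fenced list owns a reference of its own: fenced_buffer_add_locked takes it with p_atomic_inc before relinking the buffer, and fenced_buffer_destroy_locked may only run once the count is back to zero. A standalone sketch of a reference-holding list move with C11 atomics (the node layout is an assumption; the _locked suffix means the caller already holds the manager's mutex, as in the examples):

#include <assert.h>
#include <stdatomic.h>

struct node {
   atomic_int refcount;
   struct node *prev, *next;     /* intrusive links; node is on one list */
};

/* Move 'n' from whatever list holds it to the tail of 'dst' (a circular
 * sentinel).  The destination list keeps its own reference, so even if
 * the last external reference is dropped mid-flight the node survives
 * until the list releases it. */
static void
move_to_list_locked(struct node *n, struct node *dst)
{
   assert(atomic_load(&n->refcount) > 0);
   atomic_fetch_add(&n->refcount, 1);   /* the list's reference */

   n->prev->next = n->next;             /* unlink */
   n->next->prev = n->prev;

   n->prev = dst->prev;                 /* link at tail */
   n->next = dst;
   dst->prev->next = n;
   dst->prev = n;
}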
Example #10
static INLINE void
_fenced_buffer_add(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

#ifdef DEBUG
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
#endif
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
   ++fenced_list->numDelayed;
}
Example #11
static void
pb_cache_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);   
   struct pb_cache_manager *mgr = buf->mgr;

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&buf->base.base.reference));
   
   _pb_cache_buffer_list_check_free(mgr);
   
   buf->start = os_time_get();
   buf->end = buf->start + mgr->usecs;
   LIST_ADDTAIL(&buf->head, &mgr->delayed);
   ++mgr->numDelayed;
   pipe_mutex_unlock(mgr->mutex);
}
Example #12
/**
 * Actually destroy the buffer.
 */
static INLINE void
_fenced_buffer_destroy(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;
   
   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(!fenced_buf->fence);
#ifdef DEBUG
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
#else
   (void)fenced_list;
#endif
   pb_reference(&fenced_buf->buffer, NULL);
   FREE(fenced_buf);
}
Example #13
static void
pb_debug_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   struct pb_debug_manager *mgr = buf->mgr;
   
   assert(!pipe_is_referenced(&buf->base.base.reference));
   
   pb_debug_buffer_check(buf);

   pipe_mutex_lock(mgr->mutex);
   LIST_DEL(&buf->head);
   pipe_mutex_unlock(mgr->mutex);

   pipe_mutex_destroy(buf->mutex);
   
   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}
Example #14
/**
 * Allocate a new pipe_resource object
 * width0, height0, depth0 are the dimensions of the level 0 image
 * (the highest resolution).  last_level indicates how many mipmap levels
 * to allocate storage for.  For non-mipmapped textures, this will be zero.
 */
struct pipe_resource *
st_texture_create(struct st_context *st,
                  enum pipe_texture_target target,
                  enum pipe_format format,
                  GLuint last_level,
                  GLuint width0,
                  GLuint height0,
                  GLuint depth0,
                  GLuint bind)
{
   struct pipe_resource pt, *newtex;
   struct pipe_screen *screen = st->pipe->screen;

   assert(target < PIPE_MAX_TEXTURE_TYPES);
   assert(width0 > 0);
   assert(height0 > 0);
   assert(depth0 > 0);

   DBG("%s target %s format %s last_level %d\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_lookup_enum_by_nr(format), last_level);

   assert(format);
   assert(screen->is_format_supported(screen, format, target, 0,
                                      PIPE_BIND_SAMPLER_VIEW, 0));

   memset(&pt, 0, sizeof(pt));
   pt.target = target;
   pt.format = format;
   pt.last_level = last_level;
   pt.width0 = width0;
   pt.height0 = height0;
   pt.depth0 = depth0;
   pt.usage = PIPE_USAGE_DEFAULT;
   pt.bind = bind;
   pt.flags = 0;

   newtex = screen->resource_create(screen, &pt);

   assert(!newtex || pipe_is_referenced(&newtex->reference));

   return newtex;
}
Example #15
static struct pb_buffer *
pool_bufmgr_create_buffer(struct pb_manager *mgr,
                          pb_size size,
                          const struct pb_desc *desc)
{
   struct pool_pb_manager *pool = pool_pb_manager(mgr);
   struct pool_buffer *pool_buf;
   struct list_head *item;

   assert(size == pool->bufSize);
   assert(pool->bufAlign % desc->alignment == 0);
   
   pipe_mutex_lock(pool->mutex);

   if (pool->numFree == 0) {
      pipe_mutex_unlock(pool->mutex);
      debug_printf("warning: out of fixed size buffer objects\n");
      return NULL;
   }

   item = pool->free.next;

   if (item == &pool->free) {
      pipe_mutex_unlock(pool->mutex);
      debug_printf("error: fixed size buffer pool corruption\n");
      return NULL;
   }

   LIST_DEL(item);
   --pool->numFree;

   pipe_mutex_unlock(pool->mutex);
   
   pool_buf = LIST_ENTRY(struct pool_buffer, item, head);
   assert(!pipe_is_referenced(&pool_buf->base.reference));
   pipe_reference_init(&pool_buf->base.reference, 1);
   pool_buf->base.alignment = desc->alignment;
   pool_buf->base.usage = desc->usage;
   
   return SUPER(pool_buf);
}
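
Example #15 (with Example #2 as its inverse) is a fixed-size pool: every buffer has the same size and alignment, so allocation reduces to popping a node off the free list under the lock, and exhaustion is reported rather than grown past. A standalone sketch with hypothetical types and pthreads in place of pipe_mutex:

#include <pthread.h>
#include <stddef.h>

struct pool_item {
   struct pool_item *next;       /* a singly linked free list suffices */
};

struct pool {
   pthread_mutex_t mutex;
   struct pool_item *free_list;
   size_t num_free;
};

/* Pop one fixed-size slot, or return NULL when the pool is exhausted. */
static struct pool_item *
pool_pop(struct pool *p)
{
   struct pool_item *item;

   pthread_mutex_lock(&p->mutex);
   item = p->free_list;
   if (item) {
      p->free_list = item->next;
      --p->num_free;
   }
   pthread_mutex_unlock(&p->mutex);

   return item;
}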
Example #16
/**
 * Allocate a new pipe_texture object
 * width0, height0, depth0 are the dimensions of the level 0 image
 * (the highest resolution).  last_level indicates how many mipmap levels
 * to allocate storage for.  For non-mipmapped textures, this will be zero.
 */
struct pipe_texture *
st_texture_create(struct st_context *st,
                  enum pipe_texture_target target,
                  enum pipe_format format,
                  GLuint last_level,
                  GLuint width0,
                  GLuint height0,
                  GLuint depth0,
                  GLuint usage)
{
   struct pipe_texture pt, *newtex;
   struct pipe_screen *screen = st->pipe->screen;

   assert(target <= PIPE_TEXTURE_CUBE);

   DBG("%s target %s format %s last_level %d\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_lookup_enum_by_nr(format), last_level);

   assert(format);
   assert(screen->is_format_supported(screen, format, target, 
                                      PIPE_TEXTURE_USAGE_SAMPLER, 0));

   memset(&pt, 0, sizeof(pt));
   pt.target = target;
   pt.format = format;
   pt.last_level = last_level;
   pt.width[0] = width0;
   pt.height[0] = height0;
   pt.depth[0] = depth0;
   pf_get_block(format, &pt.block);
   pt.tex_usage = usage;

   newtex = screen->texture_create(screen, &pt);

   assert(!newtex || pipe_is_referenced(&newtex->reference));

   return newtex;
}
Example #17
/**
 * Add a buffer to the cache. This is typically done when the buffer is
 * being released.
 */
void
pb_cache_add_buffer(struct pb_cache_entry *entry)
{
   struct pb_cache *mgr = entry->mgr;

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&entry->buffer->reference));

   release_expired_buffers_locked(mgr);

   /* Directly release any buffer that exceeds the limit. */
   if (mgr->cache_size + entry->buffer->size > mgr->max_cache_size) {
      entry->mgr->destroy_buffer(entry->buffer);
      pipe_mutex_unlock(mgr->mutex);
      return;
   }

   entry->start = os_time_get();
   entry->end = entry->start + mgr->usecs;
   LIST_ADDTAIL(&entry->head, &mgr->cache);
   ++mgr->num_buffers;
   mgr->cache_size += entry->buffer->size;
   pipe_mutex_unlock(mgr->mutex);
}
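
Examples #11 and #17 stamp each entry with an expiry window as it enters the cache: start = os_time_get(), end = start + mgr->usecs, and the reclaim pass frees entries whose end has passed. A standalone sketch of that timing (the entry layout is hypothetical; CLOCK_MONOTONIC stands in for os_time_get):

#include <stdint.h>
#include <time.h>

struct cache_entry {
   int64_t start, end;           /* microseconds on a monotonic clock */
};

static int64_t
now_usecs(void)
{
   struct timespec ts;
   clock_gettime(CLOCK_MONOTONIC, &ts);
   return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Stamp an entry as it enters the cache; it becomes reclaimable once
 * 'usecs' microseconds have elapsed. */
static void
entry_stamp(struct cache_entry *e, int64_t usecs)
{
   e->start = now_usecs();
   e->end = e->start + usecs;
}

static int
entry_expired(const struct cache_entry *e)
{
   return now_usecs() >= e->end;
}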
Example #18
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->buffer);

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         boolean destroyed;
         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
Example #19
static struct pb_buffer *
pb_debug_manager_create_buffer(struct pb_manager *_mgr, 
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
   struct pb_debug_buffer *buf;
   struct pb_desc real_desc;
   pb_size real_size;
   
   assert(size);
   assert(desc->alignment);

   buf = CALLOC_STRUCT(pb_debug_buffer);
   if(!buf)
      return NULL;
   
   real_size = mgr->underflow_size + size + mgr->overflow_size;
   real_desc = *desc;
   real_desc.usage |= PIPE_BUFFER_USAGE_CPU_WRITE;
   real_desc.usage |= PIPE_BUFFER_USAGE_CPU_READ;

   buf->buffer = mgr->provider->create_buffer(mgr->provider, 
                                              real_size, 
                                              &real_desc);
   if(!buf->buffer) {
      FREE(buf);
#if 0
      pipe_mutex_lock(mgr->mutex);
      debug_printf("%s: failed to create buffer\n", __FUNCTION__);
      if(!LIST_IS_EMPTY(&mgr->list))
         pb_debug_manager_dump(mgr);
      pipe_mutex_unlock(mgr->mutex);
#endif
      return NULL;
   }
   
   assert(pipe_is_referenced(&buf->buffer->base.reference));
   assert(pb_check_alignment(real_desc.alignment, buf->buffer->base.alignment));
   assert(pb_check_usage(real_desc.usage, buf->buffer->base.usage));
   assert(buf->buffer->base.size >= real_size);
   
   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = desc->alignment;
   buf->base.base.usage = desc->usage;
   buf->base.base.size = size;
   
   buf->base.vtbl = &pb_debug_buffer_vtbl;
   buf->mgr = mgr;

   buf->underflow_size = mgr->underflow_size;
   buf->overflow_size = buf->buffer->base.size - buf->underflow_size - size;
   
   debug_backtrace_capture(buf->create_backtrace, 1, PB_DEBUG_CREATE_BACKTRACE);

   pb_debug_buffer_fill(buf);
   
   pipe_mutex_init(buf->mutex);
   
   pipe_mutex_lock(mgr->mutex);
   LIST_ADDTAIL(&buf->head, &mgr->list);
   pipe_mutex_unlock(mgr->mutex);

   return &buf->base;
}
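
Example #19's real_size = underflow_size + size + overflow_size over-allocates so the bytes on either side of the user's region can be filled with a known pattern and verified later (pb_debug_buffer_check, called from the destroy path in Example #13, is not shown here). A standalone sketch of the guard-band idea, with hypothetical names and a fixed band size:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define GUARD_SIZE 64
#define GUARD_BYTE 0xA5

/* Allocate size bytes with a filled guard band on each side. */
static void *
guarded_alloc(size_t size)
{
   uint8_t *raw = malloc(GUARD_SIZE + size + GUARD_SIZE);
   if (!raw)
      return NULL;
   memset(raw, GUARD_BYTE, GUARD_SIZE);                     /* underflow */
   memset(raw + GUARD_SIZE + size, GUARD_BYTE, GUARD_SIZE); /* overflow */
   return raw + GUARD_SIZE;
}

/* Returns nonzero if either guard band was overwritten. */
static int
guarded_check(const void *ptr, size_t size)
{
   const uint8_t *raw = (const uint8_t *)ptr - GUARD_SIZE;
   size_t i;

   for (i = 0; i < GUARD_SIZE; i++) {
      if (raw[i] != GUARD_BYTE ||
          raw[GUARD_SIZE + size + i] != GUARD_BYTE)
         return 1;
   }
   return 0;
}

static void
guarded_free(void *ptr)
{
   free((uint8_t *)ptr - GUARD_SIZE);
}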
Example #20
struct pipe_texture *
renderer_clone_texture(struct xorg_renderer *r,
                       struct pipe_texture *src)
{
   enum pipe_format format;
   struct pipe_context *pipe = r->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct pipe_texture *pt;
   struct pipe_texture templ;

   if (pipe->is_texture_referenced(pipe, src, 0, 0) &
       PIPE_REFERENCED_FOR_WRITE)
      pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE, NULL);

   /* the incoming texture should already satisfy that invariant */
   debug_assert(screen->is_format_supported(screen, src->format,
                                            PIPE_TEXTURE_2D,
                                            PIPE_TEXTURE_USAGE_SAMPLER, 0));

   format = src->format;

   memset(&templ, 0, sizeof(templ));
   templ.target = PIPE_TEXTURE_2D;
   templ.format = format;
   templ.last_level = 0;
   templ.width[0] = src->width[0];
   templ.height[0] = src->height[0];
   templ.depth[0] = 1;
   pf_get_block(format, &templ.block);
   templ.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER;

   pt = screen->texture_create(screen, &templ);

   debug_assert(!pt || pipe_is_referenced(&pt->reference));

   if (!pt)
      return NULL;

   {
      /* copy source framebuffer surface into texture */
      struct pipe_surface *ps_read = screen->get_tex_surface(
         screen, src, 0, 0, 0, PIPE_BUFFER_USAGE_GPU_READ);
      struct pipe_surface *ps_tex = screen->get_tex_surface(
         screen, pt, 0, 0, 0, PIPE_BUFFER_USAGE_GPU_WRITE );
      if (pipe->surface_copy) {
         pipe->surface_copy(pipe,
                ps_tex, /* dest */
                0, 0, /* destx/y */
                ps_read,
                0, 0, src->width[0], src->height[0]);
      } else {
          util_surface_copy(pipe, FALSE,
                ps_tex, /* dest */
                0, 0, /* destx/y */
                ps_read,
                0, 0, src->width[0], src->height[0]);
      }
      pipe_surface_reference(&ps_read, NULL);
      pipe_surface_reference(&ps_tex, NULL);
   }

   return pt;
}
Example #21
static struct pb_buffer *
pb_cache_manager_create_buffer(struct pb_manager *_mgr, 
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct pb_cache_buffer *buf;
   struct pb_cache_buffer *curr_buf;
   struct list_head *curr, *next;
   int64_t now;
   
   pipe_mutex_lock(mgr->mutex);

   buf = NULL;
   curr = mgr->delayed.next;
   next = curr->next;
   
   /* search in the expired buffers, freeing them in the process */
   now = os_time_get();
   while(curr != &mgr->delayed) {
      curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      if(!buf && pb_cache_is_buffer_compat(curr_buf, size, desc))
         buf = curr_buf;
      else if(os_time_timeout(curr_buf->start, curr_buf->end, now))
         _pb_cache_buffer_destroy(curr_buf);
      else
         /* This buffer (and all after it) is still hot in the cache */
         break;
      curr = next; 
      next = curr->next;
   }

   /* keep searching in the hot buffers */
   if(!buf) {
      while(curr != &mgr->delayed) {
         curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
         if(pb_cache_is_buffer_compat(curr_buf, size, desc)) {
            buf = curr_buf;
            break;
         }
         /* no need to check the timeout here */
         curr = next;
         next = curr->next;
      }
   }
   
   if(buf) {
      LIST_DEL(&buf->head);
      pipe_mutex_unlock(mgr->mutex);
      /* Increase refcount */
      pipe_reference_init(&buf->base.base.reference, 1);
      return &buf->base;
   }
   
   pipe_mutex_unlock(mgr->mutex);

   buf = CALLOC_STRUCT(pb_cache_buffer);
   if(!buf)
      return NULL;
   
   buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
   if(!buf->buffer) {
      FREE(buf);
      return NULL;
   }
   
   assert(pipe_is_referenced(&buf->buffer->base.reference));
   assert(pb_check_alignment(desc->alignment, buf->buffer->base.alignment));
   assert(pb_check_usage(desc->usage, buf->buffer->base.usage));
   assert(buf->buffer->base.size >= size);
   
   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = buf->buffer->base.alignment;
   buf->base.base.usage = buf->buffer->base.usage;
   buf->base.base.size = buf->buffer->base.size;
   
   buf->base.vtbl = &pb_cache_buffer_vtbl;
   buf->mgr = mgr;
   
   return &buf->base;
}