Example #1
/**
 * Add a buffer to the cache. This is typically done when the buffer is
 * being released.
 */
void
pb_cache_add_buffer(struct pb_cache_entry *entry)
{
   struct pb_cache *mgr = entry->mgr;
   struct list_head *cache = &mgr->buckets[entry->bucket_index];
   struct pb_buffer *buf = entry->buffer;
   unsigned i;

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&buf->reference));

   for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++)
      release_expired_buffers_locked(&mgr->buckets[i]);

   /* Directly release any buffer that exceeds the limit. */
   if (mgr->cache_size + buf->size > mgr->max_cache_size) {
      mgr->destroy_buffer(buf);
      pipe_mutex_unlock(mgr->mutex);
      return;
   }

   entry->start = os_time_get();
   entry->end = entry->start + mgr->usecs;
   LIST_ADDTAIL(&entry->head, cache);
   ++mgr->num_buffers;
   mgr->cache_size += buf->size;
   pipe_mutex_unlock(mgr->mutex);
}
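
All of the examples collected here revolve around the same intrusive doubly-linked list primitives (LIST_INITHEAD, LIST_ADDTAIL, LIST_DEL, LIST_DELINIT, LIST_ENTRY, LIST_IS_EMPTY), but none of the snippets show their definitions. Below is a minimal sketch of how these macros are commonly implemented, modeled on Mesa's u_double_list.h; treat it as an illustration, not the exact headers of the projects quoted on this page. The sentinel node points to itself when the list is empty, and LIST_ADDTAIL links the new node just in front of the sentinel, i.e. at the tail.

#include <stddef.h>   /* offsetof */

struct list_head {
   struct list_head *prev;
   struct list_head *next;
};

/* An empty list is a sentinel whose prev/next point to itself. */
#define LIST_INITHEAD(__item)       \
   do {                             \
      (__item)->prev = (__item);    \
      (__item)->next = (__item);    \
   } while (0)

/* Insert __item just before the sentinel __list, i.e. at the tail. */
#define LIST_ADDTAIL(__item, __list)      \
   do {                                   \
      (__item)->next = (__list);          \
      (__item)->prev = (__list)->prev;    \
      (__list)->prev->next = (__item);    \
      (__list)->prev = (__item);          \
   } while (0)

/* Unlink __item from whatever list it is currently on. */
#define LIST_DEL(__item)                        \
   do {                                         \
      (__item)->prev->next = (__item)->next;    \
      (__item)->next->prev = (__item)->prev;    \
   } while (0)

/* Unlink __item and re-initialize it as an empty list of its own. */
#define LIST_DELINIT(__item)   \
   do {                        \
      LIST_DEL(__item);        \
      LIST_INITHEAD(__item);   \
   } while (0)

#define LIST_IS_EMPTY(__list) ((__list)->next == (__list))

/* Recover the structure that embeds a given list_head (container_of style). */
#define LIST_ENTRY(__type, __item, __field) \
   ((__type *)((char *)(__item) - offsetof(__type, __field)))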
Example #2
/**
 * Attach a registered vCPU to the runqueue of a physical CPU.
 *
 * @param vcpuid ID of the vCPU to attach
 * @param pcpu   Index of the physical CPU whose runqueue is used
 * @return 0 on success, -1 if the vCPU is not registered, -2 if it is already attached
 */
int sched_rr_vcpu_attach(vcpuid_t vcpuid, uint32_t pcpu)
{
    struct rq_entry_rr *entry_to_be_attached = NULL;

    /* Find the entry in the registered entries list */
    struct rq_entry_rr *entry = NULL;
    list_for_each_entry(struct rq_entry_rr, entry, &registered_list_rr[pcpu], registered_list_head) {
        if (entry->vcpuid == vcpuid) {
            entry_to_be_attached = entry;
            break;
        }
    }

    /* TODO:(igkang) Name the return value constants. */
    if (entry_to_be_attached == NULL) {
        return -1;    /* error: not registered */
    }

    if (entry_to_be_attached->state != DETACHED) {
        return -2;    /* error: already attached */
    }

    /* Set rq_entry_rr's fields */
    entry_to_be_attached->state = WAITING;

    /* Add it to runqueue */
    LIST_ADDTAIL(&entry_to_be_attached->head, &runqueue_rr[pcpu]);

    return 0;
}
static struct nv30_query_object *
nv30_query_object_new(struct nv30_screen *screen)
{
   struct nv30_query_object *oq, *qo = CALLOC_STRUCT(nv30_query_object);
   volatile uint32_t *ntfy;

   if (!qo)
      return NULL;

   /* allocate a new hw query object, if no hw objects left we need to
    * spin waiting for one to become free
    */
   while (nouveau_heap_alloc(screen->query_heap, 32, NULL, &qo->hw)) {
      oq = LIST_FIRST_ENTRY(struct nv30_query_object, &screen->queries, list);
      nv30_query_object_del(screen, &oq);
   }

   LIST_ADDTAIL(&qo->list, &screen->queries);

   ntfy = nv30_ntfy(screen, qo);
   ntfy[0] = 0x00000000;
   ntfy[1] = 0x00000000;
   ntfy[2] = 0x00000000;
   ntfy[3] = 0x01000000;
   return qo;
}
Example #4
/**
 * Register a vCPU to a scheduler
 *
 * Additionally, you have to call sched_vcpu_attach() to run the vCPU
 * by adding it to the runqueue.
 *
 * @param vcpuid ID of the vCPU to register
 * @param pcpu   Index of the physical CPU the vCPU is registered on
 * @return 0 on success
 */
int sched_rr_vcpu_register(vcpuid_t vcpuid, uint32_t pcpu)
{
    struct rq_entry_rr *new_entry;

    /* Check if vcpu is already registered */

    /* Allocate a rq_entry_rr */
    new_entry = (struct rq_entry_rr *) malloc(sizeof(struct rq_entry_rr));// alloc_rq_entry_rr();
    if (new_entry == NULL) {
        return -1;    /* error: allocation failed */
    }

    /* Initialize rq_entry_rr instance */
    LIST_INITHEAD(&new_entry->registered_list_head);
    LIST_INITHEAD(&new_entry->head);

    new_entry->vcpuid = vcpuid;

    /* FIXME:(igkang) Hardcoded. should use function parameter's value for tick_reset_val init. */
    new_entry->tick_reset_val = 5;

    new_entry->state = DETACHED;

    /* Add it to the registered vcpus list */
    LIST_ADDTAIL(&new_entry->registered_list_head, &registered_list_rr[pcpu]);

    return 0;
}
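
Taken together with Example #2 above, the intended call order for this scheduler appears to be: register the vCPU first, then attach it to a runqueue. A hypothetical caller (the surrounding hypervisor code is not shown on this page, so the variable names below are placeholders) might look like this:

/* Hypothetical usage sketch: register a vCPU on a pCPU, then attach it so
 * that sched_rr_do_schedule() on that pCPU can pick it up. */
vcpuid_t my_vcpuid = 3;   /* placeholder vCPU ID */
uint32_t my_pcpu = 0;     /* placeholder physical CPU index */

if (sched_rr_vcpu_register(my_vcpuid, my_pcpu) != 0) {
    /* registration failed */
} else if (sched_rr_vcpu_attach(my_vcpuid, my_pcpu) != 0) {
    /* -1: not registered, -2: already attached */
} else {
    /* the vCPU is now WAITING on runqueue_rr[my_pcpu] */
}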
/**
 * Remove the buffer from the fenced list, and potentially destroy the buffer
 * if the reference count reaches zero.
 *
 * Returns TRUE if the buffer was destroyed.
 */
static INLINE boolean
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   if (p_atomic_dec_zero(&fenced_buf->base.base.reference.count)) {
      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
      return TRUE;
   }

   return FALSE;
}
Example #6
static INLINE void
_fenced_buffer_remove(struct fenced_buffer_list *fenced_list,
                      struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_list->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->list == fenced_list);
   
   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
   
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numDelayed);
   --fenced_list->numDelayed;
   
#ifdef DEBUG
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->unfenced);
   ++fenced_list->numUnfenced;
#endif
   
   /**
    * FIXME!!!
    */

   if(!pipe_is_referenced(&fenced_buf->base.base.reference))
      _fenced_buffer_destroy(fenced_buf);
}
Example #7
static void
pb_cache_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);   
   struct pb_cache_manager *mgr = buf->mgr;

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&buf->base.reference));
   
   _pb_cache_buffer_list_check_free(mgr);

   /* Directly release any buffer that exceeds the limit. */
   if (mgr->cache_size + buf->base.size > mgr->max_cache_size) {
      pb_reference(&buf->buffer, NULL);
      FREE(buf);
      pipe_mutex_unlock(mgr->mutex);
      return;
   }

   buf->start = os_time_get();
   buf->end = buf->start + mgr->usecs;
   LIST_ADDTAIL(&buf->head, &mgr->delayed);
   ++mgr->numDelayed;
   mgr->cache_size += buf->base.size;
   pipe_mutex_unlock(mgr->mutex);
}
Example #8
static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_winsys *ws = bo->ws;

   pipe_mutex_lock(ws->global_bo_list_lock);
   LIST_ADDTAIL(&bo->global_list_item, &ws->global_bo_list);
   ws->num_buffers++;
   pipe_mutex_unlock(ws->global_bo_list_lock);
}
Example #9
static void r600_begin_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	memset(&rquery->result, 0, sizeof(rquery->result));
	rquery->results_start = rquery->results_end;
	r600_query_begin(rctx, (struct r600_query *)query);
	LIST_ADDTAIL(&rquery->list, &rctx->active_query_list);
}
Example #10
/**
 * Main scheduler routine of the RR policy implementation
 *
 * @param expiration [out] absolute time at which the next time slice expires
 * @return vcpuid of the next vCPU to run, or VCPUID_INVALID if none
 */
int sched_rr_do_schedule(uint64_t *expiration)
{
    uint32_t pcpu = smp_processor_id();
    /* TODO:(igkang) change type to bool */
    struct rq_entry_rr *next_entry = NULL;
    bool is_switching_needed = false;
    int next_vcpuid = VCPUID_INVALID;

    /* check pending attach list
     *      then attach them to runqueue_rr */
    /* TODO:(igkang) write code to attach pending attach requests */

    /* TODO:(igkang) improve logical code structure to make it easier to read */
    /* determine next vcpu to be run
     *  - if there is a detach-pending vcpu, then detach it. */
    if (current[pcpu] == NULL) { /* No vCPU is running */
        if (!LIST_IS_EMPTY(&runqueue_rr[pcpu])) { /* and there are some vcpus waiting */
            is_switching_needed = true;
        }
    } else { /* There's a vCPU currently running */
        struct rq_entry_rr *current_entry = NULL;
        /* put current entry back to runqueue_rr */
        current_entry = LIST_ENTRY(struct rq_entry_rr, current[pcpu], head);
        LIST_ADDTAIL(current[pcpu], &runqueue_rr[pcpu]);

        /* let's switch as tick is over */
        current_entry->state = WAITING;
        current[pcpu] = NULL;

        is_switching_needed = true;
    }

    /* update scheduling-related data (like tick) */
    if (is_switching_needed) {
        /* move entry from runqueue_rr to current */
        current[pcpu] = runqueue_rr[pcpu].next;
        LIST_DELINIT(current[pcpu]);

        next_entry = LIST_ENTRY(struct rq_entry_rr, current[pcpu], head);

        *expiration =
            timer_get_timenow() + MSEC(1) * (uint64_t) next_entry->tick_reset_val;
    }

    /* vcpu of current entry will be the next vcpu */
    if (current[pcpu] != NULL) {
        next_entry = LIST_ENTRY(struct rq_entry_rr, current[pcpu], head);
        next_entry->state = RUNNING;

        /* set return next_vcpuid value */
        next_vcpuid = next_entry->vcpuid;
    }

    return next_vcpuid;
}
/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if(!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.reference, 1);
   fenced_buf->base.alignment = desc->alignment;
   fenced_buf->base.usage = desc->usage;
   fenced_buf->base.size = size;
   fenced_buf->size = size;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * Try to create GPU storage without stalling,
    */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf,
                                                 desc, TRUE);

   /*
    * Give up.
    */
   if(ret != PIPE_OK) {
      goto no_storage;
   }

   assert(fenced_buf->buffer);

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   pipe_mutex_unlock(fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   pipe_mutex_unlock(fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}
Example #12
static INLINE void
_fenced_buffer_add(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   assert(fenced_buf->base.base.refcount);
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   assert(!fenced_buf->head.prev);
   assert(!fenced_buf->head.next);
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
   ++fenced_list->numDelayed;
}
/**
 * Add the buffer to the fenced list.
 *
 * Reference count should be incremented before calling this function.
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.base.reference.count);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}
Example #14
static void
pb_cache_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);   
   struct pb_cache_manager *mgr = buf->mgr;

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&buf->base.base.reference));
   
   _pb_cache_buffer_list_check_free(mgr);
   
   buf->start = os_time_get();
   buf->end = buf->start + mgr->usecs;
   LIST_ADDTAIL(&buf->head, &mgr->delayed);
   ++mgr->numDelayed;
   pipe_mutex_unlock(mgr->mutex);
}
Example #15
static INLINE void
_fenced_buffer_add(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

#ifdef DEBUG
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
#endif
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
   ++fenced_list->numDelayed;
}
Example #16
static void r600_begin_query(struct pipe_context *ctx, struct pipe_query *query)
{
    struct r600_context *rctx = (struct r600_context *)ctx;
    struct r600_query *rquery = (struct r600_query *)query;

    if (!si_query_needs_begin(rquery->type)) {
        assert(0);
        return;
    }

    memset(&rquery->result, 0, sizeof(rquery->result));
    rquery->results_start = rquery->results_end;
    r600_query_begin(rctx, (struct r600_query *)query);

    if (!si_is_timer_query(rquery->type)) {
        LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_query_list);
    }
}
Example #17
static void virgl_vtest_resource_reference(struct virgl_vtest_winsys *vtws,
                                           struct virgl_hw_res **dres,
                                           struct virgl_hw_res *sres)
{
   struct virgl_hw_res *old = *dres;
   if (pipe_reference(&(*dres)->reference, &sres->reference)) {
      if (!can_cache_resource(old)) {
         virgl_hw_res_destroy(vtws, old);
      } else {
         pipe_mutex_lock(vtws->mutex);
         virgl_cache_list_check_free(vtws);

         old->start = os_time_get();
         old->end = old->start + vtws->usecs;
         LIST_ADDTAIL(&old->head, &vtws->delayed);
         vtws->num_delayed++;
         pipe_mutex_unlock(vtws->mutex);
      }
   }
   *dres = sres;
}
Example #18
void *
debug_malloc(const char *file, unsigned line, const char *function,
             size_t size) 
{
   struct debug_memory_header *hdr;
   struct debug_memory_footer *ftr;
   
   hdr = os_malloc(sizeof(*hdr) + size + sizeof(*ftr));
   if(!hdr) {
      debug_printf("%s:%u:%s: out of memory when trying to allocate %lu bytes\n",
                   file, line, function,
                   (long unsigned)size);
      return NULL;
   }
 
   hdr->no = last_no++;
   hdr->file = file;
   hdr->line = line;
   hdr->function = function;
   hdr->size = size;
   hdr->magic = DEBUG_MEMORY_MAGIC;
   hdr->tag = 0;
#if DEBUG_FREED_MEMORY
   hdr->freed = FALSE;
#endif

#if DEBUG_MEMORY_STACK
   debug_backtrace_capture(hdr->backtrace, 0, DEBUG_MEMORY_STACK);
#endif

   ftr = footer_from_header(hdr);
   ftr->magic = DEBUG_MEMORY_MAGIC;
   
   pipe_mutex_lock(list_mutex);
   LIST_ADDTAIL(&hdr->head, &list);
   pipe_mutex_unlock(list_mutex);
   
   return data_from_header(hdr);
}
Example #19
/**
 * Add a buffer to the cache. This is typically done when the buffer is
 * being released.
 */
void
pb_cache_add_buffer(struct pb_cache_entry *entry)
{
   struct pb_cache *mgr = entry->mgr;

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&entry->buffer->reference));

   release_expired_buffers_locked(mgr);

   /* Directly release any buffer that exceeds the limit. */
   if (mgr->cache_size + entry->buffer->size > mgr->max_cache_size) {
      entry->mgr->destroy_buffer(entry->buffer);
      pipe_mutex_unlock(mgr->mutex);
      return;
   }

   entry->start = os_time_get();
   entry->end = entry->start + mgr->usecs;
   LIST_ADDTAIL(&entry->head, &mgr->cache);
   ++mgr->num_buffers;
   mgr->cache_size += entry->buffer->size;
   pipe_mutex_unlock(mgr->mutex);
}
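
The cache examples (#1, #7, #14 and #19) all depend on a time-based eviction helper that walks the cached list and frees entries whose end timestamp has already passed; the helper itself is not quoted on this page. The following is only a plausible sketch for the pb_cache variant of Example #19: the field and callback names are taken from the code above, but the loop itself is an assumption, not the original implementation.

/* Plausible sketch (assumption): destroy every cached buffer whose
 * expiration time (entry->end) lies in the past. Entries are appended in
 * order, so they are also ordered by expiration time and the walk can stop
 * at the first entry that has not yet expired. Call with mgr->mutex held. */
static void
release_expired_buffers_locked(struct pb_cache *mgr)
{
   int64_t now = os_time_get();

   while (!LIST_IS_EMPTY(&mgr->cache)) {
      struct pb_cache_entry *entry =
         LIST_ENTRY(struct pb_cache_entry, mgr->cache.next, head);

      if (now < entry->end)
         break;

      LIST_DEL(&entry->head);
      --mgr->num_buffers;
      mgr->cache_size -= entry->buffer->size;
      mgr->destroy_buffer(entry->buffer);
   }
}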
Example #20
static struct pb_buffer *
pb_debug_manager_create_buffer(struct pb_manager *_mgr, 
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
   struct pb_debug_buffer *buf;
   struct pb_desc real_desc;
   pb_size real_size;
   
   assert(size);
   assert(desc->alignment);

   buf = CALLOC_STRUCT(pb_debug_buffer);
   if(!buf)
      return NULL;
   
   real_size = mgr->underflow_size + size + mgr->overflow_size;
   real_desc = *desc;
   real_desc.usage |= PIPE_BUFFER_USAGE_CPU_WRITE;
   real_desc.usage |= PIPE_BUFFER_USAGE_CPU_READ;

   buf->buffer = mgr->provider->create_buffer(mgr->provider, 
                                              real_size, 
                                              &real_desc);
   if(!buf->buffer) {
      FREE(buf);
#if 0
      pipe_mutex_lock(mgr->mutex);
      debug_printf("%s: failed to create buffer\n", __FUNCTION__);
      if(!LIST_IS_EMPTY(&mgr->list))
         pb_debug_manager_dump(mgr);
      pipe_mutex_unlock(mgr->mutex);
#endif
      return NULL;
   }
   
   assert(pipe_is_referenced(&buf->buffer->base.reference));
   assert(pb_check_alignment(real_desc.alignment, buf->buffer->base.alignment));
   assert(pb_check_usage(real_desc.usage, buf->buffer->base.usage));
   assert(buf->buffer->base.size >= real_size);
   
   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = desc->alignment;
   buf->base.base.usage = desc->usage;
   buf->base.base.size = size;
   
   buf->base.vtbl = &pb_debug_buffer_vtbl;
   buf->mgr = mgr;

   buf->underflow_size = mgr->underflow_size;
   buf->overflow_size = buf->buffer->base.size - buf->underflow_size - size;
   
   debug_backtrace_capture(buf->create_backtrace, 1, PB_DEBUG_CREATE_BACKTRACE);

   pb_debug_buffer_fill(buf);
   
   pipe_mutex_init(buf->mutex);
   
   pipe_mutex_lock(mgr->mutex);
   LIST_ADDTAIL(&buf->head, &mgr->list);
   pipe_mutex_unlock(mgr->mutex);

   return &buf->base;
}
Example #21
/* Get (or create/upload) the winsys surface handle so that we can
 * refer to this buffer in fifo commands.
 */
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga,
                   struct pipe_resource *buf)
{
    struct pipe_screen *screen = svga->pipe.screen;
    struct svga_screen *ss = svga_screen(screen);
    struct svga_buffer *sbuf;
    enum pipe_error ret;

    if (!buf)
        return NULL;

    sbuf = svga_buffer(buf);

    assert(!sbuf->map.count);
    assert(!sbuf->user);

    if (!sbuf->handle) {
        ret = svga_buffer_create_host_surface(ss, sbuf);
        if (ret != PIPE_OK)
            return NULL;
    }

    assert(sbuf->handle);

    if (sbuf->map.num_ranges) {
        if (!sbuf->dma.pending) {
            /*
             * No pending DMA upload yet, so insert a DMA upload command now.
             */

            /*
             * Migrate the data from swbuf -> hwbuf if necessary.
             */
            ret = svga_buffer_update_hw(ss, sbuf);
            if (ret == PIPE_OK) {
                /*
                 * Queue a dma command.
                 */

                ret = svga_buffer_upload_command(svga, sbuf);
                if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
                    svga_context_flush(svga, NULL);
                    ret = svga_buffer_upload_command(svga, sbuf);
                    assert(ret == PIPE_OK);
                }
                if (ret == PIPE_OK) {
                    sbuf->dma.pending = TRUE;
                    assert(!sbuf->head.prev && !sbuf->head.next);
                    LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
                }
            }
            else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
                /*
                 * The buffer is too big to fit in the GMR aperture, so break it
                 * into smaller pieces.
                 */
                ret = svga_buffer_upload_piecewise(ss, svga, sbuf);
            }

            if (ret != PIPE_OK) {
                /*
                 * Something unexpected happened above. There is very little that
                 * we can do other than proceeding while ignoring the dirty ranges.
                 */
                assert(0);
                sbuf->map.num_ranges = 0;
            }
        }
        else {
            /*
             * There is a pending DMA already. Make sure it is from this context.
             */
            assert(sbuf->dma.svga == svga);
        }
    }

    assert(!sbuf->map.num_ranges || sbuf->dma.pending);

    return sbuf->handle;
}
Example #22
struct pb_manager *
pool_bufmgr_create(struct pb_manager *provider, 
                   pb_size numBufs, 
                   pb_size bufSize,
                   const struct pb_desc *desc) 
{
   struct pool_pb_manager *pool;
   struct pool_buffer *pool_buf;
   pb_size i;

   if(!provider)
      return NULL;
   
   pool = CALLOC_STRUCT(pool_pb_manager);
   if (!pool)
      return NULL;

   pool->base.destroy = pool_bufmgr_destroy;
   pool->base.create_buffer = pool_bufmgr_create_buffer;
   pool->base.flush = pool_bufmgr_flush;

   LIST_INITHEAD(&pool->free);

   pool->numTot = numBufs;
   pool->numFree = numBufs;
   pool->bufSize = bufSize;
   pool->bufAlign = desc->alignment; 
   
   pipe_mutex_init(pool->mutex);

   pool->buffer = provider->create_buffer(provider, numBufs*bufSize, desc); 
   if (!pool->buffer)
      goto failure;

   pool->map = pb_map(pool->buffer,
                          PB_USAGE_CPU_READ |
                          PB_USAGE_CPU_WRITE, NULL);
   if(!pool->map)
      goto failure;

   pool->bufs = (struct pool_buffer *)CALLOC(numBufs, sizeof(*pool->bufs));
   if (!pool->bufs)
      goto failure;

   pool_buf = pool->bufs;
   for (i = 0; i < numBufs; ++i) {
      pipe_reference_init(&pool_buf->base.reference, 0);
      pool_buf->base.alignment = 0;
      pool_buf->base.usage = 0;
      pool_buf->base.size = bufSize;
      pool_buf->base.vtbl = &pool_buffer_vtbl;
      pool_buf->mgr = pool;
      pool_buf->start = i * bufSize;
      LIST_ADDTAIL(&pool_buf->head, &pool->free);
      pool_buf++;
   }

   return SUPER(pool);
   
failure:
   if(pool->bufs)
      FREE(pool->bufs);
   if(pool->map)
      pb_unmap(pool->buffer);
   if(pool->buffer)
      pb_reference(&pool->buffer, NULL);
   if(pool)
      FREE(pool);
   return NULL;
}
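
For context, here is a brief hypothetical usage sketch of the pool manager above; the provider variable and the chosen sizes are illustrative assumptions, not taken from the original project.

/* Hypothetical setup: carve 64 fixed-size 64 KiB sub-buffers out of one
 * large allocation obtained from an existing provider manager. */
struct pb_desc desc;
memset(&desc, 0, sizeof(desc));
desc.alignment = 4096;

struct pb_manager *pool = pool_bufmgr_create(provider, 64, 64 * 1024, &desc);
if (pool) {
   /* Sub-buffers are then handed out through pool->create_buffer(pool, ...)
    * and the whole pool is torn down with pool->destroy(pool). */
}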
/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   /*
    * Don't stall the GPU, waste time evicting buffers, or waste memory
    * trying to create a buffer that will most likely never fit into the
    * graphics aperture.
    */
   if(size > fenced_mgr->max_buffer_size) {
      goto no_buffer;
   }

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if(!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.base.reference, 1);
   fenced_buf->base.base.alignment = desc->alignment;
   fenced_buf->base.base.usage = desc->usage;
   fenced_buf->base.base.size = size;
   fenced_buf->size = size;
   fenced_buf->desc = *desc;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * Try to create GPU storage without stalling,
    */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);

   /*
    * Attempt to use CPU memory to avoid stalling the GPU.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
   }

   /*
    * Create GPU storage, waiting for some to be available.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
   }

   /*
    * Give up.
    */
   if(ret != PIPE_OK) {
      goto no_storage;
   }

   assert(fenced_buf->buffer || fenced_buf->data);

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   pipe_mutex_unlock(fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   pipe_mutex_unlock(fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}