Example No. 1
static INLINE int
pb_cache_is_buffer_compat(struct pb_cache_buffer *buf,  
                          pb_size size,
                          const struct pb_desc *desc)
{
   if(buf->base.size < size)
      return 0;

   /* be lenient with size */
   if(buf->base.size >= 2*size)
      return 0;
   
   if(!pb_check_alignment(desc->alignment, buf->base.alignment))
      return 0;
   
   if(!pb_check_usage(desc->usage, buf->base.usage))
      return 0;

   if (buf->mgr->provider->is_buffer_busy) {
      if (buf->mgr->provider->is_buffer_busy(buf->mgr->provider, buf->buffer))
         return -1;
   } else {
      void *ptr = pb_map(buf->buffer, PB_USAGE_DONTBLOCK, NULL);

      if (!ptr)
         return -1;

      pb_unmap(buf->buffer);
   }

   return 1;
}
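Example No. 1 returns a three-way result: 0 when the cached buffer cannot be reused at all, -1 when it is a size/usage match but still busy on the GPU (detected either through the provider's is_buffer_busy callback or by attempting a non-blocking pb_map with PB_USAGE_DONTBLOCK), and 1 when it can be reclaimed immediately. The sketch below shows how a cache lookup might consume those return values; the manager type, its `delayed` list field and the LIST_FOR_EACH_ENTRY iteration are assumptions for illustration, not the actual pb_bufmgr_cache code.

/* Illustrative sketch only: interpreting the 0 / 1 / -1 result above.
 * The manager struct, its `delayed` list and LIST_FOR_EACH_ENTRY are
 * assumed names, not the real pb_bufmgr_cache implementation. */
static struct pb_cache_buffer *
find_reusable_buffer(struct pb_cache_manager *mgr,
                     pb_size size, const struct pb_desc *desc)
{
   struct pb_cache_buffer *curr_buf;

   LIST_FOR_EACH_ENTRY(curr_buf, &mgr->delayed, head) {
      int ret = pb_cache_is_buffer_compat(curr_buf, size, desc);
      if (ret == 1)
         return curr_buf;   /* compatible and idle: reclaim it */
      if (ret == -1)
         break;             /* compatible but still busy: give up */
      /* ret == 0: not compatible, keep looking */
   }
   return NULL;
}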
Example No. 2
static void *
pb_cache_buffer_map(struct pb_buffer *_buf, 
		    unsigned flags, void *flush_ctx)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);   
   return pb_map(buf->buffer, flags, flush_ctx);
}
Example No. 3
static enum pipe_error 
pb_ondemand_buffer_instantiate(struct pb_ondemand_buffer *buf)
{
   if(!buf->buffer) {
      struct pb_manager *provider = buf->mgr->provider;
      uint8_t *map;
      
      assert(!buf->mapcount);
      
      buf->buffer = provider->create_buffer(provider, buf->size, &buf->desc);
      if(!buf->buffer)
         return PIPE_ERROR_OUT_OF_MEMORY;
      
      map = pb_map(buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
      if(!map) {
         pb_reference(&buf->buffer, NULL);
         return PIPE_ERROR;
      }
      
      memcpy(map, buf->data, buf->size);
      
      pb_unmap(buf->buffer);
      
      if(!buf->mapcount) {
         FREE(buf->data);
         buf->data = NULL;
      }
   }
   
   return PIPE_OK;
}
Example No. 4
static void *
pb_cache_buffer_map(struct pb_buffer *_buf, 
		    enum pb_usage_flags flags, void *flush_ctx)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);   
   return pb_map(buf->buffer, flags, flush_ctx);
}
Example No. 5
static void *
vmw_svga_winsys_buffer_map(struct svga_winsys_screen *sws,
                           struct svga_winsys_buffer *buf,
                           unsigned flags)
{
   (void)sws;
   return pb_map(vmw_pb_buffer(buf), flags, NULL);
}
Example No. 6
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));

   /*
    * Serialize writes.
    */
   while((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
         ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) &&
          (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {

      /* 
       * Don't wait for the GPU to finish accessing the buffer if blocking is forbidden.
       */
      if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
          ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         goto done;
      }

      if (flags & PIPE_BUFFER_USAGE_UNSYNCHRONIZED) {
         break;
      }

      /*
       * Wait for the GPU to finish accessing. This will release and re-acquire
       * the mutex, so all copies of mutable state must be discarded.
       */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }

   if(fenced_buf->buffer) {
      map = pb_map(fenced_buf->buffer, flags);
   }
   else {
      assert(fenced_buf->data);
      map = fenced_buf->data;
   }

   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return map;
}
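Example No. 6 is the synchronization-heavy path: the fenced manager serializes CPU writes against pending GPU access before forwarding the map request, honoring PIPE_BUFFER_USAGE_DONTBLOCK and PIPE_BUFFER_USAGE_UNSYNCHRONIZED. From the caller's side, the same flags support the common try-nonblocking-then-block pattern. The helper below is a hedged sketch assuming the two-argument pb_map/pb_unmap API used in this example; the function name and arguments are hypothetical.

/* Hypothetical caller-side pattern (a sketch, not code from the tree):
 * attempt a non-blocking CPU-write map first, then fall back to blocking. */
static boolean
write_dword(struct pb_buffer *buf, unsigned offset, uint32_t value)
{
   void *map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE |
                           PIPE_BUFFER_USAGE_DONTBLOCK);
   if (!map) {
      /* Buffer is still busy on the GPU; accept blocking this time. */
      map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE);
      if (!map)
         return FALSE;
   }

   *(uint32_t *)((uint8_t *)map + offset) = value;
   pb_unmap(buf);
   return TRUE;
}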
Example No. 7
static void
pb_debug_buffer_fill(struct pb_debug_buffer *buf)
{
   uint8_t *map;
   
   map = pb_map(buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
   assert(map);
   if(map) {
      fill_random_pattern(map, buf->underflow_size);
      fill_random_pattern(map + buf->underflow_size + buf->base.base.size, 
                          buf->overflow_size);
      pb_unmap(buf->buffer);
   }
}
Example No. 8
/**
 * Check for under/over flows.
 *
 * Should be called with the buffer unmapped.
 */
static void
pb_debug_buffer_check(struct pb_debug_buffer *buf)
{
   uint8_t *map;
   
   map = pb_map(buf->buffer,
                PB_USAGE_CPU_READ |
                PB_USAGE_UNSYNCHRONIZED, NULL);
   assert(map);
   if (map) {
      boolean underflow, overflow;
      pb_size min_ofs, max_ofs;
      
      underflow = !check_random_pattern(map, buf->underflow_size, 
                                        &min_ofs, &max_ofs);
      if(underflow) {
         debug_printf("buffer underflow (offset -%"PRIu64"%s to -%"PRIu64" bytes) detected\n",
                      buf->underflow_size - min_ofs,
                      min_ofs == 0 ? "+" : "",
                      buf->underflow_size - max_ofs);
      }
      
      overflow = !check_random_pattern(map + buf->underflow_size + buf->base.size,
                                       buf->overflow_size, 
                                       &min_ofs, &max_ofs);
      if(overflow) {
         debug_printf("buffer overflow (size %"PRIu64" plus offset %"PRIu64" to %"PRIu64"%s bytes) detected\n",
                      buf->base.size,
                      min_ofs,
                      max_ofs,
                      max_ofs == buf->overflow_size - 1 ? "+" : "");
      }
      
      if(underflow || overflow)
         debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);

      debug_assert(!underflow);
      debug_assert(!overflow);

      /* re-fill if not aborted */
      if(underflow)
         fill_random_pattern(map, buf->underflow_size);
      if(overflow)
         fill_random_pattern(map + buf->underflow_size + buf->base.size,
                             buf->overflow_size);

      pb_unmap(buf->buffer);
   }
}
Example No. 9
static void *
pb_debug_buffer_map(struct pb_buffer *_buf, 
                    unsigned flags)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   void *map;
   
   pb_debug_buffer_check(buf);

   map = pb_map(buf->buffer, flags);
   if(!map)
      return NULL;
   
   return (uint8_t *)map + buf->underflow_size;
}
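Example No. 9 hands the caller a pointer offset past the leading guard band, so out-of-bounds writes land in the random-pattern regions that pb_debug_buffer_check() inspects. The corresponding unmap side is not shown; a minimal sketch (assumed shape, not the actual pb_bufmgr_debug code) would forward the unmap and then re-check the guard bands, since the check expects the buffer to be unmapped:

/* Assumed sketch of the matching unmap path (not the real debug manager code). */
static void
pb_debug_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);

   pb_unmap(buf->buffer);       /* the underlying map also covered the guard bands */

   pb_debug_buffer_check(buf);  /* catch underflows/overflows as early as possible */
}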
Example No. 10
static void *
pb_ondemand_buffer_map(struct pb_buffer *_buf, 
                       unsigned flags)
{
   struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);

   if(buf->buffer) {
      assert(!buf->data);
      return pb_map(buf->buffer, flags);
   }
   else {
      assert(buf->data);
      ++buf->mapcount;
      return buf->data;
   }
}
Example No. 11
struct pb_manager *
mm_bufmgr_create_from_buffer(struct pb_buffer *buffer, 
                             pb_size size, pb_size align2) 
{
   struct mm_pb_manager *mm;

   if(!buffer)
      return NULL;
   
   mm = CALLOC_STRUCT(mm_pb_manager);
   if (!mm)
      return NULL;

   mm->base.destroy = mm_bufmgr_destroy;
   mm->base.create_buffer = mm_bufmgr_create_buffer;
   mm->base.flush = mm_bufmgr_flush;

   mm->size = size;
   mm->align2 = align2; /* 64-byte alignment */

   pipe_mutex_init(mm->mutex);

   mm->buffer = buffer; 

   mm->map = pb_map(mm->buffer, 
		    PIPE_BUFFER_USAGE_CPU_READ |
		    PIPE_BUFFER_USAGE_CPU_WRITE);
   if(!mm->map)
      goto failure;

   mm->heap = u_mmInit(0, (int)size); 
   if (!mm->heap)
      goto failure;

   return SUPER(mm);
   
failure:
   if(mm->heap)
      u_mmDestroy(mm->heap);
   if(mm->map)
      pb_unmap(mm->buffer);
   if(mm)
      FREE(mm);
   return NULL;
}
Example No. 12
static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
{
    uint8_t *map;

    assert(fenced_buf->data);
    assert(fenced_buf->buffer);

    map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_WRITE, NULL);
    if (!map)
        return PIPE_ERROR;

    memcpy(map, fenced_buf->data, fenced_buf->size);

    pb_unmap(fenced_buf->buffer);

    return PIPE_OK;
}
Example No. 13
static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
{
   const uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_READ);
   if(!map)
      return PIPE_ERROR;

   memcpy(fenced_buf->data, map, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}
Example No. 14
/**
 * Check for under/over flows.
 * 
 * Should be called with the buffer unmapped.
 */
static void
pb_debug_buffer_check(struct pb_debug_buffer *buf)
{
   uint8_t *map;
   
   map = pb_map(buf->buffer, PIPE_BUFFER_USAGE_CPU_READ);
   assert(map);
   if(map) {
      boolean underflow, overflow;
      size_t min_ofs, max_ofs;
      
      underflow = !check_random_pattern(map, buf->underflow_size, 
                                        &min_ofs, &max_ofs);
      if(underflow) {
         debug_printf("buffer underflow (offset -%u%s to -%u bytes) detected\n",
                      buf->underflow_size - min_ofs,
                      min_ofs == 0 ? "+" : "",
                      buf->underflow_size - max_ofs);
      }
      
      overflow = !check_random_pattern(map + buf->underflow_size + buf->base.base.size, 
                                       buf->overflow_size, 
                                       &min_ofs, &max_ofs);
      if(overflow) {
         debug_printf("buffer overflow (size %u plus offset %u to %u%s bytes) detected\n",
                      buf->base.base.size,
                      min_ofs,
                      max_ofs,
                      max_ofs == buf->overflow_size - 1 ? "+" : "");
      }
      
      debug_assert(!underflow && !overflow);

      /* re-fill if not aborted */
      if(underflow)
         fill_random_pattern(map, buf->underflow_size);
      if(overflow)
         fill_random_pattern(map + buf->underflow_size + buf->base.base.size, 
                             buf->overflow_size);

      pb_unmap(buf->buffer);
   }
}
Example No. 15
static bool
pb_cache_can_reclaim_buffer(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);

   if (buf->mgr->provider->is_buffer_busy) {
      if (buf->mgr->provider->is_buffer_busy(buf->mgr->provider, buf->buffer))
         return false;
   } else {
      void *ptr = pb_map(buf->buffer, PB_USAGE_DONTBLOCK, NULL);

      if (!ptr)
         return false;

      pb_unmap(buf->buffer);
   }

   return true;
}
Example No. 16
static void *
pb_debug_buffer_map(struct pb_buffer *_buf, 
                    unsigned flags, void *flush_ctx)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   void *map;
   
   pb_debug_buffer_check(buf);

   map = pb_map(buf->buffer, flags, flush_ctx);
   if (!map)
      return NULL;
   
   mtx_lock(&buf->mutex);
   ++buf->map_count;
   debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE);
   mtx_unlock(&buf->mutex);
   
   return (uint8_t *)map + buf->underflow_size;
}
Example No. 17
static void *radeon_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *cs,
                           enum pipe_transfer_usage usage)
{
    return pb_map(buf, get_pb_usage_from_transfer_flags(usage), cs);
}
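Example No. 17 adapts the driver-facing pipe_transfer_usage flags to the PB_USAGE_* flags that pb_map expects, passing the command stream as the flush context. The body of get_pb_usage_from_transfer_flags is not included in the example; the sketch below is an assumed, plausible translation and should not be read as the actual radeon winsys helper.

/* Assumed sketch of the flag translation (not the real radeon winsys helper). */
static unsigned
get_pb_usage_from_transfer_flags(enum pipe_transfer_usage usage)
{
   unsigned res = 0;

   if (usage & PIPE_TRANSFER_READ)
      res |= PB_USAGE_CPU_READ;
   if (usage & PIPE_TRANSFER_WRITE)
      res |= PB_USAGE_CPU_WRITE;
   if (usage & PIPE_TRANSFER_DONTBLOCK)
      res |= PB_USAGE_DONTBLOCK;
   if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
      res |= PB_USAGE_UNSYNCHRONIZED;

   return res;
}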
Example No. 18
struct pb_manager *
pool_bufmgr_create(struct pb_manager *provider, 
                   pb_size numBufs, 
                   pb_size bufSize,
                   const struct pb_desc *desc) 
{
   struct pool_pb_manager *pool;
   struct pool_buffer *pool_buf;
   pb_size i;

   if(!provider)
      return NULL;
   
   pool = CALLOC_STRUCT(pool_pb_manager);
   if (!pool)
      return NULL;

   pool->base.destroy = pool_bufmgr_destroy;
   pool->base.create_buffer = pool_bufmgr_create_buffer;
   pool->base.flush = pool_bufmgr_flush;

   LIST_INITHEAD(&pool->free);

   pool->numTot = numBufs;
   pool->numFree = numBufs;
   pool->bufSize = bufSize;
   pool->bufAlign = desc->alignment; 
   
   pipe_mutex_init(pool->mutex);

   pool->buffer = provider->create_buffer(provider, numBufs*bufSize, desc); 
   if (!pool->buffer)
      goto failure;

   pool->map = pb_map(pool->buffer,
                          PB_USAGE_CPU_READ |
                          PB_USAGE_CPU_WRITE, NULL);
   if(!pool->map)
      goto failure;

   pool->bufs = (struct pool_buffer *)CALLOC(numBufs, sizeof(*pool->bufs));
   if (!pool->bufs)
      goto failure;

   pool_buf = pool->bufs;
   for (i = 0; i < numBufs; ++i) {
      pipe_reference_init(&pool_buf->base.reference, 0);
      pool_buf->base.alignment = 0;
      pool_buf->base.usage = 0;
      pool_buf->base.size = bufSize;
      pool_buf->base.vtbl = &pool_buffer_vtbl;
      pool_buf->mgr = pool;
      pool_buf->start = i * bufSize;
      LIST_ADDTAIL(&pool_buf->head, &pool->free);
      pool_buf++;
   }

   return SUPER(pool);
   
failure:
   if(pool->bufs)
      FREE(pool->bufs);
   if(pool->map)
      pb_unmap(pool->buffer);
   if(pool->buffer)
      pb_reference(&pool->buffer, NULL);
   if(pool)
      FREE(pool);
   return NULL;
}
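Examples No. 11 and No. 18 both keep the backing buffer persistently mapped for the lifetime of the manager, so the teardown path must pair the create-time pb_map with pb_unmap before releasing the buffer. The sketch below (assuming the field names from Example No. 18 and a pool_pb_manager() cast helper; it is not the actual pool_bufmgr_destroy) illustrates that pairing.

/* Rough sketch of the matching teardown; field names and the cast helper
 * are assumed from above, not the actual pool_bufmgr_destroy implementation. */
static void
pool_bufmgr_destroy(struct pb_manager *mgr)
{
   struct pool_pb_manager *pool = pool_pb_manager(mgr);

   pipe_mutex_lock(pool->mutex);

   FREE(pool->bufs);

   pb_unmap(pool->buffer);             /* undo the create-time pb_map() */
   pb_reference(&pool->buffer, NULL);  /* release the backing buffer */

   pipe_mutex_unlock(pool->mutex);
   pipe_mutex_destroy(pool->mutex);

   FREE(pool);
}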