Example 1
static INLINE int
pb_cache_is_buffer_compat(struct pb_cache_buffer *buf,  
                          pb_size size,
                          const struct pb_desc *desc)
{
   if(buf->base.size < size)
      return 0;

   /* be lenient with size, but don't waste more than half the
    * buffer: reject anything at least twice the requested size */
   if(buf->base.size >= 2*size)
      return 0;
   
   if(!pb_check_alignment(desc->alignment, buf->base.alignment))
      return 0;
   
   if(!pb_check_usage(desc->usage, buf->base.usage))
      return 0;

   if (buf->mgr->provider->is_buffer_busy) {
      if (buf->mgr->provider->is_buffer_busy(buf->mgr->provider, buf->buffer))
         return -1;
   } else {
      void *ptr = pb_map(buf->buffer, PB_USAGE_DONTBLOCK, NULL);

      if (!ptr)
         return -1;

      pb_unmap(buf->buffer);
   }

   return 1;
}
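
The tri-state return value above matters to the caller: 0 means the cached buffer can never satisfy the request, 1 means it is compatible and idle, and -1 means it is compatible but still in flight on the GPU. A minimal caller sketch, assuming an LRU-ordered cache list; the helper name and list layout are hypothetical, not Mesa's actual lookup code:

/* Hypothetical sketch: walk an LRU-ordered cache list and reuse the
 * first compatible idle buffer.  A -1 result means "compatible but
 * busy"; newer entries are at least as busy, so stop searching. */
static struct pb_cache_buffer *
find_reusable_buffer(struct list_head *cache, pb_size size,
                     const struct pb_desc *desc)
{
   struct pb_cache_buffer *curr, *next;

   LIST_FOR_EACH_ENTRY_SAFE(curr, next, cache, head) {
      int ret = pb_cache_is_buffer_compat(curr, size, desc);
      if (ret == 1)
         return curr;   /* compatible and idle: reuse it */
      if (ret == -1)
         break;         /* compatible but busy: give up early */
   }
   return NULL;          /* caller falls back to a fresh allocation */
}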
Example 2
static void
vmw_svga_winsys_buffer_unmap(struct svga_winsys_screen *sws,
                             struct svga_winsys_buffer *buf)
{
   (void)sws;
   pb_unmap(vmw_pb_buffer(buf));
}
Example 3
static enum pipe_error 
pb_ondemand_buffer_instantiate(struct pb_ondemand_buffer *buf)
{
   if(!buf->buffer) {
      struct pb_manager *provider = buf->mgr->provider;
      uint8_t *map;
      
      assert(!buf->mapcount);
      
      buf->buffer = provider->create_buffer(provider, buf->size, &buf->desc);
      if(!buf->buffer)
         return PIPE_ERROR_OUT_OF_MEMORY;
      
      /* map for writing: the CPU-side shadow is copied in below */
      map = pb_map(buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
      if(!map) {
         pb_reference(&buf->buffer, NULL);
         return PIPE_ERROR;
      }
      
      memcpy(map, buf->data, buf->size);
      
      pb_unmap(buf->buffer);
      
      if(!buf->mapcount) {
         FREE(buf->data);
         buf->data = NULL;
      }
   }
   
   return PIPE_OK;
}
Example 4
static void
pb_debug_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);   
   pb_unmap(buf->buffer);
   
   pb_debug_buffer_check(buf);
}
Example 5
static void
pb_debug_buffer_fill(struct pb_debug_buffer *buf)
{
   uint8_t *map;
   
   map = pb_map(buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
   assert(map);
   if(map) {
      fill_random_pattern(map, buf->underflow_size);
      fill_random_pattern(map + buf->underflow_size + buf->base.base.size, 
                          buf->overflow_size);
      pb_unmap(buf->buffer);
   }
}
Example 6
/**
 * Check for under/over flows.
 * 
 * Should be called with the buffer unmapped.
 */
static void
pb_debug_buffer_check(struct pb_debug_buffer *buf)
{
   uint8_t *map;
   
   map = pb_map(buf->buffer,
                PB_USAGE_CPU_READ |
                PB_USAGE_UNSYNCHRONIZED, NULL);
   assert(map);
   if (map) {
      boolean underflow, overflow;
      pb_size min_ofs, max_ofs;
      
      underflow = !check_random_pattern(map, buf->underflow_size, 
                                        &min_ofs, &max_ofs);
      if(underflow) {
         debug_printf("buffer underflow (offset -%"PRIu64"%s to -%"PRIu64" bytes) detected\n",
                      buf->underflow_size - min_ofs,
                      min_ofs == 0 ? "+" : "",
                      buf->underflow_size - max_ofs);
      }
      
      overflow = !check_random_pattern(map + buf->underflow_size + buf->base.size,
                                       buf->overflow_size, 
                                       &min_ofs, &max_ofs);
      if(overflow) {
         debug_printf("buffer overflow (size %"PRIu64" plus offset %"PRIu64" to %"PRIu64"%s bytes) detected\n",
                      buf->base.size,
                      min_ofs,
                      max_ofs,
                      max_ofs == buf->overflow_size - 1 ? "+" : "");
      }
      
      if(underflow || overflow)
         debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);

      debug_assert(!underflow);
      debug_assert(!overflow);

      /* re-fill if not aborted */
      if(underflow)
         fill_random_pattern(map, buf->underflow_size);
      if(overflow)
         fill_random_pattern(map + buf->underflow_size + buf->base.size,
                             buf->overflow_size);

      pb_unmap(buf->buffer);
   }
}
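
pb_debug_buffer_fill and pb_debug_buffer_check cooperate through the offset arithmetic above: the underlying allocation is larger than the user-visible buffer, with a random-patterned guard band on each side. A sketch of the assumed layout, using the field names from the examples:

/*
 * Layout of the underlying allocation (sketch):
 *
 *   map                 map + underflow_size            + base.size
 *    |                   |                               |
 *    v                   v                               v
 *    +-------------------+-------------------------------+-----------------+
 *    |  underflow band   |     user data (base.size)     |  overflow band  |
 *    | (underflow_size)  |                               | (overflow_size) |
 *    +-------------------+-------------------------------+-----------------+
 *      random pattern      pointer handed to the user      random pattern
 *
 * Any write before offset 0 or past base.size lands in a guard band,
 * which is why check_random_pattern() can report the damaged range.
 */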
Example 7
static void
pool_bufmgr_destroy(struct pb_manager *mgr)
{
   struct pool_pb_manager *pool = pool_pb_manager(mgr);
   pipe_mutex_lock(pool->mutex);

   FREE(pool->bufs);
   
   pb_unmap(pool->buffer);
   pb_reference(&pool->buffer, NULL);
   
   pipe_mutex_unlock(pool->mutex);
   
   FREE(mgr);
}
Example 8
static void
pb_debug_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);   
   
   pipe_mutex_lock(buf->mutex);
   assert(buf->map_count);
   if(buf->map_count)
      --buf->map_count;
   pipe_mutex_unlock(buf->mutex);
   
   pb_unmap(buf->buffer);
   
   pb_debug_buffer_check(buf);
}
Example 9
static void
mm_bufmgr_destroy(struct pb_manager *mgr)
{
   struct mm_pb_manager *mm = mm_pb_manager(mgr);
   
   mtx_lock(&mm->mutex);

   u_mmDestroy(mm->heap);
   
   pb_unmap(mm->buffer);
   pb_reference(&mm->buffer, NULL);
   
   mtx_unlock(&mm->mutex);
   
   FREE(mgr);
}
Example 10
static void
pb_ondemand_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);

   if(buf->buffer) {
      assert(!buf->data);
      pb_unmap(buf->buffer);
   }
   else {
      assert(buf->data);
      assert(buf->mapcount);
      if(buf->mapcount)
         --buf->mapcount;
   }
}
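
Examples 3 and 10 are two halves of the on-demand pattern: until instantiation, the contents live in a malloc'ed CPU shadow (buf->data) and map/unmap only maintain a counter; once a real provider buffer exists, unmap forwards to it. A hedged sketch of the matching map path (hypothetical helper, two-argument pb_map as in Example 3):

/* Hypothetical sketch: before instantiation, mapping returns the
 * CPU shadow and bumps the count; afterwards it forwards to the
 * real buffer. */
static void *
ondemand_map_sketch(struct pb_ondemand_buffer *buf)
{
   if (buf->buffer)
      return pb_map(buf->buffer, PIPE_BUFFER_USAGE_CPU_READ_WRITE);
   ++buf->mapcount;
   return buf->data;
}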
Example 11
struct pb_manager *
mm_bufmgr_create_from_buffer(struct pb_buffer *buffer, 
                             pb_size size, pb_size align2) 
{
   struct mm_pb_manager *mm;

   if(!buffer)
      return NULL;
   
   mm = CALLOC_STRUCT(mm_pb_manager);
   if (!mm)
      return NULL;

   mm->base.destroy = mm_bufmgr_destroy;
   mm->base.create_buffer = mm_bufmgr_create_buffer;
   mm->base.flush = mm_bufmgr_flush;

   mm->size = size;
   mm->align2 = align2; /* power-of-two exponent: 1 << align2 bytes */

   pipe_mutex_init(mm->mutex);

   mm->buffer = buffer; 

   mm->map = pb_map(mm->buffer,
                    PIPE_BUFFER_USAGE_CPU_READ |
                    PIPE_BUFFER_USAGE_CPU_WRITE);
   if(!mm->map)
      goto failure;

   mm->heap = u_mmInit(0, (int)size); 
   if (!mm->heap)
      goto failure;

   return SUPER(mm);
   
failure:
   if(mm->heap)
      u_mmDestroy(mm->heap);
   if(mm->map)
      pb_unmap(mm->buffer);
   if(mm)
      FREE(mm);
   return NULL;
}
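
A hedged usage sketch for the constructor above: the caller hands over an existing provider buffer, which the manager keeps persistently mapped and carves up with a u_mm heap. The provider, size, and alignment exponent below are illustrative; as the failure path suggests, the buffer reference only transfers to the manager on success.

/* Sketch: sub-allocate a 1 MiB provider buffer; `provider` and
 * `desc` are assumed to exist in the surrounding winsys code.
 * align2 is a power-of-two exponent: 6 requests 1 << 6 = 64-byte
 * alignment. */
struct pb_buffer *backing =
   provider->create_buffer(provider, 1024 * 1024, &desc);
struct pb_manager *mm =
   mm_bufmgr_create_from_buffer(backing, 1024 * 1024, 6);
if (!mm)
   pb_reference(&backing, NULL);   /* we still own the reference */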
Example 12
static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
{
   const uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_READ);
   if(!map)
      return PIPE_ERROR;

   memcpy(fenced_buf->data, map, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}
Example 13
static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
{
    uint8_t *map;

    assert(fenced_buf->data);
    assert(fenced_buf->buffer);

    map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_WRITE, NULL);
    if (!map)
        return PIPE_ERROR;

    memcpy(map, fenced_buf->data, fenced_buf->size);

    pb_unmap(fenced_buf->buffer);

    return PIPE_OK;
}
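
Examples 12 and 13 are symmetric staging helpers: one copies the buffer's contents out to a malloc'ed backing store, the other copies them back into a (possibly newly allocated) GPU buffer, each through a transient map/unmap pair. A hedged sketch of how they might pair up during storage migration; the locking and reallocation steps are assumptions, not code from the examples:

/* Sketch (manager mutex assumed held): spill to CPU memory, release
 * the GPU buffer, and later upload into a reallocated one. */
if (fenced_buffer_copy_storage_to_cpu_locked(fenced_buf) == PIPE_OK) {
   pb_reference(&fenced_buf->buffer, NULL);
   /* ... later, after reallocating fenced_buf->buffer ... */
   fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
}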
Example 14
/**
 * Check for under/over flows.
 * 
 * Should be called with the buffer unmapped.
 */
static void
pb_debug_buffer_check(struct pb_debug_buffer *buf)
{
   uint8_t *map;
   
   map = pb_map(buf->buffer, PIPE_BUFFER_USAGE_CPU_READ);
   assert(map);
   if(map) {
      boolean underflow, overflow;
      size_t min_ofs, max_ofs;
      
      underflow = !check_random_pattern(map, buf->underflow_size, 
                                        &min_ofs, &max_ofs);
      if(underflow) {
         debug_printf("buffer underflow (offset -%u%s to -%u bytes) detected\n",
                      buf->underflow_size - min_ofs,
                      min_ofs == 0 ? "+" : "",
                      buf->underflow_size - max_ofs);
      }
      
      overflow = !check_random_pattern(map + buf->underflow_size + buf->base.base.size, 
                                       buf->overflow_size, 
                                       &min_ofs, &max_ofs);
      if(overflow) {
         debug_printf("buffer overflow (size %u plus offset %u to %u%s bytes) detected\n",
                      buf->base.base.size,
                      min_ofs,
                      max_ofs,
                      max_ofs == buf->overflow_size - 1 ? "+" : "");
      }
      
      debug_assert(!underflow && !overflow);

      /* re-fill if not aborted */
      if(underflow)
         fill_random_pattern(map, buf->underflow_size);
      if(overflow)
         fill_random_pattern(map + buf->underflow_size + buf->base.base.size, 
                             buf->overflow_size);

      pb_unmap(buf->buffer);
   }
}
Example 15
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      if (fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
Example 16
static bool
pb_cache_can_reclaim_buffer(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);

   if (buf->mgr->provider->is_buffer_busy) {
      if (buf->mgr->provider->is_buffer_busy(buf->mgr->provider, buf->buffer))
         return false;
   } else {
      void *ptr = pb_map(buf->buffer, PB_USAGE_DONTBLOCK, NULL);

      if (!ptr)
         return false;

      pb_unmap(buf->buffer);
   }

   return true;
}
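
When the provider exposes no is_buffer_busy hook, the example above reuses a non-blocking map attempt as a busyness probe: under PB_USAGE_DONTBLOCK, a NULL result from pb_map means the mapping would have had to wait, not that it failed outright. The same idiom in isolation, as a small hypothetical helper:

/* Hypothetical helper: probe GPU idleness via a non-blocking map.
 * Returns true only if the buffer could be mapped immediately. */
static bool
buffer_is_idle(struct pb_buffer *buf)
{
   void *ptr = pb_map(buf, PB_USAGE_DONTBLOCK, NULL);
   if (!ptr)
      return false;   /* map would have blocked: still busy */
   pb_unmap(buf);      /* probe only; release the mapping at once */
   return true;
}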
Example 17
static void
pb_cache_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);   
   pb_unmap(buf->buffer);
}
Example 18
struct pb_manager *
pool_bufmgr_create(struct pb_manager *provider, 
                   pb_size numBufs, 
                   pb_size bufSize,
                   const struct pb_desc *desc) 
{
   struct pool_pb_manager *pool;
   struct pool_buffer *pool_buf;
   pb_size i;

   if(!provider)
      return NULL;
   
   pool = CALLOC_STRUCT(pool_pb_manager);
   if (!pool)
      return NULL;

   pool->base.destroy = pool_bufmgr_destroy;
   pool->base.create_buffer = pool_bufmgr_create_buffer;
   pool->base.flush = pool_bufmgr_flush;

   LIST_INITHEAD(&pool->free);

   pool->numTot = numBufs;
   pool->numFree = numBufs;
   pool->bufSize = bufSize;
   pool->bufAlign = desc->alignment; 
   
   pipe_mutex_init(pool->mutex);

   pool->buffer = provider->create_buffer(provider, numBufs*bufSize, desc); 
   if (!pool->buffer)
      goto failure;

   pool->map = pb_map(pool->buffer,
                      PB_USAGE_CPU_READ |
                      PB_USAGE_CPU_WRITE, NULL);
   if(!pool->map)
      goto failure;

   pool->bufs = (struct pool_buffer *)CALLOC(numBufs, sizeof(*pool->bufs));
   if (!pool->bufs)
      goto failure;

   pool_buf = pool->bufs;
   for (i = 0; i < numBufs; ++i) {
      pipe_reference_init(&pool_buf->base.reference, 0);
      pool_buf->base.alignment = 0;
      pool_buf->base.usage = 0;
      pool_buf->base.size = bufSize;
      pool_buf->base.vtbl = &pool_buffer_vtbl;
      pool_buf->mgr = pool;
      pool_buf->start = i * bufSize;
      LIST_ADDTAIL(&pool_buf->head, &pool->free);
      pool_buf++;
   }

   return SUPER(pool);
   
failure:
   if(pool->bufs)
      FREE(pool->bufs);
   if(pool->map)
      pb_unmap(pool->buffer);
   if(pool->buffer)
      pb_reference(&pool->buffer, NULL);
   if(pool)
      FREE(pool);
   return NULL;
}
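
The pool manager above maps its whole backing buffer exactly once and records each slot's byte offset in pool_buf->start, so individual pool buffers never need their own map/unmap round trips. A sketch of how a slot's CPU pointer falls out of that single mapping (hypothetical helper, not part of the original code):

/* Hypothetical helper: a slot's CPU address is an offset into the
 * pool's one persistent mapping established in pool_bufmgr_create. */
static void *
pool_slot_ptr(struct pool_pb_manager *pool,
              const struct pool_buffer *pool_buf)
{
   /* pool_buf->start was initialized to i * bufSize */
   return (uint8_t *)pool->map + pool_buf->start;
}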