Example #1
0
/**
 * vmw_winsys_destroy - Drop a reference to a winsys screen.
 *
 * @vws: Pointer to the struct vmw_winsys_screen to unreference.
 *
 * Decrements the screen's open count; only the last reference actually
 * tears the screen down. Teardown runs in reverse order of construction.
 */
void
vmw_winsys_destroy(struct vmw_winsys_screen *vws)
{
   /* Not the last reference: nothing to do. */
   if (--vws->open_count != 0)
      return;

   util_hash_table_remove(dev_hash, &vws->device);
   vmw_pools_cleanup(vws);
   vws->fence_ops->destroy(vws->fence_ops);
   vmw_ioctl_cleanup(vws);
   close(vws->ioctl.drm_fd);
   FREE(vws);
}
Example #2
0
/**
 * vmw_winsys_create - Create (or reference) a winsys screen for a DRM fd.
 *
 * @fd: Open DRM file descriptor. The descriptor is dup()'ed, so the
 * caller retains ownership of @fd.
 *
 * Screens are shared per device: if dev_hash already holds a screen for
 * the device backing @fd, its reference count is bumped and the existing
 * screen is returned.
 *
 * Returns the screen on success, NULL on failure.
 */
struct vmw_winsys_screen *
vmw_winsys_create( int fd )
{
   struct vmw_winsys_screen *vws;
   struct stat stat_buf;

   /* Lazily create the device -> screen hash table. */
   if (dev_hash == NULL) {
      dev_hash = util_hash_table_create(vmw_dev_hash, vmw_dev_compare);
      if (dev_hash == NULL)
         return NULL;
   }

   if (fstat(fd, &stat_buf))
      return NULL;

   /* Reuse an existing screen for the same device, if any. */
   vws = util_hash_table_get(dev_hash, &stat_buf.st_rdev);
   if (vws) {
      vws->open_count++;
      return vws;
   }

   vws = CALLOC_STRUCT(vmw_winsys_screen);
   if (!vws)
      goto out_no_vws;

   vws->device = stat_buf.st_rdev;
   vws->open_count = 1;
   vws->ioctl.drm_fd = dup(fd);
   /*
    * dup() may fail (e.g. EMFILE); without this check we would hand an
    * invalid fd (-1) to vmw_ioctl_init() and later call close(-1) in the
    * error path.
    */
   if (vws->ioctl.drm_fd < 0)
      goto out_no_dup;
   vws->base.have_gb_dma = TRUE;
   vws->base.need_to_rebind_resources = FALSE;

   if (!vmw_ioctl_init(vws))
      goto out_no_ioctl;

   vws->fence_ops = vmw_fence_ops_create(vws);
   if (!vws->fence_ops)
      goto out_no_fence_ops;

   if(!vmw_pools_init(vws))
      goto out_no_pools;

   if (!vmw_winsys_screen_init_svga(vws))
      goto out_no_svga;

   if (util_hash_table_set(dev_hash, &vws->device, vws) != PIPE_OK)
      goto out_no_hash_insert;

   return vws;

   /* Unwind in reverse order of construction. */
out_no_hash_insert:
out_no_svga:
   vmw_pools_cleanup(vws);
out_no_pools:
   vws->fence_ops->destroy(vws->fence_ops);
out_no_fence_ops:
   vmw_ioctl_cleanup(vws);
out_no_ioctl:
   close(vws->ioctl.drm_fd);
out_no_dup:
   FREE(vws);
out_no_vws:
   return NULL;
}
Example #3
0
/**
 * vmw_pools_init - Create a pool of GMR buffers.
 *
 * @vws: Pointer to a struct vmw_winsys_screen.
 *
 * Returns TRUE on success. On failure, any partially created pools are
 * released via vmw_pools_cleanup() and FALSE is returned.
 */
boolean
vmw_pools_init(struct vmw_winsys_screen *vws)
{
   struct pb_desc desc;

   vws->pools.gmr = vmw_gmr_bufmgr_create(vws);
   if(!vws->pools.gmr)
      goto error;

   if ((vws->base.have_gb_objects && vws->base.have_gb_dma) ||
       !vws->base.have_gb_objects) {
      /*
       * A managed pool for DMA buffers.
       */
      vws->pools.gmr_mm = mm_bufmgr_create(vws->pools.gmr,
                                           VMW_GMR_POOL_SIZE,
                                           12 /* 4096 alignment */);
      if(!vws->pools.gmr_mm)
         goto error;

      vws->pools.gmr_fenced = simple_fenced_bufmgr_create
         (vws->pools.gmr_mm, vws->fence_ops);
      /*
       * Check for failure before (optionally) wrapping the manager in a
       * debug manager below: previously, in DEBUG builds a NULL result
       * was passed straight into pb_debug_manager_create(), and the NULL
       * check then only tested the debug wrapper.
       */
      if(!vws->pools.gmr_fenced)
         goto error;

#ifdef DEBUG
      vws->pools.gmr_fenced = pb_debug_manager_create(vws->pools.gmr_fenced,
                                                      4096,
                                                      4096);
      if(!vws->pools.gmr_fenced)
         goto error;
#endif

   /*
    * The slab pool allocates buffers directly from the kernel except
    * for very small buffers which are allocated from a slab in order
    * not to waste memory, since a kernel buffer is a minimum 4096 bytes.
    *
    * Here we use it only for emergency in the case our pre-allocated
    * managed buffer pool runs out of memory.
    */

      desc.alignment = 64;
      desc.usage = ~(SVGA_BUFFER_USAGE_PINNED | SVGA_BUFFER_USAGE_SHADER |
                     VMW_BUFFER_USAGE_SHARED | VMW_BUFFER_USAGE_SYNC);
      vws->pools.gmr_slab = pb_slab_range_manager_create(vws->pools.gmr,
                                                         64,
                                                         8192,
                                                         16384,
                                                         &desc);
      if (!vws->pools.gmr_slab)
         goto error;

      vws->pools.gmr_slab_fenced =
         simple_fenced_bufmgr_create(vws->pools.gmr_slab, vws->fence_ops);

      if (!vws->pools.gmr_slab_fenced)
         goto error;
   }

   vws->pools.query_fenced = NULL;
   vws->pools.query_mm = NULL;
   vws->pools.mob_cache = NULL;

   if (vws->base.have_gb_objects && !vmw_mob_pools_init(vws))
      goto error;

   return TRUE;

error:
   vmw_pools_cleanup(vws);
   return FALSE;
}