Example #1
0
/**
 * vmw_svga_winsys_buffer_create - Allocate a winsys buffer object.
 *
 * Selects a pb_manager pool based on @usage (pinned query pool, shader
 * slab pool, or the default fenced gmr pool), allocates a pb_buffer
 * from it and wraps the result as a struct svga_winsys_buffer.
 * If the default gmr pool fails, retries from the gmr slab pool.
 *
 * Returns NULL on allocation failure.
 */
static struct svga_winsys_buffer *
vmw_svga_winsys_buffer_create(struct svga_winsys_screen *sws,
                              unsigned alignment,
                              unsigned usage,
                              unsigned size)
{
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   struct vmw_buffer_desc desc;
   struct pb_manager *mgr;
   struct pb_buffer *pb_buf;

   memset(&desc, 0, sizeof desc);
   desc.pb_desc.alignment = alignment;
   desc.pb_desc.usage = usage;

   switch (usage) {
   case SVGA_BUFFER_USAGE_PINNED:
      /* Query pools are set up lazily on the first pinned allocation. */
      if (vws->pools.query_fenced == NULL && !vmw_query_pools_init(vws))
         return NULL;
      mgr = vws->pools.query_fenced;
      break;
   case SVGA_BUFFER_USAGE_SHADER:
      mgr = vws->pools.mob_shader_slab_fenced;
      break;
   default:
      mgr = vws->pools.gmr_fenced;
      break;
   }

   assert(mgr);
   pb_buf = mgr->create_buffer(mgr, size, &desc.pb_desc);

   /* The default pool may be exhausted; fall back to the gmr slab pool. */
   if (pb_buf == NULL && mgr == vws->pools.gmr_fenced) {
      mgr = vws->pools.gmr_slab_fenced;
      pb_buf = mgr->create_buffer(mgr, size, &desc.pb_desc);
   }

   if (pb_buf == NULL)
      return NULL;

   return vmw_svga_winsys_buffer_wrap(pb_buf);
}
Example #2
0
/**
 * vmw_drm_gb_surface_from_handle - Create a shared surface
 *
 * @sws: Screen to register the surface with.
 * @whandle: struct winsys_handle identifying the kernel surface object
 * @format: On successful return points to a value describing the
 * surface format.
 *
 * Takes a kernel reference on the surface, validates its metadata and
 * wraps its backing region into a winsys buffer.
 *
 * Returns a refcounted pointer to a struct svga_winsys_surface
 * embedded in a struct vmw_svga_winsys_surface on success or NULL
 * on failure.
 */
static struct svga_winsys_surface *
vmw_drm_gb_surface_from_handle(struct svga_winsys_screen *sws,
                               struct winsys_handle *whandle,
                               SVGA3dSurfaceFormat *format)
{
    struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
    struct pb_manager *provider = vws->pools.gmr;
    struct vmw_svga_winsys_surface *vsrf;
    struct svga_winsys_surface *ssrf;
    struct vmw_buffer_desc desc;
    struct pb_buffer *pb_buf;
    SVGA3dSurfaceFlags flags;
    uint32_t mip_levels;
    uint32_t handle;
    int ret;

    /* Importing at a non-zero offset into the object is not supported. */
    if (whandle->offset != 0) {
       fprintf(stderr, "Attempt to import unsupported winsys offset %u\n",
               whandle->offset);
       return NULL;
    }

    /* Reference the kernel surface and fetch its metadata and region. */
    ret = vmw_ioctl_gb_surface_ref(vws, whandle, &flags, format,
                                   &mip_levels, &handle, &desc.region);
    if (ret) {
       fprintf(stderr, "Failed referencing shared surface. SID %d.\n"
               "Error %d (%s).\n",
               whandle->handle, ret, strerror(-ret));
       return NULL;
    }

    if (mip_levels != 1) {
       fprintf(stderr, "Incorrect number of mipmap levels on shared surface."
               " SID %d, levels %d\n",
               whandle->handle, mip_levels);
       goto out_mip;
    }

    vsrf = CALLOC_STRUCT(vmw_svga_winsys_surface);
    if (vsrf == NULL)
       goto out_mip;

    pipe_reference_init(&vsrf->refcnt, 1);
    p_atomic_set(&vsrf->validated, 0);
    vsrf->screen = vws;
    vsrf->sid = handle;
    vsrf->size = vmw_region_size(desc.region);

    /*
     * Synchronize backing buffers of shared surfaces using the
     * kernel, since we don't pass fence objects around between
     * processes.
     */
    desc.pb_desc.alignment = 4096;
    desc.pb_desc.usage = VMW_BUFFER_USAGE_SHARED | VMW_BUFFER_USAGE_SYNC;
    pb_buf = provider->create_buffer(provider, vsrf->size, &desc.pb_desc);
    vsrf->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
    if (vsrf->buf == NULL)
       goto out_no_buf;

    ssrf = svga_winsys_surface(vsrf);
    return ssrf;

out_no_buf:
    FREE(vsrf);
out_mip:
    /* Once the ref ioctl succeeded, failure exits must release both the
     * guest-backed region and the kernel surface reference. */
    vmw_ioctl_region_destroy(desc.region);
    vmw_ioctl_surface_destroy(vws, whandle->handle);
    return NULL;
}
Example #3
0
/**
 * vmw_svga_winsys_surface_create - Create a new surface.
 *
 * @sws: Screen to register the surface with.
 * @flags: SVGA3D surface flags.
 * @format: SVGA3D surface format.
 * @usage: SVGA_SURFACE_USAGE_* bits; SHARED selects the unfenced gmr pool.
 * @size: Base-level dimensions.
 * @numLayers: Number of array layers / cube faces.
 * @numMipLevels: Number of mipmap levels.
 * @sampleCount: Multisample count.
 *
 * On guest-backed (GB) hosts, small non-shared surfaces first try a
 * backing buffer from the userspace buffer cache; otherwise (or if the
 * kernel rejects that buffer) the kernel allocates a suitable region
 * which is then wrapped into a winsys buffer. On non-GB hosts the
 * surface is simply created and sized for flush heuristics.
 *
 * Returns a refcounted struct svga_winsys_surface pointer, or NULL on
 * failure.
 */
static struct svga_winsys_surface *
vmw_svga_winsys_surface_create(struct svga_winsys_screen *sws,
                               SVGA3dSurfaceFlags flags,
                               SVGA3dSurfaceFormat format,
                               unsigned usage,
                               SVGA3dSize size,
                               uint32 numLayers,
                               uint32 numMipLevels,
                               unsigned sampleCount)
{
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   struct vmw_svga_winsys_surface *surface;
   struct vmw_buffer_desc desc;
   struct pb_manager *provider;
   uint32_t buffer_size;

   memset(&desc, 0, sizeof(desc));
   surface = CALLOC_STRUCT(vmw_svga_winsys_surface);
   if (!surface)
      goto no_surface;

   pipe_reference_init(&surface->refcnt, 1);
   p_atomic_set(&surface->validated, 0);
   surface->screen = vws;
   pipe_mutex_init(surface->mutex);
   surface->shared = !!(usage & SVGA_SURFACE_USAGE_SHARED);
   /* Shared surfaces are backed by the unfenced gmr pool. */
   provider = (surface->shared) ? vws->pools.gmr : vws->pools.mob_fenced;

   /*
    * Used for the backing buffer GB surfaces, and to approximate
    * when to flush on non-GB hosts.
    */
   buffer_size = svga3dsurface_get_serialized_size(format, size, numMipLevels,
                                                   numLayers);
   if (flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
      buffer_size += sizeof(SVGA3dDXSOState);

   if (buffer_size > vws->ioctl.max_texture_size) {
      goto no_sid;
   }

   if (sws->have_gb_objects) {
      SVGAGuestPtr ptr = {0,0};

      /*
       * If the backing buffer size is small enough, try to allocate a
       * buffer out of the buffer cache. Otherwise, let the kernel allocate
       * a suitable buffer for us.
       */
      if (buffer_size < VMW_TRY_CACHED_SIZE && !surface->shared) {
         struct pb_buffer *pb_buf;

         surface->size = buffer_size;
         desc.pb_desc.alignment = 4096;
         desc.pb_desc.usage = 0;
         pb_buf = provider->create_buffer(provider, buffer_size, &desc.pb_desc);
         surface->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
         if (surface->buf && !vmw_gmr_bufmgr_region_ptr(pb_buf, &ptr))
            assert(0);
      }

      /* If we supply a buffer (ptr.gmrId), the kernel must not allocate
       * a region; otherwise it returns one in desc.region. */
      surface->sid = vmw_ioctl_gb_surface_create(vws, flags, format, usage,
                                                 size, numLayers,
                                                 numMipLevels, sampleCount,
                                                 ptr.gmrId,
                                                 surface->buf ? NULL :
                                                 &desc.region);

      if (surface->sid == SVGA3D_INVALID_ID && surface->buf) {

         /*
          * Kernel refused to allocate a surface for us.
          * Perhaps something was wrong with our buffer?
          * This is really a guard against future new size requirements
          * on the backing buffers.
          */
         vmw_svga_winsys_buffer_destroy(sws, surface->buf);
         surface->buf = NULL;
         surface->sid = vmw_ioctl_gb_surface_create(vws, flags, format, usage,
                                                    size, numLayers,
                                                    numMipLevels, sampleCount,
                                                    0, &desc.region);
         if (surface->sid == SVGA3D_INVALID_ID)
            goto no_sid;
      }

      /*
       * If the kernel created the buffer for us, wrap it into a
       * vmw_svga_winsys_buffer.
       */
      if (surface->buf == NULL) {
         struct pb_buffer *pb_buf;

         surface->size = vmw_region_size(desc.region);
         desc.pb_desc.alignment = 4096;
         desc.pb_desc.usage = VMW_BUFFER_USAGE_SHARED;
         pb_buf = provider->create_buffer(provider, surface->size,
                                          &desc.pb_desc);
         surface->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
         if (surface->buf == NULL) {
            vmw_ioctl_region_destroy(desc.region);
            vmw_ioctl_surface_destroy(vws, surface->sid);
            goto no_sid;
         }
      }
   } else {
      surface->sid = vmw_ioctl_surface_create(vws, flags, format, usage,
                                              size, numLayers, numMipLevels,
                                              sampleCount);
      if (surface->sid == SVGA3D_INVALID_ID)
         goto no_sid;

      /* Best estimate for surface size, used for early flushing. */
      surface->size = buffer_size;
      surface->buf = NULL;
   }

   return svga_winsys_surface(surface);

no_sid:
   if (surface->buf)
      vmw_svga_winsys_buffer_destroy(sws, surface->buf);

   /* Fix: tear down the mutex initialized above; previously it leaked
    * on every error exit after pipe_mutex_init(). */
   pipe_mutex_destroy(surface->mutex);
   FREE(surface);
no_surface:
   return NULL;
}
Example #4
0
/**
 * vmw_svga_winsys_surface_map - Map the backing buffer of a surface.
 *
 * @swc: Context performing the map.
 * @srf: Surface whose backing buffer is to be mapped.
 * @flags: PIPE_TRANSFER_* flags describing the mapping.
 * @retry: Set to TRUE when the caller should flush and retry the map.
 *
 * Allows concurrent read-only mappings. For whole-resource discard
 * maps, first tries a non-blocking map of the existing storage, then
 * attempts to substitute a fresh backing buffer before falling back to
 * an ordinary (possibly blocking) map.
 *
 * Returns the mapped pointer or NULL on failure.
 */
void *
vmw_svga_winsys_surface_map(struct svga_winsys_context *swc,
                            struct svga_winsys_surface *srf,
                            unsigned flags, boolean *retry)
{
    struct vmw_svga_winsys_surface *vsrf = vmw_svga_winsys_surface(srf);
    void *data = NULL;
    struct pb_buffer *pb_buf;
    uint32_t pb_flags;
    struct vmw_winsys_screen *vws = vsrf->screen;

    *retry = FALSE;
    assert((flags & (PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE)) != 0);
    pipe_mutex_lock(vsrf->mutex);

    if (vsrf->mapcount) {
        /*
         * Only allow multiple readers to map.
         */
        if ((flags & PIPE_TRANSFER_WRITE) ||
                (vsrf->map_mode & PIPE_TRANSFER_WRITE))
            goto out_unlock;

        data = vsrf->data;
        goto out_mapped;
    }

    vsrf->rebind = FALSE;

    /*
     * If we intend to read, there's no point discarding the
     * data if busy.
     */
    if (flags & PIPE_TRANSFER_READ || vsrf->shared)
        flags &= ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;

    /*
     * Discard is a hint to a synchronized map.
     */
    if (flags & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
        flags &= ~PIPE_TRANSFER_UNSYNCHRONIZED;

    /*
     * The surface is allowed to be referenced on the command stream iff
     * we're mapping unsynchronized or discard. This is an early check.
     * We need to recheck after a failing discard map.
     */
    if (!(flags & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
                   PIPE_TRANSFER_UNSYNCHRONIZED)) &&
            p_atomic_read(&vsrf->validated)) {
        *retry = TRUE;
        goto out_unlock;
    }

    pb_flags = flags & (PIPE_TRANSFER_READ_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED);

    if (flags & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
        struct pb_manager *provider;
        struct pb_desc desc;

        /*
         * First, if possible, try to map existing storage with DONTBLOCK.
         */
        if (!p_atomic_read(&vsrf->validated)) {
            data = vmw_svga_winsys_buffer_map(&vws->base, vsrf->buf,
                                              PIPE_TRANSFER_DONTBLOCK | pb_flags);
            if (data)
                goto out_mapped;
        }

        /*
         * Attempt to get a new buffer.
         */
        provider = vws->pools.mob_fenced;
        memset(&desc, 0, sizeof(desc));
        desc.alignment = 4096;
        pb_buf = provider->create_buffer(provider, vsrf->size, &desc);
        if (pb_buf != NULL) {
            struct svga_winsys_buffer *vbuf =
                vmw_svga_winsys_buffer_wrap(pb_buf);

            /*
             * Fix: vmw_svga_winsys_buffer_wrap() can return NULL (other
             * call sites in this file check its result), so vbuf must be
             * NULL-checked before being mapped or destroyed.
             * NOTE(review): assumes the wrap function releases pb_buf
             * itself on failure — verify against its implementation.
             */
            if (vbuf != NULL) {
                data = vmw_svga_winsys_buffer_map(&vws->base, vbuf, pb_flags);
                if (data) {
                    vsrf->rebind = TRUE;
                    /*
                     * We've discarded data on this surface and thus
                     * its data is no longer considered referenced.
                     */
                    vmw_swc_surface_clear_reference(swc, vsrf);
                    if (vsrf->buf)
                        vmw_svga_winsys_buffer_destroy(&vws->base, vsrf->buf);
                    vsrf->buf = vbuf;
                    goto out_mapped;
                } else {
                    vmw_svga_winsys_buffer_destroy(&vws->base, vbuf);
                }
            }
        }
        /*
         * We couldn't get and map a new buffer for some reason.
         * Fall through to an ordinary map.
         * But tell pipe driver to flush now if already on validate list,
         * Otherwise we'll overwrite previous contents.
         */
        if (!(flags & PIPE_TRANSFER_UNSYNCHRONIZED) &&
                p_atomic_read(&vsrf->validated)) {
            *retry = TRUE;
            goto out_unlock;
        }
    }

    pb_flags |= (flags & PIPE_TRANSFER_DONTBLOCK);
    data = vmw_svga_winsys_buffer_map(&vws->base, vsrf->buf, pb_flags);
    if (data == NULL)
        goto out_unlock;

out_mapped:
    ++vsrf->mapcount;
    vsrf->data = data;
    vsrf->map_mode = flags & (PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE);
out_unlock:
    pipe_mutex_unlock(vsrf->mutex);
    return data;
}