Example 1
/*
 * Update a surface pointer with proper reference counting.
 *
 * Makes *pdst point at src, taking a reference on src (if non-NULL) and
 * dropping the reference previously held through *pdst.  If that was the
 * last reference, the old surface and its resources are destroyed.
 * No-op when pdst is NULL or *pdst already equals src.
 */
void
vmw_svga_winsys_surface_reference(struct vmw_svga_winsys_surface **pdst,
                                  struct vmw_svga_winsys_surface *src)
{
    struct pipe_reference *src_ref;
    struct pipe_reference *dst_ref;
    struct vmw_svga_winsys_surface *dst;

    if(pdst == NULL || *pdst == src)
        return;

    dst = *pdst;

    /* Either side may be NULL; pipe_reference handles NULL gracefully. */
    src_ref = src ? &src->refcnt : NULL;
    dst_ref = dst ? &dst->refcnt : NULL;

    /* pipe_reference() increments src_ref, decrements dst_ref, and
     * returns true when dst_ref dropped to zero and must be destroyed. */
    if (pipe_reference(dst_ref, src_ref)) {
        if (dst->buf)
            vmw_svga_winsys_buffer_destroy(&dst->screen->base, dst->buf);
        vmw_ioctl_surface_destroy(dst->screen, dst->sid);
#ifdef DEBUG
        /* to detect dangling pointers */
        assert(p_atomic_read(&dst->validated) == 0);
        dst->sid = SVGA3D_INVALID_ID;
#endif
        pipe_mutex_destroy(dst->mutex);
        FREE(dst);
    }

    *pdst = src;
}
Example 2
/**
 * Create a guest-backed shader object.
 *
 * Allocates a winsys buffer, copies the shader bytecode into it, and on
 * non-vgpu10 hosts also creates a kernel shader object.
 *
 * Returns the new shader (caller owns one reference), or NULL on failure.
 */
struct svga_winsys_gb_shader *
vmw_svga_winsys_shader_create(struct svga_winsys_screen *sws,
			      SVGA3dShaderType type,
			      const uint32 *bytecode,
			      uint32 bytecodeLen)
{
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   struct vmw_svga_winsys_shader *shader;
   void *code;

   shader = CALLOC_STRUCT(vmw_svga_winsys_shader);
   if (!shader)
      goto out_no_shader;

   pipe_reference_init(&shader->refcnt, 1);
   p_atomic_set(&shader->validated, 0);
   shader->screen = vws;
   /* 64-byte alignment; tagged as a shader backing buffer. */
   shader->buf = vmw_svga_winsys_buffer_create(sws, 64,
					       SVGA_BUFFER_USAGE_SHADER,
					       bytecodeLen);
   if (!shader->buf)
      goto out_no_buf;

   code = vmw_svga_winsys_buffer_map(sws, shader->buf, PIPE_TRANSFER_WRITE);
   if (!code)
      goto out_no_code_map;   /* FIX: was out_no_buf, which leaked shader->buf */

   memcpy(code, bytecode, bytecodeLen);
   vmw_svga_winsys_buffer_unmap(sws, shader->buf);

   /* vgpu10 hosts define shaders through the command stream, so no
    * kernel shader object is needed there. */
   if (!sws->have_vgpu10) {
      shader->shid = vmw_ioctl_shader_create(vws, type, bytecodeLen);
      if (shader->shid == SVGA3D_INVALID_ID)
         goto out_no_code_map;
   }

   return svga_winsys_shader(shader);

out_no_code_map:
   vmw_svga_winsys_buffer_destroy(sws, shader->buf);
out_no_buf:
   FREE(shader);
out_no_shader:
   return NULL;
}
Example 3
/**
 * Create a winsys surface, optionally backed by a guest-memory buffer.
 *
 * On guest-backed (GB) hosts the backing buffer is allocated either from
 * the buffer cache (small, non-shared surfaces) or by the kernel.  On
 * non-GB hosts only a host surface is created and surface->size is just
 * an estimate used for early-flush heuristics.
 *
 * Returns the new surface (caller owns one reference), or NULL on failure.
 */
static struct svga_winsys_surface *
vmw_svga_winsys_surface_create(struct svga_winsys_screen *sws,
                               SVGA3dSurfaceFlags flags,
                               SVGA3dSurfaceFormat format,
                               unsigned usage,
                               SVGA3dSize size,
                               uint32 numLayers,
                               uint32 numMipLevels,
                               unsigned sampleCount)
{
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   struct vmw_svga_winsys_surface *surface;
   struct vmw_buffer_desc desc;
   struct pb_manager *provider;
   uint32_t buffer_size;

   memset(&desc, 0, sizeof(desc));
   surface = CALLOC_STRUCT(vmw_svga_winsys_surface);
   if(!surface)
      goto no_surface;

   pipe_reference_init(&surface->refcnt, 1);
   p_atomic_set(&surface->validated, 0);
   surface->screen = vws;
   pipe_mutex_init(surface->mutex);
   surface->shared = !!(usage & SVGA_SURFACE_USAGE_SHARED);
   /* Shared surfaces must come out of the GMR pool; everything else can
    * use fenced MOB memory. */
   provider = (surface->shared) ? vws->pools.gmr : vws->pools.mob_fenced;

   /*
    * Used for the backing buffer GB surfaces, and to approximate
    * when to flush on non-GB hosts.
    */
   buffer_size = svga3dsurface_get_serialized_size(format, size, numMipLevels,
                                                   numLayers);
   if (flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
      buffer_size += sizeof(SVGA3dDXSOState);

   if (buffer_size > vws->ioctl.max_texture_size) {
      goto no_sid;
   }

   if (sws->have_gb_objects) {
      SVGAGuestPtr ptr = {0,0};

      /*
       * If the backing buffer size is small enough, try to allocate a
       * buffer out of the buffer cache. Otherwise, let the kernel allocate
       * a suitable buffer for us.
       */
      if (buffer_size < VMW_TRY_CACHED_SIZE && !surface->shared) {
         struct pb_buffer *pb_buf;

         surface->size = buffer_size;
         desc.pb_desc.alignment = 4096;
         desc.pb_desc.usage = 0;
         pb_buf = provider->create_buffer(provider, buffer_size, &desc.pb_desc);
         surface->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
         if (surface->buf && !vmw_gmr_bufmgr_region_ptr(pb_buf, &ptr))
            assert(0);
      }

      surface->sid = vmw_ioctl_gb_surface_create(vws, flags, format, usage,
                                                 size, numLayers,
                                                 numMipLevels, sampleCount,
                                                 ptr.gmrId,
                                                 surface->buf ? NULL :
                                                 &desc.region);

      if (surface->sid == SVGA3D_INVALID_ID) {
         /*
          * FIX: if the kernel refused and we had no cached buffer there is
          * nothing to retry with.  The previous code fell through and
          * wrapped an uninitialized desc.region with an invalid sid.
          */
         if (surface->buf == NULL)
            goto no_sid;

         /*
          * Kernel refused to allocate a surface for us.
          * Perhaps something was wrong with our buffer?
          * This is really a guard against future new size requirements
          * on the backing buffers.
          */
         vmw_svga_winsys_buffer_destroy(sws, surface->buf);
         surface->buf = NULL;
         surface->sid = vmw_ioctl_gb_surface_create(vws, flags, format, usage,
                                                    size, numLayers,
                                                    numMipLevels, sampleCount,
                                                    0, &desc.region);
         if (surface->sid == SVGA3D_INVALID_ID)
            goto no_sid;
      }

      /*
       * If the kernel created the buffer for us, wrap it into a
       * vmw_svga_winsys_buffer.
       */
      if (surface->buf == NULL) {
         struct pb_buffer *pb_buf;

         surface->size = vmw_region_size(desc.region);
         desc.pb_desc.alignment = 4096;
         desc.pb_desc.usage = VMW_BUFFER_USAGE_SHARED;
         pb_buf = provider->create_buffer(provider, surface->size,
                                          &desc.pb_desc);
         surface->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
         if (surface->buf == NULL) {
            vmw_ioctl_region_destroy(desc.region);
            vmw_ioctl_surface_destroy(vws, surface->sid);
            goto no_sid;
         }
      }
   } else {
      surface->sid = vmw_ioctl_surface_create(vws, flags, format, usage,
                                              size, numLayers, numMipLevels,
                                              sampleCount);
      if(surface->sid == SVGA3D_INVALID_ID)
         goto no_sid;

      /* Best estimate for surface size, used for early flushing. */
      surface->size = buffer_size;
      surface->buf = NULL;
   }

   return svga_winsys_surface(surface);

no_sid:
   if (surface->buf)
      vmw_svga_winsys_buffer_destroy(sws, surface->buf);

   /* FIX: destroy the mutex on the error path too (matches the teardown
    * in vmw_svga_winsys_surface_reference). */
   pipe_mutex_destroy(surface->mutex);
   FREE(surface);
no_surface:
   return NULL;
}
Example 4
/*
 * Map a surface's backing buffer for CPU access.
 *
 * Returns a pointer to the mapped data, or NULL on failure.  Sets *retry
 * to TRUE when the map failed only because the surface is referenced on
 * the command stream -- the caller should flush and try again.
 *
 * Concurrent maps are allowed only when all of them are read-only.
 * A DISCARD_WHOLE_RESOURCE map may replace the backing buffer entirely
 * (buffer renaming) to avoid stalling on the GPU.
 */
void *
vmw_svga_winsys_surface_map(struct svga_winsys_context *swc,
                            struct svga_winsys_surface *srf,
                            unsigned flags, boolean *retry)
{
    struct vmw_svga_winsys_surface *vsrf = vmw_svga_winsys_surface(srf);
    void *data = NULL;
    struct pb_buffer *pb_buf;
    uint32_t pb_flags;
    struct vmw_winsys_screen *vws = vsrf->screen;

    *retry = FALSE;
    assert((flags & (PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE)) != 0);
    pipe_mutex_lock(vsrf->mutex);

    if (vsrf->mapcount) {
        /*
         * Only allow multiple readers to map.
         */
        if ((flags & PIPE_TRANSFER_WRITE) ||
                (vsrf->map_mode & PIPE_TRANSFER_WRITE))
            goto out_unlock;

        /* Share the existing mapping with the new reader. */
        data = vsrf->data;
        goto out_mapped;
    }

    vsrf->rebind = FALSE;

    /*
     * If we intend to read, there's no point discarding the
     * data if busy.  Shared surfaces must never be renamed.
     */
    if (flags & PIPE_TRANSFER_READ || vsrf->shared)
        flags &= ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;

    /*
     * Discard is a hint to a synchronized map.
     */
    if (flags & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
        flags &= ~PIPE_TRANSFER_UNSYNCHRONIZED;

    /*
     * The surface is allowed to be referenced on the command stream iff
     * we're mapping unsynchronized or discard. This is an early check.
     * We need to recheck after a failing discard map.
     */
    if (!(flags & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
                   PIPE_TRANSFER_UNSYNCHRONIZED)) &&
            p_atomic_read(&vsrf->validated)) {
        *retry = TRUE;
        goto out_unlock;
    }

    /* Flags forwarded to the underlying buffer map. */
    pb_flags = flags & (PIPE_TRANSFER_READ_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED);

    if (flags & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
        struct pb_manager *provider;
        struct pb_desc desc;

        /*
         * First, if possible, try to map existing storage with DONTBLOCK.
         */
        if (!p_atomic_read(&vsrf->validated)) {
            data = vmw_svga_winsys_buffer_map(&vws->base, vsrf->buf,
                                              PIPE_TRANSFER_DONTBLOCK | pb_flags);
            if (data)
                goto out_mapped;
        }

        /*
         * Attempt to get a new buffer (buffer renaming) so we don't
         * have to wait for the GPU to finish with the old one.
         */
        provider = vws->pools.mob_fenced;
        memset(&desc, 0, sizeof(desc));
        desc.alignment = 4096;
        pb_buf = provider->create_buffer(provider, vsrf->size, &desc);
        if (pb_buf != NULL) {
            struct svga_winsys_buffer *vbuf =
                vmw_svga_winsys_buffer_wrap(pb_buf);

            data = vmw_svga_winsys_buffer_map(&vws->base, vbuf, pb_flags);
            if (data) {
                /* New backing storage must be rebound to the surface. */
                vsrf->rebind = TRUE;
                /*
                 * We've discarded data on this surface and thus
                 * its data is no longer considered referenced.
                 */
                vmw_swc_surface_clear_reference(swc, vsrf);
                if (vsrf->buf)
                    vmw_svga_winsys_buffer_destroy(&vws->base, vsrf->buf);
                vsrf->buf = vbuf;
                goto out_mapped;
            } else
                vmw_svga_winsys_buffer_destroy(&vws->base, vbuf);
        }
        /*
         * We couldn't get and map a new buffer for some reason.
         * Fall through to an ordinary map.
         * But tell pipe driver to flush now if already on validate list,
         * Otherwise we'll overwrite previous contents.
         */
        if (!(flags & PIPE_TRANSFER_UNSYNCHRONIZED) &&
                p_atomic_read(&vsrf->validated)) {
            *retry = TRUE;
            goto out_unlock;
        }
    }

    /* Ordinary (possibly blocking) map of the existing buffer. */
    pb_flags |= (flags & PIPE_TRANSFER_DONTBLOCK);
    data = vmw_svga_winsys_buffer_map(&vws->base, vsrf->buf, pb_flags);
    if (data == NULL)
        goto out_unlock;

out_mapped:
    ++vsrf->mapcount;
    vsrf->data = data;
    vsrf->map_mode = flags & (PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE);
out_unlock:
    pipe_mutex_unlock(vsrf->mutex);
    return data;
}