Example #1
/**
 * This is called when we need to set up GL rendering to a new X window.
 */
boolean
dri_create_buffer(__DRIscreen * sPriv,
		  __DRIdrawable * dPriv,
		  const struct gl_config * visual, boolean isPixmap)
{
   struct dri_screen *screen = sPriv->driverPrivate;
   struct dri_drawable *drawable = NULL;

   if (isPixmap)
      goto fail;		       /* not implemented */

   drawable = CALLOC_STRUCT(dri_drawable);
   if (drawable == NULL)
      goto fail;

   dri_fill_st_visual(&drawable->stvis, screen, visual);

   /* setup the st_framebuffer_iface */
   drawable->base.visual = &drawable->stvis;
   drawable->base.flush_front = dri_st_framebuffer_flush_front;
   drawable->base.validate = dri_st_framebuffer_validate;
   drawable->base.st_manager_private = (void *) drawable;

   drawable->screen = screen;
   drawable->sPriv = sPriv;
   drawable->dPriv = dPriv;
   drawable->desired_fences = screen->default_throttle_frames;
   if (drawable->desired_fences > DRI_SWAP_FENCES_MAX)
      drawable->desired_fences = DRI_SWAP_FENCES_MAX;

   dPriv->driverPrivate = (void *)drawable;
   p_atomic_set(&drawable->base.stamp, 1);

   return GL_TRUE;
fail:
   FREE(drawable);
   return GL_FALSE;
}
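
The function above follows the usual C allocate-initialize-publish shape, funneling every failure through a single fail label so the cleanup lives in one place. A minimal self-contained sketch of the same pattern, using hypothetical demo_* names rather than the DRI types above:

#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical driver-private state attached to a window-system object. */
struct demo_drawable {
   int width, height;
   unsigned stamp;
};

struct demo_window {
   void *driver_private;
};

/* Allocate and attach private state; every failure funnels to one label. */
static bool
demo_create_buffer(struct demo_window *win, int width, int height)
{
   struct demo_drawable *drawable = NULL;

   if (width <= 0 || height <= 0)
      goto fail;                    /* not supported */

   drawable = calloc(1, sizeof(*drawable));
   if (drawable == NULL)
      goto fail;

   drawable->width = width;
   drawable->height = height;
   drawable->stamp = 1;             /* force a first validation */

   win->driver_private = drawable;
   return true;

fail:
   free(drawable);                  /* free(NULL) is a no-op */
   return false;
}
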
Example #2
static void
st_context_notify_invalid_framebuffer(struct st_context_iface *stctxi,
                                      struct st_framebuffer_iface *stfbi)
{
   struct st_context *st = (struct st_context *) stctxi;
   struct st_framebuffer *stfb;

   /* either draw or read winsys fb */
   stfb = st_ws_framebuffer(st->ctx->WinSysDrawBuffer);
   if (!stfb || stfb->iface != stfbi)
      stfb = st_ws_framebuffer(st->ctx->WinSysReadBuffer);

   if (stfb && stfb->iface == stfbi) {
      p_atomic_set(&stfb->revalidate, TRUE);
   }
   else {
      /* This function is probably getting called when we've detected a
       * change in a window's size but the currently bound context is
       * not bound to that window.
       * If the st_framebuffer_iface structure had a pointer to the
       * corresponding st_framebuffer we'd be able to handle this.
       */
   }
}
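
The notify callback above does no real work; it only raises an atomic revalidate flag on the framebuffer that owns the window, and the flag is consumed the next time that framebuffer is validated. A minimal sketch of that handshake with C11 atomics, using hypothetical demo_* names instead of the st_* types:

#include <stdatomic.h>
#include <stdbool.h>

struct demo_framebuffer {
   atomic_bool revalidate;   /* set by notify, cleared by validate */
   int width, height;
};

/* Called from the window system when the surface may have changed. */
static void
demo_notify_invalid(struct demo_framebuffer *fb)
{
   atomic_store(&fb->revalidate, true);
}

/* Called by the renderer before drawing; returns true if it revalidated. */
static bool
demo_validate(struct demo_framebuffer *fb, int new_width, int new_height)
{
   if (!atomic_exchange(&fb->revalidate, false))
      return false;          /* nothing changed since the last validation */

   fb->width = new_width;
   fb->height = new_height;
   return true;
}
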
Example #3
static struct svga_winsys_surface *
vmw_svga_winsys_surface_create(struct svga_winsys_screen *sws,
                               SVGA3dSurfaceFlags flags,
                               SVGA3dSurfaceFormat format,
                               unsigned usage,
                               SVGA3dSize size,
                               uint32 numLayers,
                               uint32 numMipLevels,
                               unsigned sampleCount)
{
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   struct vmw_svga_winsys_surface *surface;
   struct vmw_buffer_desc desc;
   struct pb_manager *provider;
   uint32_t buffer_size;

   memset(&desc, 0, sizeof(desc));
   surface = CALLOC_STRUCT(vmw_svga_winsys_surface);
   if(!surface)
      goto no_surface;

   pipe_reference_init(&surface->refcnt, 1);
   p_atomic_set(&surface->validated, 0);
   surface->screen = vws;
   pipe_mutex_init(surface->mutex);
   surface->shared = !!(usage & SVGA_SURFACE_USAGE_SHARED);
   provider = (surface->shared) ? vws->pools.gmr : vws->pools.mob_fenced;

   /*
    * Used for the backing buffer of GB surfaces, and to approximate
    * when to flush on non-GB hosts.
    */
   buffer_size = svga3dsurface_get_serialized_size(format, size, numMipLevels, 
                                                   numLayers);
   if (flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
      buffer_size += sizeof(SVGA3dDXSOState);

   if (buffer_size > vws->ioctl.max_texture_size) {
      goto no_sid;
   }

   if (sws->have_gb_objects) {
      SVGAGuestPtr ptr = {0,0};

      /*
       * If the backing buffer size is small enough, try to allocate a
       * buffer out of the buffer cache. Otherwise, let the kernel allocate
       * a suitable buffer for us.
       */
      if (buffer_size < VMW_TRY_CACHED_SIZE && !surface->shared) {
         struct pb_buffer *pb_buf;

         surface->size = buffer_size;
         desc.pb_desc.alignment = 4096;
         desc.pb_desc.usage = 0;
         pb_buf = provider->create_buffer(provider, buffer_size, &desc.pb_desc);
         surface->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
         if (surface->buf && !vmw_gmr_bufmgr_region_ptr(pb_buf, &ptr))
            assert(0);
      }

      surface->sid = vmw_ioctl_gb_surface_create(vws, flags, format, usage,
                                                 size, numLayers,
                                                 numMipLevels, sampleCount,
                                                 ptr.gmrId,
                                                 surface->buf ? NULL :
                                                 &desc.region);

      if (surface->sid == SVGA3D_INVALID_ID && surface->buf) {

         /*
          * Kernel refused to allocate a surface for us.
          * Perhaps something was wrong with our buffer?
          * This is really a guard against future new size requirements
          * on the backing buffers.
          */
         vmw_svga_winsys_buffer_destroy(sws, surface->buf);
         surface->buf = NULL;
         surface->sid = vmw_ioctl_gb_surface_create(vws, flags, format, usage,
                                                    size, numLayers,
                                                    numMipLevels, sampleCount,
                                                    0, &desc.region);
         if (surface->sid == SVGA3D_INVALID_ID)
            goto no_sid;
      }

      /*
       * If the kernel created the buffer for us, wrap it into a
       * vmw_svga_winsys_buffer.
       */
      if (surface->buf == NULL) {
         struct pb_buffer *pb_buf;

         surface->size = vmw_region_size(desc.region);
         desc.pb_desc.alignment = 4096;
         desc.pb_desc.usage = VMW_BUFFER_USAGE_SHARED;
         pb_buf = provider->create_buffer(provider, surface->size,
                                          &desc.pb_desc);
         surface->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
         if (surface->buf == NULL) {
            vmw_ioctl_region_destroy(desc.region);
            vmw_ioctl_surface_destroy(vws, surface->sid);
            goto no_sid;
         }
      }
   } else {
      surface->sid = vmw_ioctl_surface_create(vws, flags, format, usage,
                                              size, numLayers, numMipLevels,
                                              sampleCount);
      if(surface->sid == SVGA3D_INVALID_ID)
         goto no_sid;

      /* Best estimate for surface size, used for early flushing. */
      surface->size = buffer_size;
      surface->buf = NULL; 
   }      

   return svga_winsys_surface(surface);

no_sid:
   if (surface->buf)
      vmw_svga_winsys_buffer_destroy(sws, surface->buf);

   FREE(surface);
no_surface:
   return NULL;
}
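
The allocation strategy above tries a small buffer from the user-space cache first, falls back to letting the kernel pick the storage, and retries without the cached buffer if the kernel refuses it. A rough self-contained sketch of that try-then-fall-back shape, with hypothetical demo_* helpers standing in for the cache and the ioctl:

#include <stdbool.h>
#include <stdlib.h>

#define DEMO_TRY_CACHED_SIZE (64 * 1024)   /* hypothetical threshold */

/* Stand-in allocators; in the real driver these are the buffer cache and
 * the kernel ioctl respectively. */
static void *demo_cache_alloc(size_t size) { return malloc(size); }
static void  demo_cache_free(void *buf)    { free(buf); }

/* Pretend surface constructor: refuses caller-provided storage for larger
 * sizes, otherwise succeeds. Returns an id >= 0 or -1 on failure. */
static int
demo_surface_create(size_t size, void *backing)
{
   if (backing && size > DEMO_TRY_CACHED_SIZE / 2)
      return -1;                     /* simulate the kernel refusing it */
   return 42;
}

/* Try the cached buffer first, then retry without it. Returns the surface
 * id or -1; *backing_out receives the buffer we kept (possibly NULL). */
static int
demo_create_surface_with_fallback(size_t size, bool shared, void **backing_out)
{
   void *buf = NULL;
   int id;

   if (size < DEMO_TRY_CACHED_SIZE && !shared)
      buf = demo_cache_alloc(size);

   id = demo_surface_create(size, buf);
   if (id < 0 && buf) {
      demo_cache_free(buf);          /* maybe the buffer was the problem */
      buf = NULL;
      id = demo_surface_create(size, NULL);
   }

   if (id < 0) {
      *backing_out = NULL;
      return -1;
   }
   *backing_out = buf;
   return id;
}
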
Example #4
static struct svga_winsys_surface *
vmw_drm_surface_from_handle(struct svga_winsys_screen *sws,
                            struct winsys_handle *whandle,
			    SVGA3dSurfaceFormat *format)
{
    struct vmw_svga_winsys_surface *vsrf;
    struct svga_winsys_surface *ssrf;
    struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
    union drm_vmw_surface_reference_arg arg;
    struct drm_vmw_surface_arg *req = &arg.req;
    struct drm_vmw_surface_create_req *rep = &arg.rep;
    uint32_t handle = 0;
    struct drm_vmw_size size;
    SVGA3dSize base_size;
    int ret;
    int i;

    if (whandle->offset != 0) {
       fprintf(stderr, "Attempt to import unsupported winsys offset %u\n",
               whandle->offset);
       return NULL;
    }

    switch (whandle->type) {
    case DRM_API_HANDLE_TYPE_SHARED:
    case DRM_API_HANDLE_TYPE_KMS:
       handle = whandle->handle;
       break;
    case DRM_API_HANDLE_TYPE_FD:
       ret = drmPrimeFDToHandle(vws->ioctl.drm_fd, whandle->handle,
                                &handle);
       if (ret) {
	  vmw_error("Failed to get handle from prime fd %d.\n",
		    (int) whandle->handle);
	  return NULL;
       }
       break;
    default:
       vmw_error("Attempt to import unsupported handle type %d.\n",
                 whandle->type);
       return NULL;
    }

    memset(&arg, 0, sizeof(arg));
    req->sid = handle;
    rep->size_addr = (unsigned long)&size;

    ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_REF_SURFACE,
			      &arg, sizeof(arg));

    /*
     * Need to close the handle we got from prime.
     */
    if (whandle->type == DRM_API_HANDLE_TYPE_FD)
       vmw_ioctl_surface_destroy(vws, handle);

    if (ret) {
       /*
        * Any attempt to share something other than a surface, like a dumb
        * kms buffer, should fail here.
        */
       vmw_error("Failed referencing shared surface. SID %d.\n"
                 "Error %d (%s).\n",
                 handle, ret, strerror(-ret));
       return NULL;
    }

    if (rep->mip_levels[0] != 1) {
        vmw_error("Incorrect number of mipmap levels on shared surface."
                  " SID %d, levels %d\n",
                  handle, rep->mip_levels[0]);
	goto out_mip;
    }

    for (i = 1; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
        if (rep->mip_levels[i] != 0) {
            vmw_error("Incorrect number of face levels on shared surface."
                      " SID %d, face %d present.\n",
                      handle, i);
            goto out_mip;
        }
    }

    vsrf = CALLOC_STRUCT(vmw_svga_winsys_surface);
    if (!vsrf)
	goto out_mip;

    pipe_reference_init(&vsrf->refcnt, 1);
    p_atomic_set(&vsrf->validated, 0);
    vsrf->screen = vws;
    vsrf->sid = handle;
    ssrf = svga_winsys_surface(vsrf);
    *format = rep->format;

    /* Estimate usage, for early flushing. */

    base_size.width = size.width;
    base_size.height = size.height;
    base_size.depth = size.depth;
    vsrf->size = svga3dsurface_get_serialized_size(rep->format, base_size,
                                                   rep->mip_levels[0],
                                                   FALSE);

    return ssrf;

out_mip:
    vmw_ioctl_surface_destroy(vws, handle);

    return NULL;
}
Example #5
/**
 * vmw_drm_gb_surface_from_handle - Create a shared surface
 *
 * @sws: Screen to register the surface with.
 * @whandle: struct winsys_handle identifying the kernel surface object
 * @format: On successful return points to a value describing the
 * surface format.
 *
 * Returns a refcounted pointer to a struct svga_winsys_surface
 * embedded in a struct vmw_svga_winsys_surface on success or NULL
 * on failure.
 */
static struct svga_winsys_surface *
vmw_drm_gb_surface_from_handle(struct svga_winsys_screen *sws,
                               struct winsys_handle *whandle,
                               SVGA3dSurfaceFormat *format)
{
    struct vmw_svga_winsys_surface *vsrf;
    struct svga_winsys_surface *ssrf;
    struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
    SVGA3dSurfaceFlags flags;
    uint32_t mip_levels;
    struct vmw_buffer_desc desc;
    struct pb_manager *provider = vws->pools.gmr;
    struct pb_buffer *pb_buf;
    uint32_t handle;
    int ret;

    if (whandle->offset != 0) {
       fprintf(stderr, "Attempt to import unsupported winsys offset %u\n",
               whandle->offset);
       return NULL;
    }

    ret = vmw_ioctl_gb_surface_ref(vws, whandle, &flags, format,
                                   &mip_levels, &handle, &desc.region);

    if (ret) {
	fprintf(stderr, "Failed referencing shared surface. SID %d.\n"
		"Error %d (%s).\n",
		whandle->handle, ret, strerror(-ret));
	return NULL;
    }

    if (mip_levels != 1) {
       fprintf(stderr, "Incorrect number of mipmap levels on shared surface."
               " SID %d, levels %d\n",
               whandle->handle, mip_levels);
       goto out_mip;
    }

    vsrf = CALLOC_STRUCT(vmw_svga_winsys_surface);
    if (!vsrf)
	goto out_mip;

    pipe_reference_init(&vsrf->refcnt, 1);
    p_atomic_set(&vsrf->validated, 0);
    vsrf->screen = vws;
    vsrf->sid = handle;
    vsrf->size = vmw_region_size(desc.region);

    /*
     * Synchronize backing buffers of shared surfaces using the
     * kernel, since we don't pass fence objects around between
     * processes.
     */
    desc.pb_desc.alignment = 4096;
    desc.pb_desc.usage = VMW_BUFFER_USAGE_SHARED | VMW_BUFFER_USAGE_SYNC;
    pb_buf = provider->create_buffer(provider, vsrf->size, &desc.pb_desc);
    vsrf->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
    if (!vsrf->buf)
       goto out_no_buf;
    ssrf = svga_winsys_surface(vsrf);

    return ssrf;

out_no_buf:
    FREE(vsrf);
out_mip:
    vmw_ioctl_region_destroy(desc.region);
    vmw_ioctl_surface_destroy(vws, whandle->handle);
    return NULL;
}
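
Per the comment block above, the importer consumes a struct winsys_handle and hands back a refcounted svga_winsys_surface, reporting the format through the out parameter. A hypothetical caller in the same source file might look like the sketch below; demo_import_surface is not part of the driver, and the sketch assumes the declarations from the examples above are in scope.

/* Hypothetical import helper; not the driver's actual resource_from_handle
 * path. Assumes the types and functions shown in the examples above. */
static struct svga_winsys_surface *
demo_import_surface(struct svga_winsys_screen *sws,
                    struct winsys_handle *whandle)
{
   SVGA3dSurfaceFormat format;
   struct svga_winsys_surface *srf;

   srf = vmw_drm_gb_surface_from_handle(sws, whandle, &format);
   if (!srf)
      return NULL;     /* refused: bad offset, handle, or mip count */

   /* 'format' now describes the shared surface; the caller owns one
    * reference on 'srf' and must release it when done. */
   return srf;
}
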
Example #6
static struct pipe_buffer *
vmw_drm_buffer_from_handle(struct drm_api *drm_api,
                           struct pipe_screen *screen,
			   const char *name,
			   unsigned handle)
{
    struct vmw_svga_winsys_surface *vsrf;
    struct svga_winsys_surface *ssrf;
    struct vmw_winsys_screen *vws =
	vmw_winsys_screen(svga_winsys_screen(screen));
    struct pipe_buffer *buf;
    union drm_vmw_surface_reference_arg arg;
    struct drm_vmw_surface_arg *req = &arg.req;
    struct drm_vmw_surface_create_req *rep = &arg.rep;
    int ret;
    int i;

    /*
     * The vmware device-specific handle is the hardware SID.
     * FIXME: We probably want to move this to the ioctl implementations.
     */

    memset(&arg, 0, sizeof(arg));
    req->sid = handle;

    ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_REF_SURFACE,
			      &arg, sizeof(arg));

    if (ret) {
	fprintf(stderr, "Failed referencing shared surface. SID %d.\n"
		"Error %d (%s).\n",
		handle, ret, strerror(-ret));
	return NULL;
    }

    if (rep->mip_levels[0] != 1) {
	fprintf(stderr, "Incorrect number of mipmap levels on shared surface."
		" SID %d, levels %d\n",
		handle, rep->mip_levels[0]);
	goto out_mip;
    }

    for (i=1; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
	if (rep->mip_levels[i] != 0) {
	    fprintf(stderr, "Incorrect number of faces levels on shared surface."
		    " SID %d, face %d present.\n",
		    handle, i);
	    goto out_mip;
	}
    }

    vsrf = CALLOC_STRUCT(vmw_svga_winsys_surface);
    if (!vsrf)
	goto out_mip;

    pipe_reference_init(&vsrf->refcnt, 1);
    p_atomic_set(&vsrf->validated, 0);
    vsrf->sid = handle;
    ssrf = svga_winsys_surface(vsrf);
    buf = svga_screen_buffer_wrap_surface(screen, rep->format, ssrf);
    if (!buf)
	vmw_svga_winsys_surface_reference(&vsrf, NULL);

    return buf;
  out_mip:
    vmw_ioctl_surface_destroy(vws, handle);
    return NULL;
}
Example #7
static boolean
st_api_make_current(struct st_api *stapi, struct st_context_iface *stctxi,
                    struct st_framebuffer_iface *stdrawi,
                    struct st_framebuffer_iface *streadi)
{
   struct st_context *st = (struct st_context *) stctxi;
   struct st_framebuffer *stdraw, *stread;
   boolean ret;

   _glapi_check_multithread();

   if (st) {
      /* reuse or create the draw fb */
      stdraw = st_framebuffer_reuse_or_create(st->ctx->WinSysDrawBuffer,
                                              stdrawi);
      if (streadi != stdrawi) {
         /* do the same for the read fb */
         stread = st_framebuffer_reuse_or_create(st->ctx->WinSysReadBuffer,
                                                 streadi);
      }
      else {
         stread = NULL;
         /* reuse the draw fb for the read fb */
         if (stdraw)
            st_framebuffer_reference(&stread, stdraw);
      }

      if (stdraw && stread) {
         if (stctxi != st_api_get_current(stapi)) {
            p_atomic_set(&stdraw->revalidate, TRUE);
            p_atomic_set(&stread->revalidate, TRUE);
         }
         st_framebuffer_validate(stdraw, st);
         if (stread != stdraw)
            st_framebuffer_validate(stread, st);

         /* modify the draw/read buffers of the context */
         if (stdraw->iface) {
            st_visual_to_default_buffer(stdraw->iface->visual,
                  &st->ctx->Color.DrawBuffer[0], NULL);
         }
         if (stread->iface) {
            st_visual_to_default_buffer(stread->iface->visual,
                  &st->ctx->Pixel.ReadBuffer, NULL);
         }

         ret = _mesa_make_current(st->ctx, &stdraw->Base, &stread->Base);
      }
      else {
         struct gl_framebuffer *incomplete = _mesa_get_incomplete_framebuffer();
         ret = _mesa_make_current(st->ctx, incomplete, incomplete);
      }

      st_framebuffer_reference(&stdraw, NULL);
      st_framebuffer_reference(&stread, NULL);
   }
   else {
      ret = _mesa_make_current(NULL, NULL, NULL);
   }

   return ret;
}
Example #8
/**
 * Validate a framebuffer to make sure up-to-date pipe_textures are used.
 */
static void
st_framebuffer_validate(struct st_framebuffer *stfb, struct st_context *st)
{
   struct pipe_context *pipe = st->pipe;
   struct pipe_resource *textures[ST_ATTACHMENT_COUNT];
   uint width, height;
   unsigned i;
   boolean changed = FALSE;

   if (!p_atomic_read(&stfb->revalidate))
      return;

   /* validate the fb */
   if (!stfb->iface->validate(stfb->iface, stfb->statts, stfb->num_statts, textures))
      return;

   width = stfb->Base.Width;
   height = stfb->Base.Height;

   for (i = 0; i < stfb->num_statts; i++) {
      struct st_renderbuffer *strb;
      struct pipe_surface *ps, surf_tmpl;
      gl_buffer_index idx;

      if (!textures[i])
         continue;

      idx = attachment_to_buffer_index(stfb->statts[i]);
      if (idx >= BUFFER_COUNT) {
         pipe_resource_reference(&textures[i], NULL);
         continue;
      }

      strb = st_renderbuffer(stfb->Base.Attachment[idx].Renderbuffer);
      assert(strb);
      if (strb->texture == textures[i]) {
         pipe_resource_reference(&textures[i], NULL);
         continue;
      }

      memset(&surf_tmpl, 0, sizeof(surf_tmpl));
      u_surface_default_template(&surf_tmpl, textures[i],
                                 PIPE_BIND_RENDER_TARGET);
      ps = pipe->create_surface(pipe, textures[i], &surf_tmpl);
      if (ps) {
         pipe_surface_reference(&strb->surface, ps);
         pipe_resource_reference(&strb->texture, ps->texture);
         /* ownership transferred */
         pipe_surface_reference(&ps, NULL);

         changed = TRUE;

         strb->Base.Width = strb->surface->width;
         strb->Base.Height = strb->surface->height;

         width = strb->Base.Width;
         height = strb->Base.Height;
      }

      pipe_resource_reference(&textures[i], NULL);
   }

   if (changed) {
      st->dirty.st |= ST_NEW_FRAMEBUFFER;
      _mesa_resize_framebuffer(st->ctx, &stfb->Base, width, height);

      assert(stfb->Base.Width == width);
      assert(stfb->Base.Height == height);
   }

   p_atomic_set(&stfb->revalidate, FALSE);
}
Example #9
/**
 * Bind the context to the given framebuffers.
 */
static boolean
vg_context_bind_framebuffers(struct st_context_iface *stctxi,
                             struct st_framebuffer_iface *stdrawi,
                             struct st_framebuffer_iface *streadi)
{
   struct vg_context *ctx = (struct vg_context *) stctxi;
   struct st_framebuffer *stfb;
   enum st_attachment_type strb_att;

   /* the draw and read framebuffers must be the same */
   if (stdrawi != streadi)
      return FALSE;

   p_atomic_set(&ctx->draw_buffer_invalid, TRUE);

   strb_att = (stdrawi) ? choose_attachment(stdrawi) : ST_ATTACHMENT_INVALID;

   if (ctx->draw_buffer) {
      stfb = ctx->draw_buffer;

      /* free the existing fb */
      if (!stdrawi ||
          stfb->strb_att != strb_att ||
          stfb->strb->format != stdrawi->visual->color_format) {
         destroy_renderbuffer(stfb->strb);
         destroy_renderbuffer(stfb->dsrb);
         FREE(stfb);

         ctx->draw_buffer = NULL;
      }
   }

   if (!stdrawi)
      return TRUE;

   if (strb_att == ST_ATTACHMENT_INVALID)
      return FALSE;

   /* create a new fb */
   if (!ctx->draw_buffer) {
      stfb = CALLOC_STRUCT(st_framebuffer);
      if (!stfb)
         return FALSE;

      stfb->strb = create_renderbuffer(stdrawi->visual->color_format);
      if (!stfb->strb) {
         FREE(stfb);
         return FALSE;
      }

      stfb->dsrb = create_renderbuffer(ctx->ds_format);
      if (!stfb->dsrb) {
         FREE(stfb->strb);
         FREE(stfb);
         return FALSE;
      }

      stfb->width = 0;
      stfb->height = 0;
      stfb->strb_att = strb_att;

      ctx->draw_buffer = stfb;
   }

   ctx->draw_buffer->iface = stdrawi;

   return TRUE;
}
Example #10
int
iris_bo_busy(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };

   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret == 0) {
      bo->idle = !busy.busy;
      return busy.busy;
   }
   return false;
}

int
iris_bo_madvise(struct iris_bo *bo, int state)
{
   struct drm_i915_gem_madvise madv = {
      .handle = bo->gem_handle,
      .madv = state,
      .retained = 1,
   };

   drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

   return madv.retained;
}

/* drop the oldest entries that have been purged by the kernel */
static void
iris_bo_cache_purge_bucket(struct iris_bufmgr *bufmgr,
                          struct bo_cache_bucket *bucket)
{
   list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
      if (iris_bo_madvise(bo, I915_MADV_DONTNEED))
         break;

      list_del(&bo->head);
      bo_free(bo);
   }
}

static struct iris_bo *
bo_calloc(void)
{
   struct iris_bo *bo = calloc(1, sizeof(*bo));
   if (bo) {
      bo->hash = _mesa_hash_pointer(bo);
   }
   return bo;
}

static struct iris_bo *
bo_alloc_internal(struct iris_bufmgr *bufmgr,
                  const char *name,
                  uint64_t size,
                  enum iris_memory_zone memzone,
                  unsigned flags,
                  uint32_t tiling_mode,
                  uint32_t stride)
{
   struct iris_bo *bo;
   unsigned int page_size = getpagesize();
   int ret;
   struct bo_cache_bucket *bucket;
   bool alloc_from_cache;
   uint64_t bo_size;
   bool zeroed = false;

   if (flags & BO_ALLOC_ZEROED)
      zeroed = true;

   if ((flags & BO_ALLOC_COHERENT) && !bufmgr->has_llc) {
      bo_size = MAX2(ALIGN(size, page_size), page_size);
      bucket = NULL;
      goto skip_cache;
   }

   /* Round the allocated size up to a power of two number of pages. */
   bucket = bucket_for_size(bufmgr, size);

   /* If we don't have caching at this size, don't actually round the
    * allocation up.
    */
   if (bucket == NULL) {
      bo_size = MAX2(ALIGN(size, page_size), page_size);
   } else {
      bo_size = bucket->size;
   }

   mtx_lock(&bufmgr->lock);
   /* Get a buffer out of the cache if available */
retry:
   alloc_from_cache = false;
   if (bucket != NULL && !list_empty(&bucket->head)) {
      /* If the last BO in the cache is idle, then reuse it.  Otherwise,
       * allocate a fresh buffer to avoid stalling.
       */
      bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
      if (!iris_bo_busy(bo)) {
         alloc_from_cache = true;
         list_del(&bo->head);
      }

      if (alloc_from_cache) {
         if (!iris_bo_madvise(bo, I915_MADV_WILLNEED)) {
            bo_free(bo);
            iris_bo_cache_purge_bucket(bufmgr, bucket);
            goto retry;
         }

         if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
            bo_free(bo);
            goto retry;
         }

         if (zeroed) {
            void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
            if (!map) {
               bo_free(bo);
               goto retry;
            }
            memset(map, 0, bo_size);
         }
      }
   }

   if (alloc_from_cache) {
      /* If the cached BO isn't in the right memory zone, free the old
       * memory and assign it a new address.
       */
      if (memzone != iris_memzone_for_address(bo->gtt_offset)) {
         vma_free(bufmgr, bo->gtt_offset, bo->size);
         bo->gtt_offset = 0ull;
      }
   } else {
skip_cache:
      bo = bo_calloc();
      if (!bo)
         goto err;

      bo->size = bo_size;
      bo->idle = true;

      struct drm_i915_gem_create create = { .size = bo_size };

      /* All new BOs we get from the kernel are zeroed, so we don't need to
       * worry about that here.
       */
      ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
      if (ret != 0) {
         free(bo);
         goto err;
      }

      bo->gem_handle = create.handle;

      bo->bufmgr = bufmgr;

      bo->tiling_mode = I915_TILING_NONE;
      bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
      bo->stride = 0;

      if (bo_set_tiling_internal(bo, tiling_mode, stride))
         goto err_free;

      /* Calling set_domain() will allocate pages for the BO outside of the
       * struct mutex lock in the kernel, which is more efficient than waiting
       * to create them during the first execbuf that uses the BO.
       */
      struct drm_i915_gem_set_domain sd = {
         .handle = bo->gem_handle,
         .read_domains = I915_GEM_DOMAIN_CPU,
         .write_domain = 0,
      };

      if (drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
         goto err_free;
   }

   bo->name = name;
   p_atomic_set(&bo->refcount, 1);
   bo->reusable = bucket && bufmgr->bo_reuse;
   bo->cache_coherent = bufmgr->has_llc;
   bo->index = -1;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;

   /* By default, capture all driver-internal buffers like shader kernels,
    * surface states, dynamic states, border colors, and so on.
    */
   if (memzone < IRIS_MEMZONE_OTHER)
      bo->kflags |= EXEC_OBJECT_CAPTURE;

   if (bo->gtt_offset == 0ull) {
      bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, 1);

      if (bo->gtt_offset == 0ull)
         goto err_free;
   }

   mtx_unlock(&bufmgr->lock);

   if ((flags & BO_ALLOC_COHERENT) && !bo->cache_coherent) {
      struct drm_i915_gem_caching arg = {
         .handle = bo->gem_handle,
         .caching = 1,
      };
      if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) == 0) {
         bo->cache_coherent = true;
         bo->reusable = false;
      }
   }

   DBG("bo_create: buf %d (%s) (%s memzone) %llub\n", bo->gem_handle,
       bo->name, memzone_name(memzone), (unsigned long long) size);

   return bo;

err_free:
   bo_free(bo);
err:
   mtx_unlock(&bufmgr->lock);
   return NULL;
}

struct iris_bo *
iris_bo_alloc(struct iris_bufmgr *bufmgr,
              const char *name,
              uint64_t size,
              enum iris_memory_zone memzone)
{
   return bo_alloc_internal(bufmgr, name, size, memzone,
                            0, I915_TILING_NONE, 0);
}

struct iris_bo *
iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
                    uint64_t size, enum iris_memory_zone memzone,
                    uint32_t tiling_mode, uint32_t pitch, unsigned flags)
{
   return bo_alloc_internal(bufmgr, name, size, memzone,
                            flags, tiling_mode, pitch);
}

struct iris_bo *
iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
                       void *ptr, size_t size,
                       enum iris_memory_zone memzone)
{
   struct iris_bo *bo;

   bo = bo_calloc();
   if (!bo)
      return NULL;

   struct drm_i915_gem_userptr arg = {
      .user_ptr = (uintptr_t)ptr,
      .user_size = size,
   };
   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
      goto err_free;
   bo->gem_handle = arg.handle;

   /* Check the buffer for validity before we try and use it in a batch */
   struct drm_i915_gem_set_domain sd = {
      .handle = bo->gem_handle,
      .read_domains = I915_GEM_DOMAIN_CPU,
   };
   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
      goto err_close;

   bo->name = name;
   bo->size = size;
   bo->map_cpu = ptr;

   bo->bufmgr = bufmgr;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
   bo->gtt_offset = vma_alloc(bufmgr, memzone, size, 1);
   if (bo->gtt_offset == 0ull)
      goto err_close;

   p_atomic_set(&bo->refcount, 1);
   bo->userptr = true;
   bo->cache_coherent = true;
   bo->index = -1;
   bo->idle = true;

   return bo;

err_close:
   drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &bo->gem_handle);
err_free:
   free(bo);
   return NULL;
}

/**
 * Returns an iris_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
struct iris_bo *
iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
                             const char *name, unsigned int handle)
{
   struct iris_bo *bo;

   /* At the moment most applications only have a few named BOs.
    * For instance, in a DRI client only the render buffers passed
    * between X and the client are named. And since X returns the
    * alternating names for the front/back buffer a linear search
    * provides a sufficiently fast match.
    */
   mtx_lock(&bufmgr->lock);
   bo = hash_find_bo(bufmgr->name_table, handle);
   if (bo) {
      iris_bo_reference(bo);
      goto out;
   }

   struct drm_gem_open open_arg = { .name = handle };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
   if (ret != 0) {
      DBG("Couldn't reference %s handle 0x%08x: %s\n",
          name, handle, strerror(errno));
      bo = NULL;
      goto out;
   }
   /* Now see if someone has used a prime handle to get this
    * object from the kernel before by looking through the list
    * again for a matching gem_handle.
    */
   bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
   if (bo) {
      iris_bo_reference(bo);
      goto out;
   }

   bo = bo_calloc();
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   bo->size = open_arg.size;
   bo->gtt_offset = 0;
   bo->bufmgr = bufmgr;
   bo->gem_handle = open_arg.handle;
   bo->name = name;
   bo->global_name = handle;
   bo->reusable = false;
   bo->external = true;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
   bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);

   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
   _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);

   struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
   ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
   if (ret != 0)
      goto err_unref;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */
   DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);

out:
   mtx_unlock(&bufmgr->lock);
   return bo;

err_unref:
   bo_free(bo);
   mtx_unlock(&bufmgr->lock);
   return NULL;
}

static void
bo_free(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (bo->map_cpu && !bo->userptr) {
      VG_NOACCESS(bo->map_cpu, bo->size);
      munmap(bo->map_cpu, bo->size);
   }
   if (bo->map_wc) {
      VG_NOACCESS(bo->map_wc, bo->size);
      munmap(bo->map_wc, bo->size);
   }
   if (bo->map_gtt) {
      VG_NOACCESS(bo->map_gtt, bo->size);
      munmap(bo->map_gtt, bo->size);
   }

   if (bo->external) {
      struct hash_entry *entry;

      if (bo->global_name) {
         entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
         _mesa_hash_table_remove(bufmgr->name_table, entry);
      }

      entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
      _mesa_hash_table_remove(bufmgr->handle_table, entry);
   }

   /* Close this object */
   struct drm_gem_close close = { .handle = bo->gem_handle };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
   if (ret != 0) {
      DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
          bo->gem_handle, bo->name, strerror(errno));
   }

   vma_free(bo->bufmgr, bo->gtt_offset, bo->size);

   free(bo);
}

/** Frees all cached buffers significantly older than @time. */
static void
cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time)
{
   int i;

   if (bufmgr->time == time)
      return;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         if (time - bo->free_time <= 1)
            break;

         list_del(&bo->head);

         bo_free(bo);
      }
   }

   bufmgr->time = time;
}

static void
bo_unreference_final(struct iris_bo *bo, time_t time)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct bo_cache_bucket *bucket;

   DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);

   bucket = NULL;
   if (bo->reusable)
      bucket = bucket_for_size(bufmgr, bo->size);
   /* Put the buffer into our internal cache for reuse if we can. */
   if (bucket && iris_bo_madvise(bo, I915_MADV_DONTNEED)) {
      bo->free_time = time;
      bo->name = NULL;

      list_addtail(&bo->head, &bucket->head);
   } else {
      bo_free(bo);
   }
}

void
iris_bo_unreference(struct iris_bo *bo)
{
   if (bo == NULL)
      return;

   assert(p_atomic_read(&bo->refcount) > 0);

   if (atomic_add_unless(&bo->refcount, -1, 1)) {
      struct iris_bufmgr *bufmgr = bo->bufmgr;
      struct timespec time;

      clock_gettime(CLOCK_MONOTONIC, &time);

      mtx_lock(&bufmgr->lock);

      if (p_atomic_dec_zero(&bo->refcount)) {
         bo_unreference_final(bo, time.tv_sec);
         cleanup_bo_cache(bufmgr, time.tv_sec);
      }

      mtx_unlock(&bufmgr->lock);
   }
}
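
iris_bo_unreference above avoids taking the bufmgr mutex on every release: atomic_add_unless drops the count on the fast path whenever this is not the last reference, and only a potentially final release takes the lock and re-checks with p_atomic_dec_zero. A self-contained sketch of that pattern with C11 atomics and a pthread mutex, using hypothetical demo_* names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct demo_obj {
   atomic_int refcount;
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Decrement the count unless that would take it from 1 to 0.
 * Returns true if the decrement was performed (fast path). */
static bool
demo_sub_unless_last(atomic_int *v)
{
   int cur = atomic_load(v);
   while (cur != 1) {
      if (atomic_compare_exchange_weak(v, &cur, cur - 1))
         return true;
   }
   return false;
}

static void
demo_unreference(struct demo_obj *obj)
{
   /* Fast path: not the last reference, so no lock is needed. */
   if (demo_sub_unless_last(&obj->refcount))
      return;

   /* Possibly the last reference: take the lock and re-check, since
    * another thread may have taken a new reference in the meantime. */
   pthread_mutex_lock(&demo_lock);
   if (atomic_fetch_sub(&obj->refcount, 1) == 1) {
      /* Last reference dropped: free the object or move it to a
       * reuse cache here (omitted in this sketch). */
   }
   pthread_mutex_unlock(&demo_lock);
}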

static void
bo_wait_with_stall_warning(struct pipe_debug_callback *dbg,
                           struct iris_bo *bo,
                           const char *action)
{
   bool busy = dbg && !bo->idle;
   double elapsed = unlikely(busy) ? -get_time() : 0.0;

   iris_bo_wait_rendering(bo);

   if (unlikely(busy)) {
      elapsed += get_time();
      if (elapsed > 1e-5) /* 0.01ms */ {
         perf_debug(dbg, "%s a busy \"%s\" BO stalled and took %.03f ms.\n",
                    action, bo->name, elapsed * 1000);
      }
   }
}

static void
print_flags(unsigned flags)
{
   if (flags & MAP_READ)
      DBG("READ ");
   if (flags & MAP_WRITE)
      DBG("WRITE ");
   if (flags & MAP_ASYNC)
      DBG("ASYNC ");
   if (flags & MAP_PERSISTENT)
      DBG("PERSISTENT ");
   if (flags & MAP_COHERENT)
      DBG("COHERENT ");
   if (flags & MAP_RAW)
      DBG("RAW ");
   DBG("\n");
}

static void *
iris_bo_map_cpu(struct pipe_debug_callback *dbg,
                struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* We disallow CPU maps for writing to non-coherent buffers, as the
    * CPU map can become invalidated when a batch is flushed out, which
    * can happen at unpredictable times.  You should use WC maps instead.
    */
   assert(bo->cache_coherent || !(flags & MAP_WRITE));

   if (!bo->map_cpu) {
      DBG("iris_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap mmap_arg = {
         .handle = bo->gem_handle,
         .size = bo->size,
      };
      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }
      void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         munmap(map, bo->size);
      }
   }
   assert(bo->map_cpu);

   DBG("iris_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name,
       bo->map_cpu);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "CPU mapping");
   }

   if (!bo->cache_coherent && !bo->bufmgr->has_llc) {
      /* If we're reusing an existing CPU mapping, the CPU caches may
       * contain stale data from the last time we read from that mapping.
       * (With the BO cache, it might even be data from a previous buffer!)
       * Even if it's a brand new mapping, the kernel may have zeroed the
       * buffer via CPU writes.
       *
       * We need to invalidate those cachelines so that we see the latest
       * contents, and so long as we only read from the CPU mmap we do not
       * need to write those cachelines back afterwards.
       *
       * On LLC, the empirical evidence suggests that writes from the GPU
       * that bypass the LLC (i.e. for scanout) do *invalidate* the CPU
       * cachelines. (Other reads, such as the display engine, bypass the
       * LLC entirely requiring us to keep dirty pixels for the scanout
       * out of any cache.)
       */
      gen_invalidate_range(bo->map_cpu, bo->size);
   }

   return bo->map_cpu;
}
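
The p_atomic_cmpxchg above resolves two threads mapping the same BO at once: both may create a mapping, but only the first to install its pointer wins, and the loser unmaps its copy and adopts the winner's. A stand-alone sketch of that install-or-discard idea, with malloc standing in for mmap and hypothetical demo_* names:

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct demo_bo {
   size_t size;
   _Atomic(void *) map;      /* lazily created, shared by all threads */
};

/* Return a mapping for the BO, creating it on first use. Safe to call
 * from multiple threads without a lock. */
static void *
demo_bo_map(struct demo_bo *bo)
{
   void *map = atomic_load(&bo->map);
   if (map)
      return map;

   /* Create our own candidate mapping (mmap in the real code). */
   map = malloc(bo->size);
   if (!map)
      return NULL;
   memset(map, 0, bo->size);

   /* Try to install it; if someone beat us to it, discard ours. */
   void *expected = NULL;
   if (!atomic_compare_exchange_strong(&bo->map, &expected, map)) {
      free(map);             /* munmap in the real code */
      map = expected;        /* adopt the winner's mapping */
   }
   return map;
}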

static void *
iris_bo_map_wc(struct pipe_debug_callback *dbg,
               struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->map_wc) {
      DBG("iris_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap mmap_arg = {
         .handle = bo->gem_handle,
         .size = bo->size,
         .flags = I915_MMAP_WC,
      };
      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         munmap(map, bo->size);
      }
   }
   assert(bo->map_wc);

   DBG("iris_bo_map_wc: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_wc);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "WC mapping");
   }

   return bo->map_wc;
}

/**
 * Perform an uncached mapping via the GTT.
 *
 * Write access through the GTT is not quite fully coherent. On low power
 * systems especially, like modern Atoms, we can observe reads from RAM before
 * the write via GTT has landed. A write memory barrier that flushes the Write
 * Combining Buffer (i.e. sfence/mfence) is not sufficient to order the later
 * read after the write as the GTT write suffers a small delay through the GTT
 * indirection. The kernel uses an uncached mmio read to ensure the GTT write
 * is ordered with reads (either by the GPU, WB or WC) and unconditionally
 * flushes prior to execbuf submission. However, if we are not informing the
 * kernel about our GTT writes, it will not flush before earlier access, such
 * as when using the cmdparser. Similarly, we need to be careful if we should
 * ever issue a CPU read immediately following a GTT write.
 *
 * Telling the kernel about write access also has one more important
 * side-effect. Upon receiving notification about the write, it cancels any
 * scanout buffering for FBC/PSR and friends. Later FBC/PSR is then flushed by
 * either SW_FINISH or DIRTYFB. The presumption is that we never write to the
 * actual scanout via a mmaping, only to a backbuffer and so all the FBC/PSR
 * tracking is handled on the buffer exchange instead.
 */
static void *
iris_bo_map_gtt(struct pipe_debug_callback *dbg,
                struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* Get a mapping of the buffer if we haven't before. */
   if (bo->map_gtt == NULL) {
      DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = bo->gem_handle };

      /* Get the fake offset back... */
      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      /* and mmap it. */
      void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, bufmgr->fd, mmap_arg.offset);
      if (map == MAP_FAILED) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will
       * already intercept this mmap call. However, for consistency between
       * all the mmap paths, we mark the pointer as defined now and mark it
       * as inaccessible afterwards.
       */
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         munmap(map, bo->size);
      }
   }
   assert(bo->map_gtt);

   DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "GTT mapping");
   }

   return bo->map_gtt;
}

static bool
can_map_cpu(struct iris_bo *bo, unsigned flags)
{
   if (bo->cache_coherent)
      return true;

   /* Even if the buffer itself is not cache-coherent (such as a scanout), on
    * an LLC platform reads are always coherent (they are performed via the
    * central system agent). It is just the writes that need special care, to
    * ensure they land in main memory and do not stick in the CPU cache.
    */
   if (!(flags & MAP_WRITE) && bo->bufmgr->has_llc)
      return true;

   /* If PERSISTENT or COHERENT are set, the mmapping needs to remain valid
    * across batch flushes where the kernel will change cache domains of the
    * bo, invalidating continued access to the CPU mmap on non-LLC device.
    *
    * Similarly, ASYNC typically means that the buffer will be accessed via
    * both the CPU and the GPU simultaneously.  Batches may be executed that
    * use the BO even while it is mapped.  While OpenGL technically disallows
    * most drawing while non-persistent mappings are active, we may still use
    * the GPU for blits or other operations, causing batches to happen at
    * inconvenient times.
    *
    * If RAW is set, we expect the caller to be able to handle a WC buffer
    * more efficiently than the involuntary clflushes.
    */
   if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC | MAP_RAW))
      return false;

   return !(flags & MAP_WRITE);
}

void *
iris_bo_map(struct pipe_debug_callback *dbg,
            struct iris_bo *bo, unsigned flags)
{
   if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW))
      return iris_bo_map_gtt(dbg, bo, flags);

   void *map;

   if (can_map_cpu(bo, flags))
      map = iris_bo_map_cpu(dbg, bo, flags);
   else
      map = iris_bo_map_wc(dbg, bo, flags);

   /* Allow the attempt to fail by falling back to the GTT where necessary.
    *
    * Not every buffer can be mmaped directly using the CPU (or WC), for
    * example buffers that wrap stolen memory or are imported from other
    * devices. For those, we have little choice but to use a GTT mmapping.
    * However, if we use a slow GTT mmapping for reads where we expected fast
    * access, that order of magnitude difference in throughput will be clearly
    * expressed by angry users.
    *
    * We skip MAP_RAW because we want to avoid map_gtt's fence detiling.
    */
   if (!map && !(flags & MAP_RAW)) {
      perf_debug(dbg, "Fallback GTT mapping for %s with access flags %x\n",
                 bo->name, flags);
      map = iris_bo_map_gtt(dbg, bo, flags);
   }

   return map;
}

/** Waits for all GPU rendering with the object to have completed. */
void
iris_bo_wait_rendering(struct iris_bo *bo)
{
   /* We require a kernel recent enough for WAIT_IOCTL support.
    * See intel_init_bufmgr()
    */
   iris_bo_wait(bo, -1);
}

/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful, i.e. the last batch referencing the
 * object has completed within the allotted time. Otherwise a negative return
 * value describes the error; of particular interest is -ETIME when the wait
 * has failed to yield the desired result.
 *
 * Similar to iris_bo_wait_rendering, except a timeout parameter allows
 * the operation to give up after a certain amount of time. Another subtle
 * difference is that the internal locking semantics differ (this variant
 * does not hold the lock for the duration of the wait), which makes the
 * wait subject to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call. The wait will
 * not guarantee that the buffer is re-issued via another thread, or a flinked
 * handle. Userspace must make sure this race does not occur if such precision
 * is important.
 *
 * Note that some kernels have broken the promise of an infinite wait for
 * negative values; upgrade to the latest stable kernel if this is the case.
 */
int
iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* If we know it's idle, don't bother with the kernel round trip */
   if (bo->idle && !bo->external)
      return 0;

   struct drm_i915_gem_wait wait = {
      .bo_handle = bo->gem_handle,
      .timeout_ns = timeout_ns,
   };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   if (ret != 0)
      return -errno;

   bo->idle = true;

   return ret;
}
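
Given the contract documented above (0 on success, a negative errno such as -ETIME otherwise, and an effectively infinite wait for negative timeouts), a bounded-wait helper might look like the sketch below. demo_try_wait is hypothetical and assumes struct iris_bo and iris_bo_wait() as declared above.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: wait up to 'budget_ns' for the BO to go idle and
 * report whether it did. A negative budget would block indefinitely. */
static bool
demo_try_wait(struct iris_bo *bo, int64_t budget_ns)
{
   int ret = iris_bo_wait(bo, budget_ns);
   if (ret == 0)
      return true;           /* idle: last batch referencing it finished */
   if (ret == -ETIME)
      return false;          /* still busy after the allotted time */
   return false;             /* some other error; treat as still busy */
}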

void
iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
{
   mtx_destroy(&bufmgr->lock);

   /* Free any cached buffer objects we were going to reuse */
   for (int i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         list_del(&bo->head);

         bo_free(bo);
      }
   }

   _mesa_hash_table_destroy(bufmgr->name_table, NULL);
   _mesa_hash_table_destroy(bufmgr->handle_table, NULL);

   for (int z = 0; z < IRIS_MEMZONE_COUNT; z++) {
      if (z != IRIS_MEMZONE_BINDER)
         util_vma_heap_finish(&bufmgr->vma_allocator[z]);
   }

   free(bufmgr);
}

static int
bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
                       uint32_t stride)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_set_tiling set_tiling;
   int ret;

   if (bo->global_name == 0 &&
       tiling_mode == bo->tiling_mode && stride == bo->stride)
      return 0;

   memset(&set_tiling, 0, sizeof(set_tiling));
   do {
      /* set_tiling is slightly broken and overwrites the
       * input on the error path, so we have to open code
       * drm_ioctl.
       */
      set_tiling.handle = bo->gem_handle;
      set_tiling.tiling_mode = tiling_mode;
      set_tiling.stride = stride;

      ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
   if (ret == -1)
      return -errno;

   bo->tiling_mode = set_tiling.tiling_mode;
   bo->swizzle_mode = set_tiling.swizzle_mode;
   bo->stride = set_tiling.stride;
   return 0;
}

int
iris_bo_get_tiling(struct iris_bo *bo, uint32_t *tiling_mode,
                  uint32_t *swizzle_mode)
{
   *tiling_mode = bo->tiling_mode;
   *swizzle_mode = bo->swizzle_mode;
   return 0;
}

struct iris_bo *
iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd)
{
   uint32_t handle;
   struct iris_bo *bo;

   mtx_lock(&bufmgr->lock);
   int ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
   if (ret) {
      DBG("import_dmabuf: failed to obtain handle from fd: %s\n",
          strerror(errno));
      mtx_unlock(&bufmgr->lock);
      return NULL;
   }

   /*
    * See if the kernel has already returned this buffer to us. Just as
    * for named buffers, we must not create two BOs pointing at the same
    * kernel object.
    */
   bo = hash_find_bo(bufmgr->handle_table, handle);
   if (bo) {
      iris_bo_reference(bo);
      goto out;
   }

   bo = bo_calloc();
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   /* Determine size of bo.  The fd-to-handle ioctl really should
    * return the size, but it doesn't.  If we have kernel 3.12 or
    * later, we can lseek on the prime fd to get the size.  Older
    * kernels will just fail, in which case we fall back to the
    * provided (estimated or guessed) size. */
   ret = lseek(prime_fd, 0, SEEK_END);
   if (ret != -1)
      bo->size = ret;

   bo->bufmgr = bufmgr;

   bo->gem_handle = handle;
   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);

   bo->name = "prime";
   bo->reusable = false;
   bo->external = true;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
   bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);

   struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
      goto err;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */

out:
   mtx_unlock(&bufmgr->lock);
   return bo;

err:
   bo_free(bo);
   mtx_unlock(&bufmgr->lock);
   return NULL;
}

static void
iris_bo_make_external_locked(struct iris_bo *bo)
{
   if (!bo->external) {
      _mesa_hash_table_insert(bo->bufmgr->handle_table, &bo->gem_handle, bo);
      bo->external = true;
   }
}

static void
iris_bo_make_external(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (bo->external)
      return;

   mtx_lock(&bufmgr->lock);
   iris_bo_make_external_locked(bo);
   mtx_unlock(&bufmgr->lock);
}

int
iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   iris_bo_make_external(bo);

   if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
                          DRM_CLOEXEC, prime_fd) != 0)
      return -errno;

   bo->reusable = false;

   return 0;
}

uint32_t
iris_bo_export_gem_handle(struct iris_bo *bo)
{
   iris_bo_make_external(bo);

   return bo->gem_handle;
}

int
iris_bo_flink(struct iris_bo *bo, uint32_t *name)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->global_name) {
      struct drm_gem_flink flink = { .handle = bo->gem_handle };

      if (drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
         return -errno;

      mtx_lock(&bufmgr->lock);
      if (!bo->global_name) {
         iris_bo_make_external_locked(bo);
         bo->global_name = flink.name;
         _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
      }
      mtx_unlock(&bufmgr->lock);

      bo->reusable = false;
   }

   *name = bo->global_name;
   return 0;
}

static void
add_bucket(struct iris_bufmgr *bufmgr, int size)
{
   unsigned int i = bufmgr->num_buckets;

   assert(i < ARRAY_SIZE(bufmgr->cache_bucket));

   list_inithead(&bufmgr->cache_bucket[i].head);
   bufmgr->cache_bucket[i].size = size;
   bufmgr->num_buckets++;

   assert(bucket_for_size(bufmgr, size) == &bufmgr->cache_bucket[i]);
   assert(bucket_for_size(bufmgr, size - 2048) == &bufmgr->cache_bucket[i]);
   assert(bucket_for_size(bufmgr, size + 1) != &bufmgr->cache_bucket[i]);
}

static void
init_cache_buckets(struct iris_bufmgr *bufmgr)
{
   uint64_t size, cache_max_size = 64 * 1024 * 1024;

   /* OK, so power of two buckets was too wasteful of memory.
    * Give 3 other sizes between each power of two, to hopefully
    * cover things accurately enough.  (The alternative is
    * probably to just go for exact matching of sizes, and assume
    * that for things like composited window resize the tiled
    * width/height alignment and rounding of sizes to pages will
    * get us useful cache hit rates anyway)
    */
   add_bucket(bufmgr, PAGE_SIZE);
   add_bucket(bufmgr, PAGE_SIZE * 2);
   add_bucket(bufmgr, PAGE_SIZE * 3);

   /* Initialize the linked lists for BO reuse cache. */
   for (size = 4 * PAGE_SIZE; size <= cache_max_size; size *= 2) {
      add_bucket(bufmgr, size);

      add_bucket(bufmgr, size + size * 1 / 4);
      add_bucket(bufmgr, size + size * 2 / 4);
      add_bucket(bufmgr, size + size * 3 / 4);
   }
}
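
With a 4 KiB PAGE_SIZE this produces buckets at 4, 8 and 12 KiB, then four buckets per power-of-two step: 16, 20, 24, 28, 32, 40, 48, 56, 64 KiB and so on, up to 64, 80, 96 and 112 MiB at the 64 MiB cap. The small stand-alone program below (PAGE_SIZE assumed to be 4096 here) just enumerates those sizes to make the spacing visible:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u        /* assumption: 4 KiB pages */

/* Print the bucket sizes generated by the scheme above: three small
 * buckets (1, 2, 3 pages), then four per power-of-two step up to 64 MiB. */
int
main(void)
{
   uint64_t size, cache_max_size = 64ull * 1024 * 1024;

   printf("%u %u %u ", DEMO_PAGE_SIZE, DEMO_PAGE_SIZE * 2, DEMO_PAGE_SIZE * 3);

   for (size = 4 * DEMO_PAGE_SIZE; size <= cache_max_size; size *= 2) {
      printf("%llu %llu %llu %llu ",
             (unsigned long long) size,
             (unsigned long long) (size + size * 1 / 4),
             (unsigned long long) (size + size * 2 / 4),
             (unsigned long long) (size + size * 3 / 4));
   }
   printf("\n");
   return 0;
}
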
Example #11
struct radeon_winsys *radeon_drm_winsys_create(int fd)
{
    struct radeon_drm_winsys *ws;

    if (!fd_tab) {
        fd_tab = util_hash_table_create(hash_fd, compare_fd);
    }

    ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
    if (ws) {
        pipe_reference(NULL, &ws->base.reference);
        return &ws->base;
    }

    ws = CALLOC_STRUCT(radeon_drm_winsys);
    if (!ws) {
        return NULL;
    }
    ws->fd = fd;
    util_hash_table_set(fd_tab, intptr_to_pointer(fd), ws);

    if (!do_winsys_init(ws))
        goto fail;

    /* Create managers. */
    ws->kman = radeon_bomgr_create(ws);
    if (!ws->kman)
        goto fail;
    ws->cman = pb_cache_manager_create(ws->kman, 1000000);
    if (!ws->cman)
        goto fail;

    if (ws->gen >= DRV_R600) {
        ws->surf_man = radeon_surface_manager_new(fd);
        if (!ws->surf_man)
            goto fail;
    }

    /* init reference */
    pipe_reference_init(&ws->base.reference, 1);

    /* Set functions. */
    ws->base.destroy = radeon_winsys_destroy;
    ws->base.query_info = radeon_query_info;
    ws->base.cs_request_feature = radeon_cs_request_feature;
    ws->base.surface_init = radeon_drm_winsys_surface_init;
    ws->base.surface_best = radeon_drm_winsys_surface_best;
    ws->base.query_value = radeon_query_value;

    radeon_bomgr_init_functions(ws);
    radeon_drm_cs_init_functions(ws);

    pipe_mutex_init(ws->hyperz_owner_mutex);
    pipe_mutex_init(ws->cmask_owner_mutex);
    pipe_mutex_init(ws->cs_stack_lock);

    p_atomic_set(&ws->ncs, 0);
    pipe_semaphore_init(&ws->cs_queued, 0);
    pipe_condvar_init(ws->cs_queue_empty);
    if (ws->num_cpus > 1 && debug_get_option_thread())
        ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);

    return &ws->base;

fail:
    if (ws->cman)
        ws->cman->destroy(ws->cman);
    if (ws->kman)
        ws->kman->destroy(ws->kman);
    if (ws->surf_man)
        radeon_surface_manager_free(ws->surf_man);
    FREE(ws);
    return NULL;
}
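
radeon_drm_winsys_create above keeps a single winsys per DRM fd: it looks the fd up in a process-wide table, takes another reference on a hit, and only constructs and registers a new winsys on a miss. A small self-contained sketch of that share-by-key idea, where a fixed array and a plain counter stand in for util_hash_table and pipe_reference, and all demo_* names are hypothetical:

#include <stddef.h>
#include <stdlib.h>

struct demo_winsys {
   int fd;
   int refcount;
};

#define DEMO_MAX_WINSYS 16
static struct demo_winsys *demo_table[DEMO_MAX_WINSYS];

/* Return the existing winsys for 'fd' with an extra reference, or create
 * and register a new one. Not thread-safe; the real code also needs a lock
 * around the table and must unregister the entry if initialization fails. */
static struct demo_winsys *
demo_winsys_create(int fd)
{
   for (int i = 0; i < DEMO_MAX_WINSYS; i++) {
      if (demo_table[i] && demo_table[i]->fd == fd) {
         demo_table[i]->refcount++;         /* share the existing one */
         return demo_table[i];
      }
   }

   for (int i = 0; i < DEMO_MAX_WINSYS; i++) {
      if (!demo_table[i]) {
         struct demo_winsys *ws = calloc(1, sizeof(*ws));
         if (!ws)
            return NULL;
         ws->fd = fd;
         ws->refcount = 1;
         demo_table[i] = ws;
         return ws;
      }
   }
   return NULL;                              /* table full */
}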