Code Example #1
static int test2(void)
{
	drm_intel_bo *test_intel_bo;
	uint32_t fb_id;
	drmModeClip clip;
	int prime_fd;
	uint32_t udl_handle;
	int ret;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

	ret = drmPrimeFDToHandle(udl_fd, prime_fd, &udl_handle);
	if (ret)
		goto out;

	ret = drmModeAddFB(udl_fd, 640, 480, 16, 16, 640, udl_handle, &fb_id);
	if (ret)
		goto out;

	clip.x1 = 0;
	clip.y1 = 0;
	clip.x2 = 10;
	clip.y2 = 10;
	ret = drmModeDirtyFB(udl_fd, fb_id, &clip, 1);
	if (ret)
		goto out; /* take the cleanup path instead of leaking the fb and bo */
out:
	dumb_bo_destroy(udl_fd, udl_handle);
	drm_intel_bo_unreference(test_intel_bo);
	return ret;
}
Code Example #2
File: freedreno_bo.c  Project: chadversary/libdrm
struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
	int ret, size;
	uint32_t handle;
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);
	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
	if (ret) {
		pthread_mutex_unlock(&table_lock); /* drop the table lock on the error path */
		return NULL;
	}

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	/* lseek() to get bo size */
	size = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_CUR);

	bo = bo_from_handle(dev, size, handle);

	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
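
The "/* lseek() to get bo size */" step above works because, on kernel 3.12 and later, dma-buf file descriptors support lseek(); the PRIME fd-to-handle ioctl itself does not report the buffer size (the iris example further down makes the same observation). A minimal sketch of that trick as a standalone helper follows; the helper name dmabuf_get_size is hypothetical and not taken from any of the projects listed here.

#include <sys/types.h>
#include <unistd.h>

/* Hypothetical helper: query a dma-buf's size by seeking to its end.
 * Requires kernel >= 3.12; on older kernels the lseek() fails and we return -1. */
static off_t dmabuf_get_size(int dmabuf_fd)
{
	off_t size = lseek(dmabuf_fd, 0, SEEK_END);
	if (size < 0)
		return -1;

	/* Restore the offset so later users of the fd are unaffected. */
	if (lseek(dmabuf_fd, 0, SEEK_SET) < 0)
		return -1;

	return size;
}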
Code Example #3
File: backend_kms.c  Project: clopez/libgbm
static struct gbm_kms_bo* gbm_kms_import_fd(struct gbm_device *gbm,
					    void *_buffer)
{
	struct gbm_import_fd_data *fd_data = _buffer;
	struct gbm_kms_device *dev = (struct gbm_kms_device*)gbm;
	struct gbm_kms_bo *bo;
	uint32_t handle;

	if (drmPrimeFDToHandle(dev->base.base.fd, fd_data->fd, &handle)) {
		GBM_DEBUG("%s: %s: drmPrimeFDToHandle() failed. %s\n",
			  __FILE__, __func__, strerror(errno));
		return NULL;
	}

	// XXX: BO handle is imported in wayland-kms.
	if (!(bo = calloc(1, sizeof(struct gbm_kms_bo))))
		return NULL;

	bo->base.gbm = gbm;
	bo->base.width = fd_data->width;
	bo->base.height = fd_data->height;
	bo->base.format = fd_data->format;
	bo->base.stride = fd_data->stride;
	bo->base.handle.u32 = handle;
	bo->num_planes = 1;

	return bo;
}
Code Example #4
File: radeon_bo_gem.c  Project: iquiw/xsrc
struct radeon_bo *
radeon_gem_bo_open_prime(struct radeon_bo_manager *bom, int fd_handle, uint32_t size)
{
    struct radeon_bo_gem *bo;
    int r;
    uint32_t handle;

    bo = (struct radeon_bo_gem*)calloc(1, sizeof(struct radeon_bo_gem));
    if (bo == NULL) {
        return NULL;
    }

    bo->base.bom = bom;
    bo->base.handle = 0;
    bo->base.size = size;
    bo->base.alignment = 0;
    bo->base.domains = RADEON_GEM_DOMAIN_GTT;
    bo->base.flags = 0;
    bo->base.ptr = NULL;
    atomic_set(&bo->reloc_in_cs, 0);
    bo->map_count = 0;

    r = drmPrimeFDToHandle(bom->fd, fd_handle, &handle);
    if (r != 0) {
	free(bo);
	return NULL;
    }

    bo->base.handle = handle;
    bo->name = handle;

    radeon_bo_ref((struct radeon_bo *)bo);
    return (struct radeon_bo *)bo;

}
Code Example #5
File: amdgpu_bo.c  Project: janesma/drm
static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
	struct drm_gem_flink flink;
	int fd, dma_fd;
	uint32_t handle;
	int r;

	fd = bo->dev->fd;
	handle = bo->handle;
	if (bo->flink_name)
		return 0;


	if (bo->dev->flink_fd != bo->dev->fd) {
		r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				       &dma_fd);
		if (!r) {
			r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
			close(dma_fd);
		}
		if (r)
			return r;
		fd = bo->dev->flink_fd;
	}
	memset(&flink, 0, sizeof(flink));
	flink.handle = handle;

	r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
	if (r)
		return r;

	bo->flink_name = flink.name;

	if (bo->dev->flink_fd != bo->dev->fd) {
		struct drm_gem_close args = {};
		args.handle = handle;
		drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
	}

	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_set(bo->dev->bo_flink_names,
			    (void*)(uintptr_t)bo->flink_name,
			    bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);

	return 0;
}
Code Example #6
/*
 * simple share and import
 */
static int test1(void)
{
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	int ret;
	uint32_t udl_handle;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

	ret = drmPrimeFDToHandle(udl_fd, prime_fd, &udl_handle);

	dumb_bo_destroy(udl_fd, udl_handle);
	drm_intel_bo_unreference(test_intel_bo);
	return ret;
}
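
test1() above is the sharing pattern at its most minimal: export a GEM buffer as a dma-buf fd on one device, then turn the fd back into a GEM handle on another device with drmPrimeFDToHandle(). A rough, self-contained sketch of that round trip is shown below; src_fd, dst_fd and gem_handle are assumptions (an already-open exporting device, an already-open importing device, and a valid GEM handle on the exporter), and real code would also track and eventually close the imported handle.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#include <xf86drm.h>

/* Sketch only: share one GEM buffer from src_fd to dst_fd via PRIME. */
static int share_buffer(int src_fd, uint32_t gem_handle,
			int dst_fd, uint32_t *imported_handle)
{
	int prime_fd = -1;
	int ret;

	/* Export the GEM handle as a dma-buf file descriptor. */
	ret = drmPrimeHandleToFD(src_fd, gem_handle, DRM_CLOEXEC, &prime_fd);
	if (ret) {
		fprintf(stderr, "prime export failed: %d\n", ret);
		return ret;
	}

	/* Import the dma-buf on the destination device. */
	ret = drmPrimeFDToHandle(dst_fd, prime_fd, imported_handle);
	if (ret)
		fprintf(stderr, "prime import failed: %d\n", ret);

	/* The fd is only a transport; the imported handle keeps the buffer alive. */
	close(prime_fd);
	return ret;
}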
Code Example #7
File: vmw_screen_ioctl.c  Project: dumbbell/mesa
/**
 * vmw_ioctl_surface_req - Fill in a struct surface_req
 *
 * @vws: Winsys screen
 * @whandle: Surface handle
 * @req: The struct surface req to fill in
 * @needs_unref: This call takes a kernel surface reference that needs to
 * be unreferenced.
 *
 * Returns 0 on success, negative error type otherwise.
 * Fills in the surface_req structure according to handle type and kernel
 * capabilities.
 */
static int
vmw_ioctl_surface_req(const struct vmw_winsys_screen *vws,
                      const struct winsys_handle *whandle,
                      struct drm_vmw_surface_arg *req,
                      boolean *needs_unref)
{
   int ret;

   switch(whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
   case DRM_API_HANDLE_TYPE_KMS:
      *needs_unref = FALSE;
      req->handle_type = DRM_VMW_HANDLE_LEGACY;
      req->sid = whandle->handle;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      if (!vws->ioctl.have_drm_2_6) {
         uint32_t handle;

         ret = drmPrimeFDToHandle(vws->ioctl.drm_fd, whandle->handle, &handle);
         if (ret) {
            vmw_error("Failed to get handle from prime fd %d.\n",
                      (int) whandle->handle);
            return -EINVAL;
         }

         *needs_unref = TRUE;
         req->handle_type = DRM_VMW_HANDLE_LEGACY;
         req->sid = handle;
      } else {
         *needs_unref = FALSE;
         req->handle_type = DRM_VMW_HANDLE_PRIME;
         req->sid = whandle->handle;
      }
      break;
   default:
      vmw_error("Attempt to import unsupported handle type %d.\n",
                whandle->type);
      return -EINVAL;
   }

   return 0;
}
Code Example #8
File: dumb_bo.c  Project: AmesianX/xorg-server
struct dumb_bo *
dumb_get_bo_from_fd(int fd, int handle, int pitch, int size)
{
    struct dumb_bo *bo;
    int ret;

    bo = calloc(1, sizeof(*bo));
    if (!bo)
        return NULL;

    ret = drmPrimeFDToHandle(fd, handle, &bo->handle);
    if (ret) {
        free(bo);
        return NULL;
    }
    bo->pitch = pitch;
    bo->size = size;
    return bo;
}
Code Example #9
File: extframebuffer.cpp  Project: jsarha/kmsxx
ExtFramebuffer::ExtFramebuffer(Card& card, uint32_t width, uint32_t height, PixelFormat format,
			       int fds[4], uint32_t pitches[4], uint32_t offsets[4])
	: Framebuffer(card, width, height)
{
	int r;

	const PixelFormatInfo& format_info = get_pixel_format_info(format);

	uint32_t handles[4] = { 0 };

	for (int i = 0; i < format_info.num_planes; ++i) {
		r = drmPrimeFDToHandle(card.fd(), fds[i], &handles[i]);
		if (r)
			throw invalid_argument(string("drmPrimeFDToHandle: ") + strerror(errno));
	}

	uint32_t id;
	r = drmModeAddFB2(card.fd(), width, height, (uint32_t)format,
			  handles, pitches, offsets, &id, 0);
	if (r)
		throw invalid_argument(string("drmModeAddFB2 failed: ") + strerror(errno));

	set_id(id);
}
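
This constructor imports one GEM handle per plane before calling drmModeAddFB2(); if framebuffer creation then fails, the handles imported in the loop are not closed here. A hedged C sketch of an import-plus-cleanup variant is shown below; fb_from_dmabufs and the fixed four-plane arrays are illustrative, not taken from kmsxx.

#include <stdbool.h>
#include <stdint.h>

#include <xf86drm.h>
#include <xf86drmMode.h>

/* Sketch: import per-plane dma-buf fds, create a framebuffer, and close the
 * imported GEM handles again if drmModeAddFB2() fails. */
static int fb_from_dmabufs(int drm_fd, uint32_t width, uint32_t height,
			   uint32_t format, int num_planes, const int fds[4],
			   const uint32_t pitches[4], const uint32_t offsets[4],
			   uint32_t *fb_id)
{
	uint32_t handles[4] = { 0 };
	int ret;

	for (int i = 0; i < num_planes; i++) {
		ret = drmPrimeFDToHandle(drm_fd, fds[i], &handles[i]);
		if (ret)
			goto out_close;
	}

	ret = drmModeAddFB2(drm_fd, width, height, format,
			    handles, pitches, offsets, fb_id, 0);
	if (!ret)
		return 0;

out_close:
	/* Planes can share one buffer and therefore one handle; close each only once. */
	for (int i = 0; i < num_planes; i++) {
		bool seen_before = false;

		for (int j = 0; j < i; j++)
			if (handles[j] == handles[i])
				seen_before = true;

		if (!handles[i] || seen_before)
			continue;

		struct drm_gem_close close_arg = { .handle = handles[i] };
		drmIoctl(drm_fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
	}
	return ret;
}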
Code Example #10
File: iris_bufmgr.c  Project: ChristophHaag/mesa-mesa
int
iris_bo_busy(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };

   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret == 0) {
      bo->idle = !busy.busy;
      return busy.busy;
   }
   return false;
}

int
iris_bo_madvise(struct iris_bo *bo, int state)
{
   struct drm_i915_gem_madvise madv = {
      .handle = bo->gem_handle,
      .madv = state,
      .retained = 1,
   };

   drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

   return madv.retained;
}

/* drop the oldest entries that have been purged by the kernel */
static void
iris_bo_cache_purge_bucket(struct iris_bufmgr *bufmgr,
                          struct bo_cache_bucket *bucket)
{
   list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
      if (iris_bo_madvise(bo, I915_MADV_DONTNEED))
         break;

      list_del(&bo->head);
      bo_free(bo);
   }
}

static struct iris_bo *
bo_calloc(void)
{
   struct iris_bo *bo = calloc(1, sizeof(*bo));
   if (bo) {
      bo->hash = _mesa_hash_pointer(bo);
   }
   return bo;
}

static struct iris_bo *
bo_alloc_internal(struct iris_bufmgr *bufmgr,
                  const char *name,
                  uint64_t size,
                  enum iris_memory_zone memzone,
                  unsigned flags,
                  uint32_t tiling_mode,
                  uint32_t stride)
{
   struct iris_bo *bo;
   unsigned int page_size = getpagesize();
   int ret;
   struct bo_cache_bucket *bucket;
   bool alloc_from_cache;
   uint64_t bo_size;
   bool zeroed = false;

   if (flags & BO_ALLOC_ZEROED)
      zeroed = true;

   if ((flags & BO_ALLOC_COHERENT) && !bufmgr->has_llc) {
      bo_size = MAX2(ALIGN(size, page_size), page_size);
      bucket = NULL;
      goto skip_cache;
   }

   /* Round the allocated size up to a power of two number of pages. */
   bucket = bucket_for_size(bufmgr, size);

   /* If we don't have caching at this size, don't actually round the
    * allocation up.
    */
   if (bucket == NULL) {
      bo_size = MAX2(ALIGN(size, page_size), page_size);
   } else {
      bo_size = bucket->size;
   }

   mtx_lock(&bufmgr->lock);
   /* Get a buffer out of the cache if available */
retry:
   alloc_from_cache = false;
   if (bucket != NULL && !list_empty(&bucket->head)) {
      /* If the last BO in the cache is idle, then reuse it.  Otherwise,
       * allocate a fresh buffer to avoid stalling.
       */
      bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
      if (!iris_bo_busy(bo)) {
         alloc_from_cache = true;
         list_del(&bo->head);
      }

      if (alloc_from_cache) {
         if (!iris_bo_madvise(bo, I915_MADV_WILLNEED)) {
            bo_free(bo);
            iris_bo_cache_purge_bucket(bufmgr, bucket);
            goto retry;
         }

         if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
            bo_free(bo);
            goto retry;
         }

         if (zeroed) {
            void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
            if (!map) {
               bo_free(bo);
               goto retry;
            }
            memset(map, 0, bo_size);
         }
      }
   }

   if (alloc_from_cache) {
      /* If the cached BO isn't in the right memory zone, free the old
       * memory and assign it a new address.
       */
      if (memzone != iris_memzone_for_address(bo->gtt_offset)) {
         vma_free(bufmgr, bo->gtt_offset, bo->size);
         bo->gtt_offset = 0ull;
      }
   } else {
skip_cache:
      bo = bo_calloc();
      if (!bo)
         goto err;

      bo->size = bo_size;
      bo->idle = true;

      struct drm_i915_gem_create create = { .size = bo_size };

      /* All new BOs we get from the kernel are zeroed, so we don't need to
       * worry about that here.
       */
      ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
      if (ret != 0) {
         free(bo);
         goto err;
      }

      bo->gem_handle = create.handle;

      bo->bufmgr = bufmgr;

      bo->tiling_mode = I915_TILING_NONE;
      bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
      bo->stride = 0;

      if (bo_set_tiling_internal(bo, tiling_mode, stride))
         goto err_free;

      /* Calling set_domain() will allocate pages for the BO outside of the
       * struct mutex lock in the kernel, which is more efficient than waiting
       * to create them during the first execbuf that uses the BO.
       */
      struct drm_i915_gem_set_domain sd = {
         .handle = bo->gem_handle,
         .read_domains = I915_GEM_DOMAIN_CPU,
         .write_domain = 0,
      };

      if (drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
         goto err_free;
   }

   bo->name = name;
   p_atomic_set(&bo->refcount, 1);
   bo->reusable = bucket && bufmgr->bo_reuse;
   bo->cache_coherent = bufmgr->has_llc;
   bo->index = -1;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;

   /* By default, capture all driver-internal buffers like shader kernels,
    * surface states, dynamic states, border colors, and so on.
    */
   if (memzone < IRIS_MEMZONE_OTHER)
      bo->kflags |= EXEC_OBJECT_CAPTURE;

   if (bo->gtt_offset == 0ull) {
      bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, 1);

      if (bo->gtt_offset == 0ull)
         goto err_free;
   }

   mtx_unlock(&bufmgr->lock);

   if ((flags & BO_ALLOC_COHERENT) && !bo->cache_coherent) {
      struct drm_i915_gem_caching arg = {
         .handle = bo->gem_handle,
         .caching = 1,
      };
      if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) == 0) {
         bo->cache_coherent = true;
         bo->reusable = false;
      }
   }

   DBG("bo_create: buf %d (%s) (%s memzone) %llub\n", bo->gem_handle,
       bo->name, memzone_name(memzone), (unsigned long long) size);

   return bo;

err_free:
   bo_free(bo);
err:
   mtx_unlock(&bufmgr->lock);
   return NULL;
}

struct iris_bo *
iris_bo_alloc(struct iris_bufmgr *bufmgr,
              const char *name,
              uint64_t size,
              enum iris_memory_zone memzone)
{
   return bo_alloc_internal(bufmgr, name, size, memzone,
                            0, I915_TILING_NONE, 0);
}

struct iris_bo *
iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
                    uint64_t size, enum iris_memory_zone memzone,
                    uint32_t tiling_mode, uint32_t pitch, unsigned flags)
{
   return bo_alloc_internal(bufmgr, name, size, memzone,
                            flags, tiling_mode, pitch);
}

struct iris_bo *
iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
                       void *ptr, size_t size,
                       enum iris_memory_zone memzone)
{
   struct iris_bo *bo;

   bo = bo_calloc();
   if (!bo)
      return NULL;

   struct drm_i915_gem_userptr arg = {
      .user_ptr = (uintptr_t)ptr,
      .user_size = size,
   };
   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
      goto err_free;
   bo->gem_handle = arg.handle;

   /* Check the buffer for validity before we try and use it in a batch */
   struct drm_i915_gem_set_domain sd = {
      .handle = bo->gem_handle,
      .read_domains = I915_GEM_DOMAIN_CPU,
   };
   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
      goto err_close;

   bo->name = name;
   bo->size = size;
   bo->map_cpu = ptr;

   bo->bufmgr = bufmgr;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
   bo->gtt_offset = vma_alloc(bufmgr, memzone, size, 1);
   if (bo->gtt_offset == 0ull)
      goto err_close;

   p_atomic_set(&bo->refcount, 1);
   bo->userptr = true;
   bo->cache_coherent = true;
   bo->index = -1;
   bo->idle = true;

   return bo;

err_close:
   {
      /* Pass a real struct drm_gem_close rather than a pointer to the bare handle. */
      struct drm_gem_close close = { .handle = bo->gem_handle };
      drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
   }
err_free:
   free(bo);
   return NULL;
}

/**
 * Returns an iris_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
struct iris_bo *
iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
                             const char *name, unsigned int handle)
{
   struct iris_bo *bo;

   /* At the moment most applications only have a few named bo.
    * For instance, in a DRI client only the render buffers passed
    * between X and the client are named. And since X returns the
    * alternating names for the front/back buffer a linear search
    * provides a sufficiently fast match.
    */
   mtx_lock(&bufmgr->lock);
   bo = hash_find_bo(bufmgr->name_table, handle);
   if (bo) {
      iris_bo_reference(bo);
      goto out;
   }

   struct drm_gem_open open_arg = { .name = handle };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
   if (ret != 0) {
      DBG("Couldn't reference %s handle 0x%08x: %s\n",
          name, handle, strerror(errno));
      bo = NULL;
      goto out;
   }
   /* Now see if someone has used a prime handle to get this
    * object from the kernel before by looking through the list
    * again for a matching gem_handle
    */
   bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
   if (bo) {
      iris_bo_reference(bo);
      goto out;
   }

   bo = bo_calloc();
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   bo->size = open_arg.size;
   bo->gtt_offset = 0;
   bo->bufmgr = bufmgr;
   bo->gem_handle = open_arg.handle;
   bo->name = name;
   bo->global_name = handle;
   bo->reusable = false;
   bo->external = true;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
   bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);

   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
   _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);

   struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
   ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
   if (ret != 0)
      goto err_unref;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */
   DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);

out:
   mtx_unlock(&bufmgr->lock);
   return bo;

err_unref:
   bo_free(bo);
   mtx_unlock(&bufmgr->lock);
   return NULL;
}

static void
bo_free(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (bo->map_cpu && !bo->userptr) {
      VG_NOACCESS(bo->map_cpu, bo->size);
      munmap(bo->map_cpu, bo->size);
   }
   if (bo->map_wc) {
      VG_NOACCESS(bo->map_wc, bo->size);
      munmap(bo->map_wc, bo->size);
   }
   if (bo->map_gtt) {
      VG_NOACCESS(bo->map_gtt, bo->size);
      munmap(bo->map_gtt, bo->size);
   }

   if (bo->external) {
      struct hash_entry *entry;

      if (bo->global_name) {
         entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
         _mesa_hash_table_remove(bufmgr->name_table, entry);
      }

      entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
      _mesa_hash_table_remove(bufmgr->handle_table, entry);
   }

   /* Close this object */
   struct drm_gem_close close = { .handle = bo->gem_handle };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
   if (ret != 0) {
      DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
          bo->gem_handle, bo->name, strerror(errno));
   }

   vma_free(bo->bufmgr, bo->gtt_offset, bo->size);

   free(bo);
}

/** Frees all cached buffers significantly older than @time. */
static void
cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time)
{
   int i;

   if (bufmgr->time == time)
      return;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         if (time - bo->free_time <= 1)
            break;

         list_del(&bo->head);

         bo_free(bo);
      }
   }

   bufmgr->time = time;
}

static void
bo_unreference_final(struct iris_bo *bo, time_t time)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct bo_cache_bucket *bucket;

   DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);

   bucket = NULL;
   if (bo->reusable)
      bucket = bucket_for_size(bufmgr, bo->size);
   /* Put the buffer into our internal cache for reuse if we can. */
   if (bucket && iris_bo_madvise(bo, I915_MADV_DONTNEED)) {
      bo->free_time = time;
      bo->name = NULL;

      list_addtail(&bo->head, &bucket->head);
   } else {
      bo_free(bo);
   }
}

void
iris_bo_unreference(struct iris_bo *bo)
{
   if (bo == NULL)
      return;

   assert(p_atomic_read(&bo->refcount) > 0);

   if (atomic_add_unless(&bo->refcount, -1, 1)) {
      struct iris_bufmgr *bufmgr = bo->bufmgr;
      struct timespec time;

      clock_gettime(CLOCK_MONOTONIC, &time);

      mtx_lock(&bufmgr->lock);

      if (p_atomic_dec_zero(&bo->refcount)) {
         bo_unreference_final(bo, time.tv_sec);
         cleanup_bo_cache(bufmgr, time.tv_sec);
      }

      mtx_unlock(&bufmgr->lock);
   }
}

static void
bo_wait_with_stall_warning(struct pipe_debug_callback *dbg,
                           struct iris_bo *bo,
                           const char *action)
{
   bool busy = dbg && !bo->idle;
   double elapsed = unlikely(busy) ? -get_time() : 0.0;

   iris_bo_wait_rendering(bo);

   if (unlikely(busy)) {
      elapsed += get_time();
      if (elapsed > 1e-5) /* 0.01ms */ {
         perf_debug(dbg, "%s a busy \"%s\" BO stalled and took %.03f ms.\n",
                    action, bo->name, elapsed * 1000);
      }
   }
}

static void
print_flags(unsigned flags)
{
   if (flags & MAP_READ)
      DBG("READ ");
   if (flags & MAP_WRITE)
      DBG("WRITE ");
   if (flags & MAP_ASYNC)
      DBG("ASYNC ");
   if (flags & MAP_PERSISTENT)
      DBG("PERSISTENT ");
   if (flags & MAP_COHERENT)
      DBG("COHERENT ");
   if (flags & MAP_RAW)
      DBG("RAW ");
   DBG("\n");
}

static void *
iris_bo_map_cpu(struct pipe_debug_callback *dbg,
                struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* We disallow CPU maps for writing to non-coherent buffers, as the
    * CPU map can become invalidated when a batch is flushed out, which
    * can happen at unpredictable times.  You should use WC maps instead.
    */
   assert(bo->cache_coherent || !(flags & MAP_WRITE));

   if (!bo->map_cpu) {
      DBG("iris_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap mmap_arg = {
         .handle = bo->gem_handle,
         .size = bo->size,
      };
      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }
      void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         munmap(map, bo->size);
      }
   }
   assert(bo->map_cpu);

   DBG("iris_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name,
       bo->map_cpu);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "CPU mapping");
   }

   if (!bo->cache_coherent && !bo->bufmgr->has_llc) {
      /* If we're reusing an existing CPU mapping, the CPU caches may
       * contain stale data from the last time we read from that mapping.
       * (With the BO cache, it might even be data from a previous buffer!)
       * Even if it's a brand new mapping, the kernel may have zeroed the
       * buffer via CPU writes.
       *
       * We need to invalidate those cachelines so that we see the latest
       * contents, and so long as we only read from the CPU mmap we do not
       * need to write those cachelines back afterwards.
       *
       * On LLC, the empirical evidence suggests that writes from the GPU
       * that bypass the LLC (i.e. for scanout) do *invalidate* the CPU
       * cachelines. (Other reads, such as the display engine, bypass the
       * LLC entirely requiring us to keep dirty pixels for the scanout
       * out of any cache.)
       */
      gen_invalidate_range(bo->map_cpu, bo->size);
   }

   return bo->map_cpu;
}

static void *
iris_bo_map_wc(struct pipe_debug_callback *dbg,
               struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->map_wc) {
      DBG("iris_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap mmap_arg = {
         .handle = bo->gem_handle,
         .size = bo->size,
         .flags = I915_MMAP_WC,
      };
      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         munmap(map, bo->size);
      }
   }
   assert(bo->map_wc);

   DBG("iris_bo_map_wc: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_wc);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "WC mapping");
   }

   return bo->map_wc;
}

/**
 * Perform an uncached mapping via the GTT.
 *
 * Write access through the GTT is not quite fully coherent. On low power
 * systems especially, like modern Atoms, we can observe reads from RAM before
 * the write via GTT has landed. A write memory barrier that flushes the Write
 * Combining Buffer (i.e. sfence/mfence) is not sufficient to order the later
 * read after the write as the GTT write suffers a small delay through the GTT
 * indirection. The kernel uses an uncached mmio read to ensure the GTT write
 * is ordered with reads (either by the GPU, WB or WC) and unconditionally
 * flushes prior to execbuf submission. However, if we are not informing the
 * kernel about our GTT writes, it will not flush before earlier access, such
 * as when using the cmdparser. Similarly, we need to be careful if we should
 * ever issue a CPU read immediately following a GTT write.
 *
 * Telling the kernel about write access also has one more important
 * side-effect. Upon receiving notification about the write, it cancels any
 * scanout buffering for FBC/PSR and friends. Later FBC/PSR is then flushed by
 * either SW_FINISH or DIRTYFB. The presumption is that we never write to the
 * actual scanout via a mmaping, only to a backbuffer and so all the FBC/PSR
 * tracking is handled on the buffer exchange instead.
 */
static void *
iris_bo_map_gtt(struct pipe_debug_callback *dbg,
                struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* Get a mapping of the buffer if we haven't before. */
   if (bo->map_gtt == NULL) {
      DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = bo->gem_handle };

      /* Get the fake offset back... */
      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      /* and mmap it. */
      void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, bufmgr->fd, mmap_arg.offset);
      if (map == MAP_FAILED) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will
       * already intercept this mmap call. However, for consistency between
       * all the mmap paths, we mark the pointer as defined now and mark it
       * as inaccessible afterwards.
       */
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         munmap(map, bo->size);
      }
   }
   assert(bo->map_gtt);

   DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "GTT mapping");
   }

   return bo->map_gtt;
}

static bool
can_map_cpu(struct iris_bo *bo, unsigned flags)
{
   if (bo->cache_coherent)
      return true;

   /* Even if the buffer itself is not cache-coherent (such as a scanout), on
    * an LLC platform reads are always coherent (as they are performed via the
    * central system agent). It is just the writes that we need to take special
    * care with, to ensure they land in main memory and do not stick in the
    * CPU cache.
    */
   if (!(flags & MAP_WRITE) && bo->bufmgr->has_llc)
      return true;

   /* If PERSISTENT or COHERENT are set, the mmapping needs to remain valid
    * across batch flushes where the kernel will change cache domains of the
    * bo, invalidating continued access to the CPU mmap on non-LLC device.
    *
    * Similarly, ASYNC typically means that the buffer will be accessed via
    * both the CPU and the GPU simultaneously.  Batches may be executed that
    * use the BO even while it is mapped.  While OpenGL technically disallows
    * most drawing while non-persistent mappings are active, we may still use
    * the GPU for blits or other operations, causing batches to happen at
    * inconvenient times.
    *
    * If RAW is set, we expect the caller to be able to handle a WC buffer
    * more efficiently than the involuntary clflushes.
    */
   if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC | MAP_RAW))
      return false;

   return !(flags & MAP_WRITE);
}

void *
iris_bo_map(struct pipe_debug_callback *dbg,
            struct iris_bo *bo, unsigned flags)
{
   if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW))
      return iris_bo_map_gtt(dbg, bo, flags);

   void *map;

   if (can_map_cpu(bo, flags))
      map = iris_bo_map_cpu(dbg, bo, flags);
   else
      map = iris_bo_map_wc(dbg, bo, flags);

   /* Allow the attempt to fail by falling back to the GTT where necessary.
    *
    * Not every buffer can be mmaped directly using the CPU (or WC), for
    * example buffers that wrap stolen memory or are imported from other
    * devices. For those, we have little choice but to use a GTT mmapping.
    * However, if we use a slow GTT mmapping for reads where we expected fast
    * access, that order of magnitude difference in throughput will be clearly
    * expressed by angry users.
    *
    * We skip MAP_RAW because we want to avoid map_gtt's fence detiling.
    */
   if (!map && !(flags & MAP_RAW)) {
      perf_debug(dbg, "Fallback GTT mapping for %s with access flags %x\n",
                 bo->name, flags);
      map = iris_bo_map_gtt(dbg, bo, flags);
   }

   return map;
}

/** Waits for all GPU rendering with the object to have completed. */
void
iris_bo_wait_rendering(struct iris_bo *bo)
{
   /* We require a kernel recent enough for WAIT_IOCTL support.
    * See intel_init_bufmgr()
    */
   iris_bo_wait(bo, -1);
}

/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful ie. the last batch referencing the
 * object has completed within the allotted time. Otherwise some negative return
 * value describes the error. Of particular interest is -ETIME when the wait has
 * failed to yield the desired result.
 *
 * Similar to iris_bo_wait_rendering except a timeout parameter allows
 * the operation to give up after a certain amount of time. Another subtle
 * difference is the internal locking semantics are different (this variant does
 * not hold the lock for the duration of the wait). This makes the wait subject
 * to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call. The wait will
 * not guarantee that the buffer is re-issued via another thread, or a flinked
 * handle. Userspace must make sure this race does not occur if such precision
 * is important.
 *
 * Note that some kernels have broken the promise of an infinite wait for
 * negative values; upgrade to the latest stable kernel if this is the case.
 */
int
iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* If we know it's idle, don't bother with the kernel round trip */
   if (bo->idle && !bo->external)
      return 0;

   struct drm_i915_gem_wait wait = {
      .bo_handle = bo->gem_handle,
      .timeout_ns = timeout_ns,
   };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   if (ret != 0)
      return -errno;

   bo->idle = true;

   return ret;
}

void
iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
{
   mtx_destroy(&bufmgr->lock);

   /* Free any cached buffer objects we were going to reuse */
   for (int i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         list_del(&bo->head);

         bo_free(bo);
      }
   }

   _mesa_hash_table_destroy(bufmgr->name_table, NULL);
   _mesa_hash_table_destroy(bufmgr->handle_table, NULL);

   for (int z = 0; z < IRIS_MEMZONE_COUNT; z++) {
      if (z != IRIS_MEMZONE_BINDER)
         util_vma_heap_finish(&bufmgr->vma_allocator[z]);
   }

   free(bufmgr);
}

static int
bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
                       uint32_t stride)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_set_tiling set_tiling;
   int ret;

   if (bo->global_name == 0 &&
       tiling_mode == bo->tiling_mode && stride == bo->stride)
      return 0;

   memset(&set_tiling, 0, sizeof(set_tiling));
   do {
      /* set_tiling is slightly broken and overwrites the
       * input on the error path, so we have to open code
       * drm_ioctl.
       */
      set_tiling.handle = bo->gem_handle;
      set_tiling.tiling_mode = tiling_mode;
      set_tiling.stride = stride;

      ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
   if (ret == -1)
      return -errno;

   bo->tiling_mode = set_tiling.tiling_mode;
   bo->swizzle_mode = set_tiling.swizzle_mode;
   bo->stride = set_tiling.stride;
   return 0;
}

int
iris_bo_get_tiling(struct iris_bo *bo, uint32_t *tiling_mode,
                  uint32_t *swizzle_mode)
{
   *tiling_mode = bo->tiling_mode;
   *swizzle_mode = bo->swizzle_mode;
   return 0;
}

struct iris_bo *
iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd)
{
   uint32_t handle;
   struct iris_bo *bo;

   mtx_lock(&bufmgr->lock);
   int ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
   if (ret) {
      DBG("import_dmabuf: failed to obtain handle from fd: %s\n",
          strerror(errno));
      mtx_unlock(&bufmgr->lock);
      return NULL;
   }

   /*
    * See if the kernel has already returned this buffer to us. Just as
    * for named buffers, we must not create two bo's pointing at the same
    * kernel object
    */
   bo = hash_find_bo(bufmgr->handle_table, handle);
   if (bo) {
      iris_bo_reference(bo);
      goto out;
   }

   bo = bo_calloc();
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   /* Determine size of bo.  The fd-to-handle ioctl really should
    * return the size, but it doesn't.  If we have kernel 3.12 or
    * later, we can lseek on the prime fd to get the size.  Older
    * kernels will just fail, in which case we fall back to the
    * provided (estimated or guess size). */
   ret = lseek(prime_fd, 0, SEEK_END);
   if (ret != -1)
      bo->size = ret;

   bo->bufmgr = bufmgr;

   bo->gem_handle = handle;
   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);

   bo->name = "prime";
   bo->reusable = false;
   bo->external = true;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
   bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);

   struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
      goto err;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */

out:
   mtx_unlock(&bufmgr->lock);
   return bo;

err:
   bo_free(bo);
   mtx_unlock(&bufmgr->lock);
   return NULL;
}

static void
iris_bo_make_external_locked(struct iris_bo *bo)
{
   if (!bo->external) {
      _mesa_hash_table_insert(bo->bufmgr->handle_table, &bo->gem_handle, bo);
      bo->external = true;
   }
}

static void
iris_bo_make_external(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (bo->external)
      return;

   mtx_lock(&bufmgr->lock);
   iris_bo_make_external_locked(bo);
   mtx_unlock(&bufmgr->lock);
}

int
iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   iris_bo_make_external(bo);

   if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
                          DRM_CLOEXEC, prime_fd) != 0)
      return -errno;

   bo->reusable = false;

   return 0;
}

uint32_t
iris_bo_export_gem_handle(struct iris_bo *bo)
{
   iris_bo_make_external(bo);

   return bo->gem_handle;
}

int
iris_bo_flink(struct iris_bo *bo, uint32_t *name)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->global_name) {
      struct drm_gem_flink flink = { .handle = bo->gem_handle };

      if (drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
         return -errno;

      mtx_lock(&bufmgr->lock);
      if (!bo->global_name) {
         iris_bo_make_external_locked(bo);
         bo->global_name = flink.name;
         _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
      }
      mtx_unlock(&bufmgr->lock);

      bo->reusable = false;
   }

   *name = bo->global_name;
   return 0;
}

static void
add_bucket(struct iris_bufmgr *bufmgr, int size)
{
   unsigned int i = bufmgr->num_buckets;

   assert(i < ARRAY_SIZE(bufmgr->cache_bucket));

   list_inithead(&bufmgr->cache_bucket[i].head);
   bufmgr->cache_bucket[i].size = size;
   bufmgr->num_buckets++;

   assert(bucket_for_size(bufmgr, size) == &bufmgr->cache_bucket[i]);
   assert(bucket_for_size(bufmgr, size - 2048) == &bufmgr->cache_bucket[i]);
   assert(bucket_for_size(bufmgr, size + 1) != &bufmgr->cache_bucket[i]);
}

static void
init_cache_buckets(struct iris_bufmgr *bufmgr)
{
   uint64_t size, cache_max_size = 64 * 1024 * 1024;

   /* OK, so power of two buckets was too wasteful of memory.
    * Give 3 other sizes between each power of two, to hopefully
    * cover things accurately enough.  (The alternative is
    * probably to just go for exact matching of sizes, and assume
    * that for things like composited window resize the tiled
    * width/height alignment and rounding of sizes to pages will
    * get us useful cache hit rates anyway)
    */
   add_bucket(bufmgr, PAGE_SIZE);
   add_bucket(bufmgr, PAGE_SIZE * 2);
   add_bucket(bufmgr, PAGE_SIZE * 3);

   /* Initialize the linked lists for BO reuse cache. */
   for (size = 4 * PAGE_SIZE; size <= cache_max_size; size *= 2) {
      add_bucket(bufmgr, size);

      add_bucket(bufmgr, size + size * 1 / 4);
      add_bucket(bufmgr, size + size * 2 / 4);
      add_bucket(bufmgr, size + size * 3 / 4);
   }
}
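
Both the freedreno importer near the top of this list (lookup_bo) and iris_bo_import_dmabuf() above keep a handle table because, as their comments note, the kernel may hand back a GEM handle it has already returned for the same underlying buffer: importing the same dma-buf into the same DRM file again yields the existing handle, so there is only one handle to close and only one bo should wrap it. A small hedged check of that behaviour follows; drm_fd and dmabuf_fd are assumed to be already open, and check_prime_dedup is an illustrative name.

#include <assert.h>
#include <stdint.h>

#include <xf86drm.h>

/* Sketch: repeated imports of one dma-buf on one DRM file yield one handle. */
static void check_prime_dedup(int drm_fd, int dmabuf_fd)
{
	uint32_t h1 = 0, h2 = 0;

	if (drmPrimeFDToHandle(drm_fd, dmabuf_fd, &h1) ||
	    drmPrimeFDToHandle(drm_fd, dmabuf_fd, &h2))
		return;

	/* Same buffer, same DRM file: the kernel hands back the same handle,
	 * which is why the drivers above look it up in a table before wrapping
	 * it in a second bo. */
	assert(h1 == h2);

	/* Only one handle exists, so close it exactly once. */
	struct drm_gem_close close_arg = { .handle = h1 };
	drmIoctl(drm_fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
}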
Code Example #11
File: vmw_screen_dri.c  Project: MIPS/external-mesa3d
static struct svga_winsys_surface *
vmw_drm_surface_from_handle(struct svga_winsys_screen *sws,
                            struct winsys_handle *whandle,
			    SVGA3dSurfaceFormat *format)
{
    struct vmw_svga_winsys_surface *vsrf;
    struct svga_winsys_surface *ssrf;
    struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
    union drm_vmw_surface_reference_arg arg;
    struct drm_vmw_surface_arg *req = &arg.req;
    struct drm_vmw_surface_create_req *rep = &arg.rep;
    uint32_t handle = 0;
    struct drm_vmw_size size;
    SVGA3dSize base_size;
    int ret;
    int i;

    if (whandle->offset != 0) {
       fprintf(stderr, "Attempt to import unsupported winsys offset %u\n",
               whandle->offset);
       return NULL;
    }

    switch (whandle->type) {
    case DRM_API_HANDLE_TYPE_SHARED:
    case DRM_API_HANDLE_TYPE_KMS:
       handle = whandle->handle;
       break;
    case DRM_API_HANDLE_TYPE_FD:
       ret = drmPrimeFDToHandle(vws->ioctl.drm_fd, whandle->handle,
                                &handle);
       if (ret) {
	  vmw_error("Failed to get handle from prime fd %d.\n",
		    (int) whandle->handle);
	  return NULL;
       }
       break;
    default:
       vmw_error("Attempt to import unsupported handle type %d.\n",
                 whandle->type);
       return NULL;
    }

    memset(&arg, 0, sizeof(arg));
    req->sid = handle;
    rep->size_addr = (unsigned long)&size;

    ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_REF_SURFACE,
			      &arg, sizeof(arg));

    /*
     * Need to close the handle we got from prime.
     */
    if (whandle->type == DRM_API_HANDLE_TYPE_FD)
       vmw_ioctl_surface_destroy(vws, handle);

    if (ret) {
       /*
        * Any attempt to share something other than a surface, like a dumb
        * kms buffer, should fail here.
        */
       vmw_error("Failed referencing shared surface. SID %d.\n"
                 "Error %d (%s).\n",
                 handle, ret, strerror(-ret));
       return NULL;
    }

    if (rep->mip_levels[0] != 1) {
        vmw_error("Incorrect number of mipmap levels on shared surface."
                  " SID %d, levels %d\n",
                  handle, rep->mip_levels[0]);
	goto out_mip;
    }

    for (i=1; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
	if (rep->mip_levels[i] != 0) {
            vmw_error("Incorrect number of faces levels on shared surface."
                      " SID %d, face %d present.\n",
                      handle, i);
	    goto out_mip;
	}
   }

    vsrf = CALLOC_STRUCT(vmw_svga_winsys_surface);
    if (!vsrf)
	goto out_mip;

    pipe_reference_init(&vsrf->refcnt, 1);
    p_atomic_set(&vsrf->validated, 0);
    vsrf->screen = vws;
    vsrf->sid = handle;
    ssrf = svga_winsys_surface(vsrf);
    *format = rep->format;

    /* Estimate usage, for early flushing. */

    base_size.width = size.width;
    base_size.height = size.height;
    base_size.depth = size.depth;
    vsrf->size = svga3dsurface_get_serialized_size(rep->format, base_size,
                                                   rep->mip_levels[0],
                                                   FALSE);

    return ssrf;

out_mip:
    vmw_ioctl_surface_destroy(vws, handle);

    return NULL;
}
Code Example #12
void CRendererDRMPRIME::SetVideoPlane(CVideoBufferDRMPRIME* buffer)
{
  buffer->m_drm_fd = m_DRM->GetFileDescriptor();

  AVDRMFrameDescriptor* descriptor = buffer->GetDescriptor();
  if (descriptor && descriptor->nb_layers)
  {
    uint32_t handles[4] = {0}, pitches[4] = {0}, offsets[4] = {0};
    uint64_t modifier[4] = {0};
    int ret;

    // convert Prime FD to GEM handle
    for (int object = 0; object < descriptor->nb_objects; object++)
    {
      ret = drmPrimeFDToHandle(m_DRM->GetFileDescriptor(), descriptor->objects[object].fd, &buffer->m_handles[object]);
      if (ret < 0)
      {
        CLog::Log(LOGERROR, "CRendererDRMPRIME::%s - failed to retrieve the GEM handle from prime fd %d, ret = %d", __FUNCTION__, descriptor->objects[object].fd, ret);
        return;
      }
    }

    AVDRMLayerDescriptor* layer = &descriptor->layers[0];

    for (int plane = 0; plane < layer->nb_planes; plane++)
    {
      int object = layer->planes[plane].object_index;
      uint32_t handle = buffer->m_handles[object];
      if (handle && layer->planes[plane].pitch)
      {
        handles[plane] = handle;
        pitches[plane] = layer->planes[plane].pitch;
        offsets[plane] = layer->planes[plane].offset;
        modifier[plane] = descriptor->objects[object].format_modifier;
      }
    }

    // add the video frame FB
    ret = drmModeAddFB2WithModifiers(m_DRM->GetFileDescriptor(), buffer->GetWidth(), buffer->GetHeight(), layer->format, handles, pitches, offsets, modifier, &buffer->m_fb_id, 0);
    if (ret < 0)
    {
      CLog::Log(LOGERROR, "CRendererDRMPRIME::%s - failed to add drm layer %d, ret = %d", __FUNCTION__, buffer->m_fb_id, ret);
      return;
    }

    int32_t crtc_x = static_cast<int32_t>(m_destRect.x1) & ~1;
    int32_t crtc_y = static_cast<int32_t>(m_destRect.y1) & ~1;
    uint32_t crtc_w = (static_cast<uint32_t>(m_destRect.Width()) + 1) & ~1;
    uint32_t crtc_h = (static_cast<uint32_t>(m_destRect.Height()) + 1) & ~1;
    uint32_t src_x = 0;
    uint32_t src_y = 0;
    uint32_t src_w = buffer->GetWidth() << 16;
    uint32_t src_h = buffer->GetHeight() << 16;

    m_DRM->AddProperty(m_DRM->GetPrimaryPlane(), "FB_ID",   buffer->m_fb_id);
    m_DRM->AddProperty(m_DRM->GetPrimaryPlane(), "CRTC_ID", m_DRM->GetCrtc()->crtc->crtc_id);
    m_DRM->AddProperty(m_DRM->GetPrimaryPlane(), "SRC_X",   src_x);
    m_DRM->AddProperty(m_DRM->GetPrimaryPlane(), "SRC_Y",   src_y);
    m_DRM->AddProperty(m_DRM->GetPrimaryPlane(), "SRC_W",   src_w);
    m_DRM->AddProperty(m_DRM->GetPrimaryPlane(), "SRC_H",   src_h);
    m_DRM->AddProperty(m_DRM->GetPrimaryPlane(), "CRTC_X",  crtc_x);
    m_DRM->AddProperty(m_DRM->GetPrimaryPlane(), "CRTC_Y",  crtc_y);
    m_DRM->AddProperty(m_DRM->GetPrimaryPlane(), "CRTC_W",  crtc_w);
    m_DRM->AddProperty(m_DRM->GetPrimaryPlane(), "CRTC_H",  crtc_h);
  }
}
Code Example #13
File: renderonly.c  Project: MIPS/external-mesa3d
struct renderonly_scanout *
renderonly_create_kms_dumb_buffer_for_resource(struct pipe_resource *rsc,
                                               struct renderonly *ro)
{
   struct pipe_screen *screen = rsc->screen;
   struct renderonly_scanout *scanout;
   struct winsys_handle handle;
   int prime_fd, err;
   struct drm_mode_create_dumb create_dumb = {
      .width = rsc->width0,
      .height = rsc->height0,
      .bpp = 32,
   };
   struct drm_mode_destroy_dumb destroy_dumb = { };

   scanout = CALLOC_STRUCT(renderonly_scanout);
   if (!scanout)
      return NULL;

   /* create dumb buffer at scanout GPU */
   err = ioctl(ro->kms_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_dumb);
   if (err < 0) {
      fprintf(stderr, "DRM_IOCTL_MODE_CREATE_DUMB failed: %s\n",
            strerror(errno));
      goto free_scanout;
   }

   scanout->handle = create_dumb.handle;
   scanout->stride = create_dumb.pitch;

   /* export dumb buffer */
   err = drmPrimeHandleToFD(ro->kms_fd, create_dumb.handle, O_CLOEXEC,
         &prime_fd);
   if (err < 0) {
      fprintf(stderr, "failed to export dumb buffer: %s\n", strerror(errno));
      goto free_dumb;
   }

   /* import dumb buffer */
   handle.type = DRM_API_HANDLE_TYPE_FD;
   handle.handle = prime_fd;
   handle.stride = create_dumb.pitch;

   scanout->prime = screen->resource_from_handle(screen, rsc,
         &handle, PIPE_HANDLE_USAGE_READ_WRITE);

   if (!scanout->prime) {
      fprintf(stderr, "failed to create resource_from_handle: %s\n", strerror(errno));
      goto free_dumb;
   }

   return scanout;

free_dumb:
   destroy_dumb.handle = scanout->handle;
   ioctl(ro->kms_fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_dumb);

free_scanout:
   FREE(scanout);

   return NULL;
}

struct renderonly_scanout *
renderonly_create_gpu_import_for_resource(struct pipe_resource *rsc,
                                          struct renderonly *ro)
{
   struct pipe_screen *screen = rsc->screen;
   struct renderonly_scanout *scanout;
   boolean status;
   int fd, err;
   struct winsys_handle handle = {
      .type = DRM_API_HANDLE_TYPE_FD
   };

   scanout = CALLOC_STRUCT(renderonly_scanout);
   if (!scanout)
      return NULL;

   status = screen->resource_get_handle(screen, NULL, rsc, &handle,
         PIPE_HANDLE_USAGE_READ_WRITE);
   if (!status)
      goto free_scanout;

   scanout->stride = handle.stride;
   fd = handle.handle;

   err = drmPrimeFDToHandle(ro->kms_fd, fd, &scanout->handle);
   close(fd);

   if (err < 0) {
      fprintf(stderr, "drmPrimeFDToHandle() failed: %s\n", strerror(errno));
      goto free_scanout;
   }

   return scanout;

free_scanout:
   FREE(scanout);

   return NULL;
}
Code Example #14
File: exynos_drm.c  Project: jvesely/libdrm
/*
 * Destroy a exynos buffer object.
 *
 * @bo: a exynos buffer object to be destroyed.
 */
drm_public void exynos_bo_destroy(struct exynos_bo *bo)
{
	if (!bo)
		return;

	if (bo->vaddr)
		munmap(bo->vaddr, bo->size);

	if (bo->handle) {
		struct drm_gem_close req = {
			.handle = bo->handle,
		};

		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	free(bo);
}


/*
 * Get a exynos buffer object from a gem global object name.
 *
 * @dev: a exynos device object.
 * @name: a gem global object name exported by another process.
 *
 * this interface is used to get a exynos buffer object from a gem
 * global object name sent by another process for buffer sharing.
 *
 * if true, return a exynos buffer object else NULL.
 *
 */
drm_public struct exynos_bo *
exynos_bo_from_name(struct exynos_device *dev, uint32_t name)
{
	struct exynos_bo *bo;
	struct drm_gem_open req = {
		.name = name,
	};

	bo = calloc(sizeof(*bo), 1);
	if (!bo) {
		fprintf(stderr, "failed to allocate bo[%s].\n",
				strerror(errno));
		return NULL;
	}

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		fprintf(stderr, "failed to open gem object[%s].\n",
				strerror(errno));
		goto err_free_bo;
	}

	bo->dev = dev;
	bo->name = name;
	bo->handle = req.handle;

	return bo;

err_free_bo:
	free(bo);
	return NULL;
}

/*
 * Get a gem global object name from a gem object handle.
 *
 * @bo: a exynos buffer object including gem handle.
 * @name: a gem global object name to be got by kernel driver.
 *
 * this interface is used to get a gem global object name from a gem object
 * handle to a buffer that wants to share it with another process.
 *
 * if true, return 0 else negative.
 */
drm_public int exynos_bo_get_name(struct exynos_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
			.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			fprintf(stderr, "failed to get gem global name[%s].\n",
					strerror(errno));
			return ret;
		}

		bo->name = req.name;
	}

	*name = bo->name;

	return 0;
}

drm_public uint32_t exynos_bo_handle(struct exynos_bo *bo)
{
	return bo->handle;
}

/*
 * Mmap a buffer to user space.
 *
 * @bo: a exynos buffer object including a gem object handle to be mmapped
 *	to user space.
 *
 * if true, user pointer mmapped else NULL.
 */
drm_public void *exynos_bo_map(struct exynos_bo *bo)
{
	if (!bo->vaddr) {
		struct exynos_device *dev = bo->dev;
		struct drm_mode_map_dumb arg;
		void *map = NULL;
		int ret;

		memset(&arg, 0, sizeof(arg));
		arg.handle = bo->handle;

		ret = drmIoctl(dev->fd, DRM_IOCTL_MODE_MAP_DUMB, &arg);
		if (ret) {
			fprintf(stderr, "failed to map dumb buffer[%s].\n",
				strerror(errno));
			return NULL;
		}

		map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
				dev->fd, arg.offset);

		if (map != MAP_FAILED)
			bo->vaddr = map;
	}

	return bo->vaddr;
}

/*
 * Export gem object to dmabuf as file descriptor.
 *
 * @dev: exynos device object
 * @handle: gem handle to export as file descriptor of dmabuf
 * @fd: file descriptor returned from kernel
 *
 * @return: 0 on success, -1 on error, and errno will be set
 */
drm_public int
exynos_prime_handle_to_fd(struct exynos_device *dev, uint32_t handle, int *fd)
{
	return drmPrimeHandleToFD(dev->fd, handle, 0, fd);
}

/*
 * Import file descriptor into gem handle.
 *
 * @dev: exynos device object
 * @fd: file descriptor of dmabuf to import
 * @handle: gem handle returned from kernel
 *
 * @return: 0 on success, -1 on error, and errno will be set
 */
drm_public int
exynos_prime_fd_to_handle(struct exynos_device *dev, int fd, uint32_t *handle)
{
	return drmPrimeFDToHandle(dev->fd, fd, handle);
}



/*
 * Request Wireless Display connection or disconnection.
 *
 * @dev: a exynos device object.
 * @connect: indicate whether a connection or disconnection is requested.
 * @ext: indicate whether edid data includes extensions data or not.
 * @edid: a pointer to edid data from Wireless Display device.
 *
 * this interface is used to request Virtual Display driver connection or
 * disconnection. for this, user should get a edid data from the Wireless
 * Display device and then send that data to kernel driver with connection
 * request
 *
 * if true, return 0 else negative.
 */
drm_public int
exynos_vidi_connection(struct exynos_device *dev, uint32_t connect,
		       uint32_t ext, void *edid)
{
	struct drm_exynos_vidi_connection req = {
		.connection	= connect,
		.extensions	= ext,
		.edid		= (uint64_t)(uintptr_t)edid,
	};
	int ret;

	ret = drmIoctl(dev->fd, DRM_IOCTL_EXYNOS_VIDI_CONNECTION, &req);
	if (ret) {
		fprintf(stderr, "failed to request vidi connection[%s].\n",
				strerror(errno));
		return ret;
	}

	return 0;
}

static void
exynos_handle_vendor(int fd, struct drm_event *e, void *ctx)
{
	struct drm_exynos_g2d_event *g2d;
	struct exynos_event_context *ectx = ctx;

	switch (e->type) {
		case DRM_EXYNOS_G2D_EVENT:
			if (ectx->version < 1 || ectx->g2d_event_handler == NULL)
				break;
			g2d = (struct drm_exynos_g2d_event *)e;
			ectx->g2d_event_handler(fd, g2d->cmdlist_no, g2d->tv_sec,
						g2d->tv_usec, U642VOID(g2d->user_data));
			break;

		default:
			break;
	}
}

drm_public int
exynos_handle_event(struct exynos_device *dev, struct exynos_event_context *ctx)
{
	char buffer[1024];
	int len, i;
	struct drm_event *e;
	struct drm_event_vblank *vblank;
	drmEventContextPtr evctx = &ctx->base;

	/* The DRM read semantics guarantees that we always get only
	 * complete events. */
	len = read(dev->fd, buffer, sizeof buffer);
	if (len == 0)
		return 0;
	if (len < (int)sizeof *e)
		return -1;

	i = 0;
	while (i < len) {
		e = (struct drm_event *)(buffer + i);
		switch (e->type) {
		case DRM_EVENT_VBLANK:
			if (evctx->version < 1 ||
			    evctx->vblank_handler == NULL)
				break;
			vblank = (struct drm_event_vblank *) e;
			evctx->vblank_handler(dev->fd,
					      vblank->sequence,
					      vblank->tv_sec,
					      vblank->tv_usec,
					      U642VOID (vblank->user_data));
			break;
		case DRM_EVENT_FLIP_COMPLETE:
			if (evctx->version < 2 ||
			    evctx->page_flip_handler == NULL)
				break;
			vblank = (struct drm_event_vblank *) e;
			evctx->page_flip_handler(dev->fd,
						 vblank->sequence,
						 vblank->tv_sec,
						 vblank->tv_usec,
						 U642VOID (vblank->user_data));
			break;
		default:
			exynos_handle_vendor(dev->fd, e, evctx);
			break;
		}
		i += e->length;
	}

	return 0;
}
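
The exynos_prime_handle_to_fd() and exynos_prime_fd_to_handle() wrappers above are typically paired when handing a buffer from one exynos device to another. A minimal hedged usage sketch follows; the include path and the exynos_share_bo name are assumptions, while the wrapper and exynos_bo_handle() calls are the ones shown in this file.

#include <stdint.h>
#include <unistd.h>

#include <exynos_drmif.h>	/* struct exynos_device, struct exynos_bo, prime wrappers (assumed path) */

/* Sketch: pass an existing exynos BO from src_dev to dst_dev via dma-buf. */
static int exynos_share_bo(struct exynos_device *src_dev, struct exynos_bo *bo,
			   struct exynos_device *dst_dev, uint32_t *dst_handle)
{
	int fd, ret;

	ret = exynos_prime_handle_to_fd(src_dev, exynos_bo_handle(bo), &fd);
	if (ret)
		return ret;

	ret = exynos_prime_fd_to_handle(dst_dev, fd, dst_handle);

	/* The dma-buf fd is only needed for the hand-over itself. */
	close(fd);
	return ret;
}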
Code Example #15
File: exynos_drm.c  Project: markyzq/android_drm
/*
 * Destroy a exynos buffer object.
 *
 * @bo: a exynos buffer object to be destroyed.
 */
drm_public void exynos_bo_destroy(struct exynos_bo *bo)
{
	if (!bo)
		return;

	if (bo->vaddr)
		munmap(bo->vaddr, bo->size);

	if (bo->handle) {
		struct drm_gem_close req = {
			.handle = bo->handle,
		};

		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	free(bo);
}


/*
 * Get a exynos buffer object from a gem global object name.
 *
 * @dev: a exynos device object.
 * @name: a gem global object name exported by another process.
 *
 * this interface is used to get a exynos buffer object from a gem
 * global object name sent by another process for buffer sharing.
 *
 * if true, return a exynos buffer object else NULL.
 *
 */
drm_public struct exynos_bo *
exynos_bo_from_name(struct exynos_device *dev, uint32_t name)
{
	struct exynos_bo *bo;
	struct drm_gem_open req = {
		.name = name,
	};

	bo = calloc(sizeof(*bo), 1);
	if (!bo) {
		fprintf(stderr, "failed to allocate bo[%s].\n",
				strerror(errno));
		return NULL;
	}

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		fprintf(stderr, "failed to open gem object[%s].\n",
				strerror(errno));
		goto err_free_bo;
	}

	bo->dev = dev;
	bo->name = name;
	bo->handle = req.handle;

	return bo;

err_free_bo:
	free(bo);
	return NULL;
}

/*
 * Get a gem global object name from a gem object handle.
 *
 * @bo: a exynos buffer object including gem handle.
 * @name: a gem global object name to be got by kernel driver.
 *
 * this interface is used to get a gem global object name from a gem object
 * handle to a buffer that wants to share it with another process.
 *
 * if true, return 0 else negative.
 */
drm_public int exynos_bo_get_name(struct exynos_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
			.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			fprintf(stderr, "failed to get gem global name[%s].\n",
					strerror(errno));
			return ret;
		}

		bo->name = req.name;
	}

	*name = bo->name;

	return 0;
}

drm_public uint32_t exynos_bo_handle(struct exynos_bo *bo)
{
	return bo->handle;
}

/*
 * Mmap a buffer to user space.
 *
 * @bo: an exynos buffer object containing the GEM object handle to be
 *	mmapped to user space.
 *
 * Returns the mmapped user pointer on success, otherwise NULL.
 */
drm_public void *exynos_bo_map(struct exynos_bo *bo)
{
	if (!bo->vaddr) {
		struct exynos_device *dev = bo->dev;
		struct drm_exynos_gem_mmap req = {
			.handle = bo->handle,
			.size	= bo->size,
		};
		int ret;

		ret = drmIoctl(dev->fd, DRM_IOCTL_EXYNOS_GEM_MMAP, &req);
		if (ret) {
			fprintf(stderr, "failed to mmap[%s].\n",
				strerror(errno));
			return NULL;
		}

		bo->vaddr = (void *)(uintptr_t)req.mapped;
	}

	return bo->vaddr;
}
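
/*
 * Usage sketch, not part of the original file: allocate a buffer, map it
 * and clear it from the CPU. The 4096-byte size and zero flags are
 * arbitrary assumptions; memset() requires <string.h>.
 */
static int example_cpu_fill(struct exynos_device *dev)
{
	struct exynos_bo *bo;
	void *ptr;

	bo = exynos_bo_create(dev, 4096, 0);
	if (!bo)
		return -1;

	ptr = exynos_bo_map(bo);
	if (!ptr) {
		exynos_bo_destroy(bo);
		return -1;
	}

	memset(ptr, 0, 4096);

	/* exynos_bo_destroy() munmap()s the mapping set up above. */
	exynos_bo_destroy(bo);
	return 0;
}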

/*
 * Export gem object to dmabuf as file descriptor.
 *
 * @dev: exynos device object
 * @handle: gem handle to export as file descriptor of dmabuf
 * @fd: file descriptor returned from kernel
 *
 * @return: 0 on success, -1 on error, and errno will be set
 */
drm_public int
exynos_prime_handle_to_fd(struct exynos_device *dev, uint32_t handle, int *fd)
{
	return drmPrimeHandleToFD(dev->fd, handle, 0, fd);
}
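
/*
 * Usage sketch, not part of the original file: exporting a bo as a dma-buf
 * fd, e.g. to pass it to another device or over a UNIX domain socket with
 * SCM_RIGHTS. Error reporting via stderr mirrors the style of this file.
 */
static int example_export_dmabuf(struct exynos_device *dev,
				 struct exynos_bo *bo, int *fd_out)
{
	if (exynos_prime_handle_to_fd(dev, exynos_bo_handle(bo), fd_out)) {
		fprintf(stderr, "failed to export dma-buf[%s].\n",
				strerror(errno));
		return -1;
	}

	/* *fd_out holds its own reference to the buffer and must
	 * eventually be close()d. */
	return 0;
}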

/*
 * Import file descriptor into gem handle.
 *
 * @dev: exynos device object
 * @fd: file descriptor of dmabuf to import
 * @handle: gem handle returned from kernel
 *
 * @return: 0 on success, -1 on error, and errno will be set
 */
drm_public int
exynos_prime_fd_to_handle(struct exynos_device *dev, int fd, uint32_t *handle)
{
	return drmPrimeFDToHandle(dev->fd, fd, handle);
}
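
/*
 * Usage sketch, not part of the original file: importing a dma-buf fd that
 * originated from another driver (a camera or GPU buffer, for instance)
 * into a local GEM handle. Where the fd comes from is an assumption.
 */
static int example_import_dmabuf(struct exynos_device *dev, int dmabuf_fd,
				 uint32_t *handle_out)
{
	if (exynos_prime_fd_to_handle(dev, dmabuf_fd, handle_out)) {
		fprintf(stderr, "failed to import dma-buf[%s].\n",
				strerror(errno));
		return -1;
	}

	/* The returned GEM handle keeps the buffer alive, so dmabuf_fd may
	 * be close()d once it is no longer needed for re-export. */
	return 0;
}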



/*
 * Request Wireless Display connection or disconnection.
 *
 * @dev: an exynos device object.
 * @connect: indicates whether this is a connection or disconnection request.
 * @ext: indicates whether the EDID data includes extension blocks or not.
 * @edid: a pointer to EDID data from the Wireless Display device.
 *
 * This interface is used to request a Virtual Display driver connection or
 * disconnection. For this, the user should get EDID data from the Wireless
 * Display device and then send that data to the kernel driver together with
 * the connection request.
 *
 * Returns 0 on success, otherwise a negative error code.
 */
drm_public int
exynos_vidi_connection(struct exynos_device *dev, uint32_t connect,
		       uint32_t ext, void *edid)
{
	struct drm_exynos_vidi_connection req = {
		.connection	= connect,
		.extensions	= ext,
		.edid		= (uint64_t)(uintptr_t)edid,
	};
	int ret;

	ret = drmIoctl(dev->fd, DRM_IOCTL_EXYNOS_VIDI_CONNECTION, &req);
	if (ret) {
		fprintf(stderr, "failed to request vidi connection[%s].\n",
				strerror(errno));
		return ret;
	}

	return 0;
}
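
/*
 * Usage sketch, not part of the original file: a connect/disconnect cycle
 * for the virtual display. The EDID buffer, its extension flag and what
 * happens while connected are assumptions, including passing NULL EDID on
 * the disconnection request.
 */
static int example_vidi_cycle(struct exynos_device *dev,
			      void *edid, uint32_t has_extensions)
{
	if (exynos_vidi_connection(dev, 1 /* connect */, has_extensions, edid))
		return -1;

	/* ... scan out frames to the virtual display here ... */

	return exynos_vidi_connection(dev, 0 /* disconnect */, 0, NULL);
}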
Code example #16
/* Get GEM handle for the pixmap */
Bool radeon_get_pixmap_handle(PixmapPtr pixmap, uint32_t *handle)
{
    struct radeon_bo *bo = radeon_get_pixmap_bo(pixmap);
#ifdef USE_GLAMOR
    ScreenPtr screen = pixmap->drawable.pScreen;
    RADEONInfoPtr info = RADEONPTR(xf86ScreenToScrn(screen));
#endif

    if (bo) {
	*handle = bo->handle;
	return TRUE;
    }

#ifdef USE_GLAMOR
    if (info->use_glamor) {
	struct radeon_pixmap *priv = radeon_get_pixmap_private(pixmap);
	CARD16 stride;
	CARD32 size;
	int fd, r;

	if (!priv) {
	    priv = calloc(1, sizeof(*priv));
	    if (!priv)
		return FALSE;

	    radeon_set_pixmap_private(pixmap, priv);
	}

	if (priv->handle_valid) {
	    *handle = priv->handle;
	    return TRUE;
	}

	fd = glamor_fd_from_pixmap(screen, pixmap, &stride, &size);
	if (fd < 0)
	    return FALSE;

	r = drmPrimeFDToHandle(info->dri2.drm_fd, fd, &priv->handle);
	close(fd);
	if (r == 0) {
	    struct drm_radeon_gem_set_tiling args = { .handle = priv->handle };

	    priv->handle_valid = TRUE;
	    *handle = priv->handle;

	    if (drmCommandWriteRead(info->dri2.drm_fd,
				    DRM_RADEON_GEM_GET_TILING, &args,
				    sizeof(args)) == 0)
		priv->tiling_flags = args.tiling_flags;

	    return TRUE;
	}
    }
#endif

    return FALSE;
}

uint32_t radeon_get_pixmap_tiling_flags(PixmapPtr pPix)
{
#ifdef USE_GLAMOR
    RADEONInfoPtr info = RADEONPTR(xf86ScreenToScrn(pPix->drawable.pScreen));

    if (info->use_glamor) {
	struct radeon_pixmap *priv = radeon_get_pixmap_private(pPix);

	if (!priv || (!priv->bo && !priv->handle_valid)) {
	    uint32_t handle;

	    radeon_get_pixmap_handle(pPix, &handle);
	    priv = radeon_get_pixmap_private(pPix);
	}

	return priv ? priv->tiling_flags : 0;
    } else
#endif
    {
	struct radeon_exa_pixmap_priv *driver_priv;
	driver_priv = exaGetPixmapDriverPrivate(pPix);
	return driver_priv ? driver_priv->tiling_flags : 0;
    }
}
Code example #17
File: amdgpu_bo.c Project: janesma/drm
int amdgpu_bo_import(amdgpu_device_handle dev,
		     enum amdgpu_bo_handle_type type,
		     uint32_t shared_handle,
		     struct amdgpu_bo_import_result *output)
{
	struct drm_gem_open open_arg = {};
	struct amdgpu_bo *bo = NULL;
	int r;
	int dma_fd;
	uint64_t dma_buf_size = 0;

	/* Convert a DMA buf handle to a KMS handle now. */
	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
		uint32_t handle;
		off_t size;

		/* Get a KMS handle. */
		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
		if (r) {
			return r;
		}

		/* Query the buffer size. */
		size = lseek(shared_handle, 0, SEEK_END);
		if (size == (off_t)-1) {
			amdgpu_close_kms_handle(dev, handle);
			return -errno;
		}
		lseek(shared_handle, 0, SEEK_SET);

		dma_buf_size = size;
		shared_handle = handle;
	}

	/* We must maintain a list of pairs <handle, bo>, so that we always
	 * return the same amdgpu_bo instance for the same handle. */
	pthread_mutex_lock(&dev->bo_table_mutex);

	/* If we have already created a buffer with this handle, find it. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		bo = util_hash_table_get(dev->bo_flink_names,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo = util_hash_table_get(dev->bo_handles,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_kms:
		/* Importing a KMS handle is not allowed. */
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EPERM;

	default:
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EINVAL;
	}

	if (bo) {
		pthread_mutex_unlock(&dev->bo_table_mutex);

		/* The buffer already exists, just bump the refcount. */
		atomic_inc(&bo->refcount);

		output->buf_handle = bo;
		output->alloc_size = bo->alloc_size;
		return 0;
	}

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo) {
		pthread_mutex_unlock(&dev->bo_table_mutex);
		if (type == amdgpu_bo_handle_type_dma_buf_fd) {
			amdgpu_close_kms_handle(dev, shared_handle);
		}
		return -ENOMEM;
	}

	/* Open the handle. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		open_arg.name = shared_handle;
		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
		if (r) {
			free(bo);
			pthread_mutex_unlock(&dev->bo_table_mutex);
			return r;
		}

		bo->handle = open_arg.handle;
		if (dev->flink_fd != dev->fd) {
			r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
			r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);

			close(dma_fd);

			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
		}
		bo->flink_name = shared_handle;
		bo->alloc_size = open_arg.size;
		util_hash_table_set(dev->bo_flink_names,
				    (void*)(uintptr_t)bo->flink_name, bo);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo->handle = shared_handle;
		bo->alloc_size = dma_buf_size;
		break;

	case amdgpu_bo_handle_type_kms:
		assert(0); /* unreachable */
	}

	/* Initialize it. */
	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
	pthread_mutex_unlock(&dev->bo_table_mutex);

	output->buf_handle = bo;
	output->alloc_size = bo->alloc_size;
	return 0;
}
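
/*
 * Usage sketch, not part of this file: importing a dma-buf fd through
 * amdgpu_bo_import(). Device setup with amdgpu_device_initialize() and the
 * origin of dmabuf_fd are assumptions; error handling is kept minimal.
 */
static int example_amdgpu_import_dmabuf(int drm_fd, int dmabuf_fd)
{
	amdgpu_device_handle dev;
	struct amdgpu_bo_import_result result;
	uint32_t major, minor;
	int r;

	r = amdgpu_device_initialize(drm_fd, &major, &minor, &dev);
	if (r)
		return r;

	r = amdgpu_bo_import(dev, amdgpu_bo_handle_type_dma_buf_fd,
			     (uint32_t)dmabuf_fd, &result);
	if (!r) {
		/* result.buf_handle and result.alloc_size are valid here;
		 * drop the reference again when done. */
		amdgpu_bo_free(result.buf_handle);
	}

	amdgpu_device_deinitialize(dev);
	return r;
}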
Code example #18
File: tegra.c Project: grate-driver/libdrm
drm_public
int drm_tegra_bo_from_dmabuf(struct drm_tegra_bo **bop, struct drm_tegra *drm,
			     int fd, uint32_t flags)
{
	struct drm_tegra_bo *dup;
	struct drm_tegra_bo *bo;
	uint32_t handle;
	uint32_t size;
	int err;

	if (!drm || !bop)
		return -EINVAL;

	pthread_mutex_lock(&table_lock);

	bo = calloc(1, sizeof(*bo));
	if (!bo) {
		err = -ENOMEM;
		goto unlock;
	}

	err = drmPrimeFDToHandle(drm->fd, fd, &handle);
	if (err) {
		free(bo);
		bo = NULL;
		goto unlock;
	}

	/* check handle table to see if BO is already open */
	dup = lookup_bo(drm->handle_table, handle);
	if (dup) {
		DBG_BO(dup, "success reused\n");
		free(bo);
		bo = dup;
		goto unlock;
	}

	errno = 0;
	/* lseek() to get bo size */
	size = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_CUR);
	/* store lseek() error number */
	err = -errno;

	atomic_set(&bo->ref, 1);
	bo->handle = handle;
	bo->flags = flags;
	bo->size = size;
	bo->drm = drm;

	VG_BO_ALLOC(bo);

	/* add ourself into the handle table: */
	drmHashInsert(drm->handle_table, handle, bo);

	/* handle lseek() error */
	if (err) {
		VDBG_BO(bo, "lseek failed %d (%s)\n", err, strerror(-err));
		drm_tegra_bo_free(bo);
		bo = NULL;
	} else {
		DBG_BO(bo, "success\n");
	}

unlock:
	pthread_mutex_unlock(&table_lock);

	*bop = bo;

	return err;
}
Code example #19
File: virgl_drm_winsys.c Project: grate-driver/mesa
static struct virgl_hw_res *
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                        struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res;
   uint32_t handle = whandle->handle;

   if (whandle->offset != 0) {
      fprintf(stderr, "attempt to import unsupported winsys offset %u\n",
              whandle->offset);
      return NULL;
   }

   mtx_lock(&qdws->bo_handles_mutex);

   if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
      if (res) {
         struct virgl_hw_res *r = NULL;
         virgl_drm_resource_reference(qdws, &r, res);
         goto done;
      }
   }

   if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      int r;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r) {
         res = NULL;
         goto done;
      }
   }

   res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
   fprintf(stderr, "resource %p for handle %d, pfd=%d\n", res, handle, whandle->handle);
   if (res) {
      struct virgl_hw_res *r = NULL;
      virgl_drm_resource_reference(qdws, &r, res);
      goto done;
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      res->bo_handle = handle;
   } else {
      fprintf(stderr, "gem open handle %d\n", handle);
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
   }
   res->name = handle;

   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* close */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;

   res->size = info_arg.size;
   res->stride = info_arg.stride;
   pipe_reference_init(&res->reference, 1);
   res->num_cs_references = 0;

   util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)handle, res);

done:
   mtx_unlock(&qdws->bo_handles_mutex);
   return res;
}