Example #1
static int exynos_alloc(struct hook_data *data) {
  struct exynos_device *device;
  struct exynos_bo *bo;
  struct exynos_page *pages;
  struct drm_prime_handle req = { 0 };
  unsigned i;

  const unsigned flags = 0;

  device = exynos_device_create(data->drm_fd);
  if (device == NULL) {
    fprintf(stderr, "[exynos_init] error: failed to create device from fd\n");
    return -1;
  }

  pages = calloc(data->num_pages, sizeof(struct exynos_page));
  if (pages == NULL) {
    fprintf(stderr, "[exynos_init] error: failed to allocate pages\n");
    goto fail_alloc;
  }

  for (i = 0; i < data->num_pages; ++i) {
    bo = exynos_bo_create(device, data->size, flags);
    if (bo == NULL) {
      fprintf(stderr, "[exynos_init] error: failed to create buffer object\n");
      goto fail;
    }

    req.handle = bo->handle;

    if (drmIoctl(data->drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req) < 0) {
      fprintf(stderr, "[exynos_init] error: failed to get fd from bo\n");
      exynos_bo_destroy(bo);
      goto fail;
    }

    /* Don't map the BO, since we don't access it through userspace. */

    pages[i].bo = bo;
    pages[i].fd = req.fd;
    pages[i].base = data;

    pages[i].used = false;
    pages[i].clear = true;
  }

  if (vconf.use_screen == 1) {
    const uint32_t pixel_format = (data->bpp == 2) ? DRM_FORMAT_RGB565 : DRM_FORMAT_XRGB8888;
    uint32_t handles[4] = {0}, pitches[4] = {0}, offsets[4] = {0};

    pitches[0] = data->pitch;
    offsets[0] = 0;

    for (i = 0; i < data->num_pages; ++i) {
      handles[0] = pages[i].bo->handle;

      if (drmModeAddFB2(data->drm_fd, data->width, data->height,
                        pixel_format, handles, pitches, offsets,
                        &pages[i].buf_id, flags)) {
        fprintf(stderr, "[exynos_init] error: failed to add bo %u to fb\n", i);
        goto fail;
      }
    }
  }

  if (vconf.use_screen == 1) {
    /* Setup CRTC: display the last allocated page. */
    if (drmModeSetCrtc(data->drm_fd, data->drm->crtc_id, pages[data->num_pages - 1].buf_id,
                       0, 0, &data->drm->connector_id, 1, data->drm->mode)) {
      fprintf(stderr, "[exynos_init] error: drmModeSetCrtc failed\n");
      goto fail;
    }
  }

  data->pages = pages;
  data->device = device;

  return 0;

fail:
  clean_up_pages(pages, data->num_pages);

fail_alloc:
  exynos_device_destroy(device);

  return -1;
}
uint32_t
update_buffer_nativesurface(struct ivi_share_nativesurface *p_nativesurface)
{
    if (NULL == p_nativesurface || NULL == p_nativesurface->surface) {
        return IVI_SHAREBUFFER_NOT_AVAILABLE;
    }

    struct drm_backend *backend =
        (struct drm_backend*)p_nativesurface->surface->compositor->backend;
    if (NULL == backend) {
        return IVI_SHAREBUFFER_NOT_AVAILABLE;
    }

    struct weston_buffer *buffer = p_nativesurface->surface->buffer_ref.buffer;
    if (!buffer) {
        return IVI_SHAREBUFFER_NOT_AVAILABLE;
    }

    struct gbm_bo *bo = gbm_bo_import(backend->gbm, GBM_BO_IMPORT_WL_BUFFER,
                                      buffer->legacy_buffer, GBM_BO_USE_SCANOUT);
    if (!bo) {
        weston_log("failed to import gbm_bo\n");
        return IVI_SHAREBUFFER_INVALID;
    }

    struct drm_gem_flink flink = {0};
    flink.handle = gbm_bo_get_handle(bo).u32;
    if (drmIoctl(gbm_device_get_fd(backend->gbm), DRM_IOCTL_GEM_FLINK, &flink) != 0) {
        weston_log("DRM_IOCTL_GEM_FLINK failed\n");
        gbm_bo_destroy(bo);
        return IVI_SHAREBUFFER_INVALID;
    }

    uint32_t name   = flink.name;
    uint32_t width  = gbm_bo_get_width(bo);
    uint32_t height = gbm_bo_get_height(bo);
    uint32_t stride = gbm_bo_get_stride(bo);
    uint32_t format = IVI_SHARE_SURFACE_FORMAT_ARGB8888;
    uint32_t ret    = IVI_SHAREBUFFER_STABLE;

    if (name != p_nativesurface->name) {
        ret |= IVI_SHAREBUFFER_DAMAGE;
    }
    if (width != p_nativesurface->width) {
        ret |= IVI_SHAREBUFFER_CONFIGURE;
    }
    if (height != p_nativesurface->height) {
        ret |= IVI_SHAREBUFFER_CONFIGURE;
    }
    if (stride != p_nativesurface->stride) {
        ret |= IVI_SHAREBUFFER_CONFIGURE;
    }

    p_nativesurface->name   = name;
    p_nativesurface->width  = width;
    p_nativesurface->height = height;
    p_nativesurface->stride = stride;
    p_nativesurface->format = format;

    gbm_bo_destroy(bo);

    return ret;
}
static int
blit(int fd, uint32_t dst, uint32_t src, uint32_t *all_bo, int n_bo)
{
	uint32_t batch[12];
	struct drm_i915_gem_relocation_entry reloc[2];
	struct drm_i915_gem_exec_object2 *obj;
	struct drm_i915_gem_execbuffer2 exec;
	uint32_t handle;
	int n, ret, i=0;

	batch[i++] = XY_SRC_COPY_BLT_CMD |
		  XY_SRC_COPY_BLT_WRITE_ALPHA |
		  XY_SRC_COPY_BLT_WRITE_RGB;
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i - 1] |= 8;
	else
		batch[i - 1] |= 6;
	batch[i++] = (3 << 24) | /* 32 bits */
		  (0xcc << 16) | /* copy ROP */
		  WIDTH*4;
	batch[i++] = 0; /* dst x1,y1 */
	batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
	batch[i++] = 0; /* dst reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0;
	batch[i++] = 0; /* src x1,y1 */
	batch[i++] = WIDTH*4;
	batch[i++] = 0; /* src reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0;
	batch[i++] = MI_BATCH_BUFFER_END;
	batch[i++] = MI_NOOP;

	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, sizeof(batch));

	reloc[0].target_handle = dst;
	reloc[0].delta = 0;
	reloc[0].offset = 4 * sizeof(batch[0]);
	reloc[0].presumed_offset = 0;
	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;

	reloc[1].target_handle = src;
	reloc[1].delta = 0;
	reloc[1].offset = 7 * sizeof(batch[0]);
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		reloc[1].offset += sizeof(batch[0]);
	reloc[1].presumed_offset = 0;
	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[1].write_domain = 0;

	obj = calloc(n_bo + 1, sizeof(*obj));
	igt_assert(obj);
	for (n = 0; n < n_bo; n++)
		obj[n].handle = all_bo[n];
	obj[n].handle = handle;
	obj[n].relocation_count = 2;
	obj[n].relocs_ptr = (uintptr_t)reloc;

	exec.buffers_ptr = (uintptr_t)obj;
	exec.buffer_count = n_bo + 1;
	exec.batch_start_offset = 0;
	exec.batch_len = i * 4;
	exec.DR1 = exec.DR4 = 0;
	exec.num_cliprects = 0;
	exec.cliprects_ptr = 0;
	exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
	i915_execbuffer2_set_context_id(exec, 0);
	exec.rsvd2 = 0;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	if (ret)
		ret = errno;

	gem_close(fd, handle);
	free(obj);

	return ret;
}
Example #4
drmModeResPtr drmModeGetResources(int fd)
{
	struct drm_mode_card_res res, counts;
	drmModeResPtr r = 0;

retry:
	memset(&res, 0, sizeof(struct drm_mode_card_res));
	if (drmIoctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))
		return 0;

	counts = res;

	if (res.count_fbs) {
		res.fb_id_ptr = VOID2U64(drmMalloc(res.count_fbs*sizeof(uint32_t)));
		if (!res.fb_id_ptr)
			goto err_allocs;
	}
	if (res.count_crtcs) {
		res.crtc_id_ptr = VOID2U64(drmMalloc(res.count_crtcs*sizeof(uint32_t)));
		if (!res.crtc_id_ptr)
			goto err_allocs;
	}
	if (res.count_connectors) {
		res.connector_id_ptr = VOID2U64(drmMalloc(res.count_connectors*sizeof(uint32_t)));
		if (!res.connector_id_ptr)
			goto err_allocs;
	}
	if (res.count_encoders) {
		res.encoder_id_ptr = VOID2U64(drmMalloc(res.count_encoders*sizeof(uint32_t)));
		if (!res.encoder_id_ptr)
			goto err_allocs;
	}

	if (drmIoctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))
		goto err_allocs;

	/* The number of available connectors etc. may have changed with a
	 * hotplug event between the two ioctls, in which case the field is
	 * silently ignored by the kernel.
	 */
	if (counts.count_fbs < res.count_fbs ||
	    counts.count_crtcs < res.count_crtcs ||
	    counts.count_connectors < res.count_connectors ||
	    counts.count_encoders < res.count_encoders)
	{
		drmFree(U642VOID(res.fb_id_ptr));
		drmFree(U642VOID(res.crtc_id_ptr));
		drmFree(U642VOID(res.connector_id_ptr));
		drmFree(U642VOID(res.encoder_id_ptr));

		goto retry;
	}

	/*
	 * Copy the results into a drmModeRes that is returned to the caller.
	 */
	if (!(r = drmMalloc(sizeof(*r))))
		goto err_allocs;

	r->min_width     = res.min_width;
	r->max_width     = res.max_width;
	r->min_height    = res.min_height;
	r->max_height    = res.max_height;
	r->count_fbs     = res.count_fbs;
	r->count_crtcs   = res.count_crtcs;
	r->count_connectors = res.count_connectors;
	r->count_encoders = res.count_encoders;

	r->fbs        = drmAllocCpy(U642VOID(res.fb_id_ptr), res.count_fbs, sizeof(uint32_t));
	r->crtcs      = drmAllocCpy(U642VOID(res.crtc_id_ptr), res.count_crtcs, sizeof(uint32_t));
	r->connectors = drmAllocCpy(U642VOID(res.connector_id_ptr), res.count_connectors, sizeof(uint32_t));
	r->encoders   = drmAllocCpy(U642VOID(res.encoder_id_ptr), res.count_encoders, sizeof(uint32_t));
	if ((res.count_fbs && !r->fbs) ||
	    (res.count_crtcs && !r->crtcs) ||
	    (res.count_connectors && !r->connectors) ||
	    (res.count_encoders && !r->encoders))
	{
		drmFree(r->fbs);
		drmFree(r->crtcs);
		drmFree(r->connectors);
		drmFree(r->encoders);
		drmFree(r);
		r = 0;
	}

err_allocs:
	drmFree(U642VOID(res.fb_id_ptr));
	drmFree(U642VOID(res.crtc_id_ptr));
	drmFree(U642VOID(res.connector_id_ptr));
	drmFree(U642VOID(res.encoder_id_ptr));

	return r;
}
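
A usage sketch (not part of the original example): the ID arrays returned by drmModeGetResources are the usual starting point for probing outputs, and the result must be released with drmModeFreeResources. Assuming an already-open DRM fd:

#include <stdio.h>
#include <xf86drmMode.h>

static void list_connectors(int fd)
{
	drmModeResPtr res = drmModeGetResources(fd);
	if (!res)
		return;

	/* Walk the connector IDs reported by the kernel. */
	for (int i = 0; i < res->count_connectors; i++) {
		drmModeConnectorPtr conn =
			drmModeGetConnector(fd, res->connectors[i]);
		if (!conn)
			continue;
		printf("connector %u: %sconnected\n", conn->connector_id,
		       conn->connection == DRM_MODE_CONNECTED ? "" : "not ");
		drmModeFreeConnector(conn);
	}

	drmModeFreeResources(res);
}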
Example #5
/* Called under table_lock */
static void bo_del(struct fd_bo *bo)
{
	if (bo->map)
		munmap(bo->map, bo->size);

	/* TODO: do bo's in the bucket list get removed from the
	 * handle table?
	 */

	if (bo->handle) {
		struct drm_gem_close req = {
				.handle = bo->handle,
		};
		drmHashDelete(bo->dev->handle_table, bo->handle);
		if (bo->name)
			drmHashDelete(bo->dev->name_table, bo->name);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	bo->funcs->destroy(bo);
}

int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		pthread_mutex_lock(&table_lock);
		set_name(bo, req.name);
		pthread_mutex_unlock(&table_lock);
	}

	*name = bo->name;

	return 0;
}

uint32_t fd_bo_handle(struct fd_bo *bo)
{
	return bo->handle;
}

uint32_t fd_bo_size(struct fd_bo *bo)
{
	return bo->size;
}

void * fd_bo_map(struct fd_bo *bo)
{
	if (!bo->map) {
		uint64_t offset;
		int ret;

		ret = bo->funcs->offset(bo, &offset);
		if (ret) {
			return NULL;
		}

		bo->map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
				bo->dev->fd, offset);
		if (bo->map == MAP_FAILED) {
			ERROR_MSG("mmap failed: %s", strerror(errno));
			bo->map = NULL;
		}
	}
	return bo->map;
}

/* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
	return bo->funcs->cpu_prep(bo, pipe, op);
}

void fd_bo_cpu_fini(struct fd_bo *bo)
{
	bo->funcs->cpu_fini(bo);
}
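
A hedged sketch of how the map/prep/fini helpers above are meant to be used together; the DRM_FREEDRENO_PREP_WRITE flag name comes from the public freedreno_drmif.h header and is an assumption here, since the excerpt does not show it:

/* Fill a BO from the CPU, fencing against GPU access. */
static int fill_bo(struct fd_bo *bo, struct fd_pipe *pipe, uint8_t byte)
{
	void *ptr = fd_bo_map(bo);
	if (!ptr)
		return -1;

	/* Block until the GPU is done with the buffer... */
	if (fd_bo_cpu_prep(bo, pipe, DRM_FREEDRENO_PREP_WRITE))
		return -1;

	memset(ptr, byte, fd_bo_size(bo));

	/* ...and signal that CPU access is finished. */
	fd_bo_cpu_fini(bo);
	return 0;
}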
/* Simulates SNA behaviour using negative self-relocations for
 * STATE_BASE_ADDRESS command packets. If they wrap around (to values greater
 * than the total size of the GTT), the GPU will hang.
 * See https://bugs.freedesktop.org/show_bug.cgi?id=78533
 */
static int negative_reloc(int fd, unsigned flags)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 gem_exec[2];
	struct drm_i915_gem_relocation_entry gem_reloc[1000];
	uint64_t gtt_max = get_page_table_size(fd);
	uint32_t buf[1024] = {MI_BATCH_BUFFER_END};
	int i;

#define BIAS (256*1024)

	igt_require(intel_gen(intel_get_drm_devid(fd)) >= 7);

	memset(gem_exec, 0, sizeof(gem_exec));
	gem_exec[0].handle = gem_create(fd, 4096);
	gem_write(fd, gem_exec[0].handle, 0, buf, 8);

	memset(gem_reloc, 0, sizeof(gem_reloc));
	gem_reloc[0].offset = 1024;
	gem_reloc[0].delta = 0;
	gem_reloc[0].target_handle = gem_exec[0].handle;
	gem_reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;

	gem_exec[1].handle = gem_create(fd, 4096);
	gem_write(fd, gem_exec[1].handle, 0, buf, 8);
	gem_exec[1].relocation_count = 1;
	gem_exec[1].relocs_ptr = (uintptr_t)gem_reloc;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)gem_exec;
	execbuf.buffer_count = 2;
	execbuf.batch_len = 8;

	do_or_die(drmIoctl(fd,
			   DRM_IOCTL_I915_GEM_EXECBUFFER2,
			   &execbuf));
	gem_close(fd, gem_exec[1].handle);

	igt_info("Found offset %lld for 4k batch\n", (long long)gem_exec[0].offset);
	/*
	 * Ideally we'd like to be able to control where the kernel is going to
	 * place the buffer. We don't SKIP here because it causes the test
	 * to "randomly" flip-flop between the SKIP and PASS states.
	 */
	if (gem_exec[0].offset < BIAS) {
		igt_info("Offset is below BIAS, not testing anything\n");
		return 0;
	}

	memset(gem_reloc, 0, sizeof(gem_reloc));
	for (i = 0; i < sizeof(gem_reloc)/sizeof(gem_reloc[0]); i++) {
		gem_reloc[i].offset = 8 + 4*i;
		gem_reloc[i].delta = -BIAS*i/1024;
		gem_reloc[i].target_handle = flags & USE_LUT ? 0 : gem_exec[0].handle;
		gem_reloc[i].read_domains = I915_GEM_DOMAIN_COMMAND;
	}

	gem_exec[0].relocation_count = sizeof(gem_reloc)/sizeof(gem_reloc[0]);
	gem_exec[0].relocs_ptr = (uintptr_t)gem_reloc;

	execbuf.buffer_count = 1;
	execbuf.flags = flags & USE_LUT;
	do_or_die(drmIoctl(fd,
			   DRM_IOCTL_I915_GEM_EXECBUFFER2,
			   &execbuf));

	igt_info("Batch is now at offset %lld\n", (long long)gem_exec[0].offset);

	gem_read(fd, gem_exec[0].handle, 0, buf, sizeof(buf));
	gem_close(fd, gem_exec[0].handle);

	for (i = 0; i < sizeof(gem_reloc)/sizeof(gem_reloc[0]); i++)
		igt_assert(buf[2 + i] < gtt_max);

	return 0;
}
Example #7
int
main(int argc, char **argv)
{
    int fd;
    uint32_t handle[MAX_VMS], stride[MAX_VMS];
    drmModeRes *resources;
    drmModeConnector *connector;
    drmModeEncoder *encoder;
    drmModeModeInfo *mode;
    int i;
    uint32_t fb_id[MAX_VMS], total_vms = 0, current_vm;

    struct drm_i915_gem_vgtfb create;

    fd = open("/dev/dri/card0", O_RDWR);
    if (fd < 0) {
        fprintf(stderr, "cannot open /dev/dri/card0\n");
        return -1;
    }
    drmSetMaster(fd);

    /* Find the first available connector with modes */

    resources = drmModeGetResources(fd);
    if (!resources) {
        fprintf(stderr, "drmModeGetResources failed\n");
        return -1;
    }

    for (i = 0; i < resources->count_connectors; i++) {
        connector = drmModeGetConnector(fd, resources->connectors[i]);
        if (connector == NULL)
            continue;

        if (connector->connection == DRM_MODE_CONNECTED &&
                connector->count_modes > 0)
            break;

        drmModeFreeConnector(connector);
    }

    if (i == resources->count_connectors) {
        fprintf(stderr, "No currently active connector found.\n");
        return -1;
    }

    printf("Using connector: %s\n", connector_type_str(connector->connector_type));

    for (i = 0; i < resources->count_encoders; i++) {
        encoder = drmModeGetEncoder(fd, resources->encoders[i]);

        if (encoder == NULL)
            continue;

        if (encoder->encoder_id == connector->encoder_id)
            break;

        drmModeFreeEncoder(encoder);
    }

    /* Prefer 1680x1050; if it is absent, the loop leaves `mode` pointing
     * at the last mode in the list. */
    for (i = 0; i < connector->count_modes; i++) {
        mode = &connector->modes[i];
        if (!strcmp(mode->name, "1680x1050"))
            break;
    }

    printf("Using mode: %s\n", mode->name);


    for (i = 0; i < (argc - 1) && i < MAX_VMS; i++) {
        int ret;
        create.vmid = atoi(argv[i+1]);

        printf("Requesting object for VM %d\n", create.vmid);

        if ((ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_VGTFB, &create))) {
            printf("drmIoctl failed for domain %d. ret = %d\n", create.vmid, ret);
            continue;
        }
        handle[i] = create.handle;
        stride[i] = create.stride;

        /* Create a KMS framebuffer handle to set a mode with */
        if ((ret = drmModeAddFB(fd, mode->hdisplay, mode->vdisplay, 24, 32, stride[i], handle[i], &fb_id[i]))) {
            printf("drmModeAddFB failed for handle %d. ret = %d\n", handle[i], ret);
            continue;
        }
        total_vms++;
    }

    drmDropMaster(fd);

    if (total_vms == 0)
        return -1;

    current_vm = 0;
    do {
        drmSetMaster(fd);
        drmModeSetCrtc(fd, encoder->crtc_id, fb_id[current_vm], 0, 0, &connector->connector_id, 1, mode);
        current_vm++;
        current_vm %= total_vms;
        drmDropMaster(fd);
    } while (getchar() != EOF);


    return 0;
}
Example #8
int drmModeRmFB(int fd, uint32_t bufferId)
{
	return drmIoctl(fd, DRM_IOCTL_MODE_RMFB, &bufferId);
}
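
For context (not in the original listing): drmModeRmFB is the teardown half of drmModeAddFB. A minimal sketch of the pairing, where width, height, pitch and handle are assumed to describe an already-created buffer:

uint32_t fb_id;

/* Register the buffer as a KMS framebuffer (24-bit depth, 32 bpp)... */
if (drmModeAddFB(fd, width, height, 24, 32, pitch, handle, &fb_id) == 0) {
	/* ...scan it out via drmModeSetCrtc()/drmModePageFlip()... */

	/* ...and remove it once it is no longer displayed. */
	drmModeRmFB(fd, fb_id);
}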
Example #9
static int gbm_rockchip_bo_create(struct gbm_bo *bo,
				  uint32_t width, uint32_t height,
				  uint32_t format, uint32_t flags)
{
	size_t plane;

	switch (format) {
	case GBM_FORMAT_NV12:
		width = ALIGN(width, 4);
		height = ALIGN(height, 4);
		bo->strides[0] = bo->strides[1] = width;
		bo->sizes[0] = height * bo->strides[0];
		bo->sizes[1] = height * bo->strides[1] / 2;
		bo->offsets[0] = bo->offsets[1] = 0;
		break;
	case GBM_FORMAT_XRGB8888:
	case GBM_FORMAT_ARGB8888:
	case GBM_FORMAT_ABGR8888:
		bo->strides[0] = gbm_stride_from_format(format, width);
		bo->sizes[0] = height * bo->strides[0];
		bo->offsets[0] = 0;
		break;
	default:
		fprintf(stderr, "minigbm: rockchip: unsupported format %4.4s\n",
			(char*)&format);
		assert(0);
		return -EINVAL;
	}

	int ret;
	for (plane = 0; plane < bo->num_planes; plane++) {
		size_t size = bo->sizes[plane];
		struct drm_rockchip_gem_create gem_create;

		memset(&gem_create, 0, sizeof(gem_create));
		gem_create.size = size;

		ret = drmIoctl(bo->gbm->fd, DRM_IOCTL_ROCKCHIP_GEM_CREATE,
			       &gem_create);
		if (ret) {
			fprintf(stderr, "minigbm: DRM_IOCTL_ROCKCHIP_GEM_CREATE failed "
					"(size=%zu)\n", size);
			goto cleanup_planes;
		}

		bo->handles[plane].u32 = gem_create.handle;
	}

	return 0;

cleanup_planes:
	for ( ; plane != 0; plane--) {
		struct drm_gem_close gem_close;
		memset(&gem_close, 0, sizeof(gem_close));
		gem_close.handle = bo->handles[plane - 1].u32;
		int gem_close_ret = drmIoctl(bo->gbm->fd, DRM_IOCTL_GEM_CLOSE,
					     &gem_close);
		if (gem_close_ret) {
			fprintf(stderr,
				"minigbm: DRM_IOCTL_GEM_CLOSE failed: %d\n",
				gem_close_ret);
		}
	}

	return ret;
}
Example #10
/*
 * Destroy an exynos buffer object.
 *
 * @bo: the exynos buffer object to be destroyed.
 */
drm_public void exynos_bo_destroy(struct exynos_bo *bo)
{
	if (!bo)
		return;

	if (bo->vaddr)
		munmap(bo->vaddr, bo->size);

	if (bo->handle) {
		struct drm_gem_close req = {
			.handle = bo->handle,
		};

		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	free(bo);
}


/*
 * Get an exynos buffer object from a gem global object name.
 *
 * @dev: an exynos device object.
 * @name: a gem global object name exported by another process.
 *
 * this interface is used to get an exynos buffer object from a gem
 * global object name sent by another process for buffer sharing.
 *
 * on success, returns an exynos buffer object, else NULL.
 */
drm_public struct exynos_bo *
exynos_bo_from_name(struct exynos_device *dev, uint32_t name)
{
	struct exynos_bo *bo;
	struct drm_gem_open req = {
		.name = name,
	};

	bo = calloc(sizeof(*bo), 1);
	if (!bo) {
		fprintf(stderr, "failed to allocate bo[%s].\n",
				strerror(errno));
		return NULL;
	}

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		fprintf(stderr, "failed to open gem object[%s].\n",
				strerror(errno));
		goto err_free_bo;
	}

	bo->dev = dev;
	bo->name = name;
	bo->handle = req.handle;

	return bo;

err_free_bo:
	free(bo);
	return NULL;
}

/*
 * Get a gem global object name from a gem object handle.
 *
 * @bo: an exynos buffer object containing the gem handle.
 * @name: filled with the gem global object name by the kernel driver.
 *
 * this interface is used to get a gem global object name from a gem object
 * handle for a buffer that is to be shared with another process.
 *
 * returns 0 on success, else a negative value.
 */
drm_public int exynos_bo_get_name(struct exynos_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
			.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			fprintf(stderr, "failed to get gem global name[%s].\n",
					strerror(errno));
			return ret;
		}

		bo->name = req.name;
	}

	*name = bo->name;

	return 0;
}
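
The flink helpers above are the two halves of one sharing flow: the exporting process publishes a global name, and the importing process turns that name back into a buffer object. A sketch of both sides; the IPC that carries the name between the processes is assumed:

/* Exporting process: obtain a global name for an existing bo. */
uint32_t name;
if (exynos_bo_get_name(bo, &name) == 0) {
	/* send `name` to the peer over some IPC channel */
}

/* Importing process: reconstruct a bo from the received name. */
struct exynos_bo *shared = exynos_bo_from_name(dev, name);
if (shared) {
	/* `shared` now refers to the same GEM object */
}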

drm_public uint32_t exynos_bo_handle(struct exynos_bo *bo)
{
	return bo->handle;
}

/*
 * Mmap a buffer to user space.
 *
 * @bo: an exynos buffer object containing a gem object handle to be
 *	mmapped to user space.
 *
 * on success, returns the mmapped user pointer, else NULL.
 */
drm_public void *exynos_bo_map(struct exynos_bo *bo)
{
	if (!bo->vaddr) {
		struct exynos_device *dev = bo->dev;
		struct drm_mode_map_dumb arg;
		void *map = NULL;
		int ret;

		memset(&arg, 0, sizeof(arg));
		arg.handle = bo->handle;

		ret = drmIoctl(dev->fd, DRM_IOCTL_MODE_MAP_DUMB, &arg);
		if (ret) {
			fprintf(stderr, "failed to map dumb buffer[%s].\n",
				strerror(errno));
			return NULL;
		}

		map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
				dev->fd, arg.offset);

		if (map != MAP_FAILED)
			bo->vaddr = map;
	}

	return bo->vaddr;
}

/*
 * Export gem object to dmabuf as file descriptor.
 *
 * @dev: exynos device object
 * @handle: gem handle to export as file descriptor of dmabuf
 * @fd: file descriptor returned from kernel
 *
 * @return: 0 on success, -1 on error, and errno will be set
 */
drm_public int
exynos_prime_handle_to_fd(struct exynos_device *dev, uint32_t handle, int *fd)
{
	return drmPrimeHandleToFD(dev->fd, handle, 0, fd);
}

/*
 * Import file descriptor into gem handle.
 *
 * @dev: exynos device object
 * @fd: file descriptor of dmabuf to import
 * @handle: gem handle returned from kernel
 *
 * @return: 0 on success, -1 on error, and errno will be set
 */
drm_public int
exynos_prime_fd_to_handle(struct exynos_device *dev, int fd, uint32_t *handle)
{
	return drmPrimeFDToHandle(dev->fd, fd, handle);
}
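
The two PRIME wrappers mirror each other; a purely illustrative sketch of a round trip within one process:

int dmabuf_fd;
uint32_t handle;

/* Export a gem handle as a dma-buf file descriptor... */
if (exynos_prime_handle_to_fd(dev, bo->handle, &dmabuf_fd) == 0) {
	/* ...and import it back, yielding a gem handle again. */
	if (exynos_prime_fd_to_handle(dev, dmabuf_fd, &handle) == 0) {
		/* `handle` references the same buffer as bo->handle */
	}
	close(dmabuf_fd);
}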



/*
 * Request Wireless Display connection or disconnection.
 *
 * @dev: an exynos device object.
 * @connect: indicates whether connection or disconnection is requested.
 * @ext: indicates whether the edid data includes extension data or not.
 * @edid: a pointer to edid data from the Wireless Display device.
 *
 * this interface is used to request that the Virtual Display driver
 * connect or disconnect. for this, the user should get edid data from the
 * Wireless Display device and then send that data to the kernel driver
 * with the connection request.
 *
 * returns 0 on success, else a negative value.
 */
drm_public int
exynos_vidi_connection(struct exynos_device *dev, uint32_t connect,
		       uint32_t ext, void *edid)
{
	struct drm_exynos_vidi_connection req = {
		.connection	= connect,
		.extensions	= ext,
		.edid		= (uint64_t)(uintptr_t)edid,
	};
	int ret;

	ret = drmIoctl(dev->fd, DRM_IOCTL_EXYNOS_VIDI_CONNECTION, &req);
	if (ret) {
		fprintf(stderr, "failed to request vidi connection[%s].\n",
				strerror(errno));
		return ret;
	}

	return 0;
}
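
Following the comment above, a hedged sketch of a connect request; where the EDID blob comes from, and whether it carries extension blocks, depends on the Wireless Display device and is assumed here:

uint8_t edid[256];	/* EDID read from the Wireless Display sink (assumed) */
uint32_t has_ext = 1;	/* the blob includes extension blocks (assumed) */

if (exynos_vidi_connection(dev, 1 /* connect */, has_ext, edid) == 0) {
	/* the virtual display is now connected */
}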

static void
exynos_handle_vendor(int fd, struct drm_event *e, void *ctx)
{
	struct drm_exynos_g2d_event *g2d;
	struct exynos_event_context *ectx = ctx;

	switch (e->type) {
		case DRM_EXYNOS_G2D_EVENT:
			if (ectx->version < 1 || ectx->g2d_event_handler == NULL)
				break;
			g2d = (struct drm_exynos_g2d_event *)e;
			ectx->g2d_event_handler(fd, g2d->cmdlist_no, g2d->tv_sec,
						g2d->tv_usec, U642VOID(g2d->user_data));
			break;

		default:
			break;
	}
}

drm_public int
exynos_handle_event(struct exynos_device *dev, struct exynos_event_context *ctx)
{
	char buffer[1024];
	int len, i;
	struct drm_event *e;
	struct drm_event_vblank *vblank;
	drmEventContextPtr evctx = &ctx->base;

	/* The DRM read semantics guarantees that we always get only
	 * complete events. */
	len = read(dev->fd, buffer, sizeof buffer);
	if (len == 0)
		return 0;
	if (len < (int)sizeof *e)
		return -1;

	i = 0;
	while (i < len) {
		e = (struct drm_event *)(buffer + i);
		switch (e->type) {
		case DRM_EVENT_VBLANK:
			if (evctx->version < 1 ||
			    evctx->vblank_handler == NULL)
				break;
			vblank = (struct drm_event_vblank *) e;
			evctx->vblank_handler(dev->fd,
					      vblank->sequence,
					      vblank->tv_sec,
					      vblank->tv_usec,
					      U642VOID (vblank->user_data));
			break;
		case DRM_EVENT_FLIP_COMPLETE:
			if (evctx->version < 2 ||
			    evctx->page_flip_handler == NULL)
				break;
			vblank = (struct drm_event_vblank *) e;
			evctx->page_flip_handler(dev->fd,
						 vblank->sequence,
						 vblank->tv_sec,
						 vblank->tv_usec,
						 U642VOID (vblank->user_data));
			break;
		default:
			exynos_handle_vendor(dev->fd, e, evctx);
			break;
		}
		i += e->length;
	}

	return 0;
}
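
exynos_handle_event is meant to run when the DRM fd becomes readable. A hedged sketch of the surrounding loop; the poll(2)-based dispatch and the handler wiring are assumptions, since the excerpt does not show the caller:

struct exynos_event_context ctx = {
	.base = { .version = DRM_EVENT_CONTEXT_VERSION },
	.version = 1,
	/* .base.vblank_handler, .g2d_event_handler = ... as needed */
};
struct pollfd pfd = { .fd = dev->fd, .events = POLLIN };

for (;;) {
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
		exynos_handle_event(dev, &ctx);
}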
static void blt_copy(int fd, uint32_t dst, uint32_t src)
{
	uint32_t batch[1024], *b = batch;
	struct drm_i915_gem_relocation_entry reloc[2], *r = reloc;
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_execbuffer2 exec;
	uint32_t handle;
	int ret;

	*b++ = (XY_SRC_COPY_BLT_CMD |
		XY_SRC_COPY_BLT_WRITE_ALPHA |
		XY_SRC_COPY_BLT_WRITE_RGB);
	*b++ = 3 << 24 | 0xcc << 16 | WIDTH * 4;
	*b++ = 0;
	*b++ = HEIGHT << 16 | WIDTH;
	*b = fill_reloc(r++, b-batch, dst,
			I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER); b++;
	*b++ = 0;
	*b++ = WIDTH*4;
	*b = fill_reloc(r++, b-batch, src, I915_GEM_DOMAIN_RENDER, 0); b++;

	*b++ = MI_BATCH_BUFFER_END;
	if ((b - batch) & 1)
		*b++ = 0;

	igt_assert(b - batch <= 1024);
	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, (b-batch)*sizeof(batch[0]));

	igt_assert(r-reloc == 2);

	obj[0].handle = dst;
	obj[0].relocation_count = 0;
	obj[0].relocs_ptr = 0;
	obj[0].alignment = 0;
	obj[0].offset = 0;
	obj[0].flags = EXEC_OBJECT_NEEDS_FENCE;
	obj[0].rsvd1 = 0;
	obj[0].rsvd2 = 0;

	obj[1].handle = src;
	obj[1].relocation_count = 0;
	obj[1].relocs_ptr = 0;
	obj[1].alignment = 0;
	obj[1].offset = 0;
	obj[1].flags = EXEC_OBJECT_NEEDS_FENCE;
	obj[1].rsvd1 = 0;
	obj[1].rsvd2 = 0;

	obj[2].handle = handle;
	obj[2].relocation_count = 2;
	obj[2].relocs_ptr = (uintptr_t)reloc;
	obj[2].alignment = 0;
	obj[2].offset = 0;
	obj[2].flags = 0;
	obj[2].rsvd1 = obj[2].rsvd2 = 0;

	exec.buffers_ptr = (uintptr_t)obj;
	exec.buffer_count = 3;
	exec.batch_start_offset = 0;
	exec.batch_len = (b-batch)*sizeof(batch[0]);
	exec.DR1 = exec.DR4 = 0;
	exec.num_cliprects = 0;
	exec.cliprects_ptr = 0;
	exec.flags = 0;
	i915_execbuffer2_set_context_id(exec, 0);
	exec.rsvd2 = 0;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	while (ret && errno == EBUSY) {
		drmCommandNone(fd, DRM_I915_GEM_THROTTLE);
		ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	}
	igt_assert(ret == 0);

	gem_close(fd, handle);
}
Example #12
static void modeset_destroy_fb(int fd, struct modeset_buf *buf)
{
    if (buf->map) {
        munmap(buf->map, buf->size);
    }
    if (buf->fb) {
        drmModeRmFB(fd, buf->fb);
    }
    if (buf->handle) {
        struct drm_mode_destroy_dumb dreq = {
            .handle = buf->handle,
        };
        drmIoctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &dreq);
    }
}

static int modeset_create_fb(struct vo *vo, int fd, struct modeset_buf *buf)
{
    int ret = 0;

    buf->handle = 0;

    // create dumb buffer
    struct drm_mode_create_dumb creq = {
        .width = buf->width,
        .height = buf->height,
        .bpp = 32,
    };
    ret = drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
    if (ret < 0) {
        MP_ERR(vo, "Cannot create dumb buffer: %s\n", mp_strerror(errno));
        ret = -errno;
        goto end;
    }
    buf->stride = creq.pitch;
    buf->size = creq.size;
    buf->handle = creq.handle;

    // create framebuffer object for the dumb-buffer
    ret = drmModeAddFB(fd, buf->width, buf->height, 24, 32, buf->stride,
                       buf->handle, &buf->fb);
    if (ret) {
        MP_ERR(vo, "Cannot create framebuffer: %s\n", mp_strerror(errno));
        ret = -errno;
        goto end;
    }

    // prepare buffer for memory mapping
    struct drm_mode_map_dumb mreq = {
        .handle = buf->handle,
    };
    ret = drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
    if (ret) {
        MP_ERR(vo, "Cannot map dumb buffer: %s\n", mp_strerror(errno));
        ret = -errno;
        goto end;
    }

    // perform actual memory mapping
    buf->map = mmap(0, buf->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, mreq.offset);
    if (buf->map == MAP_FAILED) {
        MP_ERR(vo, "Cannot map dumb buffer: %s\n", mp_strerror(errno));
        ret = -errno;
        goto end;
    }

    memset(buf->map, 0, buf->size);

end:
    if (ret == 0) {
        return 0;
    }

    modeset_destroy_fb(fd, buf);
    return ret;
}

static int modeset_find_crtc(struct vo *vo, int fd, drmModeRes *res,
                             drmModeConnector *conn, struct modeset_dev *dev)
{
    for (unsigned int i = 0; i < conn->count_encoders; ++i) {
        drmModeEncoder *enc = drmModeGetEncoder(fd, conn->encoders[i]);
        if (!enc) {
            MP_WARN(vo, "Cannot retrieve encoder %u:%u: %s\n",
                    i, conn->encoders[i], mp_strerror(errno));
            continue;
        }

        // iterate all global CRTCs
        for (unsigned int j = 0; j < res->count_crtcs; ++j) {
            // check whether this CRTC works with the encoder
            if (!(enc->possible_crtcs & (1 << j)))
                continue;

            dev->enc = enc;
            dev->crtc = enc->crtc_id;
            return 0;
        }

        drmModeFreeEncoder(enc);
    }

    MP_ERR(vo, "Connector %u has no suitable CRTC\n", conn->connector_id);
    return -ENOENT;
}

static bool is_connector_valid(struct vo *vo, int conn_id,
                               drmModeConnector *conn, bool silent)
{
    if (!conn) {
        if (!silent) {
            MP_ERR(vo, "Cannot get connector %d: %s\n", conn_id,
                   mp_strerror(errno));
        }
        return false;
    }

    if (conn->connection != DRM_MODE_CONNECTED) {
        if (!silent) {
            MP_ERR(vo, "Connector %d is disconnected\n", conn_id);
        }
        return false;
    }

    if (conn->count_modes == 0) {
        if (!silent) {
            MP_ERR(vo, "Connector %d has no valid modes\n", conn_id);
        }
        return false;
    }

    return true;
}

static int modeset_prepare_dev(struct vo *vo, int fd, int conn_id,
                               struct modeset_dev **out)
{
    struct modeset_dev *dev = NULL;
    drmModeConnector *conn = NULL;

    int ret = 0;
    *out = NULL;

    drmModeRes *res = drmModeGetResources(fd);
    if (!res) {
        MP_ERR(vo, "Cannot retrieve DRM resources: %s\n", mp_strerror(errno));
        ret = -errno;
        goto end;
    }

    if (conn_id == -1) {
        // get the first connected connector
        for (int i = 0; i < res->count_connectors; i++) {
            conn = drmModeGetConnector(fd, res->connectors[i]);
            if (is_connector_valid(vo, i, conn, true)) {
                conn_id = i;
                break;
            }
            if (conn) {
                drmModeFreeConnector(conn);
                conn = NULL;
            }
        }
        if (conn_id == -1) {
            MP_ERR(vo, "No connected connectors found\n");
            ret = -ENODEV;
            goto end;
        }
    }

    if (conn_id < 0 || conn_id >= res->count_connectors) {
        MP_ERR(vo, "Bad connector ID. Max valid connector ID = %u\n",
               res->count_connectors - 1);
        ret = -ENODEV;
        goto end;
    }

    if (conn) {
        drmModeFreeConnector(conn);
    }
    conn = drmModeGetConnector(fd, res->connectors[conn_id]);
    if (!is_connector_valid(vo, conn_id, conn, false)) {
        ret = -ENODEV;
        goto end;
    }

    dev = talloc_zero(vo->priv, struct modeset_dev);
    dev->conn = conn->connector_id;
    dev->front_buf = 0;
    dev->mode = conn->modes[0];
    dev->bufs[0].width = conn->modes[0].hdisplay;
    dev->bufs[0].height = conn->modes[0].vdisplay;
    dev->bufs[1].width = conn->modes[0].hdisplay;
    dev->bufs[1].height = conn->modes[0].vdisplay;

    MP_INFO(vo, "Connector using mode %ux%u\n",
            dev->bufs[0].width, dev->bufs[0].height);

    ret = modeset_find_crtc(vo, fd, res, conn, dev);
    if (ret) {
        MP_ERR(vo, "Connector %d has no valid CRTC\n", conn_id);
        goto end;
    }

    for (unsigned int i = 0; i < BUF_COUNT; i++) {
        ret = modeset_create_fb(vo, fd, &dev->bufs[i]);
        if (ret) {
            MP_ERR(vo, "Cannot create framebuffer for connector %d\n",
                   conn_id);
            for (unsigned int j = 0; j < i; j++) {
                modeset_destroy_fb(fd, &dev->bufs[j]);
            }
            goto end;
        }
    }

end:
    if (conn) {
        drmModeFreeConnector(conn);
        conn = NULL;
    }
    if (res) {
        drmModeFreeResources(res);
        res = NULL;
    }
    if (ret == 0) {
        *out = dev;
    } else {
        talloc_free(dev);
    }
    return ret;
}

static void modeset_page_flipped(int fd, unsigned int frame, unsigned int sec,
                                 unsigned int usec, void *data)
{
    struct priv *p = data;
    p->pflip_happening = false;
}



static int setup_vo_crtc(struct vo *vo)
{
    struct priv *p = vo->priv;
    if (p->active)
        return 0;
    p->old_crtc = drmModeGetCrtc(p->fd, p->dev->crtc);
    int ret = drmModeSetCrtc(p->fd, p->dev->crtc,
                          p->dev->bufs[p->dev->front_buf + BUF_COUNT - 1].fb,
                          0, 0, &p->dev->conn, 1, &p->dev->mode);
    p->active = true;
    return ret;
}

static void release_vo_crtc(struct vo *vo)
{
    struct priv *p = vo->priv;

    if (!p->active)
        return;
    p->active = false;

    // wait for current page flip
    while (p->pflip_happening) {
        int ret = drmHandleEvent(p->fd, &p->ev);
        if (ret) {
            MP_ERR(vo, "drmHandleEvent failed: %i\n", ret);
            break;
        }
    }

    if (p->old_crtc) {
        drmModeSetCrtc(p->fd,
                       p->old_crtc->crtc_id,
                       p->old_crtc->buffer_id,
                       p->old_crtc->x,
                       p->old_crtc->y,
                       &p->dev->conn,
                       1,
                       &p->dev->mode);
        drmModeFreeCrtc(p->old_crtc);
        p->old_crtc = NULL;
    }
}

static void release_vt(void *data)
{
    struct vo *vo = data;
    release_vo_crtc(vo);
    if (USE_MASTER) {
        //this function enables support for switching to x, weston etc.
        //however, for whatever reason, it can be called only by root users.
        //until things change, this is commented.
        struct priv *p = vo->priv;
        if (drmDropMaster(p->fd)) {
            MP_WARN(vo, "Failed to drop DRM master: %s\n", mp_strerror(errno));
        }
    }
}

static void acquire_vt(void *data)
{
    struct vo *vo = data;
    if (USE_MASTER) {
        struct priv *p = vo->priv;
        if (drmSetMaster(p->fd)) {
            MP_WARN(vo, "Failed to acquire DRM master: %s\n", mp_strerror(errno));
        }
    }

    setup_vo_crtc(vo);
}



static int wait_events(struct vo *vo, int64_t until_time_us)
{
    struct priv *p = vo->priv;
    int64_t wait_us = until_time_us - mp_time_us();
    int timeout_ms = MPCLAMP((wait_us + 500) / 1000, 0, 10000);
    vt_switcher_poll(&p->vt_switcher, timeout_ms);
    return 0;
}

static void wakeup(struct vo *vo)
{
    struct priv *p = vo->priv;
    vt_switcher_interrupt_poll(&p->vt_switcher);
}

static int reconfig(struct vo *vo, struct mp_image_params *params, int flags)
{
    struct priv *p = vo->priv;

    vo->dwidth = p->device_w;
    vo->dheight = p->device_h;
    vo_get_src_dst_rects(vo, &p->src, &p->dst, &p->osd);

    int32_t w = p->dst.x1 - p->dst.x0;
    int32_t h = p->dst.y1 - p->dst.y0;

    // p->osd contains the parameters assuming OSD rendering in window
    // coordinates, but OSD can only be rendered in the intersection
    // between window and video rectangle (i.e. not into panscan borders).
    p->osd.w = w;
    p->osd.h = h;
    p->osd.mt = MPMIN(0, p->osd.mt);
    p->osd.mb = MPMIN(0, p->osd.mb);
    p->osd.mr = MPMIN(0, p->osd.mr);
    p->osd.ml = MPMIN(0, p->osd.ml);

    p->x = (p->device_w - w) >> 1;
    p->y = (p->device_h - h) >> 1;

    mp_sws_set_from_cmdline(p->sws, vo->opts->sws_opts);
    p->sws->src = *params;
    p->sws->dst = (struct mp_image_params) {
        .imgfmt = IMGFMT_BGR0,
        .w = w,
        .h = h,
        .d_w = w,
        .d_h = h,
    };

    talloc_free(p->cur_frame);
    p->cur_frame = mp_image_alloc(IMGFMT_BGR0, p->device_w, p->device_h);
    mp_image_params_guess_csp(&p->sws->dst);
    mp_image_set_params(p->cur_frame, &p->sws->dst);

    struct modeset_buf *buf = p->dev->bufs;
    memset(buf[0].map, 0, buf[0].size);
    memset(buf[1].map, 0, buf[1].size);

    if (mp_sws_reinit(p->sws) < 0)
        return -1;

    vo->want_redraw = true;
    return 0;
}

static void draw_image(struct vo *vo, mp_image_t *mpi)
{
    struct priv *p = vo->priv;

    if (p->active) {
        struct mp_image src = *mpi;
        struct mp_rect src_rc = p->src;
        src_rc.x0 = MP_ALIGN_DOWN(src_rc.x0, mpi->fmt.align_x);
        src_rc.y0 = MP_ALIGN_DOWN(src_rc.y0, mpi->fmt.align_y);
        mp_image_crop_rc(&src, src_rc);
        mp_sws_scale(p->sws, p->cur_frame, &src);
        osd_draw_on_image(vo->osd, p->osd, src.pts, 0, p->cur_frame);

        struct modeset_buf *front_buf = &p->dev->bufs[p->dev->front_buf];
        int32_t shift = (p->device_w * p->y + p->x) * 4;
        memcpy_pic(front_buf->map + shift,
                   p->cur_frame->planes[0],
                   (p->dst.x1 - p->dst.x0) * 4,
                   p->dst.y1 - p->dst.y0,
                   p->device_w * 4,
                   p->cur_frame->stride[0]);
    }

    if (mpi != p->last_input) {
        talloc_free(p->last_input);
        p->last_input = mpi;
    }
}

static void flip_page(struct vo *vo)
{
    struct priv *p = vo->priv;
    if (!p->active || p->pflip_happening)
        return;

    int ret = drmModePageFlip(p->fd, p->dev->crtc,
                              p->dev->bufs[p->dev->front_buf].fb,
                              DRM_MODE_PAGE_FLIP_EVENT, p);
    if (ret) {
        MP_WARN(vo, "Cannot flip page for connector\n");
    } else {
        p->dev->front_buf++;
        p->dev->front_buf %= BUF_COUNT;
        p->pflip_happening = true;
    }

    // poll page flip finish event
    const int timeout_ms = 3000;
    struct pollfd fds[1] = {
        { .events = POLLIN, .fd = p->fd },
    };
    poll(fds, 1, timeout_ms);
    if (fds[0].revents & POLLIN) {
        ret = drmHandleEvent(p->fd, &p->ev);
        if (ret != 0) {
            MP_ERR(vo, "drmHandleEvent failed: %i\n", ret);
            return;
        }
    }
}

static void uninit(struct vo *vo)
{
    struct priv *p = vo->priv;

    if (p->dev) {
        release_vo_crtc(vo);

        modeset_destroy_fb(p->fd, &p->dev->bufs[1]);
        modeset_destroy_fb(p->fd, &p->dev->bufs[0]);
        drmModeFreeEncoder(p->dev->enc);
    }

    vt_switcher_destroy(&p->vt_switcher);
    talloc_free(p->last_input);
    talloc_free(p->cur_frame);
    talloc_free(p->dev);
    close(p->fd);
}

static int preinit(struct vo *vo)
{
    struct priv *p = vo->priv;
    p->sws = mp_sws_alloc(vo);
    p->fd = -1;
    p->ev.version = DRM_EVENT_CONTEXT_VERSION;
    p->ev.page_flip_handler = modeset_page_flipped;

    if (vt_switcher_init(&p->vt_switcher, vo->log))
        goto err;

    vt_switcher_acquire(&p->vt_switcher, acquire_vt, vo);
    vt_switcher_release(&p->vt_switcher, release_vt, vo);

    if (modeset_open(vo, &p->fd, p->device_path))
        goto err;

    if (modeset_prepare_dev(vo, p->fd, p->connector_id, &p->dev))
        goto err;

    assert(p->dev);
    p->device_w = p->dev->bufs[0].width;
    p->device_h = p->dev->bufs[0].height;

    if (setup_vo_crtc(vo)) {
        MP_ERR(vo, "Cannot set CRTC for connector %u: %s\n", p->connector_id,
               mp_strerror(errno));
        goto err;
    }

    return 0;

err:
    uninit(vo);
    return -1;
}

static int query_format(struct vo *vo, int format)
{
    return sws_isSupportedInput(imgfmt2pixfmt(format));
}

static int control(struct vo *vo, uint32_t request, void *data)
{
    struct priv *p = vo->priv;
    switch (request) {
    case VOCTRL_SCREENSHOT_WIN:
        *(struct mp_image**)data = mp_image_new_copy(p->cur_frame);
        return VO_TRUE;
    case VOCTRL_REDRAW_FRAME:
        draw_image(vo, p->last_input);
        return VO_TRUE;
    case VOCTRL_GET_PANSCAN:
        return VO_TRUE;
    case VOCTRL_SET_PANSCAN:
        if (vo->config_ok)
            reconfig(vo, vo->params, 0);
        return VO_TRUE;
    }
    return VO_NOTIMPL;
}

#define OPT_BASE_STRUCT struct priv

const struct vo_driver video_out_drm = {
    .name = "drm",
    .description = "Direct Rendering Manager",
    .preinit = preinit,
    .query_format = query_format,
    .reconfig = reconfig,
    .control = control,
    .draw_image = draw_image,
    .flip_page = flip_page,
    .uninit = uninit,
    .wait_events = wait_events,
    .wakeup = wakeup,
    .priv_size = sizeof(struct priv),
    .options = (const struct m_option[]) {
        OPT_STRING("devpath", device_path, 0),
        OPT_INT("connector", connector_id, 0),
        {0},
    },
    .priv_defaults = &(const struct priv) {
        /* the listing breaks off mid-initializer here; -1 ("pick the first
         * connected connector", see modeset_prepare_dev) is an assumed
         * default */
        .connector_id = -1,
    },
};
Example #13
int amdgpu_bo_import(amdgpu_device_handle dev,
		     enum amdgpu_bo_handle_type type,
		     uint32_t shared_handle,
		     struct amdgpu_bo_import_result *output)
{
	struct drm_gem_open open_arg = {};
	struct amdgpu_bo *bo = NULL;
	int r;
	int dma_fd;
	uint64_t dma_buf_size = 0;

	/* We must maintain a list of pairs <handle, bo>, so that we always
	 * return the same amdgpu_bo instance for the same handle. */
	pthread_mutex_lock(&dev->bo_table_mutex);

	/* Convert a DMA buf handle to a KMS handle now. */
	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
		uint32_t handle;
		off_t size;

		/* Get a KMS handle. */
		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
		if (r) {
			pthread_mutex_unlock(&dev->bo_table_mutex);
			return r;
		}

		/* Query the buffer size. */
		size = lseek(shared_handle, 0, SEEK_END);
		if (size == (off_t)-1) {
			pthread_mutex_unlock(&dev->bo_table_mutex);
			amdgpu_close_kms_handle(dev, handle);
			return -errno;
		}
		lseek(shared_handle, 0, SEEK_SET);

		dma_buf_size = size;
		shared_handle = handle;
	}

	/* If we have already created a buffer with this handle, find it. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		bo = util_hash_table_get(dev->bo_flink_names,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo = util_hash_table_get(dev->bo_handles,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_kms:
		/* Importing a KMS handle in not allowed. */
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EPERM;

	default:
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EINVAL;
	}

	if (bo) {
		pthread_mutex_unlock(&dev->bo_table_mutex);

		/* The buffer already exists, just bump the refcount. */
		atomic_inc(&bo->refcount);

		output->buf_handle = bo;
		output->alloc_size = bo->alloc_size;
		return 0;
	}

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo) {
		pthread_mutex_unlock(&dev->bo_table_mutex);
		if (type == amdgpu_bo_handle_type_dma_buf_fd) {
			amdgpu_close_kms_handle(dev, shared_handle);
		}
		return -ENOMEM;
	}

	/* Open the handle. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		open_arg.name = shared_handle;
		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
		if (r) {
			free(bo);
			pthread_mutex_unlock(&dev->bo_table_mutex);
			return r;
		}

		bo->handle = open_arg.handle;
		if (dev->flink_fd != dev->fd) {
			r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
			r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle );

			close(dma_fd);

			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
		}
		bo->flink_name = shared_handle;
		bo->alloc_size = open_arg.size;
		util_hash_table_set(dev->bo_flink_names,
				    (void*)(uintptr_t)bo->flink_name, bo);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo->handle = shared_handle;
		bo->alloc_size = dma_buf_size;
		break;

	case amdgpu_bo_handle_type_kms:
		assert(0); /* unreachable */
	}

	/* Initialize it. */
	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
	pthread_mutex_unlock(&dev->bo_table_mutex);

	output->buf_handle = bo;
	output->alloc_size = bo->alloc_size;
	return 0;
}
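
A short usage sketch for the dma-buf path of amdgpu_bo_import; where dmabuf_fd comes from (for example a PRIME export on another device) is assumed:

struct amdgpu_bo_import_result result;
int r;

r = amdgpu_bo_import(dev, amdgpu_bo_handle_type_dma_buf_fd,
		     dmabuf_fd, &result);
if (r == 0) {
	/* result.buf_handle is refcounted; drop it with amdgpu_bo_free()
	 * when no longer needed. */
	printf("imported %llu bytes\n",
	       (unsigned long long)result.alloc_size);
}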
Example #14
static int init_rb(struct uterm_display *disp, struct dumb_rb *rb)
{
	int ret;
	struct uterm_video *video = disp->video;
	struct drm_mode_create_dumb req;
	struct drm_mode_destroy_dumb dreq;
	struct drm_mode_map_dumb mreq;

	memset(&req, 0, sizeof(req));
	req.width = disp->current_mode->dumb.info.hdisplay;
	req.height = disp->current_mode->dumb.info.vdisplay;
	req.bpp = 32;
	req.flags = 0;

	ret = drmIoctl(video->dumb.fd, DRM_IOCTL_MODE_CREATE_DUMB, &req);
	if (ret < 0) {
		log_err("cannot create dumb drm buffer");
		return -EFAULT;
	}

	rb->handle = req.handle;
	rb->stride = req.pitch;
	rb->size = req.size;

	ret = drmModeAddFB(video->dumb.fd,
			disp->current_mode->dumb.info.hdisplay,
			disp->current_mode->dumb.info.vdisplay,
			24, 32, rb->stride, rb->handle, &rb->fb);
	if (ret) {
		log_err("cannot add drm-fb");
		ret = -EFAULT;
		goto err_buf;
	}

	memset(&mreq, 0, sizeof(mreq));
	mreq.handle = rb->handle;

	ret = drmIoctl(video->dumb.fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
	if (ret) {
		log_err("cannot map dumb buffer");
		ret = -EFAULT;
		goto err_fb;
	}

	rb->map = mmap(0, rb->size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       video->dumb.fd, mreq.offset);
	if (rb->map == MAP_FAILED) {
		log_err("cannot mmap dumb buffer");
		ret = -EFAULT;
		goto err_fb;
	}

	return 0;

err_fb:
	drmModeRmFB(video->dumb.fd, rb->fb);
err_buf:
	memset(&dreq, 0, sizeof(dreq));
	dreq.handle = rb->handle;
	ret = drmIoctl(video->dumb.fd, DRM_IOCTL_MODE_DESTROY_DUMB, &dreq);
	if (ret)
		log_warning("cannot destroy dumb buffer");

	return ret;
}
Example #15
File: main.c Project: yuq/gfx
int main(int argc, char **argv)
{
	int fd = drmOpen("radeon", NULL);
	assert(fd >= 0);

	// expose all planes including primary and cursor planes
	assert(!drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1));

	drmModeResPtr res = drmModeGetResources(fd);
	assert(res);

	int i, j;
	drmModeConnectorPtr connector = NULL;
	for (i = 0; i < res->count_connectors; i++) {
		connector = drmModeGetConnector(fd, res->connectors[i]);
		assert(connector);

		if (connector->connection == DRM_MODE_CONNECTED)
			break;

		drmModeFreeConnector(connector);
	}

	drmModeEncoderPtr encoder = drmModeGetEncoder(fd, connector->encoder_id);
	assert(encoder);

	drmModeCrtcPtr crtc = drmModeGetCrtc(fd, encoder->crtc_id);
	assert(crtc);

	drmModeFBPtr fb = drmModeGetFB(fd, crtc->buffer_id);
	assert(fb);

	drmModePlaneResPtr plane_res = drmModeGetPlaneResources(fd);
	assert(plane_res);

	drmModePlanePtr plane = NULL;
	for (i = 0; i < plane_res->count_planes; i++) {
		plane = drmModeGetPlane(fd, plane_res->planes[i]);
		assert(plane);

		if (plane->fb_id == fb->fb_id)
			break;

		drmModeFreePlane(plane);
	}

	uint64_t has_dumb;
	assert(!drmGetCap(fd, DRM_CAP_DUMB_BUFFER, &has_dumb));
	assert(has_dumb);

	struct drm_mode_create_dumb creq;
	memset(&creq, 0, sizeof(creq));
	creq.width = fb->width;
	creq.height = fb->height;
	creq.bpp = fb->bpp;
	assert(!drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq));

	printf("width=%u height=%u bpp=%u pitch=%u size=%llu\n",
		   creq.width, creq.height, creq.bpp, creq.pitch,
		   (unsigned long long)creq.size);

	uint32_t my_fb;
	assert(!drmModeAddFB(fd, creq.width, creq.height, 24, creq.bpp, creq.pitch, creq.handle, &my_fb));	

	struct drm_mode_map_dumb mreq;
	memset(&mreq, 0, sizeof(mreq));
	mreq.handle = creq.handle;
	assert(!drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq));

	uint32_t *map = mmap(0, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mreq.offset);
	assert(map != MAP_FAILED);
	memset(map, 0, creq.size);

	for (i = 100; i < 500; i++)
		for (j = 200; j < 460; j++)
			map[i * (creq.pitch >> 2) + j] = 0x12345678;

	assert(!drmModeSetCrtc(fd, crtc->crtc_id, my_fb, 0, 0, &connector->connector_id, 1, &crtc->mode));
	sleep(10);
	assert(!drmModeSetCrtc(fd, crtc->crtc_id, fb->fb_id, 0, 0, &connector->connector_id, 1, &crtc->mode));

	assert(!drmModeRmFB(fd, my_fb));
	struct drm_mode_destroy_dumb dreq;
	memset(&dreq, 0, sizeof(dreq));
	dreq.handle = creq.handle;
	assert(!drmIoctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &dreq));

	drmModeFreePlane(plane);
	drmModeFreePlaneResources(plane_res);
	drmModeFreeFB(fb);
	drmModeFreeCrtc(crtc);
	drmModeFreeEncoder(encoder);
	drmModeFreeConnector(connector);
	drmModeFreeResources(res);
	drmClose(fd);
	return 0;
}
static void
copy(int fd, uint32_t dst, uint32_t src)
{
	uint32_t batch[1024], *b = batch;
	struct drm_i915_gem_relocation_entry reloc[2], *r = reloc;
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_execbuffer2 exec;
	uint32_t handle;
	int ret;

	/* invariant state */
	*b++ = (_3DSTATE_AA_CMD |
		AA_LINE_ECAAR_WIDTH_ENABLE |
		AA_LINE_ECAAR_WIDTH_1_0 |
		AA_LINE_REGION_WIDTH_ENABLE | AA_LINE_REGION_WIDTH_1_0);
	*b++ = (_3DSTATE_INDEPENDENT_ALPHA_BLEND_CMD |
		IAB_MODIFY_ENABLE |
		IAB_MODIFY_FUNC | (BLENDFUNC_ADD << IAB_FUNC_SHIFT) |
		IAB_MODIFY_SRC_FACTOR | (BLENDFACT_ONE <<
					 IAB_SRC_FACTOR_SHIFT) |
		IAB_MODIFY_DST_FACTOR | (BLENDFACT_ZERO <<
					 IAB_DST_FACTOR_SHIFT));
	*b++ = (_3DSTATE_DFLT_DIFFUSE_CMD);
	*b++ = (0);
	*b++ = (_3DSTATE_DFLT_SPEC_CMD);
	*b++ = (0);
	*b++ = (_3DSTATE_DFLT_Z_CMD);
	*b++ = (0);
	*b++ = (_3DSTATE_COORD_SET_BINDINGS |
		CSB_TCB(0, 0) |
		CSB_TCB(1, 1) |
		CSB_TCB(2, 2) |
		CSB_TCB(3, 3) |
		CSB_TCB(4, 4) |
		CSB_TCB(5, 5) | CSB_TCB(6, 6) | CSB_TCB(7, 7));
	*b++ = (_3DSTATE_RASTER_RULES_CMD |
		ENABLE_POINT_RASTER_RULE |
		OGL_POINT_RASTER_RULE |
		ENABLE_LINE_STRIP_PROVOKE_VRTX |
		ENABLE_TRI_FAN_PROVOKE_VRTX |
		LINE_STRIP_PROVOKE_VRTX(1) |
		TRI_FAN_PROVOKE_VRTX(2) | ENABLE_TEXKILL_3D_4D | TEXKILL_4D);
	*b++ = (_3DSTATE_MODES_4_CMD |
		ENABLE_LOGIC_OP_FUNC | LOGIC_OP_FUNC(LOGICOP_COPY) |
		ENABLE_STENCIL_WRITE_MASK | STENCIL_WRITE_MASK(0xff) |
		ENABLE_STENCIL_TEST_MASK | STENCIL_TEST_MASK(0xff));
	*b++ = (_3DSTATE_LOAD_STATE_IMMEDIATE_1 | I1_LOAD_S(3) | I1_LOAD_S(4) | I1_LOAD_S(5) | 2);
	*b++ = (0x00000000);	/* Disable texture coordinate wrap-shortest */
	*b++ = ((1 << S4_POINT_WIDTH_SHIFT) |
		S4_LINE_WIDTH_ONE |
		S4_CULLMODE_NONE |
		S4_VFMT_XY);
	*b++ = (0x00000000);	/* Stencil. */
	*b++ = (_3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT);
	*b++ = (_3DSTATE_SCISSOR_RECT_0_CMD);
	*b++ = (0);
	*b++ = (0);
	*b++ = (_3DSTATE_DEPTH_SUBRECT_DISABLE);
	*b++ = (_3DSTATE_LOAD_INDIRECT | 0);	/* disable indirect state */
	*b++ = (0);
	*b++ = (_3DSTATE_STIPPLE);
	*b++ = (0x00000000);
	*b++ = (_3DSTATE_BACKFACE_STENCIL_OPS | BFO_ENABLE_STENCIL_TWO_SIDE | 0);

	/* sampler state */
#define TEX_COUNT 1
	*b++ = (_3DSTATE_MAP_STATE | (3 * TEX_COUNT));
	*b++ = ((1 << TEX_COUNT) - 1);
	*b = fill_reloc(r++, b-batch, src, I915_GEM_DOMAIN_SAMPLER, 0); b++;
	*b++ = (MAPSURF_32BIT | MT_32BIT_ARGB8888 |
		MS3_TILED_SURFACE |
		(HEIGHT - 1) << MS3_HEIGHT_SHIFT |
		(WIDTH - 1) << MS3_WIDTH_SHIFT);
	*b++ = ((WIDTH-1) << MS4_PITCH_SHIFT);

	*b++ = (_3DSTATE_SAMPLER_STATE | (3 * TEX_COUNT));
	*b++ = ((1 << TEX_COUNT) - 1);
	*b++ = (MIPFILTER_NONE << SS2_MIP_FILTER_SHIFT |
		FILTER_NEAREST << SS2_MAG_FILTER_SHIFT |
		FILTER_NEAREST << SS2_MIN_FILTER_SHIFT);
	*b++ = (TEXCOORDMODE_WRAP << SS3_TCX_ADDR_MODE_SHIFT |
		TEXCOORDMODE_WRAP << SS3_TCY_ADDR_MODE_SHIFT |
		0 << SS3_TEXTUREMAP_INDEX_SHIFT);
	*b++ = (0x00000000);

	/* render target state */
	*b++ = (_3DSTATE_BUF_INFO_CMD);
	*b++ = (BUF_3D_ID_COLOR_BACK | BUF_3D_TILED_SURFACE |  WIDTH*4);
	*b = fill_reloc(r++, b-batch, dst,
			I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
	b++;

	*b++ = (_3DSTATE_DST_BUF_VARS_CMD);
	*b++ = (COLR_BUF_ARGB8888 |
		DSTORG_HORT_BIAS(0x8) |
		DSTORG_VERT_BIAS(0x8));

	/* draw rect is unconditional */
	*b++ = (_3DSTATE_DRAW_RECT_CMD);
	*b++ = (0x00000000);
	*b++ = (0x00000000);	/* ymin, xmin */
	*b++ = (DRAW_YMAX(HEIGHT - 1) |
		DRAW_XMAX(WIDTH - 1));
	/* yorig, xorig (relate to color buffer?) */
	*b++ = (0x00000000);

	/* texfmt */
	*b++ = (_3DSTATE_LOAD_STATE_IMMEDIATE_1 | I1_LOAD_S(1) | I1_LOAD_S(2) | I1_LOAD_S(6) | 2);
	*b++ = ((4 << S1_VERTEX_WIDTH_SHIFT) | (4 << S1_VERTEX_PITCH_SHIFT));
	*b++ = (~S2_TEXCOORD_FMT(0, TEXCOORDFMT_NOT_PRESENT) |
		S2_TEXCOORD_FMT(0, TEXCOORDFMT_2D));
	*b++ = (S6_CBUF_BLEND_ENABLE | S6_COLOR_WRITE_ENABLE |
		BLENDFUNC_ADD << S6_CBUF_BLEND_FUNC_SHIFT |
		BLENDFACT_ONE << S6_CBUF_SRC_BLEND_FACT_SHIFT |
		BLENDFACT_ZERO << S6_CBUF_DST_BLEND_FACT_SHIFT);

	/* pixel shader */
	*b++ = (_3DSTATE_PIXEL_SHADER_PROGRAM | (1 + 3*3 - 2));
	/* decl FS_T0 */
	*b++ = (D0_DCL |
		REG_TYPE(FS_T0) << D0_TYPE_SHIFT |
		REG_NR(FS_T0) << D0_NR_SHIFT |
		((REG_TYPE(FS_T0) != REG_TYPE_S) ? D0_CHANNEL_ALL : 0));
	*b++ = (0);
	*b++ = (0);
	/* decl FS_S0 */
	*b++ = (D0_DCL |
		(REG_TYPE(FS_S0) << D0_TYPE_SHIFT) |
		(REG_NR(FS_S0) << D0_NR_SHIFT) |
		((REG_TYPE(FS_S0) != REG_TYPE_S) ? D0_CHANNEL_ALL : 0));
	*b++ = (0);
	*b++ = (0);
	/* texld(FS_OC, FS_S0, FS_T0) */
	*b++ = (T0_TEXLD |
		(REG_TYPE(FS_OC) << T0_DEST_TYPE_SHIFT) |
		(REG_NR(FS_OC) << T0_DEST_NR_SHIFT) |
		(REG_NR(FS_S0) << T0_SAMPLER_NR_SHIFT));
	*b++ = ((REG_TYPE(FS_T0) << T1_ADDRESS_REG_TYPE_SHIFT) |
		(REG_NR(FS_T0) << T1_ADDRESS_REG_NR_SHIFT));
	*b++ = (0);

	*b++ = (PRIM3D_RECTLIST | (3*4 - 1));
	*b++ = pack_float(WIDTH);
	*b++ = pack_float(HEIGHT);
	*b++ = pack_float(WIDTH);
	*b++ = pack_float(HEIGHT);

	*b++ = pack_float(0);
	*b++ = pack_float(HEIGHT);
	*b++ = pack_float(0);
	*b++ = pack_float(HEIGHT);

	*b++ = pack_float(0);
	*b++ = pack_float(0);
	*b++ = pack_float(0);
	*b++ = pack_float(0);

	*b++ = MI_BATCH_BUFFER_END;
	if ((b - batch) & 1)
		*b++ = 0;

	assert(b - batch <= 1024);
	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, (b-batch)*sizeof(batch[0]));

	assert(r-reloc == 2);

	obj[0].handle = dst;
	obj[0].relocation_count = 0;
	obj[0].relocs_ptr = 0;
	obj[0].alignment = 0;
	obj[0].offset = 0;
	obj[0].flags = 0;
	obj[0].rsvd1 = 0;
	obj[0].rsvd2 = 0;

	obj[1].handle = src;
	obj[1].relocation_count = 0;
	obj[1].relocs_ptr = 0;
	obj[1].alignment = 0;
	obj[1].offset = 0;
	obj[1].flags = 0;
	obj[1].rsvd1 = 0;
	obj[1].rsvd2 = 0;

	obj[2].handle = handle;
	obj[2].relocation_count = 2;
	obj[2].relocs_ptr = (uintptr_t)reloc;
	obj[2].alignment = 0;
	obj[2].offset = 0;
	obj[2].flags = 0;
	obj[2].rsvd1 = obj[2].rsvd2 = 0;

	exec.buffers_ptr = (uintptr_t)obj;
	exec.buffer_count = 3;
	exec.batch_start_offset = 0;
	exec.batch_len = (b-batch)*sizeof(batch[0]);
	exec.DR1 = exec.DR4 = 0;
	exec.num_cliprects = 0;
	exec.cliprects_ptr = 0;
	exec.flags = 0;
	exec.rsvd1 = exec.rsvd2 = 0;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	while (ret && errno == EBUSY) {
		drmCommandNone(fd, DRM_I915_GEM_THROTTLE);
		ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	}
	assert(ret == 0);

	gem_close(fd, handle);
}
Example #17
int pscnv_gem_close(int fd, uint32_t handle) {
	struct drm_gem_close req = {
		.handle = handle,
	};
	return drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &req);
}
Example #18
drmModeConnectorPtr drmModeGetConnector(int fd, uint32_t connector_id)
{
	struct drm_mode_get_connector conn;
	drmModeConnectorPtr r = NULL;

	conn.connector_id = connector_id;
	conn.connector_type_id = 0;
	conn.connector_type  = 0;
	conn.count_modes  = 0;
	conn.modes_ptr    = 0;
	conn.count_props  = 0;
	conn.props_ptr    = 0;
	conn.prop_values_ptr = 0;
	conn.count_encoders  = 0;
	conn.encoders_ptr = 0;

	if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))
		return 0;

	if (conn.count_props) {
		conn.props_ptr = VOID2U64(drmMalloc(conn.count_props*sizeof(uint32_t)));
		conn.prop_values_ptr = VOID2U64(drmMalloc(conn.count_props*sizeof(uint64_t)));
	}

	if (conn.count_modes)
		conn.modes_ptr = VOID2U64(drmMalloc(conn.count_modes*sizeof(struct drm_mode_modeinfo)));

	if (conn.count_encoders)
		conn.encoders_ptr = VOID2U64(drmMalloc(conn.count_encoders*sizeof(uint32_t)));

	if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))
		goto err_allocs;

	if(!(r = drmMalloc(sizeof(*r)))) {
		goto err_allocs;
	}

	r->connector_id = conn.connector_id;
	r->encoder_id = conn.encoder_id;
	r->connection   = conn.connection;
	r->mmWidth      = conn.mm_width;
	r->mmHeight     = conn.mm_height;
	/* convert subpixel from kernel to userspace */
	r->subpixel     = conn.subpixel + 1;
	r->count_modes  = conn.count_modes;
	/* TODO: we should test whether these alloc & copy calls fail. */
	r->count_props  = conn.count_props;
	r->props        = drmAllocCpy(U642VOID(conn.props_ptr), conn.count_props, sizeof(uint32_t));
	r->prop_values  = drmAllocCpy(U642VOID(conn.prop_values_ptr), conn.count_props, sizeof(uint64_t));
	r->modes        = drmAllocCpy(U642VOID(conn.modes_ptr), conn.count_modes, sizeof(struct drm_mode_modeinfo));
	r->count_encoders = conn.count_encoders;
	r->encoders     = drmAllocCpy(U642VOID(conn.encoders_ptr), conn.count_encoders, sizeof(uint32_t));
	r->connector_type  = conn.connector_type;
	r->connector_type_id = conn.connector_type_id;

	if ((r->count_props && !r->props) ||
	    (r->count_props && !r->prop_values) ||
	    (r->count_modes && !r->modes) ||
	    (r->count_encoders && !r->encoders)) {
		drmFree(r->props);
		drmFree(r->prop_values);
		drmFree(r->modes);
		drmFree(r->encoders);
		drmFree(r);
		r = NULL;
	}

err_allocs:
	drmFree(U642VOID(conn.prop_values_ptr));
	drmFree(U642VOID(conn.props_ptr));
	drmFree(U642VOID(conn.modes_ptr));
	drmFree(U642VOID(conn.encoders_ptr));

	return r;
}
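
A minimal usage sketch, assuming an already-open DRM fd and libdrm's xf86drmMode.h: enumerate the connectors reported by drmModeGetResources() and print the mode names of each connected one.

#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void list_modes(int fd)
{
	drmModeResPtr res = drmModeGetResources(fd);
	int i, j;

	if (!res)
		return;

	for (i = 0; i < res->count_connectors; ++i) {
		drmModeConnectorPtr conn = drmModeGetConnector(fd, res->connectors[i]);

		if (!conn)
			continue;

		if (conn->connection == DRM_MODE_CONNECTED) {
			for (j = 0; j < conn->count_modes; ++j)
				printf("connector %u: %s\n",
				       conn->connector_id, conn->modes[j].name);
		}

		drmModeFreeConnector(conn);
	}

	drmModeFreeResources(res);
}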
Example #19
/*
 * Destroy an exynos buffer object.
 *
 * @bo: an exynos buffer object to be destroyed.
 */
drm_public void exynos_bo_destroy(struct exynos_bo *bo)
{
	if (!bo)
		return;

	if (bo->vaddr)
		munmap(bo->vaddr, bo->size);

	if (bo->handle) {
		struct drm_gem_close req = {
			.handle = bo->handle,
		};

		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	free(bo);
}


/*
 * Get an exynos buffer object from a gem global object name.
 *
 * @dev: an exynos device object.
 * @name: a gem global object name exported by another process.
 *
 * This interface is used to get an exynos buffer object from a gem
 * global object name sent by another process for buffer sharing.
 *
 * Returns an exynos buffer object on success, else NULL.
 *
 */
drm_public struct exynos_bo *
exynos_bo_from_name(struct exynos_device *dev, uint32_t name)
{
	struct exynos_bo *bo;
	struct drm_gem_open req = {
		.name = name,
	};

	bo = calloc(1, sizeof(*bo));
	if (!bo) {
		fprintf(stderr, "failed to allocate bo[%s].\n",
				strerror(errno));
		return NULL;
	}

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		fprintf(stderr, "failed to open gem object[%s].\n",
				strerror(errno));
		goto err_free_bo;
	}

	bo->dev = dev;
	bo->name = name;
	bo->handle = req.handle;

	return bo;

err_free_bo:
	free(bo);
	return NULL;
}

/*
 * Get a gem global object name from a gem object handle.
 *
 * @bo: an exynos buffer object that includes the gem handle.
 * @name: filled with the gem global object name reported by the kernel driver.
 *
 * This interface is used to get a gem global object name from a gem object
 * handle for a buffer that is to be shared with another process.
 *
 * Returns 0 on success, else a negative value.
 */
drm_public int exynos_bo_get_name(struct exynos_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
			.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			fprintf(stderr, "failed to get gem global name[%s].\n",
					strerror(errno));
			return ret;
		}

		bo->name = req.name;
	}

	*name = bo->name;

	return 0;
}
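
A sharing sketch, assuming libdrm's exynos_drmif.h: the exporting process fetches a global flink name with exynos_bo_get_name() and passes it on (e.g. over a socket); the importing process turns the name back into a bo with exynos_bo_from_name(). The helper names below are only illustrative.

/* Exporting process: publish a global name for the bo. */
static int export_name(struct exynos_bo *bo, uint32_t *name)
{
	/* The name is global; any process with access to the DRM device
	 * can turn it back into a bo. */
	return exynos_bo_get_name(bo, name);
}

/* Importing process (with its own exynos_device): reopen the bo. */
static struct exynos_bo *import_name(struct exynos_device *dev, uint32_t name)
{
	return exynos_bo_from_name(dev, name);
}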

drm_public uint32_t exynos_bo_handle(struct exynos_bo *bo)
{
	return bo->handle;
}

/*
 * Mmap a buffer to user space.
 *
 * @bo: an exynos buffer object that includes the gem object handle to be
 *	mmapped to user space.
 *
 * Returns the mmapped user pointer on success, else NULL.
 */
drm_public void *exynos_bo_map(struct exynos_bo *bo)
{
	if (!bo->vaddr) {
		struct exynos_device *dev = bo->dev;
		struct drm_exynos_gem_mmap req = {
			.handle = bo->handle,
			.size	= bo->size,
		};
		int ret;

		ret = drmIoctl(dev->fd, DRM_IOCTL_EXYNOS_GEM_MMAP, &req);
		if (ret) {
			fprintf(stderr, "failed to mmap[%s].\n",
				strerror(errno));
			return NULL;
		}

		bo->vaddr = (void *)(uintptr_t)req.mapped;
	}

	return bo->vaddr;
}
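
A short CPU-access sketch, assuming <string.h> and libdrm's exynos_drmif.h: exynos_bo_map() caches the mapping in bo->vaddr, so repeated calls are cheap, and the mapping is torn down by exynos_bo_destroy().

static int clear_bo(struct exynos_bo *bo)
{
	void *vaddr = exynos_bo_map(bo);

	if (!vaddr)
		return -1;

	/* CPU-write the whole buffer through the cached mapping. */
	memset(vaddr, 0, bo->size);
	return 0;
}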

/*
 * Export gem object to dmabuf as file descriptor.
 *
 * @dev: exynos device object
 * @handle: gem handle to export as file descriptor of dmabuf
 * @fd: file descriptor returned from kernel
 *
 * @return: 0 on success, -1 on error, and errno will be set
 */
drm_public int
exynos_prime_handle_to_fd(struct exynos_device *dev, uint32_t handle, int *fd)
{
	return drmPrimeHandleToFD(dev->fd, handle, 0, fd);
}

/*
 * Import file descriptor into gem handle.
 *
 * @dev: exynos device object
 * @fd: file descriptor of dmabuf to import
 * @handle: gem handle returned from kernel
 *
 * @return: 0 on success, -1 on error, and errno will be set
 */
drm_public int
exynos_prime_fd_to_handle(struct exynos_device *dev, int fd, uint32_t *handle)
{
	return drmPrimeFDToHandle(dev->fd, fd, handle);
}
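
A PRIME round-trip sketch, assuming <unistd.h>: export a gem handle as a dmabuf fd, then import it again. In practice the fd would travel to another process over a unix socket with SCM_RIGHTS; importing on the same device fd simply yields the existing gem handle. The caller owns and must close() the exported fd.

static int prime_roundtrip(struct exynos_device *dev, uint32_t handle)
{
	uint32_t imported;
	int fd;

	if (exynos_prime_handle_to_fd(dev, handle, &fd))
		return -1;

	if (exynos_prime_fd_to_handle(dev, fd, &imported)) {
		close(fd);
		return -1;
	}

	close(fd);	/* the gem handle stays valid after the fd is closed */
	return 0;
}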



/*
 * Request Wireless Display connection or disconnection.
 *
 * @dev: an exynos device object.
 * @connect: indicates whether this is a connection or disconnection request.
 * @ext: indicates whether the edid data includes extension data or not.
 * @edid: a pointer to edid data from the Wireless Display device.
 *
 * This interface is used to request a Virtual Display driver connection or
 * disconnection. For this, the user should get edid data from the Wireless
 * Display device and then send that data to the kernel driver with the
 * connection request.
 *
 * Returns 0 on success, else a negative value.
 */
drm_public int
exynos_vidi_connection(struct exynos_device *dev, uint32_t connect,
		       uint32_t ext, void *edid)
{
	struct drm_exynos_vidi_connection req = {
		.connection	= connect,
		.extensions	= ext,
		.edid		= (uint64_t)(uintptr_t)edid,
	};
	int ret;

	ret = drmIoctl(dev->fd, DRM_IOCTL_EXYNOS_VIDI_CONNECTION, &req);
	if (ret) {
		fprintf(stderr, "failed to request vidi connection[%s].\n",
				strerror(errno));
		return ret;
	}

	return 0;
}
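
A hypothetical connection sketch: 'edid' is assumed to hold EDID data received from the Wireless Display device, with 'ext' nonzero when extension blocks are included; passing connect=0 would request disconnection instead.

static int wfd_connect(struct exynos_device *dev, void *edid, uint32_t ext)
{
	return exynos_vidi_connection(dev, 1 /* connect */, ext, edid);
}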
Example #20
/* get buffer info */
static int get_buffer_info(struct omap_bo *bo)
{
	struct drm_omap_gem_info req = {
			.handle = bo->handle,
	};
	int ret = drmCommandWriteRead(bo->dev->fd, DRM_OMAP_GEM_INFO,
			&req, sizeof(req));
	if (ret) {
		return ret;
	}

	/* really all we need for now is mmap offset */
	bo->offset = req.offset;
	bo->size = req.size;

	return 0;
}

/* import a buffer object from DRI2 name */
drm_public struct omap_bo *
omap_bo_from_name(struct omap_device *dev, uint32_t name)
{
	struct omap_bo *bo = NULL;
	struct drm_gem_open req = {
			.name = name,
	};

	pthread_mutex_lock(&table_lock);

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		goto fail;
	}

	bo = lookup_bo(dev, req.handle);
	if (!bo) {
		bo = bo_from_handle(dev, req.handle);
		bo->name = name;
	}

	pthread_mutex_unlock(&table_lock);

	return bo;

fail:
	pthread_mutex_unlock(&table_lock);
	free(bo);
	return NULL;
}

/* Import a buffer from a dmabuf fd. Does not take ownership of the fd,
 * so the caller should close() it when otherwise done with it (even if
 * it is still using the 'struct omap_bo *').
 */
drm_public struct omap_bo *
omap_bo_from_dmabuf(struct omap_device *dev, int fd)
{
	struct omap_bo *bo = NULL;
	struct drm_prime_handle req = {
			.fd = fd,
	};
	int ret;

	pthread_mutex_lock(&table_lock);

	ret = drmIoctl(dev->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &req);
	if (ret) {
		goto fail;
	}

	bo = lookup_bo(dev, req.handle);
	if (!bo) {
		bo = bo_from_handle(dev, req.handle);
	}

	pthread_mutex_unlock(&table_lock);

	return bo;

fail:
	pthread_mutex_unlock(&table_lock);
	free(bo);
	return NULL;
}

/* destroy a buffer object */
drm_public void omap_bo_del(struct omap_bo *bo)
{
	if (!bo) {
		return;
	}

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	if (bo->map) {
		munmap(bo->map, bo->size);
	}

	if (bo->fd >= 0) {
		close(bo->fd);
	}

	if (bo->handle) {
		struct drm_gem_close req = {
				.handle = bo->handle,
		};
		pthread_mutex_lock(&table_lock);
		drmHashDelete(bo->dev->handle_table, bo->handle);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
		pthread_mutex_unlock(&table_lock);
	}

	omap_device_del(bo->dev);

	free(bo);
}

/* get the global flink/DRI2 buffer name */
drm_public int omap_bo_get_name(struct omap_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		bo->name = req.name;
	}

	*name = bo->name;

	return 0;
}

drm_public uint32_t omap_bo_handle(struct omap_bo *bo)
{
	return bo->handle;
}

/* The caller owns the dmabuf fd that is returned and is responsible
 * for close()ing it when done.
 */
drm_public int omap_bo_dmabuf(struct omap_bo *bo)
{
	if (bo->fd < 0) {
		struct drm_prime_handle req = {
				.handle = bo->handle,
				.flags = DRM_CLOEXEC,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req);
		if (ret) {
			return ret;
		}

		bo->fd = req.fd;
	}
	return dup(bo->fd);
}
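
A usage sketch, assuming <unistd.h>: because omap_bo_dmabuf() returns a dup()'ed fd, the caller must close() it when done, even while the bo itself stays alive.

static void export_to_consumer(struct omap_bo *bo)
{
	int fd = omap_bo_dmabuf(bo);

	if (fd < 0)
		return;

	/* ...hand the fd to the consumer (e.g. over a unix socket)... */
	close(fd);
}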

drm_public uint32_t omap_bo_size(struct omap_bo *bo)
{
	if (!bo->size) {
		get_buffer_info(bo);
	}
	return bo->size;
}

drm_public void *omap_bo_map(struct omap_bo *bo)
{
	if (!bo->map) {
		if (!bo->offset) {
			get_buffer_info(bo);
		}

		bo->map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
				MAP_SHARED, bo->dev->fd, bo->offset);
		if (bo->map == MAP_FAILED) {
			bo->map = NULL;
		}
	}
	return bo->map;
}

drm_public int omap_bo_cpu_prep(struct omap_bo *bo, enum omap_gem_op op)
{
	struct drm_omap_gem_cpu_prep req = {
			.handle = bo->handle,
			.op = op,
	};
	return drmCommandWrite(bo->dev->fd,
			DRM_OMAP_GEM_CPU_PREP, &req, sizeof(req));
}

drm_public int omap_bo_cpu_fini(struct omap_bo *bo, enum omap_gem_op op)
{
	struct drm_omap_gem_cpu_fini req = {
			.handle = bo->handle,
			.op = op,
			.nregions = 0,
	};
	return drmCommandWrite(bo->dev->fd,
			DRM_OMAP_GEM_CPU_FINI, &req, sizeof(req));
}
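
An access-bracketing sketch, assuming <string.h> and OMAP_GEM_WRITE from the omap_drm.h uapi header: CPU access to a bo the GPU may also touch is wrapped in cpu_prep/cpu_fini so the kernel can synchronize and manage caches.

static void cpu_fill(struct omap_bo *bo, int value)
{
	void *map = omap_bo_map(bo);

	if (!map || omap_bo_cpu_prep(bo, OMAP_GEM_WRITE))
		return;

	memset(map, value, omap_bo_size(bo));

	omap_bo_cpu_fini(bo, OMAP_GEM_WRITE);
}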
Example #21
/*
 * Checks if a modesetting-capable driver has attached to the pci id.
 * Returns 0 if modesetting is supported,
 *  -EINVAL on an invalid bus id,
 *  -ENOSYS if there is no modesetting support.
 */
drm_public int drmCheckModesettingSupported(const char *busid)
{
#if defined (__linux__)
	char pci_dev_dir[1024];
	int domain, bus, dev, func;
	DIR *sysdir;
	struct dirent *dent;
	int found = 0, ret;

	ret = sscanf(busid, "pci:%04x:%02x:%02x.%d", &domain, &bus, &dev, &func);
	if (ret != 4)
		return -EINVAL;

	sprintf(pci_dev_dir, "/sys/bus/pci/devices/%04x:%02x:%02x.%d/drm",
		domain, bus, dev, func);

	sysdir = opendir(pci_dev_dir);
	if (sysdir) {
		dent = readdir(sysdir);
		while (dent) {
			if (!strncmp(dent->d_name, "controlD", 8)) {
				found = 1;
				break;
			}

			dent = readdir(sysdir);
		}
		closedir(sysdir);
		if (found)
			return 0;
	}

	sprintf(pci_dev_dir, "/sys/bus/pci/devices/%04x:%02x:%02x.%d/",
		domain, bus, dev, func);

	sysdir = opendir(pci_dev_dir);
	if (!sysdir)
		return -EINVAL;

	dent = readdir(sysdir);
	while (dent) {
		if (!strncmp(dent->d_name, "drm:controlD", 12)) {
			found = 1;
			break;
		}

		dent = readdir(sysdir);
	}

	closedir(sysdir);
	if (found)
		return 0;
#elif defined (__FreeBSD__) || defined (__FreeBSD_kernel__)
	char kbusid[1024], sbusid[1024];
	char oid[128];
	int domain, bus, dev, func;
	int i, modesetting, ret;
	size_t len;

	ret = sscanf(busid, "pci:%04x:%02x:%02x.%d", &domain, &bus, &dev,
	    &func);
	if (ret != 4)
		return -EINVAL;
	snprintf(kbusid, sizeof(kbusid), "pci:%04x:%02x:%02x.%d", domain, bus,
	    dev, func);

	/* How many GPUs do we expect in the machine ? */
	for (i = 0; i < 16; i++) {
		snprintf(oid, sizeof(oid), "hw.dri.%d.busid", i);
		len = sizeof(sbusid);
		ret = sysctlbyname(oid, sbusid, &len, NULL, 0);
		if (ret == -1) {
			if (errno == ENOENT)
				continue;
			return -EINVAL;
		}
		if (strcmp(sbusid, kbusid) != 0)
			continue;
		snprintf(oid, sizeof(oid), "hw.dri.%d.modesetting", i);
		len = sizeof(modesetting);
		ret = sysctlbyname(oid, &modesetting, &len, NULL, 0);
		if (ret == -1 || len != sizeof(modesetting))
			return -EINVAL;
		return (modesetting ? 0 : -ENOSYS);
	}
#elif defined(__DragonFly__)
	return 0;
#elif defined(__OpenBSD__)
	int	fd;
	struct drm_mode_card_res res;
	drmModeResPtr r = 0;

	if ((fd = drmOpen(NULL, busid)) < 0)
		return -EINVAL;

	memset(&res, 0, sizeof(struct drm_mode_card_res));

	if (drmIoctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res)) {
		drmClose(fd);
		return -errno;
	}

	drmClose(fd);
	return 0;
#endif
	return -ENOSYS;
}
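
A minimal caller sketch: the busid string follows the same "pci:dddd:bb:dd.f" form that the function itself parses; the device id below is only illustrative.

static int kms_available(void)
{
	/* 0 means a modesetting-capable driver is bound to this device. */
	return drmCheckModesettingSupported("pci:0000:00:02.0") == 0;
}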
Example #22
static void
copy(int fd, uint32_t dst, uint32_t src, unsigned int error)
{
	uint32_t batch[12];
	struct drm_i915_gem_relocation_entry reloc[2];
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_execbuffer2 exec;
	uint32_t handle;
	int ret, i=0;

	batch[i++] = XY_SRC_COPY_BLT_CMD |
		  XY_SRC_COPY_BLT_WRITE_ALPHA |
		  XY_SRC_COPY_BLT_WRITE_RGB;
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i - 1] |= 8;
	else
		batch[i - 1] |= 6;

	batch[i++] = (3 << 24) | /* 32 bits */
		  (0xcc << 16) | /* copy ROP */
		  WIDTH*4;
	batch[i++] = 0; /* dst x1,y1 */
	batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
	batch[i++] = 0; /* dst reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0;
	batch[i++] = 0; /* src x1,y1 */
	batch[i++] = WIDTH*4;
	batch[i++] = 0; /* src reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0;
	batch[i++] = MI_BATCH_BUFFER_END;
	batch[i++] = MI_NOOP;

	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, sizeof(batch));

	reloc[0].target_handle = dst;
	reloc[0].delta = 0;
	reloc[0].offset = 4 * sizeof(batch[0]);
	reloc[0].presumed_offset = 0;
	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;

	reloc[1].target_handle = src;
	reloc[1].delta = 0;
	reloc[1].offset = 7 * sizeof(batch[0]);
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		reloc[1].offset += sizeof(batch[0]);
	reloc[1].presumed_offset = 0;
	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[1].write_domain = 0;

	memset(obj, 0, sizeof(obj));
	exec.buffer_count = 0;
	obj[exec.buffer_count++].handle = dst;
	if (src != dst)
		obj[exec.buffer_count++].handle = src;
	obj[exec.buffer_count].handle = handle;
	obj[exec.buffer_count].relocation_count = 2;
	obj[exec.buffer_count].relocs_ptr = (uintptr_t)reloc;
	exec.buffer_count++;
	exec.buffers_ptr = (uintptr_t)obj;

	exec.batch_start_offset = 0;
	exec.batch_len = i * 4;
	exec.DR1 = exec.DR4 = 0;
	exec.num_cliprects = 0;
	exec.cliprects_ptr = 0;
	exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
	i915_execbuffer2_set_context_id(exec, 0);
	exec.rsvd2 = 0;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	if (ret)
		ret = errno;

	if (error == ~0)
		igt_assert_neq(ret, 0);
	else
		igt_assert(ret == error);

	gem_close(fd, handle);
}
Example #23
static void busy(data_t *data, uint32_t handle, int size, int loops)
{
	struct drm_i915_gem_relocation_entry reloc[20];
	struct drm_i915_gem_exec_object2 gem_exec[2];
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_pwrite gem_pwrite;
	struct drm_i915_gem_create create;
	uint32_t buf[170], *b;
	int i;

	memset(reloc, 0, sizeof(reloc));
	memset(gem_exec, 0, sizeof(gem_exec));
	memset(&execbuf, 0, sizeof(execbuf));

	b = buf;
	for (i = 0; i < 20; i++) {
		*b++ = XY_COLOR_BLT_CMD_NOLEN |
			((data->intel_gen >= 8) ? 5 : 4) |
			COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB;
		*b++ = 0xf0 << 16 | 1 << 25 | 1 << 24 | 4096;
		*b++ = 0;
		*b++ = size >> 12 << 16 | 1024;
		reloc[i].offset = (b - buf) * sizeof(uint32_t);
		reloc[i].target_handle = handle;
		reloc[i].read_domains = I915_GEM_DOMAIN_RENDER;
		reloc[i].write_domain = I915_GEM_DOMAIN_RENDER;
		*b++ = 0;
		if (data->intel_gen >= 8)
			*b++ = 0;
		*b++ = canary;
	}
	*b++ = MI_BATCH_BUFFER_END;
	if ((b - buf) & 1)
		*b++ = 0;

	gem_exec[0].handle = handle;
	gem_exec[0].flags = EXEC_OBJECT_NEEDS_FENCE;

	create.handle = 0;
	create.size = 4096;
	drmIoctl(data->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
	gem_exec[1].handle = create.handle;
	gem_exec[1].relocation_count = 20;
	gem_exec[1].relocs_ptr = (uintptr_t)reloc;

	execbuf.buffers_ptr = (uintptr_t)gem_exec;
	execbuf.buffer_count = 2;
	execbuf.batch_len = (b - buf) * sizeof(buf[0]);
	execbuf.flags = 1 << 11;
	if (HAS_BLT_RING(data->devid))
		execbuf.flags |= I915_EXEC_BLT;

	gem_pwrite.handle = gem_exec[1].handle;
	gem_pwrite.offset = 0;
	gem_pwrite.size = execbuf.batch_len;
	gem_pwrite.data_ptr = (uintptr_t)buf;
	if (drmIoctl(data->fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite) == 0) {
		while (loops--)
			drmIoctl(data->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
	}

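	/* Note: struct drm_gem_close also starts with a __u32 handle, so
	 * passing &create.handle here works by struct layout; a dedicated
	 * struct drm_gem_close would be the cleaner choice. */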
	drmIoctl(data->fd, DRM_IOCTL_GEM_CLOSE, &create.handle);
}
Example #24
static inline int DRM_IOCTL(int fd, int cmd, void *arg)
{
	int ret = drmIoctl(fd, cmd, arg);
	return ret < 0 ? -errno : ret;
}
Example #25
static inline int DRM_IOCTL(int fd, unsigned long cmd, void *arg)
{
	int ret = drmIoctl(fd, cmd, arg);
	return ret < 0 ? -errno : ret;
}
Example #26
drmModeConnectorPtr drmModeGetConnector(int fd, uint32_t connector_id)
{
	struct drm_mode_get_connector conn, counts;
	drmModeConnectorPtr r = NULL;

retry:
	memset(&conn, 0, sizeof(struct drm_mode_get_connector));
	conn.connector_id = connector_id;

	if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))
		return 0;

	counts = conn;

	if (conn.count_props) {
		conn.props_ptr = VOID2U64(drmMalloc(conn.count_props*sizeof(uint32_t)));
		if (!conn.props_ptr)
			goto err_allocs;
		conn.prop_values_ptr = VOID2U64(drmMalloc(conn.count_props*sizeof(uint64_t)));
		if (!conn.prop_values_ptr)
			goto err_allocs;
	}

	if (conn.count_modes) {
		conn.modes_ptr = VOID2U64(drmMalloc(conn.count_modes*sizeof(struct drm_mode_modeinfo)));
		if (!conn.modes_ptr)
			goto err_allocs;
	}

	if (conn.count_encoders) {
		conn.encoders_ptr = VOID2U64(drmMalloc(conn.count_encoders*sizeof(uint32_t)));
		if (!conn.encoders_ptr)
			goto err_allocs;
	}

	if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))
		goto err_allocs;

	/* The number of available connectors etc. may have changed with a
	 * hotplug event in between the ioctls, in which case the field is
	 * silently ignored by the kernel.
	 */
	if (counts.count_props < conn.count_props ||
	    counts.count_modes < conn.count_modes ||
	    counts.count_encoders < conn.count_encoders) {
		drmFree(U642VOID(conn.props_ptr));
		drmFree(U642VOID(conn.prop_values_ptr));
		drmFree(U642VOID(conn.modes_ptr));
		drmFree(U642VOID(conn.encoders_ptr));

		goto retry;
	}

	if (!(r = drmMalloc(sizeof(*r))))
		goto err_allocs;

	r->connector_id = conn.connector_id;
	r->encoder_id = conn.encoder_id;
	r->connection   = conn.connection;
	r->mmWidth      = conn.mm_width;
	r->mmHeight     = conn.mm_height;
	/* convert subpixel from kernel to userspace */
	r->subpixel     = conn.subpixel + 1;
	r->count_modes  = conn.count_modes;
	r->count_props  = conn.count_props;
	r->props        = drmAllocCpy(U642VOID(conn.props_ptr), conn.count_props, sizeof(uint32_t));
	r->prop_values  = drmAllocCpy(U642VOID(conn.prop_values_ptr), conn.count_props, sizeof(uint64_t));
	r->modes        = drmAllocCpy(U642VOID(conn.modes_ptr), conn.count_modes, sizeof(struct drm_mode_modeinfo));
	r->count_encoders = conn.count_encoders;
	r->encoders     = drmAllocCpy(U642VOID(conn.encoders_ptr), conn.count_encoders, sizeof(uint32_t));
	r->connector_type  = conn.connector_type;
	r->connector_type_id = conn.connector_type_id;

	if ((r->count_props && !r->props) ||
	    (r->count_props && !r->prop_values) ||
	    (r->count_modes && !r->modes) ||
	    (r->count_encoders && !r->encoders)) {
		drmFree(r->props);
		drmFree(r->prop_values);
		drmFree(r->modes);
		drmFree(r->encoders);
		drmFree(r);
		r = 0;
	}

err_allocs:
	drmFree(U642VOID(conn.prop_values_ptr));
	drmFree(U642VOID(conn.props_ptr));
	drmFree(U642VOID(conn.modes_ptr));
	drmFree(U642VOID(conn.encoders_ptr));

	return r;
}
Example #27
static struct virgl_hw_res *
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                        struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res;
   uint32_t handle = whandle->handle;

   if (whandle->offset != 0) {
      fprintf(stderr, "attempt to import unsupported winsys offset %u\n",
              whandle->offset);
      return NULL;
   }

   mtx_lock(&qdws->bo_handles_mutex);

   if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
      if (res) {
         struct virgl_hw_res *r = NULL;
         virgl_drm_resource_reference(qdws, &r, res);
         goto done;
      }
   }

   if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      int r;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r) {
         res = NULL;
         goto done;
      }
   }

   res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
   fprintf(stderr, "resource %p for handle %d, pfd=%d\n", res, handle, whandle->handle);
   if (res) {
      struct virgl_hw_res *r = NULL;
      virgl_drm_resource_reference(qdws, &r, res);
      goto done;
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      res->bo_handle = handle;
   } else {
      fprintf(stderr, "gem open handle %d\n", handle);
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
   }
   res->name = handle;

   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* close */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;

   res->size = info_arg.size;
   res->stride = info_arg.stride;
   pipe_reference_init(&res->reference, 1);
   res->num_cs_references = 0;

   util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)handle, res);

done:
   mtx_unlock(&qdws->bo_handles_mutex);
   return res;
}