Example No. 1
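/* Test helper: allocate a buffer object of the requested size in the given
 * heap, map it into the GPU virtual address space, and zero it through a
 * temporary CPU mapping; the resulting handles are recorded in *vce_bo. */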
static void alloc_resource(struct amdgpu_vce_bo *vce_bo, unsigned size, unsigned domain)
{
	struct amdgpu_bo_alloc_request req = {0};
	amdgpu_bo_handle buf_handle;
	amdgpu_va_handle va_handle;
	uint64_t va = 0;
	int r;

	req.alloc_size = ALIGN(size, 4096);
	req.preferred_heap = domain;
	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_va_range_alloc(device_handle,
				  amdgpu_gpu_va_range_general,
				  req.alloc_size, 1, 0, &va,
				  &va_handle, 0);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0,
			    AMDGPU_VA_OP_MAP);
	CU_ASSERT_EQUAL(r, 0);
	vce_bo->addr = va;
	vce_bo->handle = buf_handle;
	vce_bo->size = req.alloc_size;
	vce_bo->va_handle = va_handle;
	r = amdgpu_bo_cpu_map(vce_bo->handle, (void **)&vce_bo->ptr);
	CU_ASSERT_EQUAL(r, 0);
	memset(vce_bo->ptr, 0, size);
	r = amdgpu_bo_cpu_unmap(vce_bo->handle);
	CU_ASSERT_EQUAL(r, 0);
}
Example No. 2
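/* Allocate a small GTT buffer, copy the UVD destroy message into it, submit
 * the command on the UVD ring, then unmap the VA range and free the buffer. */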
static void amdgpu_cs_uvd_destroy(void)
{
	struct amdgpu_bo_alloc_request req = {0};
	amdgpu_bo_handle buf_handle;
	amdgpu_va_handle va_handle;
	uint64_t va = 0;
	void *msg;
	int i, r;

	req.alloc_size = 4*1024;
	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_va_range_alloc(device_handle,
				  amdgpu_gpu_va_range_general,
				  req.alloc_size, 1, 0, &va,
				  &va_handle, 0);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0,
			    AMDGPU_VA_OP_MAP);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_cpu_map(buf_handle, &msg);
	CU_ASSERT_EQUAL(r, 0);

	memcpy(msg, uvd_destroy_msg, sizeof(uvd_destroy_msg));
	if (family_id >= AMDGPU_FAMILY_VI)
		((uint8_t*)msg)[0x10] = 7;

	r = amdgpu_bo_cpu_unmap(buf_handle);
	CU_ASSERT_EQUAL(r, 0);

	num_resources = 0;
	resources[num_resources++] = buf_handle;
	resources[num_resources++] = ib_handle;

	i = 0;
	uvd_cmd(va, 0x0, &i);
	for (; i % 16; ++i)
		ib_cpu[i] = 0x80000000;

	r = submit(i, AMDGPU_HW_IP_UVD);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0, AMDGPU_VA_OP_UNMAP);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_va_range_free(va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_free(buf_handle);
	CU_ASSERT_EQUAL(r, 0);
}
Example No. 3
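/* Wrap user memory in a winsys buffer: import the pointer as a BO, allocate
 * a page-aligned VA range, map it, and fill in the amdgpu_winsys_bo
 * bookkeeping; on failure the error paths unwind in reverse order. */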
static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
					    void *pointer, uint64_t size)
{
    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
    amdgpu_bo_handle buf_handle;
    struct amdgpu_winsys_bo *bo;
    uint64_t va;
    amdgpu_va_handle va_handle;

    bo = CALLOC_STRUCT(amdgpu_winsys_bo);
    if (!bo)
        return NULL;

    if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
        goto error;

    if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                              size, 1 << 12, 0, &va, &va_handle, 0))
        goto error_va_alloc;

    if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
        goto error_va_map;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->bo = buf_handle;
    bo->base.alignment = 0;
    bo->base.size = size;
    bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
    bo->ws = ws;
    bo->user_ptr = pointer;
    bo->va = va;
    bo->va_handle = va_handle;
    bo->initial_domain = RADEON_DOMAIN_GTT;
    bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

    ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);

    amdgpu_add_buffer_to_global_list(bo);

    return (struct pb_buffer*)bo;

error_va_map:
    amdgpu_va_range_free(va_handle);

error_va_alloc:
    amdgpu_bo_free(buf_handle);

error:
    FREE(bo);
    return NULL;
}
Example No. 4
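/* Suite setup: initialize the device, allocate an aligned GTT buffer, map it
 * into the GPU address space, and publish the handle and virtual address in
 * the globals used by the BO tests. */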
int suite_bo_tests_init(void)
{
	struct amdgpu_bo_alloc_request req = {0};
	amdgpu_bo_handle buf_handle;
	uint64_t va;
	int r;

	r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				  &minor_version, &device_handle);
	if (r) {
		if ((r == -EACCES) && (errno == EACCES))
			printf("\n\nError:%s. "
				"Hint:Try to run this test program as root.",
				strerror(errno));

		return CUE_SINIT_FAILED;
	}

	req.alloc_size = BUFFER_SIZE;
	req.phys_alignment = BUFFER_ALIGN;
	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
	if (r)
		return CUE_SINIT_FAILED;

	r = amdgpu_va_range_alloc(device_handle,
				  amdgpu_gpu_va_range_general,
				  BUFFER_SIZE, BUFFER_ALIGN, 0,
				  &va, &va_handle, 0);
	if (r)
		goto error_va_alloc;

	r = amdgpu_bo_va_op(buf_handle, 0, BUFFER_SIZE, va, 0, AMDGPU_VA_OP_MAP);
	if (r)
		goto error_va_map;

	buffer_handle = buf_handle;
	virtual_mc_base_address = va;

	return CUE_SUCCESS;

error_va_map:
	amdgpu_va_range_free(va_handle);

error_va_alloc:
	amdgpu_bo_free(buf_handle);
	return CUE_SINIT_FAILED;
}
Example No. 5
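/* Import a shared buffer (GEM flink name or dma-buf fd), query its info,
 * allocate and map a VA range for it, and initialize the winsys BO,
 * accounting its size against VRAM or GTT. */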
static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                               struct winsys_handle *whandle,
                                               unsigned *stride)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   enum amdgpu_bo_handle_type type;
   struct amdgpu_bo_import_result result = {0};
   uint64_t va;
   amdgpu_va_handle va_handle;
   struct amdgpu_bo_info info = {0};
   enum radeon_bo_domain initial = 0;
   int r;

   /* Initialize the structure. */
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   default:
      /* Unknown handle type: free the BO allocated above to avoid leaking it. */
      FREE(bo);
      return NULL;
   }

   r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
   if (r)
      goto error;

   /* Get initial domains. */
   r = amdgpu_bo_query_info(result.buf_handle, &info);
   if (r)
      goto error_query;

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
   if (r)
      goto error_query;

   r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
      initial |= RADEON_DOMAIN_VRAM;
   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
      initial |= RADEON_DOMAIN_GTT;


   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = info.phys_alignment;
   bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
   bo->bo = result.buf_handle;
   bo->base.size = result.alloc_size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->rws = ws;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->is_shared = true;

   if (stride)
      *stride = whandle->stride;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align(bo->base.size, ws->gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align(bo->base.size, ws->gart_page_size);

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_query:
   amdgpu_bo_free(result.buf_handle);

error:
   FREE(bo);
   return NULL;
}
Example No. 6
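/* pb_manager backend: translate the radeon domains and flags into an amdgpu
 * allocation request, allocate the BO and a VA range, map it, and return the
 * initialized winsys buffer. */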
static struct pb_buffer *amdgpu_bomgr_create_bo(struct pb_manager *_mgr,
                                                pb_size size,
                                                const struct pb_desc *desc)
{
   struct amdgpu_winsys *rws = get_winsys(_mgr);
   struct amdgpu_bo_desc *rdesc = (struct amdgpu_bo_desc*)desc;
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
   uint64_t va = 0;
   struct amdgpu_winsys_bo *bo;
   amdgpu_va_handle va_handle;
   int r;

   assert(rdesc->initial_domain & RADEON_DOMAIN_VRAM_GTT);
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   request.alloc_size = size;
   request.phys_alignment = desc->alignment;

   if (rdesc->initial_domain & RADEON_DOMAIN_VRAM) {
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
      if (rdesc->flags & RADEON_FLAG_CPU_ACCESS)
         request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
   }
   if (rdesc->initial_domain & RADEON_DOMAIN_GTT) {
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
      if (rdesc->flags & RADEON_FLAG_GTT_WC)
         request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
   }

   r = amdgpu_bo_alloc(rws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "amdgpu:    size      : %d bytes\n", size);
      fprintf(stderr, "amdgpu:    alignment : %d bytes\n", desc->alignment);
      fprintf(stderr, "amdgpu:    domains   : %d\n", rdesc->initial_domain);
      goto error_bo_alloc;
   }

   r = amdgpu_va_range_alloc(rws->dev, amdgpu_gpu_va_range_general,
                             size, desc->alignment, 0, &va, &va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = desc->alignment;
   bo->base.usage = desc->usage;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->rws = rws;
   bo->bo = buf_handle;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = rdesc->initial_domain;
   bo->unique_id = __sync_fetch_and_add(&rws->next_bo_unique_id, 1);

   if (rdesc->initial_domain & RADEON_DOMAIN_VRAM)
      rws->allocated_vram += align(size, rws->gart_page_size);
   else if (rdesc->initial_domain & RADEON_DOMAIN_GTT)
      rws->allocated_gtt += align(size, rws->gart_page_size);

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error_bo_alloc:
   FREE(bo);
   return NULL;
}
Example No. 7
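/* Userptr test: register page-aligned CPU memory as a BO, map it into the
 * GPU VA space, write a pattern with an SDMA linear-write packet, and verify
 * the pattern appears in the CPU buffer before tearing everything down. */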
static void amdgpu_userptr_test(void)
{
	int i, r, j;
	uint32_t *pm4 = NULL;
	uint64_t bo_mc;
	void *ptr = NULL;
	int pm4_dw = 256;
	int sdma_write_length = 4;
	amdgpu_bo_handle handle;
	amdgpu_context_handle context_handle;
	struct amdgpu_cs_ib_info *ib_info;
	struct amdgpu_cs_request *ibs_request;
	amdgpu_bo_handle buf_handle;
	amdgpu_va_handle va_handle;

	pm4 = calloc(pm4_dw, sizeof(*pm4));
	CU_ASSERT_NOT_EQUAL(pm4, NULL);

	ib_info = calloc(1, sizeof(*ib_info));
	CU_ASSERT_NOT_EQUAL(ib_info, NULL);

	ibs_request = calloc(1, sizeof(*ibs_request));
	CU_ASSERT_NOT_EQUAL(ibs_request, NULL);

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	posix_memalign(&ptr, sysconf(_SC_PAGE_SIZE), BUFFER_SIZE);
	CU_ASSERT_NOT_EQUAL(ptr, NULL);
	memset(ptr, 0, BUFFER_SIZE);

	r = amdgpu_create_bo_from_user_mem(device_handle,
					   ptr, BUFFER_SIZE, &buf_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_va_range_alloc(device_handle,
				  amdgpu_gpu_va_range_general,
				  BUFFER_SIZE, 1, 0, &bo_mc,
				  &va_handle, 0);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_va_op(buf_handle, 0, BUFFER_SIZE, bo_mc, 0, AMDGPU_VA_OP_MAP);
	CU_ASSERT_EQUAL(r, 0);

	handle = buf_handle;

	j = i = 0;
	pm4[i++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
			       SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	pm4[i++] = 0xffffffff & bo_mc;
	pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
	pm4[i++] = sdma_write_length;

	while (j++ < sdma_write_length)
		pm4[i++] = 0xdeadbeaf;

	amdgpu_sdma_test_exec_cs(context_handle, 0,
				 i, pm4,
				 1, &handle,
				 ib_info, ibs_request);
	i = 0;
	while (i < sdma_write_length) {
		CU_ASSERT_EQUAL(((int*)ptr)[i++], 0xdeadbeaf);
	}
	free(ibs_request);
	free(ib_info);
	free(pm4);

	r = amdgpu_bo_va_op(buf_handle, 0, BUFFER_SIZE, bo_mc, 0, AMDGPU_VA_OP_UNMAP);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_va_range_free(va_handle);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_bo_free(buf_handle);
	CU_ASSERT_EQUAL(r, 0);
	free(ptr);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}
Example No. 8
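/* UVD decode test: pack the message, feedback buffer, IT scaling table,
 * bitstream, DPB and decode target into one GTT allocation, patch the
 * message for VI/Polaris parts, submit the decode job, and check the decoded
 * output with a simple byte sum. */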
static void amdgpu_cs_uvd_decode(void)
{
	const unsigned dpb_size = 15923584, ctx_size = 5287680, dt_size = 737280;
	uint64_t msg_addr, fb_addr, bs_addr, dpb_addr, ctx_addr, dt_addr, it_addr;
	struct amdgpu_bo_alloc_request req = {0};
	amdgpu_bo_handle buf_handle;
	amdgpu_va_handle va_handle;
	uint64_t va = 0;
	uint64_t sum;
	uint8_t *ptr;
	int i, r;

	req.alloc_size = 4*1024; /* msg */
	req.alloc_size += 4*1024; /* fb */
	if (family_id >= AMDGPU_FAMILY_VI)
		req.alloc_size += 4096; /*it_scaling_table*/
	req.alloc_size += ALIGN(sizeof(uvd_bitstream), 4*1024);
	req.alloc_size += ALIGN(dpb_size, 4*1024);
	req.alloc_size += ALIGN(dt_size, 4*1024);

	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_va_range_alloc(device_handle,
				  amdgpu_gpu_va_range_general,
				  req.alloc_size, 1, 0, &va,
				  &va_handle, 0);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0,
			    AMDGPU_VA_OP_MAP);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_cpu_map(buf_handle, (void **)&ptr);
	CU_ASSERT_EQUAL(r, 0);

	memcpy(ptr, uvd_decode_msg, sizeof(uvd_decode_msg));
	if (family_id >= AMDGPU_FAMILY_VI) {
		ptr[0x10] = 7;
		ptr[0x98] = 0x00;
		ptr[0x99] = 0x02;
		/* chip polaris10/11 */
		if (chip_id == chip_rev+0x50 || chip_id == chip_rev+0x5A) {
			/*dpb size */
			ptr[0x24] = 0x00;
			ptr[0x25] = 0x94;
			ptr[0x26] = 0x6B;
			ptr[0x27] = 0x00;
			/*ctx size */
			ptr[0x2C] = 0x00;
			ptr[0x2D] = 0xAF;
			ptr[0x2E] = 0x50;
			ptr[0x2F] = 0x00;
		}
	}

	ptr += 4*1024;
	memset(ptr, 0, 4*1024);
	if (family_id >= AMDGPU_FAMILY_VI) {
		ptr += 4*1024;
		memcpy(ptr, uvd_it_scaling_table, sizeof(uvd_it_scaling_table));
	}

	ptr += 4*1024;
	memcpy(ptr, uvd_bitstream, sizeof(uvd_bitstream));

	ptr += ALIGN(sizeof(uvd_bitstream), 4*1024);
	memset(ptr, 0, dpb_size);

	ptr += ALIGN(dpb_size, 4*1024);
	memset(ptr, 0, dt_size);

	num_resources = 0;
	resources[num_resources++] = buf_handle;
	resources[num_resources++] = ib_handle;

	msg_addr = va;
	fb_addr = msg_addr + 4*1024;
	if (family_id >= AMDGPU_FAMILY_VI) {
		it_addr = fb_addr + 4*1024;
		bs_addr = it_addr + 4*1024;
	} else
		bs_addr = fb_addr + 4*1024;
	dpb_addr = ALIGN(bs_addr + sizeof(uvd_bitstream), 4*1024);

	if ((family_id >= AMDGPU_FAMILY_VI) &&
		(chip_id == chip_rev+0x50 || chip_id == chip_rev+0x5A)) {
		ctx_addr = ALIGN(dpb_addr + 0x006B9400, 4*1024);
	}

	dt_addr = ALIGN(dpb_addr + dpb_size, 4*1024);

	i = 0;
	uvd_cmd(msg_addr, 0x0, &i);
	uvd_cmd(dpb_addr, 0x1, &i);
	uvd_cmd(dt_addr, 0x2, &i);
	uvd_cmd(fb_addr, 0x3, &i);
	uvd_cmd(bs_addr, 0x100, &i);
	if (family_id >= AMDGPU_FAMILY_VI) {
		uvd_cmd(it_addr, 0x204, &i);
		if (chip_id == chip_rev+0x50 || chip_id == chip_rev+0x5A)
			uvd_cmd(ctx_addr, 0x206, &i);
	}
	ib_cpu[i++] = 0x3BC6;
	ib_cpu[i++] = 0x1;
	for (; i % 16; ++i)
		ib_cpu[i] = 0x80000000;

	r = submit(i, AMDGPU_HW_IP_UVD);
	CU_ASSERT_EQUAL(r, 0);

	/* TODO: use a real CRC32 */
	for (i = 0, sum = 0; i < dt_size; ++i)
		sum += ptr[i];
	CU_ASSERT_EQUAL(sum, 0x20345d8);

	r = amdgpu_bo_cpu_unmap(buf_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0, AMDGPU_VA_OP_UNMAP);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_va_range_free(va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_free(buf_handle);
	CU_ASSERT_EQUAL(r, 0);
}
Example No. 9
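/* Write the UVD create message into a mapped GTT buffer (patched for
 * VI/Polaris parts), submit it on the UVD ring, then unmap and free the
 * resources. */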
static void amdgpu_cs_uvd_create(void)
{
	struct amdgpu_bo_alloc_request req = {0};
	amdgpu_bo_handle buf_handle;
	uint64_t va = 0;
	amdgpu_va_handle va_handle;
	void *msg;
	int i, r;

	req.alloc_size = 4*1024;
	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_va_range_alloc(device_handle,
				  amdgpu_gpu_va_range_general,
				  4096, 1, 0, &va,
				  &va_handle, 0);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_va_op(buf_handle, 0, 4096, va, 0, AMDGPU_VA_OP_MAP);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_cpu_map(buf_handle, &msg);
	CU_ASSERT_EQUAL(r, 0);

	memcpy(msg, uvd_create_msg, sizeof(uvd_create_msg));
	if (family_id >= AMDGPU_FAMILY_VI) {
		((uint8_t*)msg)[0x10] = 7;
		/* chip polaris 10/11 */
		if (chip_id == chip_rev+0x50 || chip_id == chip_rev+0x5A) {
			/* dpb size */
			((uint8_t*)msg)[0x28] = 0x00;
			((uint8_t*)msg)[0x29] = 0x94;
			((uint8_t*)msg)[0x2A] = 0x6B;
			((uint8_t*)msg)[0x2B] = 0x00;
		}
	}

	r = amdgpu_bo_cpu_unmap(buf_handle);
	CU_ASSERT_EQUAL(r, 0);

	num_resources = 0;
	resources[num_resources++] = buf_handle;
	resources[num_resources++] = ib_handle;

	i = 0;
	uvd_cmd(va, 0x0, &i);
	for (; i % 16; ++i)
		ib_cpu[i] = 0x80000000;

	r = submit(i, AMDGPU_HW_IP_UVD);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_va_op(buf_handle, 0, 4096, va, 0, AMDGPU_VA_OP_UNMAP);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_va_range_free(va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_free(buf_handle);
	CU_ASSERT_EQUAL(r, 0);
}
Example No. 10
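/* Core allocation path: build the request from the domain and flags,
 * allocate the BO and a VA range, map it, initialize the winsys BO, and
 * update the VRAM/GTT accounting. */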
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
                                                 unsigned size,
                                                 unsigned alignment,
                                                 unsigned usage,
                                                 enum radeon_bo_domain initial_domain,
                                                 unsigned flags)
{
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
   uint64_t va = 0;
   struct amdgpu_winsys_bo *bo;
   amdgpu_va_handle va_handle;
   int r;

   assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   pb_cache_init_entry(&ws->bo_cache, &bo->cache_entry, &bo->base);
   request.alloc_size = size;
   request.phys_alignment = alignment;

   if (initial_domain & RADEON_DOMAIN_VRAM) {
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
      if (flags & RADEON_FLAG_CPU_ACCESS)
         request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
   }
   if (initial_domain & RADEON_DOMAIN_GTT) {
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
      if (flags & RADEON_FLAG_GTT_WC)
         request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
   }

   r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "amdgpu:    size      : %d bytes\n", size);
      fprintf(stderr, "amdgpu:    alignment : %d bytes\n", alignment);
      fprintf(stderr, "amdgpu:    domains   : %d\n", initial_domain);
      goto error_bo_alloc;
   }

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, alignment, 0, &va, &va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = alignment;
   bo->base.usage = usage;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->bo = buf_handle;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial_domain;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   if (initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align(size, ws->gart_page_size);
   else if (initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align(size, ws->gart_page_size);

   return bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error_bo_alloc:
   FREE(bo);
   return NULL;
}