static int submit(unsigned ndw, unsigned ip)
{
    struct amdgpu_cs_request ibs_request = {0};
    struct amdgpu_cs_ib_info ib_info = {0};
    struct amdgpu_cs_fence fence_status = {0};
    uint32_t expired;
    int r;

    ib_info.ib_mc_address = ib_mc_address;
    ib_info.size = ndw;

    ibs_request.ip_type = ip;

    r = amdgpu_bo_list_create(device_handle, num_resources, resources,
                              NULL, &ibs_request.resources);
    if (r)
        return r;

    ibs_request.number_of_ibs = 1;
    ibs_request.ibs = &ib_info;
    ibs_request.fence_info.handle = NULL;

    r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
    if (r)
        return r;

    r = amdgpu_bo_list_destroy(ibs_request.resources);
    if (r)
        return r;

    fence_status.context = context_handle;
    fence_status.ip_type = ip;
    fence_status.fence = ibs_request.seq_no;

    r = amdgpu_cs_query_fence_status(&fence_status,
                                     AMDGPU_TIMEOUT_INFINITE,
                                     0, &expired);
    if (r)
        return r;

    return 0;
}
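This helper deliberately leaves allocation to the caller: it assumes file-scope device_handle, context_handle, ib_mc_address, resources[] and num_resources have already been set up. A minimal sketch of that setup, using the same libdrm calls as the tests below (amdgpu_cs_ctx_create, amdgpu_bo_alloc_and_map) and assuming device_handle was already initialized elsewhere via amdgpu_device_initialize, could look like the following; the setup_ib() name and the 16-DW NOP IB are illustrative only:

static amdgpu_device_handle device_handle;   /* assumed initialized elsewhere */
static amdgpu_context_handle context_handle;
static amdgpu_bo_handle resources[1];
static unsigned num_resources;
static uint64_t ib_mc_address;

/* Hypothetical setup for submit(): create a context, allocate a small IB in
 * GTT, fill it with type-3 NOPs and publish it as the only resource. */
static int setup_ib(void)
{
    amdgpu_bo_handle ib_handle;
    amdgpu_va_handle va_handle;
    void *ib_cpu;
    uint32_t *ptr;
    int i, r;

    r = amdgpu_cs_ctx_create(device_handle, &context_handle);
    if (r)
        return r;

    r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
                                AMDGPU_GEM_DOMAIN_GTT, 0,
                                &ib_handle, &ib_cpu,
                                &ib_mc_address, &va_handle);
    if (r)
        return r;

    ptr = ib_cpu;
    for (i = 0; i < 16; ++i)
        ptr[i] = 0xffff1000;    /* type-3 NOP, as in the compute test below */

    resources[0] = ib_handle;
    num_resources = 1;

    /* e.g. submit(16, AMDGPU_HW_IP_GFX) would now submit the NOP IB */
    return 0;
}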
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *filp)
{
    const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
    struct amdgpu_device *adev = dev->dev_private;
    struct amdgpu_fpriv *fpriv = filp->driver_priv;
    union drm_amdgpu_bo_list *args = data;
    uint32_t handle = args->in.list_handle;
    const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);

    struct drm_amdgpu_bo_list_entry *info;
    struct amdgpu_bo_list *list;

    int r;

    info = kvmalloc_array(args->in.bo_number,
                          sizeof(struct drm_amdgpu_bo_list_entry), GFP_KERNEL);
    if (!info)
        return -ENOMEM;

    /* copy the handle array from userspace to a kernel buffer */
    r = -EFAULT;
    if (likely(info_size == args->in.bo_info_size)) {
        unsigned long bytes = args->in.bo_number *
            args->in.bo_info_size;

        if (copy_from_user(info, uptr, bytes))
            goto error_free;
    } else {
        unsigned long bytes = min(args->in.bo_info_size, info_size);
        unsigned i;

        memset(info, 0, args->in.bo_number * info_size);
        for (i = 0; i < args->in.bo_number; ++i) {
            if (copy_from_user(&info[i], uptr, bytes))
                goto error_free;

            uptr += args->in.bo_info_size;
        }
    }

    switch (args->in.operation) {
    case AMDGPU_BO_LIST_OP_CREATE:
        r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
                                  &handle);
        if (r)
            goto error_free;
        break;

    case AMDGPU_BO_LIST_OP_DESTROY:
        amdgpu_bo_list_destroy(fpriv, handle);
        handle = 0;
        break;

    case AMDGPU_BO_LIST_OP_UPDATE:
        r = -ENOENT;
        list = amdgpu_bo_list_get(fpriv, handle);
        if (!list)
            goto error_free;

        r = amdgpu_bo_list_set(adev, filp, list, info,
                               args->in.bo_number);
        amdgpu_bo_list_put(list);
        if (r)
            goto error_free;

        break;

    default:
        r = -EINVAL;
        goto error_free;
    }

    memset(args, 0, sizeof(*args));
    args->out.list_handle = handle;
    kvfree(info);

    return 0;

error_free:
    kvfree(info);
    return r;
}
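On the userspace side, libdrm's amdgpu_bo_list_create() reaches this handler through the DRM_AMDGPU_BO_LIST ioctl. A minimal sketch of a direct caller is given below, assuming an already-open render-node fd and two existing GEM handles (fd, bo0 and bo1 are placeholders); it fills the same drm_amdgpu_bo_list args fields the handler above reads:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

/* Sketch only: create a BO list from two GEM handles via the raw ioctl. */
static int create_bo_list(int fd, uint32_t bo0, uint32_t bo1,
                          uint32_t *list_handle)
{
    struct drm_amdgpu_bo_list_entry entries[2];
    union drm_amdgpu_bo_list args;
    int r;

    memset(entries, 0, sizeof(entries));
    entries[0].bo_handle = bo0;
    entries[1].bo_handle = bo1;

    memset(&args, 0, sizeof(args));
    args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
    args.in.bo_number = 2;
    args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
    args.in.bo_info_ptr = (uintptr_t)entries;

    r = drmCommandWriteRead(fd, DRM_AMDGPU_BO_LIST, &args, sizeof(args));
    if (r)
        return r;

    *list_handle = args.out.list_handle;
    return 0;
}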
static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
                            unsigned flags,
                            struct pipe_fence_handle **fence,
                            uint32_t cs_trace_id)
{
    struct amdgpu_cs *cs = amdgpu_cs(rcs);
    struct amdgpu_winsys *ws = cs->ctx->ws;

    switch (cs->base.ring_type) {
    case RING_DMA:
        /* pad DMA ring to 8 DWs */
        while (rcs->cdw & 7)
            OUT_CS(&cs->base, 0x00000000); /* NOP packet */
        break;
    case RING_GFX:
        /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
        while (rcs->cdw & 7)
            OUT_CS(&cs->base, 0xffff1000); /* type3 nop packet */
        break;
    case RING_UVD:
        while (rcs->cdw & 15)
            OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
        break;
    default:
        break;
    }

    if (rcs->cdw > rcs->max_dw) {
        fprintf(stderr, "amdgpu: command stream overflowed\n");
    }

    amdgpu_cs_add_buffer(rcs, (void*)cs->big_ib_winsys_buffer,
                         RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

    /* If the CS is not empty or overflowed.... */
    if (cs->base.cdw && cs->base.cdw <= cs->base.max_dw &&
        !debug_get_option_noop()) {
        int r;

        r = amdgpu_bo_list_create(ws->dev, cs->num_buffers,
                                  cs->handles, cs->flags,
                                  &cs->request.resources);
        if (r) {
            fprintf(stderr, "amdgpu: resource list creation failed (%d)\n", r);
            cs->request.resources = NULL;
            goto cleanup;
        }

        cs->ib.size = cs->base.cdw;
        cs->used_ib_space += cs->base.cdw * 4;

        amdgpu_cs_do_submission(cs, fence);

        /* Cleanup. */
        if (cs->request.resources)
            amdgpu_bo_list_destroy(cs->request.resources);
    }

cleanup:
    amdgpu_cs_context_cleanup(cs);
    amdgpu_get_new_ib(cs);

    ws->num_cs_flushes++;
}
/*
 * The caller must create and release pm4_src, resources, ib_info and
 * ibs_request.  This helper submits the command stream described in
 * ibs_request and waits for the IB to complete.
 */
static void amdgpu_sdma_test_exec_cs(amdgpu_context_handle context_handle,
                                     int instance, int pm4_dw, uint32_t *pm4_src,
                                     int res_cnt, amdgpu_bo_handle *resources,
                                     struct amdgpu_cs_ib_info *ib_info,
                                     struct amdgpu_cs_request *ibs_request)
{
    int r;
    uint32_t expired;
    uint32_t *ring_ptr;
    amdgpu_bo_handle ib_result_handle;
    void *ib_result_cpu;
    uint64_t ib_result_mc_address;
    struct amdgpu_cs_fence fence_status = {0};
    amdgpu_bo_handle *all_res = alloca(sizeof(resources[0]) * (res_cnt + 1));
    amdgpu_va_handle va_handle;

    /* prepare CS */
    CU_ASSERT_NOT_EQUAL(pm4_src, NULL);
    CU_ASSERT_NOT_EQUAL(resources, NULL);
    CU_ASSERT_NOT_EQUAL(ib_info, NULL);
    CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
    CU_ASSERT_TRUE(pm4_dw <= 1024);

    /* allocate IB */
    r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
                                AMDGPU_GEM_DOMAIN_GTT, 0,
                                &ib_result_handle, &ib_result_cpu,
                                &ib_result_mc_address, &va_handle);
    CU_ASSERT_EQUAL(r, 0);

    /* copy PM4 packet to ring from caller */
    ring_ptr = ib_result_cpu;
    memcpy(ring_ptr, pm4_src, pm4_dw * sizeof(*pm4_src));

    ib_info->ib_mc_address = ib_result_mc_address;
    ib_info->size = pm4_dw;

    ibs_request->ip_type = AMDGPU_HW_IP_DMA;
    ibs_request->ring = instance;
    ibs_request->number_of_ibs = 1;
    ibs_request->ibs = ib_info;
    ibs_request->fence_info.handle = NULL;

    memcpy(all_res, resources, sizeof(resources[0]) * res_cnt);
    all_res[res_cnt] = ib_result_handle;

    r = amdgpu_bo_list_create(device_handle, res_cnt + 1, all_res,
                              NULL, &ibs_request->resources);
    CU_ASSERT_EQUAL(r, 0);

    CU_ASSERT_NOT_EQUAL(ibs_request, NULL);

    /* submit CS */
    r = amdgpu_cs_submit(context_handle, 0, ibs_request, 1);
    CU_ASSERT_EQUAL(r, 0);

    r = amdgpu_bo_list_destroy(ibs_request->resources);
    CU_ASSERT_EQUAL(r, 0);

    fence_status.ip_type = AMDGPU_HW_IP_DMA;
    fence_status.ring = ibs_request->ring;
    fence_status.context = context_handle;
    fence_status.fence = ibs_request->seq_no;

    /* wait for IB accomplished */
    r = amdgpu_cs_query_fence_status(&fence_status,
                                     AMDGPU_TIMEOUT_INFINITE,
                                     0, &expired);
    CU_ASSERT_EQUAL(r, 0);
    CU_ASSERT_EQUAL(expired, true);

    r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
                                 ib_result_mc_address, 4096);
    CU_ASSERT_EQUAL(r, 0);
}
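As the header comment notes, the caller owns pm4_src, resources, ib_info and ibs_request. A minimal caller is sketched below under the same CUnit conventions as the surrounding tests; the sdma_nop_example name and the 4 KiB placeholder buffer are illustrative, and SDMA_NOP is the NOP opcode also used by the semaphore test later in this section:

/* Sketch: drive amdgpu_sdma_test_exec_cs() with a single SDMA NOP packet. */
static void sdma_nop_example(void)
{
    struct amdgpu_cs_ib_info ib_info = {0};
    struct amdgpu_cs_request ibs_request = {0};
    amdgpu_context_handle context_handle;
    amdgpu_bo_handle buf_handle;
    amdgpu_va_handle va_handle;
    void *buf_cpu;
    uint64_t buf_mc;
    uint32_t pm4[1] = { SDMA_NOP };
    int r;

    r = amdgpu_cs_ctx_create(device_handle, &context_handle);
    CU_ASSERT_EQUAL(r, 0);

    /* one placeholder resource referenced by the submission */
    r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
                                AMDGPU_GEM_DOMAIN_GTT, 0,
                                &buf_handle, &buf_cpu,
                                &buf_mc, &va_handle);
    CU_ASSERT_EQUAL(r, 0);

    amdgpu_sdma_test_exec_cs(context_handle, 0, 1, pm4,
                             1, &buf_handle, &ib_info, &ibs_request);

    r = amdgpu_bo_unmap_and_free(buf_handle, va_handle, buf_mc, 4096);
    CU_ASSERT_EQUAL(r, 0);

    r = amdgpu_cs_ctx_free(context_handle);
    CU_ASSERT_EQUAL(r, 0);
}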
static void amdgpu_command_submission_compute(void)
{
    amdgpu_context_handle context_handle;
    amdgpu_bo_handle ib_result_handle;
    void *ib_result_cpu;
    uint64_t ib_result_mc_address;
    struct amdgpu_cs_request ibs_request;
    struct amdgpu_cs_ib_info ib_info;
    struct amdgpu_cs_fence fence_status;
    uint32_t *ptr;
    uint32_t expired;
    int i, r, instance;
    amdgpu_bo_list_handle bo_list;
    amdgpu_va_handle va_handle;

    r = amdgpu_cs_ctx_create(device_handle, &context_handle);
    CU_ASSERT_EQUAL(r, 0);

    for (instance = 0; instance < 8; instance++) {
        r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
                                    AMDGPU_GEM_DOMAIN_GTT, 0,
                                    &ib_result_handle, &ib_result_cpu,
                                    &ib_result_mc_address, &va_handle);
        CU_ASSERT_EQUAL(r, 0);

        r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
                               &bo_list);
        CU_ASSERT_EQUAL(r, 0);

        ptr = ib_result_cpu;
        for (i = 0; i < 16; ++i)
            ptr[i] = 0xffff1000;

        memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
        ib_info.ib_mc_address = ib_result_mc_address;
        ib_info.size = 16;

        memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
        ibs_request.ip_type = AMDGPU_HW_IP_COMPUTE;
        ibs_request.ring = instance;
        ibs_request.number_of_ibs = 1;
        ibs_request.ibs = &ib_info;
        ibs_request.resources = bo_list;
        ibs_request.fence_info.handle = NULL;

        memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
        r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
        CU_ASSERT_EQUAL(r, 0);

        fence_status.context = context_handle;
        fence_status.ip_type = AMDGPU_HW_IP_COMPUTE;
        fence_status.ring = instance;
        fence_status.fence = ibs_request.seq_no;

        r = amdgpu_cs_query_fence_status(&fence_status,
                                         AMDGPU_TIMEOUT_INFINITE,
                                         0, &expired);
        CU_ASSERT_EQUAL(r, 0);

        r = amdgpu_bo_list_destroy(bo_list);
        CU_ASSERT_EQUAL(r, 0);

        r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
                                     ib_result_mc_address, 4096);
        CU_ASSERT_EQUAL(r, 0);
    }

    r = amdgpu_cs_ctx_free(context_handle);
    CU_ASSERT_EQUAL(r, 0);
}
static void amdgpu_command_submission_gfx_shared_ib(void)
{
    amdgpu_context_handle context_handle;
    amdgpu_bo_handle ib_result_handle;
    void *ib_result_cpu;
    uint64_t ib_result_mc_address;
    struct amdgpu_cs_request ibs_request = {0};
    struct amdgpu_cs_ib_info ib_info[2];
    struct amdgpu_cs_fence fence_status = {0};
    uint32_t *ptr;
    uint32_t expired;
    amdgpu_bo_list_handle bo_list;
    amdgpu_va_handle va_handle;
    int r;

    r = amdgpu_cs_ctx_create(device_handle, &context_handle);
    CU_ASSERT_EQUAL(r, 0);

    r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
                                AMDGPU_GEM_DOMAIN_GTT, 0,
                                &ib_result_handle, &ib_result_cpu,
                                &ib_result_mc_address, &va_handle);
    CU_ASSERT_EQUAL(r, 0);

    r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
                           &bo_list);
    CU_ASSERT_EQUAL(r, 0);

    memset(ib_info, 0, 2 * sizeof(struct amdgpu_cs_ib_info));

    /* IT_SET_CE_DE_COUNTERS */
    ptr = ib_result_cpu;
    ptr[0] = 0xc0008900;
    ptr[1] = 0;
    ptr[2] = 0xc0008400;
    ptr[3] = 1;
    ib_info[0].ib_mc_address = ib_result_mc_address;
    ib_info[0].size = 4;
    ib_info[0].flags = AMDGPU_IB_FLAG_CE;

    ptr = (uint32_t *)ib_result_cpu + 4;
    ptr[0] = 0xc0008600;
    ptr[1] = 0x00000001;
    ib_info[1].ib_mc_address = ib_result_mc_address + 16;
    ib_info[1].size = 2;

    ibs_request.ip_type = AMDGPU_HW_IP_GFX;
    ibs_request.number_of_ibs = 2;
    ibs_request.ibs = ib_info;
    ibs_request.resources = bo_list;
    ibs_request.fence_info.handle = NULL;

    r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
    CU_ASSERT_EQUAL(r, 0);

    fence_status.context = context_handle;
    fence_status.ip_type = AMDGPU_HW_IP_GFX;
    fence_status.fence = ibs_request.seq_no;

    r = amdgpu_cs_query_fence_status(&fence_status,
                                     AMDGPU_TIMEOUT_INFINITE,
                                     0, &expired);
    CU_ASSERT_EQUAL(r, 0);

    r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
                                 ib_result_mc_address, 4096);
    CU_ASSERT_EQUAL(r, 0);

    r = amdgpu_bo_list_destroy(bo_list);
    CU_ASSERT_EQUAL(r, 0);

    r = amdgpu_cs_ctx_free(context_handle);
    CU_ASSERT_EQUAL(r, 0);
}
static void amdgpu_semaphore_test(void)
{
    amdgpu_context_handle context_handle[2];
    amdgpu_semaphore_handle sem;
    amdgpu_bo_handle ib_result_handle[2];
    void *ib_result_cpu[2];
    uint64_t ib_result_mc_address[2];
    struct amdgpu_cs_request ibs_request[2] = {0};
    struct amdgpu_cs_ib_info ib_info[2] = {0};
    struct amdgpu_cs_fence fence_status = {0};
    uint32_t *ptr;
    uint32_t expired;
    amdgpu_bo_list_handle bo_list[2];
    amdgpu_va_handle va_handle[2];
    int r, i;

    r = amdgpu_cs_create_semaphore(&sem);
    CU_ASSERT_EQUAL(r, 0);

    for (i = 0; i < 2; i++) {
        r = amdgpu_cs_ctx_create(device_handle, &context_handle[i]);
        CU_ASSERT_EQUAL(r, 0);

        r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
                                    AMDGPU_GEM_DOMAIN_GTT, 0,
                                    &ib_result_handle[i],
                                    &ib_result_cpu[i],
                                    &ib_result_mc_address[i],
                                    &va_handle[i]);
        CU_ASSERT_EQUAL(r, 0);

        r = amdgpu_get_bo_list(device_handle, ib_result_handle[i],
                               NULL, &bo_list[i]);
        CU_ASSERT_EQUAL(r, 0);
    }

    /* 1. same context, different engine */
    ptr = ib_result_cpu[0];
    ptr[0] = SDMA_NOP;
    ib_info[0].ib_mc_address = ib_result_mc_address[0];
    ib_info[0].size = 1;

    ibs_request[0].ip_type = AMDGPU_HW_IP_DMA;
    ibs_request[0].number_of_ibs = 1;
    ibs_request[0].ibs = &ib_info[0];
    ibs_request[0].resources = bo_list[0];
    ibs_request[0].fence_info.handle = NULL;

    r = amdgpu_cs_submit(context_handle[0], 0, &ibs_request[0], 1);
    CU_ASSERT_EQUAL(r, 0);

    r = amdgpu_cs_signal_semaphore(context_handle[0], AMDGPU_HW_IP_DMA, 0, 0, sem);
    CU_ASSERT_EQUAL(r, 0);

    r = amdgpu_cs_wait_semaphore(context_handle[0], AMDGPU_HW_IP_GFX, 0, 0, sem);
    CU_ASSERT_EQUAL(r, 0);

    ptr = ib_result_cpu[1];
    ptr[0] = GFX_COMPUTE_NOP;
    ib_info[1].ib_mc_address = ib_result_mc_address[1];
    ib_info[1].size = 1;

    ibs_request[1].ip_type = AMDGPU_HW_IP_GFX;
    ibs_request[1].number_of_ibs = 1;
    ibs_request[1].ibs = &ib_info[1];
    ibs_request[1].resources = bo_list[1];
    ibs_request[1].fence_info.handle = NULL;

    r = amdgpu_cs_submit(context_handle[0], 0, &ibs_request[1], 1);
    CU_ASSERT_EQUAL(r, 0);

    fence_status.context = context_handle[0];
    fence_status.ip_type = AMDGPU_HW_IP_GFX;
    fence_status.fence = ibs_request[1].seq_no;

    r = amdgpu_cs_query_fence_status(&fence_status, 500000000, 0, &expired);
    CU_ASSERT_EQUAL(r, 0);
    CU_ASSERT_EQUAL(expired, true);

    /* 2. same engine, different context */
    ptr = ib_result_cpu[0];
    ptr[0] = GFX_COMPUTE_NOP;
    ib_info[0].ib_mc_address = ib_result_mc_address[0];
    ib_info[0].size = 1;

    ibs_request[0].ip_type = AMDGPU_HW_IP_GFX;
    ibs_request[0].number_of_ibs = 1;
    ibs_request[0].ibs = &ib_info[0];
    ibs_request[0].resources = bo_list[0];
    ibs_request[0].fence_info.handle = NULL;

    r = amdgpu_cs_submit(context_handle[0], 0, &ibs_request[0], 1);
    CU_ASSERT_EQUAL(r, 0);

    r = amdgpu_cs_signal_semaphore(context_handle[0], AMDGPU_HW_IP_GFX, 0, 0, sem);
    CU_ASSERT_EQUAL(r, 0);

    r = amdgpu_cs_wait_semaphore(context_handle[1], AMDGPU_HW_IP_GFX, 0, 0, sem);
    CU_ASSERT_EQUAL(r, 0);

    ptr = ib_result_cpu[1];
    ptr[0] = GFX_COMPUTE_NOP;
    ib_info[1].ib_mc_address = ib_result_mc_address[1];
    ib_info[1].size = 1;

    ibs_request[1].ip_type = AMDGPU_HW_IP_GFX;
    ibs_request[1].number_of_ibs = 1;
    ibs_request[1].ibs = &ib_info[1];
    ibs_request[1].resources = bo_list[1];
    ibs_request[1].fence_info.handle = NULL;

    r = amdgpu_cs_submit(context_handle[1], 0, &ibs_request[1], 1);
    CU_ASSERT_EQUAL(r, 0);

    fence_status.context = context_handle[1];
    fence_status.ip_type = AMDGPU_HW_IP_GFX;
    fence_status.fence = ibs_request[1].seq_no;

    r = amdgpu_cs_query_fence_status(&fence_status, 500000000, 0, &expired);
    CU_ASSERT_EQUAL(r, 0);
    CU_ASSERT_EQUAL(expired, true);

    for (i = 0; i < 2; i++) {
        r = amdgpu_bo_unmap_and_free(ib_result_handle[i], va_handle[i],
                                     ib_result_mc_address[i], 4096);
        CU_ASSERT_EQUAL(r, 0);

        r = amdgpu_bo_list_destroy(bo_list[i]);
        CU_ASSERT_EQUAL(r, 0);

        r = amdgpu_cs_ctx_free(context_handle[i]);
        CU_ASSERT_EQUAL(r, 0);
    }

    r = amdgpu_cs_destroy_semaphore(sem);
    CU_ASSERT_EQUAL(r, 0);
}