static int submit(unsigned ndw, unsigned ip) { struct amdgpu_cs_request ibs_request = {0}; struct amdgpu_cs_ib_info ib_info = {0}; struct amdgpu_cs_fence fence_status = {0}; uint32_t expired; int r; ib_info.ib_mc_address = ib_mc_address; ib_info.size = ndw; ibs_request.ip_type = ip; r = amdgpu_bo_list_create(device_handle, num_resources, resources, NULL, &ibs_request.resources); if (r) return r; ibs_request.number_of_ibs = 1; ibs_request.ibs = &ib_info; ibs_request.fence_info.handle = NULL; r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1); if (r) return r; r = amdgpu_bo_list_destroy(ibs_request.resources); if (r) return r; fence_status.context = context_handle; fence_status.ip_type = ip; fence_status.fence = ibs_request.seq_no; r = amdgpu_cs_query_fence_status(&fence_status, AMDGPU_TIMEOUT_INFINITE, 0, &expired); if (r) return r; return 0; }
static void amdgpu_cs_do_submission(struct amdgpu_cs *cs, struct pipe_fence_handle **out_fence) { struct amdgpu_winsys *ws = cs->ctx->ws; struct pipe_fence_handle *fence; int i, j, r; /* Create a fence. */ fence = amdgpu_fence_create(cs->ctx, cs->request.ip_type, cs->request.ip_instance, cs->request.ring); if (out_fence) amdgpu_fence_reference(out_fence, fence); cs->request.number_of_dependencies = 0; /* Since the kernel driver doesn't synchronize execution between different * rings automatically, we have to add fence dependencies manually. */ pipe_mutex_lock(ws->bo_fence_lock); for (i = 0; i < cs->num_buffers; i++) { for (j = 0; j < RING_LAST; j++) { struct amdgpu_cs_fence *dep; unsigned idx; struct amdgpu_fence *bo_fence = (void *)cs->buffers[i].bo->fence[j]; if (!bo_fence) continue; if (bo_fence->ctx == cs->ctx && bo_fence->fence.ip_type == cs->request.ip_type && bo_fence->fence.ip_instance == cs->request.ip_instance && bo_fence->fence.ring == cs->request.ring) continue; if (amdgpu_fence_wait((void *)bo_fence, 0, false)) continue; idx = cs->request.number_of_dependencies++; if (idx >= cs->max_dependencies) { unsigned size; cs->max_dependencies = idx + 8; size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence); cs->request.dependencies = realloc(cs->request.dependencies, size); } dep = &cs->request.dependencies[idx]; memcpy(dep, &bo_fence->fence, sizeof(*dep)); } } cs->request.fence_info.handle = NULL; if (cs->request.ip_type != AMDGPU_HW_IP_UVD && cs->request.ip_type != AMDGPU_HW_IP_VCE) { cs->request.fence_info.handle = cs->ctx->user_fence_bo; cs->request.fence_info.offset = cs->base.ring_type; } r = amdgpu_cs_submit(cs->ctx->ctx, 0, &cs->request, 1); if (r) { if (r == -ENOMEM) fprintf(stderr, "amdgpu: Not enough memory for command submission.\n"); else fprintf(stderr, "amdgpu: The CS has been rejected, " "see dmesg for more information.\n"); amdgpu_fence_signalled(fence); } else { /* Success. 
*/ uint64_t *user_fence = NULL; if (cs->request.ip_type != AMDGPU_HW_IP_UVD && cs->request.ip_type != AMDGPU_HW_IP_VCE) user_fence = cs->ctx->user_fence_cpu_address_base + cs->request.fence_info.offset; amdgpu_fence_submitted(fence, &cs->request, user_fence); for (i = 0; i < cs->num_buffers; i++) amdgpu_fence_reference(&cs->buffers[i].bo->fence[cs->base.ring_type], fence); } pipe_mutex_unlock(ws->bo_fence_lock); amdgpu_fence_reference(&fence, NULL); }
/*
 * Submit the command stream described in ibs_request on the SDMA ring
 * @instance and wait for the IB to complete.
 *
 * The caller creates/releases pm4_src, resources, ib_info and ibs_request.
 * This function allocates a temporary 4 KiB IB, copies @pm4_dw dwords of
 * PM4 from @pm4_src into it, builds a BO list of the caller's @res_cnt
 * resources plus the IB itself, submits, waits on the fence, and frees
 * the IB again. All failures are reported through CUnit assertions.
 */
static void amdgpu_sdma_test_exec_cs(amdgpu_context_handle context_handle,
				     int instance, int pm4_dw, uint32_t *pm4_src,
				     int res_cnt, amdgpu_bo_handle *resources,
				     struct amdgpu_cs_ib_info *ib_info,
				     struct amdgpu_cs_request *ibs_request)
{
	int r;
	uint32_t expired;
	uint32_t *ring_ptr;
	amdgpu_bo_handle ib_result_handle;
	void *ib_result_cpu;
	uint64_t ib_result_mc_address;
	struct amdgpu_cs_fence fence_status = {0};
	amdgpu_bo_handle *all_res = alloca(sizeof(resources[0]) * (res_cnt + 1));
	amdgpu_va_handle va_handle;

	/* prepare CS */
	CU_ASSERT_NOT_EQUAL(pm4_src, NULL);
	CU_ASSERT_NOT_EQUAL(resources, NULL);
	CU_ASSERT_NOT_EQUAL(ib_info, NULL);
	CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
	/* 1024 dwords = 4 KiB, the size of the IB allocated below. */
	CU_ASSERT_TRUE(pm4_dw <= 1024);

	/* allocate IB */
	r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &ib_result_handle, &ib_result_cpu,
				    &ib_result_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	/* copy PM4 packet to ring from caller */
	ring_ptr = ib_result_cpu;
	memcpy(ring_ptr, pm4_src, pm4_dw * sizeof(*pm4_src));

	ib_info->ib_mc_address = ib_result_mc_address;
	ib_info->size = pm4_dw;

	ibs_request->ip_type = AMDGPU_HW_IP_DMA;
	ibs_request->ring = instance;
	ibs_request->number_of_ibs = 1;
	ibs_request->ibs = ib_info;
	ibs_request->fence_info.handle = NULL;

	/* BO list: the caller's resources plus the IB buffer itself. */
	memcpy(all_res, resources, sizeof(resources[0]) * res_cnt);
	all_res[res_cnt] = ib_result_handle;

	r = amdgpu_bo_list_create(device_handle, res_cnt+1, all_res,
				  NULL, &ibs_request->resources);
	CU_ASSERT_EQUAL(r, 0);
	/* Fix: dropped the redundant CU_ASSERT_NOT_EQUAL(ibs_request, NULL)
	 * here — it duplicated the entry precondition and ran after
	 * ibs_request had already been dereferenced repeatedly. */

	/* submit CS */
	r = amdgpu_cs_submit(context_handle, 0, ibs_request, 1);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_list_destroy(ibs_request->resources);
	CU_ASSERT_EQUAL(r, 0);

	fence_status.ip_type = AMDGPU_HW_IP_DMA;
	fence_status.ring = ibs_request->ring;
	fence_status.context = context_handle;
	fence_status.fence = ibs_request->seq_no;

	/* wait for IB accomplished */
	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE,
					 0, &expired);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_EQUAL(expired, true);

	r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
				     ib_result_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);
}
static void amdgpu_command_submission_compute(void) { amdgpu_context_handle context_handle; amdgpu_bo_handle ib_result_handle; void *ib_result_cpu; uint64_t ib_result_mc_address; struct amdgpu_cs_request ibs_request; struct amdgpu_cs_ib_info ib_info; struct amdgpu_cs_fence fence_status; uint32_t *ptr; uint32_t expired; int i, r, instance; amdgpu_bo_list_handle bo_list; amdgpu_va_handle va_handle; r = amdgpu_cs_ctx_create(device_handle, &context_handle); CU_ASSERT_EQUAL(r, 0); for (instance = 0; instance < 8; instance++) { r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096, AMDGPU_GEM_DOMAIN_GTT, 0, &ib_result_handle, &ib_result_cpu, &ib_result_mc_address, &va_handle); CU_ASSERT_EQUAL(r, 0); r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL, &bo_list); CU_ASSERT_EQUAL(r, 0); ptr = ib_result_cpu; for (i = 0; i < 16; ++i) ptr[i] = 0xffff1000; memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info)); ib_info.ib_mc_address = ib_result_mc_address; ib_info.size = 16; memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request)); ibs_request.ip_type = AMDGPU_HW_IP_COMPUTE; ibs_request.ring = instance; ibs_request.number_of_ibs = 1; ibs_request.ibs = &ib_info; ibs_request.resources = bo_list; ibs_request.fence_info.handle = NULL; memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence)); r = amdgpu_cs_submit(context_handle, 0,&ibs_request, 1); CU_ASSERT_EQUAL(r, 0); fence_status.context = context_handle; fence_status.ip_type = AMDGPU_HW_IP_COMPUTE; fence_status.ring = instance; fence_status.fence = ibs_request.seq_no; r = amdgpu_cs_query_fence_status(&fence_status, AMDGPU_TIMEOUT_INFINITE, 0, &expired); CU_ASSERT_EQUAL(r, 0); r = amdgpu_bo_list_destroy(bo_list); CU_ASSERT_EQUAL(r, 0); r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle, ib_result_mc_address, 4096); CU_ASSERT_EQUAL(r, 0); } r = amdgpu_cs_ctx_free(context_handle); CU_ASSERT_EQUAL(r, 0); }
static void amdgpu_command_submission_gfx_shared_ib(void) { amdgpu_context_handle context_handle; amdgpu_bo_handle ib_result_handle; void *ib_result_cpu; uint64_t ib_result_mc_address; struct amdgpu_cs_request ibs_request = {0}; struct amdgpu_cs_ib_info ib_info[2]; struct amdgpu_cs_fence fence_status = {0}; uint32_t *ptr; uint32_t expired; amdgpu_bo_list_handle bo_list; amdgpu_va_handle va_handle; int r; r = amdgpu_cs_ctx_create(device_handle, &context_handle); CU_ASSERT_EQUAL(r, 0); r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096, AMDGPU_GEM_DOMAIN_GTT, 0, &ib_result_handle, &ib_result_cpu, &ib_result_mc_address, &va_handle); CU_ASSERT_EQUAL(r, 0); r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL, &bo_list); CU_ASSERT_EQUAL(r, 0); memset(ib_info, 0, 2 * sizeof(struct amdgpu_cs_ib_info)); /* IT_SET_CE_DE_COUNTERS */ ptr = ib_result_cpu; ptr[0] = 0xc0008900; ptr[1] = 0; ptr[2] = 0xc0008400; ptr[3] = 1; ib_info[0].ib_mc_address = ib_result_mc_address; ib_info[0].size = 4; ib_info[0].flags = AMDGPU_IB_FLAG_CE; ptr = (uint32_t *)ib_result_cpu + 4; ptr[0] = 0xc0008600; ptr[1] = 0x00000001; ib_info[1].ib_mc_address = ib_result_mc_address + 16; ib_info[1].size = 2; ibs_request.ip_type = AMDGPU_HW_IP_GFX; ibs_request.number_of_ibs = 2; ibs_request.ibs = ib_info; ibs_request.resources = bo_list; ibs_request.fence_info.handle = NULL; r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1); CU_ASSERT_EQUAL(r, 0); fence_status.context = context_handle; fence_status.ip_type = AMDGPU_HW_IP_GFX; fence_status.fence = ibs_request.seq_no; r = amdgpu_cs_query_fence_status(&fence_status, AMDGPU_TIMEOUT_INFINITE, 0, &expired); CU_ASSERT_EQUAL(r, 0); r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle, ib_result_mc_address, 4096); CU_ASSERT_EQUAL(r, 0); r = amdgpu_bo_list_destroy(bo_list); CU_ASSERT_EQUAL(r, 0); r = amdgpu_cs_ctx_free(context_handle); CU_ASSERT_EQUAL(r, 0); }
/* Exercise amdgpu semaphores in two scenarios:
 *   1) signal/wait within one context across different engines (SDMA -> GFX);
 *   2) signal/wait on the same engine (GFX) across two contexts.
 * Each scenario submits a one-dword NOP IB on the signalling side, inserts a
 * semaphore signal/wait pair, submits a NOP IB on the waiting side, then
 * waits (bounded timeout) for the second submission's fence to signal. */
static void amdgpu_semaphore_test(void)
{
	amdgpu_context_handle context_handle[2];
	amdgpu_semaphore_handle sem;
	amdgpu_bo_handle ib_result_handle[2];
	void *ib_result_cpu[2];
	uint64_t ib_result_mc_address[2];
	struct amdgpu_cs_request ibs_request[2] = {0};
	struct amdgpu_cs_ib_info ib_info[2] = {0};
	struct amdgpu_cs_fence fence_status = {0};
	uint32_t *ptr;
	uint32_t expired;
	amdgpu_bo_list_handle bo_list[2];
	amdgpu_va_handle va_handle[2];
	int r, i;

	r = amdgpu_cs_create_semaphore(&sem);
	CU_ASSERT_EQUAL(r, 0);

	/* Set up two contexts, each with its own mapped IB buffer and BO list. */
	for (i = 0; i < 2; i++) {
		r = amdgpu_cs_ctx_create(device_handle, &context_handle[i]);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
					    AMDGPU_GEM_DOMAIN_GTT, 0,
					    &ib_result_handle[i], &ib_result_cpu[i],
					    &ib_result_mc_address[i], &va_handle[i]);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_get_bo_list(device_handle, ib_result_handle[i],
				       NULL, &bo_list[i]);
		CU_ASSERT_EQUAL(r, 0);
	}

	/* 1. same context different engine */
	/* Signalling side: a single SDMA NOP on context 0. */
	ptr = ib_result_cpu[0];
	ptr[0] = SDMA_NOP;
	ib_info[0].ib_mc_address = ib_result_mc_address[0];
	ib_info[0].size = 1;

	ibs_request[0].ip_type = AMDGPU_HW_IP_DMA;
	ibs_request[0].number_of_ibs = 1;
	ibs_request[0].ibs = &ib_info[0];
	ibs_request[0].resources = bo_list[0];
	ibs_request[0].fence_info.handle = NULL;
	r = amdgpu_cs_submit(context_handle[0], 0, &ibs_request[0], 1);
	CU_ASSERT_EQUAL(r, 0);

	/* Signal on the SDMA ring, wait on the GFX ring of the same context. */
	r = amdgpu_cs_signal_semaphore(context_handle[0], AMDGPU_HW_IP_DMA, 0, 0, sem);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_wait_semaphore(context_handle[0], AMDGPU_HW_IP_GFX, 0, 0, sem);
	CU_ASSERT_EQUAL(r, 0);

	/* Waiting side: a single GFX NOP on the same context. */
	ptr = ib_result_cpu[1];
	ptr[0] = GFX_COMPUTE_NOP;
	ib_info[1].ib_mc_address = ib_result_mc_address[1];
	ib_info[1].size = 1;

	ibs_request[1].ip_type = AMDGPU_HW_IP_GFX;
	ibs_request[1].number_of_ibs = 1;
	ibs_request[1].ibs = &ib_info[1];
	ibs_request[1].resources = bo_list[1];
	ibs_request[1].fence_info.handle = NULL;

	r = amdgpu_cs_submit(context_handle[0], 0, &ibs_request[1], 1);
	CU_ASSERT_EQUAL(r, 0);

	/* The GFX submission can only retire once the semaphore wait was
	 * released, so a bounded fence wait verifies the handshake worked. */
	fence_status.context = context_handle[0];
	fence_status.ip_type = AMDGPU_HW_IP_GFX;
	fence_status.fence = ibs_request[1].seq_no;
	r = amdgpu_cs_query_fence_status(&fence_status, 500000000, 0, &expired);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_EQUAL(expired, true);

	/* 2. same engine different context */
	/* Signalling side: GFX NOP on context 0. */
	ptr = ib_result_cpu[0];
	ptr[0] = GFX_COMPUTE_NOP;
	ib_info[0].ib_mc_address = ib_result_mc_address[0];
	ib_info[0].size = 1;

	ibs_request[0].ip_type = AMDGPU_HW_IP_GFX;
	ibs_request[0].number_of_ibs = 1;
	ibs_request[0].ibs = &ib_info[0];
	ibs_request[0].resources = bo_list[0];
	ibs_request[0].fence_info.handle = NULL;
	r = amdgpu_cs_submit(context_handle[0], 0, &ibs_request[0], 1);
	CU_ASSERT_EQUAL(r, 0);

	/* Signal from context 0, wait in context 1 — both on GFX ring 0. */
	r = amdgpu_cs_signal_semaphore(context_handle[0], AMDGPU_HW_IP_GFX, 0, 0, sem);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_wait_semaphore(context_handle[1], AMDGPU_HW_IP_GFX, 0, 0, sem);
	CU_ASSERT_EQUAL(r, 0);

	/* Waiting side: GFX NOP submitted on context 1. */
	ptr = ib_result_cpu[1];
	ptr[0] = GFX_COMPUTE_NOP;
	ib_info[1].ib_mc_address = ib_result_mc_address[1];
	ib_info[1].size = 1;

	ibs_request[1].ip_type = AMDGPU_HW_IP_GFX;
	ibs_request[1].number_of_ibs = 1;
	ibs_request[1].ibs = &ib_info[1];
	ibs_request[1].resources = bo_list[1];
	ibs_request[1].fence_info.handle = NULL;
	r = amdgpu_cs_submit(context_handle[1], 0, &ibs_request[1], 1);
	CU_ASSERT_EQUAL(r, 0);

	/* fence_status is reused from scenario 1; context/ip_type/fence are
	 * all overwritten here, the remaining fields are still zero. */
	fence_status.context = context_handle[1];
	fence_status.ip_type = AMDGPU_HW_IP_GFX;
	fence_status.fence = ibs_request[1].seq_no;
	r = amdgpu_cs_query_fence_status(&fence_status, 500000000, 0, &expired);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_EQUAL(expired, true);

	/* Tear down both contexts and their buffers. */
	for (i = 0; i < 2; i++) {
		r = amdgpu_bo_unmap_and_free(ib_result_handle[i], va_handle[i],
					     ib_result_mc_address[i], 4096);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_bo_list_destroy(bo_list[i]);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_cs_ctx_free(context_handle[i]);
		CU_ASSERT_EQUAL(r, 0);
	}

	r = amdgpu_cs_destroy_semaphore(sem);
	CU_ASSERT_EQUAL(r, 0);
}