/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @ring: ring the IB is associated with (its device owns the suballocator)
 * @vm: optional VM the IB will run under; when non-NULL the GPU address is
 *      left unset here (presumably mapped later through the VM — confirm
 *      against the submit path)
 * @size: requested IB size in bytes; 0 skips the suballocation entirely
 * @ib: IB object to initialize (returned to the caller)
 *
 * Request an IB (all asics). IBs are allocated from the device-wide
 * suballocator (ring_tmp_bo) with 256-byte alignment.
 * Returns 0 on success, negative error code on allocation failure.
 */
int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	if (size) {
		/* 256-byte alignment for the IB start address */
		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
				     &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

		/* without a VM the suballocator's GPU address is used directly */
		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	amdgpu_sync_create(&ib->sync);

	ib->ring = ring;
	ib->vm = vm;

	return 0;
}
/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu device owning the suballocator
 * @vm: optional VM the IB will run under; when non-NULL the GPU address is
 *      left unset here (presumably mapped later through the VM — confirm
 *      against the submit path)
 * @size: requested IB size in bytes; 0 skips the suballocation entirely
 * @ib: IB object to initialize (returned to the caller)
 *
 * Request an IB (all asics). IBs are allocated from the device-wide
 * suballocator (ring_tmp_bo) with 256-byte alignment.
 *
 * NOTE(review): this file also contains a ring-based variant of the same
 * function; the two appear to come from different API generations and
 * cannot coexist in one translation unit — verify which one is current.
 *
 * Returns 0 on success, negative error code on allocation failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		/* 256-byte alignment for the IB start address */
		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
				     &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

		/* without a VM the suballocator's GPU address is used directly */
		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	return 0;
}
int amdgpu_semaphore_create(struct amdgpu_device *adev, struct amdgpu_semaphore **semaphore) { int r; *semaphore = kmalloc(sizeof(struct amdgpu_semaphore), GFP_KERNEL); if (*semaphore == NULL) { return -ENOMEM; } r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, &(*semaphore)->sa_bo, 8, 8); if (r) { kfree(*semaphore); *semaphore = NULL; return r; } (*semaphore)->waiters = 0; (*semaphore)->gpu_addr = amdgpu_sa_bo_gpu_addr((*semaphore)->sa_bo); *((uint64_t *)amdgpu_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; return 0; }