/**
 * radeon_ib_get - request an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the IB is associated with
 * @ib: IB object returned
 * @vm: requested vm
 * @size: requested IB size
 *
 * Request an IB (all asics). IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, struct radeon_vm *vm,
		  unsigned size)
{
	int i, r;

	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
	if (r) {
		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
		return r;
	}

	r = radeon_semaphore_create(rdev, &ib->semaphore);
	if (r) {
		return r;
	}

	ib->ring = ring;
	ib->fence = NULL;
	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
	ib->vm = vm;
	if (vm) {
		/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
		 * space and soffset is the offset inside the pool bo
		 */
		ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
	} else {
		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
	}
	ib->is_const_ib = false;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		ib->sync_to[i] = NULL;

	return 0;
}
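For context, a minimal sketch of the usual caller pattern around this version of radeon_ib_get(): allocate, fill, schedule, wait, free. It is loosely modeled on the driver's IB-test helpers of the same era; the helper name is hypothetical, the PACKET2(0) word is just a placeholder NOP-style packet from the asic headers, and error handling is trimmed.

/*
 * Hedged sketch, not verbatim driver code: the allocate/fill/schedule/
 * wait/free cycle around radeon_ib_get(). Passing a NULL vm means the
 * IB gets its GPU address straight from the suballocator.
 */
static int example_submit_nop_ib(struct radeon_device *rdev, int ring)
{
	struct radeon_ib ib;
	int r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, 256);
	if (r)
		return r;

	ib.ptr[0] = PACKET2(0);	/* placeholder; real tests write a scratch reg */
	ib.length_dw = 1;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (!r)
		/* scheduling emitted a fence into ib.fence; wait on it */
		r = radeon_fence_wait(ib.fence, false);

	radeon_ib_free(rdev, &ib);
	return r;
}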
/**
 * radeon_ib_get - request an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the IB is associated with
 * @ib: IB object returned
 * @size: requested IB size
 *
 * Request an IB (all asics). IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, unsigned size)
{
	int i, r;

	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
	if (r) {
		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
		return r;
	}

	r = radeon_semaphore_create(rdev, &ib->semaphore);
	if (r) {
		return r;
	}

	ib->ring = ring;
	ib->fence = NULL;
	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
	ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
	ib->vm_id = 0;
	ib->is_const_ib = false;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		ib->sync_to[i] = NULL;

	return 0;
}
int radeon_semaphore_create(struct radeon_device *rdev,
			    struct radeon_semaphore **semaphore)
{
	uint64_t *cpu_addr;
	int i, r;

	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
	if (*semaphore == NULL) {
		return -ENOMEM;
	}
	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
			     &(*semaphore)->sa_bo, 8 * RADEON_NUM_SYNCS, 8);
	if (r) {
		kfree(*semaphore);
		*semaphore = NULL;
		return r;
	}
	(*semaphore)->waiters = 0;
	(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);

	cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
	for (i = 0; i < RADEON_NUM_SYNCS; ++i)
		cpu_addr[i] = 0;

	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		(*semaphore)->sync_to[i] = NULL;

	return 0;
}
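Worth noting in this version: the suballocated buffer now holds RADEON_NUM_SYNCS consecutive 64-bit semaphore words rather than a single one, and all of them are zeroed from the CPU before use. The address of an individual sync point therefore follows from simple 8-byte-stride arithmetic, which the hypothetical helper below makes explicit (it is not a function in the driver; the real code does the equivalent inline):

/*
 * Illustration only: derive the GPU address of one 64-bit semaphore
 * slot inside the suballocated block. This helper does not exist in
 * the driver.
 */
static inline uint64_t example_semaphore_slot_addr(struct radeon_semaphore *sem,
						   unsigned slot)
{
	/* each sync point is a u64, hence the 8-byte stride */
	return sem->gpu_addr + 8ULL * slot;
}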
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, unsigned size)
{
	int r;

	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
	if (r) {
		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
		return r;
	}
	r = radeon_fence_create(rdev, &ib->fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r);
		radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
		return r;
	}

	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
	ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
	ib->vm_id = 0;
	ib->is_const_ib = false;
	ib->semaphore = NULL;

	return 0;
}
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib **ib, unsigned size)
{
	struct radeon_fence *fence;
	unsigned cretry = 0;
	int r = 0, i, idx;

	*ib = NULL;
	/* align size on 256 bytes */
	size = ALIGN(size, 256);

	r = radeon_fence_create(rdev, &fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	idx = rdev->ib_pool.head_id;
retry:
	if (cretry > 5) {
		dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
		radeon_mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -ENOMEM;
	}
	cretry++;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
		if (rdev->ib_pool.ibs[idx].fence == NULL) {
			r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
					     &rdev->ib_pool.ibs[idx].sa_bo,
					     size, 256);
			if (!r) {
				*ib = &rdev->ib_pool.ibs[idx];
				(*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
				(*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
				(*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
				(*ib)->gpu_addr += (*ib)->sa_bo.offset;
				(*ib)->fence = fence;
				(*ib)->vm_id = 0;
				(*ib)->is_const_ib = false;
				/* ib are most likely to be allocated in a ring fashion
				 * thus rdev->ib_pool.head_id should be the id of the
				 * oldest ib
				 */
				rdev->ib_pool.head_id = (1 + idx);
				rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
				radeon_mutex_unlock(&rdev->ib_pool.mutex);
				return 0;
			}
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	/* this should be a rare event, ie all ib scheduled none signaled yet.
	 */
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		if (rdev->ib_pool.ibs[idx].fence &&
		    rdev->ib_pool.ibs[idx].fence->emitted) {
			r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
			if (!r)
				goto retry;
			/* an error happened */
			break;
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
	radeon_fence_unref(&fence);
	return r;
}
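Both the head_id advance and the idx walk above rely on RADEON_IB_POOL_SIZE being a power of two, so that masking with (size - 1) behaves like a modulo and wraps the index around the pool. A standalone sketch of that wrap, with an assumed pool size for illustration:

#include <stdio.h>

/* assumed value for illustration; must be a power of two,
 * like RADEON_IB_POOL_SIZE in the driver */
#define EXAMPLE_POOL_SIZE 16

int main(void)
{
	unsigned idx = EXAMPLE_POOL_SIZE - 2;
	int i;

	/* masking with (size - 1) wraps the walk: 14, 15, 0, 1 */
	for (i = 0; i < 4; i++) {
		printf("%u\n", idx);
		idx = (idx + 1) & (EXAMPLE_POOL_SIZE - 1);
	}
	return 0;
}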
int radeon_semaphore_create(struct radeon_device *rdev,
			    struct radeon_semaphore **semaphore)
{
	int r;

	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
	if (*semaphore == NULL) {
		return -ENOMEM;
	}
	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
			     &(*semaphore)->sa_bo, 8, 8, true);
	if (r) {
		kfree(*semaphore);
		*semaphore = NULL;
		return r;
	}
	(*semaphore)->waiters = 0;
	(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
	*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
	return 0;
}
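For context, the single 64-bit word allocated here backs a GPU-to-GPU handshake: one ring emits a signal packet on the semaphore address and another emits a wait on the same address. A hedged sketch of that pairing follows; radeon_semaphore_emit_signal() and radeon_semaphore_emit_wait() do exist in the driver, but their exact signatures varied across kernel versions, so treat the calls below as an assumption, and note that ring locking and the eventual radeon_semaphore_free() are omitted.

/*
 * Hedged sketch, not verbatim driver code: order ring 'waiter' after
 * ring 'signaler' with one semaphore. Assumes void-returning emit
 * helpers; ring lock/commit around the emits and freeing the semaphore
 * once a fence guarantees the GPU is done are omitted for brevity.
 */
static int example_order_rings(struct radeon_device *rdev,
			       int signaler, int waiter)
{
	struct radeon_semaphore *sem;
	int r;

	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;

	/* emitted into each ring's command stream, not executed on the CPU */
	radeon_semaphore_emit_signal(rdev, signaler, sem);
	radeon_semaphore_emit_wait(rdev, waiter, sem);
	return 0;
}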