static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
				    uint64_t saddr, uint64_t daddr,
				    int flag, int n)
{
	unsigned long start_jiffies;
	unsigned long end_jiffies;
	struct radeon_fence *fence = NULL;
	int i, r;

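	/* time n back-to-back copies of size bytes; returns elapsed wall-clock
	 * time in milliseconds, or a negative error code */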
	start_jiffies = jiffies;
	for (i = 0; i < n; i++) {
		switch (flag) {
		case RADEON_BENCHMARK_COPY_DMA:
			fence = radeon_copy_dma(rdev, saddr, daddr,
						size / RADEON_GPU_PAGE_SIZE,
						NULL);
			break;
		case RADEON_BENCHMARK_COPY_BLIT:
			fence = radeon_copy_blit(rdev, saddr, daddr,
						 size / RADEON_GPU_PAGE_SIZE,
						 NULL);
			break;
		default:
			DRM_ERROR("Unknown copy method\n");
			return -EINVAL;
		}
		if (IS_ERR(fence))
			return PTR_ERR(fence);

		r = radeon_fence_wait(fence, false);
		radeon_fence_unref(&fence);
		if (r)
			return r;
	}
	end_jiffies = jiffies;
	return jiffies_to_msecs(end_jiffies - start_jiffies);
}
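
/*
 * Hypothetical helper (not part of the driver): one way a caller could turn
 * the millisecond result of radeon_benchmark_do_move() into a KiB/s figure,
 * using the same arithmetic as the older benchmark loop further below.
 */
static unsigned radeon_benchmark_kibps(unsigned n, unsigned size, int time_ms)
{
	if (time_ms <= 0)
		return 0;
	/* n copies of size bytes, >> 10 for KiB, per millisecond, then per second */
	return (((n * size) >> 10) / time_ms) * 1000;
}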
/* TTM sync-object wait callback; sync_obj is really a struct radeon_fence */
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
				bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}
void radeon_test_ring_sync2(struct radeon_device *rdev,
			    struct radeon_ring *ringA,
			    struct radeon_ring *ringB,
			    struct radeon_ring *ringC)
{
	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
	struct radeon_semaphore *semaphore = NULL;
	int ridxA = radeon_ring_index(rdev, ringA);
	int ridxB = radeon_ring_index(rdev, ringB);
	int ridxC = radeon_ring_index(rdev, ringC);
	bool sigA, sigB;
	int i, r;

	r = radeon_fence_create(rdev, &fenceA, ridxA);
	if (r) {
		DRM_ERROR("Failed to create sync fence 1\n");
		goto out_cleanup;
	}
	r = radeon_fence_create(rdev, &fenceB, ridxB);
	if (r) {
		DRM_ERROR("Failed to create sync fence 2\n");
		goto out_cleanup;
	}

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ridxA);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
	radeon_fence_emit(rdev, fenceA);
	radeon_ring_unlock_commit(rdev, ringA);

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ridxB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
	radeon_fence_emit(rdev, fenceB);
	radeon_ring_unlock_commit(rdev, ringB);

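	/* both rings are now blocked on the semaphore; after a second neither
	 * fence should have signaled yet */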
	mdelay(1000);

	if (radeon_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (radeon_fence_signaled(fenceB)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

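	/* a single signal from ring C should release exactly one of the two
	 * waiting rings */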
	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
	radeon_ring_unlock_commit(rdev, ringC);

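	/* poll for up to three seconds until one of the fences signals */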
	for (i = 0; i < 30; ++i) {
		mdelay(100);
		sigA = radeon_fence_signaled(fenceA);
		sigB = radeon_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fence A and B has been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
	radeon_ring_unlock_commit(rdev, ringC);

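	/* the second signal releases the remaining waiter, so both fences must
	 * now be able to complete */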
	mdelay(1000);

	r = radeon_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = radeon_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	if (semaphore)
		radeon_semaphore_free(rdev, semaphore);

	if (fenceA)
		radeon_fence_unref(&fenceA);

	if (fenceB)
		radeon_fence_unref(&fenceB);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}
/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
void radeon_test_moves(struct radeon_device *rdev)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	struct radeon_fence *fence = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned i, n, size;
	int r;

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		n -= rdev->ring[i].ring_size;
	if (rdev->wb.wb_obj)
		n -= RADEON_GPU_PAGE_SIZE;
	if (rdev->ih.ring_obj)
		n -= rdev->ih.ring_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				&vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_cleanup;
	}
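	/* walk the GTT one test-sized object at a time, round-tripping a
	 * pattern through VRAM and back */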
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
					 RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_cleanup;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_cleanup;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_cleanup;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_cleanup;
		}

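		/* seed every pointer-sized slot with its own address so the
		 * copy can be verified on the other side */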
		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);

		r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
		if (r) {
			DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
			goto out_cleanup;
		}

		r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, fence);
		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_cleanup;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_cleanup;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_cleanup;
		}

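		/* check that VRAM now holds the GTT pattern, reseeding it with
		 * the VRAM addresses for the return trip */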
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)gtt_start - gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_cleanup;
			}
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

		r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
		if (r) {
			DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
			goto out_cleanup;
		}

		r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, fence);
		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_cleanup;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_cleanup;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_cleanup;
		}

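		/* verify the return copy; vram_start is only compared as a
		 * pointer value here, never dereferenced */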
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)vram_start - vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_cleanup;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - rdev->mc.gtt_start);
	}

out_cleanup:
	if (vram_obj) {
		if (radeon_bo_is_reserved(vram_obj)) {
			radeon_bo_unpin(vram_obj);
			radeon_bo_unreserve(vram_obj);
		}
		radeon_bo_unref(&vram_obj);
	}
	if (gtt_obj) {
		for (i = 0; i < n; i++) {
			if (gtt_obj[i]) {
				if (radeon_bo_is_reserved(gtt_obj[i])) {
					radeon_bo_unpin(gtt_obj[i]);
					radeon_bo_unreserve(gtt_obj[i]);
				}
				radeon_bo_unref(&gtt_obj[i]);
			}
		}
		kfree(gtt_obj);
	}
	if (fence) {
		radeon_fence_unref(&fence);
	}
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}
void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *ringA,
			   struct radeon_ring *ringB)
{
	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
	struct radeon_semaphore *semaphore = NULL;
	int ridxA = radeon_ring_index(rdev, ringA);
	int ridxB = radeon_ring_index(rdev, ringB);
	int r;

	r = radeon_fence_create(rdev, &fence1, ridxA);
	if (r) {
		DRM_ERROR("Failed to create sync fence 1\n");
		goto out_cleanup;
	}
	r = radeon_fence_create(rdev, &fence2, ridxA);
	if (r) {
		DRM_ERROR("Failed to create sync fence 2\n");
		goto out_cleanup;
	}

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

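	/* queue two semaphore waits on ring A, each followed by a fence, so
	 * ring B has to signal the semaphore twice */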
	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ridxA);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
	radeon_fence_emit(rdev, fence1);
	radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
	radeon_fence_emit(rdev, fence2);
	radeon_ring_unlock_commit(rdev, ringA);

	mdelay(1000);

	if (radeon_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
	radeon_ring_unlock_commit(rdev, ringB);

	r = radeon_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	mdelay(1000);

	if (radeon_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
	radeon_ring_unlock_commit(rdev, ringB);

	r = radeon_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

out_cleanup:
	if (semaphore)
		radeon_semaphore_free(rdev, semaphore);

	if (fence1)
		radeon_fence_unref(&fence1);

	if (fence2)
		radeon_fence_unref(&fence2);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}
/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r, ring;

	switch (flag) {
	case RADEON_TEST_COPY_DMA:
		ring = radeon_copy_dma_ring_index(rdev);
		break;
	case RADEON_TEST_COPY_BLIT:
		ring = radeon_copy_blit_ring_index(rdev);
		break;
	default:
		DRM_ERROR("Unknown copy method\n");
		return;
	}

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		n -= rdev->ring[i].ring_size;
	if (rdev->wb.wb_obj)
		n -= RADEON_GPU_PAGE_SIZE;
	if (rdev->ih.ring_obj)
		n -= rdev->ih.ring_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;
		struct radeon_fence *fence = NULL;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		else
			r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)gtt_start - gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		else
			r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)vram_start - vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - rdev->mc.gtt_start);
		continue;

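	/* per-iteration error unwind: fall through the labels to release what
	 * this iteration acquired, then tear down earlier objects and stop */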
out_lclean_unpin:
		radeon_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		radeon_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		radeon_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			radeon_bo_unpin(gtt_obj[i]);
			radeon_bo_unreserve(gtt_obj[i]);
			radeon_bo_unref(&gtt_obj[i]);
		}
		if (fence)
			radeon_fence_unref(&fence);
		break;
	}

	radeon_bo_unpin(vram_obj);
out_unres:
	radeon_bo_unreserve(vram_obj);
out_unref:
	radeon_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}
Example #7
/**
 * radeon_semaphore_sync_rings - sync ring to all registered fences
 *
 * @rdev: radeon_device pointer
 * @semaphore: semaphore object to use for sync
 * @ring: ring that needs sync
 *
 * Ensure that all registered fences are signaled before letting
 * the ring continue. The caller must hold the ring lock.
 */
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
				struct radeon_semaphore *semaphore,
				int ring)
{
	unsigned count = 0;
	int i, r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_fence *fence = semaphore->sync_to[i];

		/* check if we really need to sync */
		if (!radeon_fence_need_sync(fence, ring))
			continue;

		/* prevent GPU deadlocks */
		if (!rdev->ring[i].ready) {
			dev_err(rdev->dev, "Syncing to a disabled ring!\n");
			return -EINVAL;
		}

		if (++count > RADEON_NUM_SYNCS) {
			/* not enough room, wait manually */
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		/* allocate enough space for sync command */
		r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
		if (r) {
			return r;
		}

		/* emit the signal semaphore */
		if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
			/* signaling wasn't successful, wait manually */
			radeon_ring_undo(&rdev->ring[i]);
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		/* we assume caller has already allocated space on waiters ring */
		if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
			/* waiting wasn't successful, wait manually */
			radeon_ring_undo(&rdev->ring[i]);
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		radeon_ring_commit(rdev, &rdev->ring[i], false);
		radeon_fence_note_sync(fence, ring);

		semaphore->gpu_addr += 8;
	}

	return 0;
}
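
/*
 * Hypothetical caller sketch (not driver code): submission paths of this era
 * first registered each dependency with radeon_semaphore_sync_to(), which
 * records the fence in semaphore->sync_to[fence->ring], and then called
 * radeon_semaphore_sync_rings() with the target ring's lock held.
 */
static int example_sync_before_emit(struct radeon_device *rdev,
				    struct radeon_semaphore *sem,
				    struct radeon_fence **deps, int ring)
{
	int i;

	/* deps[] entries may be NULL; radeon_semaphore_sync_to() ignores those */
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		radeon_semaphore_sync_to(sem, deps[i]);

	/* caller must already hold the lock of the ring that will wait */
	return radeon_semaphore_sync_rings(rdev, sem, ring);
}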
Example #8
void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
			   unsigned sdomain, unsigned ddomain)
{
	struct radeon_bo *dobj = NULL;
	struct radeon_bo *sobj = NULL;
	struct radeon_fence *fence = NULL;
	uint64_t saddr, daddr;
	unsigned long start_jiffies;
	unsigned long end_jiffies;
	unsigned long time;
	unsigned i, n, size;
	int r;

	size = bsize;
	n = 1024;
	r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj);
	if (r) {
		goto out_cleanup;
	}
	r = radeon_bo_reserve(sobj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = radeon_bo_pin(sobj, sdomain, &saddr);
	radeon_bo_unreserve(sobj);
	if (r) {
		goto out_cleanup;
	}
	r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj);
	if (r) {
		goto out_cleanup;
	}
	r = radeon_bo_reserve(dobj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = radeon_bo_pin(dobj, ddomain, &daddr);
	radeon_bo_unreserve(dobj);
	if (r) {
		goto out_cleanup;
	}

	/* r100 doesn't have dma engine so skip the test */
	if (rdev->asic->copy_dma) {

		start_jiffies = jiffies;
		for (i = 0; i < n; i++) {
			r = radeon_fence_create(rdev, &fence);
			if (r) {
				goto out_cleanup;
			}

			r = radeon_copy_dma(rdev, saddr, daddr,
					size / RADEON_GPU_PAGE_SIZE, fence);

			if (r) {
				goto out_cleanup;
			}
			r = radeon_fence_wait(fence, false);
			if (r) {
				goto out_cleanup;
			}
			radeon_fence_unref(&fence);
		}
		end_jiffies = jiffies;
		time = end_jiffies - start_jiffies;
		time = jiffies_to_msecs(time);
		if (time > 0) {
			i = ((n * size) >> 10) / time;
			printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
					" %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
					n, size >> 10,
					sdomain, ddomain, time,
					i, i * 1000, (i * 1000) / 1024);
		}
Example #9
void radeon_test_ring_sync(struct radeon_device *rdev,
                           struct radeon_ring *ringA,
                           struct radeon_ring *ringB)
{
    struct radeon_fence *fence1 = NULL, *fence2 = NULL;
    struct radeon_semaphore *semaphore = NULL;
    int r;

    r = radeon_semaphore_create(rdev, &semaphore);
    if (r) {
        DRM_ERROR("Failed to create semaphore\n");
        goto out_cleanup;
    }

    r = radeon_ring_lock(rdev, ringA, 64);
    if (r) {
        DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
        goto out_cleanup;
    }
    radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
    r = radeon_fence_emit(rdev, &fence1, ringA->idx);
    if (r) {
        DRM_ERROR("Failed to emit fence 1\n");
        radeon_ring_unlock_undo(rdev, ringA);
        goto out_cleanup;
    }
    radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
    r = radeon_fence_emit(rdev, &fence2, ringA->idx);
    if (r) {
        DRM_ERROR("Failed to emit fence 2\n");
        radeon_ring_unlock_undo(rdev, ringA);
        goto out_cleanup;
    }
    radeon_ring_unlock_commit(rdev, ringA);

    DRM_MDELAY(1000);

    if (radeon_fence_signaled(fence1)) {
        DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
        goto out_cleanup;
    }

    r = radeon_ring_lock(rdev, ringB, 64);
    if (r) {
        DRM_ERROR("Failed to lock ring B %p\n", ringB);
        goto out_cleanup;
    }
    radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
    radeon_ring_unlock_commit(rdev, ringB);

    r = radeon_fence_wait(fence1, false);
    if (r) {
        DRM_ERROR("Failed to wait for sync fence 1\n");
        goto out_cleanup;
    }

    DRM_MDELAY(1000);

    if (radeon_fence_signaled(fence2)) {
        DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
        goto out_cleanup;
    }

    r = radeon_ring_lock(rdev, ringB, 64);
    if (r) {
        DRM_ERROR("Failed to lock ring B %p\n", ringB);
        goto out_cleanup;
    }
    radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
    radeon_ring_unlock_commit(rdev, ringB);

    r = radeon_fence_wait(fence2, false);
    if (r) {
        DRM_ERROR("Failed to wait for sync fence 1\n");
        goto out_cleanup;
    }

out_cleanup:
    radeon_semaphore_free(rdev, &semaphore, NULL);

    if (fence1)
        radeon_fence_unref(&fence1);

    if (fence2)
        radeon_fence_unref(&fence2);

    if (r)
        DRM_ERROR("Error while testing ring sync (%d).\n", r);
}
Example #10
void radeon_test_moves(struct radeon_device *rdev)
{
	struct radeon_object *vram_obj = NULL;
	struct radeon_object **gtt_obj = NULL;
	struct radeon_fence *fence = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned i, n, size;
	int r;

	size = 1024 * 1024;

	
	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffer) / test size
	 */
	n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
	     rdev->cp.ring_size)) / size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
				 false, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}

	r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_cleanup;
	}

	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;

		r = radeon_object_create(rdev, NULL, size, true,
					 RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_cleanup;
		}

		r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_cleanup;
		}

		r = radeon_object_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_cleanup;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_object_kunmap(gtt_obj[i]);

		r = radeon_fence_create(rdev, &fence);
		if (r) {
			DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
			goto out_cleanup;
		}

		r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, fence);
		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_cleanup;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_cleanup;
		}

		radeon_fence_unref(&fence);

		r = radeon_object_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_cleanup;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT map 0x%p-0x%p)\n",
					  i, *vram_start, gtt_start, gtt_map,
					  gtt_end);
				radeon_object_kunmap(vram_obj);
				goto out_cleanup;
			}
			*vram_start = vram_start;
		}

		radeon_object_kunmap(vram_obj);

		r = radeon_fence_create(rdev, &fence);
		if (r) {
			DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
			goto out_cleanup;
		}

		r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, fence);
		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_cleanup;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_cleanup;
		}

		radeon_fence_unref(&fence);

		r = radeon_object_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_cleanup;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM map 0x%p-0x%p)\n",
					  i, *gtt_start, vram_start, vram_map,
					  vram_end);
				radeon_object_kunmap(gtt_obj[i]);
				goto out_cleanup;
			}
		}

		radeon_object_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - rdev->mc.gtt_location);
	}

out_cleanup:
	if (vram_obj) {
		radeon_object_unpin(vram_obj);
		radeon_object_unref(&vram_obj);
	}
	if (gtt_obj) {
		for (i = 0; i < n; i++) {
			if (gtt_obj[i]) {
				radeon_object_unpin(gtt_obj[i]);
				radeon_object_unref(&gtt_obj[i]);
			}
		}
		kfree(gtt_obj);
	}
	if (fence) {
		radeon_fence_unref(&fence);
	}
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}
Example #11
static void radeon_test_ring_sync2(struct radeon_device *rdev,
			    struct radeon_ring *ringA,
			    struct radeon_ring *ringB,
			    struct radeon_ring *ringC)
{
	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
	struct radeon_semaphore *semaphore = NULL;
	bool sigA, sigB;
	int i, r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);
	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	msleep(1000);

	if (radeon_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (radeon_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	for (i = 0; i < 30; ++i) {
		msleep(100);
		sigA = radeon_fence_signaled(fenceA);
		sigB = radeon_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fence A and B has been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	msleep(1000);

	r = radeon_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = radeon_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fenceA)
		radeon_fence_unref(&fenceA);

	if (fenceB)
		radeon_fence_unref(&fenceB);

	if (r)
		pr_warn("Error while testing ring sync (%d)\n", r);
}