Example #1
/**
 * radeon_irq_reset_work_func - execute gpu reset
 *
 * @work: work struct
 *
 * Execute scheduled gpu reset (cayman+).
 * This function is called when the irq handler
 * thinks we need a gpu reset.
 */
static void radeon_irq_reset_work_func(struct work_struct *work)
{
	struct radeon_device *rdev = container_of(work, struct radeon_device,
						  reset_work);

	radeon_gpu_reset(rdev);
}
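For context, reset_work is embedded in struct radeon_device and must be initialized before interrupts are enabled. A minimal sketch of the wiring, assuming the standard Linux workqueue API; the init site (radeon_irq_kms_init) and the schedule_work() call from the irq handler are assumptions modeled on the mainline radeon driver:

/* at init time, e.g. in radeon_irq_kms_init() (assumed site): */
INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);

/* later, from the irq handler, once a lockup is suspected: */
schedule_work(&rdev->reset_work);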
Example #2
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
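This helper encodes the driver's lockup convention: fence waits return -EDEADLK when a lockup is detected, the helper performs the reset, and a successful reset is reported to userspace as -EAGAIN so it resubmits the command stream. A hypothetical call site, only to show the pattern (Example #5 below shows the real ones):

/* funnel any CS-path error through the helper, so a detected
 * lockup becomes a reset plus -EAGAIN toward userspace: */
r = radeon_cs_ib_chunk(rdev, &parser);
if (r)
	return radeon_cs_handle_lockup(rdev, r);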
Example #3
/**
 * radeon_fence_wait_empty_locked - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * The function returns nothing; failures are reported via dev_err().
 * Caller must hold ring lock.
 */
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];

	while (1) {
		int r;

		r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
		if (r == -EDEADLK) {
			mutex_unlock(&rdev->ring_lock);
			r = radeon_gpu_reset(rdev);
			mutex_lock(&rdev->ring_lock);
			if (!r)
				continue;
		}
		if (r) {
			dev_err(rdev->dev,
				"error waiting for ring to become idle (%d)\n", r);
		}
		return;
	}
}
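Note the locking contract: the caller holds ring_lock, and the function drops it only around radeon_gpu_reset() before retrying the wait. A sketch of a typical caller, loosely modeled on the driver's suspend path; the loop over all rings is an assumption for illustration:

int i;

mutex_lock(&rdev->ring_lock);
for (i = 0; i < RADEON_NUM_RINGS; i++) {
	/* skip rings whose fence driver was never brought up */
	if (!rdev->fence_drv[i].initialized)
		continue;
	radeon_fence_wait_empty_locked(rdev, i);
}
mutex_unlock(&rdev->ring_lock);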
Example #4
/**
 * radeon_irq_reset_work_func - execute gpu reset
 *
 * @work: work struct
 *
 * Execute scheduled gpu reset (cayman+).
 * This function is called when the irq handler
 * thinks we need a gpu reset.
 */
static void radeon_irq_reset_work_func(void *arg, int pending)
{
	struct radeon_device *rdev = arg;

	radeon_gpu_reset(rdev);
}
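This variant is the FreeBSD/DragonFly port of Example #1: the (void *arg, int pending) signature is the taskqueue(9) callback type, so reset_work here would be a struct task rather than a work_struct. A minimal sketch of the wiring, assuming the standard taskqueue(9) API; the enqueue site and the field name are assumptions:

/* at init time (struct task field name reset_work assumed): */
TASK_INIT(&rdev->reset_work, 0, radeon_irq_reset_work_func, rdev);

/* from the interrupt handler, once a lockup is suspected: */
taskqueue_enqueue(taskqueue_thread, &rdev->reset_work);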
Example #5
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	lockmgr(&rdev->exclusive_lock, LK_EXCLUSIVE);
	if (!rdev->accel_working) {
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		return -EBUSY;
	}
	if (rdev->in_reset) {
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
		return r;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

#if TRACE_TODO
	trace_radeon_cs(&parser);
#endif

	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r)
		goto out;
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
out:
	radeon_cs_parser_fini(&parser, r, true);
	lockmgr(&rdev->exclusive_lock, LK_RELEASE);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}
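For completeness, the ioctl is exposed through the driver's DRM ioctl table. A sketch using the standard DRM_IOCTL_DEF_DRV macro; the exact flags are an assumption modeled on the mainline radeon driver:

/* entry in the driver's ioctl table (flags assumed): */
DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH | DRM_RENDER_ALLOW),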