示例#1
0
int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
		    struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs *
			 AMDGPU_MAX_RINGS, GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) *
			amdgpu_sched_jobs * i;
	}
	if (amdgpu_enable_scheduler) {
		/* create context entity for each ring */
		for (i = 0; i < adev->num_rings; i++) {
			struct amd_sched_rq *rq;
			if (pri >= AMD_SCHED_MAX_PRIORITY) {
				kfree(ctx->fences);
				return -EINVAL;
			}
			rq = &adev->rings[i]->sched.sched_rq[pri];
			r = amd_sched_entity_init(&adev->rings[i]->sched,
						  &ctx->rings[i].entity,
						  rq, amdgpu_sched_jobs);
			if (r)
				break;
		}

		if (i < adev->num_rings) {
			for (j = 0; j < i; j++)
				amd_sched_entity_fini(&adev->rings[j]->sched,
						      &ctx->rings[j].entity);
			kfree(ctx->fences);
			return r;
		}
	}
	return 0;
}
示例#2
0
/*
 * amdgpu_ctx_fini - tear down a GPU command-submission context
 * @ctx: context to destroy
 *
 * Drops every cached fence reference, releases the fence table, and
 * destroys the per-ring scheduler entities.  A context that was never
 * fully initialized (ctx->adev == NULL) is a no-op.
 */
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned ring, slot;

	if (!adev)
		return;

	/* Release every fence still held in the per-ring windows. */
	for (ring = 0; ring < AMDGPU_MAX_RINGS; ring++) {
		for (slot = 0; slot < amdgpu_sched_jobs; slot++)
			fence_put(ctx->rings[ring].fences[slot]);
	}
	kfree(ctx->fences);
	ctx->fences = NULL;

	/* Destroy the scheduler entity attached to each hardware ring. */
	for (ring = 0; ring < adev->num_rings; ring++)
		amd_sched_entity_fini(&adev->rings[ring]->sched,
				      &ctx->rings[ring].entity);
}
示例#3
0
/*
 * amdgpu_ctx_init - initialize a GPU command-submission context
 * @adev: device the context belongs to
 * @ctx:  caller-provided context to initialize (zeroed here)
 *
 * Allocates the flat fence table, snapshots the GPU reset counter, and
 * creates one normal-priority scheduler entity per hardware ring.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from amd_sched_entity_init().
 */
static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);

	/* One flat allocation; each ring is handed a window of
	 * amdgpu_sched_jobs slots inside it below. */
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct fence*), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);

	/* Create a normal-priority context entity for every ring. */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct amd_sched_rq *rq =
			&ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];

		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
					  rq, amdgpu_sched_jobs);
		if (r)
			goto failed;
	}

	return 0;

failed:
	/* Unwind only the entities that were successfully created. */
	for (j = 0; j < i; j++)
		amd_sched_entity_fini(&adev->rings[j]->sched,
				      &ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}