Example #1
struct spu_context *alloc_spu_context(struct address_space *local_store)
{
	struct spu_context *ctx;
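	/* kmalloc() does not zero the allocation, so every field used
	 * later (ibox_fasync, spu, ...) is initialized explicitly below.
	 */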
	ctx = kmalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to a physical SPU is deferred until spu_activate(). */
	spu_init_csa(&ctx->csa);
	if (!ctx->csa.lscsa)
		goto out_free;
	spin_lock_init(&ctx->mmio_lock);
	kref_init(&ctx->kref);
	init_rwsem(&ctx->state_sema);
	init_MUTEX(&ctx->run_sema);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	ctx->ibox_fasync = NULL;
	ctx->wbox_fasync = NULL;
	ctx->state = SPU_STATE_SAVED;
	ctx->local_store = local_store;
	ctx->spu = NULL;
	ctx->ops = &spu_backing_ops;
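	/* get_task_mm() takes a reference on the creating task's mm;
	 * it is dropped again when the context is torn down.
	 */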
	ctx->owner = get_task_mm(current);
	goto out;
out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}
Example #2
struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
	struct spu_context *ctx;
	struct timespec ts;

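	/* Unlike the kmalloc() in Example #1, kzalloc() zeroes the
	 * allocation, so pointer fields such as ibox_fasync no longer
	 * need explicit NULL assignments.
	 */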
	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to a physical SPU is deferred until spu_activate(). */
	if (spu_init_csa(&ctx->csa))
		goto out_free;
	spin_lock_init(&ctx->mmio_lock);
	mutex_init(&ctx->mapping_lock);
	kref_init(&ctx->kref);
	mutex_init(&ctx->state_mutex);
	mutex_init(&ctx->run_mutex);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	init_waitqueue_head(&ctx->mfc_wq);
	init_waitqueue_head(&ctx->run_wq);
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	ctx->owner = get_task_mm(current);
	INIT_LIST_HEAD(&ctx->rq);
	INIT_LIST_HEAD(&ctx->aff_list);
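	/* Gangs group related contexts for scheduling and affinity
	 * placement; aff_list links this context into that set.
	 */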
	if (gang)
		spu_gang_add_ctx(gang, ctx);

	__spu_update_sched_info(ctx);
	spu_set_timeslice(ctx);
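	/* Begin utilization accounting with a creation timestamp in ns. */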
	ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ktime_get_ts(&ts);
	ctx->stats.tstamp = timespec_to_ns(&ts);

	atomic_inc(&nr_spu_contexts);
	goto out;
out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}
Example #3
struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
	struct spu_context *ctx;
	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to a physical SPU is deferred until spu_activate(). */
	spu_init_csa(&ctx->csa);
	if (!ctx->csa.lscsa)
		goto out_free;
	spin_lock_init(&ctx->mmio_lock);
	kref_init(&ctx->kref);
	mutex_init(&ctx->state_mutex);
	init_MUTEX(&ctx->run_sema);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	init_waitqueue_head(&ctx->mfc_wq);
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	ctx->owner = get_task_mm(current);
	if (gang)
		spu_gang_add_ctx(gang, ctx);
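	/* Inherit the creating task's scheduling parameters and arm the
	 * per-context scheduler tick.
	 */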
	ctx->rt_priority = current->rt_priority;
	ctx->policy = current->policy;
	ctx->prio = current->prio;
	INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
	goto out;
out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}
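All three variants return NULL on failure and a context holding one kref on success. A minimal caller-side sketch of the resulting lifetime contract, assuming the spufs helper put_spu_context() (a kref_put() wrapper on ctx->kref, as in the kernel's spufs code); spufs_new_context_example() itself is a hypothetical wrapper, not a function from the source above:

/* Hypothetical caller: allocate a context, hand it off, then drop
 * the reference taken by kref_init() in alloc_spu_context().
 */
static int spufs_new_context_example(struct spu_gang *gang)
{
	struct spu_context *ctx;

	ctx = alloc_spu_context(gang);
	if (!ctx)
		return -ENOMEM;	/* allocation is the only failure mode */

	/* ... attach ctx to files or a runner, taking extra refs ... */

	put_spu_context(ctx);	/* drops our ref; frees ctx if it was the last */
	return 0;
}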