Example #1
/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
    int i;

    spin_lock_init(&ctx->sste_lock);
    ctx->afu = afu;
    ctx->master = master;
    ctx->pid = NULL; /* Set in start work ioctl */

    /*
     * Allocate the segment table before we put it in the IDR so that we
     * can always access it when dereferenced from IDR. For the same
     * reason, the segment table is only destroyed after the context is
     * removed from the IDR.  Access to this in the IOCTL is protected by
     * Linux filesystem semantics (can't IOCTL until open is complete).
     */
    i = cxl_alloc_sst(ctx);
    if (i)
        return i;

    INIT_WORK(&ctx->fault_work, cxl_handle_fault);

    init_waitqueue_head(&ctx->wq);
    spin_lock_init(&ctx->lock);

    ctx->irq_bitmap = NULL;
    ctx->pending_irq = false;
    ctx->pending_fault = false;
    ctx->pending_afu_err = false;

    /*
     * When we have to destroy all contexts in cxl_context_detach_all() we
     * end up with afu_release_irqs() called from inside a
     * idr_for_each_entry(). Hence we need to make sure that anything
     * dereferenced from this IDR is ok before we allocate the IDR here.
     * This clears out the IRQ ranges to ensure this.
     */
    for (i = 0; i < CXL_IRQ_RANGES; i++)
        ctx->irqs.range[i] = 0;

    mutex_init(&ctx->status_mutex);

    ctx->status = OPENED;

    /*
     * Allocating the IDR entry! We'd better make sure everything that
     * dereferences from it is set up.
     */
    idr_preload(GFP_KERNEL);
    spin_lock(&afu->contexts_lock);
    i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
                  ctx->afu->num_procs, GFP_NOWAIT);
    spin_unlock(&afu->contexts_lock);
    idr_preload_end();
    if (i < 0)
        return i;

    ctx->pe = i;
    ctx->elem = &ctx->afu->spa[i];
    ctx->pe_inserted = false;
    return 0;
}
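
Example #1 combines idr_preload()/idr_preload_end() with a GFP_NOWAIT allocation so the sleeping part of the allocation happens before the spinlock is taken. A minimal sketch of that pattern, assuming hypothetical example_idr/example_lock statics:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

/*
 * Preload outside the lock with GFP_KERNEL (may sleep), then allocate
 * inside the lock with GFP_NOWAIT so idr_alloc() never sleeps there.
 */
static int example_alloc_id(void *ptr, int max_ids)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&example_lock);
	id = idr_alloc(&example_idr, ptr, 0, max_ids, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();

	return id;	/* >= 0: allocated ID, < 0: -errno */
}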
Example #2
/**
 * kdbus_node_link() - link a node into the nodes system
 * @node:	Pointer to the node to initialize
 * @parent:	Pointer to a parent node, may be %NULL
 * @name:	The name of the node (or NULL if root node)
 *
 * This links a node into the hierarchy. This must not be called multiple times.
 * If @parent is NULL, the node becomes a new root node.
 *
 * This call will fail if @name is not unique across all its siblings or if no
 * ID could be allocated. You must not activate a node if linking failed! It is
 * safe to deactivate it, though.
 *
 * Once you linked a node, you must call kdbus_node_deactivate() before you drop
 * the last reference (even if you never activate the node).
 *
 * Return: 0 on success, negative error code otherwise.
 */
int kdbus_node_link(struct kdbus_node *node, struct kdbus_node *parent,
		    const char *name)
{
	int ret;

	if (WARN_ON(node->type != KDBUS_NODE_DOMAIN && !parent))
		return -EINVAL;

	if (WARN_ON(parent && !name))
		return -EINVAL;

	if (name) {
		node->name = kstrdup(name, GFP_KERNEL);
		if (!node->name)
			return -ENOMEM;

		node->hash = kdbus_node_name_hash(name);
	}

	down_write(&kdbus_node_idr_lock);
	ret = idr_alloc(&kdbus_node_idr, node, 1, 0, GFP_KERNEL);
	if (ret >= 0)
		node->id = ret;
	up_write(&kdbus_node_idr_lock);

	if (ret < 0)
		return ret;

	ret = 0;

	if (parent) {
		struct rb_node **n, *prev;

		if (!kdbus_node_acquire(parent))
			return -ESHUTDOWN;

		mutex_lock(&parent->lock);

		n = &parent->children.rb_node;
		prev = NULL;

		while (*n) {
			struct kdbus_node *pos;
			int result;

			pos = kdbus_node_from_rb(*n);
			prev = *n;
			result = kdbus_node_name_compare(node->hash,
							 node->name,
							 pos);
			if (result == 0) {
				ret = -EEXIST;
				goto exit_unlock;
			}

			if (result < 0)
				n = &pos->rb.rb_left;
			else
				n = &pos->rb.rb_right;
		}

		/* add new node and rebalance the tree */
		rb_link_node(&node->rb, prev, n);
		rb_insert_color(&node->rb, &parent->children);
		node->parent = kdbus_node_ref(parent);

exit_unlock:
		mutex_unlock(&parent->lock);
		kdbus_node_release(parent);
	}

	return ret;
}
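
Note the idr_alloc(&kdbus_node_idr, node, 1, 0, GFP_KERNEL) call above: an end argument of 0 means "no upper bound", and starting at 1 keeps ID 0 free as an "unassigned" sentinel. A minimal sketch of those range semantics, with a hypothetical my_idr:

#include <linux/idr.h>

static DEFINE_IDR(my_idr);

/*
 * idr_alloc(idr, ptr, start, end, gfp) returns the lowest free ID in
 * [start, end); end <= 0 removes the upper bound. start = 1 reserves
 * ID 0 so it can mean "no ID assigned".
 */
static int alloc_nonzero_id(void *ptr)
{
	return idr_alloc(&my_idr, ptr, 1, 0, GFP_KERNEL);
}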
Example #3
static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
		    struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ret = assign_hw_id(dev_priv, &ctx->hw_id);
	if (ret) {
		kfree(ctx);
		return ERR_PTR(ret);
	}

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;

		obj = alloc_context_obj(dev_priv, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}

		vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			ret = PTR_ERR(vma);
			goto err_out;
		}

		ctx->engine[RCS].state = vma;
	}

	/* Default context will never have a file_priv */
	ret = DEFAULT_CONTEXT_HANDLE;
	if (file_priv) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	}
	ctx->user_handle = ret;

	ctx->file_priv = file_priv;
	if (file_priv) {
		ctx->pid = get_task_pid(current, PIDTYPE_PID);
		ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
				      current->comm,
				      pid_nr(ctx->pid),
				      ctx->user_handle);
		if (!ctx->name) {
			ret = -ENOMEM;
			goto err_pid;
		}
	}

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	i915_gem_context_set_bannable(ctx);
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template =
		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);

	/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
	 * present or not in use we still need a small bias as ring wraparound
	 * at offset 0 sometimes hangs. No idea why.
	 */
	if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
		ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
	else
		ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;

	return ctx;

err_pid:
	put_pid(ctx->pid);
	idr_remove(&file_priv->context_idr, ctx->user_handle);
err_out:
	context_close(ctx);
	return ERR_PTR(ret);
}
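
The err_pid unwind above shows the key invariant: once idr_alloc() has published the context, any later failure must idr_remove() it before the object is freed, or the IDR is left holding a dangling pointer. A reduced sketch of that pairing; finish_setup() is a hypothetical later setup step:

#include <linux/idr.h>

/* Hypothetical second setup stage that can fail. */
static int finish_setup(void *ctx)
{
	return 0;
}

static int register_ctx(struct idr *idr, void *ctx)
{
	int handle, ret;

	handle = idr_alloc(idr, ctx, 1, 0, GFP_KERNEL);
	if (handle < 0)
		return handle;

	ret = finish_setup(ctx);
	if (ret) {
		/* Unpublish before the caller frees ctx. */
		idr_remove(idr, handle);
		return ret;
	}

	return handle;
}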
Example #4
/**
 * slim_do_transfer() - Process a SLIMbus-messaging transaction
 *
 * @ctrl: Controller handle
 * @txn: Transaction to be sent over SLIMbus
 *
 * Called by the controller to transmit messaging transactions not dealing
 * with Interface/Value elements (e.g. transmitting a message to assign a
 * logical address to a slave device).
 *
 * Return: 0 on success, negative error code otherwise; -ETIMEDOUT if
 *	transmission of this message timed out (e.g. due to bus lines not
 *	being clocked or driven by the controller)
 */
int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	bool need_tid = false, clk_pause_msg = false;
	unsigned long flags;
	int ret, tid, timeout;

	/*
	 * do not vote for runtime-PM if the transactions are part of clock
	 * pause sequence
	 */
	if (ctrl->sched.clk_state == SLIM_CLK_ENTERING_PAUSE &&
		(txn->mt == SLIM_MSG_MT_CORE &&
		 txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
		 txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
		clk_pause_msg = true;

	if (!clk_pause_msg) {
		ret = pm_runtime_get_sync(ctrl->dev);
		if (ctrl->sched.clk_state != SLIM_CLK_ACTIVE) {
			dev_err(ctrl->dev, "ctrl wrong state:%d, ret:%d\n",
				ctrl->sched.clk_state, ret);
			goto slim_xfer_err;
		}
	}

	need_tid = slim_tid_txn(txn->mt, txn->mc);

	if (need_tid) {
		spin_lock_irqsave(&ctrl->txn_lock, flags);
		tid = idr_alloc(&ctrl->tid_idr, txn, 0,
				SLIM_MAX_TIDS, GFP_ATOMIC);
		txn->tid = tid;

		if (!txn->msg->comp)
			txn->comp = &done;
		else
			txn->comp = txn->msg->comp;

		spin_unlock_irqrestore(&ctrl->txn_lock, flags);

		if (tid < 0)
			return tid;
	}

	ret = ctrl->xfer_msg(ctrl, txn);

	if (ret && need_tid && !txn->msg->comp) {
		unsigned long ms = txn->rl + HZ;

		timeout = wait_for_completion_timeout(txn->comp,
						      msecs_to_jiffies(ms));
		if (!timeout) {
			ret = -ETIMEDOUT;
			spin_lock_irqsave(&ctrl->txn_lock, flags);
			idr_remove(&ctrl->tid_idr, tid);
			spin_unlock_irqrestore(&ctrl->txn_lock, flags);
		}
	}

	if (ret)
		dev_err(ctrl->dev, "Tx:MT:0x%x, MC:0x%x, LA:0x%x failed:%d\n",
			txn->mt, txn->mc, txn->la, ret);

slim_xfer_err:
	if (!clk_pause_msg && (!need_tid || ret == -ETIMEDOUT)) {
		/*
		 * remove the runtime-pm vote if this was TX only, or
		 * if there was an error during this transaction
		 */
		pm_runtime_mark_last_busy(ctrl->dev);
		pm_runtime_put_autosuspend(ctrl->dev);
	}
	return ret;
}
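
The tid allocation above runs under spin_lock_irqsave(), so it must use GFP_ATOMIC: no sleeping allocation is allowed with interrupts off. A stripped-down sketch of that step (all names hypothetical):

#include <linux/idr.h>
#include <linux/spinlock.h>

/*
 * Allocate an ID while holding a spinlock with interrupts disabled.
 * GFP_ATOMIC keeps idr_alloc() from sleeping; it may still fail under
 * memory pressure, so the caller must handle a negative return.
 */
static int alloc_tid(struct idr *idr, spinlock_t *lock, void *txn, int max_tids)
{
	unsigned long flags;
	int tid;

	spin_lock_irqsave(lock, flags);
	tid = idr_alloc(idr, txn, 0, max_tids, GFP_ATOMIC);
	spin_unlock_irqrestore(lock, flags);

	return tid;
}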
Example #5
/*
 * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH):
 *
 * Create and attach a fence to the vGEM handle. This fence is then exposed
 * via the dma-buf reservation object and visible to consumers of the exported
 * dma-buf. If the flags contain VGEM_FENCE_WRITE, the fence indicates the
 * vGEM buffer is being written to by the client and is exposed as an exclusive
 * fence, otherwise the fence indicates the client is currently reading from the
 * buffer and all future writes should wait for the client to signal its
 * completion. Note that if a conflicting fence is already on the dma-buf (i.e.
 * an exclusive fence when adding a read, or any fence when adding a write),
 * -EBUSY is reported. Serialisation between operations should be handled
 * by waiting upon the dma-buf.
 *
 * This returns the handle for the new fence that must be signaled within 10
 * seconds (or otherwise it will automatically expire). See
 * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL).
 *
 * If the vGEM handle does not exist, vgem_fence_attach_ioctl returns -ENOENT.
 */
int vgem_fence_attach_ioctl(struct drm_device *dev,
			    void *data,
			    struct drm_file *file)
{
	struct drm_vgem_fence_attach *arg = data;
	struct vgem_file *vfile = file->driver_priv;
	struct reservation_object *resv;
	struct drm_gem_object *obj;
	struct dma_fence *fence;
	int ret;

	if (arg->flags & ~VGEM_FENCE_WRITE)
		return -EINVAL;

	if (arg->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, arg->handle);
	if (!obj)
		return -ENOENT;

	ret = attach_dmabuf(dev, obj);
	if (ret)
		goto err;

	fence = vgem_fence_create(vfile, arg->flags);
	if (!fence) {
		ret = -ENOMEM;
		goto err;
	}

	/* Check for a conflicting fence */
	resv = obj->dma_buf->resv;
	if (!reservation_object_test_signaled_rcu(resv,
						  arg->flags & VGEM_FENCE_WRITE)) {
		ret = -EBUSY;
		goto err_fence;
	}

	/* Expose the fence via the dma-buf */
	ret = 0;
	reservation_object_lock(resv, NULL);
	if (arg->flags & VGEM_FENCE_WRITE)
		reservation_object_add_excl_fence(resv, fence);
	else if ((ret = reservation_object_reserve_shared(resv)) == 0)
		reservation_object_add_shared_fence(resv, fence);
	reservation_object_unlock(resv);

	/* Record the fence in our idr for later signaling */
	if (ret == 0) {
		mutex_lock(&vfile->fence_mutex);
		ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL);
		mutex_unlock(&vfile->fence_mutex);
		if (ret > 0) {
			arg->out_fence = ret;
			ret = 0;
		}
	}
err_fence:
	if (ret) {
		dma_fence_signal(fence);
		dma_fence_put(fence);
	}
err:
	drm_gem_object_put_unlocked(obj);
	return ret;
}
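
The fence registration above allocates handles from 1 upward under a mutex, so a return of 0 can never be a valid handle and userspace can treat it as "no fence". A minimal sketch, with hypothetical names:

#include <linux/idr.h>
#include <linux/mutex.h>

/*
 * Publish @fence in @idr under @lock; IDs start at 1 so 0 stays
 * reserved as an invalid handle for userspace.
 */
static int publish_fence(struct idr *idr, struct mutex *lock, void *fence)
{
	int handle;

	mutex_lock(lock);
	handle = idr_alloc(idr, fence, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);

	return handle;	/* > 0 on success, negative errno on failure */
}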
Example #6
struct drm_i915_gem_object *
kos_gem_fb_object_create(struct drm_device *dev,
                           u32 gtt_offset,
                           u32 size)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct i915_address_space *ggtt = &dev_priv->gtt.base;
    struct drm_i915_gem_object *obj;
    struct drm_mm_node *fb_node;
    struct i915_vma *vma;
    int ret;

    DRM_DEBUG_KMS("creating preallocated framebuffer object: gtt_offset=%x, size=%x\n",
                  gtt_offset, size);

    /* KISS and expect everything to be page-aligned */
    BUG_ON(size & 4095);

    if (WARN_ON(size == 0))
        return NULL;

    fb_node = kzalloc(sizeof(*fb_node), GFP_KERNEL);
    if (!fb_node)
        return NULL;

    fb_node->start = gtt_offset;
    fb_node->size = size;

    obj = _kos_fb_object_create(dev, fb_node);
    if (obj == NULL) {
        DRM_DEBUG_KMS("failed to preallocate framebuffer object\n");
        kfree(fb_node);
        return NULL;
    }

    vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
    if (IS_ERR(vma)) {
        ret = PTR_ERR(vma);
        goto err_out;
    }

    /* To simplify the initialisation sequence between KMS and GTT,
     * we allow construction of the stolen object prior to
     * setting up the GTT space. The actual reservation will occur
     * later.
     */
    vma->node.start = gtt_offset;
    vma->node.size = size;
    if (drm_mm_initialized(&ggtt->mm)) {
        ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
        if (ret) {
            DRM_DEBUG_KMS("failed to allocate framebuffer GTT space\n");
            goto err_vma;
        }
    }

//    obj->has_global_gtt_mapping = 1;

    list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
    list_add_tail(&vma->mm_list, &ggtt->inactive_list);

    mutex_lock(&dev->object_name_lock);
    idr_preload(GFP_KERNEL);

    if (!obj->base.name) {
        ret = idr_alloc(&dev->object_name_idr, &obj->base, 1, 0, GFP_NOWAIT);
        if (ret < 0)
            goto err_gem;

        obj->base.name = ret;

        /* Allocate a reference for the name table.  */
        drm_gem_object_reference(&obj->base);

        DRM_DEBUG_KMS("%s allocate fb name %d\n", __FUNCTION__, obj->base.name );
    }

    idr_preload_end();
    mutex_unlock(&dev->object_name_lock);
    drm_gem_object_unreference(&obj->base);
    return obj;

err_gem:
    idr_preload_end();
    mutex_unlock(&dev->object_name_lock);
err_vma:
    i915_gem_vma_destroy(vma);
err_out:
    kfree(fb_node);
    drm_gem_object_unreference(&obj->base);
    return NULL;
}
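
Note how the err_gem path above still runs idr_preload_end() and drops the mutex: idr_preload() disables preemption, so every exit between it and idr_preload_end() must restore it. A condensed sketch of that discipline (names hypothetical):

#include <linux/idr.h>
#include <linux/mutex.h>

static int name_object(struct idr *idr, struct mutex *lock, void *obj)
{
	int name;

	mutex_lock(lock);
	idr_preload(GFP_KERNEL);

	name = idr_alloc(idr, obj, 1, 0, GFP_NOWAIT);

	/*
	 * Reached on success and failure alike: preemption must be
	 * re-enabled and the lock dropped on every path.
	 */
	idr_preload_end();
	mutex_unlock(lock);

	return name;
}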
Example #7
static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
				     const struct i2c_device_id *id)
{
	struct bq27xxx_device_info *di;
	int ret;
	char *name;
	int num;

	/* Get new ID for the new battery device */
	mutex_lock(&battery_mutex);
	num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
	mutex_unlock(&battery_mutex);
	if (num < 0)
		return num;

	name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%d", id->name, num);
	if (!name)
		goto err_mem;

	di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
	if (!di)
		goto err_mem;

	di->id = num;
	di->dev = &client->dev;
	di->chip = id->driver_data;
	di->name = name;
	di->bus.read = bq27xxx_battery_i2c_read;

	ret = bq27xxx_battery_setup(di);
	if (ret)
		goto err_failed;

	/* Schedule a polling after about 1 min */
	schedule_delayed_work(&di->work, 60 * HZ);

	i2c_set_clientdata(client, di);

	if (client->irq) {
		ret = devm_request_threaded_irq(&client->dev, client->irq,
				NULL, bq27xxx_battery_irq_handler_thread,
				IRQF_ONESHOT,
				di->name, di);
		if (ret) {
			dev_err(&client->dev,
				"Unable to register IRQ %d error %d\n",
				client->irq, ret);
			/* unwind setup and release the allocated ID */
			bq27xxx_battery_teardown(di);
			goto err_failed;
		}
	}

	return 0;

err_mem:
	ret = -ENOMEM;

err_failed:
	mutex_lock(&battery_mutex);
	idr_remove(&battery_id, num);
	mutex_unlock(&battery_mutex);

	return ret;
}
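
Every failure path after the ID is allocated now funnels through the same idr_remove() (the IRQ path originally returned directly and leaked the slot). A sketch of keeping allocation and release symmetric, using hypothetical example_id/example_mutex statics:

#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_IDR(example_id);
static DEFINE_MUTEX(example_mutex);

static int example_get_id(void *dev)
{
	int num;

	mutex_lock(&example_mutex);
	num = idr_alloc(&example_id, dev, 0, 0, GFP_KERNEL);
	mutex_unlock(&example_mutex);

	return num;
}

/* Every failure path after example_get_id() succeeds must call this. */
static void example_put_id(int num)
{
	mutex_lock(&example_mutex);
	idr_remove(&example_id, num);
	mutex_unlock(&example_mutex);
}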
Example #8
static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
			param->handle, param->low_gm_sz, param->high_gm_sz,
			param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&gvt->lock);

	ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);

	intel_vgpu_init_cfg_space(vgpu, param->primary);

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	ret = intel_vgpu_init_display(vgpu, param->resolution);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_execlist(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_gvt_context(vgpu);
	if (ret)
		goto out_clean_execlist;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_shadow_ctx;

	vgpu->active = true;
	mutex_unlock(&gvt->lock);

	return vgpu;

out_clean_shadow_ctx:
	intel_vgpu_clean_gvt_context(vgpu);
out_clean_execlist:
	intel_vgpu_clean_execlist(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	mutex_unlock(&gvt->lock);
	return ERR_PTR(ret);
}
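
The vgpu allocation above bounds the range to [1, GVT_MAX_VGPU), so the returned ID can safely index fixed-size per-vgpu state; idr_alloc() returns -ENOSPC once the range is exhausted. A minimal sketch with a hypothetical MAX_SLOTS:

#include <linux/idr.h>

#define MAX_SLOTS 8	/* hypothetical stand-in for GVT_MAX_VGPU */

/*
 * Bounded allocation: returns the lowest free ID in [1, MAX_SLOTS),
 * or -ENOSPC when all slots are taken.
 */
static int alloc_slot(struct idr *idr, void *item)
{
	return idr_alloc(idr, item, 1, MAX_SLOTS, GFP_KERNEL);
}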