Example #1
File: main.c Project: OpenDZ/bus1
static void __exit bus1_exit(void)
{
	/* all user handles must have been released before module unload */
	WARN_ON(!idr_is_empty(&bus1_user_ida.idr));
	WARN_ON(!idr_is_empty(&bus1_user_idr));

	misc_deregister(&bus1_misc);
	ida_destroy(&bus1_user_ida);
	idr_destroy(&bus1_user_idr);
}
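For context, this exit handler undoes what the matching module init sets up. Below is a minimal sketch of such an init routine, assuming the bus1_misc, bus1_user_idr and bus1_user_ida objects referenced above; it is not taken from bus1 and the real init may differ:

/*
 * Hypothetical counterpart to bus1_exit() above: initialize the allocators,
 * register the misc device, and unwind on failure.
 */
static int __init bus1_init(void)
{
	int ret;

	idr_init(&bus1_user_idr);
	ida_init(&bus1_user_ida);

	ret = misc_register(&bus1_misc);
	if (ret < 0) {
		ida_destroy(&bus1_user_ida);
		idr_destroy(&bus1_user_idr);
		return ret;
	}

	return 0;
}

module_init(bus1_init);
module_exit(bus1_exit);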
Example #2
/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to destroy a virtual GPU.
 *
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&gvt->lock);

	WARN(vgpu->active, "vGPU is still active!\n");

	intel_gvt_debugfs_remove_vgpu(vgpu);
	idr_remove(&gvt->vgpu_idr, vgpu->id);
	/* if this was the last vGPU, release the shared interrupt state */
	if (idr_is_empty(&gvt->vgpu_idr))
		intel_gvt_clean_irq(gvt);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_submission(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	intel_vgpu_dmabuf_cleanup(vgpu);
	vfree(vgpu);

	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);
}
Example #3
/**
 * kdbus_node_unref() - Drop object reference
 * @node:	node to drop reference to (or NULL)
 *
 * This drops an object reference to @node. You must not access the node if you
 * no longer own a reference.
 * If the ref-count drops to 0, the object will be destroyed (->free_cb will be
 * called).
 *
 * If you linked or activated the node, you must deactivate the node before you
 * drop your last reference! If you didn't link or activate the node, you can
 * drop any reference you want.
 *
 * Note that this calls into ->free_cb() and thus _might_ sleep. However, the
 * ->free_cb() callbacks must not acquire any outer locks, so you can safely
 * drop references while holding locks.
 *
 * If @node is NULL, this is a no-op.
 *
 * Return: This always returns NULL
 */
struct kdbus_node *kdbus_node_unref(struct kdbus_node *node)
{
	if (node && atomic_dec_and_test(&node->refcnt)) {
		struct kdbus_node safe = *node;

		WARN_ON(atomic_read(&node->active) != KDBUS_NODE_DRAINED);
		WARN_ON(!RB_EMPTY_NODE(&node->rb));

		if (node->free_cb)
			node->free_cb(node);

		down_write(&kdbus_node_idr_lock);
		if (safe.id > 0)
			idr_remove(&kdbus_node_idr, safe.id);
		/* drop caches after last node to not leak memory on unload */
		if (idr_is_empty(&kdbus_node_idr)) {
			idr_destroy(&kdbus_node_idr);
			idr_init(&kdbus_node_idr);
		}
		up_write(&kdbus_node_idr_lock);

		kfree(safe.name);

		/*
		 * kdbusfs relies on the parent to be available even after the
		 * node was deactivated and unlinked. Therefore, we pin it
		 * until a node is destroyed.
		 */
		kdbus_node_unref(safe.parent);
	}

	return NULL;
}
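The "Return: This always returns NULL" convention lets callers drop a reference and clear their pointer in a single statement. A hedged usage sketch follows; the surrounding struct and function are illustrative only, not part of kdbus:

/* Illustrative only: release our reference and forget the pointer in one go. */
struct example_owner {
	struct kdbus_node *node;
};

static void example_owner_release(struct example_owner *owner)
{
	/* safe even if owner->node is already NULL: unref of NULL is a no-op */
	owner->node = kdbus_node_unref(owner->node);
}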
Example #4
void
linux_writecomb_fini(void)
{

	/* no write-combining ranges may remain once we tear down */
	KASSERT(idr_is_empty(&linux_writecomb.idr));
	idr_destroy(&linux_writecomb.idr);
	mutex_destroy(&linux_writecomb.lock);
}
Example #5
static ssize_t mode_store(struct device *device, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	int old_mode, mode = -1;
	int rc = -EBUSY;

	/* can't change this if we have a user */
	spin_lock(&afu->contexts_lock);
	if (!idr_is_empty(&afu->contexts_idr))
		goto err;

	if (!strncmp(buf, "dedicated_process", 17))
		mode = CXL_MODE_DEDICATED;
	if (!strncmp(buf, "afu_directed", 12))
		mode = CXL_MODE_DIRECTED;
	if (!strncmp(buf, "none", 4))
		mode = 0;

	if (mode == -1) {
		rc = -EINVAL;
		goto err;
	}

	/*
	 * cxl_afu_deactivate_mode needs to be done outside the lock, so prevent
	 * other contexts from coming in before we are ready:
	 */
	old_mode = afu->current_mode;
	afu->current_mode = 0;
	afu->num_procs = 0;

	spin_unlock(&afu->contexts_lock);

	if ((rc = _cxl_afu_deactivate_mode(afu, old_mode)))
		return rc;
	if ((rc = cxl_afu_activate_mode(afu, mode)))
		return rc;

	return count;
err:
	spin_unlock(&afu->contexts_lock);
	return rc;
}
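The idr_is_empty() check above is only meaningful because context creation inserts into contexts_idr while holding the same contexts_lock. Here is a generic sketch of that insertion side with hypothetical names; the real cxl context-allocation path differs:

/*
 * Hypothetical insertion side: a new context only becomes visible in
 * contexts_idr under contexts_lock, so the idr_is_empty() check in
 * mode_store() cannot race with a context being added.
 */
static int example_register_context(struct cxl_afu *afu, void *ctx)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&afu->contexts_lock);
	id = idr_alloc(&afu->contexts_idr, ctx, 0, 0, GFP_NOWAIT);
	spin_unlock(&afu->contexts_lock);
	idr_preload_end();

	return id < 0 ? id : 0;
}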
Example #6
static ssize_t reset_store_afu(struct device *device,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	int rc;

	/* Not safe to reset if it is currently in use */
	spin_lock(&afu->contexts_lock);
	if (!idr_is_empty(&afu->contexts_idr)) {
		rc = -EBUSY;
		goto err;
	}

	if ((rc = cxl_afu_reset(afu)))
		goto err;

	rc = count;
err:
	spin_unlock(&afu->contexts_lock);
	return rc;
}