Example 1
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}
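Every snippet in this collection passes a callback to idr_for_each(), whose kernel signature is int (*fn)(int id, void *p, void *data): p is the pointer stored under id, data is the opaque cookie passed as the last argument, and a nonzero return value stops the walk. Below is a minimal sketch of the shape a cleanup callback such as context_idr_cleanup() takes; the type and helper names are placeholders, not the actual i915 code:

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct my_context *ctx = p;	/* pointer stored under this id */

	my_context_free(ctx);		/* placeholder release helper */
	return 0;			/* nonzero would abort the walk */
}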
Example 2
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *e, *next_e;

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);

	client_put(client);

	return 0;
}
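Example 2 sets client->in_shutdown under the lock before walking the IDR, so concurrent paths stop adding resources while the walk frees them. A sketch of what a callback like shutdown_resource() plausibly does; the resource structure and its ->release hook are assumptions for illustration:

static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;	/* assumed entry type */
	struct client *client = data;

	resource->release(client, resource);	/* assumed release hook */
	return 0;
}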
Example 3
void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}
Example 4
/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}
Example 5
static void inotify_free_group_priv(struct fsnotify_group *group)
{
	/* ideally the idr is empty and we won't hit the BUG in the callback */
	idr_for_each(&group->inotify_data.idr, idr_callback, group);
	idr_remove_all(&group->inotify_data.idr);
	idr_destroy(&group->inotify_data.idr);
}
Example 6
File: drm_gem.c Project: E-LLP/n900
/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	mutex_lock(&dev->struct_mutex);
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, NULL);

	idr_destroy(&file_private->object_idr);
	mutex_unlock(&dev->struct_mutex);
}
Example 7
void ipc_space_put(ipc_namespace_t* space)
{
	debug_msg("ipc_space_put() on space %p\n", space);

	mutex_lock(&space->mutex);
	
	/* Destroy all rights */
	idr_for_each(&space->names, __ipc_right_put, NULL);
	
	idr_destroy(&space->names);
}
Example 8
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{

	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);
	dma_pool_destroy(htt->tx_pool);

	if (htt->frag_desc.vaddr) {
		athp_descdma_free(htt->ar, &htt->frag_desc.dd);
	}
	mtx_destroy(&htt->tx_lock);
}
Example 9
/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);

	idr_remove_all(&file_private->object_idr);
	idr_destroy(&file_private->object_idr);
#ifdef __NetBSD__
	spin_lock_destroy(&file_private->table_lock);
#endif
}
Example 10
mach_msg_return_t ipc_space_make_send(ipc_namespace_t* space, darling_mach_port_t* port, bool once, mach_port_name_t* name_out)
{
	mach_msg_return_t ret;
	struct mach_port_right* right = NULL;
	int id;
	
	mutex_lock(&space->mutex);
	
	/* Memory optimization for MACH_PORT_RIGHT_SEND rights */
	if (!once)
	{
		struct idr_right_find_arg arg = { port, name_out };
		
		*name_out = 0;
		
		/* Try to find an existing identical right for this port
		 * and increment its reference count
		 */
		idr_for_each(&space->names, __ipc_right_find, &arg);
		
		if (*name_out != 0)
		{
			mutex_unlock(&space->mutex);
			return KERN_SUCCESS;
		}
	}
	
	right = ipc_right_new(port, once ? MACH_PORT_RIGHT_SEND_ONCE : MACH_PORT_RIGHT_SEND);
	if (right == NULL)
	{
		ret = KERN_RESOURCE_SHORTAGE;
		goto err;
	}
	
	id = idr_alloc(&space->names, right, 1, -1, GFP_KERNEL);
	if (id < 0)
	{
		ret = KERN_RESOURCE_SHORTAGE;
		goto err;
	}
	
	*name_out = id;
	
	mutex_unlock(&space->mutex);
	
	return KERN_SUCCESS;
	
err:
	ipc_right_put(right);

	mutex_unlock(&space->mutex);
	return ret;
}
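Example 10 shows the allocation side that the teardown examples undo: idr_alloc() stores a pointer and returns a new id, or a negative errno on failure. A self-contained sketch of the lifecycle using only documented IDR calls; my_obj and the helper names are placeholders:

struct my_obj {
	int payload;
};

static int my_obj_register(struct idr *idr, struct my_obj *obj)
{
	/* start ids at 1 so that 0 can mean "no name", as in Example 10 */
	return idr_alloc(idr, obj, 1, 0, GFP_KERNEL);	/* id or -errno */
}

static void my_obj_unregister(struct idr *idr, int id)
{
	struct my_obj *obj = idr_find(idr, id);	/* NULL if id is absent */

	if (obj)
		idr_remove(idr, id);
}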
Example 11
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;

	if (shm_ids(ns).in_use == 0)
		return;

	/* Destroy all already created segments, but not mapped yet */
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
	up_write(&shm_ids(ns).rwsem);
}
Example 12
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	int size;

	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);
	dma_pool_destroy(htt->tx_pool);

	if (htt->frag_desc.vaddr) {
		size = htt->max_num_pending_tx *
				  sizeof(struct htt_msdu_ext_desc);
		dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
				  htt->frag_desc.paddr);
	}
}
Example 13
static ssize_t exynos_drm_show_gem_info(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct drm_file *filp;

	DRM_INFO("pid \thandle \trefcount \thcount \tsize \t\tflags "\
		"\tpage_size \tpfnmap \texport_to_fd \timport_from_fd\n");

	list_for_each_entry(filp, &drm_dev->filelist, lhead)
		idr_for_each(&filp->object_idr, &exynos_drm_list_gem_info,
				filp);

	return strlen(buf);
}
Example 14
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	int size;

	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);

	if (htt->txbuf.vaddr) {
		size = htt->max_num_pending_tx *
				  sizeof(struct ath10k_htt_txbuf);
		dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
				  htt->txbuf.paddr);
	}

	ath10k_htt_tx_free_txq(htt);
	ath10k_htt_tx_free_cont_frag_desc(htt);
}
Example 15
/**
 * rppc_find_dmabuf - find and return the rppc buffer descriptor of an imported
 *		      buffer
 * @rpc: rpc instance
 * @fd: dma-buf file descriptor of the buffer
 *
 * This function is used to find and return the rppc buffer descriptor of an
 * imported buffer. The function is used to check if a buffer has already
 * been imported (during manual registration to return an error), and to return
 * the rppc buffer descriptor to be used for freeing (during manual
 * deregistration). It is also used during auto-registration to see if the
 * buffer needs to be imported through a rppc_alloc_dmabuf if not found.
 *
 * Return: rppc buffer descriptor of the buffer if it has already been imported,
 *	   or NULL otherwise.
 */
struct rppc_dma_buf *rppc_find_dmabuf(struct rppc_instance *rpc, int fd)
{
	struct rppc_dma_buf *node = NULL;
	void *data = (void *)fd;

	dev_dbg(rpc->dev, "looking for fd %u\n", fd);

	mutex_lock(&rpc->lock);
	node = (struct rppc_dma_buf *)
			idr_for_each(&rpc->dma_idr, find_dma_by_fd, data);
	mutex_unlock(&rpc->lock);

	dev_dbg(rpc->dev, "returning node %p for fd %u\n",
		node, fd);

	return node;
}
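idr_for_each() itself returns an int: 0 if every callback returned 0, otherwise the first nonzero callback return value, which ends the walk early. Example 15 exploits that to smuggle a pointer out of the iteration, which only round-trips where a pointer fits in an int (hence the cast gymnastics above). A sketch of the comparison callback this implies; the node->fd field is an assumption for illustration:

static int find_dma_by_fd(int id, void *p, void *data)
{
	struct rppc_dma_buf *node = p;
	int fd = (int)(long)data;	/* fd packed into the cookie */

	if (node->fd == fd)
		return (int)(long)node;	/* nonzero: stop, return node */

	return 0;	/* keep looking */
}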
Example 16
static int exynos_drm_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *drm_dev = node->minor->dev;
	struct exynos_drm_gem_info_data gem_info_data;

	gem_info_data.m = m;

	seq_printf(gem_info_data.m, "pid \ttgid \thandle \trefcount \thcount "\
				"\tsize \t\tflags \tpage_size \tpfnmap \t"\
				"exyport_to_fd \timport_from_fd\n");

	list_for_each_entry(gem_info_data.filp, &drm_dev->filelist, lhead)
		idr_for_each(&gem_info_data.filp->object_idr,
				exynos_drm_gem_one_info, &gem_info_data);

	return 0;
}
Example 17
static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
}
Example 18
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	int size;

	tasklet_kill(&htt->txrx_compl_task);

	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);

	if (htt->txbuf.vaddr) {
		size = htt->max_num_pending_tx *
				  sizeof(struct ath10k_htt_txbuf);
		dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
				  htt->txbuf.paddr);
	}

	ath10k_htt_tx_free_txq(htt);
	ath10k_htt_tx_free_cont_frag_desc(htt);
	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
	kfifo_free(&htt->txdone_fifo);
}
Example 19
/**
 * rppc_xlate_buffers - translate argument pointers in the marshalled packet
 * @rpc: rppc instance
 * @func: rppc function packet being acted upon
 * @direction: direction of translation
 *
 * This function translates all the pointers within the function call packet
 * structure, based on the translation descriptor structures, replacing each
 * pointer as dictated by the direction. It is invoked when preparing a packet
 * to be sent to the remote processor side, substituting remote processor
 * device addresses for the user pointers, and again when processing the
 * packet after the function has executed, restoring the original pointers
 * in place of the remote processor device addresses.
 *
 * Return: 0 on success, or an appropriate failure code otherwise
 */
int rppc_xlate_buffers(struct rppc_instance *rpc, struct rppc_function *func,
		       int direction)
{
	uint8_t *base_ptr = NULL;
	struct dma_buf *dbuf = NULL;
	struct device *dev = rpc->dev;
	uint32_t ptr_idx, pri_offset, sec_offset, offset, pg_offset, size;
	int i, limit, inc = 1;
	virt_addr_t kva, uva, buva;
	dev_addr_t rda;
	int ret = 0, final_ret = 0;
	int xlate_fd;

	limit = func->num_translations;
	if (WARN_ON(!limit))
		return 0;

	dev_dbg(dev, "operating on %d pointers\n", func->num_translations);

	/* sanity check the translation elements */
	for (i = 0; i < limit; i++) {
		ptr_idx = func->translations[i].index;
		pri_offset = func->params[ptr_idx].data -
						func->params[ptr_idx].base;
		sec_offset = func->translations[i].offset;
		size = func->params[ptr_idx].size;

		if (ptr_idx >= RPPC_MAX_PARAMETERS) {
			dev_err(dev, "xlate[%d] - invalid parameter pointer index %u\n",
				i, ptr_idx);
			return -EINVAL;
		}
		if (func->params[ptr_idx].type != RPPC_PARAM_TYPE_PTR) {
			dev_err(dev, "xlate[%d] - parameter index %u is not a pointer (type %u)\n",
				i, ptr_idx, func->params[ptr_idx].type);
			return -EINVAL;
		}
		if (func->params[ptr_idx].data == 0) {
			dev_err(dev, "xlate[%d] - supplied user pointer is NULL!\n",
				i);
			return -EINVAL;
		}
		if (sec_offset > (size - sizeof(virt_addr_t))) {
			dev_err(dev, "xlate[%d] offset is larger than data area! (sec_offset = %u size = %u)\n",
				i, sec_offset, size);
			return -ENOSPC;
		}
	}

	/*
	 * we may have a failure during translation, in which case use the same
	 * loop to unwind the whole operation
	 */
	for (i = 0; i != limit; i += inc) {
		dev_dbg(dev, "starting translation %d of %d by %d\n",
			i, limit, inc);

		ptr_idx = func->translations[i].index;
		pri_offset = func->params[ptr_idx].data -
						func->params[ptr_idx].base;
		sec_offset = func->translations[i].offset;
		offset = pri_offset + sec_offset;
		pg_offset = (offset & (PAGE_SIZE - 1));

		/*
		 * map into kernel the page containing the offset, where the
		 * pointer needs to be translated.
		 */
		ret = rppc_map_page(rpc, func->params[ptr_idx].fd, offset,
				    &base_ptr, &dbuf);
		if (ret) {
			dev_err(dev, "rppc_map_page failed, translation = %d param_index = %d fd = %d ret = %d\n",
				i, ptr_idx, func->params[ptr_idx].fd, ret);
			goto unwind;
		}

		/*
		 * perform the actual translation as per the direction.
		 */
		if (direction == RPPC_UVA_TO_RPA) {
			kva = (virt_addr_t)&base_ptr[pg_offset];
			if (kva & 0x3) {
				dev_err(dev, "kernel virtual address %p is not aligned for translation = %d\n",
					(void *)kva, i);
				ret = -EADDRNOTAVAIL;
				goto unmap;
			}

			uva = *(virt_addr_t *)kva;
			if (!uva) {
				dev_err(dev, "user pointer in the translated offset location is NULL for translation = %d\n",
					i);
				print_hex_dump(KERN_DEBUG, "KMAP: ",
					       DUMP_PREFIX_NONE, 16, 1,
					       base_ptr, PAGE_SIZE, true);
				ret = -EADDRNOTAVAIL;
				goto unmap;
			}

			buva = (virt_addr_t)func->translations[i].base;
			xlate_fd = func->translations[i].fd;

			dev_dbg(dev, "replacing UVA %p at KVA %p prt_idx = %u pg_offset = 0x%x fd = %d\n",
				(void *)uva, (void *)kva, ptr_idx,
				pg_offset, xlate_fd);

			/* compute the corresponding remote device address */
			rda = rppc_buffer_lookup(rpc, uva, buva, xlate_fd);
			if (!rda) {
				ret = -ENODATA;
				goto unmap;
			}

			/*
			 * replace the pointer, save the old value for replacing
			 * it back on the function return path
			 */
			func->translations[i].fd = (int32_t)uva;
			*(phys_addr_t *)kva = rda;
			dev_dbg(dev, "replaced UVA %p with RDA %p at KVA %p\n",
				(void *)uva, (void *)rda, (void *)kva);
		} else if (direction == RPPC_RPA_TO_UVA) {
			kva = (virt_addr_t)&base_ptr[pg_offset];
			if (kva & 0x3) {
				ret = -EADDRNOTAVAIL;
				goto unmap;
			}

			rda = *(phys_addr_t *)kva;
			uva = (virt_addr_t)func->translations[i].fd;
			WARN_ON(!uva);
			*(virt_addr_t *)kva = uva;

			dev_dbg(dev, "replaced RDA %p with UVA %p at KVA %p\n",
				(void *)rda, (void *)uva, (void *)kva);
		}

unmap:
		/*
		 * unmap the page containing the translation from kernel, the
		 * next translation acting on the same fd might be in a
		 * different page altogether from the current one
		 */
		rppc_unmap_page(rpc, offset, base_ptr, dbuf);
		dbuf = NULL;
		base_ptr = NULL;

		if (!ret)
			continue;

unwind:
		/*
		 * unwind all the previous translations if the failure occurs
		 * while sending a message to the remote-side. There's nothing
		 * to do but to continue if the failure occurs during the
		 * processing of a function response.
		 */
		if (direction == RPPC_UVA_TO_RPA) {
			dev_err(dev, "unwinding UVA to RDA translations! translation = %d\n",
				i);
			direction = RPPC_RPA_TO_UVA;
			inc = -1;
			limit = -1;
		} else if (direction == RPPC_RPA_TO_UVA) {
			dev_err(dev, "error during UVA to RDA translations!! current translation = %d\n",
				i);
		}
		/*
		 * store away the return value to return back to caller
		 * in case of an error, record only the first error
		 */
		if (!final_ret)
			final_ret = ret;
	}

	/*
	 * all the in-place pointer replacements are done, release all the
	 * imported buffers during the remote function return path
	 */
	if (direction == RPPC_RPA_TO_UVA) {
		mutex_lock(&rpc->lock);
		idr_for_each(&rpc->dma_idr, rppc_free_auto_dmabuf, rpc);
		mutex_unlock(&rpc->lock);
	}

	return final_ret;
}
Example 20
/* Snapshot the Linux specific information */
static int snapshot_os(struct kgsl_device *device,
	void *snapshot, int remain, void *priv)
{
	struct kgsl_snapshot_linux *header = snapshot;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct task_struct *task;
	pid_t pid;
	int hang = (int) priv;
	int ctxtcount = 0;
	int size = sizeof(*header);

	/* Figure out how many active contexts there are - these will
	 * be appended on the end of the structure */

	idr_for_each(&device->context_idr, snapshot_context_count, &ctxtcount);

	size += ctxtcount * sizeof(struct kgsl_snapshot_linux_context);

	/* Make sure there is enough room for the data */
	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "OS");
		return 0;
	}

	memset(header, 0, sizeof(*header));

	header->osid = KGSL_SNAPSHOT_OS_LINUX;

	header->state = hang ? SNAPSHOT_STATE_HUNG : SNAPSHOT_STATE_RUNNING;

	/* Get the kernel build information */
	strlcpy(header->release, utsname()->release, sizeof(header->release));
	strlcpy(header->version, utsname()->version, sizeof(header->version));

	/* Get the Unix time for the timestamp */
	header->seconds = get_seconds();

	/* Remember the power information */
	header->power_flags = pwr->power_flags;
	header->power_level = pwr->active_pwrlevel;
	header->power_interval_timeout = pwr->interval_timeout;
	header->grpclk = kgsl_get_clkrate(pwr->grp_clks[0]);
	header->busclk = kgsl_get_clkrate(pwr->ebi1_clk);

	/* Future proof for per-context timestamps */
	header->current_context = -1;

	/* Get the current PT base */
	header->ptbase = kgsl_mmu_get_current_ptbase(device);
	/* And the PID for the task leader */
	pid = header->pid = kgsl_mmu_get_ptname_from_ptbase(header->ptbase);

	task = find_task_by_vpid(pid);

	if (task)
		get_task_comm(header->comm, task);

	header->ctxtcount = ctxtcount;

	/* append information for each context */
	_ctxtptr = snapshot + sizeof(*header);
	idr_for_each(&device->context_idr, snapshot_context_info, NULL);

	/* Return the size of the data segment */
	return size;
}
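The counting pass at the top of Example 20 relies on the data cookie pointing at the counter. A sketch of what snapshot_context_count() presumably boils down to; this illustrates the idiom and is not the actual kgsl source:

static int snapshot_context_count(int id, void *ptr, void *data)
{
	int *count = data;	/* &ctxtcount from the caller */

	*count += 1;		/* one active context per allocated id */
	return 0;		/* walk every entry */
}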
Example 21
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);
	dma_pool_destroy(htt->tx_pool);
}
Example 22
void drm_drawable_free_all(struct drm_device *dev)
{
    idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
    idr_remove_all(&dev->drw_idr);
}
Example 23
void vgem_fence_close(struct vgem_file *vfile)
{
	idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
	idr_destroy(&vfile->fence_idr);
}
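Nearly every example above reduces to the same teardown shape: walk the remaining entries with a freeing callback, then release the IDR's own memory. Examples 2, 5, 9, and 22 also call idr_remove_all(), which older kernels required before idr_destroy(); it disappeared (around Linux 3.9) once idr_destroy() began releasing all ids itself. A generic sketch of the pattern, with free_entry() as a placeholder for whatever cleanup the entries need:

static int free_entry(int id, void *p, void *data)
{
	kfree(p);	/* entry-specific cleanup goes here */
	return 0;
}

static void my_idr_teardown(struct idr *idr)
{
	idr_for_each(idr, free_entry, NULL);
	idr_destroy(idr);	/* frees the tree, not the entries */
}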