Example #1
/* This function performs what I'm calling the "gluing" together of strings.
 * For example, if you type
 *
 *   $ echo "a"'b'
 *
 * at the shell, the double-quoted string "a" will be glued together with the
 * single-quoted string 'b' because they are not separated by any whitespace.
 *
 * This transformation occurs after parameter expansion and word splitting, but
 * before filename expansion.  So, for example, "a"'b'* would be equivalent to
 * ab*, and ${a}b would be equivalent to "a" "cb" if the shell variable $a is
 * set to "a c".
 *
 * The input is a list @string_list of strings passed to the shell.  Each
 * string is glued to any adjacent, succeeding strings that do *not* have the
 * STRING_FLAG_PRECEDING_WHITESPACE flag set; that flag indicates that a string
 * was preceded by whitespace and therefore begins a new word.  The resulting
 * list of "glued" strings replaces the input list.
 *
 * This function also has the special responsibility of detecting "glued"
 * strings that are constructed from an unquoted string matching the regular
 * expression [A-Za-z_][A-Za-z_0-9]*=.*, followed by zero or more unquoted or
 * quoted strings.  For example:
 *   $ a="b"
 *   $ a=b
 *   $ a="b"'c'
 *   $ a=           # this is legal; it sets the variable a to the empty string
 * The glued strings of this form are interpreted as variable assignments, so
 * the STRING_FLAG_VAR_ASSIGNMENT flag is set on these glued strings.
 */
static int glue_strings(struct list_head *string_list)
{
	struct string *s, *tmp;
	LIST_HEAD(new_list);
	while (!list_empty(string_list)) {
		struct list_head *first = string_list->next;
		int flags;
		/* Glue one string */
		LIST_HEAD(glue_list);
		list_move_tail(first, &glue_list);
		list_for_each_entry_safe(s, tmp, string_list, list) {
			if (s->flags & STRING_FLAG_PRECEDING_WHITESPACE)
				break;
			else
				list_move_tail(&s->list, &glue_list);
		}
		/* Detect variable assignments
		 * TODO: Somehow make it so that unquoted strings where
		 * parameter expansion has occurred on the left side of the
		 * equals sign are not considered variable assignments. */
		s = list_entry(first, struct string, list);
		if (s->flags & STRING_FLAG_UNQUOTED &&
		    string_matches_param_assignment(s))
			flags = STRING_FLAG_VAR_ASSIGNMENT;
		else
			flags = 0;
		s = join_strings(&glue_list);
		s->flags = flags;
		list_add_tail(&s->list, &new_list);
	}
	/* Replace @string_list with the list of glued strings */
	list_splice_tail(&new_list, string_list);
	return 0;
}
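
The helper string_matches_param_assignment() used above is not shown in this
excerpt.  A minimal sketch of what it might look like, derived only from the
regular expression given in the comment ([A-Za-z_][A-Za-z_0-9]*=.*) and
assuming a hypothetical chars member that holds the string's text:

#include <ctype.h>
#include <stdbool.h>

/* Sketch only: returns true if the string's text begins with a valid
 * variable name ([A-Za-z_][A-Za-z_0-9]*) immediately followed by '='.
 * The "chars" member is an assumption, not the project's real layout. */
static bool string_matches_param_assignment(const struct string *s)
{
	const char *p = s->chars;

	if (!isalpha((unsigned char)*p) && *p != '_')
		return false;
	do {
		p++;
	} while (isalnum((unsigned char)*p) || *p == '_');
	return *p == '=';
}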
Example #2
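/* Grow @queues->queue_array by repeatedly doubling the queue count until it
 * is at least @new_nr_queues, carrying each existing queue's buffer list
 * (via list_splice_tail()) and private pointer over to the new array.
 * Returns 0 on success, -EINVAL if the doubled count overflows, or -ENOMEM
 * if the new array cannot be allocated. */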
static int auxtrace_queues__grow(struct auxtrace_queues *queues,
				 unsigned int new_nr_queues)
{
	unsigned int nr_queues = queues->nr_queues;
	struct auxtrace_queue *queue_array;
	unsigned int i;

	if (!nr_queues)
		nr_queues = AUXTRACE_INIT_NR_QUEUES;

	while (nr_queues && nr_queues < new_nr_queues)
		nr_queues <<= 1;

	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
		return -EINVAL;

	queue_array = auxtrace_alloc_queue_array(nr_queues);
	if (!queue_array)
		return -ENOMEM;

	for (i = 0; i < queues->nr_queues; i++) {
		list_splice_tail(&queues->queue_array[i].head,
				 &queue_array[i].head);
		queue_array[i].priv = queues->queue_array[i].priv;
	}

	queues->nr_queues = nr_queues;
	queues->queue_array = queue_array;

	return 0;
}
Example #3
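/* Splice the caller's work items onto the global rpcrdma_tasklets_g list
 * under the tasklet lock, then kick the tasklet that drains that list. */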
static void
rpcrdma_schedule_tasklet(struct list_head *sched_list)
{
	unsigned long flags;

	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	list_splice_tail(sched_list, &rpcrdma_tasklets_g);
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
	tasklet_schedule(&rpcrdma_tasklet_g);
}
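
The examples above and below share a common idiom: accumulate results on a
private list_head, then publish them in a single O(1) operation with
list_splice_tail(), holding the protecting lock only around the splice.  A
minimal kernel-style sketch of that pattern, using hypothetical names
(pending_lock, pending_items, struct item) chosen purely for illustration:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head list;
	int value;
};

static DEFINE_SPINLOCK(pending_lock);
static LIST_HEAD(pending_items);

/* Build a batch of items without holding the lock, then publish the whole
 * batch at once.  list_splice_tail() preserves the batch's ordering and
 * leaves the local head untouched (it goes out of scope anyway).
 * Returns the number of items actually published. */
static int publish_batch(int count)
{
	LIST_HEAD(batch);
	int i;

	for (i = 0; i < count; i++) {
		struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);

		if (!it)
			break;
		it->value = i;
		list_add_tail(&it->list, &batch);
	}

	spin_lock(&pending_lock);
	list_splice_tail(&batch, &pending_items);
	spin_unlock(&pending_lock);

	return i;
}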
Example #4
/* Performs filename expansion on a list of strings.  The list of strings is
 * replaced with the list of expanded strings.  0 is returned on success; -1 is
 * returned on read error in glob(). */
static int do_filename_expansion(struct list_head *string_list)
{
	struct string *s, *tmp;
	int ret = 0;

	LIST_HEAD(new_list);
	list_for_each_entry_safe(s, tmp, string_list, list)
		ret |= string_do_filename_expansion(s, &new_list);
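	/* Empty the original list: on success the expanded strings are
	 * spliced back into it below; on failure it is left empty and the
	 * partial results are freed. */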
	INIT_LIST_HEAD(string_list);
	if (ret)
		free_string_list(&new_list);
	else
		list_splice_tail(&new_list, string_list);
	return ret;
}
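
The per-string helper string_do_filename_expansion() is not part of this
excerpt.  A rough sketch of what it might do, assuming hypothetical struct
string members (chars) and new_string()/free_string() helpers that are not
shown in the source, and using glob() as the comment above suggests:

#include <glob.h>

/* Hypothetical sketch: expand one string as a glob pattern and append the
 * matches to @out_list, consuming @s.  Returns 0 on success or -1 on a
 * read error or allocation failure. */
static int string_do_filename_expansion(struct string *s,
					struct list_head *out_list)
{
	glob_t gb;
	size_t i;
	int ret;

	/* GLOB_NOCHECK: a pattern with no matches expands to itself, as in
	 * the shell; GLOB_ERR: abort on directory read errors. */
	ret = glob(s->chars, GLOB_NOCHECK | GLOB_ERR, NULL, &gb);
	if (ret != 0) {
		free_string(s);
		return -1;
	}
	for (i = 0; i < gb.gl_pathc; i++) {
		struct string *match = new_string(gb.gl_pathv[i]);

		if (!match) {
			globfree(&gb);
			free_string(s);
			return -1;
		}
		list_add_tail(&match->list, out_list);
	}
	globfree(&gb);
	free_string(s);
	return 0;
}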
Example #5
/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps), or the mm core may have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) result in the shrinker code
 * having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;
	unsigned long scanned = 0;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return 0;

	/*
	 * When shrinking the active list, also consider active contexts.
	 * Active contexts are pinned until they are retired, and so can
	 * not be simply unbound to retire and unpin their pages. To shrink
	 * the contexts, we must wait until the gpu is idle.
	 *
	 * We don't care about errors here; if we cannot wait upon the GPU,
	 * we will free as much as we can and hope to get a second chance.
	 */
	if (flags & I915_SHRINK_ACTIVE)
		i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv);

	/*
	 * Unbinding of objects will require HW access; let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count of 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);

		/*
		 * We serialize our access to unreferenced objects through
		 * the use of the struct_mutex. While the objects are not
		 * yet freed (due to RCU then a workqueue) we still want
		 * to be able to shrink their pages, so they remain on
		 * the unbound/bound list until actually freed.
		 */
		spin_lock(&dev_priv->mm.obj_lock);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->mm.madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(flags & I915_SHRINK_ACTIVE) &&
			    (i915_gem_object_is_active(obj) ||
			     i915_gem_object_is_framebuffer(obj)))
				continue;

			if (!can_release_pages(obj))
				continue;

			spin_unlock(&dev_priv->mm.obj_lock);

			if (unsafe_drop_pages(obj)) {
				/* May arrive from get_pages on another bo */
				mutex_lock_nested(&obj->mm.lock,
						  I915_MM_SHRINKER);
				if (!i915_gem_object_has_pages(obj)) {
					__i915_gem_object_invalidate(obj);
					count += obj->base.size >> PAGE_SHIFT;
				}
				mutex_unlock(&obj->mm.lock);
			}
			scanned += obj->base.size >> PAGE_SHIFT;

			spin_lock(&dev_priv->mm.obj_lock);
		}
		list_splice_tail(&still_in_list, phase->list);
		spin_unlock(&dev_priv->mm.obj_lock);
	}

	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);

	i915_gem_retire_requests(dev_priv);

	shrinker_unlock(dev_priv, unlock);

	if (nr_scanned)
		*nr_scanned += scanned;
	return count;
}
Example #6
static int
execute_update_commands(WIMStruct *wim,
			const struct wimlib_update_command *cmds,
			size_t num_cmds,
			int update_flags)
{
	struct wim_inode_table *inode_table;
	struct wim_sd_set *sd_set;
	struct list_head unhashed_streams;
	struct update_command_journal *j;
	union wimlib_progress_info info;
	int ret;

	if (have_command_type(cmds, num_cmds, WIMLIB_UPDATE_OP_ADD)) {
		/* If we have at least one "add" command, create the inode and
		 * security descriptor tables to index new inodes and new
		 * security descriptors, respectively.  */
		inode_table = alloca(sizeof(struct wim_inode_table));
		sd_set = alloca(sizeof(struct wim_sd_set));

		ret = init_inode_table(inode_table, 9001);
		if (ret)
			goto out;

		ret = init_sd_set(sd_set, wim_get_current_security_data(wim));
		if (ret)
			goto out_destroy_inode_table;

		INIT_LIST_HEAD(&unhashed_streams);
	} else {
		inode_table = NULL;
		sd_set = NULL;
	}

	/* Start an in-memory journal to allow rollback if something goes wrong
	 */
	j = new_update_command_journal(num_cmds,
				       &wim_get_current_image_metadata(wim)->root_dentry,
				       wim->lookup_table);
	if (!j) {
		ret = WIMLIB_ERR_NOMEM;
		goto out_destroy_sd_set;
	}

	info.update.completed_commands = 0;
	info.update.total_commands = num_cmds;
	ret = 0;
	for (size_t i = 0; i < num_cmds; i++) {
		DEBUG("Executing update command %zu of %zu (op=%"TS")",
		      i + 1, num_cmds, update_op_to_str(cmds[i].op));
		info.update.command = &cmds[i];
		if (update_flags & WIMLIB_UPDATE_FLAG_SEND_PROGRESS) {
			ret = call_progress(wim->progfunc,
					    WIMLIB_PROGRESS_MSG_UPDATE_BEGIN_COMMAND,
					    &info, wim->progctx);
			if (ret)
				goto rollback;
		}

		switch (cmds[i].op) {
		case WIMLIB_UPDATE_OP_ADD:
			ret = execute_add_command(j, wim, &cmds[i], inode_table,
						  sd_set, &unhashed_streams);
			break;
		case WIMLIB_UPDATE_OP_DELETE:
			ret = execute_delete_command(j, wim, &cmds[i]);
			break;
		case WIMLIB_UPDATE_OP_RENAME:
			ret = execute_rename_command(j, wim, &cmds[i]);
			break;
		}
		if (unlikely(ret))
			goto rollback;
		info.update.completed_commands++;
		if (update_flags & WIMLIB_UPDATE_FLAG_SEND_PROGRESS) {
			ret = call_progress(wim->progfunc,
					    WIMLIB_PROGRESS_MSG_UPDATE_END_COMMAND,
					    &info, wim->progctx);
			if (ret)
				goto rollback;
		}
		next_command(j);
	}

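	/* All commands succeeded: make the journaled changes permanent and,
	 * if "add" commands were executed, publish the new unhashed streams
	 * and inodes into the current image's metadata. */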
	commit_update(j);
	if (inode_table) {
		struct wim_image_metadata *imd;

		imd = wim_get_current_image_metadata(wim);

		list_splice_tail(&unhashed_streams, &imd->unhashed_streams);
		inode_table_prepare_inode_list(inode_table, &imd->inode_list);
	}
	goto out_destroy_sd_set;

rollback:
	if (sd_set)
		rollback_new_security_descriptors(sd_set);
	rollback_update(j);
out_destroy_sd_set:
	if (sd_set)
		destroy_sd_set(sd_set);
out_destroy_inode_table:
	if (inode_table)
		destroy_inode_table(inode_table);
out:
	return ret;
}
Example #7
/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates RX_CLAIM_REQ_ALLOC (8) pages for each pending allocation request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		struct list_head local_allocated;

		INIT_LIST_HEAD(&local_allocated);

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * and the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}
			/* dma address must be no more than 36 bits */
			BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
			/* and also 256 byte aligned! */
			BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		pending--;
		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

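		/* Signal that one request's worth of RBDs (RX_CLAIM_REQ_ALLOC
		 * pages) is now ready on the rbd_allocated list. */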
		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}