Example #1
static struct perf_pmu *pmu_lookup(char *name)
{
	struct perf_pmu *pmu;
	LIST_HEAD(format);
	__u32 type;

	/*
	 * The pmu data we store & need consists of the pmu
	 * type value and format definitions. Load both right
	 * now.
	 */
	if (pmu_format(name, &format))
		return NULL;

	if (pmu_type(name, &type))
		return NULL;

	pmu = zalloc(sizeof(*pmu));
	if (!pmu)
		return NULL;

	INIT_LIST_HEAD(&pmu->format);
	list_splice(&format, &pmu->format);
	pmu->name = strdup(name);
	pmu->type = type;
	return pmu;
}
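Most of the kernel-style examples on this page assume the same contract for list_splice(). For reference, here is a minimal sketch of that contract, modeled on the <linux/list.h> helpers (simplified, not the kernel source). The key point: the source head is left dangling after the splice, which is why callers either re-initialize it with INIT_LIST_HEAD() or, as pmu_lookup() does with its stack-local `format`, simply never touch it again.

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

/* Move all entries of @list to the front of @head. @list itself is
 * left untouched (and therefore dangling); re-initialize it before
 * any further use. */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
	if (list->next != list) {          /* source not empty */
		struct list_head *first = list->next;
		struct list_head *last  = list->prev;

		first->prev = head;
		last->next  = head->next;
		head->next->prev = last;
		head->next  = first;
	}
}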
Example #2
struct list *lexer_read_command_aux(struct lexer *lx)
{
	int spaces_deleted = lexer_discard_white_space(lx);

	struct list *tokens = list_create();

	//Preserve space in substitutions.
	if(spaces_deleted && lx->depth > 0) {
		list_push_tail(tokens, lexer_pack_token(lx, TOKEN_SPACE));
	}

	/* Read all command tokens. */
	while(1) {
		struct token *t = lexer_read_command_argument(lx);
		if(!t)
			break;

		if(t->type == TOKEN_SUBSTITUTION) {
			tokens = list_splice(tokens, lexer_expand_substitution(lx, t, lexer_read_command_aux));
			lexer_free_token(t);
			continue;
		} else {
			list_push_tail(tokens, t);
			if(t->type==TOKEN_NEWLINE) break;
		}
	}

	return tokens;
}
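Note that this cctools-style list_splice() is a different animal from the kernel helper: it is value-returning, so call sites reassign the result (tokens = list_splice(...) here, and likewise in Examples #14 and #18 below). A hedged sketch of that contract as it can be inferred from the call sites; the field names and the free() of the consumed container are assumptions, not the cctools source:

#include <stdlib.h>

struct list_node { void *data; struct list_node *next; };
struct list { struct list_node *head, *tail; int size; };

/* Hypothetical sketch: append `bottom`'s items onto `top` and return
 * the combined list. */
struct list *list_splice(struct list *top, struct list *bottom)
{
	if (!top)
		return bottom;
	if (!bottom)
		return top;

	if (!top->head) {            /* top empty: adopt bottom's chain */
		top->head = bottom->head;
		top->tail = bottom->tail;
	} else if (bottom->head) {   /* both non-empty: link the chains */
		top->tail->next = bottom->head;
		top->tail = bottom->tail;
	}
	top->size += bottom->size;

	bottom->head = bottom->tail = NULL;
	free(bottom);                /* assumed: second list is consumed */
	return top;
}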
Example #3
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated)
{
	/* This is based on the bucket sort with O(n) time complexity.
	 * An item with priority "i" is added to bucket[i]. The lists are then
	 * concatenated in descending order.
	 */
	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
	unsigned i;

	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&bucket[i]);

	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	for (i = 0; i < list->num_entries; i++) {
		unsigned priority = list->array[i].priority;

		list_add_tail(&list->array[i].tv.head,
			      &bucket[priority]);
		list->array[i].user_pages = NULL;
	}

	/* Connect the sorted buckets in the output list. list_splice()
	 * inserts at the head of @validated, so the last bucket spliced
	 * ends up first, giving the descending order described above.
	 */
	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		list_splice(&bucket[i], validated);
}
Example #4
/* Used when freeing/allocating mq cache entry structures: move every
 * level of queue @q onto the list @lh. */
static void takeout_queue(struct list_head *lh, struct queue *q)
{
	unsigned level;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_splice(q->qs + level, lh);
}
Example #5
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *ue,
			unsigned nr,
			unsigned flags)
{
	struct list_head irq_list = LIST_HEAD_INIT(irq_list);
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct kvm_kernel_irq_routing_entry *e = NULL;
	unsigned i;
	int r;

	for (i = 0; i < nr; ++i) {
		r = -EINVAL;
		if (ue->gsi >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (ue->flags)
			goto out;
		r = -ENOMEM;
		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			goto out;
		r = setup_routing_entry(e, ue);
		if (r)
			goto out;
		++ue;
		list_add(&e->link, &irq_list);
		e = NULL;
	}

	mutex_lock(&kvm->lock);
	list_splice(&kvm->irq_routing, &tmp);
	INIT_LIST_HEAD(&kvm->irq_routing);
	list_splice(&irq_list, &kvm->irq_routing);
	INIT_LIST_HEAD(&irq_list);
	list_splice(&tmp, &irq_list);
	mutex_unlock(&kvm->lock);

	r = 0;

out:
	kfree(e);
	__kvm_free_irq_routing(&irq_list);
	return r;
}
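The three splices performed under kvm->lock above amount to a swap: the newly built table goes live in kvm->irq_routing, and the old entries land on irq_list so the common exit path can free them. The idiom in isolation, assuming the Linux-style helpers sketched after Example #1:

/* Exchange the contents of two list heads without allocating.
 * Each plain list_splice() leaves its source dangling, hence the
 * INIT_LIST_HEAD() after every step. */
static inline void list_swap_contents(struct list_head *a,
				      struct list_head *b)
{
	LIST_HEAD(tmp);

	list_splice(a, &tmp);     /* tmp := a's entries */
	INIT_LIST_HEAD(a);
	list_splice(b, a);        /* a   := b's entries */
	INIT_LIST_HEAD(b);
	list_splice(&tmp, b);     /* b   := a's old entries */
}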
Example #6
static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}
Example #7
/* Remove text list between @first and @last.
 *
 *  Let's have a text list that looks something like this:
 *
 *      +---+  +---+  +---+  +---+  +---+  +---+  +---+
 * ...==| A |==| B |==| C |==| D |==| E |==| F |==| G |==...
 *      +---+  +---+  +---+  +---+  +---+  +---+  +---+
 *               ^                           ^      ^
 *             first                       last   next
 *
 *  Removing B..F is straightforward as long as there are no nodes
 *  pointing to this range.  Things get a bit more complicated when
 *  part of a node starts or ends between B and F.
 *
 *  We'll assume that the removed text is going to be replaced by
 *  another list <@rep_first; @rep_last>.  Since we know nothing about
 *  the internal organization of this new list, we'll assume that it
 *  replaces any and every part of the removed list. That way we'll
 *  preserve the relative position of such nodes to its surrounding.
 *
 *  In short:
 *    a. if a node ends inside <@first; @next>, we'll change its
 *       last text to @rep_last.
 *    b. if a node starts inside <@first; @next>, we'll change its
 *       first text to @rep_first.
 */
static void
do_remove(struct dynstr *first, struct dynstr *last,
	  struct dynstr *rep_first, struct dynstr *rep_last)
{
	struct dynstr *next = next_dynstr(last);
	node_t *node;

	do {
		list_for_each_entry(node, &first->node_first, first_list)
			node->loc.first.text = rep_first;
		list_splice(&first->node_first, &rep_first->node_first);

		list_for_each_entry(node, &first->node_last, last_list)
			node->loc.last.text = rep_last;
		list_splice(&first->node_last, &rep_last->node_last);

		first = dynstr_del(first);
	} while (first != next);
}
Example #8
/*
 * mount 'source_mnt' under the destination 'dest_mnt' at
 * dentry 'dest_dentry'. And propagate that mount to
 * all the peer and slave mounts of 'dest_mnt'.
 * Link all the new mounts into a propagation tree headed at
 * source_mnt. Also link all the new mounts using ->mnt_list
 * headed at source_mnt's ->mnt_list
 *
 * @dest_mnt: destination mount.
 * @dest_dentry: destination dentry.
 * @source_mnt: source mount.
 * @tree_list : list of heads of trees to be attached.
 */
int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
		    struct mount *source_mnt, struct list_head *tree_list)
{
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
	struct mount *m, *child;
	int ret = 0;
	struct mount *prev_dest_mnt = dest_mnt;
	struct mount *prev_src_mnt  = source_mnt;
	LIST_HEAD(tmp_list);
	LIST_HEAD(umount_list);

	for (m = propagation_next(dest_mnt, dest_mnt); m;
			m = propagation_next(m, dest_mnt)) {
		int type;
		struct mount *source;

		if (IS_MNT_NEW(m))
			continue;

		source =  get_source(m, prev_dest_mnt, prev_src_mnt, &type);

		/* Notice when we are propagating across user namespaces */
		if (m->mnt_ns->user_ns != user_ns)
			type |= CL_UNPRIVILEGED;

		child = copy_tree(source, source->mnt.mnt_root, type);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			list_splice(tree_list, tmp_list.prev);
			goto out;
		}

		if (is_subdir(dest_dentry, m->mnt.mnt_root)) {
			mnt_set_mountpoint(m, dest_dentry, child);
			list_add_tail(&child->mnt_hash, tree_list);
		} else {
			/*
			 * This can happen if the parent mount was bind mounted
			 * on some subdirectory of a shared/slave mount.
			 */
			list_add_tail(&child->mnt_hash, &tmp_list);
		}
		prev_dest_mnt = m;
		prev_src_mnt  = child;
	}
out:
	br_write_lock(&vfsmount_lock);
	while (!list_empty(&tmp_list)) {
		child = list_first_entry(&tmp_list, struct mount, mnt_hash);
		umount_tree(child, 0, &umount_list);
	}
	br_write_unlock(&vfsmount_lock);
	release_mounts(&umount_list);
	return ret;
}
Example #9
int NLog::get_write_buffer_list( list_head &_buf_list )
{
    int ret = -1;

    AutoLock lock( &mutex_ );   /* must be named: an unnamed temporary
                                   would unlock again immediately */
    if( !list_empty( &write_list_ ) ) {
        list_splice( &write_list_, &_buf_list );
        INIT_LIST_HEAD( &write_list_ );
        AutoLock count_lock( &buf_count_mtx_ );
        buf_count_ = 0;
        ret = 0;
    }
    return ret;
}
Example #10
/*
 * mount 'source_mnt' under the destination 'dest_mnt' at
 * dentry 'dest_dentry'. And propagate that mount to
 * all the peer and slave mounts of 'dest_mnt'.
 * Link all the new mounts into a propagation tree headed at
 * source_mnt. Also link all the new mounts using ->mnt_list
 * headed at source_mnt's ->mnt_list
 *
 * @dest_mnt: destination mount.
 * @dest_dentry: destination dentry.
 * @source_mnt: source mount.
 * @tree_list : list of heads of trees to be attached.
 */
int propagate_mnt(struct vfsmount *dest_mnt, struct dentry *dest_dentry,
		    struct vfsmount *source_mnt, struct list_head *tree_list)
{
	struct vfsmount *m, *child;
	int ret = 0;
	struct vfsmount *prev_dest_mnt = dest_mnt;
	struct vfsmount *prev_src_mnt  = source_mnt;
	LIST_HEAD(tmp_list);
	LIST_HEAD(umount_list);

	for (m = propagation_next(dest_mnt, dest_mnt); m;
			m = propagation_next(m, dest_mnt)) {
		int type;
		struct vfsmount *source;

		if (IS_MNT_NEW(m))
			continue;

		source =  get_source(m, prev_dest_mnt, prev_src_mnt, &type);

		if (!(child = copy_tree(source, source->mnt_root, type))) {
			ret = -ENOMEM;
			list_splice(tree_list, tmp_list.prev);
			goto out;
		}

		if (is_subdir(dest_dentry, m->mnt_root)) {
			mnt_set_mountpoint(m, dest_dentry, child);
			list_add_tail(&child->mnt_hash, tree_list);
		} else {
			/*
			 * This can happen if the parent mount was bind mounted
			 * on some subdirectory of a shared/slave mount.
			 */
			list_add_tail(&child->mnt_hash, &tmp_list);
		}
		prev_dest_mnt = m;
		prev_src_mnt  = child;
	}
out:
	spin_lock(&vfsmount_lock);
	while (!list_empty(&tmp_list)) {
		child = list_entry(tmp_list.next, struct vfsmount, mnt_hash);
		list_del_init(&child->mnt_hash);
		umount_tree(child, 0, &umount_list);
	}
	spin_unlock(&vfsmount_lock);
	release_mounts(&umount_list);
	return ret;
}
Example #11
static int do_make_slave(struct mount *mnt)
{
	struct mount *peer_mnt = mnt, *master = mnt->mnt_master;
	struct mount *slave_mnt;

	/*
	 * slave 'mnt' to a peer mount that has the
	 * same root dentry. If none is available then
	 * slave it to anything that is available.
	 */
	while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
	       peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root) ;

	if (peer_mnt == mnt) {
		peer_mnt = next_peer(mnt);
		if (peer_mnt == mnt)
			peer_mnt = NULL;
	}
	if (mnt->mnt_group_id && IS_MNT_SHARED(mnt) &&
	    list_empty(&mnt->mnt_share))
		mnt_release_group_id(mnt);

	list_del_init(&mnt->mnt_share);
	mnt->mnt_group_id = 0;

	if (peer_mnt)
		master = peer_mnt;

	if (master) {
		list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
			slave_mnt->mnt_master = master;
		list_move(&mnt->mnt_slave, &master->mnt_slave_list);
		list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
	} else {
		struct list_head *p = &mnt->mnt_slave_list;
		while (!list_empty(p)) {
			slave_mnt = list_first_entry(p,
					struct mount, mnt_slave);
			list_del_init(&slave_mnt->mnt_slave);
			slave_mnt->mnt_master = NULL;
		}
	}
	mnt->mnt_master = master;
	CLEAR_MNT_SHARED(mnt);
	return 0;
}
Example #12
int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
		    struct mount *source_mnt, struct list_head *tree_list)
{
	struct mount *m, *child;
	int ret = 0;
	struct mount *prev_dest_mnt = dest_mnt;
	struct mount *prev_src_mnt  = source_mnt;
	LIST_HEAD(tmp_list);
	LIST_HEAD(umount_list);

	for (m = propagation_next(dest_mnt, dest_mnt); m;
			m = propagation_next(m, dest_mnt)) {
		int type;
		struct mount *source;

		if (IS_MNT_NEW(m))
			continue;

		source =  get_source(m, prev_dest_mnt, prev_src_mnt, &type);

		if (!(child = copy_tree(source, source->mnt.mnt_root, type))) {
			ret = -ENOMEM;
			list_splice(tree_list, tmp_list.prev);
			goto out;
		}

		if (is_subdir(dest_dentry, m->mnt.mnt_root)) {
			mnt_set_mountpoint(m, dest_dentry, child);
			list_add_tail(&child->mnt_hash, tree_list);
		} else {
			list_add_tail(&child->mnt_hash, &tmp_list);
		}
		prev_dest_mnt = m;
		prev_src_mnt  = child;
	}
out:
	br_write_lock(vfsmount_lock);
	while (!list_empty(&tmp_list)) {
		child = list_first_entry(&tmp_list, struct mount, mnt_hash);
		umount_tree(child, 0, &umount_list);
	}
	br_write_unlock(vfsmount_lock);
	release_mounts(&umount_list);
	return ret;
}
Example #13
void kmem_cache_reap(struct kmem_cache *cache)
{
	LIST_HEAD(list);
	const bool enabled = spin_lock_irqsave(&cache->lock);
	list_splice(&cache->free_list, &list);
	spin_unlock_irqrestore(&cache->lock, enabled);

	for (struct list_head *ptr = list.next; ptr != &list;) {
		struct kmem_slab *slab =
			LIST_ENTRY(ptr, struct kmem_slab, link);
		struct page *pages = slab->pages;

		ptr = ptr->next;
		if (cache->ops->destroy)
			cache->ops->destroy(cache, slab);

		free_pages(pages, cache->order);	
	}
}
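The pattern worth noting here: the shared free list is detached while the lock is held, and the slabs are destroyed afterwards with no lock held. One hedge: under the Linux-style contract sketched after Example #1, the source head should be re-initialized after a plain list_splice(); this kernel's helper may do that internally, but a defensive version of the detach step (reusing this example's own types and spinlock API) would read:

/* Detach step only; the INIT_LIST_HEAD() is the defensive addition. */
static void kmem_cache_detach_free_list(struct kmem_cache *cache,
					struct list_head *out)
{
	const bool enabled = spin_lock_irqsave(&cache->lock);

	list_splice(&cache->free_list, out);
	INIT_LIST_HEAD(&cache->free_list);  /* keep the source valid */
	spin_unlock_irqrestore(&cache->lock, enabled);
}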
Example #14
struct list *catalog_query_sort_hostlist(const char *hosts) {
	const char *next_host;
	char *n;
	struct catalog_host *h;
	struct list *previously_up = list_create();
	struct list *previously_down = list_create();

	if(string_null_or_empty(hosts)) {
		next_host = CATALOG_HOST;
	} else {
		next_host = hosts;
	}

	if(!down_hosts) {
		down_hosts = set_create(0);
	}

	do {
		int port;
		char host[DOMAIN_NAME_MAX];
		h = xxmalloc(sizeof(*h));
		next_host = parse_hostlist(next_host, host, &port);

		h->host = xxstrdup(host);
		h->url = string_format("http://%s:%d/query.json", host, port);
		h->down = 0;

		set_first_element(down_hosts);
		while((n = set_next_element(down_hosts))) {
			if(!strcmp(n, host)) {
				h->down = 1;
			}
		}
		if(h->down) {
			list_push_tail(previously_down, h);
		} else {
			list_push_tail(previously_up, h);
		}
	} while (next_host);

	return list_splice(previously_up, previously_down);
}
Example #15
void setup_buddy(void)
{
	balloc_for_each_region(&memory_node_add);
	balloc_for_each_free_region(&memory_free_region);

	struct list_head type_nodes[NT_COUNT];

	for (int i = 0; i != NT_COUNT; ++i)
		list_init(&type_nodes[i]);

	for (int i = 0; i != memory_nodes; ++i) {
		struct memory_node *node = memory_node_get(i);

		list_add_tail(&node->link, &type_nodes[node->type]);
	}

	for (int i = 0; i != NT_COUNT; ++i) {
		list_splice(type_nodes + i, &node_order);
		node_type[i] = node_order.next;
	}
}
Example #16
File: list.c Project: cjd2951/POS1
/* Merges A0 through A1B0 (exclusive) with A1B0 through B1
   (exclusive) to form a combined range also ending at B1
   (exclusive).  Both input ranges must be nonempty and sorted in
   nondecreasing order according to LESS given auxiliary data
   AUX.  The output range will be sorted the same way. */
static void
inplace_merge (struct list_elem *a0, struct list_elem *a1b0,
               struct list_elem *b1,
               list_less_func *less, void *aux)
{
  ASSERT (a0 != NULL);
  ASSERT (a1b0 != NULL);
  ASSERT (b1 != NULL);
  ASSERT (less != NULL);
  ASSERT (is_sorted (a0, a1b0, less, aux));
  ASSERT (is_sorted (a1b0, b1, less, aux));

  while (a0 != a1b0 && a1b0 != b1)
    if (!less (a1b0, a0, aux)) 
      a0 = list_next (a0);
    else 
      {
        a1b0 = list_next (a1b0);
        list_splice (a0, list_prev (a1b0), a1b0);
      }
}
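This Pintos-style list library uses a three-argument list_splice(before, first, last): it removes the range [first, last) from whatever list it is on and re-inserts it just before `before`, so the call above rotates the single element list_prev(a1b0) in front of a0. A simplified sketch of that operation, modeled on the Pintos helper (asserts omitted, renamed list_splice_range here to avoid clashing with the two-argument kernel helper):

struct list_elem { struct list_elem *prev, *next; };

/* Move the range [first, last) in front of `before`. The range and
   `before` may live on the same list, as in inplace_merge() above. */
static void list_splice_range(struct list_elem *before,
                              struct list_elem *first,
                              struct list_elem *last)
{
  if (first == last)
    return;                       /* empty range: nothing to move */
  last = last->prev;              /* make the range inclusive */

  /* Unlink [first, last] from its current list. */
  first->prev->next = last->next;
  last->next->prev = first->prev;

  /* Relink it just before `before`. */
  first->prev = before->prev;
  last->next = before;
  before->prev->next = first;
  before->prev = last;
}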
Example #17
static int
__tapdisk_image_open_chain(int type, const char *name, int flags,
			   struct list_head *_head, int prt_devnum)
{
	struct list_head head = LIST_HEAD_INIT(head);
	td_image_t *image;
	int err;

	err = tapdisk_image_open(type, name, flags, &image);
	if (err)
		goto fail;

	list_add_tail(&image->next, &head);

	if (unlikely(prt_devnum >= 0)) {
		char dev[32];
		snprintf(dev, sizeof(dev),
			 "%s%d", BLKTAP2_IO_DEVICE, prt_devnum);
		err = tapdisk_image_open(DISK_TYPE_AIO, dev,
					 flags|TD_OPEN_RDONLY, &image);
		if (err)
			goto fail;

		list_add_tail(&image->next, &head);
		goto done;
	}

	err = tapdisk_image_open_parents(image);
	if (err)
		goto fail;

done:
	list_splice(&head, _head);
	return 0;

fail:
	tapdisk_image_close_chain(&head);
	return err;
}
Example #18
struct list *makeflow_wrapper_generate_files( struct list *result, struct list *input, struct dag_node *n, struct makeflow_wrapper *w)
{
	char *f;
	char *nodeid = string_format("%d",n->nodeid);

	struct list *files = list_create();

	list_first_item(input);
	while((f = list_next_item(input)))
	{
		char *filename = string_replace_percents(f, nodeid);
		char *f = xxstrdup(filename);
		free(filename);

		char *remote, *p;
		struct dag_file *file;
		p = strchr(f, '=');
		if(p) {
			*p = 0;
			file = dag_file_lookup_or_create(n->d, f);
			if(!n->local_job && !itable_lookup(w->remote_names, (uintptr_t) file)){
				remote = xxstrdup(p+1);
				itable_insert(w->remote_names, (uintptr_t) file, (void *)remote);
				hash_table_insert(w->remote_names_inv, remote, (void *)file);
			}
			*p = '=';
		} else {
			file = dag_file_lookup_or_create(n->d, f);
		}
		free(f);
		list_push_tail(files, file);
	}
	free(nodeid);

	result = list_splice(result, files);

	return result;
}
Example #19
//opened tracks whether it is the opening (opened = 0) or closing (opened = 1) double quote we encounter.
struct list *lexer_read_expandable_recursive(struct lexer *lx, char end_marker, int opened)
{
	lexer_discard_white_space(lx);

	struct list *tokens = list_create();

	while(!lx->eof) {
		int c = lexer_next_peek(lx);

		if(c == '$') {
			list_push_tail(tokens, lexer_read_substitution(lx));
		}

		if(c == '\'') {
			lexer_read_literal(lx);
			list_push_tail(tokens, lexer_pack_token(lx, TOKEN_LITERAL));
		} else if(c == '"' && opened == 0) {
				lexer_add_to_lexeme(lx, lexer_next_char(lx));
				list_push_tail(tokens, lexer_pack_token(lx, TOKEN_LITERAL));     // Add first "
				tokens = list_splice(tokens, lexer_read_expandable_recursive(lx, '"', 1));
				lexer_add_to_lexeme(lx, '"');
				list_push_tail(tokens, lexer_pack_token(lx, TOKEN_LITERAL));     // Add closing "
				if(end_marker == '"')
					return tokens;
		} else if(c == '#' && end_marker != '"') {
			lexer_discard_comments(lx);
		} else if(c == end_marker) {
			lexer_next_char(lx);	/* Jump end_marker */
			return tokens;
		} else {
			list_push_tail(tokens, lexer_read_literal_in_expandable_until(lx, end_marker));
		}
	}

	lexer_report_error(lx, "Found EOF before end marker: %c.\n", end_marker);

	return NULL;
}
Example #20
struct list *lexer_read_file_list_aux(struct lexer *lx)
{
	struct list *tokens = list_create();

	lexer_discard_white_space(lx);

	while(1) {
		struct token *t = lexer_read_file(lx);
		if(!t) break;

		//Do substitution recursively
		if(t->type == TOKEN_SUBSTITUTION) {
			tokens = list_splice(tokens, lexer_expand_substitution(lx, t, lexer_read_file_list_aux));
			lexer_free_token(t);
			continue;
		} else {
			list_push_tail(tokens, t);
			if(t->type==TOKEN_NEWLINE) break;
		}
	}

	return tokens;
}
Example #21
/*
 * Dispose-list gets a local list, so it doesn't need to
 * worry about list corruption.
 */
static void dispose_list(struct list_head * head)
{
	struct list_head *next;
	int count = 0;

	next = head->next;
	for (;;) {
		struct list_head * tmp = next;
		struct inode * inode;

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_list);
		clear_inode(inode);
		count++;
	}

	/* Add them all to the unused list in one fell swoop */
	spin_lock(&inode_lock);
	list_splice(head, &inode_unused);
	inodes_stat.nr_free_inodes += count;
	spin_unlock(&inode_lock);
}
Example #22
static void mnt_resort_siblings(struct mount_info *tree)
{
	struct mount_info *m, *p;
	LIST_HEAD(list);

	/*
	 * Put siblings of each node in an order they can be (u)mounted
	 * I.e. if we have mounts on foo/bar/, foo/bar/foobar/ and foo/
	 * we should put them in the foo/bar/foobar/, foo/bar/, foo/ order.
	 * Otherwise we will not be able to (u)mount them in a sequence.
	 *
	 * Funny, but all we need for this is to sort them in the descending
	 * order of the amount of /-s in a path =)
	 *
	 * Use stupid insertion sort here, we're not expecting mount trees
	 * to contain hundreds (or more) elements.
	 */

	pr_info("\tResorting siblings on %d\n", tree->mnt_id);
	while (!list_empty(&tree->children)) {
		int depth;

		m = list_first_entry(&tree->children, struct mount_info, siblings);
		list_del(&m->siblings);

		depth = mnt_depth(m);
		list_for_each_entry(p, &list, siblings)
			if (mnt_depth(p) <= depth)
				break;

		list_add(&m->siblings, &p->siblings);
		mnt_resort_siblings(m);
	}

	list_splice(&list, &tree->children);
}
Example #23
struct trace_cpu_data *trace_get_tcd(void)
{
	struct trace_cpu_data *tcd;
	int nr_pages;
	struct list_head pages;

	/*
	 * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	/*
	 * debugging check for recursive call to libcfs_debug_msg()
	 */
	if (trace_owner == current_thread()) {
		/*
		 * Cannot assert here.
		 */
		printk(KERN_EMERG "recursive call to %s", __FUNCTION__);
		/*
		 * "The death of God left the angels in a strange position."
		 */
		cfs_enter_debugger();
	}
	tcd = &trace_data[0].tcd;
	CFS_INIT_LIST_HEAD(&pages);
	if (get_preemption_level() == 0)
		nr_pages = trace_refill_stock(tcd, CFS_ALLOC_STD, &pages);
	else
		nr_pages = 0;
	spin_lock(&trace_cpu_serializer);
	trace_owner = current_thread();
	tcd->tcd_cur_stock_pages += nr_pages;
	list_splice(&pages, &tcd->tcd_stock_pages);
	return tcd;
}
Example #24
File: direct.c Project: jeffpc/hvf
void directory_alloc_user(char *name, int auth, struct directory_prop *prop,
			  struct list_head *vdevs)
{
	struct user *user;

	assert(name);
	assert(prop->got_storage);

	user = malloc(sizeof(struct user), ZONE_NORMAL);
	assert(user);

	memset(user, 0, sizeof(struct user));

	INIT_LIST_HEAD(&user->list);
	INIT_LIST_HEAD(&user->devices);
	list_splice(vdevs, &user->devices);

	user->userid = name;
	user->storage_size = prop->storage;
	user->auth = auth;

	FIXME("locking?");
	list_add_tail(&user->list, &directory);
}
Example #25
void
__cpool_rt_task_dispatch(cpool_rt_t *rtp, struct list_head *rmq, int dispatched_bypool)
{
	int  n;
	long task_counter = 0;
	ctask_t *ptask, *nptask;
	SMLINK_Q_HEAD(qcache);
	LIST_HEAD(q_null);
	
	/**
	 * Cut the dispatch queue into two 
	 */
	n =__cpool_com_get_err_handler_q(rmq, &q_null);
	if (!list_empty(&q_null)) 
		__cpool_com_list_to_smq(&q_null, &qcache);
	
	if (list_empty(rmq))
		goto out;

	if (dispatched_bypool) {
		OSPX_pthread_mutex_lock(&rtp->core->mut);	
		/**
		 * We just remove all tasks into the dispatching queue of
		 * the rtp->core if the user does not want to procces the error
		 * handlers directly.
		 */
		rtp->core->n_qdispatchs += n;
		list_splice(rmq, &rtp->core->dispatch_q);
		
		/**
		 * We notify the rtp->core to schedule the error handlers
		 * if it is necessary
		 */ 
		if (cpool_core_need_ensure_servicesl(rtp->core))
			cpool_core_ensure_servicesl(rtp->core, NULL);
		OSPX_pthread_mutex_unlock(&rtp->core->mut);
		/**
		 * Decrease the task counter
		 */
		OSPX_interlocked_add(&rtp->tsks_held_by_dispatcher, (long)-n);

	} else {
		list_for_each_entry_safe(ptask, nptask, rmq, ctask_t, link) {
			assert (ptask->task_err_handler); 
			
			/**
			 * Reset its status and run the error handler for the rt
			 */
			ptask->f_stat = (eTASK_STAT_F_DISPATCHING|eTASK_STAT_F_SCHEDULING);
			ptask->task_err_handler(ptask, cpool_rt_core_err_reasons(TASK_CAST_CORE(ptask)));
		
			/**
			 * We deliver the task into the queue if it is requested to be rescheduled again
			 */
			if (eTASK_STAT_F_WPENDING & ptask->f_stat) {
				if (rtp->lflags & eFUNC_F_PRIORITY)
					__cpool_rt_pri_task_queue(rtp->core, ptask);
				else
					__cpool_rt_task_queue(rtp->core, ptask);
				
				++ task_counter;
			} else
				smlink_q_push(&qcache, ptask);
		}

		if (task_counter)
			OSPX_interlocked_add(&rtp->tsks_held_by_dispatcher, -task_counter);
	}

out:
	/* NOTE: the listing is truncated at this point in the original
	 * source; presumably the cached entries left in qcache are
	 * released here. */
	;
}
Example #26
void list_sort(list_head * list, int (*node_compare)(list_node *, list_node *))
{
    struct list_link *p, *q, *t;
    list_head tmp;
    int merges = 0;
    int k = 1;
    int psize, qsize; 

    if (list_empty(list))
	return;

    do
    {
	INIT_LIST_HEAD(&tmp);
	p = list->next;
	merges = 0;
	psize = qsize = 0;

	while (p != list)
	{
	    merges++;
	    q = p;

	    while (q != list && psize < k)
	    {
		q = q->next;
		psize++;
	    }
		
	    qsize = k;

	    while (psize || (qsize && q != list))
	    {
		if (psize && (qsize == 0 || q == list || node_compare(p, q) <= 0))
		{
		    t = p;
		    p = p->next;
		    psize--;
		}
		else if (qsize == 0)
		{
		    printf("whoaa. qsize is zero\n");
		    exit (1);
		}
		else
		{
		    t = q;
		    q = q->next;
		    qsize--;
		}
		
		list_del(t);
		
		list_add_tail(t, &tmp);
	    }

	    p = q;
	}

	if (!list_empty(list))
	{
	    printf("whoaa. initial list not empty\n");
	    exit (1);
	}
	    
	list_splice(&tmp, list);
	k *= 2;

	//printf("done w sort pass %d %d\n", k, merges);
    }
    while (merges > 1);
}
Example #27
static int
tapdisk_image_open_x_chain(const char *path, struct list_head *_head)
{
	struct list_head head = LIST_HEAD_INIT(head);
	td_image_t *image = NULL, *next;
	regex_t _im, *im = NULL, _ws, *ws = NULL;
	FILE *s;
	int err;

	s = fopen(path, "r");
	if (!s) {
		err = -errno;
		goto fail;
	}

	err = regcomp(&_ws, "^[[:space:]]*$", REG_NOSUB);
	if (err)
		goto fail;
	ws = &_ws;

	err = regcomp(&_im,
		      "^([^:]+):([^ \t]+)([ \t]+([a-z,]+))?",
		      REG_EXTENDED|REG_NEWLINE);
	if (err)
		goto fail;
	im = &_im;

	do {
		char line[512], *l;
		regmatch_t match[5];
		char *typename, *path, *args = NULL;
		unsigned long flags;
		int type;

		l = fgets(line, sizeof(line), s);
		if (!l)
			break;

		err = regexec(im, line, ARRAY_SIZE(match), match, 0);
		if (err) {
			err = regexec(ws, line, ARRAY_SIZE(match), match, 0);
			if (!err)
				continue;
			err = -EINVAL;
			goto fail;
		}

		line[match[1].rm_eo] = 0;
		typename = line + match[1].rm_so;

		line[match[2].rm_eo] = 0;
		path = line + match[2].rm_so;

		if (match[4].rm_so >= 0) {
			line[match[4].rm_eo] = 0;
			args = line + match[4].rm_so;
		}

		type = tapdisk_disktype_find(typename);
		if (type < 0) {
			err = type;
			goto fail;
		}

		flags = 0;

		if (args) {
			err = tapdisk_image_parse_flags(args, &flags);
			if (err)
				goto fail;
		}

		err = tapdisk_image_open(type, path, flags, &image);
		if (err)
			goto fail;

		list_add_tail(&image->next, &head);
	} while (1);

	if (!image) {
		err = -EINVAL;
		goto fail;
	}

	err = tapdisk_image_open_parents(image);
	if (err)
		goto fail;

	list_splice(&head, _head);
out:
	if (im)
		regfree(im);
	if (ws)
		regfree(ws);
	if (s)
		fclose(s);

	return err;

fail:
	tapdisk_for_each_image_safe(image, next, &head)
		tapdisk_image_free(image);

	goto out;
}
Example #28
/**
 * ep_scan_ready_list - Scans the ready list in a way that makes it
 *                      possible for the scan code to call f_op->poll().
 *                      Also allows for O(NumReady) performance.
 *
 * @ep: Pointer to the epoll private data structure.
 * @sproc: Pointer to the scan callback.
 * @priv: Private opaque data passed to the @sproc callback.
 *
 * Returns: The same integer error code returned by the @sproc callback.
 */
static int ep_scan_ready_list(struct eventpoll *ep,
			      int (*sproc)(struct eventpoll *,
					   struct list_head *, void *),
			      void *priv)
{
	int error, pwake = 0;
	unsigned long flags;
	struct epitem *epi, *nepi;
	LIST_HEAD(txlist);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() and epoll_ctl().
	 */
	mutex_lock(&ep->mtx);

	/*
	 * Steal the ready list, and re-init the original one to the
	 * empty list. Also, set ep->ovflist to NULL so that events
	 * happening while looping w/out locks, are not lost. We cannot
	 * have the poll callback to queue directly on ep->rdllist,
	 * because we want the "sproc" callback to be able to do it
	 * in a lockless way.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	/* Note this step: up to here, every epitem that has seen events is
	 * linked on rdllist. After the splice they have all moved to
	 * txlist, and rdllist is left empty. */
	list_splice_init(&ep->rdllist, &txlist);
	/* ovflist (explained at ep_poll_callback()): from this point on we
	 * do not want new events added to the ready list directly; park
	 * them here and process them on the next pass... */
	ep->ovflist = NULL;
	spin_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Now call the callback function.
	 */
	/* Every epitem is processed inside this callback; sproc here is
	 * ep_send_events_proc, commented on below. */
	error = (*sproc)(ep, &txlist, priv);

	spin_lock_irqsave(&ep->lock, flags);
	/*
	 * During the time we spent inside the "sproc" callback, some
	 * other events might have been queued by the poll callback.
	 * We re-insert them inside the main ready-list here.
	 */
	/* Now handle ovflist: these epitems all saw events while we were
	 * busy delivering data to user space. */
	for (nepi = ep->ovflist; (epi = nepi) != NULL;
	     nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
		/*
		 * We need to check if the item is already in the list.
		 * During the "sproc" callback execution time, items are
		 * queued into ->ovflist but the "txlist" might already
		 * contain them, and the list_splice() below takes care of them.
		 */
		/* Put these straight onto the ready list */
		if (!ep_is_linked(&epi->rdllink))
			list_add_tail(&epi->rdllink, &ep->rdllist);
	}
	/*
	 * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
	 * releasing the lock, events will be queued in the normal way inside
	 * ep->rdllist.
	 */
	ep->ovflist = EP_UNACTIVE_PTR;

	/*
	 * Quickly re-inject items left on "txlist".
	 */
	/* Re-insert the epitems left unprocessed from this pass into the
	 * ready list */
	list_splice(&txlist, &ep->rdllist);

	/* The ready list is not empty, so wake up waiters directly... */
	if (!list_empty(&ep->rdllist)) {
		/*
		 * Wake up (if active) both the eventpoll wait list and
		 * the ->poll() wait list (delayed after we release the lock).
		 */
		if (waitqueue_active(&ep->wq))
			wake_up_locked(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	mutex_unlock(&ep->mtx);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return error;
}
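ep_scan_ready_list() uses both splice variants deliberately: rdllist is stolen with list_splice_init() because it stays live and must remain a valid (empty) head, while txlist is re-injected with plain list_splice() because it is a local that goes out of scope immediately. In terms of the helpers sketched after Example #1, the _init variant is simply:

static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	list_splice(list, head);   /* move the entries (if any) */
	INIT_LIST_HEAD(list);      /* then leave the source usable */
}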