Example #1
/** @internal @This is called when there is input data.
 *
 * @param upipe description structure of the pipe
 * @param uref input uref structure
 * @param upump_p reference to pump that generated the buffer
 */
static void upipe_burst_input(struct upipe *upipe, struct uref *uref,
                              struct upump **upump_p)
{
    struct upipe_burst *upipe_burst = upipe_burst_from_upipe(upipe);

    ulist_add(&upipe_burst->urefs, uref_to_uchain(uref));
    upipe_burst_throw_update(upipe, false);
    ueventfd_write(&upipe_burst->ueventfd);
}
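All of these examples lean on the same intrusive-list idiom: the link node (a uchain in Upipe, a list_head or ulist entry in the kernel examples) is embedded inside the payload structure, and helpers such as uref_to_uchain() translate between payload and link. Below is a minimal, self-contained sketch of that idiom; every name in it is a hypothetical stand-in, not Upipe's actual implementation.

#include <stddef.h>
#include <stdio.h>

/* hypothetical stand-ins for Upipe's uchain/ulist: the link node doubles
 * as the head of a circular doubly-linked list */
struct my_uchain {
    struct my_uchain *next;
    struct my_uchain *prev;
};

static void my_ulist_init(struct my_uchain *list)
{
    list->next = list->prev = list;
}

/* append an element at the tail, mirroring ulist_add() */
static void my_ulist_add(struct my_uchain *list, struct my_uchain *element)
{
    element->prev = list->prev;
    element->next = list;
    list->prev->next = element;
    list->prev = element;
}

/* a payload type embedding its link, the way struct uref embeds a uchain */
struct my_item {
    int value;
    struct my_uchain uchain;
};

/* translate between payload and link, like uref_to_uchain() */
static struct my_uchain *my_item_to_uchain(struct my_item *item)
{
    return &item->uchain;
}

static struct my_item *my_item_from_uchain(struct my_uchain *uchain)
{
    return (struct my_item *)((char *)uchain -
                              offsetof(struct my_item, uchain));
}

int main(void)
{
    struct my_uchain list;
    struct my_item a = { .value = 1 }, b = { .value = 2 };

    my_ulist_init(&list);
    my_ulist_add(&list, my_item_to_uchain(&a));
    my_ulist_add(&list, my_item_to_uchain(&b));
    for (struct my_uchain *u = list.next; u != &list; u = u->next)
        printf("%d\n", my_item_from_uchain(u)->value);
    return 0;
}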
Example #2
/* allocate es configuration */
static struct es_conf *es_conf_alloc(struct udict_mgr *mgr,
                                     uint64_t id, struct uchain *list)
{
    struct es_conf *conf = calloc(1, sizeof(struct es_conf));
    if (unlikely(conf == NULL))
        return NULL;
    /* allocate the options dictionary before linking the conf into the
     * list, so a failure can be unwound without unlinking */
    conf->options = udict_alloc(mgr, 0);
    if (unlikely(conf->options == NULL)) {
        free(conf);
        return NULL;
    }
    conf->id = id;
    uchain_init(&conf->uchain);
    if (list)
        ulist_add(list, &conf->uchain);
    return conf;
}
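The excerpt shows no matching destructor, so here is a hypothetical cleanup counterpart for symmetry. It assumes the conf has already been unlinked from any list it was added to; udict_free() is Upipe's destructor for udicts.

static void es_conf_free(struct es_conf *conf)
{
    if (conf == NULL)
        return;
    udict_free(conf->options);
    free(conf);
}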
Example #3
/** @internal @This checks a URI.
 *
 * @param upipe description structure of the pipe
 * @param flow_def the current flow definition
 * @param uri the uri
 * @return an error code
 */
static int upipe_m3u_reader_process_uri(struct upipe *upipe,
                                        struct uref *flow_def,
                                        const char *uri)
{
    struct upipe_m3u_reader *upipe_m3u_reader =
        upipe_m3u_reader_from_upipe(upipe);

    upipe_verbose_va(upipe, "uri %s", uri);
    UBASE_RETURN(uref_flow_match_def(flow_def, M3U_FLOW_DEF));
    struct uref *item;
    UBASE_RETURN(upipe_m3u_reader_get_item(upipe, flow_def, &item));
    UBASE_RETURN(uref_m3u_set_uri(item, uri));
    if (upipe_m3u_reader->key)
        UBASE_RETURN(uref_m3u_playlist_key_copy(item, upipe_m3u_reader->key));
    upipe_m3u_reader->item = NULL;
    ulist_add(&upipe_m3u_reader->items, uref_to_uchain(item));
    return UBASE_ERR_NONE;
}
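The UBASE_RETURN() calls above are Upipe's early-return idiom: evaluate a command and propagate its error code out of the enclosing function unless it is UBASE_ERR_NONE. A simplified sketch of the macro (the real one lives in upipe/ubase.h and is equivalent in spirit):

/* simplified sketch of Upipe's early-return helper */
#define UBASE_RETURN(command)                                   \
    do {                                                        \
        int ubase_return_err = (command);                       \
        if (unlikely(ubase_return_err != UBASE_ERR_NONE))       \
            return ubase_return_err;                            \
    } while (0)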
Example #4
/** @internal @This catches events thrown by pipes.
 *
 * @param uprobe pointer to probe
 * @param upipe pointer to pipe throwing the event
 * @param event event thrown
 * @param args optional event-specific parameters
 * @return an error code
 */
static int uprobe_pfx_throw(struct uprobe *uprobe, struct upipe *upipe,
                            int event, va_list args)
{
    struct uprobe_pfx *uprobe_pfx = uprobe_pfx_from_uprobe(uprobe);
    if (event != UPROBE_LOG)
        return uprobe_throw_next(uprobe, upipe, event, args);

    struct ulog *ulog = va_arg(args, struct ulog *);
    enum uprobe_log_level level = ulog->level;
    if (uprobe->next == NULL || uprobe_pfx->min_level > level)
        return UBASE_ERR_NONE;

    /* the prefix node lives on the stack: this is safe because it is only
     * linked for the duration of the synchronous uprobe_throw() below */
    struct ulog_pfx ulog_pfx;
    ulog_pfx.tag = likely(uprobe_pfx->name != NULL) ?
        uprobe_pfx->name : "unknown";
    ulist_add(&ulog->prefixes, ulog_pfx_to_uchain(&ulog_pfx));

    return uprobe_throw(uprobe->next, upipe, event, ulog);
}
Example #5
/** @internal @This merges the new access unit into a possibly existing
 * incomplete PES, and outputs the PES if possible.
 *
 * @param upipe description structure of the pipe
 * @param uref uref structure
 * @param upump_p reference to pump that generated the buffer
 * @return false if the input must be blocked
 */
static bool upipe_ts_pese_handle(struct upipe *upipe, struct uref *uref,
                                 struct upump **upump_p)
{
    struct upipe_ts_pese *upipe_ts_pese = upipe_ts_pese_from_upipe(upipe);
    const char *def;
    if (unlikely(ubase_check(uref_flow_get_def(uref, &def)))) {
        upipe_ts_pese_work(upipe, NULL);

        uref_ts_flow_get_pes_id(uref, &upipe_ts_pese->pes_id);
        upipe_ts_pese->pes_header_size = 0;
        uref_ts_flow_get_pes_header(uref, &upipe_ts_pese->pes_header_size);
        upipe_ts_pese->pes_min_duration = 0;
        uref_ts_flow_get_pes_min_duration(uref,
                                          &upipe_ts_pese->pes_min_duration);
        upipe_ts_pese->input_latency = 0;
        uref_clock_get_latency(uref, &upipe_ts_pese->input_latency);
        if (unlikely(!ubase_check(uref_clock_set_latency(uref,
                                        upipe_ts_pese->input_latency +
                                        upipe_ts_pese->pes_min_duration))))
            upipe_throw_fatal(upipe, UBASE_ERR_ALLOC);
        upipe_ts_pese_store_flow_def(upipe, NULL);
        upipe_ts_pese_require_ubuf_mgr(upipe, uref);
        return true;
    }

    if (upipe_ts_pese->flow_def == NULL)
        return false;

    uint64_t uref_duration = upipe_ts_pese->pes_min_duration;
    uref_clock_get_duration(uref, &uref_duration);
    size_t uref_size = 0;
    uref_block_size(uref, &uref_size);
    uref_block_delete_start(uref);

    ulist_add(&upipe_ts_pese->next_pes, uref_to_uchain(uref));
    upipe_ts_pese->next_pes_duration += uref_duration;
    upipe_ts_pese->next_pes_size += uref_size;

    if (upipe_ts_pese->next_pes_duration >= upipe_ts_pese->pes_min_duration)
        upipe_ts_pese_work(upipe, upump_p);
    return true;
}
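The buffering logic above is a plain threshold batcher: each access unit's duration and size are added to the pending PES, which is flushed once the accumulated duration reaches pes_min_duration. A minimal, self-contained restatement of that policy (hypothetical types, not the Upipe API):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct pes_batch {
    uint64_t min_duration; /* flush threshold, like pes_min_duration */
    uint64_t duration;     /* accumulated duration of queued units */
    size_t size;           /* accumulated payload size */
};

/* queue one access unit; returns true when the caller should emit the
 * batch (the equivalent of calling upipe_ts_pese_work() above) */
static bool pes_batch_push(struct pes_batch *batch,
                           uint64_t duration, size_t size)
{
    batch->duration += duration;
    batch->size += size;
    return batch->duration >= batch->min_duration;
}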
Example #6
/*
 * this adds all existing backrefs (inline backrefs, keyed backrefs and
 * delayed refs) for the given bytenr to the refs list, merges duplicates
 * and resolves indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans)
		path->search_commit_root = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	if (trans) {
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			ret = __add_delayed_refs(head, time_seq,
						 &prefs_delayed);
			mutex_unlock(&head->mutex);
			if (ret) {
				spin_unlock(&delayed_refs->lock);
				goto out;
			}
		}
		spin_unlock(&delayed_refs->lock);
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	list_splice_init(&prefs_delayed, &prefs);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 1);

	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
				      extent_item_pos);
	if (ret)
		goto out;

	__merge_refs(&prefs, 2);

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		WARN_ON(ref->count < 0);
		if (ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			struct extent_inode_elem *eie = NULL;
			if (extent_item_pos && !ref->inode_list) {
				u32 bsz;
				struct extent_buffer *eb;
				bsz = btrfs_level_size(fs_info->extent_root,
							info_level);
				eb = read_tree_block(fs_info->extent_root,
							   ref->parent, bsz, 0);
				if (!eb || !extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				ref->inode_list = eie;
				free_extent_buffer(eb);
			}
			ret = ulist_add_merge(refs, ref->parent,
					      (uintptr_t)ref->inode_list,
					      (u64 *)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
		}
		kfree(ref);
	}

out:
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kfree(ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kfree(ref);
	}

	return ret;
}
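The `goto again` block above is a lock-ordering workaround: the code must not sleep on the head's mutex while holding the delayed-refs spinlock, so on contention it takes a reference, drops the spinlock, waits for the mutex to become free, and restarts the search. A minimal pthread sketch of the same shape (hypothetical, and it omits the reference counting the kernel needs because the head may be freed underneath it):

#include <pthread.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;
static int shared_state; /* stand-in for the work done under both locks */

static void lock_both_and_work(void)
{
again:
	pthread_mutex_lock(&outer);
	if (pthread_mutex_trylock(&inner) != 0) {
		/*
		 * contended: we must not block while holding outer, so
		 * drop it, wait until inner is released, and retry
		 */
		pthread_mutex_unlock(&outer);
		pthread_mutex_lock(&inner);
		pthread_mutex_unlock(&inner);
		goto again;
	}
	shared_state++;
	pthread_mutex_unlock(&inner);
	pthread_mutex_unlock(&outer);
}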
Example #7
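/*
 * collect in 'parents' every leaf (or, above level 0, the single node)
 * that references wanted_disk_byte, merging per-parent inode lists when
 * the same leaf is seen more than once
 */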
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
				struct ulist *parents, int level,
				struct btrfs_key *key_for_search, u64 time_seq,
				u64 wanted_disk_byte,
				const u64 *extent_item_pos)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * We normally enter this function with the path already pointing to
	 * the first item to check. But sometimes, we may enter it with
	 * slot==nritems. In that case, go to the next leaf before we continue.
	 */
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
		ret = btrfs_next_old_leaf(root, path, time_seq);

	while (!ret) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge(parents, eb->start,
					      (uintptr_t)eie,
					      (u64 *)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
		}
next:
		ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	return ret;
}
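ulist_add_merge() above is insert-or-get: it returns 1 when the value is new, 0 when it already exists (reporting the existing aux pointer through the out parameter), and a negative value on error. A self-contained sketch of that contract (hypothetical, array-backed for brevity):

#include <stdint.h>

struct chain { struct chain *next; };
struct entry { uint64_t val; struct chain *aux; };
struct uset { struct entry entries[64]; unsigned int count; };

/* insert-or-get, mirroring ulist_add_merge(): 1 = newly added,
 * 0 = duplicate (existing aux stored in *old_aux), -1 = table full */
static int uset_add_merge(struct uset *set, uint64_t val,
			  struct chain *aux, struct chain **old_aux)
{
	unsigned int i;

	for (i = 0; i < set->count; i++) {
		if (set->entries[i].val == val) {
			*old_aux = set->entries[i].aux;
			return 0;
		}
	}
	if (set->count == 64)
		return -1;
	set->entries[set->count].val = val;
	set->entries[set->count].aux = aux;
	set->count++;
	return 1;
}

On a 0 return, add_all_parents() walks old->next to the tail and appends eie: that is the splice performed in the `!ret && extent_item_pos` branch.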
Example #8
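/* attach an item to its owning page and bump the page's item counter */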
static void __add_item_to_page(struct page *pg, struct item *it)
{
	it->owner = pg;
	ulist_add(&pg->items, &it->ulist);
	__inc_items_num(pg);
}
Example #9
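/* attach a page to its owning data set and update the page/item counters */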
static void __add_page_to_data(struct data *dt, struct page *pg)
{
	pg->owner = dt;
	ulist_add(&dt->pages, &pg->ulist);
	__inc_pages_and_items_num(dt, pg);
}
Example #10
static void sanitize_passwd(void) {
	struct stat s;
	if (stat("/etc/passwd", &s) == -1)
		return;
	assert(uid_min);
	if (arg_debug)
		printf("Sanitizing /etc/passwd, UID_MIN %d\n", uid_min);
	if (is_link("/etc/passwd")) {
		fprintf(stderr, "Error: invalid /etc/passwd\n");
		exit(1);
	}

	FILE *fpin = NULL;
	FILE *fpout = NULL;

	// open files
	/* coverity[toctou] */
	fpin = fopen("/etc/passwd", "r");
	if (!fpin)
		goto errout;
	fpout = fopen(RUN_PASSWD_FILE, "w");
	if (!fpout)
		goto errout;

	// read the file line by line
	char buf[MAXBUF];
	uid_t myuid = getuid();
	while (fgets(buf, MAXBUF, fpin)) {
		// skip comments and empty lines
		if (*buf == '\n' || *buf == '\0' || *buf == '#')
			continue;

		// sample line:
		// 	www-data:x:33:33:www-data:/var/www:/bin/sh
		// drop entries with uid >= UID_MIN that are neither the
		// current user nor user nobody (65534)
		char *ptr = buf;

		// advance to uid
		while (*ptr != ':' && *ptr != '\0')
			ptr++;
		if (*ptr == '\0')
			goto errout;
		char *ptr1 = ptr;
		ptr++;
		while (*ptr != ':' && *ptr != '\0')
			ptr++;
		if (*ptr == '\0')
			goto errout;
		ptr++;
		if (*ptr == '\0')
			goto errout;

		// process uid
		int uid;
		int rv = sscanf(ptr, "%d:", &uid);
		if (rv != 1 || uid < 0)
			goto errout;
		assert(uid_min);
		if (uid < uid_min || uid == 65534) { // on Debian platforms user nobody is 65534
			fprintf(fpout, "%s", buf);
			continue;
		}
		if ((uid_t) uid != myuid) {
			// store user name - necessary to process /etc/group
			*ptr1 = '\0';
			char *user = strdup(buf);
			if (!user)
				errExit("malloc");
			ulist_add(user);
			continue; // skip line
		}
		fprintf(fpout, "%s", buf);
	}
	fclose(fpin);
	SET_PERMS_STREAM(fpout, 0, 0, 0644);
	fclose(fpout);

	// mount-bind the new password file
	if (mount(RUN_PASSWD_FILE, "/etc/passwd", "none", MS_BIND, "mode=400,gid=0") < 0)
		errExit("mount");
	fs_logger("create /etc/passwd");

	return;

errout:
	fwarning("failed to clean up /etc/passwd\n");
	if (fpin)
		fclose(fpin);
	if (fpout)
		fclose(fpout);
}
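The loop above reduces to a single keep/drop predicate: an entry is copied through when it is a system account (uid below UID_MIN), the Debian nobody uid 65534, or the calling user; anything else is dropped and its name is remembered for the later /etc/group pass. A hypothetical restatement of that predicate:

#include <stdbool.h>
#include <sys/types.h>

// keep system accounts, user nobody (65534) and the calling user;
// every other entry is dropped from the sandboxed /etc/passwd
static bool passwd_entry_kept(uid_t uid, uid_t myuid, uid_t uid_min) {
	return uid < uid_min || uid == 65534 || uid == myuid;
}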
Example #11
/*
 * this adds all existing backrefs (inline backrefs, keyed backrefs and
 * delayed refs) for the given bytenr to the refs list, merges duplicates
 * and resolves indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	int info_level = 0;
	int ret;
	struct list_head prefs;
	struct __prelim_ref *ref;
	struct extent_inode_elem *eie = NULL;
	u64 total_refs = 0;

	INIT_LIST_HEAD(&prefs);

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs,
						&total_refs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 1);

	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
				      extent_item_pos, total_refs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 2);

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		WARN_ON(ref->count < 0);
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				u32 bsz;
				struct extent_buffer *eb;
				bsz = fs_info->extent_root->nodesize;
				eb = read_tree_block(fs_info->extent_root,
							   ref->parent, bsz, 0);
				if (!eb || !extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		list_del(&ref->list);
		kfree(ref);
	}

out:
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kfree(ref);
	}
	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}
Example #12
/*
 * this adds all existing backrefs (inline backrefs, keyed backrefs and
 * delayed refs) for the given bytenr to the refs list, merges duplicates
 * and resolves indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;
	struct extent_inode_elem *eie = NULL;
	u64 total_refs = 0;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY)) {
#else
	if (trans) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = __add_delayed_refs(head, time_seq,
						 &prefs_delayed, &total_refs);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs,
						&total_refs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	list_splice_init(&prefs_delayed, &prefs);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 1);

	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
				      extent_item_pos, total_refs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 2);

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		WARN_ON(ref->count < 0);
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				u32 bsz;
				struct extent_buffer *eb;
				bsz = btrfs_level_size(fs_info->extent_root,
							ref->level);
				eb = read_tree_block(fs_info->extent_root,
							   ref->parent, bsz, 0);
				if (!eb || !extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				btrfs_tree_read_lock(eb);
				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}

out:
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}
	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = (struct extent_inode_elem *)(uintptr_t)node->aux;
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. The leafs will be stored in *leafs, which must be freed with
 * ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 time_seq, struct ulist **leafs,
				const u64 *extent_item_pos)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr,
				time_seq, *leafs, NULL, extent_item_pos);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}
Example #13
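/** @internal @This processes an EXT-X-MEDIA line.
 *
 * @param upipe description structure of the pipe
 * @param flow_def the current flow definition
 * @param line the attribute list of the line
 * @return an error code
 */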
static int upipe_m3u_reader_process_media(struct upipe *upipe,
                                          struct uref *flow_def,
                                          const char *line)
{
    struct upipe_m3u_reader *upipe_m3u_reader =
        upipe_m3u_reader_from_upipe(upipe);

    if (!ubase_check(uref_flow_match_def(flow_def, M3U_FLOW_DEF)) &&
        !ubase_check(uref_flow_match_def(flow_def, MASTER_FLOW_DEF)))
        return UBASE_ERR_INVALID;
    UBASE_RETURN(uref_flow_set_def(flow_def, MASTER_FLOW_DEF));

    struct uref *item = uref_sibling_alloc_control(flow_def);
    if (unlikely(item == NULL)) {
        upipe_throw_fatal(upipe, UBASE_ERR_ALLOC);
        return UBASE_ERR_ALLOC;
    }

    struct ustring name, value;
    while (ubase_check(attribute_iterate(&line, &name, &value)) && line) {
        if (!ustring_cmp_str(name, "URI")) {
            value = ustring_unframe(value, '"');
            char val[value.len + 1];
            int err = ustring_cpy(value, val, sizeof (val));
            if (unlikely(!ubase_check(err))) {
                upipe_warn_va(upipe, "fail to copy %.*s",
                              (int)value.len, value.at);
                continue;
            }
            err = uref_m3u_set_uri(item, val);
            if (unlikely(!ubase_check(err))) {
                upipe_warn_va(upipe, "fail to set uri %s", val);
                continue;
            }
        }
        else if (!ustring_cmp_str(name, "TYPE")) {
            char val[value.len + 1];
            int err = ustring_cpy(value, val, sizeof (val));
            if (unlikely(!ubase_check(err))) {
                upipe_warn_va(upipe, "fail to copy %.*s",
                              (int)value.len, value.at);
                continue;
            }
            err = uref_m3u_master_set_media_type(item, val);
            if (unlikely(!ubase_check(err))) {
                upipe_warn_va(upipe, "fail to set media type %s", val);
                continue;
            }
        }
        else if (!ustring_cmp_str(name, "DEFAULT")) {
            if (!ustring_cmp_str(value, "YES")) {
                int err = uref_m3u_master_set_media_default(item);
                if (unlikely(!ubase_check(err)))
                    continue;
            }
            else if (ustring_cmp_str(value, "NO")) {
                upipe_warn_va(upipe, "invalid DEFAULT value %.*s",
                              (int)value.len, value.at);
                continue;
            }
        }
        else if (!ustring_cmp_str(name, "AUTOSELECT")) {
            if (!ustring_cmp_str(value, "YES")) {
                int err = uref_m3u_master_set_media_autoselect(item);
                if (unlikely(!ubase_check(err)))
                    continue;
            }
            else if (ustring_cmp_str(value, "NO")) {
                upipe_warn_va(upipe, "invalid AUTOSELECT value %.*s",
                              (int)value.len, value.at);
                continue;
            }
        }
        else if (!ustring_cmp_str(name, "NAME")) {
            value = ustring_unframe(value, '"');
            char val[value.len + 1];
            int err = ustring_cpy(value, val, sizeof (val));
            if (unlikely(!ubase_check(err))) {
                upipe_warn_va(upipe, "fail to copy %.*s",
                              (int)value.len, value.at);
                continue;
            }
            err = uref_m3u_master_set_media_name(item, val);
            if (unlikely(!ubase_check(err))) {
                upipe_warn_va(upipe, "fail to set media name %s", val);
                continue;
            }
        }
        else if (!ustring_cmp_str(name, "GROUP-ID")) {
            value = ustring_unframe(value, '"');
            char val[value.len + 1];
            int err = ustring_cpy(value, val, sizeof (val));
            if (unlikely(!ubase_check(err))) {
                upipe_warn_va(upipe, "fail to copy %.*s",
                              (int)value.len, value.at);
                continue;
            }
            err = uref_m3u_master_set_media_group(item, val);
            if (unlikely(!ubase_check(err))) {
                upipe_warn_va(upipe, "fail to set group id %s", val);
                continue;
            }
        }
        else {
            upipe_warn_va(upipe, "ignoring attribute %.*s (%.*s)",
                          (int)name.len, name.at,
                          (int)value.len, value.at);
        }
    }
    ulist_add(&upipe_m3u_reader->items, uref_to_uchain(item));
    return UBASE_ERR_NONE;
}
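Every attribute branch above repeats the same copy-then-set step: values arrive as pointer-plus-length ustring slices, so each one is copied into a stack buffer and NUL-terminated before being handed to a C-string setter. A self-contained sketch of that step (hypothetical slice type, not Upipe's ustring API):

#include <stddef.h>
#include <string.h>

struct slice { const char *at; size_t len; };

/* copy a length-delimited slice into a NUL-terminated buffer,
 * truncating if the destination is too small; cap must be > 0 */
static void slice_to_cstr(struct slice s, char *buf, size_t cap)
{
    size_t n = s.len < cap ? s.len : cap - 1;

    memcpy(buf, s.at, n);
    buf[n] = '\0';
}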