Example #1
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct request *rq;
	int data_dir;

	if (dd->next_rq[WRITE])
		rq = dd->next_rq[WRITE];
	else
		rq = dd->next_rq[READ];

	if (rq && dd->batching < dd->fifo_batch)
		goto dispatch_request;

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}


	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
		rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	} else {
		rq = dd->next_rq[data_dir];
	}

	dd->batching = 0;

dispatch_request:
	dd->batching++;
	deadline_move_request(dd, rq);

	return 1;
}
Example #2
void prdebug_sock_tag_tree(int indent_level,
			   struct rb_root *sock_tag_tree)
{
	struct rb_node *node;
	struct sock_tag *sock_tag_entry;
	char *str;

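	/* Nothing to print unless qtaguid debug output is enabled. */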
	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
		return;

	if (RB_EMPTY_ROOT(sock_tag_tree)) {
		str = "sock_tag_tree=rb_root{}";
		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
		return;
	}

	str = "sock_tag_tree=rb_root{";
	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
	indent_level++;
	for (node = rb_first(sock_tag_tree);
	     node;
	     node = rb_next(node)) {
		sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
		str = pp_sock_tag(sock_tag_entry);
		pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
		kfree(str);
	}
	indent_level--;
	str = "}";
	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
}
Example #3
static struct fiops_ioc *fiops_select_ioc(struct fiops_data *fiopsd)
{
	struct fiops_ioc *ioc;
	struct fiops_rb_root *service_tree = NULL;
	int i;
	struct request *rq;

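	/* Pick the first non-empty service tree, scanning from RT down to IDLE. */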
	for (i = RT_WORKLOAD; i >= IDLE_WORKLOAD; i--) {
		if (!RB_EMPTY_ROOT(&fiopsd->service_tree[i].rb)) {
			service_tree = &fiopsd->service_tree[i];
			break;
		}
	}

	if (!service_tree)
		return NULL;

	ioc = fiops_rb_first(service_tree);

	rq = rq_entry_fifo(ioc->fifo.next);
	/*
	 * If this is the only queued task, its request is async, and sync
	 * requests are in flight, delay a moment. If other tasks are coming,
	 * sync tasks cannot be starved, so don't delay.
	 */
	if (!rq_is_sync(rq) && fiopsd->in_flight[1] != 0 &&
			service_tree->count == 1) {
		fiops_log_ioc(fiopsd, ioc,
				"postpone async, in_flight async %d sync %d",
				fiopsd->in_flight[0], fiopsd->in_flight[1]);
		return NULL;
	}

	return ioc;
}
Example #4
static unsigned int remove_extent(struct results_tree *res, struct extent *extent)
{
	struct dupe_extents *p = extent->e_parent;
	struct rb_node *n;
	unsigned int result;

again:
	p->de_score -= p->de_len;
	p->de_num_dupes--;
	result = p->de_num_dupes;

	list_del_init(&extent->e_list);
	list_del_init(&extent->e_file_extents);
	rb_erase(&extent->e_node, &p->de_extents_root);
	free_extent(extent);

	if (p->de_num_dupes == 1) {
		/* It doesn't make sense to have one extent in a dup
		 * list. */
		abort_on(RB_EMPTY_ROOT(&p->de_extents_root));/* logic error */

		n = rb_first(&p->de_extents_root);
		extent = rb_entry(n, struct extent, e_node);
		goto again;
	}
Example #5
static void
vr_exit_queue(struct elevator_queue *e)
{
	struct vr_data *vd = e->elevator_data;

	BUG_ON(!RB_EMPTY_ROOT(&vd->sort_list));
	kfree(vd);
}
Example #6
static void print_summary(const char *filename)
{
	struct sym_ext *sym_ext;
	struct rb_node *node;

	printf("\nSorted summary for file %s\n", filename);
	printf("----------------------------------------------\n\n");

	if (RB_EMPTY_ROOT(&root_sym_ext)) {
		printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
		return;
	}

	node = rb_first(&root_sym_ext);
	while (node) {
		double percent;
		const char *color;
		char *path;

		sym_ext = rb_entry(node, struct sym_ext, node);
		percent = sym_ext->percent;
		color = get_percent_color(percent);
		path = sym_ext->path;

		color_fprintf(stdout, color, " %7.2f %s", percent, path);
		node = rb_next(node);
	}
}
Example #7
static int namei_dscan(ext2_ino_t ino, struct ext2_inode *inode,
		       struct dentry *parent, const char *name, int namelen,
		       struct ea_info *eas)
{
	struct target_inode *t = namei_find_inode(ino);
	int offset;

	/* We may have already printed a name for this inode, and no longer
	 * care about it.
	 */
	if (!t)
		return ACTION_COMPLETE;

	if (--t->nlinks == 0)
		rb_erase(&t->rb_node, &namei_targets);

	offset = build_path(parent, 0);
	fprintf(outfile, "%lu %.*s%.*s\n", ino, offset, path_buffer,
			 namelen, name);

	if (RB_EMPTY_ROOT(&namei_targets))
		return ACTION_END_SCAN;

	return ACTION_COMPLETE;
}
Example #8
/*
 * lookup rb entry in position of @ofs in rb-tree,
 * if hit, return the entry, otherwise, return NULL
 * @prev_ex: extent before ofs
 * @next_ex: extent after ofs
 * @insert_p: insert point for new extent at ofs
 * in order to simplify the insertion afterwards.
 * tree must stay unchanged between lookup and insertion.
 */
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root *root,
				struct rb_entry *cached_re,
				unsigned int ofs,
				struct rb_entry **prev_entry,
				struct rb_entry **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool force)
{
	struct rb_node **pnode = &root->rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct rb_entry *re = cached_re;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	if (re) {
		if (re->ofs <= ofs && re->ofs + re->len > ofs)
			goto lookup_neighbors;
	}

	while (*pnode) {
		parent = *pnode;
		re = rb_entry(*pnode, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			pnode = &(*pnode)->rb_left;
		else if (ofs >= re->ofs + re->len)
			pnode = &(*pnode)->rb_right;
		else
			goto lookup_neighbors;
	}

	*insert_p = pnode;
	*insert_parent = parent;

	re = rb_entry(parent, struct rb_entry, rb_node);
	tmp_node = parent;
	if (parent && ofs > re->ofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);

	tmp_node = parent;
	if (parent && ofs < re->ofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	return NULL;

lookup_neighbors:
	if (ofs == re->ofs || force) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&re->rb_node);
		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
Example #9
void itree_teardown(struct inode_tree *itree)
{
	struct rb_node *rbnode;
	struct itree_node *itnode;

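	/* Empty the inode-keyed tree first; each itree_node is also linked
	 * into the sorted tree and is freed in the loop below. */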
	while (!RB_EMPTY_ROOT(&itree->inodes)) {
		rbnode = rb_first(&itree->inodes);
		rb_erase(rbnode, &itree->inodes);
	}

	while (!RB_EMPTY_ROOT(&itree->sorted)) {
		rbnode = rb_first(&itree->sorted);
		itnode = rb_entry(rbnode, struct itree_node, sorted_node);
		rb_erase(rbnode, &itree->sorted);
		free(itnode);
	}
}
Example #10
static int namei_init(const char *device, int argc, const char **argv)
{
	unsigned long ino;
	FILE *file;
	int rc;

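	/* Each argument is either "all_names", "file=<list of inode numbers>",
	 * or a single inode number. */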
	while (argc--) {
		if (!strcmp(*argv, "all_names")) {
			namei_all_names = 1;
		} else if (!strncmp(*argv, "file=", 5)) {
			file = fopen(*argv + 5, "r");
			if (!file) {
				int e = errno;
				fprintf(stderr, "Unable to open ");
				errno = e;
				perror(*argv + 5);
				return 1;
			}

			while (!feof(file)) {
				rc = fscanf(file, "%lu", &ino);
				if (rc == 1)
					namei_add_inode(ino);
				else if (rc != EOF) {
					fprintf(stderr, "Bad read from %s\n",
							*argv + 5);
					fclose(file);
					return 1;
				}
			}
			fclose(file);
		} else {
			char *end;
			if (!**argv) {
				fprintf(stderr, "Unable to parse empty action "
						"arg\n");
				return 1;
			}
			ino = strtoul(*argv, &end, 0);
			if (*end || end == *argv) {
				fprintf(stderr, "Invalid action argument "
						"'%s'\n", *argv);
				return 1;
			}
			namei_add_inode(ino);
		}

		argv++;
	}

	if (RB_EMPTY_ROOT(&namei_targets)) {
		fprintf(stderr, "No inodes given to name\n");
		return 1;
	}

	return 0;
}
Example #11
    void CPageMgr::RemoveIndexTreeIfNeed(size_t pagecount)
    {
        struct SHashNode * hashNode     = GetHashNode(pagecount);
        struct SIndexInfo * indexInfo   = (struct SIndexInfo *)RbSearch(&hashNode->hash_tree, 
                &pagecount, &CPageMgr::HashTreeGetObject, &CPageMgr::HashTreeSearch);

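        /* Once the free_tree for this page count holds no more entries, drop
         * the index entry from the hash tree and release it. */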
        if (RB_EMPTY_ROOT(&indexInfo->free_tree)) {
            RbRemove(&hashNode->hash_tree, indexInfo, &CPageMgr::HashTreeGetRbNode);
            ReleaseIndexInfo(indexInfo);
        }
    }
Example #12
static void fiops_charge_vios(struct fiops_data *fiopsd,
	struct fiops_ioc *ioc, u64 vios)
{
	struct fiops_rb_root *service_tree = ioc->service_tree;
	ioc->vios += vios;

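	/* No queued requests left for this ioc: take it off the service tree;
	 * otherwise reposition it to account for the charged vios. */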
	if (RB_EMPTY_ROOT(&ioc->sort_list))
		fiops_del_ioc_rr(fiopsd, ioc);
	else
		fiops_resort_rr_list(fiopsd, ioc);

	fiops_update_min_vios(service_tree);
}
Example #13
static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end,
						       bool blockable)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;
	struct interval_tree_node *it;
	LIST_HEAD(cancelled);

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, start, end);
	while (it) {
		if (!blockable) {
			spin_unlock(&mn->lock);
			return -EAGAIN;
		}
		/* The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		mo = container_of(it, struct i915_mmu_object, it);
		if (kref_get_unless_zero(&mo->obj->base.refcount))
			queue_work(mn->wq, &mo->work);

		list_add(&mo->link, &cancelled);
		it = interval_tree_iter_next(it, start, end);
	}
	list_for_each_entry(mo, &cancelled, link)
		del_object(mo);
	spin_unlock(&mn->lock);

	if (!list_empty(&cancelled))
		flush_workqueue(mn->wq);

	return 0;
}
Example #14
/*
 * Get the first inode from the sorted tree, then remove from both. Use
 * itree_get_inode function to retrieve the inode. Returns 1 if any
 * errors occurred, otherwise the inode is returned with its refcount
 * updated.
 */
int itree_fetch(struct inode_tree *itree, __u8 taskid, int duet_fd, char *path,
	unsigned long long *uuid, long long *inmem)
{
	int ret = 0;
	struct rb_node *rbnode;
	struct itree_node *itnode;

	*uuid = 0;
	path[0] = '\0';
again:
	if (RB_EMPTY_ROOT(&itree->sorted))
		return 0;

	/* Grab last node in the sorted tree, and remove from both trees */
	rbnode = rb_last(&itree->sorted);
	itnode = rb_entry(rbnode, struct itree_node, sorted_node);
	rb_erase(&itnode->sorted_node, &itree->sorted);
	rb_erase(&itnode->inodes_node, &itree->inodes);

	*uuid = itnode->uuid;
	*inmem = itnode->inmem;
	free(itnode);

	itree_dbg("itree: fetch picked uuid %llu, inode %lu\n", *uuid,
		DUET_UUID_INO(*uuid));

	/* Check if we've processed it before */
	if (duet_check_done(duet_fd, taskid, *uuid, 1) == 1)
		goto again;

	itree_dbg("itree: fetching uuid %llu, inode %lu\n", *uuid,
		DUET_UUID_INO(*uuid));

	/* Get the path for this inode */
	if (duet_get_path(duet_fd, taskid, *uuid, path)) {
		//fprintf(stderr, "itree: inode path not found\n");
		goto again;
	}

	/* If this isn't a child, mark to avoid, and retry */
	if (path[0] == '\0') {
		//duet_set_done(duet_fd, taskid, *uuid, 1);
		//itree_dbg("itree: marking uuid %llu, ino %lu for task %u to avoid\n",
		//	*uuid, DUET_UUID_INO(*uuid), taskid);
		goto again;
	}

	return ret;
}
Example #15
/*
 * osd map
 */
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
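	/* Drain and free every pg_temp mapping, then every pool entry. */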
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		rb_erase(&pi->node, &map->pg_pools);
		kfree(pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map);
}
Example #16
void digest_free(struct rb_root *root)
{
	struct rb_node *n = rb_first(root);
	struct d_tree *t;

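	/* Advance the iterator before erasing so the walk stays valid. */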
	while (n) {
		t = rb_entry(n, struct d_tree, t_node);
		n = rb_next(n);
		rb_erase(&t->t_node, root);
		free(t->digest);
		free(t);
	}

	abort_on(!RB_EMPTY_ROOT(root));
}
Example #17
static int check_rbtree_empty(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (b->irq_wait) {
		pr_err("Empty breadcrumbs still has a waiter\n");
		return -EINVAL;
	}

	if (!RB_EMPTY_ROOT(&b->waiters)) {
		pr_err("Empty breadcrumbs, but wait-tree not empty\n");
		return -EINVAL;
	}

	return 0;
}
Example #18
static void
fiops_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	struct fiops_ioc *ioc = RQ_CIC(rq);
	struct fiops_data *fiopsd = q->elevator->elevator_data;

	fiops_remove_request(next);

	ioc = RQ_CIC(next);
	/*
	 * All requests of this task have been merged into other tasks'
	 * requests, so delete it from the service tree.
	 */
	if (fiops_ioc_on_rr(ioc) && RB_EMPTY_ROOT(&ioc->sort_list))
		fiops_del_ioc_rr(fiopsd, ioc);
}
Example #19
static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	bool conns;

	spin_lock_bh(&smc_lgr_list.lock);
	if (list_empty(&lgr->list))
		goto free;
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(&smc_lgr_list.lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */
free:
	spin_unlock_bh(&smc_lgr_list.lock);

	if (!lgr->is_smcd && !lgr->terminating)	{
		struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

		/* try to send del link msg, on error free lgr immediately */
		if (lnk->state == SMC_LNK_ACTIVE &&
		    !smc_link_send_delete(lnk)) {
			/* reschedule in case we never receive a response */
			smc_lgr_schedule_free_work(lgr);
			return;
		}
	}

	if (!delayed_work_pending(&lgr->free_work)) {
		struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

		if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
			smc_llc_link_inactive(lnk);
		if (lgr->is_smcd)
			smc_ism_signal_shutdown(lgr);
		smc_lgr_free(lgr);
	}
}
Example #20
static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	bool conns;

	spin_lock_bh(&smc_lgr_list.lock);
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(&smc_lgr_list.lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */
	spin_unlock_bh(&smc_lgr_list.lock);
	smc_lgr_free(lgr);
}
Example #21
static int fiops_forced_dispatch(struct fiops_data *fiopsd)
{
	struct fiops_ioc *ioc;
	int dispatched = 0;
	int i;

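	/* Drain every queued request from every service tree, RT down to IDLE. */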
	for (i = RT_WORKLOAD; i >= IDLE_WORKLOAD; i--) {
		while (!RB_EMPTY_ROOT(&fiopsd->service_tree[i].rb)) {
			ioc = fiops_rb_first(&fiopsd->service_tree[i]);

			while (!list_empty(&ioc->fifo)) {
				fiops_dispatch_request(fiopsd, ioc);
				dispatched++;
			}
			if (fiops_ioc_on_rr(ioc))
				fiops_del_ioc_rr(fiopsd, ioc);
		}
	}
	return dispatched;
}
Example #22
/*
 * allow the fileserver to request callback state (re-)initialisation
 */
void afs_init_callback_state(struct afs_server *server)
{
	struct afs_vnode *vnode;

	_enter("{%p}", server);

	spin_lock(&server->cb_lock);

	/* kill all the promises on record from this server */
	while (!RB_EMPTY_ROOT(&server->cb_promises)) {
		vnode = rb_entry(server->cb_promises.rb_node,
				 struct afs_vnode, cb_promise);
		_debug("UNPROMISE { vid=%x:%u uq=%u}",
		       vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
		rb_erase(&vnode->cb_promise, &server->cb_promises);
		vnode->cb_promised = false;
	}

	spin_unlock(&server->cb_lock);
	_leave("");
}
Example #23
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct request *rq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes
	 */
	if (dd->next_rq[WRITE])
		rq = dd->next_rq[WRITE];
	else
		rq = dd->next_rq[READ];

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * either there are no reads, or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = dd->next_rq[data_dir];
	}

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);

	return 1;
}
Example #24
/*
 * lookup extent at @fofs, if hit, return the extent
 * if not, return NULL and
 * @prev_ex: extent before fofs
 * @next_ex: extent after fofs
 * @insert_p: insert point for new extent at fofs
 * in order to simplify the insertion afterwards.
 * tree must stay unchanged between lookup and insertion.
 */
static struct extent_node *__lookup_extent_tree_ret(struct extent_tree *et,
				unsigned int fofs,
				struct extent_node **prev_ex,
				struct extent_node **next_ex,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent)
{
	struct rb_node **pnode = &et->root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct extent_node *en = et->cached_en;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_ex = NULL;
	*next_ex = NULL;

	if (RB_EMPTY_ROOT(&et->root))
		return NULL;

	if (en) {
		struct extent_info *cei = &en->ei;

		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
			goto lookup_neighbors;
	}

	while (*pnode) {
		parent = *pnode;
		en = rb_entry(*pnode, struct extent_node, rb_node);

		if (fofs < en->ei.fofs)
			pnode = &(*pnode)->rb_left;
		else if (fofs >= en->ei.fofs + en->ei.len)
			pnode = &(*pnode)->rb_right;
		else
			goto lookup_neighbors;
	}

	*insert_p = pnode;
	*insert_parent = parent;

	en = rb_entry(parent, struct extent_node, rb_node);
	tmp_node = parent;
	if (parent && fofs > en->ei.fofs)
		tmp_node = rb_next(parent);
	*next_ex = tmp_node ?
		rb_entry(tmp_node, struct extent_node, rb_node) : NULL;

	tmp_node = parent;
	if (parent && fofs < en->ei.fofs)
		tmp_node = rb_prev(parent);
	*prev_ex = tmp_node ?
		rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
	return NULL;

lookup_neighbors:
	if (fofs == en->ei.fofs) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&en->rb_node);
		*prev_ex = tmp_node ?
			rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
	}
Example #25
static inline int has_stealable_pjobs(struct rtws_rq *rtws_rq)
{
    return !RB_EMPTY_ROOT(&rtws_rq->stealable_pjobs);
}
Example #26
static int
vr_queue_empty(struct request_queue *q)
{
	struct vr_data *vd = vr_get_data(q);

	return RB_EMPTY_ROOT(&vd->sort_list);
}
Example #27
void vgic_v3_its_free_domain(struct domain *d)
{
    ASSERT(RB_EMPTY_ROOT(&d->arch.vgic.its_devices));
}
Example #28
/*
 * receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
		  struct msghdr *msg, size_t len, int flags)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_call *call = NULL, *continue_call = NULL;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct sk_buff *skb;
	long timeo;
	int copy, ret, ullen, offset, copied = 0;
	u32 abort_code;

	DEFINE_WAIT(wait);

	_enter(",,,%zu,%d", len, flags);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
	msg->msg_flags |= MSG_MORE;

	lock_sock(&rx->sk);

	for (;;) {
		/* return immediately if a client socket has no outstanding
		 * calls */
		if (RB_EMPTY_ROOT(&rx->calls)) {
			if (copied)
				goto out;
			if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
				release_sock(&rx->sk);
				if (continue_call)
					rxrpc_put_call(continue_call);
				return -ENODATA;
			}
		}

		/* get the next message on the Rx queue */
		skb = skb_peek(&rx->sk.sk_receive_queue);
		if (!skb) {
			/* nothing remains on the queue */
			if (copied &&
			    (msg->msg_flags & MSG_PEEK || timeo == 0))
				goto out;

			/* wait for a message to turn up */
			release_sock(&rx->sk);
			prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
						  TASK_INTERRUPTIBLE);
			ret = sock_error(&rx->sk);
			if (ret)
				goto wait_error;

			if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
				if (signal_pending(current))
					goto wait_interrupted;
				timeo = schedule_timeout(timeo);
			}
			finish_wait(sk_sleep(&rx->sk), &wait);
			lock_sock(&rx->sk);
			continue;
		}

	peek_next_packet:
		sp = rxrpc_skb(skb);
		call = sp->call;
		ASSERT(call != NULL);

		_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);

		/* make sure we wait for the state to be updated in this call */
		spin_lock_bh(&call->lock);
		spin_unlock_bh(&call->lock);

		if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
			_debug("packet from released call");
			if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
				BUG();
			rxrpc_free_skb(skb);
			continue;
		}

		/* determine whether to continue last data receive */
		if (continue_call) {
			_debug("maybe cont");
			if (call != continue_call ||
			    skb->mark != RXRPC_SKB_MARK_DATA) {
				release_sock(&rx->sk);
				rxrpc_put_call(continue_call);
				_leave(" = %d [noncont]", copied);
				return copied;
			}
		}

		rxrpc_get_call(call);

		/* copy the peer address and timestamp */
		if (!continue_call) {
			if (msg->msg_name) {
				size_t len =
					sizeof(call->conn->trans->peer->srx);
				memcpy(msg->msg_name,
				       &call->conn->trans->peer->srx, len);
				msg->msg_namelen = len;
			}
			sock_recv_ts_and_drops(msg, &rx->sk, skb);
		}

		/* receive the message */
		if (skb->mark != RXRPC_SKB_MARK_DATA)
			goto receive_non_data_message;

		_debug("recvmsg DATA #%u { %d, %d }",
		       ntohl(sp->hdr.seq), skb->len, sp->offset);

		if (!continue_call) {
			/* only set the control data once per recvmsg() */
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       ullen, &call->user_call_ID);
			if (ret < 0)
				goto copy_error;
			ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		}

		ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
		ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
		call->rx_data_recv = ntohl(sp->hdr.seq);

		ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);

		offset = sp->offset;
		copy = skb->len - offset;
		if (copy > len - copied)
			copy = len - copied;

		ret = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copy);

		if (ret < 0)
			goto copy_error;

		/* handle piecemeal consumption of data packets */
		_debug("copied %d+%d", copy, copied);

		offset += copy;
		copied += copy;

		if (!(flags & MSG_PEEK))
			sp->offset = offset;

		if (sp->offset < skb->len) {
			_debug("buffer full");
			ASSERTCMP(copied, ==, len);
			break;
		}

		/* we transferred the whole data packet */
		if (sp->hdr.flags & RXRPC_LAST_PACKET) {
			_debug("last");
			if (call->conn->out_clientflag) {
				 /* last byte of reply received */
				ret = copied;
				goto terminal_message;
			}

			/* last bit of request received */
			if (!(flags & MSG_PEEK)) {
				_debug("eat packet");
				if (skb_dequeue(&rx->sk.sk_receive_queue) !=
				    skb)
					BUG();
				rxrpc_free_skb(skb);
			}
			msg->msg_flags &= ~MSG_MORE;
			break;
		}

		/* move on to the next data message */
		_debug("next");
		if (!continue_call)
			continue_call = sp->call;
		else
			rxrpc_put_call(call);
		call = NULL;

		if (flags & MSG_PEEK) {
			_debug("peek next");
			skb = skb->next;
			if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
				break;
			goto peek_next_packet;
		}

		_debug("eat packet");
		if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
			BUG();
		rxrpc_free_skb(skb);
	}
Example #29
static void key_garbage_collector(struct work_struct *work)
{
	struct rb_node *rb;
	key_serial_t cursor;
	struct key *key, *xkey;
	time_t new_timer = LONG_MAX, limit, now;

	now = current_kernel_time().tv_sec;
	kenter("[%x,%ld]", key_gc_cursor, key_gc_new_timer - now);

	if (test_and_set_bit(0, &key_gc_executing)) {
		key_schedule_gc(current_kernel_time().tv_sec + 1);
		kleave(" [busy; deferring]");
		return;
	}

	limit = now;
	if (limit > key_gc_delay)
		limit -= key_gc_delay;
	else
		limit = key_gc_delay;

	spin_lock(&key_serial_lock);

	if (unlikely(RB_EMPTY_ROOT(&key_serial_tree))) {
		spin_unlock(&key_serial_lock);
		clear_bit(0, &key_gc_executing);
		return;
	}

	cursor = key_gc_cursor;
	if (cursor < 0)
		cursor = 0;
	if (cursor > 0)
		new_timer = key_gc_new_timer;
	else
		key_gc_again = false;

	/* find the first key above the cursor */
	key = NULL;
	rb = key_serial_tree.rb_node;
	while (rb) {
		xkey = rb_entry(rb, struct key, serial_node);
		if (cursor < xkey->serial) {
			key = xkey;
			rb = rb->rb_left;
		} else if (cursor > xkey->serial) {
			rb = rb->rb_right;
		} else {
			rb = rb_next(rb);
			if (!rb)
				goto reached_the_end;
			key = rb_entry(rb, struct key, serial_node);
			break;
		}
	}

	if (!key)
		goto reached_the_end;

	/* trawl through the keys looking for keyrings */
	for (;;) {
		if (key->expiry > limit && key->expiry < new_timer) {
			kdebug("will expire %x in %ld",
			       key_serial(key), key->expiry - limit);
			new_timer = key->expiry;
		}

		if (key->type == &key_type_keyring &&
		    key_gc_keyring(key, limit))
			/* the gc had to release our lock so that the keyring
			 * could be modified, so we have to get it again */
			goto gc_released_our_lock;

		rb = rb_next(&key->serial_node);
		if (!rb)
			goto reached_the_end;
		key = rb_entry(rb, struct key, serial_node);
	}

gc_released_our_lock:
	kdebug("gc_released_our_lock");
	key_gc_new_timer = new_timer;
	key_gc_again = true;
	clear_bit(0, &key_gc_executing);
	schedule_work(&key_gc_work);
	kleave(" [continue]");
	return;

	/* when we reach the end of the run, we set the timer for the next one */
reached_the_end:
	kdebug("reached_the_end");
	spin_unlock(&key_serial_lock);
	key_gc_new_timer = new_timer;
	key_gc_cursor = 0;
	clear_bit(0, &key_gc_executing);

	if (key_gc_again) {
		/* there may have been a key that expired whilst we were
		 * scanning, so if we discarded any links we should do another
		 * scan */
		new_timer = now + 1;
		key_schedule_gc(new_timer);
	} else if (new_timer < LONG_MAX) {
		new_timer += key_gc_delay;
		key_schedule_gc(new_timer);
	}
	kleave(" [end]");
}
Example #30
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
    struct deadline_data *dd = q->elevator->elevator_data;
    const int reads = !list_empty(&dd->fifo_list[READ]);
    const int writes = !list_empty(&dd->fifo_list[WRITE]);
    struct request *rq;
    int data_dir;

    /*
     * batches are currently reads XOR writes
     */
    if (dd->next_rq[WRITE])
        rq = dd->next_rq[WRITE];
    else
        rq = dd->next_rq[READ];

    if (rq) {
        /* we have a "next request" */

        if (dd->last_sector != rq->sector)
            /* end the batch on a non sequential request */
            dd->batching += dd->fifo_batch;

        if (dd->batching < dd->fifo_batch)
            /* we are still entitled to batch */
            goto dispatch_request;
    }

    /*
     * at this point we are not running a batch. select the appropriate
     * data direction (read / write)
     */

    if (reads) {
        BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

        if (writes && (dd->starved++ >= dd->writes_starved))
            goto dispatch_writes;

        data_dir = READ;

        goto dispatch_find_request;
    }

    /*
     * either there are no reads, or writes have been starved
     */

    if (writes) {
dispatch_writes:
        BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

        dd->starved = 0;

        data_dir = WRITE;

        goto dispatch_find_request;
    }

    return 0;

dispatch_find_request:
    /*
     * we are not running a batch, find best request for selected data_dir
     */
    if (deadline_check_fifo(dd, data_dir)) {
        /* An expired request exists - satisfy it */
        dd->batching = 0;
        rq = rq_entry_fifo(dd->fifo_list[data_dir].next);

    } else if (dd->next_rq[data_dir]) {
        /*
         * The last req was the same dir and we have a next request in
         * sort order. No expired requests so continue on from here.
         */
        rq = dd->next_rq[data_dir];
    } else {
        struct rb_node *node;
        /*
         * The last req was the other direction or we have run out of
         * higher-sectored requests. Go back to the lowest sectored
         * request (1 way elevator) and start a new batch.
         */
        dd->batching = 0;
        node = rb_first(&dd->sort_list[data_dir]);
        if (node)
            rq = rb_entry_rq(node);
    }

dispatch_request:
    /*
     * rq is the selected appropriate request.
     */
    dd->batching++;
    deadline_move_request(dd, rq);

    return 1;
}