static struct fiops_ioc *fiops_select_ioc(struct fiops_data *fiopsd)
{
	struct fiops_ioc *ioc;
	struct fiops_rb_root *service_tree = NULL;
	int i;
	struct request *rq;

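	/*
	 * Pick the first non-empty service tree, scanning from the RT
	 * workload down to IDLE.
	 */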
	for (i = RT_WORKLOAD; i >= IDLE_WORKLOAD; i--) {
		if (!RB_EMPTY_ROOT(&fiopsd->service_tree[i].rb)) {
			service_tree = &fiopsd->service_tree[i];
			break;
		}
	}

	if (!service_tree)
		return NULL;

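	/* the leftmost ioc has the smallest vios, i.e. the least service so far */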
	ioc = fiops_rb_first(service_tree);

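	/* peek at this ioc's oldest queued request to see if it is sync or async */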
	rq = rq_entry_fifo(ioc->fifo.next);
	/*
	 * If this is the only queued task, it is async, and sync requests are
	 * still in flight, hold it back for a moment. If other tasks are
	 * queued, sync tasks cannot be starved, so don't delay.
	 */
	if (!rq_is_sync(rq) && fiopsd->in_flight[1] != 0 &&
			service_tree->count == 1) {
		fiops_log_ioc(fiopsd, ioc,
				"postpone async, in_flight async %d sync %d",
				fiopsd->in_flight[0], fiopsd->in_flight[1]);
		return NULL;
	}

	return ioc;
}

/*
 * fiopsd->service_tree[] holds all pending fiops_iocs that have requests
 * waiting to be dispatched. Each tree is sorted by vios, smallest first,
 * which is the order in which the queues will be serviced.
 */
static void fiops_service_tree_add(struct fiops_data *fiopsd,
	struct fiops_ioc *ioc)
{
	struct rb_node **p, *parent;
	struct fiops_ioc *__ioc;
	struct fiops_rb_root *service_tree = ioc_service_tree(ioc);
	u64 vios;
	int left;

	/*
	 * The ioc is not on a service tree yet (newly added). If it has been
	 * idle (nothing in flight), start it at no less than the tree's
	 * min_vios, so it cannot jump ahead of the queues already on the tree.
	 */
	if (RB_EMPTY_NODE(&ioc->rb_node)) {
		if (ioc->in_flight > 0)
			vios = ioc->vios;
		else
			vios = max_vios(service_tree->min_vios, ioc->vios);
	} else {
		vios = ioc->vios;
		/* ioc->service_tree might differ from the newly looked-up service_tree */
		fiops_rb_erase(&ioc->rb_node, ioc->service_tree);
		ioc->service_tree = NULL;
	}

	fiops_log_ioc(fiopsd, ioc, "service tree add, vios %lld", vios);

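	/*
	 * Walk the rbtree to the insertion point, keeping it sorted by vios.
	 * 'left' tracks whether the new node ends up as the leftmost entry.
	 */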
	left = 1;
	parent = NULL;
	ioc->service_tree = service_tree;
	p = &service_tree->rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__ioc = rb_entry(parent, struct fiops_ioc, rb_node);

		/*
		 * Sort by vios: an ioc that has received less service sorts
		 * to the left and is serviced sooner.
		 */
		if (vios < __ioc->vios) {
			n = &(*p)->rb_left;
		} else {
			n = &(*p)->rb_right;
			left = 0;
		}

		p = n;
	}

	if (left)
		service_tree->left = &ioc->rb_node;

	ioc->vios = vios;
	rb_link_node(&ioc->rb_node, parent, p);
	rb_insert_color(&ioc->rb_node, &service_tree->rb);
	service_tree->count++;

	fiops_update_min_vios(service_tree);
}

static void fiops_completed_request(struct request_queue *q, struct request *rq)
{
	struct fiops_data *fiopsd = q->elevator->elevator_data;
	struct fiops_ioc *ioc = RQ_CIC(rq);

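	/* rq_is_sync() picks the right counter: in_flight[0] is async, [1] is sync */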
	fiopsd->in_flight[rq_is_sync(rq)]--;
	ioc->in_flight--;

	fiops_log_ioc(fiopsd, ioc, "in_flight %d, busy queues %d",
		ioc->in_flight, fiopsd->busy_queues);

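	/*
	 * The device has gone idle; kick the dispatcher so any queue that was
	 * held back (e.g. a postponed async ioc) gets another chance.
	 */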
	if (fiopsd->in_flight[0] + fiopsd->in_flight[1] == 0)
		fiops_schedule_dispatch(fiopsd);
}

static void fiops_charge_vios(struct fiops_data *fiopsd,
	struct fiops_ioc *ioc, u64 vios)
{
	struct fiops_rb_root *service_tree = ioc->service_tree;

	ioc->vios += vios;

	fiops_log_ioc(fiopsd, ioc, "charge vios %llu, new vios %llu",
			vios, ioc->vios);

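	/*
	 * If this ioc has no more queued requests, take it off the service
	 * tree; otherwise reposition it to reflect its new vios.
	 */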
	if (RB_EMPTY_ROOT(&ioc->sort_list))
		fiops_del_ioc_rr(fiopsd, ioc);
	else
		fiops_resort_rr_list(fiopsd, ioc);

	fiops_update_min_vios(service_tree);
}