/*
 * initialize elevator private data (vr_data).
 */
static int vr_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct vr_data *vd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	vd = kmalloc_node(sizeof(*vd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!vd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = vd;

	INIT_LIST_HEAD(&vd->fifo_list[SYNC]);
	INIT_LIST_HEAD(&vd->fifo_list[ASYNC]);
	vd->sort_list = RB_ROOT;
	vd->fifo_expire[SYNC] = sync_expire;
	vd->fifo_expire[ASYNC] = async_expire;
	vd->fifo_batch = fifo_batch;
	vd->rev_penalty = rev_penalty;

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);
	return 0;
}
/*
 * initialize elevator private data (deadline_data).
 */
static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!dd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = dd;

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);
	return 0;
}
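/*
 * For context: each init hook above and below has a matching exit hook
 * that tears the private data down. A minimal sketch of deadline's
 * counterpart, assuming the fifo lists are drained before the elevator
 * is released (this mirrors upstream deadline_exit_queue(), shown here
 * purely for illustration):
 */
static void deadline_exit_queue(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;

	/* The scheduler must not be torn down with requests still queued. */
	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	kfree(dd);
}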
static int zen_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct zen_data *zdata;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	zdata = kmalloc_node(sizeof(*zdata), GFP_KERNEL, q->node);
	if (!zdata) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = zdata;

	/*
	 * Fully initialize the private data before publishing q->elevator,
	 * so dispatch never sees uninitialized fifo lists.
	 */
	INIT_LIST_HEAD(&zdata->fifo_list[SYNC]);
	INIT_LIST_HEAD(&zdata->fifo_list[ASYNC]);
	zdata->fifo_expire[SYNC] = sync_expire;
	zdata->fifo_expire[ASYNC] = async_expire;
	zdata->fifo_batch = fifo_batch;

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);
	return 0;
}
static int mpq_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct mpq_data *nd;
	struct elevator_queue *eq;
	int i;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
	if (!nd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = nd;

	for (i = 0; i < numQ; i++)
		INIT_LIST_HEAD(&nd->queue[i]);

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);
	return 0;
}
static int fiops_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct fiops_data *fiopsd;
	int i;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	fiopsd = kzalloc_node(sizeof(*fiopsd), GFP_KERNEL, q->node);
	if (!fiopsd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = fiopsd;

	fiopsd->queue = q;

	/* Set up service trees and defaults before publishing the elevator. */
	for (i = IDLE_WORKLOAD; i <= RT_WORKLOAD; i++)
		fiopsd->service_tree[i] = FIOPS_RB_ROOT;

	INIT_WORK(&fiopsd->unplug_work, fiops_kick_queue);

	fiopsd->read_scale = VIOS_READ_SCALE;
	fiopsd->write_scale = VIOS_WRITE_SCALE;
	fiopsd->sync_scale = VIOS_SYNC_SCALE;
	fiopsd->async_scale = VIOS_ASYNC_SCALE;

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);
	return 0;
}
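/*
 * The INIT_WORK() above points at fiops_kick_queue. A minimal sketch of
 * such a handler, assuming it only needs to restart dispatching from
 * process context (the same pattern CFQ uses for its unplug work):
 */
static void fiops_kick_queue(struct work_struct *work)
{
	struct fiops_data *fiopsd =
		container_of(work, struct fiops_data, unplug_work);
	struct request_queue *q = fiopsd->queue;

	/* __blk_run_queue() requires the queue lock to be held. */
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}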
static int osio_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct osio_data *od;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	od = kmalloc_node(sizeof(*od), GFP_KERNEL, q->node);
	if (!od) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = od;

	INIT_LIST_HEAD(&od->fifo_head[OSIO_DIR_READ]);
	INIT_LIST_HEAD(&od->fifo_head[OSIO_DIR_SYNC_WRITE]);
	INIT_LIST_HEAD(&od->fifo_head[OSIO_DIR_ASYNC_WRITE]);
	od->batching = 0;
	od->fifo_dir = OSIO_DIR_UNDEF;
	od->write_starved[OSIO_SYNC] = 0;
	od->write_starved[OSIO_ASYNC] = 0;
	od->fifo_batch[OSIO_DIR_READ] = FIFO_READ_BATCH;
	od->fifo_batch[OSIO_DIR_SYNC_WRITE] = FIFO_SYNC_WRITE_BATCH;
	od->fifo_batch[OSIO_DIR_ASYNC_WRITE] = FIFO_ASYNC_WRITE_BATCH;
	od->write_starved_line[OSIO_SYNC] = SYNC_WRITE_STARVED_LINE;
	od->write_starved_line[OSIO_ASYNC] = ASYNC_WRITE_STARVED_LINE;

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);
	return 0;
}
static int cscan_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct cscan_data *cd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	cd = kmalloc_node(sizeof(*cd), GFP_KERNEL, q->node);
	if (!cd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = cd;

	cd->sort_list[0] = RB_ROOT;
	cd->sort_list[1] = RB_ROOT;
	cd->curr = 0;
	cd->last_sector = 0;

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);
	return 0;
}
/*
 * row_init_queue() - Init scheduler data structures
 * @q:	requests queue
 * @e:	elevator type to attach
 *
 * Allocates the elevator queue and the struct row_data saved in
 * eq->elevator_data for this dispatch queue.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int row_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct row_data *rdata;
	struct elevator_queue *eq;
	int i;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	rdata = kmalloc_node(sizeof(*rdata), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!rdata) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = rdata;

	/* __GFP_ZERO already zeroed rdata; no extra memset is needed. */
	for (i = 0; i < ROWQ_MAX_PRIO; i++) {
		INIT_LIST_HEAD(&rdata->row_queues[i].fifo);
		rdata->row_queues[i].disp_quantum = row_queues_def[i].quantum;
		rdata->row_queues[i].rdata = rdata;
		rdata->row_queues[i].prio = i;
		rdata->row_queues[i].idle_data.begin_idling = false;
		rdata->row_queues[i].idle_data.last_insert_time =
			ktime_set(0, 0);
	}

	rdata->reg_prio_starvation.starvation_limit =
		ROW_REG_STARVATION_TOLLERANCE;
	rdata->low_prio_starvation.starvation_limit =
		ROW_LOW_STARVATION_TOLLERANCE;

	/*
	 * Currently idling is enabled only for READ queues. If we want to
	 * enable it for write queues also, note that idling frequency will
	 * be the same in both cases.
	 */
	rdata->rd_idle_data.idle_time_ms = ROW_IDLE_TIME_MSEC;
	rdata->rd_idle_data.freq_ms = ROW_READ_FREQ_MSEC;
	hrtimer_init(&rdata->rd_idle_data.hr_timer,
		CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rdata->rd_idle_data.hr_timer.function = &row_idle_hrtimer_fn;

	INIT_WORK(&rdata->rd_idle_data.idle_work, kick_queue);
	rdata->last_served_ioprio_class = IOPRIO_CLASS_NONE;
	rdata->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
	rdata->dispatch_queue = q;

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);

	return 0;
}
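/*
 * A hypothetical sketch of the hrtimer callback installed above (the real
 * row_idle_hrtimer_fn may do more bookkeeping). hrtimer callbacks run in
 * hard-irq context, so the actual queue kick is deferred to the idle_work
 * item set up with INIT_WORK() above; the struct name and its members are
 * assumed from the field accesses in row_init_queue().
 */
static enum hrtimer_restart row_idle_hrtimer_fn(struct hrtimer *hr_timer)
{
	struct idling_data *id =
		container_of(hr_timer, struct idling_data, hr_timer);

	/* Idle window expired: schedule the deferred queue kick. */
	schedule_work(&id->idle_work);
	return HRTIMER_NORESTART;
}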
static int sio_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct sio_data *sd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	/* Allocate structure */
	sd = kmalloc_node(sizeof(*sd), GFP_KERNEL, q->node);
	if (!sd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = sd;

	/* Initialize fifo lists */
	INIT_LIST_HEAD(&sd->fifo_list[SYNC][READ]);
	INIT_LIST_HEAD(&sd->fifo_list[SYNC][WRITE]);
	INIT_LIST_HEAD(&sd->fifo_list[ASYNC][READ]);
	INIT_LIST_HEAD(&sd->fifo_list[ASYNC][WRITE]);

	/* Initialize data */
	sd->batched = 0;
	sd->fifo_expire[SYNC][READ] = sync_read_expire;
	sd->fifo_expire[SYNC][WRITE] = sync_write_expire;
	sd->fifo_expire[ASYNC][READ] = async_read_expire;
	sd->fifo_expire[ASYNC][WRITE] = async_write_expire;
	sd->writes_starved = writes_starved;
	sd->fifo_batch = fifo_batch;

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);
	return 0;
}
static int greedy_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct greedy_data *nd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
	if (!nd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = nd;

	INIT_LIST_HEAD(&nd->lower_queue);
	INIT_LIST_HEAD(&nd->upper_queue);
	nd->disk_head = 0ul;

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);
	return 0;
}
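/*
 * All ten init hooks above share the same boilerplate: allocate the
 * elevator queue, allocate per-scheduler data on the queue's NUMA node,
 * unwind with kobject_put() on failure, and publish q->elevator under
 * the queue lock only after the private data is initialized. A sketch of
 * how that could be factored out (elv_init_queue_common and the init_fn
 * callback are illustrative names, not part of the elevator API):
 */
static int elv_init_queue_common(struct request_queue *q,
				 struct elevator_type *e,
				 size_t data_size,
				 void (*init_fn)(void *data))
{
	struct elevator_queue *eq;
	void *data;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	/* Zeroed, NUMA-local allocation, matching the hooks above. */
	data = kzalloc_node(data_size, GFP_KERNEL, q->node);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = data;

	/* Scheduler-specific setup runs before the elevator is visible. */
	init_fn(data);

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);
	return 0;
}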