/*
 * hrtimer expiry callback: defer the expiry handling to the watchdog
 * kworker so it can run in sleepable process context, and do not rearm
 * the timer here (HRTIMER_NORESTART); the queued work takes over.
 */
static enum hrtimer_restart watchdog_timer_expired(struct hrtimer *timer)
{
	struct watchdog_core_data *wd_data;

	wd_data = container_of(timer, struct watchdog_core_data, timer);

	kthread_queue_work(watchdog_kworker, &wd_data->work);
	return HRTIMER_NORESTART;
}
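/*
 * Illustrative sketch, not the driver's actual init path: one plausible way
 * the pieces used by watchdog_timer_expired() could be wired together.
 * watchdog_ping_work() and watchdog_setup_example() are hypothetical names;
 * watchdog_kworker is redeclared here only to keep the sketch self-contained,
 * and struct watchdog_core_data is assumed to contain the 'work' and 'timer'
 * members referenced above. Assumes <linux/kthread.h>, <linux/hrtimer.h>,
 * <linux/err.h> and <linux/printk.h>.
 */
static struct kthread_worker *watchdog_kworker;

/* Runs in the kworker's process context, so it may sleep. */
static void watchdog_ping_work(struct kthread_work *work)
{
	struct watchdog_core_data *wd_data =
		container_of(work, struct watchdog_core_data, work);

	pr_debug("watchdog keepalive for %p\n", wd_data);
	/* a real handler would ping the hardware via wd_data here */
}

static int watchdog_setup_example(struct watchdog_core_data *wd_data)
{
	/* One dedicated worker thread servicing queued ping work. */
	watchdog_kworker = kthread_create_worker(0, "watchdogd");
	if (IS_ERR(watchdog_kworker))
		return PTR_ERR(watchdog_kworker);

	/* Work item that the hrtimer callback above queues on expiry. */
	kthread_init_work(&wd_data->work, watchdog_ping_work);

	/* Relative monotonic timer whose expiry hands off to the worker. */
	hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	wd_data->timer.function = watchdog_timer_expired;

	return 0;
}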
/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_target *ti = md->immutable_target;
	struct request *rq;
	struct dm_rq_target_io *tio;
	sector_t pos = 0;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		if (unlikely(!map)) {
			dm_put_live_table(md, srcu_idx);
			return;
		}
		ti = dm_table_find_target(map, pos);
		dm_put_live_table(md, srcu_idx);
	}

	/*
	 * For suspend, check blk_queue_stopped() and increment ->pending
	 * within a single queue_lock so as not to increment the number of
	 * in-flight I/Os after the queue is stopped in dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			return;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (req_op(rq) != REQ_OP_FLUSH)
			pos = blk_rq_pos(rq);

		if ((dm_old_request_peeked_before_merge_deadline(md) &&
		     md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
		    (ti->type->busy && ti->type->busy(ti))) {
			blk_delay_queue(q, 10);
			return;
		}

		dm_start_request(md, rq);

		tio = tio_from_request(rq);
		init_tio(tio, rq, md);

		/* Establish tio->ti before queuing work (map_tio_request) */
		tio->ti = ti;
		kthread_queue_work(&md->kworker, &tio->work);
		BUG_ON(!irqs_disabled());
	}
}
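/*
 * Illustrative sketch, assuming this is roughly the work function that
 * md->kworker runs for each tio queued above (the handler the
 * "map_tio_request" comment refers to). map_request() is assumed to be the
 * existing helper that remaps and dispatches the request; the exact upstream
 * signature may differ.
 */
static void map_tio_request(struct kthread_work *work)
{
	struct dm_rq_target_io *tio =
		container_of(work, struct dm_rq_target_io, work);

	/*
	 * tio->ti was set before kthread_queue_work() in dm_old_request_fn(),
	 * so the target is already established when this runs in process
	 * context.
	 */
	map_request(tio);
}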