Example #1
static void
kaio_submit(struct ploop_io *io, struct ploop_request * preq,
            unsigned long rw,
            struct bio_list *sbl, iblock_t iblk, unsigned int size)
{
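    /* Flush requests are not submitted as data I/O: queue the request on
     * the fsync queue and account it in the bio_syncwait counter. */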
    if (rw & REQ_FLUSH) {
        spin_lock_irq(&io->plo->lock);
        kaio_queue_fsync_req(preq);
        io->plo->st.bio_syncwait++;
        spin_unlock_irq(&io->plo->lock);
        return;
    }

    if (iblk == PLOOP_ZERO_INDEX)
        iblk = 0;

    kaio_sbl_submit(io->files.file, preq, rw, sbl, iblk, size);
}
Example #2
static void
kaio_submit(struct ploop_io *io, struct ploop_request * preq,
	     unsigned long rw,
	     struct bio_list *sbl, iblock_t iblk, unsigned int size)
{
	struct bio * b;

	if (rw & BIO_FLUSH) {
		spin_lock_irq(&io->plo->lock);
		kaio_queue_fsync_req(preq);
		io->plo->st.bio_syncwait++;
		spin_unlock_irq(&io->plo->lock);
		return;
	}

	ploop_prepare_io_request(preq);

	if (iblk == PLOOP_ZERO_INDEX)
		iblk = 0;

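	/* For writes, combine the destination cluster (iblk) with the
	 * in-cluster part of the first bio's sector and pass the resulting
	 * offset to the tracker. */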
	if (rw & (1<<BIO_RW)) {
		loff_t off = sbl->head->bi_sector;
		off = ((loff_t)iblk << preq->plo->cluster_log) |
			(off & ((1<<preq->plo->cluster_log) - 1));
		ploop_prepare_tracker(preq, off);
	}

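	/* Submit each bio through kernel AIO; io_count is raised per bio,
	 * and a submission error marks the request and stops the loop. */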
	for (b = sbl->head; b != NULL; b = b->bi_next) {
		int err;

		atomic_inc(&preq->io_count);
		err = kaio_kernel_submit(io->files.file, b, preq, iblk, rw);
		if (err) {
			ploop_set_error(preq, err);
			ploop_complete_io_request(preq);
			break;
		}
	}

	kaio_complete_io_request(preq);
}
Example #3
static void kaio_complete_io_state(struct ploop_request * preq)
{
	struct ploop_device * plo   = preq->plo;
	unsigned long flags;

	if (preq->error || !(preq->req_rw & BIO_FUA) ||
	    preq->eng_state == PLOOP_E_INDEX_READ ||
	    preq->eng_state == PLOOP_E_TRANS_INDEX_READ ||
	    preq->eng_state == PLOOP_E_DELTA_READ ||
	    preq->eng_state == PLOOP_E_TRANS_DELTA_READ) {
		ploop_complete_io_state(preq);
		return;
	}

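	/* Emulate FUA: strip the flag and queue an explicit fsync instead. */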
	preq->req_rw &= ~BIO_FUA;

	spin_lock_irqsave(&plo->lock, flags);
	kaio_queue_fsync_req(preq);
	plo->st.bio_syncwait++;
	spin_unlock_irqrestore(&plo->lock, flags);
}
Example #4
static void kaio_complete_io_state(struct ploop_request * preq)
{
    struct ploop_device * plo   = preq->plo;
    unsigned long flags;
    int post_fsync = 0;

    if (preq->error || !(preq->req_rw & REQ_FUA) ||
            preq->eng_state == PLOOP_E_INDEX_READ ||
            preq->eng_state == PLOOP_E_TRANS_INDEX_READ ||
            preq->eng_state == PLOOP_E_DELTA_READ ||
            preq->eng_state == PLOOP_E_TRANS_DELTA_READ) {
        ploop_complete_io_state(preq);
        return;
    }

    /* Convert requested fua to fsync */
    if (test_and_clear_bit(PLOOP_REQ_FORCE_FUA, &preq->state) ||
            test_and_clear_bit(PLOOP_REQ_KAIO_FSYNC, &preq->state))
        post_fsync = 1;

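    /* If delaying the FUA is not possible, fall back to an explicit
     * fsync as well. */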
    if (!post_fsync &&
            !ploop_req_delay_fua_possible(preq->req_rw, preq) &&
            (preq->req_rw & REQ_FUA))
        post_fsync = 1;

    preq->req_rw &= ~REQ_FUA;

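    /* Either queue an fsync in place of the stripped FUA, or continue
     * through the normal completion path. */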
    if (post_fsync) {
        spin_lock_irqsave(&plo->lock, flags);
        kaio_queue_fsync_req(preq);
        plo->st.bio_syncwait++;
        spin_unlock_irqrestore(&plo->lock, flags);
    } else {
        ploop_complete_io_state(preq);
    }
}