/*
 * _drbd_md_sync_page_io() - synchronously read or write one meta-data page.
 * @mdev:	DRBD device.
 * @bdev:	backing device holding the meta data.
 * @page:	page to read into / write from.
 * @sector:	on-disk sector of the meta-data area to access.
 * @rw:		READ or WRITE (bitmask, tested via rw & WRITE below).
 * @size:	number of bytes to transfer (must fit in one bio segment).
 *
 * Builds a single-page bio, submits it, and blocks on a completion until
 * the request finishes.  Writes request a hard barrier unless barriers
 * were already found to be unsupported (MD_NO_BARRIER flag); on a failed
 * barrier write the barrier flag is disabled and the I/O retried once
 * without it.
 *
 * Returns 1 on success, 0 on failure.
 */
static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
				 struct drbd_backing_dev *bdev,
				 struct page *page, sector_t sector,
				 int rw, int size)
{
	struct bio *bio;
	struct drbd_md_io md_io;
	int ok;

	/* Completion context handed to drbd_md_io_complete via bi_private. */
	md_io.mdev = mdev;
	init_completion(&md_io.event);
	md_io.error = 0;

	/* Writes get a barrier unless we already learned the md device
	 * does not support them. */
	if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
		rw |= REQ_HARDBARRIER;
	/* Meta-data I/O is always synchronous and submitted immediately. */
	rw |= REQ_UNPLUG | REQ_SYNC;

 retry:
	bio = bio_alloc(GFP_NOIO, 1);
	bio->bi_bdev = bdev->md_bdev;
	bio->bi_sector = sector;
	/* bio_add_page() returns the number of bytes added; anything short
	 * of the requested size means we cannot do this I/O. */
	ok = (bio_add_page(bio, page, size, 0) == size);
	if (!ok)
		goto out;
	bio->bi_private = &md_io;
	bio->bi_end_io = drbd_md_io_complete;
	bio->bi_rw = rw;

	/* Optional fault injection: fail the bio immediately instead of
	 * submitting it to the backing device. */
	if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
		bio_endio(bio, -EIO);
	else
		submit_bio(rw, bio);
	/* Block until drbd_md_io_complete() signals completion. */
	wait_for_completion(&md_io.event);
	ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;

	/* check for unsupported barrier op.
	 * would rather check on EOPNOTSUPP, but that is not reliable.
	 * don't try again for ANY return value != 0 */
	if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
		/* Try again with no barrier */
		dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
		set_bit(MD_NO_BARRIER, &mdev->flags);
		rw &= ~REQ_HARDBARRIER;
		bio_put(bio);
		goto retry;
	}
 out:
	bio_put(bio);
	return ok;
}
/*
 * _drbd_md_sync_page_io() - synchronously read or write one meta-data page.
 * @mdev:	DRBD device.
 * @bdev:	backing device holding the meta data.
 * @page:	page to read into / write from.
 * @sector:	on-disk sector of the meta-data area to access.
 * @rw:		READ or WRITE (bitmask, tested via rw & WRITE below).
 * @size:	number of bytes to transfer (must fit in one bio segment).
 *
 * Allocates a single-page bio, submits it, and blocks on a completion
 * until drbd_md_io_complete() fires.  Writes are tagged REQ_FUA unless
 * FUA is known to be unsupported on the md device (MD_NO_FUA flag).
 *
 * Returns 1 on success, 0 on failure.
 */
static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
				 struct drbd_backing_dev *bdev,
				 struct page *page, sector_t sector,
				 int rw, int size)
{
	struct drbd_md_io md_io;
	struct bio *bio;
	int ret;

	/* Completion context handed to drbd_md_io_complete via bi_private. */
	md_io.mdev = mdev;
	md_io.error = 0;
	init_completion(&md_io.event);

	/* Writes go out with FUA unless the md device lacks support;
	 * all md I/O is synchronous and submitted immediately. */
	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
		rw |= REQ_FUA;
	rw |= REQ_UNPLUG | REQ_SYNC;

	bio = bio_alloc(GFP_NOIO, 1);
	bio->bi_bdev = bdev->md_bdev;
	bio->bi_sector = sector;

	/* bio_add_page() returns the byte count added; a short add means
	 * this I/O cannot be performed. */
	ret = (bio_add_page(bio, page, size, 0) == size);
	if (!ret)
		goto out;

	bio->bi_end_io = drbd_md_io_complete;
	bio->bi_private = &md_io;
	bio->bi_rw = rw;

	/* Optional fault injection: complete the bio with -EIO instead of
	 * handing it to the backing device. */
	if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
		bio_endio(bio, -EIO);
	else
		submit_bio(rw, bio);

	/* Block until the endio callback signals completion. */
	wait_for_completion(&md_io.event);
	ret = (md_io.error == 0) && bio_flagged(bio, BIO_UPTODATE);
 out:
	bio_put(bio);
	return ret;
}
/**
 * drbd_al_to_on_disk_bm() -
 * Writes bitmap parts covered by active AL extents
 * @mdev:	DRBD device.
 *
 * Called when we detach (unconfigure) local storage,
 * or when we go from R_PRIMARY to R_SECONDARY role.
 *
 * Fast path: prepare one bio per active AL extent (deduplicated by bitmap
 * sector), submit them all, flush, and wait for completion.  If bio
 * preparation fails at any point, fall back to the slow path which writes
 * each covered bitmap sector synchronously (possibly several times).
 */
void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
{
	int i, nr_elements;
	unsigned int enr;
	struct bio **bios;
	struct drbd_atodb_wait wc;

	/* Take a local-disk reference for the duration of the bitmap I/O. */
	ERR_IF (!get_ldev_if_state(mdev, D_ATTACHING))
		return; /* sorry, I don't have any act_log etc... */

	/* Freeze the activity log so its extent set is stable while we
	 * walk it. */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	nr_elements = mdev->act_log->nr_elements;

	/* GFP_KERNEL, we are not in anyone's write-out path */
	bios = kzalloc(sizeof(struct bio *) * nr_elements, GFP_KERNEL);
	if (!bios)
		goto submit_one_by_one;

	/* Shared wait context: counts in-flight bios, signals io_done when
	 * the last one completes. */
	atomic_set(&wc.count, 0);
	init_completion(&wc.io_done);
	wc.mdev = mdev;
	wc.error = 0;

	/* Prepare one bio per distinct bitmap sector covered by an active
	 * AL extent. */
	for (i = 0; i < nr_elements; i++) {
		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
		if (enr == LC_FREE)
			continue;
		/* next statement also does atomic_inc wc.count and local_cnt */
		if (atodb_prepare_unless_covered(mdev, bios,
						enr/AL_EXT_PER_BM_SECT,
						&wc))
			goto free_bios_submit_one_by_one;
	}

	/* unnecessary optimization? */
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	/* all prepared, submit them */
	/* prepared bios are dense at the start of the array; the first NULL
	 * slot marks the end. */
	for (i = 0; i < nr_elements; i++) {
		if (bios[i] == NULL)
			break;
		if (FAULT_ACTIVE(mdev, DRBD_FAULT_MD_WR)) {
			/* Fault injection: complete immediately with -EIO
			 * instead of submitting. */
			bios[i]->bi_rw = WRITE;
			bio_endio(bios[i], -EIO);
		} else {
			submit_bio(WRITE, bios[i]);
		}
	}

	/* Kick the md queue so the submitted bios actually get processed. */
	drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));

	/* always (try to) flush bitmap to stable storage */
	drbd_md_flush(mdev);

	/* In case we did not submit a single IO do not wait for
	 * them to complete. ( Because we would wait forever here. )
	 *
	 * In case we had IOs and they are already complete, there
	 * is not point in waiting anyways.
	 * Therefore this if () ... */
	if (atomic_read(&wc.count))
		wait_for_completion(&wc.io_done);

	put_ldev(mdev);

	kfree(bios);
	return;

 free_bios_submit_one_by_one:
	/* free everything by calling the endio callback directly. */
	/* Completing with status 0 runs the endio path, which releases the
	 * per-bio resources and drops the wc.count / local_cnt references
	 * taken during preparation. */
	for (i = 0; i < nr_elements && bios[i]; i++)
		bio_endio(bios[i], 0);

	kfree(bios);

 submit_one_by_one:
	/* Slow path: synchronous write of every covered bitmap sector.
	 * Note: the act_log lock taken above is still held here. */
	dev_warn(DEV, "Using the slow drbd_al_to_on_disk_bm()\n");

	for (i = 0; i < mdev->act_log->nr_elements; i++) {
		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
		if (enr == LC_FREE)
			continue;
		/* Really slow: if we have al-extents 16..19 active,
		 * sector 4 will be written four times! Synchronous! */
		drbd_bm_write_sect(mdev, enr/AL_EXT_PER_BM_SECT);
	}

	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	put_ldev(mdev);
}