static int rw_header(struct log_c *lc, int op) { lc->io_req.bi_op = op; lc->io_req.bi_op_flags = 0; return dm_io(&lc->io_req, 1, &lc->header_location, NULL); }
static int rw_header(struct log_c *lc, int rw) { lc->io_req.bi_rw = rw; lc->io_req.mem.ptr.vma = lc->disk_header; lc->io_req.notify.fn = NULL; return dm_io(&lc->io_req, 1, &lc->header_location, NULL); }
/**************************************************************************** * Wrapper functions for using the new dm_io API ****************************************************************************/ static int dm_io_sync_vm(unsigned int num_regions, struct dm_io_region *where, int rw, void *data, unsigned long *error_bits, struct cache_c *dmc) { struct dm_io_request iorq; iorq.bi_rw= rw; iorq.mem.type = DM_IO_VMA; iorq.mem.ptr.vma = data; iorq.notify.fn = NULL; iorq.client = dmc->io_client; return dm_io(&iorq, num_regions, where, error_bits); }
/*
 * Issue an asynchronous dm_io over a bio_vec array (DM_IO_BVEC);
 * @fn is invoked on completion with @context, which must be the
 * kcached_job the I/O belongs to (its dmc supplies the dm_io client).
 *
 * Fix: the designated initializer zero-fills every field not set
 * explicitly (mem.offset in particular), which the original passed to
 * dm_io uninitialized.
 */
static int dm_io_async_bvec(unsigned int num_regions,
			    struct dm_io_region *where, int rw,
			    struct bio_vec *bvec, io_notify_fn fn,
			    void *context)
{
	struct kcached_job *job = (struct kcached_job *)context;
	struct cache_c *dmc = job->dmc;
	struct dm_io_request iorq = {
		/*
		 * NOTE(review): "1 << REQ_SYNC" treats REQ_SYNC as a bit
		 * *number*.  On kernels where REQ_SYNC is already a bit
		 * *mask* this sets the wrong flag — confirm against the
		 * target kernel's block-layer definitions.
		 */
		.bi_rw = (rw | (1 << REQ_SYNC)),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bvec,
		.notify.fn = fn,
		.notify.context = context,
		.client = dmc->io_client,
	};

	return dm_io(&iorq, num_regions, where, NULL);
}
static int flush_header(struct log_c *lc) { struct dm_io_region null_location = { .bdev = lc->header_location.bdev, .sector = 0, .count = 0, }; lc->io_req.bi_op = REQ_OP_WRITE; lc->io_req.bi_op_flags = WRITE_FLUSH; return dm_io(&lc->io_req, 1, &null_location, NULL); } static int read_header(struct log_c *log) { int r; r = rw_header(log, REQ_OP_READ); if (r) return r; header_from_disk(&log->header, log->disk_header); /* New log required? */ if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) { log->header.magic = MIRROR_MAGIC; log->header.version = MIRROR_DISK_VERSION; log->header.nr_regions = 0; } #ifdef __LITTLE_ENDIAN if (log->header.version == 1) log->header.version = 2; #endif if (log->header.version != MIRROR_DISK_VERSION) { DMWARN("incompatible disk log version"); return -EINVAL; } return 0; }
static int dm_io_async_pagelist_IO(struct flashcache_copy_job *job, unsigned int num_regions, #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) struct io_region *where, #else struct dm_io_region *where, #endif io_notify_fn fn, int rw, struct page_list *pl) { struct dm_io_request iorq; iorq.bi_rw = rw; iorq.mem.type = DM_IO_PAGE_LIST; iorq.mem.ptr.pl = pl; iorq.mem.offset = 0; iorq.notify.fn = fn; iorq.notify.context = (void *)job; iorq.client = flashcache_io_client; return dm_io(&iorq, num_regions, where, NULL); }
int dm_io_async_bvec(unsigned int num_regions, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) struct dm_io_region *where, #else struct io_region *where, #endif int rw, struct bio_vec *bvec, io_notify_fn fn, void *context) { struct kcached_job *job = (struct kcached_job *)context; struct cache_c *dmc = job->dmc; struct dm_io_request iorq; iorq.bi_rw = rw; iorq.mem.type = DM_IO_BVEC; iorq.mem.ptr.bvec = bvec; iorq.notify.fn = fn; iorq.notify.context = context; iorq.client = dmc->io_client; return dm_io(&iorq, num_regions, where, NULL); }
static int rw_header(struct log_c *lc, int rw) { lc->io_req.bi_rw = rw; return dm_io(&lc->io_req, 1, &lc->header_location, NULL); }