static int read_header(struct log_c *log) { int r; r = rw_header(log, READ); if (r) return r; header_from_disk(&log->header, log->disk_header); /* New log required? */ if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) { log->header.magic = MIRROR_MAGIC; log->header.version = MIRROR_DISK_VERSION; log->header.nr_regions = 0; } /* Version 2 is like version 1 but always little endian on disk. */ #ifdef __LITTLE_ENDIAN if (log->header.version == 1) log->header.version = 2; #endif if (log->header.version != MIRROR_DISK_VERSION) { DMWARN("incompatible disk log version"); return -EINVAL; } return 0; }
/*
 * Issue an empty (zero-sector) flush write against the log device so that
 * previously written header/bitmap data reaches stable media.
 *
 * Returns 0 on success or the error from dm_io().
 */
static int flush_header(struct log_c *lc)
{
	/* Zero-count region: carries no payload, only the flush semantics. */
	struct dm_io_region null_location = {
		.bdev = lc->header_location.bdev,
		.sector = 0,
		.count = 0,
	};

	lc->io_req.bi_op = REQ_OP_WRITE;
	lc->io_req.bi_op_flags = WRITE_FLUSH;

	return dm_io(&lc->io_req, 1, &null_location, NULL);
}

/*
 * Read the on-disk log header into core and validate it.  If no valid
 * header exists (wrong magic) or a full resync was requested (sync mode
 * other than DEFAULTSYNC), a fresh in-core header is initialized instead.
 *
 * Returns 0 on success, an I/O error from rw_header(), or -EINVAL for an
 * incompatible on-disk version.
 */
static int read_header(struct log_c *log)
{
	int r;

	r = rw_header(log, REQ_OP_READ);
	if (r)
		return r;

	header_from_disk(&log->header, log->disk_header);

	/* New log required? */
	if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) {
		log->header.magic = MIRROR_MAGIC;
		log->header.version = MIRROR_DISK_VERSION;
		log->header.nr_regions = 0;
	}

	/* Version 2 is like version 1 but always little endian on disk. */
#ifdef __LITTLE_ENDIAN
	if (log->header.version == 1)
		log->header.version = 2;
#endif

	if (log->header.version != MIRROR_DISK_VERSION) {
		DMWARN("incompatible disk log version");
		return -EINVAL;
	}

	return 0;
}
/*
 * Write the in-core log state out to the log device, but only when it
 * has actually changed since the last flush.
 *
 * If the caller-supplied flush callback fails we can no longer tell
 * which regions are clean, so every region is conservatively marked
 * dirty before the header is written.
 *
 * Returns 0 on success or the rw_header()/flush_header() error;
 * on failure the log device is marked failed via fail_log_device().
 */
static int disk_flush(struct dm_dirty_log *log)
{
	int r, i;
	struct log_c *lc = log->context;

	/* only write if the log has changed */
	if (!lc->touched_cleaned && !lc->touched_dirtied)
		return 0;

	if (lc->touched_cleaned && log->flush_callback_fn &&
	    log->flush_callback_fn(lc->ti)) {
		/*
		 * At this point it is impossible to determine which
		 * regions are clean and which are dirty (without
		 * re-reading the log off disk). So mark all of them
		 * dirty.
		 */
		lc->flush_failed = 1;
		for (i = 0; i < lc->region_count; i++)
			log_clear_bit(lc, lc->clean_bits, i);
	}

	r = rw_header(lc, REQ_OP_WRITE);
	if (r)
		fail_log_device(lc);
	else {
		/* Dirtied regions need the data to hit media: flush too. */
		if (lc->touched_dirtied) {
			r = flush_header(lc);
			if (r) {
				lc->log_dev_flush_failed = 1;
				fail_log_device(lc);
			} else
				lc->touched_dirtied = 0;
		}
		lc->touched_cleaned = 0;
	}

	return r;
}
/*
 * Resume the disk log: read the header back from the log device,
 * reconcile the in-core bitsets with the (possibly resized) device,
 * and write the updated header out again.
 *
 * Returns 0 on success or the error from writing/flushing the header;
 * on failure the log device is marked failed via fail_log_device().
 */
static int disk_resume(struct dm_dirty_log *log)
{
	int r;
	unsigned i;
	struct log_c *lc = (struct log_c *) log->context;
	size_t size = lc->bitset_uint32_count * sizeof(uint32_t);

	/* read the disk header */
	r = read_header(lc);
	if (r) {
		DMWARN("%s: Failed to read header on dirty region log device",
		       lc->log_dev->name);
		fail_log_device(lc);
		/*
		 * If the log device cannot be read, we must assume
		 * all regions are out-of-sync. If we simply return
		 * here, the state will be uninitialized and could
		 * lead us to return 'in-sync' status for regions
		 * that are actually 'out-of-sync'.
		 */
		lc->header.nr_regions = 0;
	}

	/* set or clear any new bits -- device has grown */
	if (lc->sync == NOSYNC)
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			/* FIXME: amazingly inefficient */
			log_set_bit(lc, lc->clean_bits, i);
	else
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			/* FIXME: amazingly inefficient */
			log_clear_bit(lc, lc->clean_bits, i);

	/*
	 * clear any old bits -- device has shrunk
	 * (clears up to the next bitset-word boundary so stale tail bits
	 * cannot leak into sync_count below)
	 */
	for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
		log_clear_bit(lc, lc->clean_bits, i);

	/* copy clean across to sync */
	memcpy(lc->sync_bits, lc->clean_bits, size);
	lc->sync_count = memweight(lc->clean_bits,
				lc->bitset_uint32_count * sizeof(uint32_t));
	lc->sync_search = 0;

	/* set the correct number of regions in the header */
	lc->header.nr_regions = lc->region_count;

	header_to_disk(&lc->header, lc->disk_header);

	/* write the new header */
	r = rw_header(lc, REQ_OP_WRITE);
	if (!r) {
		r = flush_header(lc);
		if (r)
			lc->log_dev_flush_failed = 1;
	}
	if (r) {
		DMWARN("%s: Failed to write header on dirty region log device",
		       lc->log_dev->name);
		fail_log_device(lc);
	}

	return r;
}
static inline int write_header(struct log_c *log) { header_to_disk(&log->header, log->disk_header); return rw_header(log, WRITE); }