/*
 * Validator 'check' callback for on-disk array blocks: make sure the block
 * is where it claims to be and that its checksum matches.
 */
static int array_block_check(struct dm_block_validator *v,
			     struct dm_block *b,
			     size_t size_of_block)
{
	struct array_block *bh_le = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(bh_le->blocknr)) {
		DMERR_LIMIT("array_block_check failed: blocknr %llu != wanted %llu",
			    (unsigned long long) le64_to_cpu(bh_le->blocknr),
			    (unsigned long long) dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
					       size_of_block - sizeof(__le32),
					       CSUM_XOR));
	if (csum_disk != bh_le->csum) {
		DMERR_LIMIT("array_block_check failed: csum %u != wanted %u",
			    (unsigned) le32_to_cpu(csum_disk),
			    (unsigned) le32_to_cpu(bh_le->csum));
		return -EILSEQ;
	}

	return 0;
}
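/*
 * For context, a minimal sketch of how a check callback like the one above is
 * typically wired up: it is installed in a struct dm_block_validator together
 * with a prepare_for_write counterpart that stamps the blocknr and checksum
 * before writeback.  The validator name and the prepare_for_write function
 * referenced here are assumptions, not taken from the code above.
 */
static struct dm_block_validator array_validator_example = {
	.name = "array",
	.prepare_for_write = array_block_prepare_for_write,	/* assumed counterpart */
	.check = array_block_check
};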
/*
 * Validator 'check' callback for btree nodes: verify the block location,
 * the checksum, and that the header fields describe a plausible node.
 */
static int node_check(struct dm_block_validator *v,
		      struct dm_block *b,
		      size_t block_size)
{
	struct node *n = dm_block_data(b);
	struct node_header *h = &n->header;
	size_t value_size;
	__le32 csum_disk;
	uint32_t flags;

	if (dm_block_location(b) != le64_to_cpu(h->blocknr)) {
		DMERR_LIMIT("node_check failed blocknr %llu wanted %llu",
			    le64_to_cpu(h->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&h->flags,
					       block_size - sizeof(__le32),
					       BTREE_CSUM_XOR));
	if (csum_disk != h->csum) {
		DMERR_LIMIT("node_check failed csum %u wanted %u",
			    le32_to_cpu(csum_disk), le32_to_cpu(h->csum));
		return -EILSEQ;
	}

	value_size = le32_to_cpu(h->value_size);

	if (sizeof(struct node_header) +
	    (sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) {
		DMERR_LIMIT("node_check failed: max_entries too large");
		return -EILSEQ;
	}

	if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) {
		DMERR_LIMIT("node_check failed, too many entries");
		return -EILSEQ;
	}

	/*
	 * The node must be either INTERNAL or LEAF.
	 */
	flags = le32_to_cpu(h->flags);
	if (!(flags & INTERNAL_NODE) && !(flags & LEAF_NODE)) {
		DMERR_LIMIT("node_check failed, node is neither INTERNAL or LEAF");
		return -EILSEQ;
	}

	return 0;
}
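/*
 * Illustrative sketch, not from the code above: the validator is passed in
 * whenever a node is read through the transaction manager, so node_check runs
 * before the caller ever sees the data.  "btree_node_validator_example" is an
 * assumed validator instance built along the same lines as the array example
 * earlier.
 */
static int read_node_example(struct dm_transaction_manager *tm, dm_block_t loc,
			     struct dm_block **result)
{
	/* A non-zero return means the block failed node_check. */
	return dm_tm_read_lock(tm, loc, &btree_node_validator_example, result);
}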
/*
 * Allocate a new metadata block, then re-check the free block count so the
 * registered low-space threshold callback can fire if we have just crossed it.
 */
static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
{
	dm_block_t count;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	int r = sm_metadata_new_block_(sm, b);
	if (r) {
		DMERR_LIMIT("unable to allocate new metadata block");
		return r;
	}

	r = sm_metadata_get_nr_free(sm, &count);
	if (r) {
		DMERR_LIMIT("couldn't get free block count");
		return r;
	}

	check_threshold(&smm->threshold, count);

	return r;
}
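/*
 * Hypothetical sketch of the threshold mechanism used by check_threshold()
 * above: once the free-block count falls to the configured level, a registered
 * callback fires exactly once.  The struct layout and names here are
 * illustrative assumptions, not the real definitions.
 */
struct threshold_example {
	dm_block_t threshold;		/* trigger level, in free blocks */
	void (*fn)(void *context);	/* callback registered by the client */
	void *context;
	bool triggered;			/* so the callback only fires once */
};

static void check_threshold_example(struct threshold_example *t, dm_block_t nr_free)
{
	if (t->fn && !t->triggered && nr_free <= t->threshold) {
		t->triggered = true;
		t->fn(t->context);
	}
}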
static void block_dec(void *context, const void *value)
{
	int r;
	uint64_t b;
	__le64 block_le;
	uint32_t ref_count;
	struct dm_block *block;
	struct array_block *ab;
	struct dm_array_info *info = context;

	memcpy(&block_le, value, sizeof(block_le));
	b = le64_to_cpu(block_le);

	r = dm_tm_ref(info->btree_info.tm, b, &ref_count);
	if (r) {
		DMERR_LIMIT("couldn't get reference count for block %llu",
			    (unsigned long long) b);
		return;
	}

	if (ref_count == 1) {
		/*
		 * We're about to drop the last reference to this ablock.
		 * So we need to decrement the ref count of the contents.
		 */
		r = get_ablock(info, b, &block, &ab);
		if (r) {
			DMERR_LIMIT("couldn't get array block %llu",
				    (unsigned long long) b);
			return;
		}

		dec_ablock_entries(info, ab);
		unlock_ablock(info, block);
	}

	dm_tm_dec(info->btree_info.tm, b);
}
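/*
 * Illustrative sketch (field values assumed, not from the code above):
 * block_dec is typically installed as the dec callback of the btree value type
 * describing the __le64 ablock pointers, next to matching inc/equal callbacks,
 * so reference counts stay consistent when entries are copied or removed.
 */
static struct dm_btree_value_type block_value_type_example = {
	.context = NULL,	/* set to the struct dm_array_info at init time */
	.size = sizeof(__le64),
	.inc = block_inc,	/* assumed counterpart that takes a reference */
	.dec = block_dec,
	.equal = block_equal	/* assumed comparison of two ablock pointers */
};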