/**
 * nvdimm_namespace_add_poison() - Convert a list of poison ranges to badblocks
 * @ndns: the namespace containing poison ranges
 * @bb: badblocks instance to populate
 * @offset: offset at the start of the namespace before 'sector 0'
 *
 * The poison list generated during NFIT initialization may contain multiple,
 * possibly overlapping ranges in the SPA (System Physical Address) space.
 * Compare each of these ranges to the namespace currently being initialized,
 * and add badblocks to the gendisk for all matching sub-ranges
 */
void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
		struct badblocks *bb, resource_size_t offset)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	struct nvdimm_bus *nvdimm_bus;
	struct list_head *poison_list;
	u64 ns_start, ns_end, ns_size;
	struct nd_poison *pl;

	/*
	 * Usable span of the namespace in SPA terms is the inclusive
	 * interval [ns_start, ns_end]; ns_size excludes @offset.
	 */
	ns_size = nvdimm_namespace_capacity(ndns) - offset;
	ns_start = nsio->res.start + offset;
	ns_end = nsio->res.end;

	nvdimm_bus = to_nvdimm_bus(nd_region->dev.parent);
	poison_list = &nvdimm_bus->poison_list;
	if (list_empty(poison_list))
		return;

	list_for_each_entry(pl, poison_list, list) {
		/* Poison entries are [start, start + length - 1], inclusive */
		u64 pl_end = pl->start + pl->length - 1;

		/* Discard intervals with no intersection */
		if (pl_end < ns_start)
			continue;
		if (pl->start > ns_end)
			continue;
		/* Deal with any overlap after start of the namespace */
		if (pl->start >= ns_start) {
			u64 start = pl->start;
			u64 len;

			if (pl_end <= ns_end)
				len = pl->length;
			else
				/* clamp to the last byte of the namespace */
				len = ns_start + ns_size - pl->start;
			/*
			 * First argument is a namespace-relative offset;
			 * presumably __add_badblock_range() converts bytes
			 * to sectors internally — TODO confirm at its
			 * definition.
			 */
			__add_badblock_range(bb, start - ns_start, len);
			continue;
		}
		/* Deal with overlap for poison starting before the namespace */
		if (pl->start < ns_start) {
			u64 len;

			if (pl_end < ns_end)
				len = pl->start + pl->length - ns_start;
			else
				/* poison covers the whole namespace */
				len = ns_size;
			/* overlap begins at sector 0 of the namespace */
			__add_badblock_range(bb, 0, len);
		}
	}
}
static int __nd_btt_probe(struct nd_btt *nd_btt, struct nd_namespace_common *ndns, struct btt_sb *btt_sb) { if (!btt_sb || !ndns || !nd_btt) return -ENODEV; if (nvdimm_read_bytes(ndns, SZ_4K, btt_sb, sizeof(*btt_sb))) return -ENXIO; if (nvdimm_namespace_capacity(ndns) < SZ_16M) return -ENXIO; if (!nd_btt_arena_is_valid(nd_btt, btt_sb)) return -ENODEV; nd_btt->lbasize = le32_to_cpu(btt_sb->external_lbasize); nd_btt->uuid = kmemdup(btt_sb->uuid, 16, GFP_KERNEL); if (!nd_btt->uuid) return -ENOMEM; __nd_device_register(&nd_btt->dev); return 0; }
/*
 * Establish (or re-attach to) the pfn superblock for @nd_pfn.  First try
 * to validate an existing superblock on media; if none is present, build
 * a fresh one and write it back.  Returns 0 on success, -EBUSY when
 * validation says the configuration is in use, negative errno otherwise.
 */
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_region *nd_region;
	unsigned long npfns;
	phys_addr_t offset;
	u64 checksum;
	int rc;

	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	/*
	 * nd_pfn_validate() reads the on-media superblock into pfn_sb as a
	 * side effect; 0 means a valid existing config was adopted, -EBUSY
	 * means it exists but conflicts — both are passed to the caller.
	 * Any other error means no usable superblock: fall through and
	 * create one.
	 */
	rc = nd_pfn_validate(nd_pfn);
	if (rc == 0 || rc == -EBUSY)
		return rc;

	/* section alignment for simple hotplug */
	if (nvdimm_namespace_capacity(ndns) < ND_PFN_ALIGN
			|| pmem->phys_addr & ND_PFN_MASK)
		return -ENODEV;

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		goto err;
	}

	/* wipe whatever the failed validate read in; build a new superblock */
	memset(pfn_sb, 0, sizeof(*pfn_sb));
	/* first pass: pfn count assuming no memmap reservation (SZ_8K header) */
	npfns = (pmem->size - SZ_8K) / SZ_4K;
	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	if (nd_pfn->mode == PFN_MODE_PMEM)
		offset = ALIGN(SZ_8K + 64 * npfns, PMD_SIZE);
	else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = SZ_8K;
	else
		goto err;

	/* second pass: pfn count for the space remaining after the memmap */
	npfns = (pmem->size - offset) / SZ_4K;
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	pfn_sb->version_major = cpu_to_le16(1);
	/* checksum is computed over the superblock with the field itself zero */
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
	if (rc)
		goto err;

	return 0;
 err:
	/*
	 * NOTE(review): all failures from here report -ENXIO, discarding
	 * the specific rc from nvdimm_write_bytes() — verify callers do
	 * not need to distinguish write errors.
	 */
	nd_pfn->pfn_sb = NULL;
	kfree(pfn_sb);
	return -ENXIO;
}
int nd_pfn_validate(struct nd_pfn *nd_pfn) { struct nd_namespace_common *ndns = nd_pfn->ndns; struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; struct nd_namespace_io *nsio; u64 checksum, offset; if (!pfn_sb || !ndns) return -ENODEV; if (!is_nd_pmem(nd_pfn->dev.parent)) return -ENODEV; /* section alignment for simple hotplug */ if (nvdimm_namespace_capacity(ndns) < ND_PFN_ALIGN) return -ENODEV; if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb))) return -ENXIO; if (memcmp(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN) != 0) return -ENODEV; checksum = le64_to_cpu(pfn_sb->checksum); pfn_sb->checksum = 0; if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb)) return -ENODEV; pfn_sb->checksum = cpu_to_le64(checksum); switch (le32_to_cpu(pfn_sb->mode)) { case PFN_MODE_RAM: break; case PFN_MODE_PMEM: /* TODO: allocate from PMEM support */ return -ENOTTY; default: return -ENXIO; } if (!nd_pfn->uuid) { /* from probe we allocate */ nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL); if (!nd_pfn->uuid) return -ENOMEM; } else { /* from init we validate */ if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0) return -ENODEV; } /* * These warnings are verbose because they can only trigger in * the case where the physical address alignment of the * namespace has changed since the pfn superblock was * established. */ offset = le64_to_cpu(pfn_sb->dataoff); nsio = to_nd_namespace_io(&ndns->dev); if (nsio->res.start & ND_PFN_MASK) { dev_err(&nd_pfn->dev, "init failed: %s not section aligned\n", dev_name(&ndns->dev)); return -EBUSY; } else if (offset >= resource_size(&nsio->res)) { dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n", dev_name(&ndns->dev)); return -EBUSY; } return 0; }
/*
 * Validate the pfn/dax superblock of @nd_pfn against @sig (PFN_SIG or
 * DAX_SIG) and the backing namespace.  Back-fills fields that older
 * superblock versions lack, then checks mode, alignment, and data
 * offset.  Returns 0 when the configuration is usable, negative errno
 * otherwise.
 */
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
	u64 checksum, offset;
	unsigned long align;
	enum nd_pfn_mode mode;
	struct nd_namespace_io *nsio;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev);

	if (!pfn_sb || !ndns)
		return -ENODEV;

	if (!is_memory(nd_pfn->dev.parent))
		return -ENODEV;

	if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
		return -ENXIO;

	if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0)
		return -ENODEV;

	/*
	 * The checksum is computed with the checksum field itself zeroed;
	 * restore the on-media value once verification passes.
	 */
	checksum = le64_to_cpu(pfn_sb->checksum);
	pfn_sb->checksum = 0;
	if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb))
		return -ENODEV;
	pfn_sb->checksum = cpu_to_le64(checksum);

	if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0)
		return -ENODEV;

	/* minor version < 1 predates start_pad/end_trunc: default to zero */
	if (__le16_to_cpu(pfn_sb->version_minor) < 1) {
		pfn_sb->start_pad = 0;
		pfn_sb->end_trunc = 0;
	}

	/* minor version < 2 predates the align field: default to zero */
	if (__le16_to_cpu(pfn_sb->version_minor) < 2)
		pfn_sb->align = 0;

	switch (le32_to_cpu(pfn_sb->mode)) {
	case PFN_MODE_RAM:
	case PFN_MODE_PMEM:
		break;
	default:
		return -ENXIO;
	}

	align = le32_to_cpu(pfn_sb->align);
	offset = le64_to_cpu(pfn_sb->dataoff);
	/*
	 * For pre-v2 superblocks, infer the alignment from the data
	 * offset.
	 * NOTE(review): if a (corrupt or legacy) superblock carries
	 * dataoff == 0 here, ilog2(0) is undefined — confirm offset is
	 * guaranteed non-zero on this path or add a guard.
	 */
	if (align == 0)
		align = 1UL << ilog2(offset);
	mode = le32_to_cpu(pfn_sb->mode);

	if (!nd_pfn->uuid) {
		/*
		 * When probing a namepace via nd_pfn_probe() the uuid
		 * is NULL (see: nd_pfn_devinit()) we init settings from
		 * pfn_sb
		 */
		nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
		if (!nd_pfn->uuid)
			return -ENOMEM;
		nd_pfn->align = align;
		nd_pfn->mode = mode;
	} else {
		/*
		 * When probing a pfn / dax instance we validate the
		 * live settings against the pfn_sb
		 */
		if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
			return -ENODEV;

		/*
		 * If the uuid validates, but other settings mismatch
		 * return EINVAL because userspace has managed to change
		 * the configuration without specifying new
		 * identification.
		 */
		if (nd_pfn->align != align || nd_pfn->mode != mode) {
			dev_err(&nd_pfn->dev,
					"init failed, settings mismatch\n");
			dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
					nd_pfn->align, align, nd_pfn->mode,
					mode);
			return -EINVAL;
		}
	}

	if (align > nvdimm_namespace_capacity(ndns)) {
		dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
				align, nvdimm_namespace_capacity(ndns));
		return -EINVAL;
	}

	/*
	 * These warnings are verbose because they can only trigger in
	 * the case where the physical address alignment of the
	 * namespace has changed since the pfn superblock was
	 * established.
	 */
	nsio = to_nd_namespace_io(&ndns->dev);
	if (offset >= resource_size(&nsio->res)) {
		dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
				dev_name(&ndns->dev));
		return -EBUSY;
	}

	if ((align && !IS_ALIGNED(offset, align))
			|| !IS_ALIGNED(offset, PAGE_SIZE)) {
		dev_err(&nd_pfn->dev,
				"bad offset: %#llx dax disabled align: %#lx\n",
				offset, align);
		return -ENXIO;
	}

	return 0;
}