const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name)
{
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	const char *suffix = NULL;

	if (ndns->claim) {
		if (is_nd_btt(ndns->claim))
			suffix = "s";
		else if (is_nd_pfn(ndns->claim))
			suffix = "m";
		else
			dev_WARN_ONCE(&ndns->dev, 1,
					"unknown claim type by %s\n",
					dev_name(ndns->claim));
	}

	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
		if (!suffix && pmem_should_map_pages(&ndns->dev))
			suffix = "m";
		sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : "");
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
				suffix ? suffix : "");
	} else {
		return NULL;
	}

	return name;
}
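/*
 * For illustration (editor's note, not part of the driver): with region id 3,
 * the name buffer comes back as "pmem3" for a raw pmem namespace, "pmem3s"
 * when a BTT claims it, and "pmem3m" when a pfn device claims it (or when the
 * namespace will be mapped with struct page support). A BLK namespace with
 * id 2 in region 0 yields "ndblk0.2", with the same "s"/"m" suffix rules
 * applied per the claim type.
 */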
int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
{
	int rc;
	struct device *dev;
	struct nd_pfn *nd_pfn;
	struct nd_pfn_sb *pfn_sb;
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

	if (ndns->force_raw)
		return -ENODEV;

	nvdimm_bus_lock(&ndns->dev);
	dev = __nd_pfn_create(nd_region, NULL, PFN_MODE_NONE, ndns);
	nvdimm_bus_unlock(&ndns->dev);
	if (!dev)
		return -ENOMEM;
	dev_set_drvdata(dev, drvdata);
	pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
	nd_pfn = to_nd_pfn(dev);
	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	nd_pfn->pfn_sb = NULL;
	kfree(pfn_sb);
	dev_dbg(&ndns->dev, "%s: pfn: %s\n", __func__,
			rc == 0 ? dev_name(dev) : "<none>");
	if (rc < 0) {
		__nd_detach_ndns(dev, &nd_pfn->ndns);
		put_device(dev);
	} else
		__nd_device_register(&nd_pfn->dev);

	return rc;
}
struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}
static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
{
	int rc;
	struct device *dev;
	struct btt_sb *btt_sb;
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

	if (ndns->force_raw)
		return -ENODEV;

	nvdimm_bus_lock(&ndns->dev);
	dev = __nd_btt_create(nd_region, 0, NULL, ndns);
	nvdimm_bus_unlock(&ndns->dev);
	if (!dev)
		return -ENOMEM;
	dev_set_drvdata(dev, drvdata);
	btt_sb = kzalloc(sizeof(*btt_sb), GFP_KERNEL);
	rc = __nd_btt_probe(to_nd_btt(dev), ndns, btt_sb);
	kfree(btt_sb);
	dev_dbg(&ndns->dev, "%s: btt: %s\n", __func__,
			rc == 0 ? dev_name(dev) : "<none>");
	if (rc < 0) {
		__nd_btt_detach_ndns(to_nd_btt(dev));
		put_device(dev);
	}

	return rc;
}
static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static int is_uuid_busy(struct device *dev, void *data)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	u8 *uuid = data;

	switch (nd_region_to_nstype(nd_region)) {
	case ND_DEVICE_NAMESPACE_PMEM: {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		if (!nspm->uuid)
			break;
		if (memcmp(uuid, nspm->uuid, NSLABEL_UUID_LEN) == 0)
			return -EBUSY;
		break;
	}
	case ND_DEVICE_NAMESPACE_BLK: {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		if (!nsblk->uuid)
			break;
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) == 0)
			return -EBUSY;
		break;
	}
	default:
		break;
	}

	return 0;
}
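/*
 * Usage sketch (editor's illustration; the call site below is an assumption,
 * only the callback contract is guaranteed by the driver core):
 * is_uuid_busy() has the signature expected by device_for_each_child(),
 * which stops iterating as soon as the callback returns non-zero. A caller
 * checking whether a uuid is already claimed by any namespace under a region
 * could therefore do something like:
 *
 *	if (device_for_each_child(&nd_region->dev, uuid, is_uuid_busy))
 *		return -EBUSY;	// uuid already in use by a namespace
 */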
/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;

		to_nd_blk_region(dev)->disable(nvdimm_bus, dev);
	}
	if (dev->parent && is_nd_blk(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev &&
				is_nd_blk(dev->parent))
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}
static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}
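/*
 * Wiring sketch (editor's illustration; the attribute list shown is assumed,
 * not copied from the driver): an is_visible() callback like
 * mapping_visible() is installed through a struct attribute_group, and the
 * driver core consults it once per attribute. A group along these lines
 * would hide mappingN entries beyond ndr_mappings:
 *
 *	static struct attribute *mapping_attributes[] = {
 *		&dev_attr_mapping0.attr,
 *		&dev_attr_mapping1.attr,
 *		// ... up to the maximum supported mappings
 *		NULL,
 *	};
 *
 *	static struct attribute_group nd_mapping_attribute_group = {
 *		.is_visible = mapping_visible,
 *		.attrs = mapping_attributes,
 *	};
 */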
static bool is_nd_btt_idle(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_btt *nd_btt = to_nd_btt(dev);

	if (nd_region->btt_seed == dev || nd_btt->ndns || dev->driver)
		return false;
	return true;
}
static void nd_btt_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_btt *nd_btt = to_nd_btt(dev);

	dev_dbg(dev, "%s\n", __func__);
	nd_btt_detach_ndns(nd_btt);
	ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
	kfree(nd_btt->uuid);
	kfree(nd_btt);
}
static int nd_region_probe(struct device *dev)
{
	int err, rc;
	static unsigned long once;
	struct nd_region_namespaces *num_ns;
	struct nd_region *nd_region = to_nd_region(dev);

	if (nd_region->num_lanes > num_online_cpus()
			&& nd_region->num_lanes < num_possible_cpus()
			&& !test_and_set_bit(0, &once)) {
		dev_info(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
				num_online_cpus(), nd_region->num_lanes,
				num_possible_cpus());
		dev_info(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n",
				nd_region->num_lanes);
	}

	rc = nd_blk_region_init(nd_region);
	if (rc)
		return rc;

	rc = nd_region_register_namespaces(nd_region, &err);
	num_ns = devm_kzalloc(dev, sizeof(*num_ns), GFP_KERNEL);
	if (!num_ns)
		return -ENOMEM;

	if (rc < 0)
		return rc;

	num_ns->active = rc;
	num_ns->count = rc + err;
	dev_set_drvdata(dev, num_ns);

	if (rc && err && rc == err)
		return -ENODEV;

	nd_region->btt_seed = nd_btt_create(nd_region);
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	if (err == 0)
		return 0;

	/*
	 * Given multiple namespaces per region, we do not want to
	 * disable all the successfully registered peer namespaces upon
	 * a single registration failure.  If userspace is missing a
	 * namespace that it expects it can disable/re-enable the region
	 * to retry discovery after correcting the failure.
	 * <regionX>/namespaces returns the current
	 * "<async-registered>/<total>" namespace count.
	 */
	dev_err(dev, "failed to register %d namespace%s, continuing...\n",
			err, err == 1 ? "" : "s");
	return 0;
}
static void nd_pfn_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);

	dev_dbg(dev, "%s\n", __func__);
	nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
	ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
	kfree(nd_pfn->uuid);
	kfree(nd_pfn);
}
static void namespace_blk_release(struct device *dev)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nsblk->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nsblk->id);
	kfree(nsblk->alt_name);
	kfree(nsblk->uuid);
	kfree(nsblk->res);
	kfree(nsblk);
}
static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
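/*
 * Hook-up sketch (editor's illustration, assuming the usual sysfs pattern):
 * a _show()/_store() pair named after the attribute is typically registered
 * with the DEVICE_ATTR_RW() helper, which expands to a struct
 * device_attribute named dev_attr_read_only bound to read_only_show() and
 * read_only_store() above:
 *
 *	static DEVICE_ATTR_RW(read_only);
 */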
static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
/**
 * nvdimm_namespace_add_poison() - Convert a list of poison ranges to badblocks
 * @ndns:	the namespace containing poison ranges
 * @bb:		badblocks instance to populate
 * @offset:	offset at the start of the namespace before 'sector 0'
 *
 * The poison list generated during NFIT initialization may contain multiple,
 * possibly overlapping ranges in the SPA (System Physical Address) space.
 * Compare each of these ranges to the namespace currently being initialized,
 * and add badblocks to the gendisk for all matching sub-ranges.
 */
void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
		struct badblocks *bb, resource_size_t offset)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	struct nvdimm_bus *nvdimm_bus;
	struct list_head *poison_list;
	u64 ns_start, ns_end, ns_size;
	struct nd_poison *pl;

	ns_size = nvdimm_namespace_capacity(ndns) - offset;
	ns_start = nsio->res.start + offset;
	ns_end = nsio->res.end;

	nvdimm_bus = to_nvdimm_bus(nd_region->dev.parent);
	poison_list = &nvdimm_bus->poison_list;
	if (list_empty(poison_list))
		return;

	list_for_each_entry(pl, poison_list, list) {
		u64 pl_end = pl->start + pl->length - 1;

		/* Discard intervals with no intersection */
		if (pl_end < ns_start)
			continue;
		if (pl->start > ns_end)
			continue;
		/* Deal with any overlap after start of the namespace */
		if (pl->start >= ns_start) {
			u64 start = pl->start;
			u64 len;

			if (pl_end <= ns_end)
				len = pl->length;
			else
				len = ns_start + ns_size - pl->start;
			__add_badblock_range(bb, start - ns_start, len);
			continue;
		}
		/* Deal with overlap for poison starting before the namespace */
		if (pl->start < ns_start) {
			u64 len;

			if (pl_end < ns_end)
				len = pl->start + pl->length - ns_start;
			else
				len = ns_size;
			__add_badblock_range(bb, 0, len);
		}
	}
}
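/*
 * Worked example (editor's illustration): suppose the namespace resource
 * spans SPA 0x1000..0x1fff with offset 0, so ns_start = 0x1000 and
 * ns_size = 0x1000. A poison entry with start = 0x0f00 and length = 0x200
 * has pl_end = 0x10ff: it begins before the namespace, so the second branch
 * fires and len = 0x0f00 + 0x200 - 0x1000 = 0x100, i.e. the first 0x100
 * bytes of the namespace are marked bad via __add_badblock_range(bb, 0,
 * 0x100). Entries entirely below 0x1000 or above 0x1fff are skipped by the
 * intersection checks at the top of the loop.
 */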
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}
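/*
 * Stamping sketch (editor's illustration, assuming the common token-pasting
 * pattern for indexed sysfs attributes): per-index mappingN entries are most
 * naturally generated by a macro that forwards to mappingN() with the index
 * baked in:
 *
 *	#define REGION_MAPPING(idx)					\
 *	static ssize_t mapping##idx##_show(struct device *dev,		\
 *			struct device_attribute *attr, char *buf)	\
 *	{								\
 *		return mappingN(dev, buf, idx);				\
 *	}								\
 *	static DEVICE_ATTR_RO(mapping##idx)
 *
 *	REGION_MAPPING(0);
 *	REGION_MAPPING(1);
 */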
static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static int nd_region_remove(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/* flush attribute readers and disable */
	nvdimm_bus_lock(dev);
	nd_region->ns_seed = NULL;
	nd_region->btt_seed = NULL;
	nd_region->pfn_seed = NULL;
	dev_set_drvdata(dev, NULL);
	nvdimm_bus_unlock(dev);

	device_for_each_child(dev, NULL, child_unregister);
	return 0;
}
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct device *seed = NULL;

	if (is_nd_btt(dev))
		seed = nd_region->btt_seed;
	else if (is_nd_pfn(dev))
		seed = nd_region->pfn_seed;
	else if (is_nd_dax(dev))
		seed = nd_region->dax_seed;

	if (seed == dev || ndns || dev->driver)
		return false;
	return true;
}
static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	resource_size_t size = 0;
	struct resource *res;

	if (!nsblk->uuid)
		return 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			size += resource_size(res);

	return size;
}
static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	struct resource *res;
	int count, i;

	if (!nsblk->uuid || !nsblk->lbasize || !ndd)
		return false;

	count = 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		/*
		 * Resources with unacknowledged adjustments indicate a
		 * failure to update labels
		 */
		if (res->flags & DPA_RESOURCE_ADJUSTED)
			return false;
		count++;
	}

	/* These values match after a successful label update */
	if (count != nsblk->num_resources)
		return false;

	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *found = NULL;

		for_each_dpa_resource(ndd, res)
			if (res == nsblk->res[i]) {
				found = res;
				break;
			}
		/* stale resource */
		if (!found)
			return false;
	}

	return true;
}
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}
static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}
bool pmem_should_map_pages(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
		return false;

	if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
		return false;

	if (is_nd_pfn(dev) || is_nd_btt(dev))
		return false;

#ifdef ARCH_MEMREMAP_PMEM
	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
#else
	return false;
#endif
}
static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated the
	 * moment nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
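/*
 * Usage note (editor's illustration; the region number is assumed):
 * attributes like this surface under sysfs, so the snapshot semantics above
 * matter to shell users too, e.g.:
 *
 *	# cat /sys/bus/nd/devices/region0/available_size
 *
 * Two consecutive reads may disagree if namespaces are created or deleted
 * in between; serializing such provisioning is left to userspace.
 */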
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}