static int is_uuid_busy(struct device *dev, void *data) { u8 *uuid1 = data, *uuid2 = NULL; if (is_namespace_pmem(dev)) { struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); uuid2 = nspm->uuid; } else if (is_namespace_blk(dev)) { struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); uuid2 = nsblk->uuid; } else if (is_nd_btt(dev)) { struct nd_btt *nd_btt = to_nd_btt(dev); uuid2 = nd_btt->uuid; } else if (is_nd_pfn(dev)) { struct nd_pfn *nd_pfn = to_nd_pfn(dev); uuid2 = nd_pfn->uuid; } if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0) return -EBUSY; return 0; }
static ssize_t mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct nd_pfn *nd_pfn = to_nd_pfn(dev); ssize_t rc = 0; device_lock(dev); nvdimm_bus_lock(dev); if (dev->driver) rc = -EBUSY; else { size_t n = len - 1; if (strncmp(buf, "pmem\n", n) == 0 || strncmp(buf, "pmem", n) == 0) { /* TODO: allocate from PMEM support */ rc = -ENOTTY; } else if (strncmp(buf, "ram\n", n) == 0 || strncmp(buf, "ram", n) == 0) nd_pfn->mode = PFN_MODE_RAM; else if (strncmp(buf, "none\n", n) == 0 || strncmp(buf, "none", n) == 0) nd_pfn->mode = PFN_MODE_NONE; else rc = -EINVAL; } dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); device_unlock(dev); return rc ? rc : len; }
int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata) { int rc; struct device *dev; struct nd_pfn *nd_pfn; struct nd_pfn_sb *pfn_sb; struct nd_region *nd_region = to_nd_region(ndns->dev.parent); if (ndns->force_raw) return -ENODEV; nvdimm_bus_lock(&ndns->dev); dev = __nd_pfn_create(nd_region, NULL, PFN_MODE_NONE, ndns); nvdimm_bus_unlock(&ndns->dev); if (!dev) return -ENOMEM; dev_set_drvdata(dev, drvdata); pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL); nd_pfn = to_nd_pfn(dev); nd_pfn->pfn_sb = pfn_sb; rc = nd_pfn_validate(nd_pfn); nd_pfn->pfn_sb = NULL; kfree(pfn_sb); dev_dbg(&ndns->dev, "%s: pfn: %s\n", __func__, rc == 0 ? dev_name(dev) : "<none>"); if (rc < 0) { __nd_detach_ndns(dev, &nd_pfn->ndns); put_device(dev); } else __nd_device_register(&nd_pfn->dev); return rc; }
/*
 * Sysfs show handler for the pfn "uuid" attribute: emit the uuid in
 * big-endian form, or a blank line when no uuid has been set.
 */
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);

	return nd_pfn->uuid
		? sprintf(buf, "%pUb\n", nd_pfn->uuid)
		: sprintf(buf, "\n");
}
/*
 * Device ->release() callback for a pfn device: tear down claims and
 * identifiers in reverse order of creation, then free the container.
 */
static void nd_pfn_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);

	dev_dbg(dev, "%s\n", __func__);
	/* drop the claim on the backing namespace before freeing */
	nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
	/* return this instance's id to the region's ida allocator */
	ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
	kfree(nd_pfn->uuid);
	kfree(nd_pfn);
}
/*
 * Sysfs show handler for the pfn "namespace" attribute: the name of
 * the claimed namespace device, or an empty line when unclaimed.  The
 * bus lock stabilizes ->ndns against concurrent (dis)association.
 */
static ssize_t namespace_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
	const char *name = "";
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_pfn->ndns)
		name = dev_name(&nd_pfn->ndns->dev);
	rc = sprintf(buf, "%s\n", name);
	nvdimm_bus_unlock(dev);

	return rc;
}
/*
 * Sysfs show handler for the pfn "mode" attribute: "ram", "pmem", or
 * "none" for any other (unset) mode.
 */
static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
	const char *mode;

	if (nd_pfn->mode == PFN_MODE_RAM)
		mode = "ram";
	else if (nd_pfn->mode == PFN_MODE_PMEM)
		mode = "pmem";
	else
		mode = "none";

	return sprintf(buf, "%s\n", mode);
}
/*
 * Sysfs store handler for the pfn "uuid" attribute: parse and install
 * a new uuid via nd_uuid_store() under the device lock.  Returns @len
 * on success, or the negative errno from nd_uuid_store().
 */
static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
	ssize_t rc;

	device_lock(dev);
	rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	device_unlock(dev);

	if (rc)
		return rc;
	return len;
}
/*
 * Undo nvdimm_namespace_attach_pfn(): tear down the pmem disk that was
 * attached in pfn-mode and release the pfn superblock.  Always returns 0.
 *
 * NOTE(review): assumes ndns->claim is a pfn device and that drvdata on
 * it holds the pmem_device set up by the attach path — confirm callers
 * only invoke this after a (partial) attach.
 */
static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct pmem_device *pmem;

	/* free pmem disk */
	pmem = dev_get_drvdata(&nd_pfn->dev);
	pmem_detach_disk(pmem);

	/* release nd_pfn resources */
	kfree(nd_pfn->pfn_sb);
	nd_pfn->pfn_sb = NULL;

	return 0;
}
/*
 * Sysfs store handler for the pfn "namespace" attribute: (dis)associate
 * the named namespace via nd_namespace_store().  Both the device lock
 * and the bus lock are held across the update.  The nd_namespace_store()
 * result is returned directly (it reports the consumed length itself).
 */
static ssize_t namespace_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}
/*
 * Resolve a device to its nd_pfn.  The pfn sysfs attributes are shared
 * with dax device instances, so the containing nd_pfn must be looked up
 * according to the device type rather than assuming a direct pfn
 * conversion.  Returns NULL (after warning) for any other device type.
 */
struct nd_pfn *to_nd_pfn_safe(struct device *dev)
{
	struct nd_dax *nd_dax;

	if (is_nd_pfn(dev))
		return to_nd_pfn(dev);

	if (is_nd_dax(dev)) {
		/* the nd_pfn is embedded inside the dax instance */
		nd_dax = to_nd_dax(dev);
		return &nd_dax->nd_pfn;
	}

	WARN_ON(1);
	return NULL;
}
int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns) { int rc; struct nd_pfn *nd_pfn; struct device *pfn_dev; struct nd_pfn_sb *pfn_sb; struct nd_region *nd_region = to_nd_region(ndns->dev.parent); if (ndns->force_raw) return -ENODEV; switch (ndns->claim_class) { case NVDIMM_CCLASS_NONE: case NVDIMM_CCLASS_PFN: break; default: return -ENODEV; } nvdimm_bus_lock(&ndns->dev); nd_pfn = nd_pfn_alloc(nd_region); pfn_dev = nd_pfn_devinit(nd_pfn, ndns); nvdimm_bus_unlock(&ndns->dev); if (!pfn_dev) return -ENOMEM; pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL); nd_pfn = to_nd_pfn(pfn_dev); nd_pfn->pfn_sb = pfn_sb; rc = nd_pfn_validate(nd_pfn, PFN_SIG); dev_dbg(dev, "%s: pfn: %s\n", __func__, rc == 0 ? dev_name(pfn_dev) : "<none>"); if (rc < 0) { nd_detach_ndns(pfn_dev, &nd_pfn->ndns); put_device(pfn_dev); } else __nd_device_register(pfn_dev); return rc; }
/*
 * Switch an already-attached raw pmem namespace into pfn-mode: validate
 * the pfn superblock constraints, re-map the namespace via
 * devm_memremap_pages(), and re-attach the pmem disk with the data
 * offset from the superblock.  Returns 0 on success or a negative
 * errno; on failure after nd_pfn_init() the attach is unwound via
 * nvdimm_namespace_detach_pfn().
 */
static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct device *dev = &nd_pfn->dev;
	struct vmem_altmap *altmap;
	struct nd_region *nd_region;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	phys_addr_t offset;
	int rc;

	/* a uuid and a claimed namespace are prerequisites for pfn-mode */
	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	nd_region = to_nd_region(dev->parent);
	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;

	if (PAGE_SIZE != SZ_4K) {
		dev_err(dev, "only supported on systems with 4K PAGE_SIZE\n");
		return -ENXIO;
	}
	if (nsio->res.start & ND_PFN_MASK) {
		dev_err(dev, "%s not memory hotplug section aligned\n",
				dev_name(&ndns->dev));
		return -ENXIO;
	}

	pfn_sb = nd_pfn->pfn_sb;
	offset = le64_to_cpu(pfn_sb->dataoff);
	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
	if (nd_pfn->mode == PFN_MODE_RAM) {
		/* RAM mode implies a fixed 8K superblock reservation */
		if (offset != SZ_8K)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		/* NOTE(review): altmap is assigned but never consumed in
		 * this version of the attach path — presumably reserved
		 * for PFN_MODE_PMEM support */
		altmap = NULL;
	} else {
		/* only PFN_MODE_RAM is supported here */
		rc = -ENXIO;
		goto err;
	}

	/* establish pfn range for lookup, and switch to direct map */
	pmem = dev_get_drvdata(dev);
	/* drop the raw mapping before re-mapping with struct pages */
	memunmap_pmem(dev, pmem->virt_addr);
	pmem->virt_addr = (void __pmem *)devm_memremap_pages(dev, &nsio->res);
	if (IS_ERR(pmem->virt_addr)) {
		rc = PTR_ERR(pmem->virt_addr);
		goto err;
	}

	/* attach pmem disk in "pfn-mode" */
	pmem->data_offset = offset;
	rc = pmem_attach_disk(dev, ndns, pmem);
	if (rc)
		goto err;

	return rc;
 err:
	nvdimm_namespace_detach_pfn(ndns);
	return rc;
}