int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
{
	int rc;
	struct device *dev;
	struct btt_sb *btt_sb;
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

	if (ndns->force_raw)
		return -ENODEV;

	nvdimm_bus_lock(&ndns->dev);
	dev = __nd_btt_create(nd_region, 0, NULL, ndns);
	nvdimm_bus_unlock(&ndns->dev);
	if (!dev)
		return -ENOMEM;
	dev_set_drvdata(dev, drvdata);
	btt_sb = kzalloc(sizeof(*btt_sb), GFP_KERNEL);
	rc = __nd_btt_probe(to_nd_btt(dev), ndns, btt_sb);
	kfree(btt_sb);
	dev_dbg(&ndns->dev, "%s: btt: %s\n", __func__,
			rc == 0 ? dev_name(dev) : "<none>");
	if (rc < 0) {
		__nd_btt_detach_ndns(to_nd_btt(dev));
		put_device(dev);
	}

	return rc;
}
static ssize_t mode_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
	ssize_t rc = 0;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	if (dev->driver)
		rc = -EBUSY;
	else {
		size_t n = len - 1;

		if (strncmp(buf, "pmem\n", n) == 0
				|| strncmp(buf, "pmem", n) == 0) {
			/* TODO: allocate from PMEM support */
			rc = -ENOTTY;
		} else if (strncmp(buf, "ram\n", n) == 0
				|| strncmp(buf, "ram", n) == 0)
			nd_pfn->mode = PFN_MODE_RAM;
		else if (strncmp(buf, "none\n", n) == 0
				|| strncmp(buf, "none", n) == 0)
			nd_pfn->mode = PFN_MODE_NONE;
		else
			rc = -EINVAL;
	}
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
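/*
 * For reference, mode_store() above switches between the pfn operating
 * modes.  The enum is assumed to look roughly like this sketch; the real
 * definition may carry additional modes or values.
 */
enum nd_pfn_mode {
	PFN_MODE_NONE,
	PFN_MODE_RAM,
	PFN_MODE_PMEM,
};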
int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
{
	int rc;
	struct device *dev;
	struct nd_pfn *nd_pfn;
	struct nd_pfn_sb *pfn_sb;
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

	if (ndns->force_raw)
		return -ENODEV;

	nvdimm_bus_lock(&ndns->dev);
	dev = __nd_pfn_create(nd_region, NULL, PFN_MODE_NONE, ndns);
	nvdimm_bus_unlock(&ndns->dev);
	if (!dev)
		return -ENOMEM;
	dev_set_drvdata(dev, drvdata);
	pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
	nd_pfn = to_nd_pfn(dev);
	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	nd_pfn->pfn_sb = NULL;
	kfree(pfn_sb);
	dev_dbg(&ndns->dev, "%s: pfn: %s\n", __func__,
			rc == 0 ? dev_name(dev) : "<none>");
	if (rc < 0) {
		__nd_detach_ndns(dev, &nd_pfn->ndns);
		put_device(dev);
	} else
		__nd_device_register(&nd_pfn->dev);

	return rc;
}
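/*
 * Illustrative caller sketch (not part of the listing above): a namespace
 * driver's probe routine would typically offer the namespace to the BTT
 * and PFN personalities before attaching it raw, and treat a successful
 * claim as "defer to the stacked driver".  Everything here except
 * nd_btt_probe() and nd_pfn_probe() is a hypothetical name used only for
 * illustration.
 */
static int example_namespace_driver_probe(struct nd_namespace_common *ndns,
		void *drvdata)
{
	if (nd_btt_probe(ndns, drvdata) == 0)
		return -ENXIO;	/* will come back as a btt instance */
	if (nd_pfn_probe(ndns, drvdata) == 0)
		return -ENXIO;	/* will come back in pfn mode */

	return example_attach_raw_namespace(ndns, drvdata);
}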
/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;

		to_nd_blk_region(dev)->disable(nvdimm_bus, dev);
	}
	if (dev->parent && is_nd_blk(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev &&
				is_nd_blk(dev->parent))
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	resource_size_t size;

	nvdimm_bus_lock(&nsblk->common.dev);
	size = __nd_namespace_blk_validate(nsblk);
	nvdimm_bus_unlock(&nsblk->common.dev);

	return size;
}
static int nvdimm_remove(struct device *dev)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);

	nvdimm_bus_lock(dev);
	dev_set_drvdata(dev, NULL);
	nvdimm_bus_unlock(dev);
	put_ndd(ndd);

	return 0;
}
static ssize_t namespace_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	rc = sprintf(buf, "%s\n", nd_pfn->ndns
			? dev_name(&nd_pfn->ndns->dev) : "");
	nvdimm_bus_unlock(dev);

	return rc;
}
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static int nd_region_remove(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/* flush attribute readers and disable */
	nvdimm_bus_lock(dev);
	nd_region->ns_seed = NULL;
	nd_region->btt_seed = NULL;
	nd_region->pfn_seed = NULL;
	dev_set_drvdata(dev, NULL);
	nvdimm_bus_unlock(dev);

	device_for_each_child(dev, NULL, child_unregister);

	return 0;
}
static ssize_t namespace_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	nvdimm_bus_lock(dev);
	device_lock(dev);
	rc = __namespace_store(dev, attr, buf, len);
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
	device_unlock(dev);
	nvdimm_bus_unlock(dev);

	return rc;
}
static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_namespaces *num_ns = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (num_ns)
		rc = sprintf(buf, "%d/%d\n", num_ns->active, num_ns->count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
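/*
 * For context: init_namespaces_show() above prints "active/count".  The
 * drvdata is assumed to be a small bookkeeping struct along these lines
 * (a sketch; the real definition may carry more state).
 */
struct nd_region_namespaces {
	int count;
	int active;
};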
static int nvdimm_probe(struct device *dev)
{
	struct nvdimm_drvdata *ndd;
	int rc;

	ndd = kzalloc(sizeof(*ndd), GFP_KERNEL);
	if (!ndd)
		return -ENOMEM;

	dev_set_drvdata(dev, ndd);
	ndd->dpa.name = dev_name(dev);
	ndd->ns_current = -1;
	ndd->ns_next = -1;
	ndd->dpa.start = 0;
	ndd->dpa.end = -1;
	ndd->dev = dev;
	get_device(dev);
	kref_init(&ndd->kref);

	rc = nvdimm_init_nsarea(ndd);
	if (rc)
		goto err;

	rc = nvdimm_init_config_data(ndd);
	if (rc)
		goto err;

	dev_dbg(dev, "config data size: %d\n", ndd->nsarea.config_size);

	nvdimm_bus_lock(dev);
	ndd->ns_current = nd_label_validate(ndd);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
	nd_label_copy(ndd, to_next_namespace_index(ndd),
			to_current_namespace_index(ndd));
	rc = nd_label_reserve_dpa(ndd);
	nvdimm_bus_unlock(dev);

	if (rc)
		goto err;

	return 0;

 err:
	put_ndd(ndd);
	return rc;
}
static ssize_t align_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_size_select_store(dev, buf, &nd_pfn->align,
			nd_pfn_supported_alignments());
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "%s\n", __func__);

	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}
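/*
 * Sketch of the refcount helpers implied by nvdimm_drvdata_release() and
 * the put_ndd() calls elsewhere in this listing (an assumption: these
 * follow the usual kref idiom; the real helpers may differ in detail).
 */
static void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}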
static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_btt *nd_btt = to_nd_btt(dev);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_sector_size_store(dev, buf, &nd_btt->lbasize,
			btt_lbasize_supported);
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
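/*
 * For illustration only: nd_sector_size_store() is assumed to scan a
 * zero-terminated table of permitted sector sizes such as the one below.
 * The exact sizes supported by a given BTT implementation may differ.
 */
static const unsigned long btt_lbasize_supported[] = { 512, 4096, 0 };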
static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}
static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated the
	 * moment nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
	if (nfree - 1 > nfree) {
		/* nfree == 0, i.e. the decrement below would underflow */
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);

	return rc;
}
int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
{
	int rc;
	struct nd_pfn *nd_pfn;
	struct device *pfn_dev;
	struct nd_pfn_sb *pfn_sb;
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

	if (ndns->force_raw)
		return -ENODEV;

	switch (ndns->claim_class) {
	case NVDIMM_CCLASS_NONE:
	case NVDIMM_CCLASS_PFN:
		break;
	default:
		return -ENODEV;
	}

	nvdimm_bus_lock(&ndns->dev);
	nd_pfn = nd_pfn_alloc(nd_region);
	pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
	nvdimm_bus_unlock(&ndns->dev);
	if (!pfn_dev)
		return -ENOMEM;
	pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
	nd_pfn = to_nd_pfn(pfn_dev);
	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn, PFN_SIG);
	dev_dbg(dev, "%s: pfn: %s\n", __func__,
			rc == 0 ? dev_name(pfn_dev) : "<none>");
	if (rc < 0) {
		nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
		put_device(pfn_dev);
	} else
		__nd_device_register(pfn_dev);

	return rc;
}
static int nvdimm_probe(struct device *dev)
{
	struct nvdimm_drvdata *ndd;
	int rc;

	rc = nvdimm_check_config_data(dev);
	if (rc) {
		/* not required for non-aliased nvdimm, ex. NVDIMM-N */
		if (rc == -ENOTTY)
			rc = 0;
		return rc;
	}

	/*
	 * The locked status bit reflects explicit status codes from the
	 * label reading commands, revalidate it each time the driver is
	 * activated and re-reads the label area.
	 */
	nvdimm_clear_locked(dev);

	ndd = kzalloc(sizeof(*ndd), GFP_KERNEL);
	if (!ndd)
		return -ENOMEM;

	dev_set_drvdata(dev, ndd);
	ndd->dpa.name = dev_name(dev);
	ndd->ns_current = -1;
	ndd->ns_next = -1;
	ndd->dpa.start = 0;
	ndd->dpa.end = -1;
	ndd->dev = dev;
	get_device(dev);
	kref_init(&ndd->kref);

	/*
	 * Attempt to unlock, if the DIMM supports security commands,
	 * otherwise the locked indication is determined by explicit
	 * status codes from the label reading commands.
	 */
	rc = nvdimm_security_unlock(dev);
	if (rc < 0)
		dev_dbg(dev, "failed to unlock dimm: %d\n", rc);

	/*
	 * EACCES failures reading the namespace label-area-properties
	 * are interpreted as the DIMM capacity being locked but the
	 * namespace labels themselves being accessible.
	 */
	rc = nvdimm_init_nsarea(ndd);
	if (rc == -EACCES) {
		/*
		 * See nvdimm_namespace_common_probe() where we fail to
		 * allow namespaces to probe while the DIMM is locked,
		 * but we do allow for namespace enumeration.
		 */
		nvdimm_set_locked(dev);
		rc = 0;
	}
	if (rc)
		goto err;

	/*
	 * EACCES failures reading the namespace label-data are
	 * interpreted as the label area being locked in addition to the
	 * DIMM capacity.  We fail the dimm probe to prevent regions from
	 * attempting to parse the label area.
	 */
	rc = nd_label_data_init(ndd);
	if (rc == -EACCES)
		nvdimm_set_locked(dev);
	if (rc)
		goto err;

	dev_dbg(dev, "config data size: %d\n", ndd->nsarea.config_size);

	nvdimm_bus_lock(dev);
	if (ndd->ns_current >= 0) {
		rc = nd_label_reserve_dpa(ndd);
		if (rc == 0)
			nvdimm_set_aliasing(dev);
	}
	nvdimm_bus_unlock(dev);

	if (rc)
		goto err;

	return 0;

 err:
	put_ndd(ndd);
	return rc;
}