static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
        struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
        struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        struct nd_region *nd_region;
        unsigned long npfns;
        phys_addr_t offset;
        u64 checksum;
        int rc;

        if (!pfn_sb)
                return -ENOMEM;

        nd_pfn->pfn_sb = pfn_sb;
        rc = nd_pfn_validate(nd_pfn);
        if (rc == 0 || rc == -EBUSY)
                return rc;

        /* section alignment for simple hotplug */
        if (nvdimm_namespace_capacity(ndns) < ND_PFN_ALIGN
                        || pmem->phys_addr & ND_PFN_MASK)
                return -ENODEV;

        nd_region = to_nd_region(nd_pfn->dev.parent);
        if (nd_region->ro) {
                dev_info(&nd_pfn->dev,
                                "%s is read-only, unable to init metadata\n",
                                dev_name(&nd_region->dev));
                goto err;
        }

        memset(pfn_sb, 0, sizeof(*pfn_sb));

        /* provisional pfn count, used only to size the memmap reservation */
        npfns = (pmem->size - SZ_8K) / SZ_4K;
        /*
         * Note, we use 64 here for the standard size of struct page,
         * debugging options may cause it to be larger in which case the
         * implementation will limit the pfns advertised through
         * ->direct_access() to those that are included in the memmap.
         */
        if (nd_pfn->mode == PFN_MODE_PMEM)
                offset = ALIGN(SZ_8K + 64 * npfns, PMD_SIZE);
        else if (nd_pfn->mode == PFN_MODE_RAM)
                offset = SZ_8K;
        else
                goto err;

        /* recompute the advertised pfn count now that the data offset is known */
        npfns = (pmem->size - offset) / SZ_4K;
        pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
        pfn_sb->dataoff = cpu_to_le64(offset);
        pfn_sb->npfns = cpu_to_le64(npfns);
        memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
        memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
        pfn_sb->version_major = cpu_to_le16(1);
        checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
        pfn_sb->checksum = cpu_to_le64(checksum);

        /* the info block lives at a 4K offset into the namespace */
        rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
        if (rc)
                goto err;

        return 0;
 err:
        nd_pfn->pfn_sb = NULL;
        kfree(pfn_sb);
        return -ENXIO;
}
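/*
 * A minimal userspace sketch of the layout math in the PFN_MODE_PMEM
 * branch above, under the same assumptions the kernel code states: a
 * 64-byte struct page, 4K pfns, and an (assumed) 2M PMD_SIZE for the
 * memmap reservation.  The helper name pfn_pmem_data_offset() is
 * illustrative only, not a kernel symbol.  It shows why npfns is
 * computed twice: a worst-case count sizes the reservation, then the
 * advertised count is recomputed from the resulting data offset.
 */
#include <stdio.h>
#include <stdint.h>

#define SZ_4K           4096ULL
#define SZ_8K           8192ULL
#define PMD_SIZE        (2ULL << 20)            /* assumed 2M, arch-dependent */
#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

/* Reserve one 64-byte page struct per 4K pfn, rounded up to PMD_SIZE. */
static uint64_t pfn_pmem_data_offset(uint64_t size)
{
        uint64_t npfns = (size - SZ_8K) / SZ_4K;        /* provisional count */

        return ALIGN(SZ_8K + 64 * npfns, PMD_SIZE);
}

int main(void)
{
        uint64_t size = 16ULL << 30;                    /* a 16G namespace */
        uint64_t offset = pfn_pmem_data_offset(size);

        /* 258M of the 16G namespace ends up reserved for the memmap */
        printf("dataoff %llu, npfns %llu\n", (unsigned long long)offset,
                        (unsigned long long)((size - offset) / SZ_4K));
        return 0;
}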
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
        u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        u32 start_pad = 0, end_trunc = 0;
        resource_size_t start, size;
        struct nd_namespace_io *nsio;
        struct nd_region *nd_region;
        struct nd_pfn_sb *pfn_sb;
        unsigned long npfns;
        phys_addr_t offset;
        const char *sig;
        u64 checksum;
        int rc;

        pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
        if (!pfn_sb)
                return -ENOMEM;

        nd_pfn->pfn_sb = pfn_sb;
        if (is_nd_dax(&nd_pfn->dev))
                sig = DAX_SIG;
        else
                sig = PFN_SIG;
        rc = nd_pfn_validate(nd_pfn, sig);
        if (rc != -ENODEV)
                return rc;

        /* no info block, do init */
        nd_region = to_nd_region(nd_pfn->dev.parent);
        if (nd_region->ro) {
                dev_info(&nd_pfn->dev,
                                "%s is read-only, unable to init metadata\n",
                                dev_name(&nd_region->dev));
                return -ENXIO;
        }

        memset(pfn_sb, 0, sizeof(*pfn_sb));

        /*
         * Check if pmem collides with 'System RAM' when section aligned and
         * trim it accordingly
         */
        nsio = to_nd_namespace_io(&ndns->dev);
        start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
        size = resource_size(&nsio->res);
        if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
                                IORES_DESC_NONE) == REGION_MIXED) {
                start = nsio->res.start;
                start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
        }

        start = nsio->res.start;
        size = PHYS_SECTION_ALIGN_UP(start + size) - start;
        if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
                                IORES_DESC_NONE) == REGION_MIXED) {
                size = resource_size(&nsio->res);
                end_trunc = start + size
                        - PHYS_SECTION_ALIGN_DOWN(start + size);
        }

        if (start_pad + end_trunc)
                dev_info(&nd_pfn->dev,
                                "%s section collision, truncate %d bytes\n",
                                dev_name(&ndns->dev), start_pad + end_trunc);

        /*
         * Note, we use 64 here for the standard size of struct page,
         * debugging options may cause it to be larger in which case the
         * implementation will limit the pfns advertised through
         * ->direct_access() to those that are included in the memmap.
         */
        start += start_pad;
        size = resource_size(&nsio->res);
        npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
                        / PAGE_SIZE);
        if (nd_pfn->mode == PFN_MODE_PMEM) {
                /*
                 * The altmap should be padded out to the block size used
                 * when populating the vmemmap. This *should* be equal to
                 * PMD_SIZE for most architectures.
                 */
                offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
                                max(nd_pfn->align, PMD_SIZE)) - start;
        } else if (nd_pfn->mode == PFN_MODE_RAM)
                offset = ALIGN(start + SZ_8K + dax_label_reserve,
                                nd_pfn->align) - start;
        else
                return -ENXIO;

        if (offset + start_pad + end_trunc >= size) {
                dev_err(&nd_pfn->dev,
                                "%s unable to satisfy requested alignment\n",
                                dev_name(&ndns->dev));
                return -ENXIO;
        }

        npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
        pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
        pfn_sb->dataoff = cpu_to_le64(offset);
        pfn_sb->npfns = cpu_to_le64(npfns);
        memcpy(pfn_sb->signature, sig, PFN_SIG_LEN);
        memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
        memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
        pfn_sb->version_major = cpu_to_le16(1);
        pfn_sb->version_minor = cpu_to_le16(2);
        pfn_sb->start_pad = cpu_to_le32(start_pad);
        pfn_sb->end_trunc = cpu_to_le32(end_trunc);
        pfn_sb->align = cpu_to_le32(nd_pfn->align);
        checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
        pfn_sb->checksum = cpu_to_le64(checksum);

        return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
}
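/*
 * A standalone sketch of the section-collision trim above.  It assumes
 * 128M memory sections (x86_64's 2^27 section size) and substitutes a
 * toy mixes_with_ram() predicate for region_intersects(); in the kernel
 * that check consults the iomem resource tree.  RAM_END and all names
 * here are made up for illustration.  As in the kernel code, a collision
 * in the first section pads the start up to the next section boundary,
 * and a collision anywhere in the aligned-up span truncates the tail
 * back down to a section boundary.
 */
#include <stdio.h>
#include <stdint.h>

#define SECTION_SIZE            (128ULL << 20)  /* assumed 128M sections */
#define SECTION_ALIGN_DOWN(x)   ((x) & ~(SECTION_SIZE - 1))
#define SECTION_ALIGN_UP(x)     SECTION_ALIGN_DOWN((x) + SECTION_SIZE - 1)

/* Toy stand-in: pretend 'System RAM' occupies everything below RAM_END. */
#define RAM_END                 ((4ULL << 30) + (96ULL << 20))

static int mixes_with_ram(uint64_t start, uint64_t size)
{
        /* [start, start + size) overlaps [0, RAM_END) iff it starts below it */
        (void)size;
        return start < RAM_END;
}

int main(void)
{
        uint64_t start = (4ULL << 30) + (64ULL << 20);  /* pmem at 4G + 64M */
        uint64_t size = 1ULL << 30;                     /* 1G namespace */
        uint32_t start_pad = 0, end_trunc = 0;

        /* front of the namespace shares a section with RAM: pad it */
        if (mixes_with_ram(SECTION_ALIGN_DOWN(start), size))
                start_pad = SECTION_ALIGN_UP(start) - start;

        /* the section-aligned span still collides: truncate the tail */
        if (mixes_with_ram(start, SECTION_ALIGN_UP(start + size) - start))
                end_trunc = start + size - SECTION_ALIGN_DOWN(start + size);

        /* prints 64M (67108864) for both pad and truncation here */
        printf("start_pad %u, end_trunc %u\n", start_pad, end_trunc);
        return 0;
}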