Example no. 1
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (rw == READ) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			rc = memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
		if (unlikely(bad_pmem)) {
			pmem_clear_poison(pmem, pmem_off, len);
			memcpy_to_pmem(pmem_addr, mem + off, len);
		}
	}

	kunmap_atomic(mem);
	return rc;
}
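
For context, a minimal sketch of how a bio-walking request function might drive pmem_do_bvec(); the surrounding function and the bi_error handling are assumptions based on the block-layer API of that era, not part of the example above.

/*
 * Hypothetical caller sketch: split a bio into segments and feed each one
 * to pmem_do_bvec(), recording the first error.  bio_for_each_segment(),
 * bio_data_dir() and bio_endio() are standard block-layer helpers; the
 * function itself and the bi_error assignment are illustrative only.
 */
static void pmem_handle_bio_sketch(struct pmem_device *pmem, struct bio *bio)
{
	int rc;
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_data_dir(bio),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;	/* field name depends on kernel version */
			break;
		}
	}
	bio_endio(bio);
}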
Example no. 2
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, unsigned int op,
			sector_t sector)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!op_is_write(op)) {
		if (unlikely(bad_pmem))
			rc = BLK_STS_IOERR;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			rc = pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}
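
The read_pmem()/write_pmem() helper bodies are not shown in this example; a simplified, single-page sketch of what the read side could look like, assuming a machine-check-safe copy routine such as memcpy_mcsafe() is available, follows.

/*
 * Simplified sketch (not the in-tree helper): map the destination page
 * and copy with memcpy_mcsafe() so that poisoned media is reported as an
 * I/O error instead of taking a fatal machine check.  A nonzero return
 * from memcpy_mcsafe() means the copy did not complete.
 */
static blk_status_t read_pmem_sketch(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	blk_status_t rc = BLK_STS_OK;
	void *mem = kmap_atomic(page);

	if (memcpy_mcsafe(mem + off, pmem_addr, len))
		rc = BLK_STS_IOERR;

	kunmap_atomic(mem);
	return rc;
}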
Example no. 3
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	int rc = 0;
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (rw == READ) {
		if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
			rc = -EIO; /* fall through so the kmap_atomic() above is undone */
		else {
			memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
	}

	kunmap_atomic(mem);
	return rc;
}
Example no. 4
static int nsio_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

	if (unlikely(offset + size > nsio->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);

		if (unlikely(is_bad_pmem(&nsio->bb, offset / 512, sz_align)))
			return -EIO;
		return memcpy_from_pmem(buf, nsio->addr + offset, size);
	} else {
		memcpy_to_pmem(nsio->addr + offset, buf, size);
		nvdimm_flush(to_nd_region(ndns->dev.parent));
	}

	return 0;
}
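
The sz_align arithmetic in the read path rounds the request up to whole 512-byte sectors so the badblocks list can be consulted at sector granularity; a worked example with illustrative numbers:

/*
 * Illustrative numbers, not from the example above: a 400-byte read at
 * byte offset 700 touches bytes 700..1099, i.e. sectors 1 and 2.
 *
 *   offset / 512                      = 700 / 512           = 1 (start sector)
 *   ALIGN(size + (offset & 511), 512) = ALIGN(400 + 188, 512) = 1024
 *
 * so is_bad_pmem() is asked about 1024 bytes (two sectors) starting at
 * sector 1, covering the whole span the unaligned request touches.
 */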
Example no. 5
long pmem_direct_access(struct block_device *bdev, sector_t sector,
		void __pmem **kaddr, pfn_t *pfn, long size)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
		return -EIO;

	/*
	 * Limit dax to a single page at a time given vmalloc()-backed
	 * in the nfit_test case.
	 */
	if (get_nfit_res(pmem->phys_addr + offset)) {
		struct page *page;

		*kaddr = pmem->virt_addr + offset;
		page = vmalloc_to_page(pmem->virt_addr + offset);
		*pfn = page_to_pfn_t(page);
		dev_dbg_ratelimited(disk_to_dev(bdev->bd_disk)->parent,
				"%s: sector: %#llx pfn: %#lx\n", __func__,
				(unsigned long long) sector, page_to_pfn(page));

		return PAGE_SIZE;
	}

	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return size;
	return pmem->size - pmem->pfn_pad - offset;
}
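
On success this function reports how many contiguous bytes are DAX-addressable starting at the requested sector; a worked example of the offset and return-value arithmetic, with illustrative numbers:

/*
 * Illustrative numbers, not from the example above: a 4 GiB device with
 * data_offset = 2 MiB, pfn_pad = 0 and a request at sector 16:
 *
 *   offset = 16 * 512 + 2 MiB        = 0x2000 + 0x200000 = 0x202000
 *   return = size - pfn_pad - offset = 4 GiB - 0x202000 bytes
 *
 * i.e. everything from the requested sector to the end of the device is
 * reported as usable for dax, except that the vmalloc()-backed nfit_test
 * case above caps the answer at a single PAGE_SIZE, and a device with
 * any badblocks caps it at the caller's requested size.
 */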