/**
 * axon_ram_direct_access - direct_access() method for block device
 * @device, @sector, @kaddr, @pfn: see block_device_operations method
 */
static long
axon_ram_direct_access(struct block_device *device, sector_t sector,
		       void **kaddr, pfn_t *pfn)
{
	struct axon_ram_bank *bank = device->bd_disk->private_data;
	loff_t offset = (loff_t)sector << AXON_RAM_SECTOR_SHIFT;

	/* The bank is permanently mapped, so the lookup is pure arithmetic. */
	*kaddr = (void *) bank->io_addr + offset;
	*pfn = phys_to_pfn_t(bank->ph_addr + offset, PFN_DEV);
	return bank->size - offset;
}
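/*
 * Illustrative only, not from the axonram driver: a minimal sketch of a
 * caller, assuming the 4-argument direct_access() prototype used above.
 * copy_one_page() and its parameters are hypothetical names. The method
 * returns how many bytes are directly addressable from @sector, so the
 * caller checks that a whole page is available before copying.
 */
#include <linux/blkdev.h>
#include <linux/pfn_t.h>
#include <linux/string.h>

static long copy_one_page(struct block_device *bdev, sector_t sector,
			  void *dst)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	void *kaddr;
	pfn_t pfn;
	long avail;

	if (!ops->direct_access)
		return -EOPNOTSUPP;

	/* Number of contiguous, directly addressable bytes at @sector. */
	avail = ops->direct_access(bdev, sector, &kaddr, &pfn);
	if (avail < 0)
		return avail;
	if (avail < PAGE_SIZE)
		return -ENXIO;	/* less than one page left in the bank */

	memcpy(dst, kaddr, PAGE_SIZE);
	return PAGE_SIZE;
}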
long pmem_direct_access(struct block_device *bdev, sector_t sector,
		void __pmem **kaddr, pfn_t *pfn, long size)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
		return -EIO;

	/*
	 * Limit DAX to a single page at a time, since the nfit_test
	 * resources are vmalloc()-backed rather than physically
	 * contiguous.
	 */
	if (get_nfit_res(pmem->phys_addr + offset)) {
		struct page *page;

		*kaddr = pmem->virt_addr + offset;
		page = vmalloc_to_page(pmem->virt_addr + offset);
		*pfn = page_to_pfn_t(page);
		dev_dbg_ratelimited(disk_to_dev(bdev->bd_disk)->parent,
				"%s: sector: %#llx pfn: %#lx\n", __func__,
				(unsigned long long) sector,
				page_to_pfn(page));

		return PAGE_SIZE;
	}

	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return size;
	return pmem->size - pmem->pfn_pad - offset;
}
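/*
 * Illustrative only, not part of the pmem driver or nfit_test: because
 * the vmalloc()-backed branch above caps each call at PAGE_SIZE, a
 * caller cannot assume one call maps the whole request and has to loop,
 * re-resolving the mapping as the sector advances. read_from_pmem() is
 * a hypothetical helper; @len is assumed to be a multiple of 512.
 */
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/pfn_t.h>
#include <linux/string.h>

static long read_from_pmem(struct block_device *bdev, sector_t sector,
			   void *dst, long len)
{
	long copied = 0;

	while (copied < len) {
		void __pmem *kaddr;
		pfn_t pfn;
		long avail;

		avail = pmem_direct_access(bdev, sector, &kaddr, &pfn,
					   len - copied);
		if (avail < 0)
			return avail;	/* e.g. -EIO on a known bad block */
		if (avail == 0)
			return -ENXIO;	/* nothing addressable here */
		avail = min(avail, len - copied);

		/* __force: copy out of the __pmem-annotated mapping. */
		memcpy(dst + copied, (void __force *)kaddr, avail);
		copied += avail;
		sector += avail >> 9;	/* advance in 512-byte sectors */
	}
	return copied;
}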