/*
 * Parse the "kvm_rma_size=" early boot parameter.
 *
 * Accepts a size with optional K/M/G suffix (via memparse), validates
 * that the hardware supports an RMA of that size, and records both the
 * byte size and the page count for later use by kvm_rma_init().
 *
 * Returns 0 on success, -EINVAL on a missing or unsupported value.
 */
static int __init early_parse_rma_size(char *p)
{
	/* Kernel vsnprintf renders a NULL %s argument as "(null)", so this
	 * debug print is safe even before the !p check. */
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;

	/*
	 * Assign the file-scope kvm_rma_size directly.  A local variable of
	 * the same name here would shadow the global that kvm_rma_init()
	 * reads, so the parsed size would never reach the boot-time
	 * allocator.
	 */
	kvm_rma_size = memparse(p, &p);

	/*
	 * Check that the requested size is one supported in hardware
	 */
	if (lpcr_rmls(kvm_rma_size) < 0) {
		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
		return -EINVAL;
	}
	kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
	return 0;
}
/* * Called at boot time while the bootmem allocator is active, * to allocate contiguous physical memory for the real memory * areas for guests. */ void kvm_rma_init(void) { unsigned long i; unsigned long j, npages; void *rma; struct page *pg; /* Only do this on PPC970 in HV mode */ if (!cpu_has_feature(CPU_FTR_HVMODE) || !cpu_has_feature(CPU_FTR_ARCH_201)) return; if (!kvm_rma_size || !kvm_rma_count) return; /* Check that the requested size is one supported in hardware */ if (lpcr_rmls(kvm_rma_size) < 0) { pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size); return; } npages = kvm_rma_size >> PAGE_SHIFT; rma_info = alloc_bootmem(kvm_rma_count * sizeof(struct kvmppc_rma_info)); for (i = 0; i < kvm_rma_count; ++i) { rma = alloc_bootmem_align(kvm_rma_size, kvm_rma_size); pr_info("Allocated KVM RMA at %p (%ld MB)\n", rma, kvm_rma_size >> 20); rma_info[i].base_virt = rma; rma_info[i].base_pfn = __pa(rma) >> PAGE_SHIFT; rma_info[i].npages = npages; list_add_tail(&rma_info[i].list, &free_rmas); atomic_set(&rma_info[i].use_count, 0); pg = pfn_to_page(rma_info[i].base_pfn); for (j = 0; j < npages; ++j) { atomic_inc(&pg->_count); ++pg; } } }