/*
 * apq8064_allocate_fb_region() - reserve boot-time memory for the
 * framebuffer on APQ8064 boards.
 *
 * Grabs MSM_FB_SIZE bytes of 4 KiB-aligned bootmem and publishes the
 * physical range through msm_fb_resources[0] so the fb platform driver
 * can claim it later.
 */
void __init apq8064_allocate_fb_region(void)
{
	unsigned long fb_bytes = MSM_FB_SIZE;
	void *fb_virt = alloc_bootmem_align(fb_bytes, 0x1000);

	msm_fb_resources[0].start = __pa(fb_virt);
	msm_fb_resources[0].end = msm_fb_resources[0].start + fb_bytes - 1;
	pr_info("allocating %lu bytes at %p (%lx physical) for fb\n",
		fb_bytes, fb_virt, __pa(fb_virt));
}
/*
 * msm_msm7627a_allocate_memory_regions() - carve out bootmem for the
 * framebuffer and, when CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE is set,
 * the V4L2 video-overlay buffer.
 *
 * Each region is allocated 4 KiB-aligned and its physical span is
 * recorded in the matching platform-device resource table.
 */
void __init msm_msm7627a_allocate_memory_regions(void)
{
	unsigned long nbytes = MSM_FB_SIZE;
	void *virt = alloc_bootmem_align(nbytes, 0x1000);

	msm_fb_resources[0].start = __pa(virt);
	msm_fb_resources[0].end = msm_fb_resources[0].start + nbytes - 1;
	pr_info("allocating %lu bytes at %p (%lx physical) for fb\n",
		nbytes, virt, __pa(virt));

#ifdef CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE
	nbytes = MSM_V4L2_VIDEO_OVERLAY_BUF_SIZE;
	virt = alloc_bootmem_align(nbytes, 0x1000);
	msm_v4l2_video_overlay_resources[0].start = __pa(virt);
	msm_v4l2_video_overlay_resources[0].end =
		msm_v4l2_video_overlay_resources[0].start + nbytes - 1;
	pr_debug("allocating %lu bytes at %p (%lx physical) for v4l2\n",
		 nbytes, virt, __pa(virt));
#endif
}
/*
 * msm_msm7627a_allocate_memory_regions() - reserve bootmem for the
 * framebuffer and (optionally) the V4L2 video-overlay buffer.
 *
 * The framebuffer size is chosen per board variant via machine_is_*()
 * checks; Huawei builds instead ask get_framebuffer_size() for it
 * (presumably panel-derived — confirm against that helper).
 */
void __init msm_msm7627a_allocate_memory_regions(void)
{
	/* Add 4 framebuffer and delete the mem adapter strategy */
	void *addr;
	unsigned long fb_size;

#ifndef CONFIG_HUAWEI_KERNEL
	/* Pick the fb size that matches the detected board variant. */
	if (machine_is_msm7625a_surf() || machine_is_msm7625a_ffa())
		fb_size = MSM7x25A_MSM_FB_SIZE;
	else if (machine_is_msm7627a_evb() || machine_is_msm8625_evb() ||
		 machine_is_msm8625_evt() || machine_is_qrd_skud_prime())
		fb_size = MSM8x25_MSM_FB_SIZE;
	else if (machine_is_msm8625q_evbd() || machine_is_msm8625q_skud())
		fb_size = MSM8x25Q_MSM_FB_SIZE;
	else
		fb_size = MSM_FB_SIZE;
#else
	/* Huawei builds compute the fb size at runtime. */
	fb_size = get_framebuffer_size();
#endif

	/* 4 KiB-aligned bootmem; physical range exported to the fb driver. */
	addr = alloc_bootmem_align(fb_size, 0x1000);
	msm_fb_resources[0].start = __pa(addr);
	msm_fb_resources[0].end = msm_fb_resources[0].start + fb_size - 1;
	pr_info("allocating %lu bytes at %p (%lx physical) for fb\n",
		fb_size, addr, __pa(addr));

#ifdef CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE
	/* Same pattern for the V4L2 overlay buffer; fb_size is reused. */
	fb_size = MSM_V4L2_VIDEO_OVERLAY_BUF_SIZE;
	addr = alloc_bootmem_align(fb_size, 0x1000);
	msm_v4l2_video_overlay_resources[0].start = __pa(addr);
	msm_v4l2_video_overlay_resources[0].end =
		msm_v4l2_video_overlay_resources[0].start + fb_size - 1;
	pr_debug("allocating %lu bytes at %p (%lx physical) for v4l2\n",
		 fb_size, addr, __pa(addr));
#endif
}
/*
 * msm8x60_allocate_memory_regions() - reserve the framebuffer region.
 *
 * When HDMI is the primary display, a double buffer big enough for
 * 1920x1088 at 32 bpp is reserved (page-rounded); otherwise the
 * default MSM_FB_SIZE is used. The physical range is published via
 * msm_fb_resources[0].
 */
void __init msm8x60_allocate_memory_regions(void)
{
	unsigned long fb_len;
	void *fb_virt;

	fb_len = hdmi_is_primary ?
		 roundup((1920 * 1088 * 4 * 2), 4096) : MSM_FB_SIZE;

	fb_virt = alloc_bootmem_align(fb_len, 0x1000);
	msm_fb_resources[0].start = __pa(fb_virt);
	msm_fb_resources[0].end = msm_fb_resources[0].start + fb_len - 1;
	pr_info("allocating %lu bytes at %p (%lx physical) for fb\n",
		fb_len, fb_virt, __pa(fb_virt));
}
/*
 * msm_msm7x2x_allocate_memory_regions() - reserve the framebuffer.
 *
 * Selects the framebuffer size by board variant, records it in the
 * file-scope fb_size (which the original also updates — that side
 * effect is preserved), and reserves a 4 KiB-aligned bootmem region
 * published through msm_fb_resources[0].
 */
static void __init msm_msm7x2x_allocate_memory_regions(void)
{
	void *fb_virt;
	unsigned long len;

	/* fb_size is not declared here; it lives at file scope. */
	fb_size = (machine_is_msm7625a_surf() || machine_is_msm7625a_ffa()) ?
			MSM7x25A_MSM_FB_SIZE : MSM_FB_SIZE;
	len = fb_size;

	fb_virt = alloc_bootmem_align(len, 0x1000);
	msm_fb_resources[0].start = __pa(fb_virt);
	msm_fb_resources[0].end = msm_fb_resources[0].start + len - 1;
	pr_info("allocating %lu bytes at %p (%lx physical) for fb\n",
		len, fb_virt, __pa(fb_virt));
}
/*
 * Called at boot time while the bootmem allocator is active,
 * to allocate contiguous physical memory for the real memory
 * areas for guests.
 */
void kvm_rma_init(void)
{
	unsigned long i;
	unsigned long j, npages;
	void *rma;
	struct page *pg;

	/* Only do this on PPC970 in HV mode */
	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
	    !cpu_has_feature(CPU_FTR_ARCH_201))
		return;

	/* Nothing to do unless the admin asked for RMAs on the cmdline. */
	if (!kvm_rma_size || !kvm_rma_count)
		return;

	/* Check that the requested size is one supported in hardware */
	if (lpcr_rmls(kvm_rma_size) < 0) {
		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
		return;
	}

	npages = kvm_rma_size >> PAGE_SHIFT;
	/* One descriptor per RMA; bootmem panics rather than returns NULL
	 * on failure in this era, hence no NULL check here. */
	rma_info = alloc_bootmem(kvm_rma_count *
				 sizeof(struct kvmppc_rma_info));
	for (i = 0; i < kvm_rma_count; ++i) {
		/* Align each RMA on its own size, as the hardware requires. */
		rma = alloc_bootmem_align(kvm_rma_size, kvm_rma_size);
		pr_info("Allocated KVM RMA at %p (%ld MB)\n", rma,
			kvm_rma_size >> 20);
		rma_info[i].base_virt = rma;
		rma_info[i].base_pfn = __pa(rma) >> PAGE_SHIFT;
		rma_info[i].npages = npages;
		/* Start on the free list; guests take RMAs from here. */
		list_add_tail(&rma_info[i].list, &free_rmas);
		atomic_set(&rma_info[i].use_count, 0);

		/* Bump the refcount of every constituent page so the
		 * kernel never frees or migrates these frames. */
		pg = pfn_to_page(rma_info[i].base_pfn);
		for (j = 0; j < npages; ++j) {
			atomic_inc(&pg->_count);
			++pg;
		}
	}
}
/*
 * early_init_dt_alloc_memory_arch() - arch hook used by the flattened
 * devicetree code to get early-boot memory; satisfied straight from
 * the bootmem allocator with the caller-requested alignment.
 */
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	void *mem = alloc_bootmem_align(size, align);

	return mem;
}