/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	/* Sum total memory in pages by walking every memblock region. */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	/* Scale by the reservation ratio (percent) and convert pages to bytes. */
	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		/*
		 * Old CPUs require HPT aligned on a multiple of its size. So for them
		 * make the alignment as max size we could request.
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			align_size = __rounddown_pow_of_two(selected_size);
		else
			align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;

		/* Never align below the RMA size required per guest. */
		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
		kvm_cma_declare_contiguous(selected_size, align_size);
	}
}
/**
 * vos_rounddown_pow_of_two() - Round down to nearest power of two
 * @n: number to be tested
 *
 * Test if the input number is a power of two, and return the nearest power
 * of two that is less than or equal to @n.
 *
 * Return: @n rounded down to the nearest power of two, or 0 if @n is 0
 * (there is no power of two <= 0, and passing 0 to __rounddown_pow_of_two()
 * is undefined behavior — it shifts by fls_long(0) - 1 == -1).
 */
unsigned long vos_rounddown_pow_of_two(unsigned long n)
{
	if (n == 0)
		return 0;	/* guard: __rounddown_pow_of_two(0) is UB */

	if (is_power_of_2(n))
		return n;	/* already a power of 2 */

	return __rounddown_pow_of_two(n);
}
/*
 * Probe a p5ioc2 IO-Hub device-tree node: register its TCE memory with
 * OPAL and initialize every child PHB, carving the TCE buffer up evenly
 * (rounded down to a power of two) between the PHBs.
 */
void __init pnv_pci_init_p5ioc2_hub(struct device_node *np)
{
	struct device_node *phbn;
	const __be64 *prop64;
	u64 hub_id;
	void *tce_mem;
	uint64_t tce_per_phb;
	int64_t rc;
	int phb_count = 0;

	pr_info("Probing p5ioc2 IO-Hub %s\n", np->full_name);

	/* The hub id is mandatory; without it we cannot talk to OPAL. */
	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err("  Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_info("  HUB-ID : 0x%016llx\n", hub_id);

	/* Count child PHBs and calculate TCE space per PHB */
	for_each_child_of_node(np, phbn) {
		if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
		    of_device_is_compatible(phbn, "ibm,p5ioc2-pciex"))
			phb_count++;
	}

	if (phb_count <= 0) {
		pr_info("  No PHBs for Hub %s\n", np->full_name);
		return;
	}

	/* Power-of-two share per PHB; any remainder is simply unused. */
	tce_per_phb = __rounddown_pow_of_two(P5IOC2_TCE_MEMORY / phb_count);
	pr_info("  Allocating %lld MB of TCE memory per PHB\n",
		tce_per_phb >> 20);

	/* Currently allocate 16M of TCE memory for every Hub
	 *
	 * XXX TODO: Make it chip local if possible
	 */
	tce_mem = memblock_virt_alloc(P5IOC2_TCE_MEMORY, P5IOC2_TCE_MEMORY);
	pr_debug("  TCE    : 0x%016lx..0x%016lx\n",
		__pa(tce_mem), __pa(tce_mem) + P5IOC2_TCE_MEMORY - 1);
	/*
	 * Hand the buffer to firmware. NOTE(review): on failure the message
	 * says "allocate", but the allocation above succeeded — this reports
	 * the OPAL registration failing; the memblock memory is not released.
	 */
	rc = opal_pci_set_hub_tce_memory(hub_id, __pa(tce_mem),
					P5IOC2_TCE_MEMORY);
	if (rc != OPAL_SUCCESS) {
		pr_err("  Failed to allocate TCE memory, OPAL error %lld\n", rc);
		return;
	}

	/* Initialize PHBs */
	/* Second pass: give each PHB its slice of the TCE buffer. */
	for_each_child_of_node(np, phbn) {
		if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
		    of_device_is_compatible(phbn, "ibm,p5ioc2-pciex")) {
			pnv_pci_init_p5ioc2_phb(phbn, hub_id,
					tce_mem, tce_per_phb);
			tce_mem += tce_per_phb;
		}
	}
}
/*
 * Reprogram the MMCIF clock divider for the requested rate @clk.
 * The clock is gated and the divider cleared first; a @clk of 0 leaves
 * the clock disabled. When the platform supplies the peripheral clock
 * directly and the rates match, bypass the divider via CLK_SUP_PCLK;
 * otherwise select the largest power-of-two divisor not exceeding
 * host->clk / clk.
 */
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct sh_mmcif_plat_data *pdata = host->pd->dev.platform_data;

	/* Gate the clock and reset the divider before touching it. */
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (clk == 0)
		return;

	if (pdata->sup_pclk && host->clk == clk) {
		/* Source clock already matches: feed it through unscaled. */
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
	} else {
		unsigned int div = ilog2(__rounddown_pow_of_two(host->clk / clk));

		/* Divider exponent lives in bits [19:16] of CLK_CTRL. */
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL,
				CLK_CLEAR & (div << 16));
	}

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}