Example #1
/* Reserve a portion of memory for CEU buffers */
static void __init migor_mv_mem_reserve(void)
{
	phys_addr_t phys;
	phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;

	phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	ceu_dma_membase = phys;
}
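Examples #1, #3, and #10 all use the same "carve out" idiom: memblock_alloc_base() is used only to find a suitably aligned free range, which is then handed back with memblock_free() and deleted from the memory map with memblock_remove(), so the core kernel never maps it and a driver can claim it later. A minimal sketch of the idiom, assuming a pre-v5.0 kernel where memblock_alloc_base() still exists; CARVEOUT_SIZE and carveout_base are illustrative names, not taken from any of the examples:

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

#define CARVEOUT_SIZE	SZ_16M			/* illustrative */

static phys_addr_t carveout_base;		/* consumed by a driver later */

static void __init carveout_reserve(void)
{
	phys_addr_t phys;

	/* Locate an aligned free range; on these kernels
	 * memblock_alloc_base() panics rather than returning 0. */
	phys = memblock_alloc_base(CARVEOUT_SIZE, PAGE_SIZE,
				   MEMBLOCK_ALLOC_ACCESSIBLE);

	/* Hand it back, then punch it out of the memory map entirely. */
	memblock_free(phys, CARVEOUT_SIZE);
	memblock_remove(phys, CARVEOUT_SIZE);

	carveout_base = phys;
}

The variant in Example #4 skips the memblock_free() step; once the range is removed from the memory map, the kernel will not hand it out again either way.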
Example #2
static void __init universal5433_reserve(void)
{
#ifdef CONFIG_MIPI_LLI
	/* mipi-lli */
	lli_phys_addr = memblock_alloc_base(MIPI_LLI_RESERVE_SIZE,
			MIPI_LLI_RESERVE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
	pr_info("memblock_reserve: [%#08lx-%#08lx] for mipi-lli\n",
			(unsigned long)lli_phys_addr,
			(unsigned long)lli_phys_addr + MIPI_LLI_RESERVE_SIZE);
#endif
	init_exynos_ion_contig_heap();
}
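Example #2 passes MIPI_LLI_RESERVE_SIZE as both the size and the alignment, producing a naturally aligned block whose base is a multiple of its own size; Example #14 does the same for the POWER9 partition table, where the hardware expects it. A sketch of the trick under the same pre-v5.0 API (RESERVE_SIZE is illustrative):

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

#define RESERVE_SIZE	SZ_8M	/* illustrative; must be a power of two */

static phys_addr_t __init reserve_naturally_aligned(void)
{
	/* align == size: the base becomes a multiple of the region size,
	 * so hardware can address the block with a simple base/mask pair */
	return memblock_alloc_base(RESERVE_SIZE, RESERVE_SIZE,
				   MEMBLOCK_ALLOC_ANYWHERE);
}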
Example #3
static void __init mx6q_sabrelite_reserve(void)
{
	phys_addr_t phys;

	if (imx6q_gpu_pdata.reserved_mem_size) {
		phys = memblock_alloc_base(imx6q_gpu_pdata.reserved_mem_size,
					   SZ_4K, SZ_1G);
		memblock_free(phys, imx6q_gpu_pdata.reserved_mem_size);
		memblock_remove(phys, imx6q_gpu_pdata.reserved_mem_size);
		imx6q_gpu_pdata.reserved_mem_base = phys;
	}
}
Example #4
static void __init mx6q_sabrelite_reserve(void)
{
#if defined(CONFIG_MXC_GPU_VIV) || defined(CONFIG_MXC_GPU_VIV_MODULE)
	phys_addr_t phys;

	if (imx6q_gpu_pdata.reserved_mem_size) {
		phys = memblock_alloc_base(imx6q_gpu_pdata.reserved_mem_size,
					   SZ_4K, SZ_1G);
		memblock_remove(phys, imx6q_gpu_pdata.reserved_mem_size);
		imx6q_gpu_pdata.reserved_mem_base = phys;
	}
#endif
}
Example #5
static void __init mx6_evk_reserve(void)
{
#if defined(CONFIG_MXC_GPU_VIV) || defined(CONFIG_MXC_GPU_VIV_MODULE)
	phys_addr_t phys;

	if (imx6q_gpu_pdata.reserved_mem_size) {
		phys = memblock_alloc_base(imx6q_gpu_pdata.reserved_mem_size,
					   SZ_4K, MEMBLOCK_ALLOC_ACCESSIBLE);
		memblock_remove(phys, imx6q_gpu_pdata.reserved_mem_size);
		imx6q_gpu_pdata.reserved_mem_base = phys;
	}
#endif
}
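Examples #4 and #5 differ essentially only in the third argument: SZ_1G caps the search below 1 GiB (the range the GPU can address on that board), MEMBLOCK_ALLOC_ACCESSIBLE bounds it by memblock's current accessible limit, and MEMBLOCK_ALLOC_ANYWHERE ignores that limit. Since memblock_alloc_base() panics on failure on these kernels, the non-panicking variant __memblock_alloc_base(), which returns 0 instead, was the tool for graceful fallbacks. A hedged sketch of a bounded allocation with such a fallback:

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

static phys_addr_t __init gpu_mem_reserve(phys_addr_t size)
{
	phys_addr_t phys;

	/* Prefer memory the device can actually address (below 1 GiB). */
	phys = __memblock_alloc_base(size, SZ_4K, SZ_1G);
	if (!phys)
		/* No low memory left: take anything accessible and let
		 * the caller decide whether that is usable. */
		phys = __memblock_alloc_base(size, SZ_4K,
					     MEMBLOCK_ALLOC_ACCESSIBLE);

	return phys;	/* 0 on complete failure */
}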
Example #6
static void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	if (init_bootmem_done)
		pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
	else
		pt = __va(memblock_alloc_base(size, size,
					 __pa(MAX_DMA_ADDRESS)));
	memset(pt, 0, size);

	return pt;
}
Example #7
static void __init plat_reserve(void)
{
#if defined(CONFIG_MXC_GPU_VIV) || defined(CONFIG_MXC_GPU_VIV_MODULE)
	phys_addr_t phys;

	if (plat_gpu.reserved_mem_size) {
		phys = memblock_alloc_base(plat_gpu.reserved_mem_size,
					   SZ_4K, SZ_1G);
		memblock_remove(phys, plat_gpu.reserved_mem_size);
		plat_gpu.reserved_mem_base = phys;
	}
#endif
}
Example #8
// ARM10C 20131207
// min: 0x20000, max_low: 0x4f800
static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	// start_pfn: 0x20000, end_pfn: 0x4f800, end_pfn - start_pfn: 0x2f800
	// boot_pages: 0x6
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);

	// boot_pages << PAGE_SHIFT: 0x6000, L1_CACHE_BYTES: 64
	// __pfn_to_phys(0x4f800); 0x4f800000
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);

	// pglist_data.bdata is set to the address of bootmem_node_data
	pgdat = NODE_DATA(0);

	// pgdat: ?, __phys_to_pfn(bitmap): ?, start_pfn: 0x20000, end_pfn: 0x4f800
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		// start: 0x20000
		unsigned long start = memblock_region_memory_base_pfn(reg);
		// end: 0xA0000
		unsigned long end = memblock_region_memory_end_pfn(reg);

		// end: 0xA0000, end_pfn: 0x4f800
		if (end >= end_pfn)
			// end: 0x4f800
			end = end_pfn;
		// start: 0x20000, end: 0x4f800
		if (start >= end)
			break;

		// __pfn_to_phys(0x20000): 0x20000000, (end - start) << PAGE_SHIFT: 0x2f800000
		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}
}
Example #9
static void *__init alloc_stack(unsigned long limit, int cpu)
{
	unsigned long pa;

	pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
					early_cpu_to_node(cpu), MEMBLOCK_NONE);
	if (!pa) {
		pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
		if (!pa)
			panic("cannot allocate stacks");
	}

	return __va(pa);
}
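Example #9 layers NUMA placement on top of the bounded allocation: memblock_alloc_base_nid() first tries the booting CPU's own node, and only if that node has no suitable memory below limit does the code fall back to a node-agnostic memblock_alloc_base() (which panics rather than return 0, making the second !pa check defensive). A sketch of the same fallback chain for a generic per-CPU buffer; BUF_SIZE is illustrative and early_cpu_to_node() is the powerpc helper used in the example:

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

#define BUF_SIZE	SZ_64K		/* illustrative */

static void *__init alloc_node_local_buf(int cpu, phys_addr_t limit)
{
	phys_addr_t pa;

	/* Try the CPU's home node first... */
	pa = memblock_alloc_base_nid(BUF_SIZE, BUF_SIZE, limit,
				     early_cpu_to_node(cpu), MEMBLOCK_NONE);
	/* ...and settle for any node below the limit otherwise. */
	if (!pa)
		pa = memblock_alloc_base(BUF_SIZE, BUF_SIZE, limit);

	return __va(pa);
}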
Example #10
static void __init mx6q_reserve(void)
{
	phys_addr_t phys;
	int i, fb0_reserved = 0, fb_array_size;

	/*
	 * Reserve primary framebuffer memory if its base address
	 * is set by kernel command line.
	 */
	fb_array_size = ARRAY_SIZE(sabr_fb_data);
	if (fb_array_size > 0 && sabr_fb_data[0].res_base[0] &&
	    sabr_fb_data[0].res_size[0]) {
		memblock_reserve(sabr_fb_data[0].res_base[0],
				 sabr_fb_data[0].res_size[0]);
		memblock_remove(sabr_fb_data[0].res_base[0],
				sabr_fb_data[0].res_size[0]);
		sabr_fb_data[0].late_init = true;
		ipu_data[ldb_data.ipu_id].bypass_reset = true;
		fb0_reserved = 1;
	}
	for (i = fb0_reserved; i < fb_array_size; i++)
		if (sabr_fb_data[i].res_size[0]) {
			/* Reserve for other background buffer. */
			phys = memblock_alloc(sabr_fb_data[i].res_size[0],
						SZ_4K);
			memblock_remove(phys, sabr_fb_data[i].res_size[0]);
			sabr_fb_data[i].res_base[0] = phys;
		}

#if defined(CONFIG_MXC_GPU_VIV) || defined(CONFIG_MXC_GPU_VIV_MODULE)
	if (imx6q_gpu_pdata.reserved_mem_size) {
		phys = memblock_alloc_base(imx6q_gpu_pdata.reserved_mem_size,
			SZ_4K, SZ_2G);
		memblock_remove(phys, imx6q_gpu_pdata.reserved_mem_size);
		imx6q_gpu_pdata.reserved_mem_base = phys;
	}
#endif

#if defined(CONFIG_ION)
	if (imx_ion_data.heaps[0].size) {
		phys = memblock_alloc(imx_ion_data.heaps[0].size, SZ_4K);
		memblock_free(phys, imx_ion_data.heaps[0].size);
		memblock_remove(phys, imx_ion_data.heaps[0].size);
		imx_ion_data.heaps[0].base = phys;
	}
#endif
}
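Example #10 also shows the other direction: when the framebuffer base is dictated by the kernel command line, the exact range is pinned with memblock_reserve() before being removed, instead of letting memblock pick a spot. A sketch of that path under the same pre-v5.0 API (the function name is hypothetical):

#include <linux/init.h>
#include <linux/memblock.h>

static void __init carveout_fixed(phys_addr_t base, phys_addr_t size)
{
	/* Pin the externally chosen range; a stricter caller would check
	 * memblock_is_region_reserved() for an overlap first. */
	memblock_reserve(base, size);
	memblock_remove(base, size);
}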
Example #11
void __init allocate_pacas(void)
{
	u64 limit;
	int cpu;
	int nr_cpus;

	limit = ppc64_rma_size;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * We can't take SLB misses on the paca, and we want to access them
	 * in real mode, so allocate them within the RMA and also within
	 * the first segment.
	 */
	limit = min(0x10000000ULL, limit);
#endif

	/*
	 * Always align up the nr_cpu_ids to SMT threads and allocate
	 * the paca. This will help us to prepare for a situation where
	 * boot cpu id > nr_cpu_ids. We will use the last nthreads
	 * slots (nthreads == threads per core) to accommodate a core
	 * that contains boot cpu thread.
	 *
	 * Do not change nr_cpu_ids value here. Let us do that in
	 * early_init_dt_scan_cpus() where we know exact value
	 * of threads per core.
	 */
	nr_cpus = _ALIGN_UP(nr_cpu_ids, MAX_SMT);
	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus);

	paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
	memset(paca, 0, paca_size);

	printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
		paca_size, nr_cpus, paca);

	allocate_lppacas(nr_cpus, limit);

	allocate_slb_shadows(nr_cpus, limit);

	/* Can't use for_each_*_cpu, as they aren't functional yet */
	for (cpu = 0; cpu < nr_cpus; cpu++)
		initialise_paca(&paca[cpu], cpu);
}
Example #12
/*
 * Call early during boot, before mem init or bootmem, to retrieve the RTAS
 * information from the device-tree and allocate the RMO buffer for userland
 * accesses.
 */
void __init rtas_initialize(void)
{
	unsigned long rtas_region = RTAS_INSTANTIATE_MAX;

	/* Get RTAS dev node and fill in our "rtas" structure with info
	 * about it.
	 */
	rtas.dev = of_find_node_by_name(NULL, "rtas");
	if (rtas.dev) {
		const __be32 *basep, *entryp, *sizep;

		basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
		sizep = of_get_property(rtas.dev, "rtas-size", NULL);
		if (basep != NULL && sizep != NULL) {
			rtas.base = __be32_to_cpu(*basep);
			rtas.size = __be32_to_cpu(*sizep);
			entryp = of_get_property(rtas.dev,
					"linux,rtas-entry", NULL);
			if (entryp == NULL) /* Ugh */
				rtas.entry = rtas.base;
			else
				rtas.entry = __be32_to_cpu(*entryp);
		} else
			rtas.dev = NULL;
	}
	if (!rtas.dev)
		return;

	/* If RTAS was found, allocate the RMO buffer for it and look for
	 * the stop-self token if any
	 */
#ifdef CONFIG_PPC64
	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) {
		rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
		ibm_suspend_me_token = rtas_token("ibm,suspend-me");
	}
#endif
	rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);

#ifdef CONFIG_RTAS_ERROR_LOGGING
	rtas_last_error_token = rtas_token("rtas-last-error");
#endif
}
Example #13
static void __init arm_bootmem_init(struct meminfo *mi,
	unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;
	int i;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		if (!bank->highmem)
			free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
	}

	/*
	 * Reserve the memblock reserved regions in bootmem.
	 */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
		if (start >= start_pfn &&
		    memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
				memblock_size_bytes(&memblock.reserved, i),
				BOOTMEM_DEFAULT);
	}
}
Example #14
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the Partition Table with no entries */
	memset((void *)partition_tb, 0, patb_size);

	/*
	 * update partition table control register,
	 * 64 K size.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}
Example #15
/*
 * Call early during boot, before mem init, to retrieve the RTAS
 * information from the device-tree and allocate the RMO buffer for userland
 * accesses.
 */
void __init rtas_initialize(void)
{
	unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
	u32 base, size, entry;
	int no_base, no_size, no_entry;

	/* Get RTAS dev node and fill in our "rtas" structure with info
	 * about it.
	 */
	rtas.dev = of_find_node_by_name(NULL, "rtas");
	if (!rtas.dev)
		return;

	no_base = of_property_read_u32(rtas.dev, "linux,rtas-base", &base);
	no_size = of_property_read_u32(rtas.dev, "rtas-size", &size);
	if (no_base || no_size) {
		of_node_put(rtas.dev);
		rtas.dev = NULL;
		return;
	}

	rtas.base = base;
	rtas.size = size;
	no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry);
	rtas.entry = no_entry ? rtas.base : entry;

	/* If RTAS was found, allocate the RMO buffer for it and look for
	 * the stop-self token if any
	 */
#ifdef CONFIG_PPC64
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
		ibm_suspend_me_token = rtas_token("ibm,suspend-me");
	}
#endif
	rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);

#ifdef CONFIG_RTAS_ERROR_LOGGING
	rtas_last_error_token = rtas_token("rtas-last-error");
#endif
}
Example #16
/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled.
 */
static void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
	 */
	limit = min(slb0_limit(), memblock.rmo_size);

	for_each_possible_cpu(i) {
		unsigned long sp;
		sp  = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
		sp += THREAD_SIZE;
		paca[i].emergency_sp = __va(sp);
	}
}
Example #17
static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;

	/*
	 * If there is no d-cache-size property in the device tree, l1d_size
	 * could be zero. That leads to the loop in the asm wrapping around to
	 * 2^64-1, and then walking off the end of the fallback area and
	 * eventually causing a page fault which is fatal. Just default to
	 * something vaguely sane.
	 */
	if (!l1d_size)
		l1d_size = (64 * 1024);

	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns to
	 * reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
	memset(l1d_flush_fallback_area, 0, l1d_size * 2);

	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];
		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}
Example #18
static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}
}
Example #19
static __ref void *early_alloc_pgtable(unsigned long size, int nid,
			unsigned long region_start, unsigned long region_end)
{
	unsigned long pa = 0;
	void *pt;

	if (region_start || region_end) /* has region hint */
		pa = memblock_alloc_range(size, size, region_start, region_end,
						MEMBLOCK_NONE);
	else if (nid != -1) /* has node hint */
		pa = memblock_alloc_base_nid(size, size,
						MEMBLOCK_ALLOC_ANYWHERE,
						nid, MEMBLOCK_NONE);

	if (!pa)
		pa = memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE);

	BUG_ON(!pa);

	pt = __va(pa);
	memset(pt, 0, size);

	return pt;
}
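Example #19 cascades three placement strategies: an explicit physical region hint (memblock_alloc_range()), then a NUMA node hint (memblock_alloc_base_nid()), then an unconstrained memblock_alloc_base(), with BUG_ON() guarding the combined result. Illustrative calls showing which tier each argument combination selects; the demo wrapper is hypothetical and assumes it sits in the same file as early_alloc_pgtable() above:

#include <linux/sizes.h>

static void __init pgtable_alloc_demo(int nid)
{
	void *pt;

	/* a region hint takes precedence over everything else */
	pt = early_alloc_pgtable(PAGE_SIZE, nid, SZ_1G, SZ_2G);
	/* no region: fall through to the node hint */
	pt = early_alloc_pgtable(PAGE_SIZE, nid, 0, 0);
	/* nid == -1 and no region: MEMBLOCK_ALLOC_ANYWHERE */
	pt = early_alloc_pgtable(PAGE_SIZE, -1, 0, 0);
	(void)pt;
}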
Example #20
static void imx6q_reserve(void)
{
	phys_addr_t phys;
	phys_addr_t max_phys;
	struct meminfo *mi;
	struct membank *bank;

#ifdef CONFIG_PSTORE_RAM
	mi = &meminfo;
	if (!mi) {
		pr_err("no memory reserve for ramoops.\n");
		return;
	}

	/* use the last memory bank for the ram console store */
	bank = &mi->bank[mi->nr_banks - 1];
	if (!bank) {
		pr_err("no memory reserve for ramoops.\n");
		return;
	}
	max_phys = bank->start + bank->size;
	/* allocate below the top 64M, which U-Boot uses, so the ram
	 * console data is not overwritten by U-Boot */
	phys = memblock_alloc_base(SZ_1M, SZ_4K, max_phys - SZ_64M);
	if (phys) {
		memblock_remove(phys, SZ_1M);
		memblock_reserve(phys, SZ_1M);
		ramoops_phys_addr = phys;
		ramoops_mem_size = SZ_1M;
	} else {
		ramoops_phys_addr = 0;
		ramoops_mem_size = 0;
		pr_err("no memory reserve for ramoops.\n");
	}
#endif
	return;
}
Example #21
static void __init wand_reserve(void)
{
	phys_addr_t phys;
	phys_addr_t total_mem = 0;
	int i;
	struct meminfo *mi = &meminfo;

	for (i = 0; i < mi->nr_banks; i++)
		total_mem += mi->bank[i].size;

#if 0
	int fb0_reserved = 0, fb_array_size;

	/*
	 * Reserve primary framebuffer memory if its base address
	 * is set by kernel command line.
	 */
	fb_array_size = ARRAY_SIZE(wand_fb_pdata);
	if (fb_array_size > 0 && wand_fb_pdata[0].res_base[0] &&
	    wand_fb_pdata[0].res_size[0]) {
		if (wand_fb_pdata[0].res_base[0] > SZ_2G)
			printk(KERN_INFO"UI Performance downgrade with FB phys address %x!\n",
			    wand_fb_pdata[0].res_base[0]);
		memblock_reserve(wand_fb_pdata[0].res_base[0],
				 wand_fb_pdata[0].res_size[0]);
		memblock_remove(wand_fb_pdata[0].res_base[0],
				wand_fb_pdata[0].res_size[0]);
		wand_fb_pdata[0].late_init = true;
		wand_ipu_data[wand_ldb_data.ipu_id].bypass_reset = true;
		fb0_reserved = 1;
	}
	for (i = fb0_reserved; i < fb_array_size; i++)
		if (wand_fb_pdata[i].res_size[0]) {
			/* Reserve for other background buffer. */
			phys = memblock_alloc_base(wand_fb_pdata[i].res_size[0],
						SZ_4K, total_mem);
			memblock_remove(phys, wand_fb_pdata[i].res_size[0]);
			wand_fb_pdata[i].res_base[0] = phys;
		}
#endif

#ifdef CONFIG_ANDROID_RAM_CONSOLE
	phys = memblock_alloc_base(SZ_1M, SZ_4K, total_mem);
	printk("EDWARD :  ram console init at phys 0x%x\n",phys);
	memblock_remove(phys, SZ_1M);
	memblock_free(phys, SZ_1M);
	ram_console_resource.start = phys;
	ram_console_resource.end   = phys + SZ_1M - 1;
#endif

#if defined(CONFIG_MXC_GPU_VIV) || defined(CONFIG_MXC_GPU_VIV_MODULE)
	if (wand_gpu_pdata.reserved_mem_size) {
		printk("EDWARD : GPU_Reserved Memory equals to %d\n",wand_gpu_pdata.reserved_mem_size);
			phys = memblock_alloc_base(wand_gpu_pdata.reserved_mem_size,
						   SZ_4K, total_mem);
		printk("EDWARD :  gpumem init at phys 0x%x\n",phys);
		memblock_remove(phys, wand_gpu_pdata.reserved_mem_size);
		wand_gpu_pdata.reserved_mem_base = phys;
	}
#endif

#if defined(CONFIG_ION)
	if (wand_ion_data.heaps[0].size) {
		phys = memblock_alloc(wand_ion_data.heaps[0].size, SZ_4K);
		memblock_remove(phys, wand_ion_data.heaps[0].size);
		wand_ion_data.heaps[0].base = phys;
	}
#endif
}
Example #22
u64 __init memblock_alloc(u64 size, u64 align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
}
Example #23
static void __init allocate_slb_shadows(int nr_cpus, int limit)
{
	int size = PAGE_ALIGN(sizeof(struct slb_shadow) * nr_cpus);
	slb_shadow = __va(memblock_alloc_base(size, PAGE_SIZE, limit));
	memset(slb_shadow, 0, size);
}
Example #24
int __init spc_memory_init(void)
{
#if 0
    void* bedram;
#endif

    // Quick sanity checks
    // Is SPC context area large enough for all SPC contexts?
    BUILD_BUG_ON((sizeof(spc_context_t) * NR_CPUS) > SPC_CONTEXT_SIZE);
    // Does struct fusedos_config_t fit into memory area for FusedOS config?
    BUILD_BUG_ON(sizeof(fusedos_config_t) > FUSEDOS_CONFIG_SIZE);

    // Put the SPC monitor, context, and config just below 1 GB
    spc_monitor = (void*)memblock_alloc_base(SPC_MONITOR_SIZE + SPC_CONTEXT_SIZE + FUSEDOS_CONFIG_SIZE,
                                             (phys_addr_t)(1ul << 24),  // align to 16 MB
                                             (phys_addr_t)(1ul << 30)); // below 1 GB
    if (!spc_monitor) {
        printk(KERN_ERR "FUSEDOS spc_memory_init: Cannot allocate spc_monitor.\n");
        return -2;
    }
    spc_context = (spc_context_t*)(__va(spc_monitor + SPC_MONITOR_SIZE));
    fusedos_config = (fusedos_config_t*)(__va(spc_monitor + SPC_MONITOR_SIZE + SPC_CONTEXT_SIZE));

    fusedos_config_init();

    if( fusedos_config->nr_spcs > 0 ) {
        spc_memory = __alloc_bootmem(
    	    ((unsigned long)SPC_MEMORY_SIZE) * (fusedos_config->nr_spcs),
    	    PAGE_SIZE, SPC_MEMORY_PADDR);
    
        if (__pa(spc_memory) < SPC_MEMORY_PADDR) {
            printk(KERN_ERR "FUSEDOS spc_memory_init: Cannot allocate spc_memory at 0x%x, 0x%lx\n",
                   SPC_MEMORY_PADDR, __pa(spc_memory));
            return -3;
        }
    }
    printk("FUSEDOS spc_memory_init: spc_monitor 0x%p, spc_context 0x%p, fusedos_config 0x%p\n",
           spc_monitor, spc_context, fusedos_config);
    printk("FUSEDOS spc_memory_init: spc_memory 0x%p, __pa(spc_memory) 0x%lx\n", spc_memory, __pa(spc_memory));
    printk("FUSEDOS spc_memory_init: _fw %p\n", _fw);

    // From firmware/src/fw_mmu.c, tlbwe_slot parameters calculated
    // with tests/fusedos/tlbwe_slot_defines
    //
    // NOTE: we force this into way 3 of the TLB set in order to avoid an A2 defect
    //       that does not properly honor IPROT (Linux relies on IPROT to keep the
    //       firmware TLB resident).
    // tlbwe_slot(
    //     3,
    //     MAS1_V(1) | MAS1_TID(0) | MAS1_TS(0) | MAS1_TSIZE_1GB | MAS1_IPROT(1),
    //     MAS2_EPN((PHYMAP_MINADDR_MMIO | PHYMAP_PRIVILEGEDOFFSET) >> 12) | MAS2_W(0) | MAS2_I(1) | MAS2_M(1) |
    //              MAS2_G(1) | MAS2_E(0),
    //     MAS7_3_RPN((PHYMAP_MINADDR_MMIO | PHYMAP_PRIVILEGEDOFFSET) >> 12) | MAS3_SR(1) | MAS3_SW(1) | MAS3_SX(1) |
    //                MAS3_UR(0) | MAS3_UW(0) | MAS3_UX(0) | MAS3_U1(1),
    //     MAS8_TGS(0) | MAS8_VF(0) | MAS8_TLPID(0),
    //     MMUCR3_X(0) | MMUCR3_R(1) |       MMUCR3_C(1) | MMUCR3_ECL(0) | MMUCR3_CLASS(1) |MMUCR3_ThdID(0xF)
    //     );
    //
#define SPRN_MMUCR3               (1023)           // Memory Management Unit Control Register 3
    asm volatile ("mtspr %0,%1": : "i" (SPRN_MAS0),      "r" (0x30000));
    asm volatile ("mtspr %0,%1": : "i" (SPRN_MAS1),      "r" (0xc0000a00));
    asm volatile ("mtspr %0,%1": : "i" (SPRN_MAS2),      "r" (0x3ffc000000e));
    asm volatile ("mtspr %0,%1": : "i" (SPRN_MAS7_MAS3), "r" (0x3ffc0000115));
    asm volatile ("mtspr %0,%1": : "i" (SPRN_MAS8),      "r" (0x0));
    asm volatile ("mtspr %0,%1": : "i" (SPRN_MMUCR3),    "r" (0x310f));
    asm volatile ("isync;" : : : "memory" );
    asm volatile ("tlbwe;" : : : "memory" );

    spc_context_init();

    return 0;
}
Example #25
phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}