Example #1
struct prom_pmemblock * __init prom_getmdesc(void)
{
	unsigned int memsize;

	memsize = 0x02000000;
	pr_info("Setting default memory size 0x%08x\n", memsize);

	memset(mdesc, 0, sizeof(mdesc));

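	/*
	 * Four-entry map: page zero and the kernel image are reserved,
	 * everything else below memsize is handed back as free memory.
	 */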
	mdesc[0].type = simmem_reserved;
	mdesc[0].base = 0x00000000;
	mdesc[0].size = 0x00001000;

	mdesc[1].type = simmem_free;
	mdesc[1].base = 0x00001000;
	mdesc[1].size = 0x000ff000;

	mdesc[2].type = simmem_reserved;
	mdesc[2].base = 0x00100000;
	mdesc[2].size = CPHYSADDR(PFN_ALIGN(&_end)) - mdesc[2].base;

	mdesc[3].type = simmem_free;
	mdesc[3].base = CPHYSADDR(PFN_ALIGN(&_end));
	mdesc[3].size = memsize - mdesc[3].base;

	return &mdesc[0];
}
Example #2
static struct prom_pmemblock * __init prom_getmdesc(void)
{
	char *memsize_str;
	unsigned int memsize;
	char *ptr;
	static char cmdline[COMMAND_LINE_SIZE] __initdata;

	/* otherwise look in the environment */
	memsize_str = prom_getenv("memsize");
	if (!memsize_str) {
		printk(KERN_WARNING
		       "memsize not set in boot prom, set to default (32Mb)\n");
		physical_memsize = 0x02000000;
	} else {
#ifdef DEBUG
		pr_debug("prom_memsize = %s\n", memsize_str);
#endif
		physical_memsize = simple_strtol(memsize_str, NULL, 0);
	}

#ifdef CONFIG_CPU_BIG_ENDIAN
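	/* SOC-it swaps, or perhaps doesn't swap, when DMA'ing the last
	   word of physical memory */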
	physical_memsize -= PAGE_SIZE;
#endif

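	/* Check the command line for a memsize directive that overrides
	   the physical/default amount */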
	strcpy(cmdline, arcs_cmdline);
	ptr = strstr(cmdline, "memsize=");
	if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
		ptr = strstr(ptr, " memsize=");

	if (ptr)
		memsize = memparse(ptr + 8, &ptr);
	else
		memsize = physical_memsize;

	memset(mdesc, 0, sizeof(mdesc));

	mdesc[0].type = yamon_dontuse;
	mdesc[0].base = 0x00000000;
	mdesc[0].size = 0x00001000;

	mdesc[1].type = yamon_prom;
	mdesc[1].base = 0x00001000;
	mdesc[1].size = 0x000ef000;

	mdesc[2].type = yamon_dontuse;
	mdesc[2].base = 0x000f0000;
	mdesc[2].size = 0x00010000;

	mdesc[3].type = yamon_dontuse;
	mdesc[3].base = 0x00100000;
	mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - mdesc[3].base;

	mdesc[4].type = yamon_free;
	mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
	mdesc[4].size = memsize - mdesc[4].base;

	return &mdesc[0];
}
Example #3
struct prom_pmemblock * __init prom_getmdesc(void)
{
	char *memsize_str;
	unsigned int memsize;

	memsize_str = prom_getenv("memsize");
	if (!memsize_str) {
		prom_printf("memsize not set in boot prom, set to default (32Mb)\n");
		memsize = 0x02000000;
	} else {
#ifdef DEBUG
		prom_printf("prom_memsize = %s\n", memsize_str);
#endif
		memsize = simple_strtol(memsize_str, NULL, 0);
	}

	memset(mdesc, 0, sizeof(mdesc));

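	/*
	 * YAMON memory map: page zero and the kernel image are marked
	 * "don't use", the PROM area below 1 MB is preserved, and the
	 * memory above the kernel image up to memsize is free.
	 */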
	mdesc[0].type = yamon_dontuse;
	mdesc[0].base = 0x00000000;
	mdesc[0].size = 0x00001000;

	mdesc[1].type = yamon_prom;
	mdesc[1].base = 0x00001000;
	mdesc[1].size = 0x000ef000;

#ifdef CONFIG_MIPS_MALTA
	/*
	 * The area 0x000f0000-0x000fffff is allocated for BIOS memory by the
	 * south bridge and PCI access is always forwarded to the ISA Bus and
	 * BIOSCS# is always generated.
	 * This means that this area can't be used as DMA memory for PCI
	 * devices.
	 */
	mdesc[2].type = yamon_dontuse;
	mdesc[2].base = 0x000f0000;
	mdesc[2].size = 0x00010000;
#else
	mdesc[2].type = yamon_prom;
	mdesc[2].base = 0x000f0000;
	mdesc[2].size = 0x00010000;
#endif

	mdesc[3].type = yamon_dontuse;
	mdesc[3].base = 0x00100000;
	mdesc[3].size = CPHYSADDR(PFN_ALIGN(&_end)) - mdesc[3].base;

	mdesc[4].type = yamon_free;
	mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
	mdesc[4].size = memsize - mdesc[4].base;

	return &mdesc[0];
}
Example #4
void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;

	max_mapnr = num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages. */

	reservedpages = 0;

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       max_mapnr << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10);
	printk("Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long)&_stext,
	       PFN_ALIGN((unsigned long)&_eshared) - 1);
}
Example #5
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
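			/*
			 * Non-coherent device: write back and invalidate the
			 * cached copy, then hand back an uncached mapping of
			 * the buffer.
			 */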
			dma_cache_wback_inv((unsigned long) ret, size);
#ifdef CONFIG_BRCM_CONSISTENT_DMA
			if (brcm_map_coherent(*dma_handle, ret, PFN_ALIGN(size),
					&ret, gfp)) {
				free_pages((unsigned long)ret, get_order(size));
				ret = NULL;
			}
#else
			ret = UNCAC_ADDR(ret);
#endif
		}
	}

	return ret;
}
Example #6
/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

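	/*
	 * The mfn_list is either a virtual address in the kernel mapping
	 * (translate it with __pa()) or described by a range of p2m frames.
	 */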
	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	if (!xen_is_e820_reserved(start, size)) {
		memblock_reserve(start, size);
		return;
	}

#ifdef CONFIG_X86_32
	/*
	 * Relocating the p2m on 32 bit system to an arbitrary virtual address
	 * is not supported, so just give up.
	 */
	xen_raw_console_write("Xen hypervisor allocated p2m list conflicts with E820 map\n");
	BUG();
#else
	xen_relocate_p2m();
#endif
}
Example #7
static void *__alloc(struct mem_pool *mpool, unsigned long size,
	unsigned long align, int cached, void *caller)
{
	unsigned long paddr;
	void __iomem *vaddr;

	unsigned long aligned_size;
	int log_align = ilog2(align);

	struct alloc *node;

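	/* Round the request up to a whole number of pages. */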
	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return NULL;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

#ifndef CONFIG_UML
	if (cached)
		vaddr = ioremap_cached(paddr, aligned_size);
	else
		vaddr = ioremap(paddr, aligned_size);
#else
	vaddr = NULL;	/* no remapping available on UML, fail the allocation */
#endif

	if (!vaddr)
		goto out_kfree;

	/*
	 * Just cast to an unsigned long to avoid warnings about casting from a
	 * pointer to an integer of different size. The pointer is only 32-bits
	 * so we lose no data.
	 */
	node->vaddr = (unsigned long)vaddr;
	node->paddr = paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	node->caller = caller;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;

	return vaddr;
out_kfree:
#ifndef CONFIG_UML
	if (vaddr)
		iounmap(vaddr);
#endif
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return NULL;
}
Example #8
void *allocate_contiguous_memory(unsigned long size,
	int mem_type, unsigned long align, int cached)
{
	unsigned long aligned_size = PFN_ALIGN(size);
	struct mem_pool *mpool;

	mpool = mem_type_to_memory_pool(mem_type);
	if (!mpool)
		return NULL;
	return __alloc(mpool, aligned_size, align, cached);
}
Example #9
unsigned long allocate_contiguous_memory_nomap(unsigned long size,
	int mem_type, unsigned long align)
{
	unsigned long paddr;
	unsigned long aligned_size;

	struct alloc *node;
	struct mem_pool *mpool;
	int log_align = ilog2(align);

	mpool = mem_type_to_memory_pool(mem_type);
	if (!mpool)
		return -EINVAL;

	if (!mpool->gpool)
		return -EAGAIN;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return -EAGAIN;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	node->paddr = paddr;

	/* We search the tree using node->vaddr, so set
	 * it to something unique even though we don't
	 * use it for physical allocation nodes.
	 * The virtual and physical address ranges
	 * are disjoint, so there won't be any chance of
	 * a duplicate node->vaddr value.
	 */
	node->vaddr = (unsigned long)paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;
	return paddr;
out_kfree:
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return -ENOMEM;
}
Example #10
static void *__alloc(struct mem_pool *mpool, unsigned long size,
	unsigned long align, int cached, void *caller)
{
	unsigned long paddr;
	void __iomem *vaddr;

	unsigned long aligned_size;
	int log_align = ilog2(align);

	struct alloc *node;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return NULL;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	if (cached)
		vaddr = ioremap_cached(paddr, aligned_size);
	else
		vaddr = ioremap(paddr, aligned_size);

	if (!vaddr)
		goto out_kfree;

	node->vaddr = (unsigned long)vaddr;
	node->paddr = paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	node->caller = caller;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;

	return vaddr;
out_kfree:
	if (vaddr)
		iounmap(vaddr);
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return NULL;
}
Example #11
static ssize_t __init setup_pcpu_4k(size_t static_size)
{
	size_t pages_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

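	/* Number of whole pages needed to hold the static per-cpu area. */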
	pcpu4k_nr_static_pages = PFN_UP(static_size);

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
			       * sizeof(pcpu4k_pages[0]));
	pcpu4k_pages = alloc_bootmem(pages_size);

	/* allocate and copy */
	j = 0;
	for_each_possible_cpu(cpu)
		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
			void *ptr;

			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr)
				goto enomem;

			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
			pcpu4k_pages[j++] = virt_to_page(ptr);
		}

	/* we're ready, commit */
	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
		pcpu4k_nr_static_pages, static_size);

	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, -1,
				     -1, NULL, pcpu4k_populate_pte);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpu4k_pages), pages_size);
	return ret;
}
Example #12
int orion_sysinit(void)
{
	unsigned long mem_size, free_start, free_end, start_pfn, bootmap_size;
	
	mips_machgroup = MACH_GROUP_ORION;
	/* 32 MB non-upgradable */
	mem_size = 32 << 20;
	
	free_start = PHYSADDR(PFN_ALIGN(&_end));
	free_end = mem_size;
	start_pfn = PFN_UP((unsigned long)&_end);
	
	/* Register all the contiguous memory with the bootmem allocator
	   and free it.  Be careful about the bootmem freemap.  */
	bootmap_size = init_bootmem(start_pfn, mem_size >> PAGE_SHIFT);
	
	/* Free the entire available memory after the _end symbol.  */
	free_start += bootmap_size;
	free_bootmem(free_start, free_end - free_start);

	return 0;
}
Example #13
phys_addr_t _allocate_contiguous_memory_nomap(unsigned long size,
	int mem_type, unsigned long align, void *caller)
{
	phys_addr_t paddr;
	unsigned long aligned_size;

	struct alloc *node;
	struct mem_pool *mpool;
	int log_align = ilog2(align);

	mpool = mem_type_to_memory_pool(mem_type);
	if (!mpool || !mpool->gpool)
		return 0;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return 0;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	node->paddr = paddr;

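	/* vaddr is only used as a unique lookup key for physical allocations. */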
	node->vaddr = paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	node->caller = caller;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;
	return paddr;
out_kfree:
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return 0;
}
Example #14
void __init mem_init(void)
{
	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));
	atomic_set(&init_mm.context.attach_count, 1);

	max_mapnr = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages. */

	mem_init_print_info(NULL);
	printk("Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long)&_stext,
	       PFN_ALIGN((unsigned long)&_eshared) - 1);
}
Example #15
struct prom_pmemblock * __init prom_getmdesc(void)
{
	unsigned int memsize;
	//char cmdline[CL_SIZE], *ptr;
	char *ptr;
	static char cmdline[COMMAND_LINE_SIZE] __initdata;

	mem_resource_start = CPHYSADDR(PFN_ALIGN(&_text));
	mem_resource_end = CPHYSADDR(PFN_ALIGN(&_end));
        	
#if 0
	/* otherwise look in the environment */
	memsize_str = prom_getenv("memsize");
	if (!memsize_str) {
		prom_printf("memsize not set in boot prom, set to default (64Mb)\n");
		physical_memsize = 0x04000000;
	} else {
#ifdef DEBUG
		prom_printf("prom_memsize = %s\n", memsize_str);
#endif
		physical_memsize = simple_strtol(memsize_str, NULL, 0);
	}

#ifdef CONFIG_CPU_BIG_ENDIAN
	/* SOC-it swaps, or perhaps doesn't swap, when DMA'ing the last
		 word of physical memory */
	physical_memsize -= PAGE_SIZE;
#endif
#endif
	physical_memsize = 0x04000000;
	/* Check the command line for a memsize directive that overrides
		 the physical/default amount */
	strcpy(cmdline, arcs_cmdline);
	ptr = strstr(cmdline, "memsize=");
	if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
		ptr = strstr(ptr, " memsize=");

	if (ptr)
		memsize = memparse(ptr + 8, &ptr);
	else
		memsize = physical_memsize;

	memset(mdesc, 0, sizeof(mdesc));

	mdesc[0].type = yamon_dontuse;
	mdesc[0].base = 0x00000000;
	mdesc[0].size = 0x00001000;

	mdesc[1].type = yamon_prom;
	mdesc[1].base = 0x00001000;
	mdesc[1].size = 0x000ef000;

	mdesc[2].type = yamon_prom;
	mdesc[2].base = 0x000f0000;
	mdesc[2].size = 0x00010000;

	mdesc[3].type = yamon_dontuse;
	mdesc[3].base = 0x00100000;
	mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - mdesc[3].base;

#if 0
	mdesc[4].type = yamon_free;
	mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
	mdesc[4].size = memsize - mdesc[4].base;
#else // Patch for Samsung
	mdesc[4].type = yamon_free;
	mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
	mdesc[4].size = 0x02000000 - mdesc[4].base;

	mdesc[5].type = yamon_dontuse;
	mdesc[5].base = 0x02000000;
	mdesc[5].size = 0x05000000 - mdesc[5].base;

#if 0
	mdesc[6].type = yamon_free;
	mdesc[6].base = 0x05000000;
	mdesc[6].size = memsize - mdesc[6].base;

	mdesc[6].type = yamon_free;
	mdesc[6].base = 0x30000000;
	mdesc[6].size = 0x10000000;
#endif
#endif

	return &mdesc[0];
}
Example #16
struct prom_pmemblock * __init prom_getmdesc(void)
{
    char *memsize_str;
    unsigned int memsize;
    char tmp_cmdline[COMMAND_LINE_SIZE];
    char *tmp_str;

    strlcpy(tmp_cmdline, arcs_cmdline, COMMAND_LINE_SIZE);
    tmp_str = find_args(tmp_cmdline, "debug_ros");
    if (tmp_str) {
        sscanf(tmp_str, "%x", &audio_addr);
        debug_flag = 1;
    }

    if (audio_addr == 0) {
        audio_addr = *((unsigned int *)0xa00000d8);
        audio_addr = reverseInt(audio_addr);
    }
    if ((audio_addr < 0x81000000) || (audio_addr >= 0x82000000)) {
        printk("**************************************************************\n");
        printk("You didn't specify audio image address, use default instead...\n");
        printk("**************************************************************\n");
        audio_addr = 0;
    }

    memsize_str = prom_getenv("memsize");
    if (!memsize_str) {
        prom_printf("memsize not set in boot prom, set to default (32Mb)\n");
        memsize = 0x02000000;
    } else {
#ifdef DEBUG
        prom_printf("prom_memsize = %s\n", memsize_str);
#endif
        memsize = simple_strtol(memsize_str, NULL, 0);
        if (memsize < 0x02000000) {
            panic("memsize cannot be less than 32Mb.\n");
        }
    }

    memset(mdesc, 0, sizeof(mdesc));

#ifdef CONFIG_REALTEK_MARS_RESERVE_LAST_48MB
    if (memsize == 0x10000000)
        memsize -= 0x03000000;
#endif

    // The last page is used to store the DRAM calibration parameter;
    // Jacky says that this page can be included in his RTOS memory region.
//	mdesc[4].type = yamon_dontuse;
//	mdesc[4].base = memsize - 4096;
//	mdesc[4].size = 4096;

#if CONFIG_REALTEK_RTOS_MEMORY_SIZE
    // If memsize is over 32Mb
    if(memsize > 0x02000000) {
        mdesc[4].type = yamon_free;
        mdesc[4].base = 0x02000000;
        mdesc[4].size = memsize - 0x02000000;
    }

    // This memory region is used by RTOS.
    mdesc[3].type = yamon_dontuse;
//	mdesc[3].base = mdesc[4].base - CONFIG_REALTEK_RTOS_MEMORY_SIZE;
    if (audio_addr != 0) {
        audio_addr = audio_addr-0x80000000;
        mdesc[3].base = audio_addr;
        mdesc[3].size = 0x02000000 - audio_addr;
    } else {
        mdesc[3].base = 0x02000000 - CONFIG_REALTEK_RTOS_MEMORY_SIZE;
        mdesc[3].size = CONFIG_REALTEK_RTOS_MEMORY_SIZE;
    }
    printk("audio addr: %x \n", audio_addr);

    // Kernel image is stored in 0x100000 - CPHYSADDR(PFN_ALIGN(&_end)).
    mdesc[2].type = yamon_free;
    mdesc[2].base = CPHYSADDR(PFN_ALIGN(&_end));
    mdesc[2].size = mdesc[3].base - CPHYSADDR(PFN_ALIGN(&_end));
#else
    // Kernel image is stored in 0x100000 - CPHYSADDR(PFN_ALIGN(&_end)).
    mdesc[2].type = yamon_free;
    mdesc[2].base = CPHYSADDR(PFN_ALIGN(&_end));
    mdesc[2].size = memsize - CPHYSADDR(PFN_ALIGN(&_end));
#endif

    mdesc[1].type = yamon_dontuse;
    mdesc[1].base = 0x100000;
    mdesc[1].size = CPHYSADDR(PFN_ALIGN(&_end))-0x100000;

    mdesc[0].type = yamon_free;
    mdesc[0].base = 0x0;
    mdesc[0].size = 0x100000;

    return &mdesc[0];
}
Example #17
struct prom_pmemblock * __init prom_getmdesc(void)
{
	char *memsize_str;
	unsigned int memsize;

	memsize_str = prom_getenv("memsize");
	if (!memsize_str) {
#ifdef CONFIG_MIPS_AVALANCHE_SOC
		prom_printf("memsize not set in boot prom, set to default (64Mb)\n");
		memsize = 0x04000000;
#else
		prom_printf("memsize not set in boot prom, set to default (32Mb)\n");
		memsize = 0x02000000;
#endif
	} else {
#ifdef DEBUG
		prom_printf("prom_memsize = %s\n", memsize_str);
#endif
		memsize = simple_strtol(memsize_str, NULL, 0);
	}

	memset(mdesc, 0, sizeof(mdesc));

#if defined(CONFIG_MIPS_AVALANCHE_SOC)

#define PREV_MDESC(x)  ((x) - 1)
    {
        struct prom_pmemblock *p_mdesc = &mdesc[0];
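	/* Lay the entries out back to back: each one starts where the previous one ends. */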
	p_mdesc->type = yamon_dontuse;	
	p_mdesc->base = 0x00000000;
	p_mdesc->size = AVALANCHE_SDRAM_BASE;

	p_mdesc++;
	p_mdesc->type = yamon_prom;
	p_mdesc->base = PREV_MDESC(p_mdesc)->base + PREV_MDESC(p_mdesc)->size;
	p_mdesc->size = PAGE_SIZE;

	p_mdesc++;
	p_mdesc->type = yamon_prom;    
	p_mdesc->base = PREV_MDESC(p_mdesc)->base + PREV_MDESC(p_mdesc)->size;
	p_mdesc->size = (CONFIG_MIPS_AVALANCHE_LOAD_ADDRESS - KSEG0ADDR(AVALANCHE_SDRAM_BASE)) - PAGE_SIZE;

	p_mdesc++;
	p_mdesc->type = yamon_dontuse;
	p_mdesc->base = PREV_MDESC(p_mdesc)->base + PREV_MDESC(p_mdesc)->size;
	p_mdesc->size = CPHYSADDR(PFN_ALIGN(&_end)) - p_mdesc->base;
	p_mdesc++;
	p_mdesc->type = yamon_free;
	p_mdesc->base = PREV_MDESC(p_mdesc)->base + PREV_MDESC(p_mdesc)->size;
	p_mdesc->size = memsize - (CPHYSADDR(PFN_ALIGN(&_end)) - AVALANCHE_SDRAM_BASE);
    }
#else 
	mdesc[0].type = yamon_dontuse;
	mdesc[0].base = 0x00000000;
	mdesc[0].size = 0x00001000;

	mdesc[1].type = yamon_prom;
	mdesc[1].base = 0x00001000;
	mdesc[1].size = 0x000ef000;

#ifdef CONFIG_MIPS_MALTA
	/*
	 * The area 0x000f0000-0x000fffff is allocated for BIOS memory by the
	 * south bridge and PCI access is always forwarded to the ISA Bus and
	 * BIOSCS# is always generated.
	 * This means that this area can't be used as DMA memory for PCI
	 * devices.
	 */
	mdesc[2].type = yamon_dontuse;
	mdesc[2].base = 0x000f0000;
	mdesc[2].size = 0x00010000;
#else
	mdesc[2].type = yamon_prom;
	mdesc[2].base = 0x000f0000;
	mdesc[2].size = 0x00010000;
#endif

	mdesc[3].type = yamon_dontuse;
	mdesc[3].base = 0x00100000;
	mdesc[3].size = CPHYSADDR(PFN_ALIGN(&_end)) - mdesc[3].base;

	mdesc[4].type = yamon_free;
	mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
	mdesc[4].size = memsize - mdesc[4].base;
#endif /* CONFIG_MIPS_AVALANCHE_SOC */	

	return &mdesc[0];
}
Example #18
struct prom_pmemblock * __init prom_getmdesc(void)
{
	char *memsize_str;
	unsigned int memsize;
	char cmdline[CL_SIZE], *ptr;

	/* otherwise look in the environment */
	memsize_str = prom_getenv("memsize");
	if (!memsize_str) {
		printk(KERN_WARNING
		       "memsize not set in boot prom, set to default (32Mb)\n");
		physical_memsize = 0x02000000;
	} else {
#ifdef DEBUG
		pr_debug("prom_memsize = %s\n", memsize_str);
#endif
		physical_memsize = simple_strtol(memsize_str, NULL, 0);
	}

#ifdef CONFIG_CPU_BIG_ENDIAN
	/* SOC-it swaps, or perhaps doesn't swap, when DMA'ing the last
	   word of physical memory */
	physical_memsize -= PAGE_SIZE;
#endif

	/* Check the command line for a memsize directive that overrides
	   the physical/default amount */
	strcpy(cmdline, arcs_cmdline);
	ptr = strstr(cmdline, "memsize=");
	if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
		ptr = strstr(ptr, " memsize=");

	if (ptr)
		memsize = memparse(ptr + 8, &ptr);
	else
		memsize = physical_memsize;

	memset(mdesc, 0, sizeof(mdesc));

	mdesc[0].type = yamon_dontuse;
	mdesc[0].base = 0x00000000;
	mdesc[0].size = 0x00001000;

	mdesc[1].type = yamon_prom;
	mdesc[1].base = 0x00001000;
	mdesc[1].size = 0x000ef000;

#ifdef CONFIG_MIPS_MALTA
	/*
	 * The area 0x000f0000-0x000fffff is allocated for BIOS memory by the
	 * south bridge and PCI access is always forwarded to the ISA Bus and
	 * BIOSCS# is always generated.
	 * This means that this area can't be used as DMA memory for PCI
	 * devices.
	 */
	mdesc[2].type = yamon_dontuse;
	mdesc[2].base = 0x000f0000;
	mdesc[2].size = 0x00010000;
#else
	mdesc[2].type = yamon_prom;
	mdesc[2].base = 0x000f0000;
	mdesc[2].size = 0x00010000;
#endif

	mdesc[3].type = yamon_dontuse;
	mdesc[3].base = 0x00100000;
	mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - mdesc[3].base;

	mdesc[4].type = yamon_free;
	mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
	mdesc[4].size = memsize - mdesc[4].base;

	return &mdesc[0];
}
Example #19
fw_memblock_t * __init fw_getmdesc(int eva)
{
	char *memsize_str, *ememsize_str = NULL, *ptr;
	unsigned long memsize = 0, ememsize = 0;
	static char cmdline[COMMAND_LINE_SIZE] __initdata;
	int tmp;

	/* otherwise look in the environment */

	memsize_str = fw_getenv("memsize");
	if (memsize_str)
		tmp = kstrtol(memsize_str, 0, &memsize);
	if (eva) {
	/* Look for ememsize for EVA */
		ememsize_str = fw_getenv("ememsize");
		if (ememsize_str)
			tmp = kstrtol(ememsize_str, 0, &ememsize);
	}
	if (!memsize && !ememsize) {
		pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
		physical_memsize = 0x02000000;
	} else {
		/* If ememsize is set, then set physical_memsize to that */
		physical_memsize = ememsize ? : memsize;
	}

#ifdef CONFIG_CPU_BIG_ENDIAN
	/* SOC-it swaps, or perhaps doesn't swap, when DMA'ing the last
	   word of physical memory */
	physical_memsize -= PAGE_SIZE;
#endif

	/* Check the command line for a memsize directive that overrides
	   the physical/default amount */
	strcpy(cmdline, arcs_cmdline);
	ptr = strstr(cmdline, "memsize=");
	if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
		ptr = strstr(ptr, " memsize=");
	/* And now look for ememsize */
	if (eva) {
		ptr = strstr(cmdline, "ememsize=");
		if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
			ptr = strstr(ptr, " ememsize=");
	}

	if (ptr)
		memsize = memparse(ptr + 8 + (eva ? 1 : 0), &ptr);
	else
		memsize = physical_memsize;

	/* Last 64K for HIGHMEM arithmetics */
	if (memsize > 0x7fff0000)
		memsize = 0x7fff0000;

	memset(mdesc, 0, sizeof(mdesc));

	mdesc[0].type = fw_dontuse;
	mdesc[0].base = PHYS_OFFSET;
	mdesc[0].size = 0x00001000;

	mdesc[1].type = fw_code;
	mdesc[1].base = mdesc[0].base + 0x00001000UL;
	mdesc[1].size = 0x000ef000;

	/*
	 * The area 0x000f0000-0x000fffff is allocated for BIOS memory by the
	 * south bridge and PCI access is always forwarded to the ISA Bus and
	 * BIOSCS# is always generated.
	 * This means that this area can't be used as DMA memory for PCI
	 * devices.
	 */
	mdesc[2].type = fw_dontuse;
	mdesc[2].base = mdesc[0].base + 0x000f0000UL;
	mdesc[2].size = 0x00010000;

	mdesc[3].type = fw_dontuse;
	mdesc[3].base = mdesc[0].base + 0x00100000UL;
	mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
		0x00100000UL;

	mdesc[4].type = fw_free;
	mdesc[4].base = mdesc[0].base + CPHYSADDR(PFN_ALIGN(&_end));
	mdesc[4].size = memsize - CPHYSADDR(mdesc[4].base);

	return &mdesc[0];
}
Example #20
fw_memblock_t * __init fw_getmdesc(void)
{
	char *memsize_str, *ptr;
	unsigned int memsize;
	static char cmdline[COMMAND_LINE_SIZE] __initdata;
	long val;
	int tmp;

	/* otherwise look in the environment */
	memsize_str = fw_getenv("memsize");
	if (!memsize_str) {
		pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
		physical_memsize = 0x02000000;
	} else {
		tmp = kstrtol(memsize_str, 0, &val);
		physical_memsize = (unsigned long)val;
	}

#ifdef CONFIG_CPU_BIG_ENDIAN
	/* SOC-it swaps, or perhaps doesn't swap, when DMA'ing the last
	   word of physical memory */
	physical_memsize -= PAGE_SIZE;
#endif

	/* Check the command line for a memsize directive that overrides
	   the physical/default amount */
	strcpy(cmdline, arcs_cmdline);
	ptr = strstr(cmdline, "memsize=");
	if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
		ptr = strstr(ptr, " memsize=");

	if (ptr)
		memsize = memparse(ptr + 8, &ptr);
	else
		memsize = physical_memsize;

	memset(mdesc, 0, sizeof(mdesc));

	mdesc[0].type = fw_dontuse;
	mdesc[0].base = 0x00000000;
	mdesc[0].size = 0x00001000;

	mdesc[1].type = fw_code;
	mdesc[1].base = 0x00001000;
	mdesc[1].size = 0x000ef000;

	/*
	 * The area 0x000f0000-0x000fffff is allocated for BIOS memory by the
	 * south bridge and PCI access is always forwarded to the ISA Bus and
	 * BIOSCS# is always generated.
	 * This means that this area can't be used as DMA memory for PCI
	 * devices.
	 */
	mdesc[2].type = fw_dontuse;
	mdesc[2].base = 0x000f0000;
	mdesc[2].size = 0x00010000;

	mdesc[3].type = fw_dontuse;
	mdesc[3].base = 0x00100000;
	mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
		mdesc[3].base;

	mdesc[4].type = fw_free;
	mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
	mdesc[4].size = memsize - mdesc[4].base;

	return &mdesc[0];
}
Example #21
static ssize_t __init setup_pcpu_remap(size_t static_size)
{
	static struct vm_struct vm;
	size_t ptrs_size, dyn_size;
	unsigned int cpu;
	ssize_t ret;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, on non-NUMA, embedding is better.
	 */
	if (!cpu_has_pse || !pcpu_need_numa())
		return -EINVAL;

	/*
	 * Currently supports only single page.  Supporting multiple
	 * pages won't be too difficult if it ever becomes necessary.
	 */
	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
			       PERCPU_DYNAMIC_RESERVE);
	if (pcpur_size > PMD_SIZE) {
		pr_warning("PERCPU: static data is larger than large page, "
			   "can't use large page\n");
		return -EINVAL;
	}
	dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;

	/* allocate pointer array and alloc large pages */
	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
	pcpur_ptrs = alloc_bootmem(ptrs_size);

	for_each_possible_cpu(cpu) {
		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
		if (!pcpur_ptrs[cpu])
			goto enomem;

		/*
		 * Only use pcpur_size bytes and give back the rest.
		 *
		 * Ingo: The 2MB up-rounding bootmem is needed to make
		 * sure the partial 2MB page is still fully RAM - it's
		 * not well-specified to have a PAT-incompatible area
		 * (unmapped RAM, device memory, etc.) in that hole.
		 */
		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
			     PMD_SIZE - pcpur_size);

		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
	}

	/* allocate address and map */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * PMD_SIZE;
	vm_area_register_early(&vm, PMD_SIZE);

	for_each_possible_cpu(cpu) {
		pmd_t *pmd;

		pmd = populate_extra_pmd((unsigned long)vm.addr
					 + cpu * PMD_SIZE);
		set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
				     PAGE_KERNEL_LARGE));
	}

	/* we're ready, commit */
	pr_info("PERCPU: Remapped at %p with large pages, static data "
		"%zu bytes\n", vm.addr, static_size);

	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
				     PMD_SIZE, vm.addr, NULL);
	goto out_free_ar;

enomem:
	for_each_possible_cpu(cpu)
		if (pcpur_ptrs[cpu])
			free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
	return ret;
}