Example no. 1
static void __init bootmem_init(void)
{
	unsigned long reserved_end;
	unsigned long mapstart = ~0UL;
	unsigned long bootmap_size;
	int i;

	/*
	 * Init any data related to initrd. It's a nop if INITRD is
	 * not selected. Once that is done we can determine the low bound
	 * of usable memory.
	 */
	reserved_end = max(init_initrd(),
			   (unsigned long) PFN_UP(__pa_symbol(&_end)));

	/*
	 * max_low_pfn is not a number of pages. The number of pages
	 * of the system is given by 'max_low_pfn - min_low_pfn'.
	 */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;

	/*
	 * Find the highest page frame number we have available.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (end > max_low_pfn)
			max_low_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
		if (end <= reserved_end)
			continue;
		if (start >= mapstart)
			continue;
		mapstart = max(reserved_end, start);
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");
	if (min_low_pfn > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
			min_low_pfn - ARCH_PFN_OFFSET);
	} else if (min_low_pfn < ARCH_PFN_OFFSET) {
		pr_info("%lu free pages won't be used\n",
			ARCH_PFN_OFFSET - min_low_pfn);
	}
	min_low_pfn = ARCH_PFN_OFFSET;

	/*
	 * Determine low and high memory ranges
	 */
	max_pfn = max_low_pfn;
	if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_low_pfn;
#endif
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
	}

	/*
	 * Initialize the boot-time allocator with low memory only.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
					 min_low_pfn, max_low_pfn);


	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (start <= min_low_pfn)
			start = min_low_pfn;
		if (start >= end)
			continue;

#ifndef CONFIG_HIGHMEM
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
#endif

		add_active_range(0, start, end);
	}

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end, size;

		/*
		 * Reserve usable memory.
		 */
		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end   = PFN_DOWN(boot_mem_map.map[i].addr
				    + boot_mem_map.map[i].size);
		/*
		 * We round the start address of usable memory up, and the
		 * end of the usable range down.
		 */
		if (start >= max_low_pfn)
			continue;
		if (start < reserved_end)
			start = reserved_end;
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
		size = end - start;

		/* Register lowmem ranges */
#ifdef CONFIG_BRCMSTB
		/* carve out space for bmem */
		brcm_free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
#else
		free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
#endif
		memory_present(0, start, end);
	}

	/*
	 * Reserve the bootmap memory.
	 */
	reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}
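
A note on the PFN_UP/PFN_DOWN/PFN_PHYS helpers that recur throughout these examples: they convert between physical addresses and page frame numbers, rounding up, rounding down, and converting a frame number back to an address respectively (Example no. 8 below defines them inline). A minimal standalone sketch of their rounding behaviour, assuming 4 KiB pages:

#include <stdio.h>

/* Same definitions as Example no. 8; PAGE_SHIFT 12 assumes 4 KiB pages. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)

int main(void)
{
	/* A region [0x1234, 0x5678) fully covers only frames 2..4, which is
	 * why the loops above take PFN_UP of a start and PFN_DOWN of an end. */
	printf("PFN_UP(0x1234)   = %lu\n", PFN_UP(0x1234UL));	/* 2 */
	printf("PFN_DOWN(0x5678) = %lu\n", PFN_DOWN(0x5678UL));	/* 5 */
	printf("PFN_PHYS(2)      = %#lx\n", PFN_PHYS(2UL));	/* 0x2000 */
	return 0;
}
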
Example no. 2
static inline void bootmem_init(void)
{
	unsigned long start_pfn;
	unsigned long reserved_end = (unsigned long)&_end;
#ifndef CONFIG_SGI_IP27
	unsigned long first_usable_pfn;
	unsigned long bootmap_size;
	int i;
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	int initrd_reserve_bootmem = 0;

	/* Board specific code should have set up initrd_start and initrd_end */
	ROOT_DEV = Root_RAM0;
	if (parse_rd_cmdline(&initrd_start, &initrd_end)) {
		reserved_end = max(reserved_end, initrd_end);
		initrd_reserve_bootmem = 1;
	} else {
		unsigned long tmp;
		u32 *initrd_header;

		tmp = ((reserved_end + PAGE_SIZE-1) & PAGE_MASK) - sizeof(u32) * 2;
		if (tmp < reserved_end)
			tmp += PAGE_SIZE;
		initrd_header = (u32 *)tmp;
		if (initrd_header[0] == 0x494E5244) {
			initrd_start = (unsigned long)&initrd_header[2];
			initrd_end = initrd_start + initrd_header[1];
			reserved_end = max(reserved_end, initrd_end);
			initrd_reserve_bootmem = 1;
		}
	}
#endif	/* CONFIG_BLK_DEV_INITRD */

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards.
	 */
	start_pfn = PFN_UP(CPHYSADDR(reserved_end));

#ifndef CONFIG_SGI_IP27
	/* Find the highest page frame number we have available.  */
	max_pfn = 0;
	first_usable_pfn = -1UL;
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
		      + boot_mem_map.map[i].size);

		if (start >= end)
			continue;
		if (end > max_pfn)
			max_pfn = end;
		if (start < first_usable_pfn) {
			if (start > start_pfn) {
				first_usable_pfn = start;
			} else if (end > start_pfn) {
				first_usable_pfn = start_pfn;
			}
		}
	}

	/*
	 * Determine low and high memory ranges
	 */
	max_low_pfn = max_pfn;
	if (max_low_pfn > MAXMEM_PFN) {
		max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
		/* Maximum memory usable is what is directly addressable */
		printk(KERN_WARNING "Warning only %ldMB will be used.\n",
		       MAXMEM >> 20);
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#endif
	}
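
The magic value 0x494E5244 probed above reads as the ASCII bytes 'I' 'N' 'R' 'D' on a big-endian CPU: the loader drops a two-word header into the last eight bytes of the page containing _end, so the ramdisk payload begins exactly at the next page boundary (&initrd_header[2]). An illustrative layout sketch; the struct name is ours, not the kernel's:

/* Hypothetical view of the in-memory initrd header probed above. */
struct initrd_header {
	u32 magic;	/* 0x494E5244, "INRD" on a big-endian CPU */
	u32 size;	/* length of the ramdisk image in bytes */
	/* ramdisk image follows, i.e. initrd_start = &initrd_header[2] */
};
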
Example no. 3
static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
					      phys_addr_t end_addr)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	unsigned long start, end;
	unsigned long len = 0;
	unsigned long pfn;
	int ret;

	start = PFN_UP(start_addr);
	end = PFN_DOWN(end_addr);

	if (end <= start)
		return 0;

	printk(KERN_INFO "xen_release_chunk: looking at area pfn %lx-%lx: ",
	       start, end);
	for(pfn = start; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		set_xen_guest_handle(reservation.extent_start, &mfn);
		reservation.nr_extents = 1;

		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
					   &reservation);
		WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n",
		     start, end, ret);
		if (ret == 1) {
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
			len++;
		}
	}
	printk(KERN_CONT "%ld pages freed\n", len);

	return len;
}

static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
						     const struct e820map *e820)
{
	phys_addr_t max_addr = PFN_PHYS(max_pfn);
	phys_addr_t last_end = ISA_END_ADDRESS;
	unsigned long released = 0;
	int i;

	/* Free any unused memory above the low 1Mbyte. */
	for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
		phys_addr_t end = e820->map[i].addr;
		end = min(max_addr, end);

		if (last_end < end)
			released += xen_release_chunk(last_end, end);
		last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
	}

	if (last_end < max_addr)
		released += xen_release_chunk(last_end, max_addr);

	printk(KERN_INFO "released %ld pages of unused memory\n", released);
	return released;
}
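
xen_return_unused_memory is a classic gap walk over an address-sorted map: last_end trails behind the scan, and every hole between it and the next entry's start is handed to xen_release_chunk. A standalone sketch of the same pattern, with made-up ranges and a stub visitor:

#include <stdio.h>

struct range { unsigned long start, size; };

static void visit_gap(unsigned long s, unsigned long e)
{
	printf("would release [%#lx, %#lx)\n", s, e);
}

int main(void)
{
	/* Two occupied ranges with a hole between 0x800000 and the limit. */
	struct range map[] = { { 0x0, 0x9f000 }, { 0x100000, 0x700000 } };
	unsigned long last_end = 0x100000;	/* skip the low 1M, as above */
	unsigned long max_addr = 0x1000000;	/* made-up 16 MiB limit */
	int i;

	for (i = 0; i < 2 && last_end < max_addr; i++) {
		unsigned long end = map[i].start < max_addr ?
				    map[i].start : max_addr;

		if (last_end < end)
			visit_gap(last_end, end);
		if (map[i].start + map[i].size > last_end)
			last_end = map[i].start + map[i].size;
	}
	if (last_end < max_addr)
		visit_gap(last_end, max_addr);	/* tail gap up to the limit */
	return 0;
}
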
Example no. 4
static inline void bootmem_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long tmp;
	unsigned long *initrd_header;
#endif
	unsigned long bootmap_size;
	unsigned long start_pfn, max_pfn, max_low_pfn, first_usable_pfn;
	int i;

#ifdef CONFIG_BLK_DEV_INITRD
	tmp = (((unsigned long)&_end + PAGE_SIZE-1) & PAGE_MASK) - 8;
	if (tmp < (unsigned long)&_end)
		tmp += PAGE_SIZE;
	initrd_header = (unsigned long *)tmp;
	if (initrd_header[0] == 0x494E5244) {
		initrd_start = (unsigned long)&initrd_header[2];
		initrd_end = initrd_start + initrd_header[1];
	}
	start_pfn = PFN_UP(__pa((&_end)+(initrd_end - initrd_start) + PAGE_SIZE));
#else
	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards.
	 */
	{
		unsigned long	*sp, len = 0;

		sp = (unsigned long *) &_end;
		
		if (memcmp(&sp[0], "-rom1fs-", 8) == 0) { /* romfs */
			len = be32_to_cpu(sp[2]);
			printk("romfs reserved %lu\n", len);
		} else if (sp[0] == 0x28cd3d45) { /* cramfs */
			len = sp[1];
			printk("cramfs reserved %lu\n", len);
		} else
			printk("NOFS reserved @ %p\n", sp);
		start_pfn = PFN_UP(__pa(&_end + len));
	}
#endif	/* CONFIG_BLK_DEV_INITRD */

	/* Find the highest page frame number we have available.  */
	max_pfn = 0;
	first_usable_pfn = -1UL;
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
		      + boot_mem_map.map[i].size);

		if (start >= end)
			continue;
		if (end > max_pfn)
			max_pfn = end;
		if (start < first_usable_pfn) {
			if (start > start_pfn) {
				first_usable_pfn = start;
			} else if (end > start_pfn) {
				first_usable_pfn = start_pfn;
			}
		}
	}

	/*
	 * Determine low and high memory ranges
	 */
	max_low_pfn = max_pfn;
	if (max_low_pfn > MAXMEM_PFN) {
		max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
		/* Maximum memory usable is what is directly addressable */
		printk(KERN_WARNING "Warning only %ldMB will be used.\n",
		       MAXMEM>>20);
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#endif
	}
Example no. 5
static inline void bootmem_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long tmp;
	unsigned long *initrd_header;
#endif
	unsigned long bootmap_size;
	unsigned long start_pfn, max_pfn;
	int i;

#ifdef CONFIG_BLK_DEV_INITRD
	tmp = (((unsigned long)&_end + PAGE_SIZE-1) & PAGE_MASK) - 8;
	if (tmp < (unsigned long)&_end)
		tmp += PAGE_SIZE;
	initrd_header = (unsigned long *)tmp;
	if (initrd_header[0] == 0x494E5244) {
		initrd_start = (unsigned long)&initrd_header[2];
		initrd_end = initrd_start + initrd_header[1];
	}
	start_pfn = PFN_UP(CPHYSADDR((&_end)+(initrd_end - initrd_start) + PAGE_SIZE));
#else
	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards.
	 */
	start_pfn = PFN_UP(CPHYSADDR(&_end));
#endif	/* CONFIG_BLK_DEV_INITRD */

#ifndef CONFIG_SGI_IP27
	/* Find the highest page frame number we have available.  */
	max_pfn = 0;
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
		      + boot_mem_map.map[i].size);

		if (start >= end)
			continue;
		if (end > max_pfn)
			max_pfn = end;
	}

	/* Initialize the boot-time allocator.  */
	bootmap_size = init_bootmem(start_pfn, max_pfn);

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long curr_pfn, last_pfn, size;

		/*
		 * Reserve usable memory.
		 */
		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(boot_mem_map.map[i].addr);
		if (curr_pfn >= max_pfn)
			continue;
		if (curr_pfn < start_pfn)
			curr_pfn = start_pfn;

		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(boot_mem_map.map[i].addr
				    + boot_mem_map.map[i].size);

		if (last_pfn > max_pfn)
			last_pfn = max_pfn;

		/*
		 * ... finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
	}

	/* Reserve the bootmap memory.  */
	reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size);
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	/* Board specific code should have set up initrd_start and initrd_end */
	ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
	if (&__rd_start != &__rd_end) {
		initrd_start = (unsigned long)&__rd_start;
		initrd_end = (unsigned long)&__rd_end;
	}
	initrd_below_start_ok = 1;
	if (initrd_start) {
		unsigned long initrd_size = ((unsigned char *)initrd_end) - ((unsigned char *)initrd_start);
		printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
		       (void *)initrd_start,
		       initrd_size);
/* FIXME: is this right? */
#ifndef CONFIG_SGI_IP27
		if (CPHYSADDR(initrd_end) > PFN_PHYS(max_pfn)) {
			printk("initrd extends beyond end of memory "
			       "(0x%p > 0x%p)\ndisabling initrd\n",
			       (void *)CPHYSADDR(initrd_end),
			       (void *)PFN_PHYS(max_pfn));
			initrd_start = 0;
		}
#endif /* !CONFIG_SGI_IP27 */
	}
#endif
}
Example no. 6
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long extra_pages = 0;
	unsigned long extra_limit;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	e820.nr_map = 0;
	xen_extra_mem_start = mem_end;
	for (i = 0; i < memmap.nr_entries; i++) {
		unsigned long long end = map[i].addr + map[i].size;

		if (map[i].type == E820_RAM) {
			if (map[i].addr < mem_end && end > mem_end) {
				/* Truncate region to max_mem. */
				u64 delta = end - mem_end;

				map[i].size -= delta;
				extra_pages += PFN_DOWN(delta);

				end = mem_end;
			}
		}

		if (end > xen_extra_mem_start)
			xen_extra_mem_start = end;

		/* If region is non-RAM or below mem_end, add what remains */
		if ((map[i].type != E820_RAM || map[i].addr < mem_end) &&
		    map[i].size > 0)
			e820_add_region(map[i].addr, map[i].size, map[i].type);
	}

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 *
	 * In Dom0, the host E820 information can leave gaps in the
	 * ISA range, which would cause us to release those pages.  To
	 * avoid this, we unconditionally reserve them here.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
		      __pa(xen_start_info->pt_base),
			"XEN START INFO");

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  max_pfn + extra_pages);

	if (extra_limit >= max_pfn)
		extra_pages = extra_limit - max_pfn;
	else
		extra_pages = 0;

	if (!xen_initial_domain())
		xen_add_extra_mem(extra_pages);

	return "Xen";
}
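
A worked instance of the clamp at the end may help; all numbers are made up, EXTRA_MEM_RATIO is given an illustrative value, and MAXMEM is assumed larger than the base allocation so the inner min() drops out:

#include <stdio.h>

#define EXTRA_MEM_RATIO	10UL	/* illustrative, not Xen's actual value */

int main(void)
{
	unsigned long max_pfn = 0x20000;	/* 512 MiB base, 4 KiB pages */
	unsigned long extra_pages = 0x200000;	/* 8 GiB of returned pages */
	unsigned long a = EXTRA_MEM_RATIO * max_pfn;	/* cap: 0x140000 */
	unsigned long b = max_pfn + extra_pages;	/* avail: 0x220000 */
	unsigned long extra_limit = a < b ? a : b;

	extra_pages = extra_limit >= max_pfn ? extra_limit - max_pfn : 0;
	/* Prints 0x120000: extra memory clamped to 9x the base size. */
	printf("clamped extra_pages = %#lx\n", extra_pages);
	return 0;
}
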
Example no. 7
/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows the creation of custom reserved areas for specific
 * devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	struct cma *cma = &cma_areas[cma_area_count];
	phys_addr_t alignment;
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base && fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
							limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	*res_cma = cma;
	cma_area_count++;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
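
As a usage sketch (the caller name and region size are illustrative, not from any particular board file), arch setup code could carve out a 64 MiB area with no fixed base and no limit:

/* Hypothetical caller of dma_contiguous_reserve_area(). Per the kerneldoc
 * above, base 0 and limit 0 mean "place it anywhere". */
static struct cma *example_cma;

static void __init example_reserve_cma(void)
{
	int ret = dma_contiguous_reserve_area(SZ_64M, 0, 0,
					      &example_cma, false);
	if (ret)
		pr_warn("example: CMA reservation failed: %d\n", ret);
}
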
Example no. 8
void __init setup_arch(char **cmdline_p)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, max_pfn, max_low_pfn;

#ifdef CONFIG_EARLY_PRINTK
	extern void enable_early_printk(void);

	enable_early_printk();
#endif
#ifdef CONFIG_CMDLINE_BOOL
        strcpy(COMMAND_LINE, CONFIG_CMDLINE);
#endif

	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	code_resource.start = virt_to_bus(_text);
	code_resource.end = virt_to_bus(_etext)-1;
	data_resource.start = virt_to_bus(_etext);
	data_resource.end = virt_to_bus(_edata)-1;

	sh_mv_setup(cmdline_p);

#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)

	/*
	 * Find the highest page frame number we have available
	 */
	max_pfn = PFN_DOWN(__pa(memory_end));

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Find a proper area for the bootmem bitmap. After this
	 * bootstrap step all allocations (until the page allocator
	 * is intact) must be done via bootmem_alloc().
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
					 __MEMORY_START>>PAGE_SHIFT,
					 max_low_pfn);
	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	{
		unsigned long curr_pfn, last_pfn, pages;

		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(__MEMORY_START);
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(__pa(memory_end));

		if (last_pfn > max_low_pfn)
			last_pfn = max_low_pfn;

		pages = last_pfn - curr_pfn;
		free_bootmem_node(NODE_DATA(0), PFN_PHYS(curr_pfn),
				  PFN_PHYS(pages));
	}

	/*
	 * Reserve the kernel text and
	 * Reserve the bootmem bitmap. We do this in two steps (first step
	 * was init_bootmem()), because this catches the (definitely buggy)
	 * case of us accidentally initializing the bootmem allocator with
	 * an invalid RAM area.
	 */
	reserve_bootmem_node(NODE_DATA(0), __MEMORY_START+PAGE_SIZE,
		(PFN_PHYS(start_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_node(NODE_DATA(0), __MEMORY_START, PAGE_SIZE);

#ifdef CONFIG_BLK_DEV_INITRD
	ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
	if (&__rd_start != &__rd_end) {
		LOADER_TYPE = 1;
		INITRD_START = PHYSADDR((unsigned long)&__rd_start) - __MEMORY_START;
		INITRD_SIZE = (unsigned long)&__rd_end - (unsigned long)&__rd_start;
	}

	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
			reserve_bootmem_node(NODE_DATA(0), INITRD_START+__MEMORY_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				    INITRD_START + INITRD_SIZE,
				    max_low_pfn << PAGE_SHIFT);
			initrd_start = 0;
		}
	}
#endif

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	/* Perform the machine specific initialisation */
	platform_setup();

	paging_init();
}
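
The two-step reserve pattern above exists because the bitmap that init_bootmem_node() places at start_pfn costs one bit per tracked page, and that bitmap must itself be protected from later bootmem allocations. A back-of-envelope sizing sketch, assuming 4 KiB pages (roughly what the old bootmem_bootmap_pages() computed):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* One bit per page, rounded up to whole pages. */
static unsigned long bitmap_bytes(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;

	return (bytes + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	/* 128 MiB of RAM = 32768 pages -> 4096 bitmap bytes -> one page. */
	printf("%lu\n", bitmap_bytes(32768UL));
	return 0;
}
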
Example no. 9
static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
{
    paddr_t ram_start, ram_end, ram_size;
    paddr_t contig_start, contig_end;
    paddr_t s, e;
    unsigned long ram_pages;
    unsigned long heap_pages, xenheap_pages, domheap_pages;
    unsigned long dtb_pages;
    unsigned long boot_mfn_start, boot_mfn_end;
    int i;
    void *fdt;

    if ( !early_info.mem.nr_banks )
        panic("No memory bank");

    /*
     * We are going to accumulate two regions here.
     *
     * The first is the bounds of the initial memory region which is
     * contiguous with the first bank. For simplicity the xenheap is
     * always allocated from this region.
     *
     * The second is the complete bounds of the regions containing RAM
     * (ie. from the lowest RAM address to the highest), which
     * includes any holes.
     *
     * We also track the number of actual RAM pages (i.e. not counting
     * the holes).
     */
    ram_size  = early_info.mem.bank[0].size;

    contig_start = ram_start = early_info.mem.bank[0].start;
    contig_end   = ram_end = ram_start + ram_size;

    for ( i = 1; i < early_info.mem.nr_banks; i++ )
    {
        paddr_t bank_start = early_info.mem.bank[i].start;
        paddr_t bank_size = early_info.mem.bank[i].size;
        paddr_t bank_end = bank_start + bank_size;

        paddr_t new_ram_size = ram_size + bank_size;
        paddr_t new_ram_start = min(ram_start,bank_start);
        paddr_t new_ram_end = max(ram_end,bank_end);

        /*
         * If the new bank is contiguous with the initial contiguous
         * region then incorporate it into the contiguous region.
         *
         * Otherwise we allow non-contiguous regions so long as at
         * least half of the total RAM region actually contains
         * RAM. We actually fudge this slightly and require that
         * adding the current bank does not cause us to violate this
         * restriction.
         *
         * This restriction ensures that the frametable (which is not
         * currently sparse) does not consume all available RAM.
         */
        if ( bank_start == contig_end )
            contig_end = bank_end;
        else if ( bank_end == contig_start )
            contig_start = bank_start;
        else if ( 2 * new_ram_size < new_ram_end - new_ram_start )
            /* Would create memory map which is too sparse, so stop here. */
            break;

        ram_size = new_ram_size;
        ram_start = new_ram_start;
        ram_end = new_ram_end;
    }

    if ( i != early_info.mem.nr_banks )
    {
        printk("WARNING: only using %d out of %d memory banks\n",
               i, early_info.mem.nr_banks);
        early_info.mem.nr_banks = i;
    }

    total_pages = ram_pages = ram_size >> PAGE_SHIFT;

    /*
     * Locate the xenheap using these constraints:
     *
     *  - must be 32 MiB aligned
     *  - must not include Xen itself or the boot modules
     *  - must be at most 1/8 the total RAM in the system
     *  - must be at least 128M
     *
     * We try to allocate the largest xenheap possible within these
     * constraints.
     */
    heap_pages = ram_pages;
    xenheap_pages = (heap_pages/8 + 0x1fffUL) & ~0x1fffUL;
    xenheap_pages = max(xenheap_pages, 128UL<<(20-PAGE_SHIFT));

    do
    {
        /* xenheap is always in the initial contiguous region */
        e = consider_modules(contig_start, contig_end,
                             pfn_to_paddr(xenheap_pages),
                             32<<20, 0);
        if ( e )
            break;

        xenheap_pages >>= 1;
    } while ( xenheap_pages > 128<<(20-PAGE_SHIFT) );

    if ( ! e )
        panic("Not not enough space for xenheap");

    domheap_pages = heap_pages - xenheap_pages;

    printk("Xen heap: %"PRIpaddr"-%"PRIpaddr" (%lu pages)\n",
            e - (pfn_to_paddr(xenheap_pages)), e, xenheap_pages);
    printk("Dom heap: %lu pages\n", domheap_pages);

    setup_xenheap_mappings((e >> PAGE_SHIFT) - xenheap_pages, xenheap_pages);

    /*
     * Need a single mapped page for populating bootmem_region_list
     * and enough mapped pages for copying the DTB.
     */
    dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;
    boot_mfn_start = xenheap_mfn_end - dtb_pages - 1;
    boot_mfn_end = xenheap_mfn_end;

    init_boot_pages(pfn_to_paddr(boot_mfn_start), pfn_to_paddr(boot_mfn_end));

    /* Copy the DTB. */
    fdt = mfn_to_virt(alloc_boot_pages(dtb_pages, 1));
    copy_from_paddr(fdt, dtb_paddr, dtb_size);
    device_tree_flattened = fdt;

    /* Add non-xenheap memory */
    for ( i = 0; i < early_info.mem.nr_banks; i++ )
    {
        paddr_t bank_start = early_info.mem.bank[i].start;
        paddr_t bank_end = bank_start + early_info.mem.bank[i].size;

        s = bank_start;
        while ( s < bank_end )
        {
            paddr_t n = bank_end;

            e = next_module(s, &n);

            if ( e == ~(paddr_t)0 )
            {
                e = n = ram_end;
            }

            /*
             * The module extends into a RAM bank other than the one we
             * are dealing with here, so clip it to this bank's end.
             */
            if ( e > bank_end )
                e = bank_end;

            /* Avoid the xenheap */
            if ( s < pfn_to_paddr(xenheap_mfn_start+xenheap_pages)
                 && pfn_to_paddr(xenheap_mfn_start) < e )
            {
                e = pfn_to_paddr(xenheap_mfn_start);
                n = pfn_to_paddr(xenheap_mfn_start+xenheap_pages);
            }

            dt_unreserved_regions(s, e, init_boot_pages, 0);

            s = n;
        }
    }

    /* Frame table covers all of RAM region, including holes */
    setup_frametable_mappings(ram_start, ram_end);
    max_page = PFN_DOWN(ram_end);

    /* Add xenheap memory that was not already added to the boot
       allocator. */
    init_xenheap_pages(pfn_to_paddr(xenheap_mfn_start),
                       pfn_to_paddr(boot_mfn_start));

    end_boot_allocator();
}
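
The sizing line xenheap_pages = (heap_pages/8 + 0x1fffUL) & ~0x1fffUL packs two facts: the heap target is 1/8 of RAM, and with 4 KiB pages 0x2000 pages is exactly 32 MiB, so the add-and-mask rounds the target up to the 32 MiB granularity the alignment constraint demands. A quick standalone check with a made-up RAM size:

#include <stdio.h>

int main(void)
{
	unsigned long heap_pages = 0x46000;	/* ~1.1 GiB of 4 KiB pages */
	unsigned long xenheap_pages =
		(heap_pages / 8 + 0x1fffUL) & ~0x1fffUL;

	/* 0x46000/8 = 0x8c00 pages, rounded up to a multiple of 0x2000
	 * pages (32 MiB): prints 0xa000, i.e. 160 MiB. */
	printf("%#lx\n", xenheap_pages);
	return 0;
}
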
Example no. 10
    /* Add xenheap memory that was not already added to the boot
       allocator. */
    init_xenheap_pages(pfn_to_paddr(xenheap_mfn_start),
                       pfn_to_paddr(boot_mfn_start));

    end_boot_allocator();
}
#else /* CONFIG_ARM_64 */
static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
{
    paddr_t ram_start = ~0;
    paddr_t ram_end = 0;
    paddr_t ram_size = 0;
    int bank;
    unsigned long dtb_pages;
    void *fdt;

    total_pages = 0;
    for ( bank = 0 ; bank < early_info.mem.nr_banks; bank++ )
    {
        paddr_t bank_start = early_info.mem.bank[bank].start;
        paddr_t bank_size = early_info.mem.bank[bank].size;
        paddr_t bank_end = bank_start + bank_size;
        paddr_t s, e;

        paddr_t new_ram_size = ram_size + bank_size;
        paddr_t new_ram_start = min(ram_start,bank_start);
        paddr_t new_ram_end = max(ram_end,bank_end);

        /*
         * We allow non-contiguous regions so long as at least half of
         * the total RAM region actually contains RAM. We actually
         * fudge this slightly and require that adding the current
         * bank does not cause us to violate this restriction.
         *
         * This restriction ensures that the frametable (which is not
         * currently sparse) does not consume all available RAM.
         */
        if ( bank > 0 && 2 * new_ram_size < new_ram_end - new_ram_start )
            /* Would create memory map which is too sparse, so stop here. */
            break;

        ram_start = new_ram_start;
        ram_end = new_ram_end;
        ram_size = new_ram_size;

        setup_xenheap_mappings(bank_start>>PAGE_SHIFT, bank_size>>PAGE_SHIFT);

        s = bank_start;
        while ( s < bank_end )
        {
            paddr_t n = bank_end;

            e = next_module(s, &n);

            if ( e == ~(paddr_t)0 )
            {
                e = n = bank_end;
            }

            if ( e > bank_end )
                e = bank_end;

            xenheap_mfn_end = e;

            dt_unreserved_regions(s, e, init_boot_pages, 0);
            s = n;
        }
    }

    if ( bank != early_info.mem.nr_banks )
    {
        printk("WARNING: only using %d out of %d memory banks\n",
               bank, early_info.mem.nr_banks);
        early_info.mem.nr_banks = bank;
    }

    total_pages += ram_size >> PAGE_SHIFT;

    xenheap_virt_end = XENHEAP_VIRT_START + ram_end - ram_start;
    xenheap_mfn_start = ram_start >> PAGE_SHIFT;
    xenheap_mfn_end = ram_end >> PAGE_SHIFT;
    xenheap_max_mfn(xenheap_mfn_end);

    /*
     * Need enough mapped pages for copying the DTB.
     */
    dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;

    /* Copy the DTB. */
    fdt = mfn_to_virt(alloc_boot_pages(dtb_pages, 1));
    copy_from_paddr(fdt, dtb_paddr, dtb_size);
    device_tree_flattened = fdt;

    setup_frametable_mappings(ram_start, ram_end);
    max_page = PFN_DOWN(ram_end);

    end_boot_allocator();
}
Example no. 11
static unsigned long __init setup_memory(void)
{
	unsigned long start_pfn, max_low_pfn, bootmap_size;

	start_pfn = PFN_UP( __pa(_end) );
	max_low_pfn = PFN_DOWN( __pa(memory_end) );

	/*
	 * Initialize the boot-time allocator (with low memory only):
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
		CONFIG_MEMORY_START>>PAGE_SHIFT, max_low_pfn);

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	{
		unsigned long curr_pfn;
		unsigned long last_pfn;
		unsigned long pages;

		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(__pa(memory_start));

		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(__pa(memory_end));

		if (last_pfn > max_low_pfn)
			last_pfn = max_low_pfn;

		pages = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
	}

	/*
	 * Reserve the kernel text and
	 * Reserve the bootmem bitmap. We do this in two steps (first step
	 * was init_bootmem()), because this catches the (definitely buggy)
	 * case of us accidentally initializing the bootmem allocator with
	 * an invalid RAM area.
	 */
	reserve_bootmem(CONFIG_MEMORY_START + PAGE_SIZE,
		(PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE - 1)
		- CONFIG_MEMORY_START);

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem(CONFIG_MEMORY_START, PAGE_SIZE);

	/*
	 * reserve memory hole
	 */
#ifdef CONFIG_MEMHOLE
	reserve_bootmem(CONFIG_MEMHOLE_START, CONFIG_MEMHOLE_SIZE);
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
			reserve_bootmem(INITRD_START, INITRD_SIZE);
			initrd_start = INITRD_START + PAGE_OFFSET;
			initrd_end = initrd_start + INITRD_SIZE;
			printk("initrd:start[%08lx],size[%08lx]\n",
				initrd_start, INITRD_SIZE);
		} else {
			printk("initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				INITRD_START + INITRD_SIZE,
				max_low_pfn << PAGE_SHIFT);

			initrd_start = 0;
		}
	}
#endif

	return max_low_pfn;
}
Example no. 12
int
init_boot_information(uint32_t magic, mbootinfo_t *mbi, sysinfo_t *sys_info)
{
	int i = 0;
	
	memset((void *) sys_info, 0x00, sizeof(sysinfo_t));
	 
 	/* Integrity check */
 	if(magic != MULTIBOOT_INFO_MAGIC)
	{
		printk("[INFO] Invalid magic number (%x)\n", magic);
		return -1;
	}
	
	if (!CHECK_BIT (mbi->mi_flags, MULTIBOOT_INFO_HAS_MEMORY))
	{
		printk
		(
			"[FATAL] There is no memory information "
			"on multiboot structure\n"
		);
		
		return -1;
	}

	sys_info->sys_kern_start = (vaddr_t) &KERN_TEXT_BEGIN;
	sys_info->sys_kern_end   = (vaddr_t) &KERN_TOP;
	
	sys_info->sys_lomem 	 = mbi->mi_mem_lower*1024;
	sys_info->sys_himem 	 = mbi->mi_mem_upper*1024;
        
	sys_info->sys_page_size  = PAGE_SIZE;
	
	/* HiMem is counted starting from the 1 MiB mark, so we have to add it */
	sys_info->sys_memory =  sys_info->sys_lomem + 
		sys_info->sys_himem + 1024*1024;
		
	paddr_t seg_start = 0, seg_end = 0;
	
	if (CHECK_BIT (mbi->mi_flags, MULTIBOOT_INFO_HAS_MMAP))
   	{
                struct multiboot_mmap *mmap;
    	
		#ifdef DEBUG
                printk("[mboot] Memory maps:\n");
		#endif
       
		for(mmap = (struct multiboot_mmap *) mbi->mi_mmap_addr;
			(unsigned long) mmap < mbi->mi_mmap_addr + mbi->mi_mmap_length;
                        mmap = (struct multiboot_mmap *) ((unsigned long) mmap
			+ mmap->mm_size + sizeof (mmap->mm_size)))
		{
			/* We have a static maximum limit on the number of segments */
			if (i == VM_MAX_SEGMENTS)
			{
				printk("[NOTICE] Too many segments increase VM_MAX_SEGMENTS\n");
				break;
			}
			
			/* Skip if the type is not AVAILABLE RAM */
			if(mmap->mm_type != AVL_RAM)
			{
				continue;
			}
			
			/* For now we use the low 32 bits of the address GRUB gives us */
			seg_start = mmap->mm_base_addr_low;
			seg_end	  = mmap->mm_base_addr_low + mmap->mm_length_low;
		
			if(seg_start < 0x100000 && seg_end > 0xa0000)
			{
				printk("[NOTICE] segment overlaps with ``Compatibility Holes''\n");
				
				sys_info->sys_addr_space.segments[i].seg_start = seg_start;
				sys_info->sys_addr_space.segments[i].seg_end   = 0xa0000;
				sys_info->sys_addr_space.segments[i].seg_type  = mmap->mm_type;
				sys_info->sys_addr_space.segments_number++;
				i++;
		
				sys_info->sys_addr_space.segments[i].seg_start = 0x100000;
				sys_info->sys_addr_space.segments[i].seg_end   = seg_end;
				sys_info->sys_addr_space.segments[i].seg_type  = mmap->mm_type;
				sys_info->sys_addr_space.segments_number++;
				i++;
			}
			else
			{
				sys_info->sys_addr_space.segments[i].seg_start = seg_start;
				sys_info->sys_addr_space.segments[i].seg_end   = seg_end;
				sys_info->sys_addr_space.segments[i].seg_type  = mmap->mm_type;
				sys_info->sys_addr_space.segments_number++;
				i++;
			}
                }
        }

	/* Initialize to the maximum possible PFN */
	sys_info->sys_minpfn = PFN_UP(0xffffffff);
	
	/* Initialize to the minimum possible PFN */
	sys_info->sys_maxpfn = PFN_DOWN(0);
	
	/* Find the minimum and the maximum page frame number */
	for(i = 0; i < sys_info->sys_addr_space.segments_number; i++)
	{
		sys_info->sys_minpfn = min(sys_info->sys_minpfn,
			PFN_UP(sys_info->sys_addr_space.segments[i].seg_start));
		
		sys_info->sys_maxpfn = max(sys_info->sys_maxpfn, 
			PFN_DOWN(sys_info->sys_addr_space.segments[i].seg_end));
	}
	
	sys_info->sys_pages = sys_info->sys_maxpfn+1;
	
	/*
	 * FIXME: There is a bug here: maxpfn is computed incorrectly,
	 * because it is taken relative to seg_end, and if seg_end falls
	 * in the middle of a page, max_pfn should be the page number
	 * corresponding to seg_end minus 1. It is left unfixed because
	 * the kernel crashes for unknown reasons when the change is made.
	 * If you work out why, Gaston, please fix it; otherwise it will
	 * be revisited later.
	 */
	
	#define PAGES_TO_MB(x) ((x) >> (20-PAGE_SHIFT))
	printk("[INFO] %ldMB LOWMEM available.\n", PAGES_TO_MB(sys_info->sys_maxpfn));

	if (CHECK_BIT (mbi->mi_flags, MULTIBOOT_INFO_HAS_VBE))
   	{
 		printk("[mboot] VBE Information present\n");
		DUMP(mbi->mi_vbe_control_info, "%p\n");
		DUMP(mbi->mi_vbe_mode_info, "%p\n");
		DUMP(mbi->mi_vbe_interface_seg, "%d\n");
		DUMP(mbi->mi_vbe_interface_off, "%d\n");
		DUMP(mbi->mi_vbe_interface_len, "%d\n");
   	}
	
   	if (CHECK_BIT (mbi->mi_flags, MULTIBOOT_INFO_HAS_DRIVES))
   	{
		#ifdef DEBUG
		int i, count;
		struct multiboot_drive *mbd = 
			(struct multiboot_drive *) mbi->mi_drives_addr;

		count = mbi->mi_drives_length / sizeof(struct multiboot_drive);

		printk("[mboot] Drives information:\n");

		for(i = 0; i < count; i++, mbd++)
		{
			printk
			(
				"  | Bios drive number: %d\n"
				"  | Drive Mode:        %s\n"
				"  | C = %d | H = %d | S = %d | BLOCKS = %d | Size = %d\n",
				mbd->md_number,
				(mbd->md_mode ? "LBA" : "CHS"),
				mbd->md_cylinders,
				mbd->md_heads,
				mbd->md_sectors,
				mbd->md_cylinders * mbd->md_heads * mbd->md_sectors,
				mbd->md_cylinders * mbd->md_heads * mbd->md_sectors * 512
			);
		}
		#endif /* DEBUG */
   	}
	
	#ifdef DEBUG
	if (CHECK_BIT (mbi->mi_flags, MULTIBOOT_INFO_HAS_BOOT_DEVICE))
	{
		printk ("[mboot] Device drive: 0x%x\n", 
			mbi->mi_boot_device_drive);
	} 
	
   	if (CHECK_BIT (mbi->mi_flags, MULTIBOOT_INFO_HAS_CMDLINE))
   	{
    	printk ("[mboot] Command line: %s\n", 
    		mbi->mi_cmdline);
   	}
	
	if (CHECK_BIT (mbi->mi_flags, MULTIBOOT_INFO_HAS_MODS))
    {
	   	struct multiboot_module *mod;
       	int i;
 
       	printk
       	(
       		"[mboot] Number of loaded modules: %d\n",
			(int) mbi->mi_mods_count
		);
       
 		mod = (void *) mbi->mi_mods_addr;
		
		for (i = 0; i < mbi->mi_mods_count; i++, mod++)
		{
			printk
        	(
        		"\t[%u] mod_start = 0x%x, mod_end = 0x%x, string = %s\n",
            	(unsigned) i,
            	(unsigned) mod->mm_mod_start,
                (unsigned) mod->mm_mod_end,
                (char *) mod->mm_string
        	);
       	}
    }
 
   	if (CHECK_BIT (mbi->mi_flags, MULTIBOOT_INFO_HAS_AOUT_SYMS) 
   		&& CHECK_BIT (mbi->mi_flags, MULTIBOOT_INFO_HAS_ELF_SYMS))
    {
    	printk("[Error] Cannot have both, ELF header and aout header\n");
       	return -1;
    }
 
   	/* Is the symbol table of a.out valid? */
   	if (CHECK_BIT (mbi->mi_flags, MULTIBOOT_INFO_HAS_AOUT_SYMS))
    {
    	struct multiboot_aout_symtab *aout_sym = &(mbi->u.aout_sym);
 
      	printk
      	(
      		"aout_symbol_table: tabsize = 0x%0x, "
            "strsize = 0x%x, addr = 0x%x\n",
            aout_sym->ma_tabsize,
            aout_sym->ma_strsize,
            aout_sym->ma_addr
        );
    }
 
   	/* Is the section header table of ELF valid? */
   	if (CHECK_BIT (mbi->mi_flags, MULTIBOOT_INFO_HAS_ELF_SYMS))
    {
    	struct multiboot_elf_sh_table *elf_sec = &(mbi->u.elf_sec);
 
       	printk
       	(
       		"[mboot] elf_sec: num = %u, size = 0x%x,"
            " addr = 0x%x, shndx = 0x%x\n",
            elf_sec->me_num,
            elf_sec->me_size,
            elf_sec->me_addr,
            elf_sec->me_shndx
        );
    }
 
   	if (CHECK_BIT (mbi->mi_flags, MULTIBOOT_INFO_HAS_CONFIG_TABLE))
   	{
 		
   	}

   	if (CHECK_BIT (mbi->mi_flags, MULTIBOOT_INFO_HAS_LOADER_NAME))
   	{
  		printk("[mboot] Loaded by: %s\n", mbi->mi_loader_name);
   	}
  	
   	if (CHECK_BIT (mbi->mi_flags, MULTIBOOT_INFO_HAS_APM_TABLE))
   	{
 		printk("[mboot] APM Information present\n");
 		
		struct multiboot_apm *apm_info = (struct multiboot_apm *) mbi->mi_apm_table;
		
		printk( "    version     : %x\n", apm_info->ma_version );
		printk( "    cseg        : %x\n", apm_info->ma_cseg );
		printk( "    offset      : %x\n", apm_info->ma_offset );
		printk( "    cseg_16     : %x\n", apm_info->ma_cseg_16 );
		printk( "    dseg        : %x\n", apm_info->ma_dseg );
		printk( "    flags       : %x\n", apm_info->ma_flags );
		printk( "    cseg_len    : %x\n", apm_info->ma_cseg_len );
		printk( "    cseg_16_len : %x\n", apm_info->ma_cseg_16_len );
		printk( "    dseg_len    : %x\n", apm_info->ma_dseg_len );
   	}
	#endif	/* DEBUG */
   	
 	return 0;	
 }
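
The split around the ``Compatibility Holes'' hardcodes the classic PC layout: conventional memory ends at 0xa0000 and usable RAM resumes at 0x100000, with the VGA and BIOS ranges in between. A standalone sketch of the same interval split (hole bounds from the code above, sample segment made up):

#include <stdio.h>

#define HOLE_START	0xa0000UL
#define HOLE_END	0x100000UL

int main(void)
{
	unsigned long seg_start = 0x0, seg_end = 0x8000000;	/* 128 MiB */

	if (seg_start < HOLE_END && seg_end > HOLE_START) {
		/* Segment straddles the hole: record it as two pieces. */
		printf("low:  [%#lx, %#lx)\n", seg_start, HOLE_START);
		printf("high: [%#lx, %#lx)\n", HOLE_END, seg_end);
	} else {
		printf("whole: [%#lx, %#lx)\n", seg_start, seg_end);
	}
	return 0;
}
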
Example no. 13
int __virt_addr_valid(const volatile void *kaddr)
{
	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
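
A typical use is to guard a linear-map translation against vmalloc, highmem, or stray pointers before calling virt_to_page(); a minimal hypothetical caller:

/* Hypothetical helper: translate only addresses the direct mapping backs. */
static struct page *virt_to_page_checked(const void *addr)
{
	if (!__virt_addr_valid(addr))
		return NULL;	/* not in the kernel's linear mapping */
	return virt_to_page(addr);
}
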
Example no. 14
void __init setup_arch(char **cmdline_p)
{
    int bootmap_size;

    memory_start = PAGE_ALIGN(_ramstart);
    memory_end = _ramend;

    init_mm.start_code = (unsigned long) &_stext;
    init_mm.end_code = (unsigned long) &_etext;
    init_mm.end_data = (unsigned long) &_edata;
    init_mm.brk = (unsigned long) 0;

    config_BSP(&command_line[0], sizeof(command_line));

#if defined(CONFIG_BOOTPARAM)
    strncpy(&command_line[0], CONFIG_BOOTPARAM_STRING, sizeof(command_line));
    command_line[sizeof(command_line) - 1] = 0;
#endif /* CONFIG_BOOTPARAM */

#if defined(CONFIG_UBOOT)
    /* CONFIG_UBOOT and CONFIG_BOOTPARAM defined, concatenate cmdline */
#if defined(CONFIG_BOOTPARAM)
    /* Add the whitespace separator */
    command_line[strlen(CONFIG_BOOTPARAM_STRING)] = ' ';
    /* Parse uboot command line into the rest of the buffer */
    parse_uboot_commandline(
        &command_line[(strlen(CONFIG_BOOTPARAM_STRING)+1)],
        (sizeof(command_line) -
         (strlen(CONFIG_BOOTPARAM_STRING)+1)));
    /* Only CONFIG_UBOOT defined, create cmdline */
#else
    parse_uboot_commandline(&command_line[0], sizeof(command_line));
#endif /* CONFIG_BOOTPARAM */
    command_line[sizeof(command_line) - 1] = 0;
#endif /* CONFIG_UBOOT */

    printk(KERN_INFO "\x0F\r\n\nuClinux/" CPU_NAME "\n");

#ifdef CONFIG_UCDIMM
    printk(KERN_INFO "uCdimm by Lineo, Inc. <www.lineo.com>\n");
#endif
#ifdef CONFIG_M68VZ328
    printk(KERN_INFO "M68VZ328 support by Evan Stawnyczy <*****@*****.**>\n");
#endif
#ifdef CONFIG_COLDFIRE
    printk(KERN_INFO "COLDFIRE port done by Greg Ungerer, [email protected]\n");
#ifdef CONFIG_M5307
    printk(KERN_INFO "Modified for M5307 by Dave Miller, [email protected]\n");
#endif
#ifdef CONFIG_ELITE
    printk(KERN_INFO "Modified for M5206eLITE by Rob Scott, [email protected]\n");
#endif
#endif
    printk(KERN_INFO "Flat model support (C) 1998,1999 Kenneth Albanowski, D. Jeff Dionne\n");

#if defined( CONFIG_PILOT ) && defined( CONFIG_M68328 )
    printk(KERN_INFO "TRG SuperPilot FLASH card support <*****@*****.**>\n");
#endif
#if defined( CONFIG_PILOT ) && defined( CONFIG_M68EZ328 )
    printk(KERN_INFO "PalmV support by Lineo Inc. <*****@*****.**>\n");
#endif
#ifdef CONFIG_DRAGEN2
    printk(KERN_INFO "DragonEngine II board support by Georges Menie\n");
#endif
#ifdef CONFIG_M5235EVB
    printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
#endif

    pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n",
             _stext, _etext, _sdata, _edata, __bss_start, __bss_stop);
    pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
             __bss_stop, memory_start, memory_start, memory_end);

    /* Keep a copy of command line */
    *cmdline_p = &command_line[0];
    memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
    boot_command_line[COMMAND_LINE_SIZE-1] = 0;

#if defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_DUMMY_CONSOLE)
    conswitchp = &dummy_con;
#endif

    /*
     * Give all the memory to the bootmap allocator, tell it to put the
     * boot mem_map at the start of memory.
     */
    min_low_pfn = PFN_DOWN(memory_start);
    max_pfn = max_low_pfn = PFN_DOWN(memory_end);

    bootmap_size = init_bootmem_node(
                       NODE_DATA(0),
                       min_low_pfn,		/* map goes here */
                       PFN_DOWN(PAGE_OFFSET),
                       max_pfn);
    /*
     * Free the usable memory, we have to make sure we do not free
     * the bootmem bitmap so we then reserve it after freeing it :-)
     */
    free_bootmem(memory_start, memory_end - memory_start);
    reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);

#if defined(CONFIG_UBOOT) && defined(CONFIG_BLK_DEV_INITRD)
    if ((initrd_start > 0) && (initrd_start < initrd_end) &&
            (initrd_end < memory_end))
        reserve_bootmem(initrd_start, initrd_end - initrd_start,
                        BOOTMEM_DEFAULT);
#endif /* if defined(CONFIG_BLK_DEV_INITRD) */

    /*
     * Get kmalloc into gear.
     */
    paging_init();
}
Example no. 15
static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
{
    paddr_t ram_start, ram_end, ram_size;
    paddr_t s, e;
    unsigned long ram_pages;
    unsigned long heap_pages, xenheap_pages, domheap_pages;
    unsigned long dtb_pages;
    unsigned long boot_mfn_start, boot_mfn_end;
    int i;
    void *fdt;

    if ( !bootinfo.mem.nr_banks )
        panic("No memory bank");

    init_pdx();

    ram_start = bootinfo.mem.bank[0].start;
    ram_size  = bootinfo.mem.bank[0].size;
    ram_end   = ram_start + ram_size;

    for ( i = 1; i < bootinfo.mem.nr_banks; i++ )
    {
        paddr_t bank_start = bootinfo.mem.bank[i].start;
        paddr_t bank_size = bootinfo.mem.bank[i].size;
        paddr_t bank_end = bank_start + bank_size;

        ram_size  = ram_size + bank_size;
        ram_start = min(ram_start,bank_start);
        ram_end   = max(ram_end,bank_end);
    }

    total_pages = ram_pages = ram_size >> PAGE_SHIFT;

    /*
     * Locate the xenheap using these constraints:
     *
     *  - must be 32 MiB aligned
     *  - must not include Xen itself or the boot modules
     *  - must be at most 1GB, or 1/8 of the total RAM in the system if that is less
     *  - must be at least 128M
     *
     * We try to allocate the largest xenheap possible within these
     * constraints.
     */
    heap_pages = ram_pages;
    xenheap_pages = (heap_pages/8 + 0x1fffUL) & ~0x1fffUL;
    xenheap_pages = max(xenheap_pages, 128UL<<(20-PAGE_SHIFT));
    xenheap_pages = min(xenheap_pages, 1UL<<(30-PAGE_SHIFT));

    do
    {
        e = consider_modules(ram_start, ram_end,
                             pfn_to_paddr(xenheap_pages),
                             32<<20, 0);
        if ( e )
            break;

        xenheap_pages >>= 1;
    } while ( xenheap_pages > 128<<(20-PAGE_SHIFT) );

    if ( ! e )
        panic("Not not enough space for xenheap");

    domheap_pages = heap_pages - xenheap_pages;

    printk("Xen heap: %"PRIpaddr"-%"PRIpaddr" (%lu pages)\n",
            e - (pfn_to_paddr(xenheap_pages)), e, xenheap_pages);
    printk("Dom heap: %lu pages\n", domheap_pages);

    setup_xenheap_mappings((e >> PAGE_SHIFT) - xenheap_pages, xenheap_pages);

    /*
     * Need a single mapped page for populating bootmem_region_list
     * and enough mapped pages for copying the DTB.
     */
    dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;
    boot_mfn_start = xenheap_mfn_end - dtb_pages - 1;
    boot_mfn_end = xenheap_mfn_end;

    init_boot_pages(pfn_to_paddr(boot_mfn_start), pfn_to_paddr(boot_mfn_end));

    /* Copy the DTB. */
    fdt = mfn_to_virt(alloc_boot_pages(dtb_pages, 1));
    copy_from_paddr(fdt, dtb_paddr, dtb_size);
    device_tree_flattened = fdt;

    /* Add non-xenheap memory */
    for ( i = 0; i < bootinfo.mem.nr_banks; i++ )
    {
        paddr_t bank_start = bootinfo.mem.bank[i].start;
        paddr_t bank_end = bank_start + bootinfo.mem.bank[i].size;

        s = bank_start;
        while ( s < bank_end )
        {
            paddr_t n = bank_end;

            e = next_module(s, &n);

            if ( e == ~(paddr_t)0 )
            {
                e = n = ram_end;
            }

            /*
             * The module extends into a RAM bank other than the one we
             * are dealing with here, so clip it to this bank's end.
             */
            if ( e > bank_end )
                e = bank_end;

            /* Avoid the xenheap */
            if ( s < pfn_to_paddr(xenheap_mfn_start+xenheap_pages)
                 && pfn_to_paddr(xenheap_mfn_start) < e )
            {
                e = pfn_to_paddr(xenheap_mfn_start);
                n = pfn_to_paddr(xenheap_mfn_start+xenheap_pages);
            }

            dt_unreserved_regions(s, e, init_boot_pages, 0);

            s = n;
        }
    }

    /* Frame table covers all of RAM region, including holes */
    setup_frametable_mappings(ram_start, ram_end);
    max_page = PFN_DOWN(ram_end);

    /* Add xenheap memory that was not already added to the boot
       allocator. */
    init_xenheap_pages(pfn_to_paddr(xenheap_mfn_start),
                       pfn_to_paddr(boot_mfn_start));

    end_boot_allocator();
}
Example no. 16
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}
Example no. 17
    /* Add xenheap memory that was not already added to the boot
       allocator. */
    init_xenheap_pages(pfn_to_paddr(xenheap_mfn_start),
                       pfn_to_paddr(boot_mfn_start));

    end_boot_allocator();
}
#else /* CONFIG_ARM_64 */
static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
{
    paddr_t ram_start = ~0;
    paddr_t ram_end = 0;
    paddr_t ram_size = 0;
    int bank;
    unsigned long dtb_pages;
    void *fdt;

    init_pdx();

    total_pages = 0;
    for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
    {
        paddr_t bank_start = bootinfo.mem.bank[bank].start;
        paddr_t bank_size = bootinfo.mem.bank[bank].size;
        paddr_t bank_end = bank_start + bank_size;
        paddr_t s, e;

        ram_size = ram_size + bank_size;
        ram_start = min(ram_start,bank_start);
        ram_end = max(ram_end,bank_end);

        setup_xenheap_mappings(bank_start>>PAGE_SHIFT, bank_size>>PAGE_SHIFT);

        s = bank_start;
        while ( s < bank_end )
        {
            paddr_t n = bank_end;

            e = next_module(s, &n);

            if ( e == ~(paddr_t)0 )
            {
                e = n = bank_end;
            }

            if ( e > bank_end )
                e = bank_end;

            xenheap_mfn_end = e;

            dt_unreserved_regions(s, e, init_boot_pages, 0);
            s = n;
        }
    }

    total_pages += ram_size >> PAGE_SHIFT;

    xenheap_virt_end = XENHEAP_VIRT_START + ram_end - ram_start;
    xenheap_mfn_start = ram_start >> PAGE_SHIFT;
    xenheap_mfn_end = ram_end >> PAGE_SHIFT;
    xenheap_max_mfn(xenheap_mfn_end);

    /*
     * Need enough mapped pages for copying the DTB.
     */
    dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;

    /* Copy the DTB. */
    fdt = mfn_to_virt(alloc_boot_pages(dtb_pages, 1));
    copy_from_paddr(fdt, dtb_paddr, dtb_size);
    device_tree_flattened = fdt;

    setup_frametable_mappings(ram_start, ram_end);
    max_page = PFN_DOWN(ram_end);

    end_boot_allocator();
}
Example no. 18
static void __init
setup_memory(void)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, end_pfn;
	int i;

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(&_end));
	end_pfn = max_pfn = PFN_DOWN(memory_end);

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * Move the initrd in case the bitmap of the bootmem allocator
	 * would overwrite it.
	 */

	if (INITRD_START && INITRD_SIZE) {
		unsigned long bmap_size;
		unsigned long start;

		bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
		bmap_size = PFN_PHYS(bmap_size);

		if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
			start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;

			if (start + INITRD_SIZE > memory_end) {
				printk("initrd extends beyond end of memory "
				       "(0x%08lx > 0x%08lx)\n"
				       "disabling initrd\n",
				       start + INITRD_SIZE, memory_end);
				INITRD_START = INITRD_SIZE = 0;
			} else {
				printk("Moving initrd (0x%08lx -> 0x%08lx, "
				       "size: %ld)\n",
				       INITRD_START, start, INITRD_SIZE);
				memmove((void *) start, (void *) INITRD_START,
					INITRD_SIZE);
				INITRD_START = start;
			}
		}
	}
#endif

	/*
	 * Initialize the boot-time allocator
	 */
	bootmap_size = init_bootmem(start_pfn, end_pfn);

	/*
	 * Register RAM areas with the bootmem allocator.
	 */

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long start_chunk, end_chunk, pfn;

		if (memory_chunk[i].type != CHUNK_READ_WRITE)
			continue;
		start_chunk = PFN_DOWN(memory_chunk[i].addr);
		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
		end_chunk = min(end_chunk, end_pfn);
		if (start_chunk >= end_chunk)
			continue;
		add_active_range(0, start_chunk, end_chunk);
		pfn = max(start_chunk, start_pfn);
		for (; pfn <= end_chunk; pfn++)
			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
	}

	psw_set_key(PAGE_DEFAULT_KEY);

	free_bootmem_with_active_regions(0, max_pfn);

	/*
	 * Reserve memory used for lowcore/command line/kernel image.
	 */
	reserve_bootmem(0, (unsigned long)_ehead);
	reserve_bootmem((unsigned long)_stext,
			PFN_PHYS(start_pfn) - (unsigned long)_stext);
	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);

#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		if (INITRD_START + INITRD_SIZE <= memory_end) {
			reserve_bootmem(INITRD_START, INITRD_SIZE);
			initrd_start = INITRD_START;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       initrd_start + INITRD_SIZE, memory_end);
			initrd_start = initrd_end = 0;
		}
	}
#endif
}
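
The relocation logic at the top is an interval-overlap test: the initrd only has to move when the bitmap that init_bootmem() will place at start_pfn would reach into it. A standalone sketch with made-up addresses:

#include <stdio.h>

int main(void)
{
	unsigned long bitmap_start = 0x800000;	/* PFN_PHYS(start_pfn) */
	unsigned long bitmap_size  = 0x20000;	/* bitmap bytes, made up */
	unsigned long initrd_start = 0x810000;	/* INITRD_START, made up */

	if (bitmap_start + bitmap_size > initrd_start)
		/* Move past the bitmap plus a page of slack, as above. */
		printf("move initrd to %#lx\n",
		       bitmap_start + bitmap_size + 0x1000);
	else
		printf("initrd can stay\n");
	return 0;
}
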
Example no. 19
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;
	static struct e820entry map_raw[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long extra_pages = 0;
	unsigned long extra_limit;
	unsigned long identity_pages = 0;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	memcpy(map_raw, map, sizeof(map));
	e820.nr_map = 0;
#ifdef CONFIG_X86_32
	xen_extra_mem_start = mem_end;
#else
	xen_extra_mem_start = max((1ULL << 32), mem_end);
#endif
	for (i = 0; i < memmap.nr_entries; i++) {
		unsigned long long end;

		/* Guard against non-page aligned E820 entries. */
		if (map[i].type == E820_RAM)
			map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;

		end = map[i].addr + map[i].size;
		if (map[i].type == E820_RAM && end > mem_end) {
			/* RAM off the end - may be partially included */
			u64 delta = min(map[i].size, end - mem_end);

			map[i].size -= delta;
			end -= delta;

			extra_pages += PFN_DOWN(delta);
			/*
			 * Set RAM below 4GB that is not for us to be unusable.
			 * This prevents "System RAM" address space from being
			 * used as potential resource for I/O address (happens
			 * when 'allocate_resource' is called).
			 */
			if (delta &&
				(xen_initial_domain() && end < 0x100000000ULL))
				e820_add_region(end, delta, E820_UNUSABLE);
		}

		if (map[i].size > 0 && end > xen_extra_mem_start)
			xen_extra_mem_start = end;

		/* Add region if any remains */
		if (map[i].size > 0)
			e820_add_region(map[i].addr, map[i].size, map[i].type);
	}

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 *
	 * In Dom0, the host E820 information can leave gaps in the
	 * ISA range, which would cause us to release those pages.  To
	 * avoid this, we unconditionally reserve them here.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
		      __pa(xen_start_info->pt_base),
			"XEN START INFO");

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  max_pfn + extra_pages);

	if (extra_limit >= max_pfn)
		extra_pages = extra_limit - max_pfn;
	else
		extra_pages = 0;

	xen_add_extra_mem(extra_pages);

	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs. We supply it with the non-sanitized version
	 * of the E820.
	 */
	identity_pages = xen_set_identity(map_raw, memmap.nr_entries);
	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);
	return "Xen";
}