Example #1
void setup_arch(void) {  
  setup_memory_map();

  finish_e820_parsing();

  max_pfn = e820_end_of_ram_pfn();

  /* preallocate 4k for mptable mpc */
  /*
  early_reserve_e820_mpc_new();
  */
  
  /* update e820 for memory not covered by WB MTRRs */
  /*
  mtrr_bp_init();
  if (mtrr_trim_uncached_memory(max_pfn)) {
    max_pfn = e820_end_of_ram_pfn();
  }
  */

  /* max_low_pfn gets updated here */
  find_low_pfn_range();

  printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n",
         max_pfn_mapped<<PAGE_SHIFT);

  max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
  max_pfn_mapped = max_low_pfn_mapped;

  initmem_init(0, max_pfn);
}
Example #2
void __init init()
{
	e820map.setup();
	lowmem::init();
	gdt.init();
	init_memory_mapping();
	//idt.init();
	apic.init();
	acpi::init();
	io_apic.init();
	//init_processors();
	////copy_ap_startup_code();
	//init_percpu_section();
}
Example #3
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
				 u32 type)
{
	unsigned long last_map_pfn;

	if (type == EFI_MEMORY_MAPPED_IO)
		return ioremap(phys_addr, size);

	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
		return NULL;

	return (void __iomem *)__va(phys_addr);
}
Example #4
/*
 * Memory is always added to the NORMAL zone. This means you will never
 * get additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, (start + size - 1));

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	return ret;
error:
	printk("%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
Example #5
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
				 u32 type, u64 attribute)
{
	unsigned long last_map_pfn;

	if (type == EFI_MEMORY_MAPPED_IO)
		return ioremap(phys_addr, size);

	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
		unsigned long top = last_map_pfn << PAGE_SHIFT;
		efi_ioremap(top, size - (top - phys_addr), type, attribute);
	}

	if (!(attribute & EFI_MEMORY_WB))
		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);

	return (void __iomem *)__va(phys_addr);
}
Example #6
void init_prog(t_tissue_stack *t) {
	t->plug_actions = plug_actions_from_external_plugin;

	t->percent_init = percent_init_direct;
	t->percent_add = percent_add_direct;
	t->percent_get = percent_get_direct;
	t->percent_cancel = percent_cancel_direct;
	t->is_percent_paused_cancel = is_percent_paused_cancel;
	t->clean_pause_queue = clean_pause_queue;

	t->percent_pause = percent_pause_direct;
	t->percent_resume = percent_resume_direct;

	t->tasks = malloc(sizeof(*t->tasks));

	t->tasks->f = NULL;
	t->tasks->add_to_queue = task_add_queue;
	t->tasks->path = strdup(CONCAT_APP_PATH("tasks/general"));
	t->tasks->path_tmp = strdup(CONCAT_APP_PATH("tasks/general.tmp"));
	t->tasks->is_running = FALSE;
	pthread_mutex_init(&t->tasks->mutex, NULL);
	pthread_mutex_init(&t->tasks->queue_mutex, NULL);

	t->tile_requests = malloc(sizeof(*t->tile_requests));
	init_tile_requests(t->tile_requests);
	t->memory_mappings = NULL;
#ifdef USE_MM
	t->memory_mappings = malloc(sizeof(*t->memory_mappings));
	init_memory_mapping(t->memory_mappings);
#endif
	t->get_volume = get_volume;
	t->check_volume = check_volume;
	t->clean_quit = clean_quit;
	t->first = NULL;
	t->first_notification = NULL;
	pthread_cond_init(&t->main_cond, NULL);
	pthread_mutex_init(&t->main_mutex, NULL);
	nc_create_notification("log_debug", lc_debug, t);
	nc_create_notification("log_info", lc_info, t);
	nc_create_notification("log_warning", lc_warning, t);
	nc_create_notification("log_error", lc_error, t);
	nc_create_notification("log_fatal", lc_fatal, t);
	t->create_notification = nc_create_notification;
	t->subscribe = nc_subscribe;
	t->raise = nc_raise;
	t->log = malloc(sizeof(*t->log));
	t->log->state = ON;
	t->log->path = strdup(CONCAT_APP_PATH("logs/"));

#ifndef _TSS_DEBUG_
	t->log->debug = OFF;
	t->log->verbose = OFF;
#else
	t->log->debug = ON;
	t->log->verbose = ON;
#endif

	t->log->write_on_files = ON;
	t->log->write_on_plug_files = OFF;
	t->log->write_on_level_files = ON;
	log_plugin.id = pthread_self();
	log_plugin.tss = t;
	if (t->log->state) {
		// make sure directory exists !
		t_string_buffer * actualPath = createDirectory(t->log->path, 0766);
		// couldn't create directory
		if (actualPath == NULL) {
			ERROR("Couldn't create %s", t->log->path);
			t->log->state = OFF; // turn logging off
		} else {
			//path = concat_path(actualPath->buffer, "tss-general", ".log");
			free_t_string_buffer(actualPath);
		}

	}
	init_func_ptr(t);
	init_percent_time(t, strdup(CONCAT_APP_PATH("tasks/")));
}
Example #7
void __init setup_arch(char **cmdline_p)
{
	printk(KERN_INFO "Command line: %s\n", saved_command_line);

 	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
 	screen_info = SCREEN_INFO;
	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	if (!strncmp(EFI_LOADER_SIG, "EFIL", 4))
		efi_enabled = 1;
#endif
	setup_memory_region();
	copy_edd();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;
	init_mm.pgd = __va(__pa_symbol(&init_level4_pgt));

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext)-1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata)-1;

	parse_cmdline_early(cmdline_p);

	finish_e820_parsing();

	early_identify_cpu(&boot_cpu_data);

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;		/* for pfn_valid */

	check_efer();

	discover_ebda();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
	if (efi_enabled)
		efi_map_memmap();

	dmi_scan_machine();

	/*
	 * VMware detection requires dmi to be available, so this
	 * needs to be done after dmi_scan_machine, for the BP.
	 */
	init_hypervisor(&boot_cpu_data);

	/*
	 * init_hypervisor gets called more than one time throughout
	 * the life of the cpu, but that hurts kvm. We only need it
	 * once, so we do it explicitly here
	 */
	if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_KVM)
		kvmclock_init();

#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn); 
#else
	contig_initmem_init(0, end_pfn);
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT, 
				(table_end - table_start) << PAGE_SHIFT,
				BOOTMEM_DEFAULT);

	/* reserve kernel */
	reserve_bootmem_generic(__pa_symbol(&_text),
				__pa_symbol(&_end) - __pa_symbol(&_text),
				BOOTMEM_DEFAULT);

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE, BOOTMEM_DEFAULT);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size,
					BOOTMEM_DEFAULT);

#ifdef CONFIG_SMP
	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE,
				BOOTMEM_DEFAULT);
#endif

#ifdef CONFIG_ACPI_SLEEP
       /*
        * Reserve low memory region for sleep support.
        */
       acpi_reserve_bootmem();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			reserve_bootmem_generic(INITRD_START, INITRD_SIZE,
						BOOTMEM_DEFAULT);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		}
		else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			    (unsigned long)(INITRD_START + INITRD_SIZE),
			    (unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#ifdef CONFIG_KEXEC
	if ((crashk_res.start < crashk_res.end) &&
	    (crashk_res.end <= (end_pfn << PAGE_SHIFT))) {
		if (reserve_bootmem_generic(crashk_res.start,
					crashk_res.end - crashk_res.start + 1,
					BOOTMEM_EXCLUSIVE) < 0) {
			printk(KERN_ERR "crashkernel reservation failed - "
					"memory is in use\n");
			crashk_res.start = crashk_res.end = 0;
		}
	}
	else {
		printk(KERN_ERR "Memory for crash kernel (0x%lx to 0x%lx) not"
		       "within permissible range\ndisabling kdump\n",
		       crashk_res.start, crashk_res.end);
		crashk_res.end = 0;
		crashk_res.start = 0;
	}
#endif

	reserve_ibft_region();

	paging_init();

	check_ioapic();

	/*
	 * set this early, so we don't allocate cpu0
	 * if MADT list doesn't list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
#endif

	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
	probe_roms();
	e820_reserve_resources(); 
	e820_mark_nosave_regions();

	request_resource(&iomem_resource, &video_ram_resource);

	{
	unsigned i;
	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
	}

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
Example #8
void __init setup_arch(char **cmdline_p)
{
	unsigned long kernel_end;

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
	struct e820entry *machine_e820;
	struct xen_memory_map memmap;
#endif

#ifdef CONFIG_XEN
	/* Register a call for panic conditions. */
	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);

 	ROOT_DEV = MKDEV(RAMDISK_MAJOR,0); 
	kernel_end = 0;		/* dummy */
 	screen_info = SCREEN_INFO;

	if (xen_start_info->flags & SIF_INITDOMAIN) {
		/* This is drawn from a dump from vgacon:startup in
		 * standard Linux. */
		screen_info.orig_video_mode = 3;
		screen_info.orig_video_isVGA = 1;
		screen_info.orig_video_lines = 25;
		screen_info.orig_video_cols = 80;
		screen_info.orig_video_ega_bx = 3;
		screen_info.orig_video_points = 16;
	} else
		screen_info.orig_video_isVGA = 0;

	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);


#endif

	setup_xen_features();

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_writable_pagetables);

	ARCH_SETUP
#else
 	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
 	screen_info = SCREEN_INFO;
	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
#endif	/* !CONFIG_XEN */
	setup_memory_region();
	copy_edd();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

#ifndef CONFIG_XEN
	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
#endif

	parse_cmdline_early(cmdline_p);

	early_identify_cpu(&boot_cpu_data);

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;		/* for pfn_valid */

	check_efer();

#ifndef CONFIG_XEN
	discover_ebda();
#endif

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn); 
#else
	contig_initmem_init(0, end_pfn);
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT, 
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
	reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

#ifdef CONFIG_XEN
	/* reserve physmap, start info and initial page tables */
	reserve_bootmem(kernel_end, (table_start<<PAGE_SHIFT)-kernel_end);
#else
	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);
#endif

#ifdef CONFIG_SMP
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
       /*
        * Reserve low memory region for sleep support.
        */
       acpi_reserve_bootmem();
#endif
#ifdef CONFIG_XEN
#ifdef CONFIG_BLK_DEV_INITRD
	if (xen_start_info->mod_start) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			/*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
			initrd_start = INITRD_START + PAGE_OFFSET;
			initrd_end = initrd_start+INITRD_SIZE;
			initrd_below_start_ok = 1;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				(unsigned long)(INITRD_START + INITRD_SIZE),
				(unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#else	/* CONFIG_XEN */
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		}
		else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			    (unsigned long)(INITRD_START + INITRD_SIZE),
			    (unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#endif	/* !CONFIG_XEN */
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end) {
		reserve_bootmem(crashk_res.start,
			crashk_res.end - crashk_res.start + 1);
	}
#endif

	paging_init();
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif
#ifdef CONFIG_XEN
	{
		int i, j, k, fpp;
		unsigned long va;

		/* 'Initial mapping' of initrd must be destroyed. */
		for (va = xen_start_info->mod_start;
		     va < (xen_start_info->mod_start+xen_start_info->mod_len);
		     va += PAGE_SIZE) {
			HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Make sure we have a large enough P->M table. */
			phys_to_machine_mapping = alloc_bootmem(
				end_pfn * sizeof(unsigned long));
			memset(phys_to_machine_mapping, ~0,
			       end_pfn * sizeof(unsigned long));
			memcpy(phys_to_machine_mapping,
			       (unsigned long *)xen_start_info->mfn_list,
			       xen_start_info->nr_pages * sizeof(unsigned long));
			free_bootmem(
				__pa(xen_start_info->mfn_list),
				PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
						sizeof(unsigned long))));

			/* Destroyed 'initial mapping' of old p2m table. */
			for (va = xen_start_info->mfn_list;
			     va < (xen_start_info->mfn_list +
				   (xen_start_info->nr_pages*sizeof(unsigned long)));
			     va += PAGE_SIZE) {
				HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
			}

			/*
			 * Initialise the list of the frames that specify the
			 * list of frames that make up the p2m table. Used by
                         * save/restore.
			 */
			pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);
			HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
				virt_to_mfn(pfn_to_mfn_frame_list_list);

			fpp = PAGE_SIZE/sizeof(unsigned long);
			for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {
				if ((j % fpp) == 0) {
					k++;
					BUG_ON(k>=fpp);
					pfn_to_mfn_frame_list[k] =
						alloc_bootmem(PAGE_SIZE);
					pfn_to_mfn_frame_list_list[k] =
						virt_to_mfn(pfn_to_mfn_frame_list[k]);
					j=0;
				}
				pfn_to_mfn_frame_list[k][j] =
					virt_to_mfn(&phys_to_machine_mapping[i]);
			}
			HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
		}

	}

	if (xen_start_info->flags & SIF_INITDOMAIN)
		dmi_scan_machine();

	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		acpi_disabled = 1;
#ifdef  CONFIG_ACPI
		acpi_ht = 0;
#endif
	}
#endif

#ifndef CONFIG_XEN
	check_ioapic();
#endif

	zap_low_mappings(0);

	/*
	 * set this early, so we don't allocate cpu0
	 * if MADT list doesn't list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
#ifndef CONFIG_XEN
	init_apic_mappings();
#endif
#endif
#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
	prefill_possible_map();
#endif

	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
	probe_roms();
	if (xen_start_info->flags & SIF_INITDOMAIN) {
		machine_e820 = alloc_bootmem_low_pages(PAGE_SIZE);

		memmap.nr_entries = E820MAX;
		set_xen_guest_handle(memmap.buffer, machine_e820);

		BUG_ON(HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap));

		e820_reserve_resources(machine_e820, memmap.nr_entries);
	}
#elif !defined(CONFIG_XEN)
	probe_roms();
	e820_reserve_resources(e820.map, e820.nr_map);
#endif

	request_resource(&iomem_resource, &video_ram_resource);

	{
	unsigned i;
	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
	}

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
	if (xen_start_info->flags & SIF_INITDOMAIN) {
		e820_setup_gap(machine_e820, memmap.nr_entries);
		free_bootmem(__pa(machine_e820), PAGE_SIZE);
	}
#elif !defined(CONFIG_XEN)
	e820_setup_gap(e820.map, e820.nr_map);
#endif

#ifdef CONFIG_GART_IOMMU
	iommu_hole_init();
#endif

#ifdef CONFIG_XEN
	{
		struct physdev_set_iopl set_iopl;

		set_iopl.iopl = 1;
		HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);

		if (xen_start_info->flags & SIF_INITDOMAIN) {
			if (!(xen_start_info->flags & SIF_PRIVILEGED))
				panic("Xen granted us console access "
				      "but not privileged status");
		       
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
			conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
			conswitchp = &dummy_con;
#endif
#endif
		} else {
			extern int console_use_vt;
			console_use_vt = 0;
		}
	}
#else	/* CONFIG_XEN */

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

#endif /* !CONFIG_XEN */
}
void __init setup_arch(char **cmdline_p)
{
	unsigned long low_mem_size;
	unsigned long kernel_end;

 	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
 	drive_info = DRIVE_INFO;
 	screen_info = SCREEN_INFO;
	edid_info = EDID_INFO;
	aux_device_present = AUX_DEVICE_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();
	copy_edd();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;

	parse_cmdline_early(cmdline_p);

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();

	check_efer();

	init_memory_mapping(); 

#ifdef CONFIG_DISCONTIGMEM
	numa_initmem_init(0, end_pfn); 
#else
	contig_initmem_init(); 
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT, 
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
	reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

#ifdef CONFIG_SMP
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
       /*
        * Reserve low memory region for sleep support.
        */
       acpi_reserve_bootmem();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		}
		else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			    (unsigned long)(INITRD_START + INITRD_SIZE),
			    (unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
	paging_init();

	check_ioapic();
#ifdef CONFIG_ACPI_BOOT
       /*
        * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
        * Must do this after paging_init (due to reliance on fixmap, and thus
        * the bootmem allocator) but before get_smp_config (to allow parsing
        * of MADT).
        */
	acpi_boot_init();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
#endif

	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
	probe_roms();
	e820_reserve_resources(); 

	request_resource(&iomem_resource, &video_ram_resource);

	{
	unsigned i;
	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
	}

	/* Will likely break when you have unassigned resources with more
	   than 4GB memory and bridges that don't support more than 4GB. 
	   Doing it properly would require to use pci_alloc_consistent
	   in this case. */
	low_mem_size = ((end_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
	if (low_mem_size > pci_mem_start)
		pci_mem_start = low_mem_size;

#ifdef CONFIG_GART_IOMMU
       iommu_hole_init();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
void __init setup_arch(char **cmdline_p)
{
	unsigned long kernel_end;

 	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
 	screen_info = SCREEN_INFO;
	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();
	copy_edd();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;

	parse_cmdline_early(cmdline_p);

	early_identify_cpu(&boot_cpu_data);

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;		/* for pfn_valid */

	check_efer();

	discover_ebda();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

	dmi_scan_machine();

	zap_low_mappings(0);

#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn); 
#else
	contig_initmem_init(0, end_pfn);
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT, 
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
	reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);

#ifdef CONFIG_SMP
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
       /*
        * Reserve low memory region for sleep support.
        */
       acpi_reserve_bootmem();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		}
		else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			    (unsigned long)(INITRD_START + INITRD_SIZE),
			    (unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end) {
		reserve_bootmem(crashk_res.start,
			crashk_res.end - crashk_res.start + 1);
	}
#endif

	paging_init();

	check_ioapic();

	/*
	 * set this early, so we don't allocate cpu0
	 * if MADT list doesn't list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
#endif

	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
	probe_roms();
	e820_reserve_resources(); 

	request_resource(&iomem_resource, &video_ram_resource);

	{
	unsigned i;
	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
	}

	e820_setup_gap();

#ifdef CONFIG_GART_IOMMU
	iommu_hole_init();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}