/*
 * Find devices.  The system is alive.
 *
 * Called once during boot to bring up the console, FPU support, device
 * drivers, and the time-of-day clock.  The call order matters: the
 * console comes first so later stages can print, and device probing
 * happens before the clock is read.
 */
void machine_init(void)
{
	/*
	 * Initialize the console.
	 */
	cninit();

	/*
	 * Set up to use floating point.
	 */
	init_fpu();

#ifdef MACH_HYP
	/* Running under a hypervisor: let it handle device discovery. */
	hyp_init();
#else	/* MACH_HYP */
#ifdef LINUX_DEV
	/*
	 * Initialize Linux drivers.
	 */
	linux_init();
#endif

	/*
	 * Find the devices
	 */
	probeio();
#endif	/* MACH_HYP */

	/*
	 * Get the time
	 */
	inittodr();

#ifndef MACH_HYP
	/*
	 * Tell the BIOS not to clear and test memory.
	 * (0x472 is the BIOS reset-flag word; presumably 0x1234 requests
	 * a warm boot -- matches the comment above, confirm against the
	 * BIOS data area spec.)
	 */
	*(unsigned short *)phystokv(0x472) = 0x1234;
#endif	/* MACH_HYP */

#if VM_MIN_KERNEL_ADDRESS == 0
	/*
	 * Unmap page 0 to trap NULL references.
	 *
	 * Note that this breaks accessing some BIOS areas stored there.
	 */
	pmap_unmap_page_zero();
#endif
}
/** * Linux main */ int main(int argc, char **argv) { gconf.binary = argv[0]; posix_init(); XInitThreads(); hts_mutex_init(&gdk_mutex); g_thread_init(NULL); gdk_threads_set_lock_functions(gdk_obtain, gdk_release); gdk_threads_init(); gdk_threads_enter(); gtk_init(&argc, &argv); parse_opts(argc, argv); linux_init(); main_init(); if(gconf.ui && !strcmp(gconf.ui, "gu")) ui_wanted = &ui_gu; glibcourier = glib_courier_create(g_main_context_default()); prop_subscribe(0, PROP_TAG_NAME("global", "eventSink"), PROP_TAG_CALLBACK_EVENT, linux_global_eventsink, NULL, PROP_TAG_COURIER, glibcourier, NULL); add_xdg_paths(); mainloop(); main_fini(); arch_exit(); }
/*
 * Find devices.  The system is alive.
 *
 * One-shot boot-time initialization: console, FPU, drivers, device
 * probe, and time-of-day clock, in that order (later steps may print
 * via the console set up first).
 */
void machine_init()
{
	/*
	 * Initialize the console.
	 */
	cninit();

	/*
	 * Set up to use floating point.
	 */
	init_fpu();

#ifdef LINUX_DEV
	/*
	 * Initialize Linux drivers.
	 */
	linux_init();
#endif

	/*
	 * Find the devices
	 */
	probeio();

	/*
	 * Get the time
	 */
	inittodr();

	/*
	 * Tell the BIOS not to clear and test memory.
	 * (Writes the BIOS reset-flag word at physical 0x472; presumably
	 * 0x1234 means "warm boot" -- confirm against the BIOS data area
	 * documentation.)
	 */
	*(unsigned short *)phystokv(0x472) = 0x1234;

	/*
	 * Unmap page 0 to trap NULL references.
	 */
	pmap_unmap_page_zero();
}
/* given a xa_instance_t struct with the xc_handle and the
 * domain_id filled in, this function will fill in the rest
 * of the values using queries to libxc.
 *
 * Returns XA_SUCCESS on success; on failure returns whatever
 * xa_report_error() decided (critical errors abort early via
 * error_exit).
 */
int helper_init (xa_instance_t *instance)
{
    int ret = XA_SUCCESS;
    /* BUGFIX: removed unused locals 'local_offset' and 'memory' --
     * they were declared and initialized but never referenced. */

    if (XA_MODE_XEN == instance->mode){
#ifdef ENABLE_XEN
        /* init instance->m.xen.xc_handle */
        if (xc_domain_getinfo(
                instance->m.xen.xc_handle, instance->m.xen.domain_id,
                1, &(instance->m.xen.info)) != 1){
            fprintf(stderr, "ERROR: Failed to get domain info\n");
            ret = xa_report_error(instance, 0, XA_ECRITICAL);
            if (XA_FAILURE == ret) goto error_exit;
        }
        xa_dbprint("--got domain info.\n");

        /* find the version of xen that we are running */
        init_xen_version(instance);
#endif /* ENABLE_XEN */
    }

    /* read in configure file information */
    if (read_config_file(instance) == XA_FAILURE){
        ret = xa_report_error(instance, 0, XA_EMINOR);
        if (XA_FAILURE == ret) goto error_exit;
    }

    /* determine the page sizes and layout for target OS */
    if (XA_MODE_XEN == instance->mode){
#ifdef ENABLE_XEN
        if (get_page_info_xen(instance) == XA_FAILURE){
            fprintf(stderr, "ERROR: memory layout not supported\n");
            ret = xa_report_error(instance, 0, XA_ECRITICAL);
            if (XA_FAILURE == ret) goto error_exit;
        }
#endif /* ENABLE_XEN */
    }
    else{
        /*TODO add memory layout discovery here for file */
        instance->hvm = 1; /* assume nonvirt image or hvm image for now */
        instance->pae = 0; /* assume no pae for now */
    }
    xa_dbprint("--got memory layout.\n");

    /* setup the correct page offset size for the target OS */
    init_page_offset(instance);

    if (XA_MODE_XEN == instance->mode){
#ifdef ENABLE_XEN
        /* init instance->hvm */
        instance->hvm = xa_ishvm(instance->m.xen.domain_id);
#ifdef XA_DEBUG
        if (instance->hvm){
            xa_dbprint("**set instance->hvm to true (HVM).\n");
        }
        else{
            xa_dbprint("**set instance->hvm to false (PV).\n");
        }
#endif /* XA_DEBUG */
#endif /* ENABLE_XEN */
    }

    /* get the memory size */
    if (get_memory_size(instance) == XA_FAILURE){
        fprintf(stderr, "ERROR: Failed to get memory size.\n");
        ret = xa_report_error(instance, 0, XA_ECRITICAL);
        if (XA_FAILURE == ret) goto error_exit;
    }

    /* setup OS specific stuff */
    if (instance->os_type == XA_OS_LINUX){
        ret = linux_init(instance);
    }
    else if (instance->os_type == XA_OS_WINDOWS){
        ret = windows_init(instance);
    }

error_exit:
    return ret;
}
/**
 * This is the architecture-independent kernel entry point. Before it is
 * called, architecture-specific code has done the bare minimum initialization
 * necessary. This function initializes the kernel and its various subsystems.
 * It calls back to architecture-specific code at several well defined points,
 * which all architectures must implement (e.g., setup_arch()).
 *
 * The initialization order below is deliberate and fragile: each stage
 * depends on the ones before it (e.g. printk needs console_init(),
 * kmem_alloc needs mem_subsys_init()).  Does not return.
 *
 * \callgraph
 */
void
start_kernel()
{
	unsigned int cpu;
	unsigned int timeout;
	int status;

	/*
	 * Parse the kernel boot command line.
	 * This is where boot-time configurable variables get set,
	 * e.g., the ones with param() and DRIVER_PARAM() specifiers.
	 */
	parse_params(lwk_command_line);

	/*
	 * Initialize the console subsystem.
	 * printk()'s will be visible after this.
	 */
	console_init();

	/*
	 * Hello, Dave.
	 */
	printk("%s", lwk_banner);
	printk(KERN_DEBUG "%s\n", lwk_command_line);

	sort_exception_table();

	/*
	 * Do architecture specific initialization.
	 * This detects memory, CPUs, architecture dependent irqs, etc.
	 */
	setup_arch();

	/*
	 * Setup the architecture independent interrupt handling.
	 */
	irq_init();

	/*
	 * Initialize the kernel memory subsystem. Up until now, the simple
	 * boot-time memory allocator (bootmem) has been used for all dynamic
	 * memory allocation. Here, the bootmem allocator is destroyed and all
	 * of the free pages it was managing are added to the kernel memory
	 * pool (kmem) or the user memory pool (umem).
	 *
	 * After this point, any use of the bootmem allocator will cause a
	 * kernel panic. The normal kernel memory subsystem API should be used
	 * instead (e.g., kmem_alloc() and kmem_free()).
	 */
	mem_subsys_init();

	/*
	 * Initialize the address space management subsystem.
	 */
	aspace_subsys_init();

	sched_init_runqueue(0); /* This CPUs scheduler state + idle task */
	sched_add_task(current);  /* now safe to call schedule() */

	/*
	 * Initialize the task scheduling subsystem.
	 */
	core_timer_init(0);

	/* Start the kernel filesystems */
	kfs_init();

	/*
	 * Initialize the random number generator.
	 */
	rand_init();

	workq_init();

	/*
	 * Boot all of the other CPUs in the system, one at a time.
	 */
	printk(KERN_INFO "Number of CPUs detected: %d\n", num_cpus());
	for_each_cpu_mask(cpu, cpu_present_map) {
		/* The bootstrap CPU (that's us) is already booted. */
		if (cpu == 0) {
			cpu_set(cpu, cpu_online_map);
			continue;
		}

		printk(KERN_DEBUG "Booting CPU %u.\n", cpu);
		arch_boot_cpu(cpu);

		/* Wait for ACK that CPU has booted (5 seconds max). */
		/* 50000 iterations x 100us = 5s busy-wait. */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_online_map))
				break;
			udelay(100);
		}

		if (!cpu_isset(cpu, cpu_online_map))
			panic("Failed to boot CPU %d.\n", cpu);
	}

	/*
	 * Initialize the PCI subsystem.
	 */
	init_pci();

	/*
	 * Enable external interrupts.
	 */
	local_irq_enable();

#ifdef CONFIG_NETWORK
	/*
	 * Bring up any network devices.
	 */
	netdev_init();
#endif

#ifdef CONFIG_CRAY_GEMINI
	driver_init_list("net", "gemini");
#endif

#ifdef CONFIG_BLOCK_DEVICE
	/**
	 * Initialize the block devices
	 */
	blkdev_init();
#endif

	mcheck_init_late();

	/*
	 * And any modules that need to be started.
	 */
	driver_init_by_name( "module", "*" );

#ifdef CONFIG_KGDB
	/*
	 * Stop eary (before "late" devices) in KGDB if requested
	 */
	kgdb_initial_breakpoint();
#endif

	/*
	 * Bring up any late init devices.
	 */
	driver_init_by_name( "late", "*" );

	/*
	 * Bring up the Linux compatibility layer, if enabled.
	 */
	linux_init();

#ifdef CONFIG_DEBUG_HW_NOISE
	/* Measure noise/interference in the underlying hardware/VMM */
	extern void measure_noise(int, uint64_t);
	measure_noise(0, 0);
#endif

	/*
	 * Start up user-space...
	 */
	printk(KERN_INFO "Loading initial user-level task (init_task)...\n");
	if ((status = create_init_task()) != 0)
		panic("Failed to create init_task (status=%d).", status);

	/* Hand the bootstrap CPU over to the scheduler; the boot task
	 * never runs again. */
	current->state = TASK_EXITED;
	schedule();  /* This should not return */
	BUG();
}
/*
 * Core initialization for a vmi_instance.
 *
 * Allocates and zeroes the instance, records the access/init/config
 * mode bits from 'flags', sets up the caches, connects the driver,
 * determines memory size and architecture, and (for VMI_INIT_COMPLETE)
 * loads configuration and performs OS-specific init.
 *
 * Returns VMI_SUCCESS or VMI_FAILURE; on failure the partially built
 * instance is left for the caller to destroy.
 */
static status_t vmi_init_private(
    vmi_instance_t *vmi,
    uint32_t flags,
    uint64_t id,
    const char *name,
    vmi_config_t config)
{
    uint32_t access_mode = flags & 0x0000FFFF;
    uint32_t init_mode = flags & 0x00FF0000;
    uint32_t config_mode = flags & 0xFF000000;
    status_t status = VMI_FAILURE;

    /* allocate memory for instance structure */
    *vmi = (vmi_instance_t) safe_malloc(sizeof(struct vmi_instance));
    memset(*vmi, 0, sizeof(struct vmi_instance));

    /* initialize instance struct to default values */
    dbprint(VMI_DEBUG_CORE, "LibVMI Version 0.11.0\n");    //TODO change this with each release

    /* save the flags and init mode */
    (*vmi)->flags = flags;
    (*vmi)->init_mode = init_mode;
    (*vmi)->config_mode = config_mode;

    /* the config hash table is set up later based on mode */
    (*vmi)->config = NULL;

    /* set page mode to unknown */
    (*vmi)->page_mode = VMI_PM_UNKNOWN;

    /* setup the caches */
    pid_cache_init(*vmi);
    sym_cache_init(*vmi);
    rva_cache_init(*vmi);
    v2p_cache_init(*vmi);

    if ( init_mode & VMI_INIT_SHM_SNAPSHOT ) {
#if ENABLE_SHM_SNAPSHOT == 1
        v2m_cache_init(*vmi);
#else
        errprint("LibVMI wasn't compiled with SHM support!\n");
        status = VMI_FAILURE;
        goto error_exit;
#endif
    }

    /* connecting to xen, kvm, file, etc */
    if (VMI_FAILURE == set_driver_type(*vmi, access_mode, id, name)) {
        goto error_exit;
    }

    /* driver-specific initialization */
    if (VMI_FAILURE == driver_init(*vmi)) {
        goto error_exit;
    }
    dbprint(VMI_DEBUG_CORE, "--completed driver init.\n");

    /* resolve the id and name */
    if (VMI_FAILURE == set_id_and_name(*vmi, access_mode, id, name)) {
        goto error_exit;
    }

    /* init vmi for specific file/domain through the driver */
    if (VMI_FAILURE == driver_init_vmi(*vmi)) {
        goto error_exit;
    }

    /* setup the page offset size */
    if (VMI_FAILURE == init_page_offset(*vmi)) {
        goto error_exit;
    }

    /* get the memory size */
    if (driver_get_memsize(*vmi, &(*vmi)->allocated_ram_size, &(*vmi)->max_physical_address) == VMI_FAILURE) {
        errprint("Failed to get memory size.\n");
        goto error_exit;
    }
    /* BUGFIX: the old message printed (*vmi)->size, a field that
     * driver_get_memsize() does not fill in here; report the values
     * that were actually just set. */
    dbprint(VMI_DEBUG_CORE, "**set allocated_ram_size = %"PRIu64" [0x%"PRIx64"], max_physical_address = 0x%"PRIx64"\n",
            (*vmi)->allocated_ram_size, (*vmi)->allocated_ram_size,
            (*vmi)->max_physical_address);

    // for file mode we need os-specific heuristics to deduce the architecture
    // for live mode, having arch_interface set even in VMI_PARTIAL mode
    // allows use of dtb-based translation methods.
    if (VMI_FILE != (*vmi)->mode) {
        if(VMI_FAILURE == arch_init(*vmi)) {
            if (init_mode & VMI_INIT_COMPLETE) {
                dbprint(VMI_DEBUG_CORE,
                        "--failed to determine architecture of live vm and INIT_COMPLETE.\n");
                goto error_exit;
            } else {
                dbprint(VMI_DEBUG_CORE,
                        "--failed to determine architecture of live vm and INIT_PARTIAL, continuing.\n");
            }
        } else {
            /* BUGFIX: corrected "succesfully" typo in debug output. */
            dbprint(VMI_DEBUG_CORE, "--successfully completed architecture init.\n");
        }
    }

    /* we check VMI_INIT_COMPLETE first as VMI_INIT_PARTIAL is not exclusive */
    if (init_mode & VMI_INIT_COMPLETE) {
        switch((*vmi)->config_mode) {
            case VMI_CONFIG_STRING:
                /* read and parse the config string */
                if(VMI_FAILURE == read_config_string(*vmi, (char*)config)) {
                    goto error_exit;
                }
                break;
            case VMI_CONFIG_GLOBAL_FILE_ENTRY:
                /* read and parse the config file */
                if(VMI_FAILURE == read_config_file_entry(*vmi)) {
                    goto error_exit;
                }
                break;
            case VMI_CONFIG_GHASHTABLE:
                /* read and parse the ghashtable */
                if (!config) {
                    goto error_exit;
                }
                (*vmi)->config = (GHashTable*)config;
                break;
            case VMI_CONFIG_NONE:
            default:
                /* init_complete requires configuration
                   falling back to VMI_CONFIG_GLOBAL_FILE_ENTRY is unsafe here
                   as the config pointer is probably NULL */
                goto error_exit;
        }

        if(VMI_FAILURE == set_os_type_from_config(*vmi)) {
            dbprint(VMI_DEBUG_CORE, "--failed to determine os type from config\n");
            goto error_exit;
        }

        /* setup OS specific stuff */
        switch ( (*vmi)->os_type ) {
#ifdef ENABLE_LINUX
            case VMI_OS_LINUX:
                if(VMI_FAILURE == linux_init(*vmi)) {
                    goto error_exit;
                }
                break;
#endif
#ifdef ENABLE_WINDOWS
            case VMI_OS_WINDOWS:
                if(VMI_FAILURE == windows_init(*vmi)) {
                    goto error_exit;
                }
                break;
#endif
            default:
                goto error_exit;
        }

        status = VMI_SUCCESS;
    } else if (init_mode & VMI_INIT_PARTIAL) {
        status = VMI_SUCCESS;
    } else {
        errprint("Need to specify either VMI_INIT_PARTIAL or VMI_INIT_COMPLETE.\n");
        goto error_exit;
    }

    if(init_mode & VMI_INIT_EVENTS) {
#if ENABLE_XEN_EVENTS == 1
        /* Enable event handlers */
        events_init(*vmi);
#else
        errprint("LibVMI wasn't compiled with events support!\n");
        status = VMI_FAILURE;
#endif
    }

error_exit:
    return status;
}
/*
 * Core initialization for a vmi_instance (0.9-era variant).
 *
 * Allocates and zeroes the instance, records the mode bits from
 * 'flags', initializes the caches, connects the driver, and — for
 * VMI_INIT_COMPLETE — reads configuration, determines the memory
 * layout, and runs OS-specific init.  Returns VMI_SUCCESS or
 * VMI_FAILURE; the partially built instance is left for the caller
 * to destroy on failure.
 */
static status_t vmi_init_private(
    vmi_instance_t *vmi,
    uint32_t flags,
    unsigned long id,
    char *name,
    vmi_config_t *config)
{
    uint32_t access_mode = flags & 0x0000FFFF;
    uint32_t init_mode = flags & 0x00FF0000;
    uint32_t config_mode = flags & 0xFF000000;
    status_t status = VMI_FAILURE;

    /* allocate memory for instance structure */
    *vmi = (vmi_instance_t) safe_malloc(sizeof(struct vmi_instance));
    memset(*vmi, 0, sizeof(struct vmi_instance));

    /* initialize instance struct to default values */
    dbprint("LibVMI Version 0.9_alpha\n");  //TODO change this with each release

    /* save the flags and init mode */
    (*vmi)->flags = flags;
    (*vmi)->init_mode = init_mode;
    (*vmi)->config = config;
    (*vmi)->config_mode = config_mode;

    /* setup the caches */
    pid_cache_init(*vmi);
    sym_cache_init(*vmi);
    rva_cache_init(*vmi);
    v2p_cache_init(*vmi);

    /* connecting to xen, kvm, file, etc */
    if (VMI_FAILURE == set_driver_type(*vmi, access_mode, id, name)) {
        goto error_exit;
    }

    /* resolve the id and name */
    if (VMI_FAILURE == set_id_and_name(*vmi, access_mode, id, name)) {
        goto error_exit;
    }

    /* driver-specific initialization */
    if (VMI_FAILURE == driver_init(*vmi)) {
        goto error_exit;
    }
    dbprint("--completed driver init.\n");

    /* we check VMI_INIT_COMPLETE first as VMI_INIT_PARTIAL is not exclusive */
    if (init_mode & VMI_INIT_COMPLETE) {
        /* init_complete requires configuration */
        /* NOTE(review): this is a bitwise test -- if VMI_CONFIG_NONE is
         * defined as 0 the branch can never fire; verify the constant. */
        if(VMI_CONFIG_NONE & (*vmi)->config_mode) {
            /* falling back to VMI_CONFIG_GLOBAL_FILE_ENTRY is unsafe here
               as the config pointer is probably NULL */
            goto error_exit;
        }
        /* read and parse the config file */
        else if ( (VMI_CONFIG_STRING & (*vmi)->config_mode ||
                   VMI_CONFIG_GLOBAL_FILE_ENTRY & (*vmi)->config_mode) &&
                  VMI_FAILURE == read_config_file(*vmi)) {
            goto error_exit;
        }
        /* read and parse the ghashtable */
        else if (VMI_CONFIG_GHASHTABLE & (*vmi)->config_mode &&
                 VMI_FAILURE == read_config_ghashtable(*vmi)) {
            dbprint("--failed to parse ghashtable\n");
            goto error_exit;
        }

        /* setup the correct page offset size for the target OS */
        if (VMI_FAILURE == init_page_offset(*vmi)) {
            goto error_exit;
        }

        /* get the memory size */
        if (driver_get_memsize(*vmi, &(*vmi)->size) == VMI_FAILURE) {
            errprint("Failed to get memory size.\n");
            goto error_exit;
        }
        dbprint("**set size = %"PRIu64" [0x%"PRIx64"]\n",
                (*vmi)->size, (*vmi)->size);

        /* determine the page sizes and layout for target OS */
        // Find the memory layout. If this fails, then proceed with the
        // OS-specific heuristic techniques.
        (*vmi)->pae = (*vmi)->pse = (*vmi)->lme = (*vmi)->cr3 = 0;
        (*vmi)->page_mode = VMI_PM_UNKNOWN;
        status = get_memory_layout(*vmi,
                                   &((*vmi)->page_mode),
                                   &((*vmi)->cr3),
                                   &((*vmi)->pae),
                                   &((*vmi)->pse),
                                   &((*vmi)->lme));
        if (VMI_FAILURE == status) {
            dbprint ("**Failed to get memory layout for VM. Trying heuristic method.\n");
            // fall-through
        } // if

        // Heuristic method
        if (!(*vmi)->cr3) {
            (*vmi)->cr3 = find_cr3((*vmi));
            dbprint("**set cr3 = 0x%.16"PRIx64"\n", (*vmi)->cr3);
        } // if

        /* setup OS specific stuff */
        /* NOTE(review): if os_type is neither Linux nor Windows, 'status'
         * keeps the get_memory_layout() result -- possibly VMI_SUCCESS with
         * no OS init done; confirm that is intended. */
        if (VMI_OS_LINUX == (*vmi)->os_type) {
            status = linux_init(*vmi);
        } else if (VMI_OS_WINDOWS == (*vmi)->os_type) {
            status = windows_init(*vmi);
        }

        /* Enable event handlers only if we're in a consistent state */
        if((status == VMI_SUCCESS) && (init_mode & VMI_INIT_EVENTS)){
            events_init(*vmi);
        }

        return status;

    } else if (init_mode & VMI_INIT_PARTIAL) {
        /* NOTE(review): the return values of init_page_offset() and
         * driver_get_memsize() are ignored here, unlike the COMPLETE
         * path above -- verify partial init tolerates their failure. */
        init_page_offset(*vmi);
        driver_get_memsize(*vmi, &(*vmi)->size);

        /* Enable event handlers */
        if(init_mode & VMI_INIT_EVENTS){
            events_init(*vmi);
        }

        return VMI_SUCCESS;
    }

error_exit:
    return status;
}