/**
 * tries to determine the physical package a CPU belongs to
 */
int get_pkg(int cpu)
{
    int pkg = -1;
    char buffer[10];

    if (cpu == -1) {
        cpu = get_cpu();
    }
    if (cpu != -1) {
        sprintf(path, "/sys/devices/system/cpu/cpu%i/topology/physical_package_id", cpu);
        if (read_file(path, buffer, sizeof(buffer)))
            pkg = atoi(buffer);

        /* fallbacks if sysfs is not working */
        if (pkg == -1) {
            /* assume 0 if there is only one CPU or only one package */
            if ((num_cpus() == 1) || (num_packages() == 1)) {
                pkg = 0;
            }
            /* get the physical package id from /proc/cpuinfo */
            else if (!get_proc_cpuinfo_data("physical id", buffer, cpu)) {
                pkg = atoi(buffer);
            }
            /* if the number of cpus equals the number of packages, assume pkg_id = cpu_id */
            else if (num_cpus() == num_packages()) {
                pkg = cpu;
            }
            /* if there is only one core per package, assume pkg_id = core_id */
            else if (num_cores_per_package() == 1) {
                pkg = get_core_id(cpu);
            }
            /* if the number of packages equals the number of NUMA nodes, assume pkg_id = NUMA node */
            else if (num_numa_nodes() == num_packages()) {
                pkg = get_numa_node(cpu);
            }
            /* NOTE: pkg_id in UMA systems with multiple sockets and more than
             * one core per socket cannot be determined without correct
             * topology information in sysfs */
        }
    }
    return pkg;
}
/**
 * tries to determine the core ID a CPU belongs to
 */
int get_core_id(int cpu)
{
    int core = -1;
    char buffer[10];

    if (cpu == -1) {
        cpu = get_cpu();
    }
    if (cpu != -1) {
        sprintf(path, "/sys/devices/system/cpu/cpu%i/topology/core_id", cpu);
        if (read_file(path, buffer, sizeof(buffer)))
            core = atoi(buffer);

        /* fallbacks if sysfs is not working */
        if (core == -1) {
            /* assume 0 if there is only one CPU */
            if (num_cpus() == 1) {
                core = 0;
            }
            /* if each package contains only one cpu, assume core_id = package_id = cpu_id */
            else if (num_cores_per_package() == 1) {
                core = 0;
            }
            /* NOTE: core_id cannot be determined without correct topology
             * information in sysfs if there are multiple cores per package
             * TODO: /proc/cpuinfo */
        }
    }
    return core;
}
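/*
 * get_pkg() and get_core_id() above both rely on a read_file() helper and
 * a shared `path` buffer defined elsewhere in this module. A minimal
 * sketch of what such a helper might look like, assuming it returns
 * nonzero on success and NUL-terminates the buffer so atoi() sees a
 * proper string (hypothetical reconstruction, not the module's actual
 * implementation):
 */
#include <fcntl.h>
#include <unistd.h>

static int read_file_sketch(const char *filename, char *buffer, size_t bufsize)
{
    ssize_t bytes;
    int fd = open(filename, O_RDONLY);
    if (fd < 0)
        return 0;                 /* failure: caller keeps its -1 default */
    bytes = read(fd, buffer, bufsize - 1);
    close(fd);
    if (bytes <= 0)
        return 0;
    buffer[bytes] = '\0';         /* terminate so atoi() parses a string */
    return 1;                     /* success */
}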
static int setting_set_affinity(const TCHAR *service_name, void *param, const TCHAR *name, void *default_value, value_t *value, const TCHAR *additional)
{
    HKEY key = (HKEY) param;
    if (! key) return -1;

    long error;
    __int64 mask;
    __int64 system_affinity = 0LL;

    if (value && value->string) {
        DWORD_PTR affinity;
        if (! GetProcessAffinityMask(GetCurrentProcess(), &affinity, (DWORD_PTR *) &system_affinity)) system_affinity = ~0;

        if (is_default(value->string) || str_equiv(value->string, NSSM_AFFINITY_ALL)) mask = 0LL;
        else if (affinity_string_to_mask(value->string, &mask)) {
            print_message(stderr, NSSM_MESSAGE_BOGUS_AFFINITY_MASK, value->string, num_cpus() - 1);
            return -1;
        }
    }
    else mask = 0LL;

    if (! mask) {
        error = RegDeleteValue(key, name);
        if (error == ERROR_SUCCESS || error == ERROR_FILE_NOT_FOUND) return 0;
        print_message(stderr, NSSM_MESSAGE_REGDELETEVALUE_FAILED, name, service_name, error_string(error));
        return -1;
    }

    /* Canonicalise. */
    TCHAR *canon = 0;
    if (affinity_mask_to_string(mask, &canon)) canon = value->string;

    __int64 effective_affinity = mask & system_affinity;
    if (effective_affinity != mask) {
        /* Requested CPUs did not intersect with available CPUs? */
        if (! effective_affinity) mask = effective_affinity = system_affinity;

        TCHAR *system = 0;
        if (! affinity_mask_to_string(system_affinity, &system)) {
            TCHAR *effective = 0;
            if (! affinity_mask_to_string(effective_affinity, &effective)) {
                print_message(stderr, NSSM_MESSAGE_EFFECTIVE_AFFINITY_MASK, value->string, system, effective);
                HeapFree(GetProcessHeap(), 0, effective);
            }
            HeapFree(GetProcessHeap(), 0, system);
        }
    }

    if (RegSetValueEx(key, name, 0, REG_SZ, (const unsigned char *) canon, (unsigned long) (_tcslen(canon) + 1) * sizeof(TCHAR)) != ERROR_SUCCESS) {
        if (canon != value->string) HeapFree(GetProcessHeap(), 0, canon);
        log_event(EVENTLOG_ERROR_TYPE, NSSM_EVENT_SETVALUE_FAILED, name, error_string(GetLastError()), 0);
        return -1;
    }

    if (canon != value->string) HeapFree(GetProcessHeap(), 0, canon);
    return 1;
}
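/*
 * setting_set_affinity() above (and setting_get_affinity() further below)
 * treat affinity_string_to_mask() and affinity_mask_to_string() as
 * returning 0 on success and nonzero on failure, with the string form
 * being a comma-separated list of CPU indices and ranges such as
 * "0-3,8". A simplified sketch of the string-to-mask direction under
 * that assumed format (illustrative only, not NSSM's actual
 * implementation, which also validates against num_cpus()):
 */
#include <windows.h>
#include <tchar.h>

static int affinity_string_to_mask_sketch(const TCHAR *string, __int64 *mask)
{
    const TCHAR *s = string;

    *mask = 0LL;
    while (*s) {
        TCHAR *end;
        long first = _tcstol(s, &end, 10);
        long last = first;
        if (end == s || first < 0 || first > 63) return 1;  /* not a CPU index */
        if (*end == _T('-')) {                              /* range like 0-3 */
            s = end + 1;
            last = _tcstol(s, &end, 10);
            if (end == s || last < first || last > 63) return 1;
        }
        for (long cpu = first; cpu <= last; cpu++) *mask |= ((__int64) 1 << cpu);
        if (*end == _T(',')) end++;                         /* skip separator */
        else if (*end) return 1;                            /* trailing garbage */
        s = end;
    }
    return 0;                                               /* success */
}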
void check_mainloop(void)
{
    time_t now = 0, then = 0;
    useconds_t sleeptime;
    int delta = 0;

    /* init */
    status_alerted.alert_load = false;
    status_alerted.alert_disk = false;
    status_alerted.alert_cpu = false;
    sleeptime = 100000;
    numcpus = num_cpus();
    http_fetch_url(hburl);
    vbprintf("Found %d cpus\n", numcpus);
    database_init();

    /*
     * XXX: Right here we're just spinning in place. The reason for this
     * is to be able to have different intervals for the checking process
     * (disk/cpu/load), and also for the heartbeat process, which might
     * check at different intervals. I might separate this out into
     * another file so we have a rudimentary timer-based scheduler that
     * can shoot off different functions at variable intervals.
     */
    time(&then);
    while (1) {
        int sampletrig = 0, hbtrig = 0;

        time(&now);
        delta = (int) now - (int) then;
        sampletrig = delta % interval;
        hbtrig = delta % hbinterval;

        if (!sampletrig) {
            load_check();
            disk_check(diskpaths);
            cpu_check();
            check_alert();
            sleep(1); /* make sure trig status is over */
        }
        if (!hbtrig) {
            http_fetch_url(hburl);
            sleep(1); /* make sure trig status is over */
        }
        usleep(sleeptime);
    }
}
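/*
 * The loop above fires a check whenever the elapsed time is an exact
 * multiple of the interval (delta % interval == 0). Since usleep() only
 * pauses 100 ms, the same second would satisfy that test on roughly ten
 * consecutive passes; the sleep(1) after each trigger skips past the
 * matching second. A standalone illustration of the trigger arithmetic,
 * using hypothetical interval values:
 */
#include <stdio.h>

static void demo_trigger_arithmetic(void)
{
    int interval = 60, hbinterval = 300; /* hypothetical: 1 min checks, 5 min heartbeat */
    int delta;

    for (delta = 0; delta <= 600; delta++) {
        if (delta % interval == 0)
            printf("t=%3ds: load/disk/cpu checks fire\n", delta);
        if (delta % hbinterval == 0)
            printf("t=%3ds: heartbeat fires\n", delta);
    }
}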
static int setting_get_affinity(const TCHAR *service_name, void *param, const TCHAR *name, void *default_value, value_t *value, const TCHAR *additional)
{
    HKEY key = (HKEY) param;
    if (! key) return -1;

    unsigned long type;
    TCHAR *buffer = 0;
    unsigned long buflen = 0;

    int ret = RegQueryValueEx(key, name, 0, &type, 0, &buflen);
    if (ret == ERROR_FILE_NOT_FOUND) {
        if (value_from_string(name, value, NSSM_AFFINITY_ALL) == 1) return 0;
        return -1;
    }
    if (ret != ERROR_SUCCESS) return -1;
    if (type != REG_SZ) return -1;

    buffer = (TCHAR *) HeapAlloc(GetProcessHeap(), 0, buflen);
    if (! buffer) {
        print_message(stderr, NSSM_MESSAGE_OUT_OF_MEMORY, _T("affinity"), _T("setting_get_affinity"));
        return -1;
    }

    if (get_string(key, (TCHAR *) name, buffer, buflen, false, false, true)) {
        HeapFree(GetProcessHeap(), 0, buffer);
        return -1;
    }

    __int64 affinity;
    if (affinity_string_to_mask(buffer, &affinity)) {
        print_message(stderr, NSSM_MESSAGE_BOGUS_AFFINITY_MASK, buffer, num_cpus() - 1);
        HeapFree(GetProcessHeap(), 0, buffer);
        return -1;
    }

    HeapFree(GetProcessHeap(), 0, buffer);

    /* Canonicalise. */
    if (affinity_mask_to_string(affinity, &buffer)) {
        if (buffer) HeapFree(GetProcessHeap(), 0, buffer);
        return -1;
    }

    ret = value_from_string(name, value, buffer);
    HeapFree(GetProcessHeap(), 0, buffer);
    return ret;
}
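/*
 * setting_get_affinity() uses the standard two-call registry pattern:
 * the first RegQueryValueEx() with a NULL data pointer only reports the
 * value's type and required size in bytes, and the second read (here via
 * the module's get_string() wrapper) fills a buffer of exactly that
 * size. A minimal standalone sketch of the same pattern, with the helper
 * name being an invention for illustration:
 */
#include <windows.h>
#include <tchar.h>

static TCHAR *read_reg_sz_sketch(HKEY key, const TCHAR *name)
{
    unsigned long type, buflen = 0;

    /* First call: NULL buffer, just learn the required size. */
    if (RegQueryValueEx(key, name, 0, &type, 0, &buflen) != ERROR_SUCCESS) return 0;
    if (type != REG_SZ) return 0;

    TCHAR *buffer = (TCHAR *) HeapAlloc(GetProcessHeap(), 0, buflen);
    if (! buffer) return 0;

    /* Second call: read the data into the correctly sized buffer. */
    if (RegQueryValueEx(key, name, 0, &type, (LPBYTE) buffer, &buflen) != ERROR_SUCCESS) {
        HeapFree(GetProcessHeap(), 0, buffer);
        return 0;
    }
    return buffer; /* caller frees with HeapFree() */
}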
/**
 * This is the architecture-independent kernel entry point. Before it is
 * called, architecture-specific code has done the bare minimum initialization
 * necessary. This function initializes the kernel and its various subsystems.
 * It calls back to architecture-specific code at several well defined points,
 * which all architectures must implement (e.g., setup_arch()).
 *
 * \callgraph
 */
void start_kernel()
{
    unsigned int cpu;
    unsigned int timeout;
    int status;

    /*
     * Parse the kernel boot command line.
     * This is where boot-time configurable variables get set,
     * e.g., the ones with param() and DRIVER_PARAM() specifiers.
     */
    parse_params(lwk_command_line);

    /*
     * Initialize the console subsystem.
     * printk()'s will be visible after this.
     */
    console_init();

    /*
     * Hello, Dave.
     */
    printk("%s", lwk_banner);
    printk(KERN_DEBUG "%s\n", lwk_command_line);

    sort_exception_table();

    /*
     * Do architecture specific initialization.
     * This detects memory, CPUs, architecture dependent irqs, etc.
     */
    setup_arch();

    /*
     * Setup the architecture independent interrupt handling.
     */
    irq_init();

    /*
     * Initialize the kernel memory subsystem. Up until now, the simple
     * boot-time memory allocator (bootmem) has been used for all dynamic
     * memory allocation. Here, the bootmem allocator is destroyed and all
     * of the free pages it was managing are added to the kernel memory
     * pool (kmem) or the user memory pool (umem).
     *
     * After this point, any use of the bootmem allocator will cause a
     * kernel panic. The normal kernel memory subsystem API should be used
     * instead (e.g., kmem_alloc() and kmem_free()).
     */
    mem_subsys_init();

    /*
     * Initialize the address space management subsystem.
     */
    aspace_subsys_init();

    sched_init_runqueue(0);  /* This CPU's scheduler state + idle task */
    sched_add_task(current); /* now safe to call schedule() */

    /*
     * Initialize the core timer subsystem.
     */
    core_timer_init(0);

    /* Start the kernel filesystems */
    kfs_init();

    /*
     * Initialize the random number generator.
     */
    rand_init();

    workq_init();

    /*
     * Boot all of the other CPUs in the system, one at a time.
     */
    printk(KERN_INFO "Number of CPUs detected: %d\n", num_cpus());
    for_each_cpu_mask(cpu, cpu_present_map) {
        /* The bootstrap CPU (that's us) is already booted. */
        if (cpu == 0) {
            cpu_set(cpu, cpu_online_map);
            continue;
        }
        printk(KERN_DEBUG "Booting CPU %u.\n", cpu);
        arch_boot_cpu(cpu);

        /* Wait for ACK that CPU has booted (5 seconds max). */
        for (timeout = 0; timeout < 50000; timeout++) {
            if (cpu_isset(cpu, cpu_online_map))
                break;
            udelay(100);
        }
        if (!cpu_isset(cpu, cpu_online_map))
            panic("Failed to boot CPU %d.\n", cpu);
    }

    /*
     * Initialize the PCI subsystem.
     */
    init_pci();

    /*
     * Enable external interrupts.
     */
    local_irq_enable();

#ifdef CONFIG_NETWORK
    /*
     * Bring up any network devices.
     */
    netdev_init();
#endif

#ifdef CONFIG_CRAY_GEMINI
    driver_init_list("net", "gemini");
#endif

#ifdef CONFIG_BLOCK_DEVICE
    /*
     * Initialize the block devices.
     */
    blkdev_init();
#endif

    mcheck_init_late();

    /*
     * And any modules that need to be started.
     */
    driver_init_by_name("module", "*");

#ifdef CONFIG_KGDB
    /*
     * Stop early (before "late" devices) in KGDB if requested.
     */
    kgdb_initial_breakpoint();
#endif

    /*
     * Bring up any late init devices.
     */
    driver_init_by_name("late", "*");

    /*
     * Bring up the Linux compatibility layer, if enabled.
     */
    linux_init();

#ifdef CONFIG_DEBUG_HW_NOISE
    /* Measure noise/interference in the underlying hardware/VMM */
    extern void measure_noise(int, uint64_t);
    measure_noise(0, 0);
#endif

    /*
     * Start up user-space...
     */
    printk(KERN_INFO "Loading initial user-level task (init_task)...\n");
    if ((status = create_init_task()) != 0)
        panic("Failed to create init_task (status=%d).", status);

    current->state = TASK_EXITED;
    schedule(); /* This should not return */
    BUG();
}
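/*
 * The boot loop above relies on the LWK's CPU-mask primitives
 * (for_each_cpu_mask, cpu_set, cpu_isset). A minimal sketch of how such
 * primitives can be built over a flat bitmask, assuming at most 64 CPUs
 * (illustrative only; the real kernel uses an arch-sized cpumask type):
 */
#include <stdint.h>

typedef uint64_t cpumask_sketch_t;

static inline void cpu_set_sketch(unsigned int cpu, cpumask_sketch_t *mask)
{
    *mask |= ((uint64_t) 1 << cpu);   /* mark CPU present/online */
}

static inline int cpu_isset_sketch(unsigned int cpu, const cpumask_sketch_t *mask)
{
    return (int) ((*mask >> cpu) & 1); /* test a single CPU's bit */
}

/* Iterate over every CPU whose bit is set in the mask. */
#define for_each_cpu_mask_sketch(cpu, mask) \
    for ((cpu) = 0; (cpu) < 64; (cpu)++)    \
        if (cpu_isset_sketch((cpu), &(mask)))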
/**
 * initializes the cpuinfo struct
 * @param print if nonzero, a detection summary is written to stdout
 */
void init_cpuinfo(cpu_info_t *cpuinfo, int print)
{
    unsigned int i;
    char output[_HW_DETECT_MAX_OUTPUT];

    /* initialize data structure */
    memset(cpuinfo, 0, sizeof(cpu_info_t));
    strcpy(cpuinfo->architecture, "unknown");
    strcpy(cpuinfo->vendor, "unknown");
    strcpy(cpuinfo->model_str, "unknown");

    cpuinfo->num_cpus = num_cpus();
    get_architecture(cpuinfo->architecture, sizeof(cpuinfo->architecture));
    get_cpu_vendor(cpuinfo->vendor, sizeof(cpuinfo->vendor));
    get_cpu_name(cpuinfo->model_str, sizeof(cpuinfo->model_str));
    cpuinfo->family = get_cpu_family();
    cpuinfo->model = get_cpu_model();
    cpuinfo->stepping = get_cpu_stepping();
    cpuinfo->num_cores_per_package = num_cores_per_package();
    cpuinfo->num_threads_per_core = num_threads_per_core();
    cpuinfo->num_packages = num_packages();
    cpuinfo->clockrate = get_cpu_clockrate(1, 0);

    /* setup supported feature list */
    if (!strcmp(cpuinfo->architecture, "x86_64")) cpuinfo->features |= X86_64;
    if (feature_available("SMT")) cpuinfo->features |= SMT;
    if (feature_available("FPU")) cpuinfo->features |= FPU;
    if (feature_available("MMX")) cpuinfo->features |= MMX;
    if (feature_available("MMX_EXT")) cpuinfo->features |= MMX_EXT;
    if (feature_available("SSE")) cpuinfo->features |= SSE;
    if (feature_available("SSE2")) cpuinfo->features |= SSE2;
    if (feature_available("SSE3")) cpuinfo->features |= SSE3;
    if (feature_available("SSSE3")) cpuinfo->features |= SSSE3;
    if (feature_available("SSE4.1")) cpuinfo->features |= SSE4_1;
    if (feature_available("SSE4.2")) cpuinfo->features |= SSE4_2;
    if (feature_available("SSE4A")) cpuinfo->features |= SSE4A;
    if (feature_available("ABM")) cpuinfo->features |= ABM;
    if (feature_available("POPCNT")) cpuinfo->features |= POPCNT;
    if (feature_available("AVX")) cpuinfo->features |= AVX;
    if (feature_available("AVX2")) cpuinfo->features |= AVX2;
    if (feature_available("FMA")) cpuinfo->features |= FMA;
    if (feature_available("FMA4")) cpuinfo->features |= FMA4;
    if (feature_available("AES")) cpuinfo->features |= AES;
    if (feature_available("AVX512")) cpuinfo->features |= AVX512;

    /* determine cache details */
    for (i = 0; i < (unsigned int) num_caches(0); i++) {
        cpuinfo->Cache_shared[cache_level(0, i) - 1] = cache_shared(0, i);
        cpuinfo->Cacheline_size[cache_level(0, i) - 1] = cacheline_length(0, i);
        if (cpuinfo->Cachelevels < (unsigned int) cache_level(0, i)) {
            cpuinfo->Cachelevels = cache_level(0, i);
        }
        switch (cache_type(0, i)) {
        case UNIFIED_CACHE:
            cpuinfo->Cache_unified[cache_level(0, i) - 1] = 1;
            cpuinfo->U_Cache_Size[cache_level(0, i) - 1] = cache_size(0, i);
            cpuinfo->U_Cache_Sets[cache_level(0, i) - 1] = cache_assoc(0, i);
            break;
        case DATA_CACHE:
            cpuinfo->Cache_unified[cache_level(0, i) - 1] = 0;
            cpuinfo->D_Cache_Size[cache_level(0, i) - 1] = cache_size(0, i);
            cpuinfo->D_Cache_Sets[cache_level(0, i) - 1] = cache_assoc(0, i);
            break;
        case INSTRUCTION_CACHE:
            cpuinfo->Cache_unified[cache_level(0, i) - 1] = 0;
            cpuinfo->I_Cache_Size[cache_level(0, i) - 1] = cache_size(0, i);
            cpuinfo->I_Cache_Sets[cache_level(0, i) - 1] = cache_assoc(0, i);
            break;
        default:
            break;
        }
    }

    /* print a summary */
    if (print) {
        fflush(stdout);
        printf("\n system summary:\n");
        if (cpuinfo->num_packages)
            printf("  number of processors: %i\n", cpuinfo->num_packages);
        if (cpuinfo->num_cores_per_package)
            printf("  number of cores per package: %i\n", cpuinfo->num_cores_per_package);
        if (cpuinfo->num_threads_per_core)
            printf("  number of threads per core: %i\n", cpuinfo->num_threads_per_core);
        if (cpuinfo->num_cpus)
            printf("  total number of threads: %i\n", cpuinfo->num_cpus);
        printf("\n processor characteristics:\n");
        printf("  architecture:   %s\n", cpuinfo->architecture);
        printf("  vendor:         %s\n", cpuinfo->vendor);
        printf("  processor-name: %s\n", cpuinfo->model_str);
        printf("  model:          Family %i, Model %i, Stepping %i\n",
               cpuinfo->family, cpuinfo->model, cpuinfo->stepping);
        printf("  frequency:      %llu MHz\n", cpuinfo->clockrate / 1000000);
        fflush(stdout);
        printf("  supported features:\n  -");
        if (cpuinfo->features & X86_64) printf(" X86_64");
        if (cpuinfo->features & FPU) printf(" FPU");
        if (cpuinfo->features & MMX) printf(" MMX");
        if (cpuinfo->features & MMX_EXT) printf(" MMX_EXT");
        if (cpuinfo->features & SSE) printf(" SSE");
        if (cpuinfo->features & SSE2) printf(" SSE2");
        if (cpuinfo->features & SSE3) printf(" SSE3");
        if (cpuinfo->features & SSSE3) printf(" SSSE3");
        if (cpuinfo->features & SSE4_1) printf(" SSE4.1");
        if (cpuinfo->features & SSE4_2) printf(" SSE4.2");
        if (cpuinfo->features & SSE4A) printf(" SSE4A");
        if (cpuinfo->features & POPCNT) printf(" POPCNT");
        if (cpuinfo->features & AVX) printf(" AVX");
        if (cpuinfo->features & AVX2) printf(" AVX2");
        if (cpuinfo->features & AVX512) printf(" AVX512");
        if (cpuinfo->features & FMA) printf(" FMA");
        if (cpuinfo->features & FMA4) printf(" FMA4");
        if (cpuinfo->features & AES) printf(" AES");
        if (cpuinfo->features & SMT) printf(" SMT");
        printf("\n");
        if (cpuinfo->Cachelevels) {
            printf("  Caches:\n");
            for (i = 0; i < (unsigned int) num_caches(0); i++) {
                snprintf(output, sizeof(output), "n/a");
                if (cache_info(0, i, output, sizeof(output)) != -1)
                    printf("   - %s\n", output);
            }
        }
    }
    fflush(stdout);
}
/**
 * This is the architecture-independent kernel entry point. Before it is
 * called, architecture-specific code has done the bare minimum initialization
 * necessary. This function initializes the kernel and its various subsystems.
 * It calls back to architecture-specific code at several well defined points,
 * which all architectures must implement (e.g., setup_arch()).
 */
void start_kernel()
{
    unsigned int cpu;
    unsigned int timeout;

    /*
     * Parse the kernel boot command line.
     * This is where boot-time configurable variables get set,
     * e.g., the ones with param() and driver_param() specifiers.
     */
    parse_params(lwk_command_line);

    /*
     * Initialize the console subsystem.
     * printk()'s will be visible after this.
     */
    console_init();

    /*
     * Hello, Dave.
     */
    printk(lwk_banner);
    printk(KERN_DEBUG "%s\n", lwk_command_line);

    /*
     * Do architecture specific initialization.
     * This detects memory, CPUs, etc.
     */
    setup_arch();

    /*
     * Initialize the kernel memory subsystem. Up until now, the simple
     * boot-time memory allocator (bootmem) has been used for all dynamic
     * memory allocation. Here, the bootmem allocator is destroyed and all
     * of the free pages it was managing are added to the kernel memory
     * pool (kmem) or the user memory pool (umem).
     *
     * After this point, any use of the bootmem allocator will cause a
     * kernel panic. The normal kernel memory subsystem API should be used
     * instead (e.g., kmem_alloc() and kmem_free()).
     */
    mem_subsys_init();

    /*
     * Initialize the address space management subsystem.
     */
    aspace_subsys_init();

    /*
     * Initialize the task management subsystem.
     */
    task_subsys_init();

    /*
     * Initialize the task scheduling subsystem.
     */
    sched_subsys_init();

    /*
     * Initialize the timer subsystem.
     */
    timer_subsys_init();

    /*
     * Boot all of the other CPUs in the system, one at a time.
     */
    printk(KERN_INFO "Number of CPUs detected: %d\n", num_cpus());
    for_each_cpu_mask(cpu, cpu_present_map) {
        /* The bootstrap CPU (that's us) is already booted. */
        if (cpu == 0) {
            cpu_set(cpu, cpu_online_map);
            continue;
        }
        printk(KERN_DEBUG "Booting CPU %u.\n", cpu);
        arch_boot_cpu(cpu);

        /* Wait for ACK that CPU has booted (5 seconds max). */
        for (timeout = 0; timeout < 50000; timeout++) {
            if (cpu_isset(cpu, cpu_online_map))
                break;
            udelay(100);
        }
        if (!cpu_isset(cpu, cpu_online_map))
            panic("Failed to boot CPU %d.\n", cpu);
    }

#ifdef CONFIG_V3VEE
    v3vee_run_vmm();
    printk("%s: VMM returned. We're spinning\n", __func__);
    while (1) {
        asm("hlt");
    }
#else
    /*
     * Start up user-space...
     */
    printk(KERN_INFO "Loading initial user-level task (init_task)...\n");
    int status;
    if ((status = create_init_task()) != 0)
        panic("Failed to create init_task (status=%d).", status);
    schedule(); /* This should not return */
    BUG();
#endif
}
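/*
 * Both versions of start_kernel() wait up to 50000 * 100 us = 5 seconds
 * for a secondary CPU to set its bit in cpu_online_map. A sketch of that
 * poll-with-timeout pattern factored into a helper (hypothetical; the
 * kernels above inline it):
 */
static int wait_for_cpu_online(unsigned int cpu, unsigned int max_iters)
{
    unsigned int timeout;

    for (timeout = 0; timeout < max_iters; timeout++) {
        if (cpu_isset(cpu, cpu_online_map))
            return 1;   /* CPU acknowledged boot */
        udelay(100);    /* poll every 100 microseconds */
    }
    return 0;           /* timed out */
}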
int init_virtual_topology(config_t* cfg, cpu_model_t* cpu_model, virtual_topology_t** virtual_topologyp)
{
    char* mc_pci_file = NULL;
    char* str;
    char* saveptr;
    char* token;
    int* physical_node_ids;
    physical_node_t** physical_nodes = NULL;
    int num_physical_nodes;
    int n, v, i, j, sibling_idx;
    int node_id;
    physical_node_t* node_i, *node_j, *sibling_node;
    int ret;
    int min_distance;
    int hyperthreading;
    struct bitmask* mem_nodes;
    virtual_topology_t* virtual_topology;

    __cconfig_lookup_string(cfg, "topology.physical_nodes", &str);

    /* parse the physical nodes string */
    physical_node_ids = calloc(numa_num_possible_nodes(), sizeof(*physical_node_ids));
    num_physical_nodes = 0;
    while ((token = strtok_r(str, ",", &saveptr)) != NULL) {
        str = NULL;
        if (num_physical_nodes >= numa_num_possible_nodes()) {
            /* we are being asked to run on more nodes than are available */
            free(physical_node_ids);
            ret = E_ERROR;
            goto done;
        }
        physical_node_ids[num_physical_nodes++] = atoi(token);
    }
    physical_nodes = calloc(num_physical_nodes, sizeof(*physical_nodes));

    /* select those nodes we can run on (e.g. not constrained by any numactl) */
    mem_nodes = numa_get_mems_allowed();
    for (i = 0, n = 0; i < num_physical_nodes; i++) {
        node_id = physical_node_ids[i];
        if (numa_bitmask_isbitset(mem_nodes, node_id)) {
            physical_nodes[n] = malloc(sizeof(**physical_nodes));
            physical_nodes[n]->node_id = node_id;
            /* TODO: what if we want to avoid using only a single hardware context of a hyperthreaded core? */
            physical_nodes[n]->cpu_bitmask = numa_allocate_cpumask();
            numa_node_to_cpus(node_id, physical_nodes[n]->cpu_bitmask);
            __cconfig_lookup_bool(cfg, "topology.hyperthreading", &hyperthreading);
            if (hyperthreading) {
                physical_nodes[n]->num_cpus = num_cpus(physical_nodes[n]->cpu_bitmask);
            } else {
                DBG_LOG(INFO, "Not using hyperthreading.\n");
                /* disable the upper half of the processors in the bitmask */
                physical_nodes[n]->num_cpus = num_cpus(physical_nodes[n]->cpu_bitmask) / 2;
                int fc = first_cpu(physical_nodes[n]->cpu_bitmask);
                for (j = fc + system_num_cpus()/2; j < fc + system_num_cpus()/2 + physical_nodes[n]->num_cpus; j++) {
                    if (numa_bitmask_isbitset(physical_nodes[n]->cpu_bitmask, j)) {
                        numa_bitmask_clearbit(physical_nodes[n]->cpu_bitmask, j);
                    }
                }
            }
            n++;
        }
    }
    free(physical_node_ids);
    num_physical_nodes = n;

    /* if the pci bus topology of each physical node is not provided then discover it */
    if (__cconfig_lookup_string(cfg, "topology.mc_pci", &mc_pci_file) == CONFIG_FALSE ||
        load_mc_pci_topology(mc_pci_file, physical_nodes, num_physical_nodes) != E_SUCCESS)
    {
        discover_mc_pci_topology(cpu_model, physical_nodes, num_physical_nodes);
        save_mc_pci_topology(mc_pci_file, physical_nodes, num_physical_nodes);
    }

    /* form virtual nodes by grouping physical nodes that are close to each other */
    virtual_topology = malloc(sizeof(*virtual_topology));
    virtual_topology->num_virtual_nodes = num_physical_nodes / 2 + num_physical_nodes % 2;
    virtual_topology->virtual_nodes = calloc(virtual_topology->num_virtual_nodes,
                                             sizeof(*(virtual_topology->virtual_nodes)));

    for (i = 0, v = 0; i < num_physical_nodes; i++) {
        min_distance = INT_MAX;
        sibling_node = NULL;
        sibling_idx = -1;
        if ((node_i = physical_nodes[i]) == NULL) {
            continue;
        }
        for (j = i+1; j < num_physical_nodes; j++) {
            if ((node_j = physical_nodes[j]) == NULL) {
                continue;
            }
            if (numa_distance(node_i->node_id, node_j->node_id) < min_distance) {
                /* remember the closest node seen so far */
                min_distance = numa_distance(node_i->node_id, node_j->node_id);
                sibling_node = node_j;
                sibling_idx = j;
            }
        }
        if (sibling_node) {
            physical_nodes[i] = physical_nodes[sibling_idx] = NULL;
            virtual_node_t* virtual_node = &virtual_topology->virtual_nodes[v];
            virtual_node->dram_node = node_i;
            virtual_node->nvram_node = sibling_node;
            virtual_node->node_id = v;
            virtual_node->cpu_model = cpu_model;
            DBG_LOG(INFO, "Fusing physical nodes %d %d into virtual node %d\n",
                    node_i->node_id, sibling_node->node_id, virtual_node->node_id);
            v++;
        }
    }

    /* any physical node that is not paired with another physical node is
     * formed into a virtual node on its own */
    if (2*v < num_physical_nodes) {
        for (i = 0; i < num_physical_nodes; i++) {
            if ((node_i = physical_nodes[i]) == NULL) {
                continue; /* already fused into a virtual node */
            }
            virtual_node_t* virtual_node = &virtual_topology->virtual_nodes[v];
            virtual_node->dram_node = virtual_node->nvram_node = node_i;
            virtual_node->node_id = v;
            DBG_LOG(WARNING, "Forming physical node %d into virtual node %d without a sibling node.\n",
                    node_i->node_id, virtual_node->node_id);
            v++;
        }
    }

    *virtual_topologyp = virtual_topology;
    ret = E_SUCCESS;

done:
    free(physical_nodes);
    return ret;
}
int getFileFromUrl(char * url, char * dest, struct loaderData_s * loaderData)
{
    char ret[47];
    struct iurlinfo ui;
    enum urlprotocol_t proto =
        !strncmp(url, "ftp://", 6) ? URL_METHOD_FTP : URL_METHOD_HTTP;
    char * host = NULL, * file = NULL, * chptr = NULL;
    char * user = NULL, * password = NULL;
    int fd, rc;
    struct networkDeviceConfig netCfg;
    char * ehdrs = NULL;
    ip_addr_t *tip;
#ifdef ROCKS
    char *drivername;
#endif

#ifdef ROCKS
    /*
     * Call non-interactive, exhaustive NetworkUp() if we are
     * a cluster appliance.
     */
    if (!strlen(url)) {
        logMessage(INFO, "ROCKS:getFileFromUrl:calling rocksNetworkUp");
        rc = rocksNetworkUp(loaderData, &netCfg);
    } else {
        logMessage(INFO, "ROCKS:getFileFromUrl:calling kickstartNetworkUp");
        rc = kickstartNetworkUp(loaderData, &netCfg);
    }
    if (rc) return 1;
    fd = 0;

    /*
     * this will be used when starting up mini_httpd()
     *
     * Get the nextServer from PUMP if we used DHCP, otherwise it
     * had better be on the command line.
     */
    if (netCfg.dev.set & PUMP_INTFINFO_HAS_BOOTFILE) {
        tip = &(netCfg.dev.nextServer);
        inet_ntop(tip->sa_family, IP_ADDR(tip), ret, IP_STRLEN(tip));
        if (strlen(ret) > 0) {
            loaderData->nextServer = strdup(ret);
        } else {
            loaderData->nextServer = NULL;
        }
    }

    /*
     * If there is no nextServer, use the gateway.
     */
    if (!loaderData->nextServer) {
        loaderData->nextServer = strdup(loaderData->gateway);
    }
    logMessage(INFO, "%s: nextServer %s", "ROCKS:getFileFromUrl",
               loaderData->nextServer);
#else
    if (kickstartNetworkUp(loaderData, &netCfg)) {
        logMessage(ERROR, "unable to bring up network");
        return 1;
    }
#endif /* ROCKS */

    memset(&ui, 0, sizeof(ui));
    ui.protocol = proto;

#ifdef ROCKS
    {
        struct sockaddr_in *sin;
        int string_size;
        int ncpus;
        char np[16];
        char *arch;
        char *base;

#if defined(__i386__)
        arch = "i386";
#elif defined(__ia64__)
        arch = "ia64";
#elif defined(__x86_64__)
        arch = "x86_64";
#endif

        if (!strlen(url)) {
            base = strdup("install/sbin/kickstart.cgi");
            host = strdup(loaderData->nextServer);
        } else {
            char *p, *q;

            base = NULL;
            host = NULL;
            p = strstr(url, "//");
            if (p != NULL) {
                p += 2;
                /*
                 * 'base' is the file name
                 */
                base = strchr(p, '/');
                if (base != NULL) {
                    base += 1;
                }
                /*
                 * now get the host portion of the URL
                 */
                q = strchr(p, '/');
                if (q != NULL) {
                    *q = '\0';
                    host = strdup(p);
                }
            }
            if (!base || !host) {
                logMessage(ERROR, "kickstartFromUrl:url (%s) not well formed.\n", url);
                return(1);
            }
        }

        /* We always retrieve our kickstart file via HTTPS,
         * however the official install method (for *.img and rpms)
         * is still HTTP. */
        ui.protocol = URL_METHOD_HTTPS;

        winStatus(40, 3, _("Secure Kickstart"), _("Looking for Kickstart keys..."));
        getCert(loaderData);
        newtPopWindow();

        /* seed random number generator with our IP: unique for our purposes.
         * Used for nack backoff. */
        tip = &(netCfg.dev.nextServer);
        sin = (struct sockaddr_in *)IP_ADDR(tip);
        if (sin == NULL) {
            srand(time(NULL));
        } else {
            srand((unsigned int)sin->sin_addr.s_addr);
        }

        ncpus = num_cpus();
        sprintf(np, "%d", ncpus);

        /* account for the leading '/' written by the sprintf below */
        string_size = strlen("/") + strlen(base) + strlen("?arch=") +
                      strlen(arch) + strlen("&np=") + strlen(np) + 1;
        if ((file = alloca(string_size)) == NULL) {
            logMessage(ERROR, "kickstartFromUrl:alloca failed\n");
            return(1);
        }
        memset(file, 0, string_size);
        sprintf(file, "/%s?arch=%s&np=%s", base, arch, np);
    }
    logMessage(INFO, "ks location: https://%s%s", host, file);
#else
    tip = &(netCfg.dev.ip);
    inet_ntop(tip->sa_family, IP_ADDR(tip), ret, IP_STRLEN(tip));
    getHostPathandLogin((proto == URL_METHOD_FTP ? url + 6 : url + 7),
                        &host, &file, &user, &password, ret);
    logMessage(INFO, "file location: %s://%s/%s",
               (proto == URL_METHOD_FTP ? "ftp" : "http"), host, file);
#endif /* ROCKS */

    chptr = strchr(host, '/');
    if (chptr == NULL) {
        ui.address = strdup(host);
        ui.prefix = strdup("/");
    } else {
        *chptr = '\0';
        ui.address = strdup(host);
        host = chptr;
        *host = '/';
        ui.prefix = strdup(host);
    }

    if (user && strlen(user)) {
        ui.login = strdup(user);
        if (password && strlen(password)) ui.password = strdup(password);
    }

    if (proto == URL_METHOD_HTTP) {
        ehdrs = (char *) malloc(24 + strlen(VERSION));
        sprintf(ehdrs, "User-Agent: anaconda/%s\r\n", VERSION);
    }

    if (proto == URL_METHOD_HTTP && FL_KICKSTART_SEND_MAC(flags)) {
        /* find all ethernet devices and make a header entry for each one */
        int i;
        unsigned int hdrlen;
        char *dev, *mac, tmpstr[128];
        struct device ** devices;

        hdrlen = 0;
        devices = probeDevices(CLASS_NETWORK, BUS_UNSPEC, PROBE_LOADED);
        for (i = 0; devices && devices[i]; i++) {
            dev = devices[i]->device;
            mac = nl_mac2str(dev);
#ifdef ROCKS
            drivername = get_driver_name(dev);
#endif
            if (mac) {
#ifdef ROCKS
                /* A hint as to our primary interface. */
                if (!strcmp(dev, loaderData->netDev)) {
                    snprintf(tmpstr, sizeof(tmpstr),
                             "X-RHN-Provisioning-MAC-%d: %s %s %s ks\r\n",
                             i, dev, mac, drivername);
                } else {
                    snprintf(tmpstr, sizeof(tmpstr),
                             "X-RHN-Provisioning-MAC-%d: %s %s %s\r\n",
                             i, dev, mac, drivername);
                }
#else
                snprintf(tmpstr, sizeof(tmpstr),
                         "X-RHN-Provisioning-MAC-%d: %s %s\r\n", i, dev, mac);
#endif /* ROCKS */
#ifdef ROCKS
                free(drivername);
#endif
                free(mac);

                if (!ehdrs) {
                    hdrlen = 128;
                    ehdrs = (char *) malloc(hdrlen);
                    *ehdrs = '\0';
                } else while (strlen(tmpstr) + strlen(ehdrs) + 2 > hdrlen) {
                    /* grow in 128-byte steps until the new header fits */
                    hdrlen += 128;
                    ehdrs = (char *) realloc(ehdrs, hdrlen);
                }
                strcat(ehdrs, tmpstr);
            }
        }
    }

#ifdef ROCKS
    {
        /* Retrieve the kickstart file via HTTPS */
        BIO *sbio;

        sbio = urlinstStartSSLTransfer(&ui, file, ehdrs, 1, flags,
                                       loaderData->nextServer);
        if (!sbio) {
            logMessage(ERROR, "failed to retrieve https://%s/%s", ui.address, file);
            return 1;
        }
        rc = copyFileSSL(sbio, dest);
        if (rc) {
            unlink(dest);
            logMessage(ERROR, "failed to copy file to %s", dest);
            return 1;
        }
        urlinstFinishSSLTransfer(sbio);
        if (haveCertificate()) umount("/mnt/rocks-disk");
    }
#else
    fd = urlinstStartTransfer(&ui, file, ehdrs);
    if (fd < 0) {
        logMessage(ERROR, "failed to retrieve http://%s/%s/%s",
                   ui.address, ui.prefix, file);
        if (ehdrs) free(ehdrs);
        return 1;
    }

    rc = copyFileFd(fd, dest);
    if (rc) {
        unlink(dest);
        logMessage(ERROR, "failed to copy file to %s", dest);
        if (ehdrs) free(ehdrs);
        return 1;
    }
    urlinstFinishTransfer(&ui, fd);
#endif /* ROCKS */

    if (ehdrs) free(ehdrs);
    return 0;
}