static int ex_metric_init (apr_pool_t *p)
{
    Ganglia_25metric *metric;

    init_cpu_info ();

    /* Carve out a private sub-pool for this module and create the
       dynamic array that will collect the metric definitions. */
    apr_pool_create(&pool, p);
    metric_info = apr_array_make(pool, 2, sizeof(Ganglia_25metric));

    /* Register one metric per CPU-utilization category. */
    cpu_user = init_metric (pool, metric_info, cpu_count, "multicpu_user",
                            "Percentage of CPU utilization that occurred while "
                            "executing at the user level");
    cpu_nice = init_metric (pool, metric_info, cpu_count, "multicpu_nice",
                            "Percentage of CPU utilization that occurred while "
                            "executing at the nice level");
    cpu_system = init_metric (pool, metric_info, cpu_count, "multicpu_system",
                              "Percentage of CPU utilization that occurred while "
                              "executing at the system level");
    cpu_idle = init_metric (pool, metric_info, cpu_count, "multicpu_idle",
                            "Percentage of CPU utilization that occurred while "
                            "executing at the idle level");
    cpu_wio = init_metric (pool, metric_info, cpu_count, "multicpu_wio",
                           "Percentage of CPU utilization that occurred while "
                           "executing at the wio level");
    cpu_intr = init_metric (pool, metric_info, cpu_count, "multicpu_intr",
                            "Percentage of CPU utilization that occurred while "
                            "executing at the intr level");
    cpu_sintr = init_metric (pool, metric_info, cpu_count, "multicpu_sintr",
                             "Percentage of CPU utilization that occurred while "
                             "executing at the sintr level");

    /* Terminate the array with a zeroed sentinel entry, then hand the
       finished table to the module descriptor in place of its empty
       static metric definition array. */
    metric = apr_array_push(metric_info);
    memset (metric, 0, sizeof(*metric));
    multicpu_module.metrics_info = (Ganglia_25metric *)metric_info->elts;

    /* Walk the finished table (up to the sentinel) and attach the "cpu"
       grouping metadata to every entry; MGROUP is the key for the
       grouping attribute. */
    for (metric = multicpu_module.metrics_info; metric->name != NULL; metric++) {
        MMETRIC_INIT_METADATA(metric, p);
        MMETRIC_ADD_METADATA(metric, MGROUP, "cpu");
    }

    return 0;
}
/*
 * Top-level structured-exception handler: capture the faulting context and
 * emit a crash report (header, per-thread stack traces, loaded-module list).
 *
 * Bails out silently if the dbghelp imports cannot be resolved, since no
 * stack walking or symbol lookup is possible without them.
 */
static inline void handle_exception(struct exception_handler_data *data,
				    PEXCEPTION_POINTERS exception)
{
	if (!get_dbghelp_imports(data))
		return;

	data->exception = exception;
	data->process = GetCurrentProcess();
	/* Snapshot the faulting thread's register state for the main trace. */
	data->main_trace.context = *exception->ContextRecord;
	GetSystemTime(&data->time_info);

	/* Gather symbol, version, CPU, instruction, and module info before
	 * writing anything out. */
	init_sym_info(data);
	init_version_info(data);
	init_cpu_info(data);
	init_instruction_data(&data->main_trace);
	init_module_info(data);

	/* Emit the report sections in order. */
	write_header(data);
	write_thread_traces(data);
	write_module_list(data);
}
/**
 * init_cpu_drc_info
 * @brief Populate a dr_info structure with the system's CPU, thread, and
 *        cache information for dynamic reconfiguration.
 *
 * @param dr_info caller-supplied structure; zeroed and filled in here
 * @returns 0 on success, -1 otherwise
 */
int init_cpu_drc_info(struct dr_info *dr_info)
{
	struct dr_node *cpu;
	struct thread *t;
	int rc;

	memset(dr_info, 0, sizeof(*dr_info));

	/* NOTE(review): unlike the later failures, a thread-info failure does
	 * not call free_cpu_drc_info() — presumably nothing has been allocated
	 * into dr_info yet; TODO confirm. */
	rc = init_thread_info(dr_info);
	if (rc) {
		return -1;
	}

	rc = init_cpu_info(dr_info);
	if (rc) {
		free_cpu_drc_info(dr_info);
		return -1;
	}

	rc = init_cache_info(dr_info);
	if (rc) {
		free_cpu_drc_info(dr_info);
		return -1;
	}

	/* Dump the discovered CPU list (and each CPU's threads) at DEBUG
	 * verbosity. */
	say(DEBUG, "Start CPU List.\n");
	for (cpu = dr_info->all_cpus; cpu; cpu = cpu->next) {
		say(DEBUG, "%x : %s\n", cpu->drc_index, cpu->drc_name);

		for (t = cpu->cpu_threads; t; t = t->sibling)
			say(DEBUG, "\tthread: %d: %s\n", t->phys_id, t->path);
	}
	say(DEBUG, "Done.\n");

	return 0;
}
/*
 * Bring up every CPU under a CPU_CLUSTER device: perform EL3 init on the
 * BSP, start each enabled, not-yet-online secondary through
 * cntrl_ops->start_cpu(), wait up to 1 second for it to come online, and
 * then run init_this_cpu on it via arch_run_on_cpu().
 */
void arch_initialize_cpus(device_t cluster, struct cpu_control_ops *cntrl_ops)
{
	size_t max_cpus;
	size_t i;
	struct cpu_info *ci;
	void (*entry)(void);
	struct bus *bus;

	if (cluster->path.type != DEVICE_PATH_CPU_CLUSTER) {
		printk(BIOS_ERR, "CPU init failed. Device is not a CPU_CLUSTER: %s\n", dev_path(cluster));
		return;
	}

	bus = cluster->link_list;

	/* Check if no children under this device. */
	if (bus == NULL)
		return;

	/*
	 * el3_init must be performed prior to prepare_secondary_cpu_startup.
	 * This is important since el3_init initializes SCR values on BSP CPU
	 * and then prepare_secondary_cpu_startup reads the initialized SCR
	 * value and saves it for use by non-BSP CPUs.
	 */
	el3_init();
	/* Mark current cpu online. */
	cpu_mark_online(cpu_info());
	entry = prepare_secondary_cpu_startup();

	/* Initialize the cpu_info structures. */
	init_cpu_info(bus);
	max_cpus = cntrl_ops->total_cpus();

	/* Clamp to the build-time limit rather than walking off the end of
	 * the cpu_info array. */
	if (max_cpus > CONFIG_MAX_CPUS) {
		printk(BIOS_WARNING, "max_cpus (%zu) exceeds CONFIG_MAX_CPUS (%zu).\n", max_cpus, (size_t)CONFIG_MAX_CPUS);
		max_cpus = CONFIG_MAX_CPUS;
	}

	for (i = 0; i < max_cpus; i++) {
		device_t dev;
		struct cpu_action action;
		struct stopwatch sw;

		ci = cpu_info_for_cpu(i);
		dev = ci->cpu;

		/* Disregard CPUs not in device tree. */
		if (dev == NULL)
			continue;

		/* Skip disabled CPUs. */
		if (!dev->enabled)
			continue;

		if (!cpu_online(ci)) {
			/* Start the CPU. */
			printk(BIOS_DEBUG, "Starting CPU%x\n", ci->id);

			if (cntrl_ops->start_cpu(ci->id, entry)) {
				printk(BIOS_ERR, "Failed to start CPU%x\n", ci->id);
				continue;
			}

			/* Spin for up to 1 second waiting for the CPU to
			 * come online. */
			stopwatch_init_msecs_expire(&sw, 1000);

			/* Wait for CPU to come online. */
			while (!stopwatch_expired(&sw)) {
				if (!cpu_online(ci))
					continue;
				printk(BIOS_DEBUG, "CPU%x online in %ld usecs.\n", ci->id, stopwatch_duration_usecs(&sw));
				break;
			}
		}

		/* Note: 'sw' is only read here when the CPU went through the
		 * start path above (which initialized it); a CPU that was
		 * already online skips both this branch and the one above. */
		if (!cpu_online(ci)) {
			printk(BIOS_DEBUG, "CPU%x failed to come online in %ld usecs.\n", ci->id, stopwatch_duration_usecs(&sw));
			continue;
		}

		/* Send it the init action. */
		action.run = init_this_cpu;
		action.arg = ci;
		arch_run_on_cpu(ci->id, &action);
	}
}
/*
 * Boot-time MP startup: finish initializing the boot CPU's own per-cpu
 * state, then (unless use_mp is clear) launch every slave CPU that has a
 * firmware node and is a member of cpu_bringup_set.  A slave that fails
 * setup_cpu_common() panics the system.  Runs under cpu_lock while CPUs
 * are being brought up.
 */
/*ARGSUSED*/
void
start_other_cpus(int flag)
{
	int cpuid;
	extern void idlestop_init(void);
	int bootcpu;

	/*
	 * Check if cpu_bringup_set has been explicitly set before
	 * initializing it.
	 */
	if (CPUSET_ISNULL(cpu_bringup_set)) {
		CPUSET_ALL(cpu_bringup_set);
	}

	/* presumably a weak symbol — invoked only if linked in; TODO confirm */
	if (&cpu_feature_init)
		cpu_feature_init();

	/*
	 * Initialize CPC.
	 */
	kcpc_hw_init();

	mutex_enter(&cpu_lock);

	/*
	 * Initialize our own cpu_info.
	 */
	init_cpu_info(CPU);

	/*
	 * Initialize CPU 0 cpu module private data area, including scrubber.
	 */
	cpu_init_private(CPU);
	populate_idstr(CPU);

	/*
	 * perform such initialization as is needed
	 * to be able to take CPUs on- and off-line.
	 */
	cpu_pause_init();
	xc_init();		/* initialize processor crosscalls */
	idlestop_init();

	if (!use_mp) {
		mutex_exit(&cpu_lock);
		cmn_err(CE_CONT, "?***** Not in MP mode\n");
		return;
	}

	/*
	 * should we be initializing this cpu?
	 */
	bootcpu = getprocessorid();

	/*
	 * launch all the slave cpus now
	 */
	for (cpuid = 0; cpuid < NCPU; cpuid++) {
		/* Skip slots with no firmware node. */
		pnode_t nodeid = cpunodes[cpuid].nodeid;

		if (nodeid == (pnode_t)0)
			continue;

		/* The boot CPU is already running; just make sure it is in
		 * the bringup set. */
		if (cpuid == bootcpu) {
			if (!CPU_IN_SET(cpu_bringup_set, cpuid)) {
				cmn_err(CE_WARN,
				    "boot cpu not a member "
				    "of cpu_bringup_set, adding it");
				CPUSET_ADD(cpu_bringup_set, cpuid);
			}
			continue;
		}

		if (!CPU_IN_SET(cpu_bringup_set, cpuid))
			continue;

		ASSERT(cpu[cpuid] == NULL);

		if (setup_cpu_common(cpuid)) {
			cmn_err(CE_PANIC, "cpu%d: setup failed", cpuid);
		}

		common_startup_init(cpu[cpuid], cpuid);

		start_cpu(cpuid, cold_flag_set);

		/*
		 * Because slave_startup() gets fired off after init()
		 * starts, we can't use the '?' trick to do 'boot -v'
		 * printing - so we always direct the 'cpu .. online'
		 * messages to the log.
		 */
		cmn_err(CE_CONT, "!cpu%d initialization complete - online\n",
		    cpuid);

		cpu_state_change_notify(cpuid, CPU_SETUP);

		if (dtrace_cpu_init != NULL)
			(*dtrace_cpu_init)(cpuid);
	}

	/*
	 * since all the cpus are online now, redistribute interrupts to them.
	 */
	intr_redist_all_cpus();

	mutex_exit(&cpu_lock);

	/*
	 * Start the Ecache scrubber.  Must be done after all calls to
	 * cpu_init_private for every cpu (including CPU 0).
	 */
	cpu_init_cache_scrub();

	/* presumably a weak symbol — invoked only if linked in; TODO confirm */
	if (&cpu_mp_init)
		cpu_mp_init();
}
/*
 * Routine to set up a CPU to prepare for starting it up: allocate (or
 * reuse) its cpu struct, wire up its idle thread and dispatcher state,
 * and register it with the various per-CPU subsystems.
 *
 * Caller must hold cpu_lock, and cpu[cpuid] must be empty.
 * Returns 0 on success, or the nonzero value from cpu_intrq_setup().
 */
int
setup_cpu_common(int cpuid)
{
	struct cpu *cp = NULL;
	kthread_id_t tp;
#ifdef TRAPTRACE
	int tt_index;
	TRAP_TRACE_CTL *ctlp;
	caddr_t newbuf;
#endif /* TRAPTRACE */
	extern void idle();
	int rval;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu[cpuid] == NULL);
	ASSERT(ncpus <= max_ncpus);

#ifdef TRAPTRACE
	/*
	 * allocate a traptrace buffer for this CPU.  The first CPU gets
	 * the dedicated trap_tr0 buffer; later CPUs grab a free slot out
	 * of the shared ttrace_buf area.
	 */
	ctlp = &trap_trace_ctl[cpuid];
	if (!trap_tr0_inuse) {
		trap_tr0_inuse = 1;
		newbuf = trap_tr0;
		tt_index = -1;
	} else {
		for (tt_index = 0; tt_index < (max_ncpus-1); tt_index++)
			if (!trap_trace_inuse[tt_index])
				break;
		ASSERT(tt_index < max_ncpus - 1);
		trap_trace_inuse[tt_index] = 1;
		newbuf = (caddr_t)(ttrace_buf + (tt_index * TRAP_TSIZE));
	}
	ctlp->d.vaddr_base = newbuf;
	ctlp->d.offset = ctlp->d.last_offset = 0;
	ctlp->d.limit = trap_trace_bufsize;
	ctlp->d.paddr_base = va_to_pa(newbuf);
	ASSERT(ctlp->d.paddr_base != (uint64_t)-1);
#endif /* TRAPTRACE */

	/*
	 * initialize hv traptrace buffer for this CPU
	 */
	mach_htraptrace_setup(cpuid);

	/*
	 * Obtain pointer to the appropriate cpu structure: use the static
	 * cpu0 if it is still unclaimed, otherwise try the free list, and
	 * fall back to a fresh static-arena allocation.
	 */
	if (cpu0.cpu_flags == 0) {
		cp = &cpu0;
	} else {
		/*
		 * When dynamically allocating cpu structs,
		 * cpus is used as a pointer to a list of freed
		 * cpu structs.
		 */
		if (cpus) {
			/* grab the first cpu struct on the free list */
			cp = cpus;
			if (cp->cpu_next_free)
				cpus = cp->cpu_next_free;
			else
				cpus = NULL;
		}
	}

	if (cp == NULL)
		cp = vmem_xalloc(static_alloc_arena, CPU_ALLOC_SIZE, CPU_ALLOC_SIZE, 0, 0, NULL, NULL, VM_SLEEP);

	bzero(cp, sizeof (*cp));

	cp->cpu_id = cpuid;
	cp->cpu_self = cp;

	/*
	 * Initialize ptl1_panic stack
	 */
	ptl1_init_cpu(cp);

	/*
	 * Initialize the dispatcher for this CPU.
	 */
	disp_cpu_init(cp);

	/*
	 * Bootstrap the CPU's PG data
	 */
	pg_cpu_bootstrap(cp);

	cpu_vm_data_init(cp);

	/*
	 * Now, initialize per-CPU idle thread for this CPU.
	 */
	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_ONPROC, -1);

	cp->cpu_idle_thread = tp;

	/* Bind the idle thread permanently to this CPU. */
	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	/*
	 * Registering a thread in the callback table is usually
	 * done in the initialization code of the thread. In this
	 * case, we do it right after thread creation to avoid
	 * blocking idle thread while registering itself. It also
	 * avoids the possibility of reregistration in case a CPU
	 * restarts its idle thread.
	 */
	CALLB_CPR_INIT_SAFE(tp, "idle");

	init_cpu_info(cp);

	/*
	 * Initialize the interrupt threads for this CPU
	 */
	cpu_intr_alloc(cp, NINTR_THREADS);

	/*
	 * Add CPU to list of available CPUs.
	 * It'll be on the active list after it is started.
	 */
	cpu_add_unit(cp);

	/*
	 * Allocate and init cpu module private data structures,
	 * including scrubber.
	 */
	cpu_init_private(cp);
	populate_idstr(cp);

	/*
	 * Initialize the CPUs physical ID cache, and processor groups
	 */
	pghw_physid_create(cp);
	(void) pg_cpu_init(cp, B_FALSE);

	/*
	 * NOTE(review): on failure the partially initialized cp (idle
	 * thread, dispatcher state, etc.) is not torn down here — TODO
	 * confirm the caller handles or panics on a nonzero return.
	 */
	if ((rval = cpu_intrq_setup(cp)) != 0) {
		return (rval);
	}

	/*
	 * Initialize MMU context domain information.
	 */
	sfmmu_cpu_init(cp);

	return (0);
}