static int hwloc_aix_get_pid_getthrds_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, pid_t pid, hwloc_bitmap_t hwloc_set, int flags __hwloc_attribute_unused) { #if HWLOC_BITS_PER_LONG == 64 struct thrdentry64 thread_info; tid64_t next_thread; #else struct thrdsinfo thread_info; tid_t next_thread; #endif next_thread = 0; /* TODO: get multiple at once */ #if HWLOC_BITS_PER_LONG == 64 while (getthrds64 (pid, &thread_info, sizeof (thread_info), &next_thread, 1) == 1) { #else while (getthrds (pid, &thread_info, sizeof (thread_info), &next_thread, 1) == 1) { #endif if (PROCESSOR_CLASS_ANY != thread_info.ti_cpuid) hwloc_bitmap_set(hwloc_set, thread_info.ti_cpuid); else hwloc_bitmap_fill(hwloc_set); } /* TODO: what if the thread list changes and we get nothing? */ return 0; } static int hwloc_aix_get_tid_getthrds_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, tid_t tid, hwloc_bitmap_t hwloc_set, int flags __hwloc_attribute_unused) { #if HWLOC_BITS_PER_LONG == 64 struct thrdentry64 thread_info; tid64_t next_thread; #else struct thrdsinfo thread_info; tid_t next_thread; #endif pid_t pid = getpid(); next_thread = 0; /* TODO: get multiple at once */ #if HWLOC_BITS_PER_LONG == 64 while (getthrds64 (pid, &thread_info, sizeof (thread_info), &next_thread, 1) == 1) { #else while (getthrds (pid, &thread_info, sizeof (thread_info), &next_thread, 1) == 1) { #endif if (thread_info.ti_tid == tid) { if (PROCESSOR_CLASS_ANY != thread_info.ti_cpuid) hwloc_bitmap_set(hwloc_set, thread_info.ti_cpuid); else hwloc_bitmap_fill(hwloc_set); break; } } /* TODO: what if the thread goes away in the meantime? 
*/ return 0; } static int hwloc_aix_set_thisproc_cpubind(hwloc_topology_t topology, hwloc_const_bitmap_t hwloc_set, int flags) { rsid_t who; who.at_pid = getpid(); return hwloc_aix_set_sth_cpubind(topology, R_PROCESS, who, who.at_pid, hwloc_set, flags); } static int hwloc_aix_get_thisproc_cpubind(hwloc_topology_t topology, hwloc_bitmap_t hwloc_set, int flags) { int ret, bound; rsid_t who; who.at_pid = getpid(); ret = hwloc_aix_get_sth_rset_cpubind(topology, R_PROCESS, who, hwloc_set, flags, &bound); if (!ret && !bound) { hwloc_bitmap_zero(hwloc_set); ret = hwloc_aix_get_pid_getthrds_cpubind(topology, who.at_pid, hwloc_set, flags); } return ret; }
// Read this thread's (or, when get_lwp() == 0, the whole process's) raw
// hardware cycle count via the AIX PMAPI.  The target process must be
// paused while the counters are read; we pause/resume it here if needed.
// Returns the (monotonically clamped) cycle count, or -1 on failure.
rawTime64 pd_thread::getRawCpuTime_hw()
{
#ifdef USES_PMAPI
  // Hardware method, using the PMAPI
  int ret;
  static bool need_init = true;
  if (need_init) {
    pm_info_t pinfo;
#ifdef PMAPI_GROUPS
    pm_groups_info_t pginfo;
    ret = pm_init(PM_VERIFIED | PM_CAVEAT | PM_GET_GROUPS, &pinfo, &pginfo);
#else
    ret = pm_init(PM_VERIFIED | PM_CAVEAT, &pinfo);
#endif
    // We ignore the result, but pm_init must be called once to
    // initialize the library before any other PMAPI call.
    if (ret)
      pm_error("PARADYNos_init: pm_init", ret);
    need_init = false;
  }

  int lwp_to_use;
  tid_t indexPtr = 0;
  struct thrdsinfo thrd_buf;
  if (get_lwp() > 0) {
    lwp_to_use = get_lwp();
  } else {
    /* get_lwp() == 0 means data for the entire process; pm_get_data_group
       accepts any lwp in the group, so grab the lwp of any active thread. */
    if (getthrds(pd_proc->getPid(), &thrd_buf, sizeof(struct thrdsinfo),
                 &indexPtr, 1) == 0) {
      // perhaps the process ended
      return -1;
    }
    lwp_to_use = thrd_buf.ti_tid;
  }

  // PM counters are only valid when the process is paused.
  bool needToCont = !(pd_proc->isStopped());
  if (needToCont) {
    if (!pd_proc->pauseProc())
      return -1;  // pause failed, so returning failure
  }

  pm_data_t data;
  if (get_lwp() > 0) {
    ret = pm_get_data_thread(pd_proc->getPid(), lwp_to_use, &data);
  } else {
    // lwp == 0 means get data for the entire process (ie. all lwps)
    ret = pm_get_data_group(pd_proc->getPid(), lwp_to_use, &data);
    while (ret) {
      // If this failed, the lwp retrieved via getthrds may have been in
      // the process of being deleted; advance to the next thread and retry.
      if (getthrds(pd_proc->getPid(), &thrd_buf, sizeof(struct thrdsinfo),
                   &indexPtr, 1) == 0) {
        // couldn't get a valid lwp, go to standard error handling
        ret = 1;
        break;
      }
      lwp_to_use = thrd_buf.ti_tid;
      ret = pm_get_data_group(pd_proc->getPid(), lwp_to_use, &data);
    }
  }

  if (ret) {
    // BUG FIX: resume the process before bailing out.  The original code
    // returned -1 here without continuing, leaving the target permanently
    // paused whenever we were the ones who paused it.
    if (needToCont)
      pd_proc->continueProc();
    if (!pd_proc->hasExited()) {
      pm_error("dyn_lwp::getRawCpuTime_hw: pm_get_data_thread", ret);
      fprintf(stderr, "Attempted pm_get_data(%d, %d, %d)\n",
              pd_proc->getPid(), get_lwp(), lwp_to_use);
    }
    return -1;
  }

  rawTime64 result = data.accu[get_hwctr_binding(PM_CYC_EVENT)];

  // Continue the process
  if (needToCont)
    pd_proc->continueProc();

  // Clamp against counter rollback so the reported time never decreases.
  if (result < hw_previous_) {
    cerr << "rollback in dyn_lwp::getRawCpuTime_hw, lwp_to_use: "
         << lwp_to_use << ", lwp: " << get_lwp() << ", result: " << result
         << ", previous result: " << hw_previous_ << "\n";
    result = hw_previous_;
  } else {
    hw_previous_ = result;
  }

  return result;
#else
  return 0;
#endif
}