int CollectorLinux::preCollect(const CollectorHints& hints, uint64_t /* iTick */) { std::vector<RTPROCESS> processes; hints.getProcesses(processes); std::vector<RTPROCESS>::iterator it; for (it = processes.begin(); it != processes.end(); it++) { VMProcessStats vmStats; int rc = getRawProcessStats(*it, &vmStats.cpuUser, &vmStats.cpuKernel, &vmStats.pagesUsed); /* On failure, do NOT stop. Just skip the entry. Having the stats for * one (probably broken) process frozen/zero is a minor issue compared * to not updating many process stats and the host cpu stats. */ if (RT_SUCCESS(rc)) mProcessStats[*it] = vmStats; } if (hints.isHostCpuLoadCollected() || mProcessStats.size()) { _getRawHostCpuLoad(); } return VINF_SUCCESS; }
/** Request collection of guest statistics for this metric's VM process. */
void GuestRamUsage::preCollect(CollectorHints& hints, uint64_t /* iTick */)
{
    hints.collectGuestStats(mCGuest->getProcess());
}
/** Request collection of CPU load figures for the machine's process. */
void MachineCpuLoadRaw::preCollect(CollectorHints& hints, uint64_t /* iTick */)
{
    hints.collectProcessCpuLoad(mProcess);
}
/** Request collection of RAM usage figures for the machine's process. */
void MachineRamUsage::preCollect(CollectorHints& hints, uint64_t /* iTick */)
{
    hints.collectProcessRamUsage(mProcess);
}
/** Request collection of VMM-related host RAM figures. */
void HostRamVmm::preCollect(CollectorHints& hints, uint64_t /* iTick */)
{
    hints.collectHostRamVmm();
}
/** Request collection of overall host RAM usage figures. */
void HostRamUsage::preCollect(CollectorHints& hints, uint64_t /* iTick */)
{
    hints.collectHostRamUsage();
}
/** Request collection of raw host CPU load counters. */
void HostCpuLoadRaw::preCollect(CollectorHints& hints, uint64_t /* iTick */)
{
    hints.collectHostCpuLoad();
}
/** Request collection of guest statistics for this metric's VM process. */
void MachineNetRate::preCollect(CollectorHints& hints, uint64_t /* iTick */)
{
    hints.collectGuestStats(mCGuest->getProcess());
}
/**
 * Windows implementation of the per-tick pre-collection pass.
 *
 * Takes one snapshot of the raw host CPU times and then, for every process
 * listed in the hints, snapshots per-process CPU times and/or working-set
 * size as requested by the per-process flags, caching results in
 * mProcessStats.
 *
 * NOTE(review): unlike CollectorLinux::preCollect, a failure for any single
 * process (e.g. OpenProcess denied) breaks out of the loop and returns the
 * error, so the remaining processes are skipped for this tick — confirm this
 * asymmetry with the Linux collector is intentional.
 *
 * @param hints  Collection hints carrying the process list and flags.
 * @return IPRT status code; failure of any Win32 query is converted via
 *         RTErrConvertFromWin32.
 */
int CollectorWin::preCollect(const CollectorHints& hints, uint64_t /* iTick */)
{
    LogFlowThisFuncEnter();

    uint64_t user, kernel, idle, total;
    /* Host-wide CPU times; 'total' becomes the cpuTotal reference stored with
     * each process sample below. */
    int rc = getRawHostCpuLoad(&user, &kernel, &idle);
    if (RT_FAILURE(rc))
        return rc;
    total = user + kernel + idle;

    DWORD dwError;
    const CollectorHints::ProcessList& processes = hints.getProcessFlags();
    CollectorHints::ProcessList::const_iterator it;

    /* The stats map is rebuilt from scratch every tick. */
    mProcessStats.clear();

    /* Loop condition also checks rc so a failure inside the body (without an
     * explicit break) still terminates the loop. */
    for (it = processes.begin(); it != processes.end() && RT_SUCCESS(rc); it++)
    {
        RTPROCESS process = it->first;
        HANDLE h = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, FALSE, process);

        if (!h)
        {
            dwError = GetLastError();
            Log (("OpenProcess() -> 0x%x\n", dwError));
            rc = RTErrConvertFromWin32(dwError);
            break;
        }

        VMProcessStats vmStats;
        /* CPU times are only queried when the hint flags ask for them. */
        if ((it->second & COLLECT_CPU_LOAD) != 0)
        {
            FILETIME ftCreate, ftExit, ftKernel, ftUser;
            if (!GetProcessTimes(h, &ftCreate, &ftExit, &ftKernel, &ftUser))
            {
                dwError = GetLastError();
                Log (("GetProcessTimes() -> 0x%x\n", dwError));
                rc = RTErrConvertFromWin32(dwError);
            }
            else
            {
                vmStats.cpuKernel = FILETTIME_TO_100NS(ftKernel);
                vmStats.cpuUser = FILETTIME_TO_100NS(ftUser);
                vmStats.cpuTotal = total;
            }
        }
        /* RAM usage is skipped if the CPU query above already failed. */
        if (RT_SUCCESS(rc) && (it->second & COLLECT_RAM_USAGE) != 0)
        {
            PROCESS_MEMORY_COUNTERS pmc;
            if (!GetProcessMemoryInfo(h, &pmc, sizeof(pmc)))
            {
                dwError = GetLastError();
                Log (("GetProcessMemoryInfo() -> 0x%x\n", dwError));
                rc = RTErrConvertFromWin32(dwError);
            }
            else
                vmStats.ramUsed = pmc.WorkingSetSize;
        }
        /* Handle is released before the entry is stored, on all paths that
         * reach this point. */
        CloseHandle(h);
        /* NOTE(review): the entry is stored even when one of the queries
         * above failed, so it may be only partially filled — confirm
         * VMProcessStats default-initializes its fields and that consumers
         * tolerate zero values. */
        mProcessStats[process] = vmStats;
    }

    LogFlowThisFuncLeave();
    return rc;
}