Example #1
int CollectorLinux::preCollect(const CollectorHints& hints, uint64_t /* iTick */)
{
    std::vector<RTPROCESS> processes;
    hints.getProcesses(processes);

    std::vector<RTPROCESS>::iterator it;
    for (it = processes.begin(); it != processes.end(); it++)
    {
        VMProcessStats vmStats;
        int rc = getRawProcessStats(*it, &vmStats.cpuUser, &vmStats.cpuKernel, &vmStats.pagesUsed);
        /* On failure, do NOT stop. Just skip the entry. Having the stats for
         * one (probably broken) process frozen/zero is a minor issue compared
         * to not updating many process stats and the host cpu stats. */
        if (RT_SUCCESS(rc))
            mProcessStats[*it] = vmStats;
    }
    if (hints.isHostCpuLoadCollected() || mProcessStats.size())
    {
        _getRawHostCpuLoad();
    }
    return VINF_SUCCESS;
}
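The getRawProcessStats() helper called above is not part of this listing. On Linux, a process's user/kernel CPU ticks and resident page count can be read from /proc/<pid>/stat (fields 14, 15 and 24). Below is a minimal, self-contained sketch of such a reader; the three out-parameters mirror the call site above, but everything else (the name, the bool return instead of an IPRT status code) is an assumption, not the VirtualBox implementation.

// Sketch of a /proc-based per-process stats reader; an assumption about what
// getRawProcessStats() does, not the VirtualBox code (which returns an IPRT status).
#include <fstream>
#include <sstream>
#include <string>
#include <cstdint>

static bool readRawProcessStats(int pid, uint64_t *pcpuUser,
                                uint64_t *pcpuKernel, uint64_t *ppagesUsed)
{
    std::ifstream file("/proc/" + std::to_string(pid) + "/stat");
    std::string line;
    if (!std::getline(file, line))
        return false;               // process gone or unreadable; caller skips it

    // The command name (field 2) is parenthesized and may contain spaces,
    // so locate the closing ')' and parse the fixed-position fields after it.
    std::string::size_type pos = line.rfind(')');
    if (pos == std::string::npos)
        return false;
    std::istringstream rest(line.substr(pos + 1));

    std::string skip;
    unsigned long long utime = 0, stime = 0;
    long rss = 0;

    rest >> skip;                                   // field  3: state
    for (int i = 4; i <= 13; ++i) rest >> skip;     // fields 4..13: ppid .. cmajflt
    rest >> utime >> stime;                         // fields 14/15: utime/stime (clock ticks)
    for (int i = 16; i <= 23; ++i) rest >> skip;    // fields 16..23: cutime .. vsize
    rest >> rss;                                    // field 24: resident set size (pages)
    if (!rest)
        return false;

    *pcpuUser   = utime;
    *pcpuKernel = stime;
    *ppagesUsed = (uint64_t)rss;
    return true;
}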
Example #2
void GuestRamUsage::preCollect(CollectorHints& hints,  uint64_t /* iTick */)
{
    hints.collectGuestStats(mCGuest->getProcess());
}
Example #3
void MachineCpuLoadRaw::preCollect(CollectorHints& hints, uint64_t /* iTick */)
{
    hints.collectProcessCpuLoad(mProcess);
}
Example #4
void MachineRamUsage::preCollect(CollectorHints& hints, uint64_t /* iTick */)
{
    hints.collectProcessRamUsage(mProcess);
}
Example #5
void HostRamVmm::preCollect(CollectorHints& hints, uint64_t /* iTick */)
{
    hints.collectHostRamVmm();
}
Example #6
void HostRamUsage::preCollect(CollectorHints& hints, uint64_t /* iTick */)
{
    hints.collectHostRamUsage();
}
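HostRamUsage::preCollect() only registers the hint; the numbers themselves are gathered later by the platform collector. On Linux the usual source is /proc/meminfo. A small sketch under that assumption; the function name and the kB out-parameters are hypothetical, not VirtualBox API.

// Sketch: read host RAM figures from /proc/meminfo, the kind of data the
// collectHostRamUsage() hint requests. Not the VirtualBox implementation.
#include <fstream>
#include <sstream>
#include <string>
#include <cstdint>

static bool readHostRamUsage(uint64_t *ptotalKb, uint64_t *pavailableKb)
{
    std::ifstream file("/proc/meminfo");
    std::string line;
    bool haveTotal = false, haveAvail = false;

    while (std::getline(file, line))
    {
        std::istringstream iss(line);
        std::string key;
        uint64_t valueKb = 0;
        iss >> key >> valueKb;                           // e.g. "MemTotal: 16303472 kB"
        if (key == "MemTotal:")     { *ptotalKb = valueKb;     haveTotal = true; }
        if (key == "MemAvailable:") { *pavailableKb = valueKb; haveAvail = true; }
    }
    return haveTotal && haveAvail;
}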
Example #7
void HostCpuLoadRaw::preCollect(CollectorHints& hints, uint64_t /* iTick */)
{
    hints.collectHostCpuLoad();
}
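As with the RAM example, HostCpuLoadRaw::preCollect() only flags the work; the raw counters are read afterwards by the platform collector (see _getRawHostCpuLoad() in Example #1). On Linux the aggregate user/kernel/idle ticks come from the first line of /proc/stat. A minimal sketch under that assumption; the out-parameters follow the raw-load getters used elsewhere in these examples, everything else is hypothetical.

// Sketch: aggregate host CPU ticks from the first line of /proc/stat,
// roughly the data _getRawHostCpuLoad() is expected to cache. Not VirtualBox code.
#include <fstream>
#include <sstream>
#include <string>
#include <cstdint>

static bool readRawHostCpuLoad(uint64_t *puser, uint64_t *pkernel, uint64_t *pidle)
{
    std::ifstream file("/proc/stat");
    std::string line;
    if (!std::getline(file, line))
        return false;

    // First line: "cpu  user nice system idle iowait irq softirq ..."
    std::istringstream iss(line);
    std::string cpu;
    uint64_t user = 0, nice = 0, system = 0, idle = 0;
    iss >> cpu >> user >> nice >> system >> idle;
    if (!iss || cpu != "cpu")
        return false;

    *puser   = user + nice;   // treat 'nice' time as user time
    *pkernel = system;
    *pidle   = idle;
    return true;
}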
Example #8
void MachineNetRate::preCollect(CollectorHints& hints, uint64_t /* iTick */)
{
    hints.collectGuestStats(mCGuest->getProcess());
}
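All of the preCollect() overrides shown so far only record what the next collection pass will need; CollectorHints acts as an accumulator that the platform collectors (Examples #1 and #9) query afterwards. The sketch below is inferred from the calls used in these examples: the method names and the COLLECT_CPU_LOAD / COLLECT_RAM_USAGE flags appear in the listing, while the container choice, COLLECT_GUEST_STATS and all other internals are assumptions.

// Sketch of a hint accumulator matching the calls in the examples above.
// Only the method names come from the examples; the internals are assumptions.
#include <map>
#include <vector>
#include <cstdint>

typedef uint32_t RTPROCESS;      // stand-in for the IPRT process id type (assumption)

enum
{
    COLLECT_CPU_LOAD    = 0x1,   // flags tested in Example #9
    COLLECT_RAM_USAGE   = 0x2,
    COLLECT_GUEST_STATS = 0x4    // assumed; not visible in these examples
};

class CollectorHints
{
public:
    typedef std::map<RTPROCESS, uint32_t> ProcessList;  // pid -> flag mask (layout assumed)

    CollectorHints() : mHostCpuLoad(false), mHostRamUsage(false), mHostRamVmm(false) {}

    void collectHostCpuLoad()                  { mHostCpuLoad = true; }
    void collectHostRamUsage()                 { mHostRamUsage = true; }
    void collectHostRamVmm()                   { mHostRamVmm = true; }
    void collectProcessCpuLoad(RTPROCESS pid)  { mProcesses[pid] |= COLLECT_CPU_LOAD; }
    void collectProcessRamUsage(RTPROCESS pid) { mProcesses[pid] |= COLLECT_RAM_USAGE; }
    void collectGuestStats(RTPROCESS pid)      { mProcesses[pid] |= COLLECT_GUEST_STATS; }

    bool isHostCpuLoadCollected() const        { return mHostCpuLoad; }
    const ProcessList &getProcessFlags() const { return mProcesses; }

    void getProcesses(std::vector<RTPROCESS> &procs) const
    {
        procs.clear();
        for (ProcessList::const_iterator it = mProcesses.begin(); it != mProcesses.end(); ++it)
            procs.push_back(it->first);
    }

private:
    bool        mHostCpuLoad, mHostRamUsage, mHostRamVmm;
    ProcessList mProcesses;
};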
Example #9
int CollectorWin::preCollect(const CollectorHints& hints, uint64_t /* iTick */)
{
    LogFlowThisFuncEnter();

    uint64_t user, kernel, idle, total;
    int rc = getRawHostCpuLoad(&user, &kernel, &idle);
    if (RT_FAILURE(rc))
        return rc;
    total = user + kernel + idle;

    DWORD dwError;
    const CollectorHints::ProcessList& processes = hints.getProcessFlags();
    CollectorHints::ProcessList::const_iterator it;

    mProcessStats.clear();

    for (it = processes.begin(); it != processes.end() && RT_SUCCESS(rc); it++)
    {
        RTPROCESS process = it->first;
        HANDLE h = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                               FALSE, process);

        if (!h)
        {
            dwError = GetLastError();
            Log (("OpenProcess() -> 0x%x\n", dwError));
            rc = RTErrConvertFromWin32(dwError);
            break;
        }

        VMProcessStats vmStats;
        if ((it->second & COLLECT_CPU_LOAD) != 0)
        {
            FILETIME ftCreate, ftExit, ftKernel, ftUser;
            if (!GetProcessTimes(h, &ftCreate, &ftExit, &ftKernel, &ftUser))
            {
                dwError = GetLastError();
                Log (("GetProcessTimes() -> 0x%x\n", dwError));
                rc = RTErrConvertFromWin32(dwError);
            }
            else
            {
                vmStats.cpuKernel = FILETTIME_TO_100NS(ftKernel);
                vmStats.cpuUser   = FILETTIME_TO_100NS(ftUser);
                vmStats.cpuTotal  = total;
            }
        }
        if (RT_SUCCESS(rc) && (it->second & COLLECT_RAM_USAGE) != 0)
        {
            PROCESS_MEMORY_COUNTERS pmc;
            if (!GetProcessMemoryInfo(h, &pmc, sizeof(pmc)))
            {
                dwError = GetLastError();
                Log (("GetProcessMemoryInfo() -> 0x%x\n", dwError));
                rc = RTErrConvertFromWin32(dwError);
            }
            else
                vmStats.ramUsed = pmc.WorkingSetSize;
        }
        CloseHandle(h);
        mProcessStats[process] = vmStats;
    }

    LogFlowThisFuncLeave();

    return rc;
}
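The FILETTIME_TO_100NS macro (the doubled 'T' is how the source spells it) is not shown in this listing; it simply folds the two 32-bit halves of a Win32 FILETIME into one 64-bit count of 100 ns intervals. Below is a standalone sketch of that conversion plus a minimal GetProcessTimes() call on the current process; the helper name is hypothetical, the Win32 calls are standard.

// Sketch: fold a Win32 FILETIME into a 64-bit count of 100 ns intervals,
// the conversion FILETTIME_TO_100NS performs in the example above.
#include <windows.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t fileTimeTo100Ns(const FILETIME &ft)
{
    return ((uint64_t)ft.dwHighDateTime << 32) | ft.dwLowDateTime;
}

int main()
{
    FILETIME ftCreate, ftExit, ftKernel, ftUser;
    if (!GetProcessTimes(GetCurrentProcess(), &ftCreate, &ftExit, &ftKernel, &ftUser))
    {
        printf("GetProcessTimes() failed: 0x%lx\n", GetLastError());
        return 1;
    }
    printf("kernel: %llu, user: %llu (100 ns units)\n",
           (unsigned long long)fileTimeTo100Ns(ftKernel),
           (unsigned long long)fileTimeTo100Ns(ftUser));
    return 0;
}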