Example #1
int evalProcesses(HANDLE hProcess)
{
    if (NULL == hProcess)
        return 0;

    unsigned int totalMemUsage = 0;
    DWORD processID = GetProcessId(hProcess);
  
    HANDLE hProcessSnapshot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
    if (INVALID_HANDLE_VALUE == hProcessSnapshot)
        return 0;

    PROCESSENTRY32 processEntry = { 0 };
    processEntry.dwSize = sizeof(PROCESSENTRY32);

    // Retrieves information about the first process encountered in a system snapshot
    if(Process32First(hProcessSnapshot, &processEntry)) {
        do {
            // if th32ProcessID == processID, this entry is the target process itself;
            // if th32ParentProcessID == processID, this entry is one of its child processes.
            if ((processEntry.th32ProcessID == processID) || (processEntry.th32ParentProcessID == processID)) {
                // Accumulate the memory used by the matched process (the target itself or one of its children)
                unsigned int procMemUsage = getMemoryInfo(processEntry.th32ProcessID);
                totalMemUsage += procMemUsage;
            }
          // Retrieves information about the next process recorded in a system snapshot.   
        } while(Process32Next(hProcessSnapshot, &processEntry));
    }

    CloseHandle(hProcessSnapshot);
    return totalMemUsage;
}
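A minimal, hypothetical driver for Example #1 (an assumption, not part of the original source): it measures the current process tree and relies on evalProcesses() and the getMemoryInfo() helper it calls being defined as shown above.

#include <windows.h>
#include <tlhelp32.h>
#include <stdio.h>

int main(void)
{
    /* GetCurrentProcess() returns a pseudo-handle, which GetProcessId() accepts,
       so this sums the memory of the current process and its direct children. */
    unsigned int usage = (unsigned int) evalProcesses(GetCurrentProcess());
    printf("Process tree memory usage: %u\n", usage);
    return 0;
}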
Example #2
std::size_t MemoryPool::updateMemoryAuthorizedWithRAM()
{
	_memoryAuthorized = getUsedMemorySize() + getMemoryInfo()._totalRam;
	TUTTLE_TCOUT_X( 5, " - MEMORYPOOL::updateMemoryAuthorizedWithRAM - " );
	TUTTLE_TCOUT_VAR( _memoryAuthorized );
	return _memoryAuthorized;
}
Example #3
void printMemoryInfo(char* pBuff)
{
    MemoryInfoType info = getMemoryInfo();
    sprintf(pBuff,
            "Memory Information:\n"
            "Global Used   : %5u\n"
            "Heap   Used   : %5u\n"
            "Heap Avail.   : %5u\n"
            "System Avail. : %5u\n",
            info.globalUsed, info.heapUsed, info.heapAvailable, info.systemAvailable);
}
Example #4
    /** Returns true if the memory pool is shared by host and device */
    bool isSharedMemoryPool()
    {
        size_t freeInternal = 0;
        size_t freeAtStart = 0;

        getMemoryInfo(&freeAtStart);

        /* alloc 90%, since allocating 100% is a bit risky on a SoC-like device */
        size_t allocSth = size_t( 0.9 * double(freeAtStart) );
        uint8_t* c = new uint8_t[allocSth];
        memset(c, 0, allocSth);

        getMemoryInfo(&freeInternal);
        delete [] c;

        /* if we allocated 90% of available mem, we should have "lost" more
         * than 50% of memory, even with fluctuations from the OS */
        if( double(freeInternal)/double(freeAtStart) < 0.5 )
            return true;

        return false;
    }
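The raw new[] above throws std::bad_alloc if the 90% request cannot be satisfied. Below is a hedged sketch of the same probe, not part of the original class: the function name, the free-standing getMemoryInfo(std::size_t*) declaration, and the "treat a failed probe as not shared" policy are all assumptions for illustration.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <new>

void getMemoryInfo(std::size_t* freeBytes);   /* assumed: same helper as in the snippet above */

bool isSharedMemoryPoolNoThrow()
{
    std::size_t freeAtStart = 0;
    std::size_t freeInternal = 0;
    getMemoryInfo(&freeAtStart);

    /* alloc 90%, as above, but with std::nothrow so failure is reported instead of thrown */
    const std::size_t allocSth = std::size_t(0.9 * double(freeAtStart));
    std::uint8_t* c = new (std::nothrow) std::uint8_t[allocSth];
    if (c == nullptr)
        return false;                 /* assumed policy: a failed probe counts as "not shared" */

    std::memset(c, 0, allocSth);      /* touch every byte so the pages are actually committed */
    getMemoryInfo(&freeInternal);
    delete[] c;

    /* same heuristic as above: shared pool if reported free memory dropped by more than 50% */
    return double(freeInternal) / double(freeAtStart) < 0.5;
}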
Example #5
std::ostream& operator<<( std::ostream& os, const MemoryPool& memoryPool )
{
	os << "[Memory Pool] Unused data:           " << memoryPool.getDataUnusedSize() << " bytes\n";
	os << "[Memory Pool] All datas:             " << memoryPool.getDataUsedSize() << " bytes\n";
	os << "[Memory Pool] Total RAM:             " << getMemoryInfo()._totalRam << " bytes\n";
	os << "\n";
	os << "[Memory Pool] Used memory:           " << memoryPool.getUsedMemorySize() << " bytes\n";
	os << "[Memory Pool] Allocated memory:      " << memoryPool.getAllocatedMemorySize() << " bytes\n";
	os << "[Memory Pool] Max memory:            " << memoryPool.getMaxMemorySize() << " bytes\n";
	os << "[Memory Pool] Available memory size: " << memoryPool.getAvailableMemorySize() << " bytes\n";
	os << "[Memory Pool] Wasted memory:         " << memoryPool.getWastedMemorySize() << " bytes\n";
	return os;
}
Example #6
static int
printInfo (char *word[], char *word_eol[], void *user_data)
{
	/* query WMI info only the first time SysInfo is called, then cache it to save time */
	if (firstRun)
	{
		hexchat_printf (ph, "%s first execution, querying and caching WMI info...\n", name);
		wmiOs = getWmiInfo (0);
		wmiCpu = getWmiInfo (1);
		wmiVga = getWmiInfo (2);
		firstRun = 0;
	}
	if (hexchat_list_int (ph, NULL, "type") >= 2)
	{
		/* uptime will work correctly for up to 50 days, should be enough */
		hexchat_commandf (ph, "ME ** SysInfo ** Client: HexChat %s (x%d) ** OS: %s ** CPU: %s (%s) ** RAM: %s ** VGA: %s ** Uptime: %.2f Hours **",
			hexchat_get_info (ph, "version"),
			getCpuArch (),
			wmiOs,
			wmiCpu,
			getCpuMhz (),
			getMemoryInfo (),
			wmiVga, (float) GetTickCount() / 1000 / 60 / 60);
	}
	else
	{
		hexchat_printf (ph, " * Client:  HexChat %s (x%d)\n", hexchat_get_info (ph, "version"), getCpuArch ());
		hexchat_printf (ph, " * OS:      %s\n", wmiOs);
		hexchat_printf (ph, " * CPU:     %s (%s)\n", wmiCpu, getCpuMhz ());
		hexchat_printf (ph, " * RAM:     %s\n", getMemoryInfo ());
		hexchat_printf (ph, " * VGA:     %s\n", wmiVga);
		hexchat_printf (ph, " * Uptime:  %.2f Hours\n", (float) GetTickCount() / 1000 / 60 / 60);
	}

	return HEXCHAT_EAT_HEXCHAT;
}
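The comment above notes that the GetTickCount()-based uptime only works for roughly the first 50 days (the 32-bit tick counter wraps after about 49.7 days). A small stand-alone sketch of the same arithmetic on the 64-bit counter, which does not wrap (an alternative, not part of the plugin; requires Windows Vista or later):

#include <windows.h>
#include <stdio.h>

int main(void)
{
    /* Same conversion as the plugin (milliseconds -> hours), but based on GetTickCount64() */
    double uptimeHours = (double) GetTickCount64() / 1000 / 60 / 60;
    printf("Uptime: %.2f Hours\n", uptimeHours);
    return 0;
}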
Example #7
/** Decides whether a ManagedWorkspace2D should be created for the current memory conditions
    and the workspace parameters NVectors, XLength, and YLength.
    @param NVectors :: the number of vectors
    @param XLength :: the size of the X vector
    @param YLength :: the size of the Y vector
    @param isCompressedOK :: The address of a boolean indicating if the compression succeeded or not
    @return true if a managed workspace is needed
 */
bool MemoryManagerImpl::goForManagedWorkspace(std::size_t NVectors, std::size_t XLength, std::size_t YLength, bool* isCompressedOK)
{
  int AlwaysInMemory;// Check for disabling flag
  if (Kernel::ConfigService::Instance().getValue("ManagedWorkspace.AlwaysInMemory", AlwaysInMemory)
      && AlwaysInMemory)
    return false;

  // check potential size to create and determine trigger  
  int availPercent;
  if (!Kernel::ConfigService::Instance().getValue("ManagedWorkspace.LowerMemoryLimit", availPercent))
  {
    // Default to 40% if missing
    availPercent = 40;
  }
  if (availPercent > 150)
  {
    g_log.warning("ManagedWorkspace.LowerMemoryLimit is not allowed to be greater than 150%.");
    availPercent = 150;
  }
  if (availPercent < 0)
  {
    g_log.warning("Negative value for ManagedWorkspace.LowerMemoryLimit. Setting to 0.");
    availPercent = 0;
  }
  if (availPercent > 90)
  {
    g_log.warning("ManagedWorkspace.LowerMemoryLimit is greater than 90%. Danger of memory errors.");
  }
  MemoryInfo mi = getMemoryInfo();
  size_t triggerSize = mi.availMemory / 100 * availPercent / sizeof(double);
  // Avoid int overflow
  size_t wsSize = 0;
  if (NVectors > 1024)
      wsSize = NVectors / 1024 * (YLength * 2 + XLength);
  else if (YLength * 2 + XLength > 1024)
      wsSize = (YLength * 2 + XLength) / 1024 * NVectors;
  else
      wsSize = NVectors * (YLength * 2 + XLength) / 1024;

//  g_log.debug() << "Requested memory: " << (wsSize * sizeof(double))/1024 << " MB. " << std::endl;
//  g_log.debug() << "Available memory: " << mi.availMemory << " KB.\n";
//  g_log.debug() << "MWS trigger memory: " << triggerSize * sizeof(double) << " KB.\n";

  bool goManaged = (wsSize > triggerSize);
  // If we're on the cusp of going managed, add in the reserved but unused memory
  if( goManaged )
  {
    // This is called separately as on some systems it is an expensive calculation.
    // See Kernel/src/Memory.cpp - reservedMem() for more details
    Kernel::MemoryStats mem_stats;
    const size_t reserved = mem_stats.reservedMem();
//    g_log.debug() << "Windows - Adding reserved but unused memory of " << reserved << " KB\n";
    mi.availMemory += reserved;
    triggerSize += reserved / 100 * availPercent / sizeof(double);
    goManaged = (wsSize > triggerSize);

    g_log.debug() << "Requested memory: " << (wsSize * sizeof(double))/1024 << " MB." << std::endl;
    g_log.debug() << "Available memory: " << (mi.availMemory)/1024 << " MB." << std::endl;
    g_log.debug() << "ManagedWS trigger memory: " << (triggerSize * sizeof(double))/1024 << " MB." << std::endl;
  }

  if (isCompressedOK)
  {
    if (goManaged)
    {
      int notOK = 0;
      if ( !Kernel::ConfigService::Instance().getValue("CompressedWorkspace.DoNotUse",notOK) ) notOK = 0;
      if (notOK) *isCompressedOK = false;
      else
      {
        double compressRatio;
        if (!Kernel::ConfigService::Instance().getValue("CompressedWorkspace.EstimatedCompressRatio",compressRatio)) compressRatio = 4.;
        int VectorsPerBlock;
        if (!Kernel::ConfigService::Instance().getValue("CompressedWorkspace.VectorsPerBlock",VectorsPerBlock)) VectorsPerBlock = 4;
        double compressedSize = (1./compressRatio + 100.0*static_cast<double>(VectorsPerBlock)/static_cast<double>(NVectors))
                                      * static_cast<double>(wsSize);
        double memoryLeft = (static_cast<double>(triggerSize)/availPercent*100. - compressedSize)/1024. * sizeof(double);
        // To prevent bad allocation on Windows when free memory is too low.
        if (memoryLeft < 200.)
          *isCompressedOK = false;
        else
          *isCompressedOK =  compressedSize < static_cast<double>(triggerSize);
      }
    }
    else
    {
      *isCompressedOK = false;
    }
  }

  return goManaged;
}
Example #8
/*   hardware or reads values from the operating system. */
		static int 
nvml_hardware_read( long long *value, int which_one)
		//, nvml_context_t *ctx)
{
		nvml_native_event_entry_t *entry;
		nvmlDevice_t handle;
		int cudaIdx = -1;

		entry = &nvml_native_table[which_one];
		*value = (long long) -1;
		/* replace entry->resources with the current cuda_device->nvml device */
		cudaGetDevice( &cudaIdx );

		if ( cudaIdx < 0 || cudaIdx >= device_count )
			return PAPI_EINVAL;

		/* Make sure the device we are running on has the requested event */
		if ( !HAS_FEATURE( features[cudaIdx] , entry->type) ) 
				return PAPI_EINVAL;

		handle = devices[cudaIdx];

		switch (entry->type) {
				case FEATURE_CLOCK_INFO:
						*value =  getClockSpeed( 	handle, 
										(nvmlClockType_t)entry->options.clock );
						break;
				case FEATURE_ECC_LOCAL_ERRORS:
						*value = getEccLocalErrors( 	handle, 
										(nvmlEccBitType_t)entry->options.ecc_opts.bits, 
										(int)entry->options.ecc_opts.which_one);
						break;
				case FEATURE_FAN_SPEED:
						*value = getFanSpeed( handle );
						break;
				case FEATURE_MAX_CLOCK:
						*value = getMaxClockSpeed( 	handle, 
										(nvmlClockType_t)entry->options.clock );
						break;
				case FEATURE_MEMORY_INFO:
						*value = getMemoryInfo( 	handle, 
										(int)entry->options.which_one );
						break;
				case FEATURE_PERF_STATES:
						*value = getPState( handle );
						break;
				case FEATURE_POWER:
						*value = getPowerUsage( handle );
						break;
				case FEATURE_TEMP:
						*value = getTemperature( handle );
						break;
				case FEATURE_ECC_TOTAL_ERRORS:
						*value = getTotalEccErrors( 	handle, 
										(nvmlEccBitType_t)entry->options.ecc_opts.bits );
						break;
				case FEATURE_UTILIZATION:
						*value = getUtilization( 	handle, 
										(int)entry->options.which_one );
						break;
				default:
						return PAPI_EINVAL;
		}

		return PAPI_OK;
}
Example #9
std::size_t MemoryPool::updateMemoryAuthorizedWithRAM()
{
	_memoryAuthorized = /*getUsedMemorySize() +*/ getMemoryInfo()._totalRam;
	TUTTLE_LOG_DEBUG( TUTTLE_TRACE, "[Memory Pool] update memory authorized with RAM: " << _memoryAuthorized );
	return _memoryAuthorized;
}