Example #1
QueryData genProcessEnvs(QueryContext &context) {
  QueryData results;

  auto pidlist = getProcList(context);
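  // argmax bounds the buffer used when reading each process's environment.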
  int argmax = genMaxArgs();
  for (auto &pid : pidlist) {
    if (!context.constraints["pid"].matches<int>(pid)) {
      // Optimization: skip pids that don't match the query's pid constraint.
      continue;
    }

    auto env = getProcEnv(pid, argmax);
    for (const auto& env_pair : env) {
      Row r;

      r["pid"] = INTEGER(pid);
      r["key"] = env_pair.first;
      r["value"] = env_pair.second;

      results.push_back(r);
    }
  }

  return results;
}
Example #2
QueryData genProcessMemoryMap(QueryContext& context) {
  QueryData results;

  std::set<long> pidlist;
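  // Seed the pid set from any EQUALS constraints on the pid column.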
  if (context.constraints.count("pid") > 0 &&
      context.constraints.at("pid").exists(EQUALS)) {
    for (const auto& pid : context.constraints.at("pid").getAll<int>(EQUALS)) {
      if (pid > 0) {
        pidlist.insert(pid);
      }
    }
  }
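  // No usable pid constraints: fall back to the full process list.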
  if (pidlist.empty()) {
    getProcList(pidlist);
  }

  for (const auto& pid : pidlist) {
    auto s = genMemoryMap(pid, results);
    if (!s.ok()) {
      VLOG(1) << s.getMessage();
    }
  }

  return results;
}
Example #3
QueryData genProcessMemoryMap(QueryContext& context) {
  QueryData results;

  auto pidlist = getProcList(context);
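  // pid constraints from the query context are handled inside getProcList.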
  for (const auto& pid : pidlist) {
    genProcessMap(pid, results);
  }

  return results;
}
Example #4
QueryData genProcessEnvs(QueryContext& context) {
  QueryData results;

  auto pidlist = getProcList(context);
  for (const auto& pid : pidlist) {
    genProcessEnvironment(pid, results);
  }

  return results;
}
Example #5
//======================================================================
// Look for windows whose titles start with "Installing..." or "Setup..." and bring them to the front.
int ListWindows()
{
	getProcList();
	DEBUGMSG(1, (L"Window List\nnr\thwnd\tprocID\tprocName\tclass\ttitle\tpos/size\tstate\n"));

	nclog(L"Window List\nthis\tnr\thwnd\tprocID\tprocName\tclass\ttitle\tpos/size\tstate\n");

	int iLevel=0;
	EnumWindows(procEnumWindows, iLevel);

	return 0;
}
Example #6
QueryData genOpenSockets(QueryContext& context) {
  QueryData results;

  auto pidlist = getProcList(context);
  for (auto& pid : pidlist) {
    if (!context.constraints["pid"].matches(pid)) {
      // Optimization: skip pids that don't match the query's pid constraint.
      continue;
    }
    genOpenDescriptors(pid, DESCRIPTORS_TYPE_SOCKET, results);
  }

  return results;
}
Example #7
QueryData genProcessEnvs(QueryContext &context) {
  QueryData results;

  auto pidlist = getProcList(context);
  int argmax = genMaxArgs();
  for (const auto &pid : pidlist) {
    auto args = getProcRawArgs(pid, argmax);
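    // Raw args include both the argv list and the environment; only env pairs become rows here.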
    for (const auto &env : args.env) {
      Row r;
      r["pid"] = INTEGER(pid);
      r["key"] = env.first;
      r["value"] = env.second;
      results.push_back(r);
    }
  }

  return results;
}
Example #8
int main(int argc, char *argv[])
{
  double minTime        = 99999.0;
  double testTime       = 0.0;
  double maxBidirBw     = 0.0;
  double maxUnidirBw    = 0.0;
  int    maxBidirSize   = 0;
  int    maxUnidirSize  = 0;
  char*  procFile       = NULL;
  int*   procList       = NULL;
  int    procListSize;
  int    rank, wsize, iters, procs;
  int    messStart, messStop, messFactor, messSize;
  int    procsPerNode, totalops, procIdx;
  int    useBarrier, printPairs, useNearestRank, dummy;
  double currBw;
  char   allocPattern;
  MPI_Comm activeComm;

  command = argv[0];

  MPI_Init(&argc, &argv);

  MPI_Comm_size(MPI_COMM_WORLD, &wsize);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);


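  // Rank 0 times a simple OpenMP array copy; the other ranks only print an acknowledgement.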
  if (rank == 0) {
    //sleep(5);
    //while (!__sync_bool_compare_and_swap(&r_sync, 59, 0)){ sleep(2); printf("sync value %d \n", r_sync); };
    int k = 0;
    #pragma omp parallel
    #pragma omp atomic
    k++;
    printf("Done Sync. Number of Threads counted = %i\n", k);

    int N = 1048576;
    int i = 0;
    int j = 0;
    double start, diff;
    for (j = 0; j < 50; j++) {
      #pragma omp parallel for
      for (i = 0; i < N; i++) {
        a[i] = 1.0;
        b[i] = 2.0;
      }

      start = MPI_Wtime();
      #pragma omp parallel for
      for (i = 0; i < N; i++) {
        a[i] = b[i];
      }
      diff = MPI_Wtime() - start;
      printf("ELAPSED time to copy : %11.6f N: %d \n", diff, N);
    }
  } else {
    //__sync_fetch_and_add(&r_sync, 1);
    printf("OK other ranks  \n");
    //sleep(100);
  }
  MPI_Barrier(MPI_COMM_WORLD);

  MPI_Finalize();

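  // NOTE: this early return makes the remainder of main() unreachable.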
  return 0;

  if ( !processArgs(argc, argv, rank, wsize, &iters, &dummy,
                    &messStart, &messStop, &messFactor, 
                    &procFile, &procsPerNode, &allocPattern, 
                    &printPairs, &useBarrier, &useNearestRank) )
  {
    if ( rank == 0 )
      printUse();

    MPI_Finalize();
    exit(-1);
  }

  if ( ! getProcList(procFile, wsize, &procList, &procListSize,
                     procsPerNode, allocPattern) )
  {
    if ( procFile )
      printf("Failed to get process list from file %s.\n", procFile);
    else
      printf("Failed to allocate process list.\n");

    exit(-1);
  }

  if ( rank == 0 )
    printReportHeader();


  //Set up intra-node shared memory structures.
  if(rank == 0) {
        //One rank per node allocates shared send request lists.
        req_array = malloc(wsize*sizeof(ReqInfo));
        /*if(!IS_SM_BUF((void*)req_array)){
            printf("Failed to allocate from SM region :%p \n", req_array);
            exit(-1);
        }*/
  }
  //broadcast address of the pointer variable so that each rank has access to it
  MPI_Bcast(&req_array , 1, MPI_LONG, 0, MPI_COMM_WORLD);
  //printf("Broadcasting of shared mem region complete! rank : %d  region_addr : %lu region_ptr : %lu \n", rank, &req_array, req_array);
  //printf("Access shm region ! rank : %d  region for sync : %lu  \n", rank, req_array[rank].sync);
 /* 
  if(rank == 0){
	char* comBuf  = (char*)malloc(10);
	memset(comBuf, 1, 10);
	req_array[30].buffer = comBuf ;
	comBuf[0] = 'n';
        req_array[30].sync = malloc(sizeof(int));
        req_array[30].sync[0] = 12;
	printf("rank : %d done sending buff ptr : %p  sync ptr : %p \n",rank, comBuf, req_array[30].sync );
        printf("sleeping ! pid() %d \n", getpid());
	sleep(40000);
  }else if(rank == 30){
	while(req_array[30].sync == NULL){
	
	}

	while(req_array[30].sync[0] != 12){
	
	}
	printf("rank : %d  buffer value: %c sync value : %d  buff ptr : %p  sync ptr : %p  \n",
		rank, req_array[30].buffer[0], req_array[30].sync[0], req_array[30].buffer, req_array[30].sync );
        printf("sleeping ! pid() %d \n", getpid());
		sleep(40000);
  } 
  MPI_Barrier(MPI_COMM_WORLD);
  return 0;*/

  printPairs = 1;
  for ( procIdx = 0; procIdx < procListSize; procIdx++ )
  {
    procs = procList[procIdx];

    if ( rank == 0 && printPairs )
    {
      printActivePairs(procs, rank, wsize, procsPerNode, 
                       allocPattern, useNearestRank);
    }

    /*  Create Communicator of all active processes  */
    createActiveComm(procs, rank, wsize, procsPerNode, 
                     allocPattern, printPairs, useNearestRank, &activeComm);
//    messStart = 8388608;
//    messStop = 4096 ;
//    messStop = 8388608 ;
    for ( messSize = messStart; messSize <= messStop;
          messSize *= messFactor )
    {
      testTime = runUnicomTest(procs, messSize, iters, rank, 
                               wsize, procsPerNode, allocPattern, 
                               useBarrier, useNearestRank, &activeComm);

      if ( rank == 0 && testTime > 0 )
      {
        totalops   = iters * (procs/2);
        currBw = ((double)totalops*(double)messSize/testTime)/1000000;

        if ( currBw > maxUnidirBw )
        {
          maxUnidirBw   = currBw;
          maxUnidirSize = messSize;
        }

        if ( testTime < minTime )
          minTime = testTime;
      }
    }

    if ( activeComm != MPI_COMM_NULL )
      MPI_Comm_free(&activeComm);

    if ( rank == 0 )
      printf("\n");
  }
  
  for ( procIdx = 0; procIdx < procListSize; procIdx++ )
  {
    procs = procList[procIdx];

    if ( rank == 0 && printPairs )
    {
      printActivePairs(procs, rank, wsize, procsPerNode, 
                       allocPattern, useNearestRank);
    }

    // Create Communicator of all active processes
    createActiveComm(procs, rank, wsize, procsPerNode, 
                     allocPattern, printPairs, useNearestRank, &activeComm);

    for ( messSize = messStart; messSize <= messStop; 
          messSize *= messFactor )
    {
      testTime = runBicomTest(procs, messSize, iters, rank,
                              wsize, procsPerNode, allocPattern,
                              useBarrier, useNearestRank, &activeComm);

      if ( rank == 0 && testTime > 0 )
      {
        totalops   = iters * procs;
        currBw = (((double)totalops*(double)messSize)/testTime)/1000000;

        if ( currBw > maxBidirBw )
        {
          maxBidirBw   = currBw;
          maxBidirSize = messSize;
        }

        if ( testTime < minTime )
          minTime = testTime;
      }
    }

    if ( activeComm != MPI_COMM_NULL )
      MPI_Comm_free(&activeComm);

    if ( rank == 0 )
      printf("\n");
  }


  if ( rank == 0 )
  {
    printf("Max Unidirectional Bandwith :  %13.2f for message size of %7d bytes\n",
           maxUnidirBw, maxUnidirSize);
    printf("Max  Bidirectional Bandwith :  %13.2f for message size of %7d bytes\n",
           maxBidirBw, maxBidirSize);

    printParameters(iters, messStart, messStop, messFactor, 
                    procFile, procsPerNode, allocPattern, useBarrier);
    free(req_array);
  }

  printReportFooter(minTime, rank, wsize, procsPerNode, useNearestRank);
  
  free(procList);

  MPI_Finalize();

  exit(0);
}
Example #9
File: laten.c Project: uswick/presta_HMPI
int main(int argc, char** argv)
{
  int rank, wsize, iters, i, procs, currtarg, dummy;
  double diff = 0.0;
  double start, max, mintime = 9999;
  MPI_Status stat;
  char comBuf;
  MPI_Comm activeComm;
  char*  procFile       = NULL;
  int*   procList       = NULL;
  int    procListSize;
  int    messStart, messStop, messFactor;
  int    procsPerNode, procIdx, useBarrier, printPairs, useNearestRank;
  char   allocPattern;

  command = argv[0];

  MPI_Init(&argc, &argv);

  MPI_Comm_size(MPI_COMM_WORLD, &wsize);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if ( !processArgs(argc, argv, rank, wsize, &iters, 
                    &dummy, &messStart, &messStop, &messFactor, 
                    &procFile, &procsPerNode, &allocPattern, 
                    &printPairs, &useBarrier, &useNearestRank) )
  {
    if ( rank == 0 )
      printUse();

    MPI_Finalize();
    exit(-1);
  }

  if ( ! getProcList(procFile, wsize, &procList, &procListSize,
                     procsPerNode, allocPattern) )
  {
    if ( procFile )
      printf("Failed to get process list from file %s.\n", procFile);
    else
      printf("Failed to allocate process list.\n");

    exit(-1);
  }

  if ( rank == 0 )
    printReportHeader();

  currtarg = getTargetRank(rank, wsize, procsPerNode, useNearestRank);

  for ( procIdx = 0; procIdx < procListSize; procIdx++ )
  {
    procs = procList[procIdx];

    if ( printPairs )
    {
      printActivePairs(procs, rank, wsize, procsPerNode, 
                       allocPattern, useNearestRank);
    }

    /*  Create Communicator of all active processes  */
    createActiveComm(procs, rank, wsize, procsPerNode, 
                     allocPattern, printPairs, useNearestRank, &activeComm);
    

    if ( isActiveProc(rank, wsize, procsPerNode, procs, 
                      allocPattern, useNearestRank) )
    {
      if ( rank < currtarg )
      {
        /*  Ensure pair communication has been initialized  */
        MPI_Recv(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD, &stat);
        MPI_Send(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD);
      }
      else 
      {
        /*  Ensure pair communication has been initialized  */
        MPI_Send(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD);
        MPI_Recv(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD, &stat);
      }
    
      //generic_barrier(activeComm);
      MPI_Barrier(activeComm);
      //generic_barrier(activeComm);
      MPI_Barrier(activeComm);

      if ( rank < currtarg )
      {
        /*  Time operation loop  */
        start = MPI_Wtime();

        for ( i = 0; i < iters; i++ )
        {
          MPI_Send(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD);
          MPI_Recv(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD, &stat);
        }
      }
      else 
      {
        /*  Time operation loop  */
        start = MPI_Wtime();

        for ( i = 0; i < iters; i++ )
        {
          MPI_Recv(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD, &stat);
          MPI_Send(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD);
        }
      }

      if ( useBarrier )
        MPI_Barrier(activeComm);
        //generic_barrier(activeComm);

      diff = MPI_Wtime() - start;
    }

    if ( activeComm != MPI_COMM_NULL )
      MPI_Comm_free(&activeComm);

    /*  Get maximum sample length  */
    MPI_Reduce(&diff, &max, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

    if ( rank == 0 )
    {
      if ( max < mintime )
        mintime = max;

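      // One-way latency in microseconds: round-trip time / iters / 2, scaled by 1e6.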
      printf(outputFormat, procs, max/iters/2*1000000);
    }
  }
  
  if ( rank == 0 )
  {
    printParameters(iters, procFile, procsPerNode, 
                    allocPattern, useBarrier);
  }

  printReportFooter(mintime, rank, wsize, procsPerNode, useNearestRank);

  MPI_Finalize();

  exit(0);
}
Example #10
QueryData genProcesses(QueryContext &context) {
  QueryData results;

  auto pidlist = getProcList(context);
  auto parent_pid = getParentMap(pidlist);
  int argmax = genMaxArgs();

  for (auto &pid : pidlist) {
    if (!context.constraints["pid"].matches<int>(pid)) {
      // Optimization: skip pids that don't match the query's pid constraint.
      continue;
    }

    Row r;
    r["pid"] = INTEGER(pid);
    r["path"] = getProcPath(pid);
    // OS X proc_name only returns 16 bytes, use the basename of the path.
    r["name"] = boost::filesystem::path(r["path"]).filename().string();

    // The command line invocation including arguments.
    std::string cmdline = boost::algorithm::join(getProcArgs(pid, argmax), " ");
    boost::algorithm::trim(cmdline);
    r["cmdline"] = cmdline;
    genProcRootAndCWD(pid, r);

    proc_cred cred;
    if (getProcCred(pid, cred)) {
      r["uid"] = BIGINT(cred.real.uid);
      r["gid"] = BIGINT(cred.real.gid);
      r["euid"] = BIGINT(cred.effective.uid);
      r["egid"] = BIGINT(cred.effective.gid);
    } else {
      r["uid"] = "-1";
      r["gid"] = "-1";
      r["euid"] = "-1";
      r["egid"] = "-1";
    }

    // Find the parent process.
    const auto parent_it = parent_pid.find(pid);
    if (parent_it != parent_pid.end()) {
      r["parent"] = INTEGER(parent_it->second);
    } else {
      r["parent"] = "-1";
    }

    // If the path of the executable that started the process is available and
    // the path exists on disk, set on_disk to 1. If the path is not
    // available, set on_disk to -1. If, and only if, the path of the
    // executable is available and the file does NOT exist on disk, set on_disk
    // to 0.
    r["on_disk"] = osquery::pathExists(r["path"]).toString();

    // systems usage and time information
    struct rusage_info_v2 rusage_info_data;
    int rusage_status = proc_pid_rusage(
        pid, RUSAGE_INFO_V2, (rusage_info_t *)&rusage_info_data);
    // proc_pid_rusage returns -1 if it was unable to gather information
    if (rusage_status == 0) {
      // size/memory information
      r["wired_size"] = TEXT(rusage_info_data.ri_wired_size);
      r["resident_size"] = TEXT(rusage_info_data.ri_resident_size);
      r["phys_footprint"] = TEXT(rusage_info_data.ri_phys_footprint);

      // time information
      r["user_time"] = TEXT(rusage_info_data.ri_user_time / 1000000);
      r["system_time"] = TEXT(rusage_info_data.ri_system_time / 1000000);
      r["start_time"] = TEXT(rusage_info_data.ri_proc_start_abstime);
    } else {
      r["wired_size"] = "-1";
      r["resident_size"] = "-1";
      r["phys_footprint"] = "-1";
      r["user_time"] = "-1";
      r["system_time"] = "-1";
      r["start_time"] = "-1";
    }

    results.push_back(r);
  }

  return results;
}
Example #11
File: com.c Project: uswick/presta_HMPI
int main(int argc, char *argv[])
{
  double minTime        = 99999.0;
  double testTime       = 0.0;
  double maxBidirBw     = 0.0;
  double maxUnidirBw    = 0.0;
  int    maxBidirSize   = 0;
  int    maxUnidirSize  = 0;
  char*  procFile       = NULL;
  int*   procList       = NULL;
  int    procListSize;
  int    rank, wsize, iters, procs;
  int    messStart, messStop, messFactor, messSize;
  int    procsPerNode, totalops, procIdx;
  int    useBarrier, printPairs, useNearestRank, dummy;
  double currBw;
  char   allocPattern;
  MPI_Comm activeComm;

  command = argv[0];

  MPI_Init(&argc, &argv);

  MPI_Comm_size(MPI_COMM_WORLD, &wsize);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if ( !processArgs(argc, argv, rank, wsize, &iters, &dummy,
                    &messStart, &messStop, &messFactor, 
                    &procFile, &procsPerNode, &allocPattern, 
                    &printPairs, &useBarrier, &useNearestRank) )
  {
    if ( rank == 0 )
      printUse();

    MPI_Finalize();
    exit(-1);
  }

  if ( ! getProcList(procFile, wsize, &procList, &procListSize,
                     procsPerNode, allocPattern) )
  {
    if ( procFile )
      printf("Failed to get process list from file %s.\n", procFile);
    else
      printf("Failed to allocate process list.\n");

    exit(-1);
  }

  if ( rank == 0 )
    printReportHeader();

  for ( procIdx = 0; procIdx < procListSize; procIdx++ )
  {
    procs = procList[procIdx];

    if ( rank == 0 && printPairs )
    {
      printActivePairs(procs, rank, wsize, procsPerNode, 
                       allocPattern, useNearestRank);
    }

    /*  Create Communicator of all active processes  */
    createActiveComm(procs, rank, wsize, procsPerNode, 
                     allocPattern, printPairs, useNearestRank, &activeComm);

    for ( messSize = messStart; messSize <= messStop; 
          messSize *= messFactor )
    {
      testTime = runUnicomTest(procs, messSize, iters, rank, 
                               wsize, procsPerNode, allocPattern, 
                               useBarrier, useNearestRank, &activeComm);

      if ( rank == 0 && testTime > 0 )
      {
        totalops   = iters * (procs/2);
        currBw = ((double)totalops*(double)messSize/testTime)/1000000;

        if ( currBw > maxUnidirBw )
        {
          maxUnidirBw   = currBw;
          maxUnidirSize = messSize;
        }

        if ( testTime < minTime )
          minTime = testTime;
      }
    }

    if ( activeComm != MPI_COMM_NULL )
      MPI_Comm_free(&activeComm);

    if ( rank == 0 )
      printf("\n");
  }
  
  for ( procIdx = 0; procIdx < procListSize; procIdx++ )
  {
    procs = procList[procIdx];

    if ( rank == 0 && printPairs )
    {
      printActivePairs(procs, rank, wsize, procsPerNode, 
                       allocPattern, useNearestRank);
    }

    /*  Create Communicator of all active processes  */
    createActiveComm(procs, rank, wsize, procsPerNode, 
                     allocPattern, printPairs, useNearestRank, &activeComm);

    for ( messSize = messStart; messSize <= messStop; 
          messSize *= messFactor )
    {
      testTime = runBicomTest(procs, messSize, iters, rank, 
                              wsize, procsPerNode, allocPattern, 
                              useBarrier, useNearestRank, &activeComm);

      if ( rank == 0 && testTime > 0 )
      {
        totalops   = iters * procs;
        currBw = (((double)totalops*(double)messSize)/testTime)/1000000;

        if ( currBw > maxBidirBw )
        {
          maxBidirBw   = currBw;
          maxBidirSize = messSize;
        }

        if ( testTime < minTime )
          minTime = testTime;
      }
    }

    if ( activeComm != MPI_COMM_NULL )
      MPI_Comm_free(&activeComm);

    if ( rank == 0 )
      printf("\n");
  }

  if ( rank == 0 )
  {
    printf("Max Unidirectional Bandwith :  %13.2f for message size of %7d bytes\n",
           maxUnidirBw, maxUnidirSize);
    printf("Max  Bidirectional Bandwith :  %13.2f for message size of %7d bytes\n",
           maxBidirBw, maxBidirSize);

    printParameters(iters, messStart, messStop, messFactor, 
                    procFile, procsPerNode, allocPattern, useBarrier);
  }

  printReportFooter(minTime, rank, wsize, procsPerNode, useNearestRank);
  
  free(procList);

  MPI_Finalize();

  exit(0);
}
Example #12
QueryData genProcesses(QueryContext& context) {
  QueryData results;

  // Initialize time conversions.
  static mach_timebase_info_data_t time_base;
  if (time_base.denom == 0) {
    mach_timebase_info(&time_base);
  }

  auto pidlist = getProcList(context);
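  // argmax caps the buffer used to read each process's raw arguments and environment.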
  int argmax = genMaxArgs();

  for (auto& pid : pidlist) {
    Row r;
    r["pid"] = INTEGER(pid);

    {
      // The command line invocation including arguments.
      auto args = getProcRawArgs(pid, argmax);
      std::string cmdline = boost::algorithm::join(args.args, " ");
      r["cmdline"] = cmdline;
    }

    // The process relative root and current working directory.
    genProcRootAndCWD(pid, r);

    proc_cred cred;
    if (getProcCred(pid, cred)) {
      r["parent"] = BIGINT(cred.parent);
      r["pgroup"] = BIGINT(cred.group);
      // check if process state is one of the expected ones
      r["state"] = (1 <= cred.status && cred.status <= 5)
                       ? TEXT(kProcessStateMapping[cred.status])
                       : TEXT('?');
      r["nice"] = INTEGER(cred.nice);
      r["uid"] = BIGINT(cred.real.uid);
      r["gid"] = BIGINT(cred.real.gid);
      r["euid"] = BIGINT(cred.effective.uid);
      r["egid"] = BIGINT(cred.effective.gid);
      r["suid"] = BIGINT(cred.saved.uid);
      r["sgid"] = BIGINT(cred.saved.gid);
    } else {
      continue;
    }

    // If the process is not a Zombie, try to find the path and name.
    if (cred.status != 5) {
      r["path"] = getProcPath(pid);
      // OS X proc_name only returns 16 bytes, use the basename of the path.
      r["name"] = fs::path(r["path"]).filename().string();
    } else {
      r["path"] = "";
      std::vector<char> name(17);
      proc_name(pid, name.data(), 16);
      r["name"] = std::string(name.data());
    }

    // If the path of the executable that started the process is available and
    // the path exists on disk, set on_disk to 1. If the path is not
    // available, set on_disk to -1. If, and only if, the path of the
    // executable is available and the file does NOT exist on disk, set on_disk
    // to 0.
    if (r["path"].empty()) {
      r["on_disk"] = INTEGER(-1);
    } else if (pathExists(r["path"])) {
      r["on_disk"] = INTEGER(1);
    } else {
      r["on_disk"] = INTEGER(0);
    }

    // systems usage and time information
    struct rusage_info_v2 rusage_info_data;
    int status =
        proc_pid_rusage(pid, RUSAGE_INFO_V2, (rusage_info_t*)&rusage_info_data);
    // proc_pid_rusage returns -1 if it was unable to gather information
    if (status == 0) {
      // size/memory information
      r["wired_size"] = TEXT(rusage_info_data.ri_wired_size);
      r["resident_size"] = TEXT(rusage_info_data.ri_resident_size);
      r["total_size"] = TEXT(rusage_info_data.ri_phys_footprint);

      // time information
      r["user_time"] = TEXT(rusage_info_data.ri_user_time / CPU_TIME_RATIO);
      r["system_time"] = TEXT(rusage_info_data.ri_system_time / CPU_TIME_RATIO);
      // Convert the time in CPU ticks since boot to seconds.
      // This is relative to time not-sleeping since boot.
      r["start_time"] =
          TEXT((rusage_info_data.ri_proc_start_abstime / START_TIME_RATIO) *
               time_base.numer / time_base.denom);
    } else {
      r["wired_size"] = "-1";
      r["resident_size"] = "-1";
      r["total_size"] = "-1";
      r["user_time"] = "-1";
      r["system_time"] = "-1";
      r["start_time"] = "-1";
    }

    struct proc_taskinfo task_info;
    status =
        proc_pidinfo(pid, PROC_PIDTASKINFO, 0, &task_info, sizeof(task_info));
    if (status == sizeof(task_info)) {
      r["threads"] = INTEGER(task_info.pti_threadnum);
    } else {
      r["threads"] = "-1";
    }

    results.push_back(r);
  }

  return results;
}