Example #1
	uint32_t getNumberOfProcessors(void)
	{
		uint32_t numberOfProcessors = 0;

		//Get the CPU affinity mask
		cpu_set_t cpuSet;
		if(sched_getaffinity(0, sizeof(cpu_set_t), &cpuSet) == 0)
		{
		//Count the number of active CPUs
			numberOfProcessors = CPU_COUNT_S(sizeof(cpu_set_t), &cpuSet);
		}
		else
		{
			LOG("Failed to determine number of processors!");
			numberOfProcessors = 1;
		}

		//If no CPU found, enforce at least one!
		if(numberOfProcessors < 1)
		{
			numberOfProcessors = 1;
		}
	
		return numberOfProcessors;
	}
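
For comparison, here is a minimal standalone program using the same pattern. This is my own sketch, not part of the listing, and it assumes glibc with _GNU_SOURCE so that cpu_set_t, sched_getaffinity() and CPU_COUNT() are available:

/* Sketch: report how many CPUs the calling process may run on. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	if (sched_getaffinity(0, sizeof(set), &set) != 0) {
		perror("sched_getaffinity");
		return 1;
	}
	printf("usable CPUs: %d\n", CPU_COUNT(&set));
	return 0;
}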
Example #2
File: proc.c Project: ds2dev/gcc
unsigned long
gomp_cpuset_popcount (unsigned long cpusetsize, cpu_set_t *cpusetp)
{
#ifdef CPU_COUNT_S
  /* glibc 2.7 and above provide a macro for this.  */
  return CPU_COUNT_S (cpusetsize, cpusetp);
#else
#ifdef CPU_COUNT
  if (cpusetsize == sizeof (cpu_set_t))
    /* glibc 2.6 and above provide a macro for this.  */
    return CPU_COUNT (cpusetp);
#endif
  size_t i;
  unsigned long ret = 0;
  extern int check[sizeof (cpusetp->__bits[0]) == sizeof (unsigned long int)
		   ? 1 : -1] __attribute__((unused));

  for (i = 0; i < cpusetsize / sizeof (cpusetp->__bits[0]); i++)
    {
      unsigned long int mask = cpusetp->__bits[i];
      if (mask == 0)
	continue;
      ret += __builtin_popcountl (mask);
    }
  return ret;
#endif
}
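
The #else branch exists because CPU_COUNT_S only appeared in glibc 2.7. On a current glibc the same count is a one-liner; the following sketch (mine, not part of the GCC sources) shows the CPU_ALLOC / CPU_ALLOC_SIZE / CPU_COUNT_S combination that most of the later examples rely on:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
  const int max_cpus = 1024;              /* arbitrary upper bound on CPU numbers */
  size_t size = CPU_ALLOC_SIZE(max_cpus); /* size of the set in bytes */
  cpu_set_t *set = CPU_ALLOC(max_cpus);

  if (set == NULL)
    return 1;
  CPU_ZERO_S(size, set);
  if (sched_getaffinity(0, size, set) != 0)
    {
      CPU_FREE(set);
      return 1;
    }
  printf("popcount of affinity mask: %d\n", CPU_COUNT_S(size, set));
  CPU_FREE(set);
  return 0;
}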
Example #3
int geopm_sched_woomp(int num_cpu, cpu_set_t *woomp)
{
    /*! @brief Function that returns a cpuset that has bits set for
               all CPUs enabled for the process which are not used by
               OpenMP.  Rather than returning an empty mask, if all
               CPUs allocated for the process are used by OpenMP, then
               the woomp mask will have all bits set. */

    int err = pthread_once(&g_proc_cpuset_once, geopm_proc_cpuset_once);
    int sched_num_cpu = geopm_sched_num_cpu();
    size_t req_alloc_size = CPU_ALLOC_SIZE(num_cpu);

    if (!err && !g_proc_cpuset) {
        err = ENOMEM;
    }
    if (!err && req_alloc_size < g_proc_cpuset_size) {
        err = EINVAL;
    }
    if (!err) {
        /* Copy the process CPU mask into the output. */
        memcpy(woomp, g_proc_cpuset, g_proc_cpuset_size);
        /* Start an OpenMP parallel region and have each thread clear
           its bit from the mask. */
#ifdef _OPENMP
#pragma omp parallel default(shared)
{
#pragma omp critical
{
        int cpu_index = sched_getcpu();
        if (cpu_index != -1 && cpu_index < num_cpu) {
            /* Clear the bit for this OpenMP thread's CPU. */
            CPU_CLR_S(cpu_index, g_proc_cpuset_size, woomp);
        }
        else {
            err = errno ? errno : GEOPM_ERROR_LOGIC;
        }
} /* end pragma omp critical */
} /* end pragma omp parallel */
#endif /* _OPENMP */
    }
    if (!err) {
        for (int i = sched_num_cpu; i < num_cpu; ++i) {
            CPU_CLR_S(i, req_alloc_size, woomp);
        }
    }
    if (err || CPU_COUNT_S(g_proc_cpuset_size, woomp) == 0) {
        /* If all CPUs are used by the OpenMP gang, then leave the
           mask open and allow the Linux scheduler to choose. */
        for (int i = 0; i < num_cpu; ++i) {
            CPU_SET_S(i, g_proc_cpuset_size, woomp);
        }
    }
    return err;
}
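
A caller would typically use the returned mask to pin a helper thread away from the OpenMP gang. The following is a hypothetical usage sketch of mine, not GEOPM code; the function name pin_away_from_openmp and the error handling are assumptions:

#define _GNU_SOURCE
#include <errno.h>
#include <pthread.h>
#include <sched.h>

int geopm_sched_woomp(int num_cpu, cpu_set_t *woomp); /* defined in the example above */

int pin_away_from_openmp(int num_cpu)
{
    size_t size = CPU_ALLOC_SIZE(num_cpu);
    cpu_set_t *woomp = CPU_ALLOC(num_cpu);
    int err = woomp ? 0 : ENOMEM;

    if (!err) {
        CPU_ZERO_S(size, woomp);
        err = geopm_sched_woomp(num_cpu, woomp);
    }
    if (!err) {
        /* pthread_setaffinity_np() returns an errno-style value directly. */
        err = pthread_setaffinity_np(pthread_self(), size, woomp);
    }
    if (woomp) {
        CPU_FREE(woomp);
    }
    return err;
}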
Example #4
static runconfig * allocforncores(void)
{
	const unsigned ncoresmax = 128;
	const unsigned cslen = CPU_ALLOC_SIZE(ncoresmax);
	printf("assuming no more than %u cores. set length = %u\n",
		ncoresmax, cslen);

	cpu_set_t * coreset = CPU_ALLOC(ncoresmax);

	if(coreset && !sched_getaffinity(getpid(), cslen, coreset)) { } else
	{
		fail("can't get current affinity");
	}
	
	const int ncores = CPU_COUNT_S(cslen, coreset);

	if(ncores) { } else
	{
		fail("don't know how to work on 0 cores\n");
	}

	runconfig *const cfg =
		malloc(sizeof(runconfig)
			+ sizeof(unsigned) * (ncores - 1));

	if(cfg) { } else
	{
		fail("can't allocate memory for config structure");
	}

	cfg->ncores = ncores;

	unsigned cc = 0; // current core
	for(unsigned i = 0; cc < ncores; i += 1)
	{
		if(CPU_ISSET_S(i, cslen, coreset))
		{
			cfg->corelist[cc] = i;
			cc += 1;
		}
	}

	free(coreset);

	return cfg;
}
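
The final loop walks bit positions until it has collected ncores set bits, so cfg->corelist ends up holding the logical indices of the CPUs the process is allowed to run on, in ascending order.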
Example #5
int pin_cpu(pid_t pid, unsigned int cpu) {
   size_t size = CPU_ALLOC_SIZE(cpu + 1);   // bytes needed for a set that can hold 'cpu'
   cpu_set_t * setPtr = CPU_ALLOC(cpu + 1); // allocate a set large enough for the requested cpu
   assert (NULL != setPtr && "cpu_set allocation failed!");

   CPU_ZERO_S(size, setPtr);     // clear set
   CPU_SET_S(cpu, size, setPtr); // enable requested cpu in set
   assert(1 == CPU_COUNT_S(size, setPtr));
   assert (CPU_ISSET_S(cpu, size, setPtr));

   int ret = sched_setaffinity(pid, size, setPtr);
   assert (ret == 0 && "sched_setaffinity failed");
   assert (cpu == sched_getcpu() && "Pinning failed");

   CPU_FREE(setPtr);
   return ret;
}
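
Note that sched_setaffinity() treats pid == 0 as the calling thread, and the final assertion (cpu == sched_getcpu()) only holds in that case: for any other pid the affinity of a different process is changed, while sched_getcpu() still reports the caller's CPU.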
Example #6
static int
find_last_cpu (const cpu_set_t *set, size_t size)
{
  /* We need to determine the set size with CPU_COUNT_S and the
     cpus_found counter because there is no direct way to obtain the
     actual CPU set size, in bits, from the value of
     CPU_ALLOC_SIZE.  */
  size_t cpus_found = 0;
  size_t total_cpus = CPU_COUNT_S (size, set);
  int last_cpu = -1;

  for (int cpu = 0; cpus_found < total_cpus; ++cpu)
    {
      if (CPU_ISSET_S (cpu, size, set))
	{
	  last_cpu = cpu;
	  ++cpus_found;
	}
    }
  return last_cpu;
}
Example #7
/**
 * _CPU_set_Is_valid
 *
 * This routine validates that the cpuset size matches the
 * system's expected size, that at least one valid cpu is set,
 * and that no invalid cpus are set.
 */
bool _CPU_set_Is_valid( const cpu_set_t *cpuset, size_t setsize )
{
  cpu_set_t             temp;

  if ( !cpuset )
    return false;

  if ( setsize != cpuset_default.setsize )
    return false;

  /* Validate at least 1 valid cpu is set in cpuset */
  CPU_AND_S( cpuset_default.setsize, &temp, cpuset, cpuset_default.set );

  if ( CPU_COUNT_S( setsize, &temp ) == 0 )
    return false;

  /* Validate that no invalid cpus are set in cpuset */
  if ( !CPU_EQUAL_S( setsize, &temp, cpuset ) )
    return false;

  return true;
}
Example #8
int cpu_manager::reserve_cpu_for_thread(pthread_t tid, int suggested_cpu /* = NO_CPU */)
{
	lock();
	int cpu = g_n_thread_cpu_core;
	if (cpu != NO_CPU) { //already reserved
		unlock();
		return cpu;
	}

	cpu_set_t* cpu_set = NULL;
	cpu_set = CPU_ALLOC(MAX_CPU);
	if (!cpu_set) {
		unlock();
		__log_err("failed to allocate cpu set");
		return -1;
	}
	size_t cpu_set_size = CPU_ALLOC_SIZE(MAX_CPU);
	CPU_ZERO_S(cpu_set_size, cpu_set);
	if (pthread_getaffinity_np(tid, cpu_set_size, cpu_set)) {
		unlock();
		CPU_FREE(cpu_set);
		__log_err("pthread_getaffinity_np failed for tid=%lu (errno=%d %m)", tid, errno);
		return -1;
	}

	if (CPU_COUNT_S(cpu_set_size, cpu_set) == 0) {
		unlock();
		__log_err("no cpu available for tid=%lu", tid);
		CPU_FREE(cpu_set);
		return -1;
	}

	if (CPU_COUNT_S(cpu_set_size, cpu_set) == 1) { //already attached
		for (cpu = 0; cpu < MAX_CPU && !CPU_ISSET_S(cpu, cpu_set_size, cpu_set); cpu++) {}
	} else { //need to choose one cpu to attach to
		int min_cpu_count = -1;
		for (int i = 0; i < MAX_CPU; i++) {
			if (!CPU_ISSET_S(i, cpu_set_size, cpu_set)) continue;
			if (min_cpu_count < 0 || m_cpu_thread_count[i] < min_cpu_count) {
				min_cpu_count = m_cpu_thread_count[i];
				cpu = i;
			}
		}
		if (suggested_cpu >= 0
			&& CPU_ISSET_S(suggested_cpu, cpu_set_size, cpu_set)
			&& m_cpu_thread_count[suggested_cpu] <= min_cpu_count + 1 ) {
			cpu = suggested_cpu;
		}
		CPU_ZERO_S(cpu_set_size, cpu_set);
		CPU_SET_S(cpu, cpu_set_size, cpu_set);
		__log_dbg("attach tid=%lu running on cpu=%d to cpu=%d", tid, sched_getcpu(), cpu);
		if (pthread_setaffinity_np(tid, cpu_set_size, cpu_set)) {
			unlock();
			CPU_FREE(cpu_set);
			__log_err("pthread_setaffinity_np failed for tid=%lu to cpu=%d (errno=%d %m)", tid, cpu, errno);
			return -1;
		}
	}

	CPU_FREE(cpu_set);
	g_n_thread_cpu_core = cpu;
	m_cpu_thread_count[cpu]++;
	unlock();
	return cpu;
}
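
When more than one CPU is allowed, the thread is attached to the allowed CPU with the fewest threads already reserved on it (m_cpu_thread_count), except that suggested_cpu wins if it is allowed and its count is within one of that minimum.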
Example #9
static void test_parse_cpu_set(void) {
        cpu_set_t *c = NULL;
        int ncpus;
        int cpu;

        /* Simple range (from CPUAffinity example) */
        ncpus = parse_cpu_set_and_warn("1 2", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_ISSET_S(1, CPU_ALLOC_SIZE(ncpus), c));
        assert_se(CPU_ISSET_S(2, CPU_ALLOC_SIZE(ncpus), c));
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 2);
        c = cpu_set_mfree(c);

        /* A more interesting range */
        ncpus = parse_cpu_set_and_warn("0 1 2 3 8 9 10 11", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 8);
        for (cpu = 0; cpu < 4; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        for (cpu = 8; cpu < 12; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        c = cpu_set_mfree(c);

        /* Quoted strings */
        ncpus = parse_cpu_set_and_warn("8 '9' 10 \"11\"", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 4);
        for (cpu = 8; cpu < 12; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        c = cpu_set_mfree(c);

        /* Use commas as separators */
        ncpus = parse_cpu_set_and_warn("0,1,2,3 8,9,10,11", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 8);
        for (cpu = 0; cpu < 4; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        for (cpu = 8; cpu < 12; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        c = cpu_set_mfree(c);

        /* Commas with spaces (and trailing comma, space) */
        ncpus = parse_cpu_set_and_warn("0, 1, 2, 3, 4, 5, 6, 7, ", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 8);
        for (cpu = 0; cpu < 8; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        c = cpu_set_mfree(c);

        /* Ranges */
        ncpus = parse_cpu_set_and_warn("0-3,8-11", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 8);
        for (cpu = 0; cpu < 4; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        for (cpu = 8; cpu < 12; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        c = cpu_set_mfree(c);

        /* Ranges with trailing comma, space */
        ncpus = parse_cpu_set_and_warn("0-3  8-11, ", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 8);
        for (cpu = 0; cpu < 4; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        for (cpu = 8; cpu < 12; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        c = cpu_set_mfree(c);

        /* Negative range (returns empty cpu_set) */
        ncpus = parse_cpu_set_and_warn("3-0", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 0);
        c = cpu_set_mfree(c);

        /* Overlapping ranges */
        ncpus = parse_cpu_set_and_warn("0-7 4-11", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 12);
        for (cpu = 0; cpu < 12; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        c = cpu_set_mfree(c);

        /* Mix ranges and individual CPUs */
        ncpus = parse_cpu_set_and_warn("0,1 4-11", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 10);
        assert_se(CPU_ISSET_S(0, CPU_ALLOC_SIZE(ncpus), c));
        assert_se(CPU_ISSET_S(1, CPU_ALLOC_SIZE(ncpus), c));
        for (cpu = 4; cpu < 12; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        c = cpu_set_mfree(c);

        /* Garbage */
        ncpus = parse_cpu_set_and_warn("0 1 2 3 garbage", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus < 0);
        assert_se(!c);

        /* Range with garbage */
        ncpus = parse_cpu_set_and_warn("0-3 8-garbage", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus < 0);
        assert_se(!c);

        /* Empty string */
        c = NULL;
        ncpus = parse_cpu_set_and_warn("", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus == 0);  /* empty string returns 0 */
        assert_se(!c);

        /* Runaway quoted string */
        ncpus = parse_cpu_set_and_warn("0 1 2 3 \"4 5 6 7 ", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus < 0);
        assert_se(!c);
}