Пример #1
0
/* Probe for a cpu_set_t large enough to hold the kernel's CPU mask.
 * Starts at 1024 CPUs and doubles until sched_getaffinity() accepts
 * the buffer.  On success returns a zeroed set and stores the CPU
 * count in *ncpus (if non-NULL); returns NULL on OOM or on any
 * sched_getaffinity() failure other than EINVAL. */
cpu_set_t* cpu_set_malloc(unsigned *ncpus) {
        unsigned guess = 1024;

        for (;;) {
                cpu_set_t *set = CPU_ALLOC(guess);
                if (!set)
                        return NULL;

                if (sched_getaffinity(0, CPU_ALLOC_SIZE(guess), set) < 0) {
                        CPU_FREE(set);

                        /* EINVAL means the buffer was too small for the
                         * kernel mask; anything else is a real failure. */
                        if (errno != EINVAL)
                                return NULL;

                        guess *= 2;
                        continue;
                }

                CPU_ZERO_S(CPU_ALLOC_SIZE(guess), set);

                if (ncpus)
                        *ncpus = guess;

                return set;
        }
}
Пример #2
0
/* Config-file parser callback: parses a CPU-affinity list from 'rvalue'
 * and merges it into settings->cpuset / settings->cpuset_ncpus.
 * Returns 0 on success or the negative value from the parser. */
int config_parse_cpu_affinity(
                const char *unit,
                const char *filename,
                unsigned line,
                const char *section,
                unsigned section_line,
                const char *lvalue,
                int ltype,
                const char *rvalue,
                void *data,
                void *userdata) {

        /* freed automatically on scope exit unless ownership is taken */
        _cleanup_cpu_free_ cpu_set_t *cpuset = NULL;
        Settings *settings = data;
        int ncpus;

        assert(rvalue);
        assert(settings);

        /* On success ncpus is the CPU count the set was sized for,
         * not the number of bits actually set. */
        ncpus = parse_cpu_set_and_warn(rvalue, &cpuset, unit, filename, line, lvalue);
        if (ncpus < 0)
                return ncpus;

        if (ncpus == 0) {
                /* An empty assignment resets the CPU list */
                settings->cpuset = cpu_set_mfree(settings->cpuset);
                settings->cpuset_ncpus = 0;
                return 0;
        }

        if (!settings->cpuset) {
                /* First assignment: adopt the freshly parsed set outright. */
                settings->cpuset = TAKE_PTR(cpuset);
                settings->cpuset_ncpus = (unsigned) ncpus;
                return 0;
        }

        if (settings->cpuset_ncpus < (unsigned) ncpus) {
                /* New set is larger: OR the old (smaller) set into it using
                 * the old set's byte size, then replace the stored set. */
                CPU_OR_S(CPU_ALLOC_SIZE(settings->cpuset_ncpus), cpuset, settings->cpuset, cpuset);
                CPU_FREE(settings->cpuset);
                settings->cpuset = TAKE_PTR(cpuset);
                settings->cpuset_ncpus = (unsigned) ncpus;
                return 0;
        }

        /* Stored set is at least as large: OR the new bits into it in place;
         * the parsed 'cpuset' is released by the cleanup attribute. */
        CPU_OR_S(CPU_ALLOC_SIZE((unsigned) ncpus), settings->cpuset, settings->cpuset, cpuset);

        return 0;
}
Пример #3
0
/* Migrate the calling process to 'target_cpu'.
 * Returns 0 on success, -1 on invalid CPU, allocation failure, or
 * sched_setaffinity() error. */
int linux_migrate_to(int target_cpu)
{
	cpu_set_t *cpu_set;
	size_t sz;
	int num_cpus;
	int ret;

	if (target_cpu < 0)
		return -1;

	num_cpus = num_online_cpus();
	if (num_cpus == -1)
		return -1;

	if (target_cpu >= num_cpus)
		return -1;

	cpu_set = CPU_ALLOC(num_cpus);
	if (!cpu_set)
		return -1;	/* fix: previously CPU_ZERO_S dereferenced NULL on OOM */
	sz = CPU_ALLOC_SIZE(num_cpus);
	CPU_ZERO_S(sz, cpu_set);
	CPU_SET_S(target_cpu, sz, cpu_set);

	/* apply to caller */
	ret = sched_setaffinity(getpid(), sz, cpu_set);

	CPU_FREE(cpu_set);

	return ret;
}
Пример #4
0
/* Read a hex CPU-mask file /proc/litmus/<which>/<idx> into a freshly
 * allocated cpu_set_t.  On success returns 0 and stores the set and its
 * byte size in *set / *sz (caller owns the set, free with CPU_FREE).
 * On failure returns a negative value and leaves *set NULL, *sz 0. */
static int read_mapping(int idx, const char* which, cpu_set_t** set, size_t *sz)
{
	/* Max CPUs = 4096 */

	int	ret = -1;
	char buf[4096/4     /* enough chars for hex data (4 CPUs per char) */
	       + 4096/(4*8) /* for commas (separate groups of 8 chars) */
	       + 1] = {0};  /* for \0 */
	char fname[80] = {0};

	char* chunk_str;
	int len, nbits;
	int i;

	/* init vals returned to caller */
	*set = NULL;
	*sz = 0;

	if (num_online_cpus() > 4096)
		goto out;

	/* Read string is in the format of <mask>[,<mask>]*. All <mask>s following
	   a comma are 8 chars (representing a 32-bit mask). The first <mask> may
	   have fewer chars. Bits are MSB to LSB, left to right. */
	snprintf(fname, sizeof(fname), "/proc/litmus/%s/%d", which, idx);
	ret = read_file(fname, &buf, sizeof(buf)-1);
	if (ret <= 0)
		goto out;

	len = strnlen(buf, sizeof(buf));
	nbits = 32*(len/9) + 4*(len%9); /* compute bits, accounting for commas */

	*set = CPU_ALLOC(nbits);
	if (*set == NULL) {
		/* fix: CPU_ZERO_S on a NULL set was undefined behavior */
		ret = -1;
		goto out;
	}
	*sz = CPU_ALLOC_SIZE(nbits);
	CPU_ZERO_S(*sz, *set);

	/* process LSB chunks first (at the end of the str) and move backward */
	chunk_str = buf + len - 8;
	i = 0;
	do
	{
		unsigned long chunk;
		if(chunk_str < buf)
			chunk_str = buf; /* when MSB mask is less than 8 chars */
		chunk = strtoul(chunk_str, NULL, 16);
		/* set a bit for each 1 in this 32-bit chunk */
		while (chunk) {
			int j = ffsl(chunk) - 1;
			int x = i*32 + j;
			CPU_SET_S(x, *sz, *set);
			chunk &= ~(1ul << j);
		}
		chunk_str -= 9; /* step over the preceding comma too */
		i += 1;
	} while(chunk_str >= buf - 8);

	ret = 0;

out:
	return ret;
}
Пример #5
0
// for fixing thread affinity to a single CPU after allocating memory chains and binding it to the local or remote nodes
static int max_number_of_cpus(void)
{
    int n, cpus = 2048;
    size_t setsize =  CPU_ALLOC_SIZE(cpus);
    cpu_set_t *set = CPU_ALLOC(cpus);
    if (!set)
        goto err;

	for (;;) {
		CPU_ZERO_S(setsize, set);
		/* the library version does not return size of cpumask_t */
		n = syscall(SYS_sched_getaffinity, 0, setsize, set);
		if (n < 0 && cpus < 1024 * 1024) {
		        CPU_FREE(set);
			cpus *= 2;
			set = CPU_ALLOC(cpus);
			if (!set)
				goto err;
			continue;
		}

	CPU_FREE(set);
	return n * 8;
	}
err:
	printf("cannot determine NR_CPUS");
	return 0;
}
Пример #6
0
/* Pin the calling process to CPU 0 and switch it to SCHED_RR with
 * priority 99.  Exits the process on any failure (requires privileges
 * for the scheduler change). */
static void sched_setup(void)
{
	int ret;
	size_t size;
	cpu_set_t *set;
	struct sched_param sp;

	set = CPU_ALLOC(2);
	if (set == NULL) {
		/* fix: previously a NULL set was passed straight to CPU_ZERO_S */
		perror("CPU_ALLOC");
		exit(-1);
	}
	size = CPU_ALLOC_SIZE(2);
	CPU_ZERO_S(size, set);
	/* fix: CPU_SET_S takes the set size in bytes, not the CPU count;
	 * passing 2 as the size made the macro operate on a 2-byte set */
	CPU_SET_S(0, size, set);

	memset(&sp, 0, sizeof(sp));
	sp.sched_priority = 99;
	ret = sched_setscheduler(0, SCHED_RR, &sp);
	if (ret < 0) {
		perror("sched_setscheduler");
		exit(-1);
	}
	ret = sched_setaffinity(0, size, set);
	if (ret < 0) {
		perror("sched_setaffinity");
		exit(-1);
	}

	CPU_FREE(set);
}
Пример #7
0
void*
Thread::start_routine(void* p) {
	// get the current affinity
	cpu_set_t cs;
	CPU_ZERO(&cs);
	sched_getaffinity(0, sizeof(cs), &cs);

	// deduce the amount of CPUs
	int count = 0;
	for (int i = 0; i < 8; i++)
	{
		if (CPU_ISSET(i, &cs))
				count++;
	}

	// restrict to a single CPU
	CPU_ZERO(&cs);
	size_t size = CPU_ALLOC_SIZE(1);
	CPU_SET_S(((Thread*) p)->id % count, size, &cs);
	sched_setaffinity(pthread_self(), size, &cs);

	// run
	((Thread*) p)->run();

	return NULL;
}
Пример #8
0
/* Probe for the size of the kernel CPU mask by growing the candidate
   CPU count one at a time until getaffinity() accepts the buffer.
   Returns the first accepted CPU count, or -1 on error. */
static int
find_set_size (void)
{
  /* There is considerable controversy about how to determine the size
     of the kernel CPU mask.  The probing loop below is only intended
     for testing purposes.  */
  int num_cpus = 64;

  while (num_cpus <= INT_MAX / 2)
    {
      size_t size = CPU_ALLOC_SIZE (num_cpus);
      cpu_set_t *set = CPU_ALLOC (num_cpus);

      if (set == NULL)
	{
	  printf ("error: CPU_ALLOC (%d) failed\n", num_cpus);
	  return -1;
	}
      if (getaffinity (size, set) == 0)
	{
	  CPU_FREE (set);
	  return num_cpus;
	}
      /* EINVAL means the buffer was too small; keep growing.  */
      if (errno != EINVAL)
	{
	  printf ("error: getaffinity for %d CPUs: %m\n", num_cpus);
	  CPU_FREE (set);
	  return -1;
	}
      CPU_FREE (set);
      ++num_cpus;
    }
  puts ("error: Cannot find maximum CPU number");
  return -1;
}
Пример #9
0
/* Migrate thread 'tid' (0 == calling thread) to 'target_cpu'.
 * Returns 0 on success, -1 on invalid CPU, allocation failure, or
 * sched_setaffinity() error. */
int be_migrate_thread_to_cpu(pid_t tid, int target_cpu)
{
	cpu_set_t *cpu_set;
	size_t sz;
	int num_cpus;
	int ret;

	/* TODO: Error check to make sure that tid is not a real-time task. */

	if (target_cpu < 0)
		return -1;

	num_cpus = num_online_cpus();
	if (num_cpus == -1)
		return -1;

	if (target_cpu >= num_cpus)
		return -1;

	cpu_set = CPU_ALLOC(num_cpus);
	if (!cpu_set)
		return -1;	/* fix: previously CPU_ZERO_S dereferenced NULL on OOM */
	sz = CPU_ALLOC_SIZE(num_cpus);
	CPU_ZERO_S(sz, cpu_set);
	CPU_SET_S(target_cpu, sz, cpu_set);

	/* apply to caller */
	if (tid == 0)
		tid = gettid();

	ret = sched_setaffinity(tid, sz, cpu_set);

	CPU_FREE(cpu_set);

	return ret;
}
Пример #10
0
/* CPU affinity mask buffer instead, as the present code will fail beyond 32 CPUs */
int set_cpu_affinity(unsigned int cpuid) {
  unsigned long mask = 0xffffffff;
  unsigned int len = sizeof(mask);
  int retValue = 0;
  int pid;

 #ifdef _WIN32
   HANDLE hProcess;
 #endif
 
#ifdef _WIN32
  SET_MASK(cpuid)
  hProcess = GetCurrentProcess();
  if (SetProcessAffinityMask(hProcess, mask) == 0) {
    return -1;
  }
#elif CMK_HAS_BINDPROCESSOR
  pid = getpid();
  if (bindprocessor(BINDPROCESS, pid, cpuid) == -1) return -1;
#else
#ifdef CPU_ALLOC
 if ( cpuid >= CPU_SETSIZE ) {
  cpu_set_t *cpusetp;
  size_t size;
  int num_cpus;
  num_cpus = cpuid + 1;
  cpusetp = CPU_ALLOC(num_cpus);
  if (cpusetp == NULL) {
    perror("set_cpu_affinity CPU_ALLOC");
    return -1;
  }
  size = CPU_ALLOC_SIZE(num_cpus);
  CPU_ZERO_S(size, cpusetp);
  CPU_SET_S(cpuid, size, cpusetp);
  if (sched_setaffinity(0, size, cpusetp) < 0) {
    perror("sched_setaffinity dynamically allocated");
    CPU_FREE(cpusetp);
    return -1;
  }
  CPU_FREE(cpusetp);
 } else
#endif
 {
  cpu_set_t cpuset;
  CPU_ZERO(&cpuset);
  CPU_SET(cpuid, &cpuset);
  /*SET_MASK(cpuid)*/

  /* PID 0 refers to the current process */
  /*if (sched_setaffinity(0, len, &mask) < 0) {*/
  if (sched_setaffinity(0, sizeof(cpuset), &cpuset) < 0) {
    perror("sched_setaffinity");
    return -1;
  }
 }
#endif

  return 0;
}
Пример #11
0
/* Return the CPU affinity of process 'pid' as a freshly allocated
 * virBitmap (caller frees), or NULL on error.  Uses a dynamically
 * sized set when CPU_ALLOC is available, otherwise the static
 * cpu_set_t (limited to 1024 CPUs). */
virBitmapPtr
virProcessGetAffinity(pid_t pid)
{
    size_t i;
    cpu_set_t *mask;
    size_t masklen;
    size_t ncpus;
    virBitmapPtr ret = NULL;

# ifdef CPU_ALLOC
    /* 262144 cpus ought to be enough for anyone */
    ncpus = 1024 << 8;
    masklen = CPU_ALLOC_SIZE(ncpus);
    mask = CPU_ALLOC(ncpus);

    if (!mask) {
        virReportOOMError();
        return NULL;
    }

    CPU_ZERO_S(masklen, mask);
# else
    /* Fallback: fixed-size set, at most 1024 CPUs visible. */
    ncpus = 1024;
    if (VIR_ALLOC(mask) < 0)
        return NULL;

    masklen = sizeof(*mask);
    CPU_ZERO(mask);
# endif

    if (sched_getaffinity(pid, masklen, mask) < 0) {
        virReportSystemError(errno,
                             _("cannot get CPU affinity of process %d"), pid);
        goto cleanup;
    }

    if (!(ret = virBitmapNew(ncpus)))
          goto cleanup;

    /* Copy each set CPU bit into the returned bitmap. */
    for (i = 0; i < ncpus; i++) {
# ifdef CPU_ALLOC
         /* coverity[overrun-local] */
        if (CPU_ISSET_S(i, masklen, mask))
            ignore_value(virBitmapSetBit(ret, i));
# else
        if (CPU_ISSET(i, mask))
            ignore_value(virBitmapSetBit(ret, i));
# endif
    }

 cleanup:
    /* free with the allocator matching the branch used above */
# ifdef CPU_ALLOC
    CPU_FREE(mask);
# else
    VIR_FREE(mask);
# endif

    return ret;
}
Пример #12
0
Файл: fork.c Проект: kschaab/RRO
/* req is one-based, cpu_set is zero-based */
/* Set (when 'req' is a numeric vector) and then report the CPU affinity
 * of the current process as a one-based INTSXP vector.  Returns
 * R_NilValue when retrieval fails and 'req' was supplied. */
SEXP mc_affinity(SEXP req) {
    if (req != R_NilValue && TYPEOF(req) != INTSXP && TYPEOF(req) != REALSXP)
	error(_("invalid CPU affinity specification"));
    if (TYPEOF(req) == REALSXP)
	req = coerceVector(req, INTSXP);
    if (TYPEOF(req) == INTSXP) {
	int max_cpu = 0, i, n = LENGTH(req), *v = INTEGER(req);
	for (i = 0; i < n; i++) {
	    if (v[i] > max_cpu)
		max_cpu = v[i];
	    if (v[i] < 1)
		error(_("invalid CPU affinity specification"));
	}
	/* These are both one-based */
	if (max_cpu <= CPU_SETSIZE) { /* can use static set */
	    cpu_set_t cs;
	    CPU_ZERO(&cs);
	    for (i = 0; i < n; i++)
		CPU_SET(v[i] - 1, &cs);
	    sched_setaffinity(0, sizeof(cpu_set_t), &cs);
	} else {
#ifndef CPU_ALLOC
	    error(_("requested CPU set is too large for this system"));
#else
	    size_t css = CPU_ALLOC_SIZE(max_cpu);
	    cpu_set_t *cs = CPU_ALLOC(max_cpu);
	    /* fix: CPU_ALLOC result was used unchecked; CPU_ZERO_S on a
	       NULL set is undefined behavior */
	    if (!cs)
		error(_("requested CPU set is too large for this system"));
	    CPU_ZERO_S(css, cs);
	    for (i = 0; i < n; i++)
		CPU_SET_S(v[i] - 1, css, cs);
	    sched_setaffinity(0, css, cs);
	    CPU_FREE(cs);
#endif
	}
    }

    {
	/* FIXME: in theory we may want to use *_S versions as well,
	 but that would require some knowledge about the number of
	 available CPUs and comparing that to CPU_SETSIZE, so for now
	 we just use static cpu_set -- the mask will be still set
	 correctly, just the returned set will be truncated at
	 CPU_SETSIZE */
	cpu_set_t cs;
	CPU_ZERO(&cs);
	if (sched_getaffinity(0, sizeof(cs), &cs)) {
	    if (req == R_NilValue)
		error(_("retrieving CPU affinity set failed"));
	    return R_NilValue;
	} else {
	    SEXP res = allocVector(INTSXP, CPU_COUNT(&cs));
	    int i, *v = INTEGER(res);
	    /* report one-based CPU numbers for every set bit */
	    for (i = 0; i < CPU_SETSIZE; i++)
		if (CPU_ISSET(i, &cs))
		    *(v++) = i + 1;
	    return res;
	}
    }
}
Пример #13
0
/* set affinity for current thread */
/* Pin the calling thread to the lcore returned by rte_lcore_id().
 * Returns 0 on success, -1 on allocation or affinity failure.
 *
 * Per the VERSIONS section of the CPU_ALLOC man page, the dynamic
 * CPU-set API (CPU_ALLOC, CPU_ALLOC_SIZE, CPU_FREE and the *_S
 * macros) first appeared in glibc 2.7; fall back to the static
 * cpu_set_t when it is unavailable. */
int
rw_piot_thread_set_affinity(void)
{
        int rc;
        pthread_t self;

#if defined(CPU_ALLOC)
        cpu_set_t *set;
        size_t set_bytes;

        set = CPU_ALLOC(RTE_MAX_LCORE);
        if (set == NULL) {
                RTE_LOG(ERR, EAL, "CPU_ALLOC failed\n");
                return -1;
        }

        set_bytes = CPU_ALLOC_SIZE(RTE_MAX_LCORE);
        CPU_ZERO_S(set_bytes, set);
        CPU_SET_S(rte_lcore_id(), set_bytes, set);

        self = pthread_self();
        rc = pthread_setaffinity_np(self, set_bytes, set);
        CPU_FREE(set);
        if (rc != 0) {
                RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
                return -1;
        }
#else /* CPU_ALLOC */
        cpu_set_t static_set;

        CPU_ZERO( &static_set );
        CPU_SET( rte_lcore_id(), &static_set );

        self = pthread_self();
        rc = pthread_setaffinity_np(self, sizeof( static_set ), &static_set);
        if (rc != 0) {
                RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
                return -1;
        }
#endif
        return 0;
}
Пример #14
0
/* LTP test fixture: builds the current affinity mask, an intentionally
 * over-sized error mask, forks a privileged child, then drops to the
 * 'nobody' user and picks an unused pid for negative tests. */
static void setup(void)
{
	tst_require_root(NULL);
	uid = geteuid();
	ncpus = tst_ncpus_max();

	/* Current mask */
	mask = CPU_ALLOC(ncpus);
	if (mask == NULL)
		tst_brkm(TBROK | TERRNO, cleanup, "CPU_ALLOC(%ld) failed",
			ncpus);
	mask_size = CPU_ALLOC_SIZE(ncpus);
	/* no CPU_ZERO_S needed: sched_getaffinity() fills the whole mask */
	if (sched_getaffinity(0, mask_size, mask) < 0)
		tst_brkm(TBROK | TERRNO, cleanup, "sched_getaffinity() failed");

	/* Mask with one more cpu than available on the system */
	emask = CPU_ALLOC(ncpus + 1);
	if (emask == NULL)
		tst_brkm(TBROK | TERRNO, cleanup, "CPU_ALLOC(%ld) failed",
			ncpus + 1);
	emask_size = CPU_ALLOC_SIZE(ncpus + 1);
	CPU_ZERO_S(emask_size, emask);
	/* only the invalid CPU (index == ncpus) is set in the error mask */
	CPU_SET_S(ncpus, emask_size, emask);

	/* Child keeps running as root so the unprivileged parent can
	 * attempt (and fail) to change its affinity. */
	privileged_pid = tst_fork();
	if (privileged_pid == 0) {
		pause();

		exit(0);
	} else if (privileged_pid < 0) {
		tst_brkm(TBROK | TERRNO, cleanup, "fork() failed");
	}

	/* Dropping the root privileges */
	ltpuser = getpwnam(nobody_uid);
	if (ltpuser == NULL)
		tst_brkm(TBROK | TERRNO, cleanup,
			"getpwnam failed for user id %s", nobody_uid);

	SAFE_SETEUID(cleanup, ltpuser->pw_uid);

	/* this pid is not used by the OS */
	free_pid = tst_get_unused_pid(cleanup);
}
Пример #15
0
int geopm_sched_woomp(int num_cpu, cpu_set_t *woomp)
{
    /*! @brief Function that returns a cpuset that has bits set for
               all CPUs enabled for the process which are not used by
               OpenMP.  Rather than returning an empty mask, if all
               CPUs allocated for the process are used by OpenMP, then
               the woomp mask will have all bits set. */

    /* Lazily build the cached process CPU mask (g_proc_cpuset). */
    int err = pthread_once(&g_proc_cpuset_once, geopm_proc_cpuset_once);
    int sched_num_cpu = geopm_sched_num_cpu();
    size_t req_alloc_size = CPU_ALLOC_SIZE(num_cpu);

    if (!err && !g_proc_cpuset) {
        err = ENOMEM;
    }
    /* The caller's buffer must be at least as large as the cached mask. */
    if (!err && req_alloc_size < g_proc_cpuset_size) {
        err = EINVAL;
    }
    if (!err) {
        /* Copy the process CPU mask into the output. */
        memcpy(woomp, g_proc_cpuset, g_proc_cpuset_size);
        /* Start an OpenMP parallel region and have each thread clear
           its bit from the mask. */
#ifdef _OPENMP
#pragma omp parallel default(shared)
{
#pragma omp critical
{
        int cpu_index = sched_getcpu();
        if (cpu_index != -1 && cpu_index < num_cpu) {
            /* Clear the bit for this OpenMP thread's CPU. */
            CPU_CLR_S(cpu_index, g_proc_cpuset_size, woomp);
        }
        else {
            err = errno ? errno : GEOPM_ERROR_LOGIC;
        }
} /* end pragma omp critical */
} /* end pragma omp parallel */
#endif /* _OPENMP */
    }
    if (!err) {
        /* Clear any bits beyond the CPUs known to the scheduler. */
        for (int i = sched_num_cpu; i < num_cpu; ++i) {
            CPU_CLR_S(i, req_alloc_size, woomp);
        }
    }
    if (err || CPU_COUNT_S(g_proc_cpuset_size, woomp) == 0) {
        /* If all CPUs are used by the OpenMP gang, then leave the
           mask open and allow the Linux scheduler to choose. */
        for (int i = 0; i < num_cpu; ++i) {
            CPU_SET_S(i, g_proc_cpuset_size, woomp);
        }
    }
    return err;
}
/* Repeatedly exercise pthread_attr_{set,get}affinity_np with a large
   (512-CPU) and a tiny (1-CPU) dynamically allocated set.  Returns 0
   on success, non-zero on failure. */
static int
do_test (void)
{
  for (int i = 0; i < 10; i++)
    {
      pthread_attr_t attr;
      cpu_set_t *cpuset = CPU_ALLOC (512);
      size_t cpusetsize = CPU_ALLOC_SIZE (512);
      /* fix: CPU_ALLOC results were used unchecked */
      if (cpuset == NULL)
	{
	  puts ("error: CPU_ALLOC (512) failed");
	  return 1;
	}
      CPU_ZERO_S (cpusetsize, cpuset);

      RETURN_IF_FAIL (pthread_attr_init, &attr);
      RETURN_IF_FAIL (pthread_attr_setaffinity_np, &attr, cpusetsize, cpuset);
      CPU_FREE (cpuset);

      cpuset = CPU_ALLOC (1);
      cpusetsize = CPU_ALLOC_SIZE (1);
      if (cpuset == NULL)
	{
	  puts ("error: CPU_ALLOC (1) failed");
	  return 1;
	}
      RETURN_IF_FAIL (pthread_attr_getaffinity_np, &attr, cpusetsize, cpuset);
      CPU_FREE (cpuset);
      /* fix: the attr was never destroyed, leaking once per iteration */
      RETURN_IF_FAIL (pthread_attr_destroy, &attr);
    }
  return 0;
}
Пример #17
0
/* Parse a whitespace-separated list of CPU indices from 'rvalue'.
 * On success stores a newly allocated set in *cpu_set (only when at
 * least one CPU was parsed) and returns the CPU count the set was
 * sized for (0 for an empty value).  Returns a negative value on
 * parse failure or OOM, logging via log_syntax(). */
int parse_cpu_set_and_warn(
                const char *rvalue,
                cpu_set_t **cpu_set,
                const char *unit,
                const char *filename,
                unsigned line,
                const char *lvalue) {

        const char *whole_rvalue = rvalue;
        /* freed automatically unless ownership is transferred below */
        _cleanup_cpu_free_ cpu_set_t *c = NULL;
        unsigned ncpus = 0;

        assert(lvalue);
        assert(rvalue);

        for (;;) {
                _cleanup_free_ char *word = NULL;
                unsigned cpu;
                int r;

                r = extract_first_word(&rvalue, &word, WHITESPACE, EXTRACT_QUOTES);
                if (r < 0) {
                        log_syntax(unit, LOG_ERR, filename, line, r, "Invalid value for %s: %s", lvalue, whole_rvalue);
                        return r;
                }
                if (r == 0)
                        break;

                /* Allocate the set lazily, on the first CPU token. */
                if (!c) {
                        c = cpu_set_malloc(&ncpus);
                        if (!c)
                                return log_oom();
                }

                r = safe_atou(word, &cpu);
                if (r < 0 || cpu >= ncpus) {
                        log_syntax(unit, LOG_ERR, filename, line, r, "Failed to parse CPU affinity '%s'", rvalue);
                        return -EINVAL;
                }

                CPU_SET_S(cpu, CPU_ALLOC_SIZE(ncpus), c);
        }

        /* On success, sets *cpu_set and returns ncpus for the system. */
        if (c) {
                *cpu_set = c;
                c = NULL;   /* disarm the cleanup attribute */
        }

        return (int) ncpus;
}
Пример #18
0
/* RTEMS test: verify rtems_scheduler_get_processor_set() error handling
 * and that the BLUE scheduler owns exactly processor 0, using both the
 * static cpu_set_t and a double-size dynamically allocated set. */
static void test_scheduler_get_processors(void)
{
#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)
  rtems_status_code sc;
  rtems_name name = BLUE;
  rtems_id scheduler_id;
  cpu_set_t cpusetone;
  cpu_set_t cpuset;
  /* a set twice the size of the static cpu_set_t, in bits */
  size_t big = 2 * CHAR_BIT * sizeof(cpu_set_t);
  size_t cpusetbigsize = CPU_ALLOC_SIZE(big);
  cpu_set_t *cpusetbigone;
  cpu_set_t *cpusetbig;

  /* expected result: only CPU 0 set */
  CPU_ZERO(&cpusetone);
  CPU_SET(0, &cpusetone);

  sc = rtems_scheduler_ident(name, &scheduler_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* error cases: NULL buffer, bad id, zero size */
  sc = rtems_scheduler_get_processor_set(scheduler_id, sizeof(cpuset), NULL);
  rtems_test_assert(sc == RTEMS_INVALID_ADDRESS);

  sc = rtems_scheduler_get_processor_set(invalid_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_INVALID_ID);

  sc = rtems_scheduler_get_processor_set(scheduler_id, 0, &cpuset);
  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);

  /* success with the static set */
  sc = rtems_scheduler_get_processor_set(scheduler_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(CPU_EQUAL(&cpuset, &cpusetone));

  cpusetbigone = CPU_ALLOC(big);
  rtems_test_assert(cpusetbigone != NULL);

  cpusetbig = CPU_ALLOC(big);
  rtems_test_assert(cpusetbig != NULL);

  CPU_ZERO_S(cpusetbigsize, cpusetbigone);
  CPU_SET_S(0, cpusetbigsize, cpusetbigone);

  /* success with the oversized dynamic set */
  sc = rtems_scheduler_get_processor_set(scheduler_id, cpusetbigsize, cpusetbig);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(CPU_EQUAL_S(cpusetbigsize, cpusetbig, cpusetbigone));

  CPU_FREE(cpusetbig);
  CPU_FREE(cpusetbigone);
#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */
}
Пример #19
0
/* Copy the cached process CPU mask into 'proc_cpuset' (sized for
   'num_cpu' CPUs), clearing any bits beyond the CPUs known to the
   scheduler.  Returns 0 on success or a GEOPM error code. */
int geopm_sched_proc_cpuset(int num_cpu, cpu_set_t *proc_cpuset)
{
    int err = pthread_once(&g_proc_cpuset_once, geopm_proc_cpuset_once);
    const int sched_num_cpu = geopm_sched_num_cpu();
    const size_t dest_size = CPU_ALLOC_SIZE(num_cpu);

    if (!err && dest_size < g_proc_cpuset_size) {
        /* caller's buffer is smaller than the cached mask */
        err = GEOPM_ERROR_INVALID;
    }
    if (!err) {
        memcpy(proc_cpuset, g_proc_cpuset, g_proc_cpuset_size);
        for (int cpu = sched_num_cpu; cpu < num_cpu; ++cpu) {
            CPU_CLR_S(cpu, dest_size, proc_cpuset);
        }
    }
    return err;
}
Пример #20
0
/* Build a runconfig sized for the CPUs this process may run on: reads
 * the current affinity mask (assuming at most 128 cores) and records
 * each allowed core index in cfg->corelist.  Aborts via fail() on any
 * error.  Caller owns the returned structure. */
static runconfig * allocforncores(void)
{
	const unsigned ncoresmax = 128;
	const unsigned cslen = CPU_ALLOC_SIZE(ncoresmax);
	printf("assuming no more than %u cores. set length = %u\n",
		ncoresmax, cslen);

	cpu_set_t * coreset = CPU_ALLOC(ncoresmax);

	/* both the allocation and the affinity query must succeed */
	if(coreset && !sched_getaffinity(getpid(), cslen, coreset)) { } else
	{
		fail("can't get current affinity");
	}
	
	const int ncores = CPU_COUNT_S(cslen, coreset);

	if(ncores) { } else
	{
		fail("don't know how to work on 0 cores\n");
	}

	/* NOTE(review): the (ncores - 1) sizing implies runconfig ends in a
	 * one-element corelist array used as a flexible array — confirm the
	 * struct declaration */
	runconfig *const cfg =
		malloc(sizeof(runconfig)
			+ sizeof(unsigned) * (ncores - 1));

	if(cfg) { } else
	{
		fail("can't allocate memory for config structure");
	}

	cfg->ncores = ncores;

	/* record the index of every allowed core; stops after 'ncores'
	 * hits, which matches the CPU_COUNT_S result above */
	unsigned cc = 0; // current core
	for(unsigned i = 0; cc < ncores; i += 1)
	{
		if(CPU_ISSET_S(i, cslen, coreset))
		{
			cfg->corelist[cc] = i;
			cc += 1;
		}
	}

	free(coreset);

	return cfg;
}
/* Migrate thread 'tid' (0 == calling thread) to the CPUs of the given
 * cluster, excluding the release master unless 'ignore_rm' is set.
 * Returns 0 on success, -1 on invalid cluster bounds, allocation
 * failure, or sched_setaffinity() error. */
int __be_migrate_thread_to_cluster(pid_t tid, int cluster, int cluster_sz,
						 int ignore_rm)
{

	int first_cpu = cluster * cluster_sz; /* first CPU in cluster */
	int last_cpu = first_cpu + cluster_sz - 1;
	int master;
	int num_cpus;
	cpu_set_t *cpu_set;
	size_t sz;
	int i;
	int ret;
	/* TODO: Error check to make sure that tid is not a real-time task. */

	if (cluster_sz == 1) {
		/* we're partitioned */
		return be_migrate_thread_to_partition(tid, cluster);
	}

	master = (ignore_rm) ? -1 : release_master();
	num_cpus = num_online_cpus();

	if (num_cpus == -1 || last_cpu >= num_cpus || first_cpu < 0)
		return -1;

	cpu_set = CPU_ALLOC(num_cpus);
	if (!cpu_set)
		return -1;	/* fix: previously CPU_ZERO_S dereferenced NULL on OOM */
	sz = CPU_ALLOC_SIZE(num_cpus);
	CPU_ZERO_S(sz, cpu_set);

	/* enable every CPU of the cluster except the release master */
	for (i = first_cpu; i <= last_cpu; ++i) {
		if (i != master) {
			CPU_SET_S(i, sz, cpu_set);
		}
	}

	/* apply to caller */
	if (tid == 0)
		tid = gettid();

	ret = sched_setaffinity(tid, sz, cpu_set);

	CPU_FREE(cpu_set);

	return ret;
}
Пример #22
0
/* Pin process/thread 'pid' (0 == caller) to CPU 'cpu'.  Returns the
   sched_setaffinity() result (0 on success).  Error handling is
   assert-based, so failures abort in debug builds only. */
int pin_cpu(pid_t pid, unsigned int cpu) {
   size_t size;
   /* fix: size the set for cpu+1 CPUs; the old CPU_ALLOC(1) set was too
      small to represent any cpu index beyond the first allocation unit */
   cpu_set_t * setPtr = CPU_ALLOC(cpu + 1);
   assert (NULL != setPtr && "cpu_set allocation failed!");

   size = CPU_ALLOC_SIZE(cpu + 1);
   CPU_ZERO_S(size, setPtr);     // clear set
   CPU_SET_S(cpu, size, setPtr); // enable requested cpu in set
   assert(1 == CPU_COUNT_S(size, setPtr));
   assert (CPU_ISSET_S(cpu, size, setPtr));

   int ret = sched_setaffinity(pid, size, setPtr);
   assert (ret == 0 && "sched_setaffinity failed");
   /* NOTE(review): this check is only meaningful when 'pid' refers to
      the calling thread (pid == 0) — confirm callers */
   assert (cpu == sched_getcpu() && "Pinning failed");

   CPU_FREE(setPtr);
   return ret;
}
Пример #23
0
/* MPI demo: each rank reports its hwloc binding and then lists every
 * CPU present in its sched_getaffinity() mask. */
int main(int argc, char* argv[])
{
    int rank, size, rc;
    hwloc_cpuset_t cpus;
    char *bindings;
    cpu_set_t *mask;
    int nrcpus, c;
    size_t csize;
    char hostname[1024];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* report the hwloc view of this rank's binding */
    gethostname(hostname, 1024);
    cpus = hwloc_bitmap_alloc();
    rc = hwloc_get_cpubind(opal_hwloc_topology, cpus, HWLOC_CPUBIND_PROCESS);
    hwloc_bitmap_list_asprintf(&bindings, cpus);

    printf("[%s;%d] Hello, World, I am %d of %d [%d local peers]: get_cpubind: %d bitmap %s\n",
           hostname, (int)getpid(), rank, size, orte_process_info.num_local_peers, rc,
           (NULL == bindings) ? "NULL" : bindings);

    /* cross-check with the kernel affinity mask, sized for the number
     * of online processors */
    nrcpus = sysconf(_SC_NPROCESSORS_ONLN);
    mask = CPU_ALLOC(nrcpus);
    csize = CPU_ALLOC_SIZE(nrcpus);
    CPU_ZERO_S(csize, mask);
    if ( sched_getaffinity(0, csize, mask) == -1 ) {
            CPU_FREE(mask);
            perror("sched_getaffinity");
            return -1;
    }

    for ( c = 0; c < nrcpus; c++ ) {
            if ( CPU_ISSET_S(c, csize, mask) ) {
                    printf("[%s:%d] CPU %d is set\n", hostname, (int)getpid(), c);
            }
    }

    CPU_FREE(mask);

    MPI_Finalize();
    return 0;
}
Пример #24
0
/* Bind 'thread' to its configured CPU (thread->cpu_id) if it is not
 * already bound there.  Returns 0 on success or when no binding is
 * needed, 1 on any failure. */
static int bind_cpu(thread_t *thread) {
    size_t setsize;
    cpu_set_t *cur_cpuset = NULL;
    cpu_set_t *new_cpuset = NULL;
    int rc = 0;

    int ncpus = max_number_of_cpus();

    if (thread == NULL) {
        // if thread is NULL it means the emulator is disabled, return without setting CPU affinity
        //printf("thread self is null");
        return 0;
    }

    if (ncpus == 0) {
    	return 1;
    }

    setsize = CPU_ALLOC_SIZE(ncpus);
    cur_cpuset = CPU_ALLOC(ncpus);
    new_cpuset = CPU_ALLOC(ncpus);
    /* fix: CPU_ALLOC results were used unchecked */
    if (cur_cpuset == NULL || new_cpuset == NULL) {
        rc = 1;
        goto out;
    }
    CPU_ZERO_S(setsize, cur_cpuset);
    CPU_ZERO_S(setsize, new_cpuset);
    CPU_SET_S(thread->cpu_id, setsize, new_cpuset);

    if (pthread_getaffinity_np(thread->pthread, setsize, cur_cpuset) != 0) {
        DBG_LOG(ERROR, "Cannot get thread tid [%d] affinity, pthread: 0x%lx on processor %d\n",
        		thread->tid, thread->pthread, thread->cpu_id);
        rc = 1;
        goto out;
    }

    /* fix: CPU_EQUAL reads sizeof(cpu_set_t) bytes, which can overrun a
     * dynamically allocated set; compare with CPU_EQUAL_S instead */
    if (CPU_EQUAL_S(setsize, cur_cpuset, new_cpuset)) {
        //printf("No need to bind CPU\n");
        goto out;   /* rc == 0 */
    }

    DBG_LOG(INFO, "Binding thread tid [%d] pthread: 0x%lx on processor %d\n", thread->tid, thread->pthread, thread->cpu_id);

    if (pthread_setaffinity_np(thread->pthread, setsize, new_cpuset) != 0) {
        DBG_LOG(ERROR, "Cannot bind thread tid [%d] pthread: 0x%lx on processor %d\n", thread->tid, thread->pthread, thread->cpu_id);
        rc = 1;
    }

out:
    /* fix: both sets leaked on every path in the original */
    CPU_FREE(cur_cpuset);
    CPU_FREE(new_cpuset);
    return rc;
}
Пример #25
0
/* pthread_once initializer: builds the cached process CPU mask
 * (g_proc_cpuset / g_proc_cpuset_size) by parsing /proc/self/status.
 * On any failure after the set allocation succeeds, the mask falls
 * back to "all CPUs set". */
static void geopm_proc_cpuset_once(void)
{
    const char *status_path = "/proc/self/status";
    const int num_cpu = geopm_sched_num_cpu();
    /* number of 32-bit words needed to hold one bit per CPU */
    const int num_read = num_cpu / 32 + (num_cpu % 32 ? 1 : 0);

    int err = 0;
    uint32_t *proc_cpuset = NULL;
    FILE *fid = NULL;

    g_proc_cpuset = CPU_ALLOC(num_cpu);
    if (g_proc_cpuset == NULL) {
        err = ENOMEM;
    }
    if (!err) {
        g_proc_cpuset_size = CPU_ALLOC_SIZE(num_cpu);
        proc_cpuset = calloc(num_read, sizeof(*proc_cpuset));
        if (proc_cpuset == NULL) {
            err = ENOMEM;
        }
    }
    if (!err) {
        fid = fopen(status_path, "r");
        if (!fid) {
            err = errno ? errno : GEOPM_ERROR_RUNTIME;
        }
    }
    if (!err) {
        /* parse the Cpus_allowed mask words out of the status file */
        err = geopm_sched_proc_cpuset_helper(num_cpu, proc_cpuset, fid);
        fclose(fid);
    }
    if (!err) {
        /* NOTE(review): assumes the helper fills proc_cpuset in the same
         * word layout a cpu_set_t uses — confirm against the helper */
        memcpy(g_proc_cpuset, proc_cpuset, g_proc_cpuset_size);
    }
    else if (g_proc_cpuset) {
        /* fall back to an all-CPUs mask on any error */
        for (int i = 0; i < num_cpu; ++i) {
            CPU_SET_S(i, g_proc_cpuset_size, g_proc_cpuset);
        }
    }
    if (proc_cpuset) {
        free(proc_cpuset);
    }
}
Пример #26
0
// Pin the calling thread to `cpu` and switch it to SCHED_FIFO with
// priority 99 (requires real-time privileges).  Exits on any failure.
inline void pin_RT(int cpu) {
  auto set = CPU_ALLOC(NUM_CPUS);
  auto size = CPU_ALLOC_SIZE(NUM_CPUS);
  // fix: CPU_ALLOC result was used unchecked; CPU_ZERO_S on a NULL
  // set is undefined behavior
  if (set == nullptr) {
    fprintf(stderr, "unable to allocate cpu set for cpu %i\n", cpu);
    exit(EXIT_FAILURE);
  }
  CPU_ZERO_S(size, set);
  CPU_SET_S(cpu, size, set);
  if (sched_setaffinity(0, size, set) != 0) {
    fprintf(stderr, "unable to pin to cpu %i\n", cpu);
    exit(EXIT_FAILURE);
  }
  CPU_FREE(set);

  struct sched_param p;
  p.sched_priority = 99;
  if (sched_setscheduler(0, SCHED_FIFO, &p) != 0) {
    fprintf(stderr, "unable to set SCHED_FIFO: %s\n",
      strerror(errno));
    exit(EXIT_FAILURE);
  }
}
Пример #27
0
/* Pin the calling process to 'core'.  On failure, exits unless the
 * global ignore_wire_failures flag is set.  Always returns 0. */
int wireme(int core)
{
	cpu_set_t *set;
	int numthreads = core + 1;
	int ret;
	size_t size;
	/* a set sized for core+1 CPUs is just large enough to hold 'core' */
	set = CPU_ALLOC(numthreads);
	size = CPU_ALLOC_SIZE(numthreads);
	/* fix: CPU_ALLOC result was used unchecked; CPU_ZERO_S on a NULL
	 * set is undefined behavior */
	if (!set) {
		if (!ignore_wire_failures) {
			fprintf(stderr, "wireme: pid %d, core %d, CPU_ALLOC failed\n",
				getpid(), core);
			exit(1);
		}
		return 0;
	}
	CPU_ZERO_S(size, set);
	/* lock us down. */
	CPU_SET_S(core, size, set);
	ret = sched_setaffinity(0, size, set);
	/* just blow up. If they ignore this error the numbers will be crap. */
	if ((ret < 0) && (! ignore_wire_failures)) {
		fprintf(stderr, "wireme: pid %d, core %d, %m\n", getpid(), core);
		exit(1);
	}
	CPU_FREE(set);
	return 0;
}
Пример #28
0
/* pthread_once initializer: builds the cached process CPU mask
 * (g_proc_cpuset / g_proc_cpuset_size) by running a helper pthread
 * whose attr is bound to all CPUs.  On error (other than OOM) the
 * mask is left with all bits set. */
static void geopm_proc_cpuset_once(void)
{
    int err = 0;
    int num_cpu = geopm_sched_num_cpu();
    pthread_t tid;
    pthread_attr_t attr;
    int attr_initialized = 0;

    g_proc_cpuset = CPU_ALLOC(num_cpu);
    if (g_proc_cpuset == NULL) {
        err = ENOMEM;
    }
    if (!err) {
        g_proc_cpuset_size = CPU_ALLOC_SIZE(num_cpu);
        /* start from an all-CPUs mask */
        for (int i = 0; i < num_cpu; ++i) {
            CPU_SET_S(i, g_proc_cpuset_size, g_proc_cpuset);
        }
        err = pthread_attr_init(&attr);
    }
    if (!err) {
        attr_initialized = 1;
        err = pthread_attr_setaffinity_np(&attr, g_proc_cpuset_size, g_proc_cpuset);
    }
    if (!err) {
        err = pthread_create(&tid, &attr, geopm_proc_cpuset_pthread, NULL);
    }
    if (!err) {
        void *result = NULL;
        err = pthread_join(tid, &result);
        if (!err && result) {
            err = (int)(size_t)result;
        }
    }
    if (err && err != ENOMEM) {
        /* fall back to an all-CPUs mask on any post-allocation error */
        for (int i = 0; i < num_cpu; ++i) {
            CPU_SET_S(i, g_proc_cpuset_size, g_proc_cpuset);
        }
    }
    /* fix: the attr was only destroyed on the fully successful path,
     * leaking whenever a later step failed */
    if (attr_initialized) {
        pthread_attr_destroy(&attr);
    }
}
Пример #29
0
/* Bind the calling process to 'cpu' and, on NUMA systems, bind its
 * memory to node (cpu % 2).  Returns 0 on success, -1 on error (with
 * errno set to EINVAL for an out-of-range cpu). */
int bind_cpu(int cpu)
{
	cpu_set_t *cmask;
	struct bitmask *bmask;
	size_t ncpu, setsize;
	int ret;

	ncpu = get_num_cpus();

	if (cpu < 0 || cpu >= (int)ncpu) {
		errno = EINVAL;	/* fix: errno takes positive codes; was -EINVAL */
		return -1;
	}

	cmask = CPU_ALLOC(ncpu);
	if (cmask == NULL)
		return -1;

	setsize = CPU_ALLOC_SIZE(ncpu);
	CPU_ZERO_S(setsize, cmask);
	CPU_SET_S(cpu, setsize, cmask);

	/* fix: pass the set's byte size, not the CPU count, as the size */
	ret = sched_setaffinity(0, setsize, cmask);

	CPU_FREE(cmask);

	/* skip NUMA stuff for UMA systems */
	if (numa_max_node() == 0)
		return ret;

	bmask = numa_bitmask_alloc(16);
	assert(bmask);

	/* NOTE(review): binding memory to node (cpu % 2) assumes a
	 * two-node topology — confirm against the target machines */
	numa_bitmask_setbit(bmask, cpu % 2);
	numa_set_membind(bmask);
	numa_bitmask_free(bmask);

	return ret;
}
Пример #30
0
/* Report the CPUs the calling process may run on: writes 1 into
 * int_mask[i] for each allowed CPU i and stores the count of allowed
 * CPUs in *nelements_set.  Returns 0 on success, -1 on error.
 * NOTE(review): int_mask must be large enough for the probed CPU count
 * (up to 1024 << 8 entries in the worst case) — confirm callers. */
int boundto(int* nelements_set, int* int_mask)
{
          cpu_set_t *mask;
          size_t size;
          int i;
          int nrcpus = 1024;
          int knt = 0;

  realloc:
          mask = CPU_ALLOC(nrcpus);
          /* fix: CPU_ALLOC result was used unchecked */
          if (mask == NULL) {
                  perror("CPU_ALLOC");
                  return -1;
          }
          size = CPU_ALLOC_SIZE(nrcpus);
          CPU_ZERO_S(size, mask);
          if ( sched_getaffinity(0, size, mask) == -1 ) {
                  CPU_FREE(mask);
                  /* EINVAL: buffer too small for the kernel mask; grow
                   * 4x up to a 262144-CPU ceiling and retry */
                  if (errno == EINVAL &&
                      nrcpus < (1024 << 8)) {
                         nrcpus = nrcpus << 2;
                         goto realloc;
                  }
                  perror("sched_getaffinity");
                  return -1;
          }

          for ( i = 0; i < nrcpus; i++ ) {
                  if ( CPU_ISSET_S(i, size, mask) ) {
                          //printf("CPU %d is set\n", (i));
                          int_mask[i] = 1;
                          knt++;
                  }
          }
          *nelements_set = knt;

          CPU_FREE(mask);

          return 0;
}