int set_thread_affinity(int cpu) { int retval = -1; #if defined(CPU_ZERO) cpu_set_t cpu_mask; CPU_ZERO(&cpu_mask); if (cpu >= 0 && cpu <= CPU_SETSIZE) { CPU_SET(cpu, &cpu_mask); } else { fprintf (stderr, "Wrong cpu id: %d\n", cpu); return -1; } retval = pthread_setaffinity_np(pthread_self(), sizeof(cpu_mask), &cpu_mask); #elif defined(cpuset_create) cpuset_t *cpu_mask = cpuset_create(); cpuset_zero(cpu_mask); if (cpu >= 0 && cpu <= cpuset_size(cpu_mask)) { cpuset_set(cpu, cpu_mask); } else { fprintf (stderr, "Wrong cpu id: %d\n", cpu); return -1; } retval = pthread_setaffinity_np(0, cpuset_size(cpu_mask), cpu_mask); cpuset_destroy(cpu_mask); #else #error "no cpuset" #endif if (retval != 0) fprintf (stderr, "Error at pthread_setaffinity_np():\n"); return retval; }
/*
 * hwloc NetBSD backend: fetch the CPU binding of process `pid` into
 * `hwloc_cpuset`.
 *
 * Returns the sched_getaffinity_np() status (0 on success, -1 on
 * failure with errno set), or -1 if the temporary cpuset cannot be
 * allocated.
 */
static int
hwloc_netbsd_get_proc_cpubind(hwloc_topology_t topology __hwloc_attribute_unused,
                              hwloc_pid_t pid,
                              hwloc_bitmap_t hwloc_cpuset,
                              int flags __hwloc_attribute_unused)
{
  int status;
  cpuset_t *cpuset = cpuset_create();

  /* cpuset_create() allocates and can fail; bail out rather than
   * passing NULL to cpuset_size()/sched_getaffinity_np(). */
  if (cpuset == NULL)
    return -1;

  status = sched_getaffinity_np(pid, cpuset_size(cpuset), cpuset);
  /* Translate the native mask into the caller's hwloc bitmap. */
  hwloc_netbsd_bsd2hwloc(hwloc_cpuset, cpuset);
  cpuset_destroy(cpuset);
  return status;
}
/*
 * hwloc NetBSD backend: bind thread `tid` to the CPUs in `hwloc_cpuset`.
 *
 * Returns 0 on success; on failure returns -1 with errno set to the
 * pthread_setaffinity_np() error code (or ENOMEM if the temporary
 * cpuset cannot be allocated).
 */
static int
hwloc_netbsd_set_thread_cpubind(hwloc_topology_t topology __hwloc_attribute_unused,
                                hwloc_thread_t tid,
                                hwloc_const_bitmap_t hwloc_cpuset,
                                int flags __hwloc_attribute_unused)
{
  int status;
  cpuset_t *cpuset = cpuset_create();

  /* cpuset_create() allocates and can fail; don't dereference NULL. */
  if (cpuset == NULL) {
    errno = ENOMEM;
    return -1;
  }

  hwloc_netbsd_hwloc2bsd(hwloc_cpuset, cpuset);
  status = pthread_setaffinity_np(tid, cpuset_size(cpuset), cpuset);
  cpuset_destroy(cpuset);

  /* pthread_setaffinity_np() returns an error number, not -1/errno;
   * convert to the -1/errno convention hwloc callers expect. */
  if (status) {
    errno = status;
    return -1;
  }
  return 0;
}
static void intrctl_affinity(int argc, char **argv) { struct intrio_set iset; cpuset_t *cpuset; unsigned long index; int ch, error; index = ULONG_MAX; memset(&iset.intrid, 0, sizeof(iset.intrid)); while ((ch = getopt(argc, argv, "c:i:")) != -1) { switch (ch) { case 'c': index = strtoul(optarg, NULL, 10); break; case 'i': if (strnlen(optarg, ARG_MAX) > INTRIDBUF) usage(); strlcpy(iset.intrid, optarg, INTRIDBUF); break; default: usage(); } } if (iset.intrid[0] == '\0' || index == ULONG_MAX) usage(); if (index >= (u_long)sysconf(_SC_NPROCESSORS_CONF)) err(EXIT_FAILURE, "invalid cpu index"); cpuset = cpuset_create(); if (cpuset == NULL) err(EXIT_FAILURE, "create_cpuset()"); cpuset_zero(cpuset); cpuset_set(index, cpuset); iset.cpuset = cpuset; iset.cpuset_size = cpuset_size(cpuset); error = sysctlbyname("kern.intr.affinity", NULL, NULL, &iset, sizeof(iset)); cpuset_destroy(cpuset); if (error < 0) err(EXIT_FAILURE, "sysctl kern.intr.affinity"); }
/*
 * cpuset(2) system call: create a new cpuset under the calling thread's
 * root set, copy its id out to userland, and rebind the calling process
 * to it.  Returns 0 or an errno value.
 */
int
sys_cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	/*
	 * td->td_cpuset is protected by the thread lock; cpuset_refroot()
	 * takes a reference so `root` stays valid after we drop the lock
	 * (released again with cpuset_rel() below).
	 */
	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	/* The new set inherits the root's CPU mask. */
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	/* Hand the new set's id back to the caller. */
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	/*
	 * Only rebind if the copyout succeeded, so userland never sees a
	 * set it has no id for.  NOTE(review): pid -1 presumably means
	 * "the current process" — confirm against cpuset_setproc().
	 */
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);
	return (error);
}
/*
 * Pin `thread` to the CPUs listed in cpu_id[0 .. cpu_count-1].
 *
 * Returns KNOT_EOK on success, KNOT_EINVAL for a NULL thread,
 * KNOT_ENOMEM if the NetBSD cpuset cannot be allocated, KNOT_ERROR if
 * pthread_setaffinity_np() fails, and KNOT_ENOTSUP when the platform
 * has no pthread_setaffinity_np().
 */
int dt_setaffinity(dthread_t *thread, unsigned* cpu_id, size_t cpu_count)
{
	if (thread == NULL) {
		return KNOT_EINVAL;
	}

#ifdef HAVE_PTHREAD_SETAFFINITY_NP
	int ret = -1;

	/* Linux, FreeBSD interface. */
#if defined(HAVE_CPUSET_LINUX) || defined(HAVE_CPUSET_BSD)
	cpu_set_t set;
	CPU_ZERO(&set);
	for (unsigned i = 0; i < cpu_count; ++i) {
		CPU_SET(cpu_id[i], &set);
	}
	ret = pthread_setaffinity_np(thread->_thr, sizeof(cpu_set_t), &set);
	/* NetBSD interface. */
#elif defined(HAVE_CPUSET_NETBSD)
	cpuset_t *set = cpuset_create();
	if (set == NULL) {
		return KNOT_ENOMEM;
	}
	cpuset_zero(set);
	for (unsigned i = 0; i < cpu_count; ++i) {
		cpuset_set(cpu_id[i], set);
	}
	ret = pthread_setaffinity_np(thread->_thr, cpuset_size(set), set);
	cpuset_destroy(set);
#endif /* interface */

	/*
	 * pthread_setaffinity_np() returns 0 on success and a positive
	 * error number on failure — it never returns a negative value —
	 * so the failure test must be != 0 (the former `ret < 0` check
	 * silently ignored every failure).
	 */
	if (ret != 0) {
		return KNOT_ERROR;
	}
#else /* HAVE_PTHREAD_SETAFFINITY_NP */
	return KNOT_ENOTSUP;
#endif

	return KNOT_EOK;
}
/*
 * Create a cpuset, which would be cpuset_create() but
 * mark the new 'set' as root.
 *
 * We are not going to reparent the td to it. Use cpuset_setproc_update_set()
 * for that.
 *
 * In case of no error, returns the set in *setp locked with a reference.
 */
int
cpuset_create_root(struct prison *pr, struct cpuset **setp)
{
	struct cpuset *set;
	int error;

	/* Both arguments are kernel-internal and must be non-NULL. */
	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	/* The new set is parented to the prison's set and inherits its mask. */
	error = cpuset_create(setp, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
	if (error)
		return (error);

	KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set = *setp;
	set->cs_flags |= CPU_SET_ROOT;

	return (0);
}
static void intrctl_nointr(int argc, char **argv) { struct intrio_set iset; cpuset_t *cpuset; unsigned long index; int ch, error; index = ULONG_MAX; while ((ch = getopt(argc, argv, "c:")) != -1) { switch (ch) { case 'c': index = strtoul(optarg, NULL, 10); break; default: usage(); } } if (index == ULONG_MAX) usage(); if (index >= (u_long)sysconf(_SC_NPROCESSORS_CONF)) err(EXIT_FAILURE, "invalid cpu index"); cpuset = cpuset_create(); if (cpuset == NULL) err(EXIT_FAILURE, "create_cpuset()"); cpuset_zero(cpuset); cpuset_set(index, cpuset); iset.cpuset = cpuset; iset.cpuset_size = cpuset_size(cpuset); error = sysctlbyname("kern.intr.nointr", NULL, NULL, &iset, sizeof(iset)); cpuset_destroy(cpuset); if (error < 0) err(EXIT_FAILURE, "sysctl kern.intr.nointr"); }
int set_process_affinity(int cpu) { int retval = -1; #if defined(CPU_ZERO) cpu_set_t cpu_mask; CPU_ZERO(&cpu_mask); if (cpu >= 0 && cpu <= CPU_SETSIZE) { CPU_SET(cpu, &cpu_mask); } else { fprintf (stderr, "Wrong cpu id: %d\n", cpu); return -1; } retval = sched_setaffinity(0, sizeof(cpu_mask), &cpu_mask); #elif defined(cpuset_create) cpuset_t *cpu_mask = cpuset_create(); cpuset_zero(cpu_mask); if (cpu >= 0 && cpu <= cpuset_size(cpu_mask)) { cpuset_set(cpu, cpu_mask); } else { fprintf (stderr, "Wrong cpu id: %d\n", cpu); return -1; } retval = _sched_setaffinity(0, 0, cpuset_size(cpu_mask), cpu_mask); cpuset_destroy(cpu_mask); #else #error "no cpuset" #endif if (retval == -1) perror("Error at sched_setaffinity()"); return retval; }
/* Return the number of processors available to the current process, based on a modern system call that returns the "affinity" between the current process and each CPU. Return 0 if unknown or if such a system call does not exist. */ static unsigned long num_processors_via_affinity_mask (void) { /* glibc >= 2.3.3 with NPTL and NetBSD 5 have pthread_getaffinity_np, but with different APIs. Also it requires linking with -lpthread. Therefore this code is not enabled. glibc >= 2.3.4 has sched_getaffinity whereas NetBSD 5 has sched_getaffinity_np. */ #if HAVE_PTHREAD_GETAFFINITY_NP && defined __GLIBC__ && 0 { cpu_set_t set; if (pthread_getaffinity_np (pthread_self (), sizeof (set), &set) == 0) { unsigned long count; # ifdef CPU_COUNT /* glibc >= 2.6 has the CPU_COUNT macro. */ count = CPU_COUNT (&set); # else size_t i; count = 0; for (i = 0; i < CPU_SETSIZE; i++) if (CPU_ISSET (i, &set)) count++; # endif if (count > 0) return count; } } #elif HAVE_PTHREAD_GETAFFINITY_NP && defined __NetBSD__ && 0 { cpuset_t *set; set = cpuset_create (); if (set != NULL) { unsigned long count = 0; if (pthread_getaffinity_np (pthread_self (), cpuset_size (set), set) == 0) { cpuid_t i; for (i = 0;; i++) { int ret = cpuset_isset (i, set); if (ret < 0) break; if (ret > 0) count++; } } cpuset_destroy (set); if (count > 0) return count; } } #elif HAVE_SCHED_GETAFFINITY_LIKE_GLIBC /* glibc >= 2.3.4 */ { cpu_set_t set; if (sched_getaffinity (0, sizeof (set), &set) == 0) { unsigned long count; # ifdef CPU_COUNT /* glibc >= 2.6 has the CPU_COUNT macro. 
*/ count = CPU_COUNT (&set); # else size_t i; count = 0; for (i = 0; i < CPU_SETSIZE; i++) if (CPU_ISSET (i, &set)) count++; # endif if (count > 0) return count; } } #elif HAVE_SCHED_GETAFFINITY_NP /* NetBSD >= 5 */ { cpuset_t *set; set = cpuset_create (); if (set != NULL) { unsigned long count = 0; if (sched_getaffinity_np (getpid (), cpuset_size (set), set) == 0) { cpuid_t i; for (i = 0;; i++) { int ret = cpuset_isset (i, set); if (ret < 0) break; if (ret > 0) count++; } } cpuset_destroy (set); if (count > 0) return count; } } #endif #if (defined _WIN32 || defined __WIN32__) && ! defined __CYGWIN__ { /* This works on native Windows platforms. */ DWORD_PTR process_mask; DWORD_PTR system_mask; if (GetProcessAffinityMask (GetCurrentProcess (), &process_mask, &system_mask)) { DWORD_PTR mask = process_mask; unsigned long count = 0; for (; mask != 0; mask = mask >> 1) if (mask & 1) count++; if (count > 0) return count; } }