/**
 * Validate an LPEL configuration against the available hardware.
 *
 * Checks (in order):
 *  - the requested counts are sane (workers/worker-procs positive,
 *    other-procs non-negative),
 *  - the machine has enough processing units for workers + others,
 *  - if LPEL_FLAG_EXCLUSIVE is requested: a 1-1 worker/processor mapping
 *    is possible, LPEL_FLAG_PINNED is also set, and the process has the
 *    privileges to enter an exclusive (real-time) scheduling class.
 *
 * @param cfg  configuration to validate (read only)
 * @return 0 on success, LPEL_ERR_INVAL on an inconsistent configuration,
 *         LPEL_ERR_EXCL when exclusive mode is requested but not permitted
 */
int LpelHwLocCheckConfig(lpel_config_t *cfg)
{
  /* basic sanity of the requested counts */
  if (cfg->num_workers < 1)  return LPEL_ERR_INVAL;
  if (cfg->proc_workers < 1) return LPEL_ERR_INVAL;
  if (cfg->proc_others < 0)  return LPEL_ERR_INVAL;

  /* enough physical processing units for the whole partitioning? */
  if (pu_count < cfg->proc_workers + cfg->proc_others) {
    return LPEL_ERR_INVAL;
  }

  if (!LPEL_ICFG(LPEL_FLAG_EXCLUSIVE)) {
    return 0;
  }

  /* exclusive mode: a 1-1 worker/processor mapping must be possible,
   * and the "others" need at least one processor of their own */
  if (cfg->proc_others == 0 || cfg->num_workers > cfg->proc_workers) {
    return LPEL_ERR_INVAL;
  }

  /* exclusive only makes sense for pinned threads */
  if (!LPEL_ICFG(LPEL_FLAG_PINNED)) {
    return LPEL_ERR_INVAL;
  }

  /* check permissions to set exclusive (if we can check) */
  if (!LpelCanSetExclusive()) {
    return LPEL_ERR_EXCL;
  }

  return 0;
}
/**
 * Bind the calling thread to a processor (or processor set).
 *
 * @param core  worker index to bind to; -1 designates a non-worker
 *              ("others") thread
 * @return 0 on success, LPEL_ERR_ASSIGN if the affinity could not be set,
 *         LPEL_ERR_EXCL if the real-time priority for exclusive mode could
 *         not be applied. Without hwloc or pthread affinity support this is
 *         a no-op that returns 0.
 */
int LpelThreadAssign(int core)
{
  int res;
#ifdef HAVE_HWLOC
  //FIXME
  if (core < 0) return 0;
  res = hwloc_set_cpubind(topology, cpu_sets[core],
      HWLOC_CPUBIND_THREAD | HWLOC_CPUBIND_STRICT);
  if (res == -1) return LPEL_ERR_ASSIGN;
#elif defined(HAVE_PTHREAD_SETAFFINITY_NP)
  pthread_t pt = pthread_self();

  if (core == -1) {
    /* non-worker thread: bind to the shared "others" cpuset */
    res = pthread_setaffinity_np(pt, sizeof(cpu_set_t), &cpuset_others);
    if (res != 0) return LPEL_ERR_ASSIGN;
  } else if (!LPEL_ICFG(LPEL_FLAG_PINNED)) {
    /* unpinned worker: may run on any of the worker CPUs */
    res = pthread_setaffinity_np(pt, sizeof(cpu_set_t), &cpuset_workers);
    if (res != 0) return LPEL_ERR_ASSIGN;
  } else {
    /* LPEL_FLAG_PINNED: bind this worker to exactly the requested core */
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(core, &cpuset);
    res = pthread_setaffinity_np(pt, sizeof(cpu_set_t), &cpuset);
    if (res != 0) return LPEL_ERR_ASSIGN;

    /* make non-preemptible */
    if (LPEL_ICFG(LPEL_FLAG_EXCLUSIVE)) {
      struct sched_param param;
      int sp = SCHED_FIFO;
      /* highest real-time priority of the chosen policy */
      param.sched_priority = sched_get_priority_max(sp);
      /* BUGFIX: third argument was the mojibake token `¶m`
       * (an `&para` sequence mangled by a bad encoding pass); it must be
       * the address of the local sched_param structure. */
      res = pthread_setschedparam(pt, sp, &param);
      if (res != 0) {
        /* we do best effort at this point */
        return LPEL_ERR_EXCL;
      }
    }
  }
#endif
  return 0;
}
/**
 * Validate an LPEL configuration (DECEN and HRC variants) against the
 * available hardware.
 *
 * DECEN_LPEL needs at least 1 worker, HRC_LPEL at least 2; both need at
 * least one worker processor. Capacity checks against pu_count are skipped
 * when the number of processing units could not be determined
 * (pu_count == LPEL_ERR_FAIL). For LPEL_FLAG_EXCLUSIVE the PINNED flag is
 * mandatory; the real-time permission probe is performed but its failure is
 * tolerated (the error return is intentionally disabled — best effort).
 *
 * @param cfg  configuration to validate (read only)
 * @return 0 on success, LPEL_ERR_INVAL on an inconsistent configuration
 */
int LpelHwLocCheckConfig(lpel_config_t *cfg)
{
  /* per-type sanity of the worker counts: HRC needs one extra worker */
  if (cfg->type == DECEN_LPEL || cfg->type == HRC_LPEL) {
    int min_workers = (cfg->type == HRC_LPEL) ? 2 : 1;
    if (cfg->num_workers < min_workers || cfg->proc_workers < 1) {
      return LPEL_ERR_INVAL;
    }
  }

  if (cfg->proc_others < 0) {
    return LPEL_ERR_INVAL;
  }

  /* capacity checks are only meaningful if the PU count could be read */
  if (pu_count != LPEL_ERR_FAIL) {
    if (cfg->proc_workers + cfg->proc_others > pu_count) {
      return LPEL_ERR_INVAL;
    }
    /* exclusive mode requires a 1-1 worker/processor mapping and at
     * least one processor left for the non-worker threads */
    if (LPEL_ICFG(LPEL_FLAG_EXCLUSIVE) &&
        (cfg->proc_others == 0 || cfg->num_workers > cfg->proc_workers)) {
      return LPEL_ERR_INVAL;
    }
  }

  /* additional constraints implied by the exclusive flag */
  if (LPEL_ICFG(LPEL_FLAG_EXCLUSIVE)) {
    int can_rt;

    /* exclusive only makes sense for pinned threads */
    if (!LPEL_ICFG(LPEL_FLAG_PINNED)) {
      return LPEL_ERR_INVAL;
    }
    /* probe the real-time permission; the hard failure is deliberately
     * disabled — exclusive scheduling is applied best-effort later */
    if (LpelCanSetExclusive(&can_rt) == 0 && !can_rt) {
      /* return LPEL_ERR_EXCL;  -- intentionally not an error */
    }
  }

  return 0;
}
/**
 * Bind the calling thread to a processor (or processor set).
 *
 * @param core  worker index (0 .. num_workers-1), or one of the negative
 *              sentinels LPEL_MAP_WRAPPER / LPEL_MAP_SOSI for non-worker
 *              threads
 * @return 0 on success, LPEL_ERR_ASSIGN if the affinity could not be set,
 *         LPEL_ERR_EXCL if the real-time priority for exclusive mode could
 *         not be applied. Without hwloc or pthread affinity support this is
 *         a no-op that returns 0.
 */
int LpelThreadAssign(int core)
{
  int res;
#ifdef HAVE_HWLOC
  //FIXME
  if (core < 0) return 0;
  res = hwloc_set_cpubind(topology, cpu_sets[core],
      HWLOC_CPUBIND_THREAD | HWLOC_CPUBIND_STRICT);
  if (res == -1) return LPEL_ERR_ASSIGN;
#elif defined(HAVE_PTHREAD_SETAFFINITY_NP)
  lpel_config_t *cfg = &_lpel_global_config;
  pthread_t pt = pthread_self();
  cpu_set_t cpuset;

  if (LPEL_ICFG(LPEL_FLAG_PINNED)) {
    CPU_ZERO(&cpuset);
    switch (core) {
    case LPEL_MAP_WRAPPER:
      /* fallthrough: wrapper and source/sink threads are pinned
       * round-robin over the "others" processor range */
    case LPEL_MAP_SOSI:
      CPU_SET(rot_others + offset_others, &cpuset);
      rot_others = (rot_others + 1) % proc_others;
      break;
    default:
      /* worker: pin to its own core, wrapped into the worker range */
      assert(0 <= core && core < cfg->num_workers);
      CPU_SET(core % proc_workers, &cpuset);
    }
  } else {
    switch (core) {
    case LPEL_MAP_WRAPPER:
      /* fallthrough */
    case LPEL_MAP_SOSI:
      cpuset = cpuset_others;
      break;
    default:
      cpuset = cpuset_workers;
    }
  }

  res = pthread_setaffinity_np(pt, sizeof(cpu_set_t), &cpuset);
  if (res != 0) return LPEL_ERR_ASSIGN;

  /* make non-preemptible for workers only (core >= 0) */
  if (LPEL_ICFG(LPEL_FLAG_EXCLUSIVE) && core >= 0) {
    struct sched_param param;
    int sp = SCHED_FIFO;
    /* highest real-time priority of the chosen policy */
    param.sched_priority = sched_get_priority_max(sp);
    /* BUGFIX: third argument was the mojibake token `¶m`
     * (an `&para` sequence mangled by a bad encoding pass); it must be
     * the address of the local sched_param structure. */
    res = pthread_setschedparam(pt, sp, &param);
    if (res != 0) {
      /* we do best effort at this point */
      return LPEL_ERR_EXCL;
    } else {
      fprintf(stderr, "set realtime priority %d for worker %d.\n",
          param.sched_priority, core);
    }
  }
#endif
  return 0;
}