Example #1
static int ATL_setmyaffinity()
/*
 * Attempts to set the affinity of an already-running thread.  The
 * paff_set flag is set to true whether we succeed or not (no point in
 * trying multiple times).
 * RETURNS: 0 on success, non-zero error code on error
 */
{
   int bindID;
   bindID = omp_get_thread_num();
   #ifdef ATL_RANK_IS_PROCESSORID
      bindID = bindID % ATL_AFF_NUMID;
   #else
      bindID = ATL_affinityIDs[bindID%ATL_AFF_NUMID];
   #endif
#ifdef ATL_PAFF_PLPA
   plpa_cpu_set_t cpuset;
   PLPA_CPU_ZERO(&cpuset);
   PLPA_CPU_SET(bindID, &cpuset);
   if (me->paff_set)
      return(0);
   me->paff_set = 1;
   return(plpa_sched_setaffinity((pid_t)0, sizeof(cpuset), &cpuset));
#elif defined(ATL_PAFF_PBIND)
   return(processor_bind(P_LWPID, P_MYID, bindID, NULL));
#elif defined(ATL_PAFF_SCHED)
   cpu_set_t cpuset;
   CPU_ZERO(&cpuset);
   CPU_SET(bindID, &cpuset);
   if (me->paff_set)
      return(0);
   me->paff_set = 1;
   return(sched_setaffinity(0, sizeof(cpuset), &cpuset));
#elif defined (ATL_PAFF_RUNON)
   if (me->paff_set)
      return(0);
   me->paff_set = 1;
   return(pthread_setrunon_np(bindID));
#elif defined(ATL_PAFF_BINDP)
   if (me->paff_set)
      return(0);
   me->paff_set = 1;
   return(bindprocessor(BINDTHREAD, thread_self(), bindID));
#elif defined(ATL_PAFF_CPUSET)  /* untried FreeBSD code */
   cpuset_t mycpuset;
   CPU_ZERO(&mycpuset);         /* no manpage, so guess works like linux */
   CPU_SET(bindID, &mycpuset);
   if (me->paff_set)
      return(0);
   me->paff_set = 1;
   return(cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
                             sizeof(mycpuset), &mycpuset));
#endif
   return(0);
}
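All of the examples below reduce to the same three-step PLPA pattern: clear a plpa_cpu_set_t, set the bit(s) for the processor(s) to run on, and hand the mask to plpa_sched_setaffinity(). A minimal, self-contained sketch of that pattern (the helper name pin_self_to_core is illustrative and not part of ATLAS):

#include <plpa.h>

/* Sketch: pin the calling thread/process to processor `id`.
 * Returns 0 on success, non-zero on error (as in the example above). */
static int pin_self_to_core(int id)
{
   plpa_cpu_set_t cpuset;

   PLPA_CPU_ZERO(&cpuset);            /* start with an empty mask      */
   PLPA_CPU_SET(id, &cpuset);         /* allow only processor `id`     */
   /* pid 0 means "the calling process/thread" */
   return plpa_sched_setaffinity((pid_t)0, sizeof(cpuset), &cpuset);
}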
Example #2
File: plpa.c  Project: deniskin82/chapel
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#ifdef HAVE_SYSCTL
# ifdef HAVE_SYS_TYPES_H
#  include <sys/types.h>
# endif
# ifdef HAVE_SYS_SYSCTL_H
#  include <sys/sysctl.h>
# endif
#endif
#if defined(HAVE_SYSCONF) && defined(HAVE_UNISTD_H)
# include <unistd.h>
#endif

#include <plpa.h>

#include "qt_affinity.h"
#include "shufflesheps.h"

qthread_shepherd_id_t guess_num_shepherds(void);
qthread_worker_id_t   guess_num_workers_per_shep(qthread_shepherd_id_t nshepherds);

void INTERNAL qt_affinity_init(qthread_shepherd_id_t *nbshepherds,
                               qthread_worker_id_t   *nbworkers)
{                                      /*{{{ */
    if (*nbshepherds == 0) {
        *nbshepherds = guess_num_shepherds();
        if (*nbshepherds <= 0) {
            *nbshepherds = 1;
        }
    }
    if (*nbworkers == 0) {
        *nbworkers = guess_num_workers_per_shep(*nbshepherds);
        if (*nbworkers <= 0) {
            *nbworkers = 1;
        }
    }
}                                      /*}}} */

qthread_shepherd_id_t INTERNAL guess_num_shepherds(void)
{                                      /*{{{ */
    qthread_shepherd_id_t nshepherds = 1;

#if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_CONF)      /* Linux */
    long ret = sysconf(_SC_NPROCESSORS_CONF);
    nshepherds = (ret > 0) ? ret : 1;
#elif defined(HAVE_SYSCTL) && defined(CTL_HW) && defined(HW_NCPU)
    int      name[2] = { CTL_HW, HW_NCPU };
    uint32_t oldv;
    size_t   oldvlen = sizeof(oldv);
    if (sysctl(name, 2, &oldv, &oldvlen, NULL, 0) >= 0) {
        assert(oldvlen == sizeof(oldv));
        nshepherds = (int)oldv;
    }
#endif /* if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_CONF) */
    return nshepherds;
}                                      /*}}} */

#ifdef QTHREAD_MULTITHREADED_SHEPHERDS
void INTERNAL qt_affinity_set(qthread_worker_t *me)
{                                      /*{{{ */
    plpa_cpu_set_t *cpuset =
        (plpa_cpu_set_t *)MALLOC(sizeof(plpa_cpu_set_t));

    PLPA_CPU_ZERO(cpuset);
    PLPA_CPU_SET(me->packed_worker_id, cpuset);
    if ((plpa_sched_setaffinity(0, sizeof(plpa_cpu_set_t), cpuset) < 0) &&
        (errno != EINVAL)) {
        perror("plpa setaffinity");
    }
    FREE(cpuset, sizeof(plpa_cpu_set_t));
}                                      /*}}} */

#else /* ifdef QTHREAD_MULTITHREADED_SHEPHERDS */
void INTERNAL qt_affinity_set(qthread_shepherd_t *me)
{                                      /*{{{ */
    plpa_cpu_set_t *cpuset =
        (plpa_cpu_set_t *)MALLOC(sizeof(plpa_cpu_set_t));

    PLPA_CPU_ZERO(cpuset);
    PLPA_CPU_SET(me->node, cpuset);
    if ((plpa_sched_setaffinity(0, sizeof(plpa_cpu_set_t), cpuset) < 0) &&
        (errno != EINVAL)) {
        perror("plpa setaffinity");
    }
    FREE(cpuset, sizeof(plpa_cpu_set_t));
}                                      /*}}} */

#endif /* QTHREAD_MULTITHREADED_SHEPHERDS */
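The shepherd count above is guessed from the number of configured processors via sysconf() or sysctl(). A standalone sketch of the sysconf() path, with the same fall-back to 1 as guess_num_shepherds() (the helper name cpu_count is illustrative):

#include <unistd.h>

/* Sketch: number of configured processors, or 1 if it cannot be
 * determined (mirrors guess_num_shepherds() above). */
static long cpu_count(void)
{
#if defined(_SC_NPROCESSORS_CONF)
    long n = sysconf(_SC_NPROCESSORS_CONF);
    return (n > 0) ? n : 1;
#else
    return 1;
#endif
}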
Example #3
/****** shepherd_binding/binding_explicit() *****************************************
*  NAME
*     binding_explicit() -- Binds current process to specified CPU cores. 
*
*  SYNOPSIS
*     bool binding_explicit(int* list_of_cores, int camount, int* 
*     list_of_sockets, int samount) 
*
*  FUNCTION
*     Binds the current process to the cores specified by <socket>,<core>
*     tuples. The tuples are given as a list of sockets and a list of cores;
*     the elements at the same position in the two lists form one tuple,
*     so both lists must have the same length.
*
*     Binding is currently done only on Linux hosts where the machine topology
*     can be retrieved via the PLPA library, which is therefore required.
*
*  INPUTS
*     int* list_of_sockets - List of sockets in the same order as list of cores. 
*     int samount          - Length of the list of sockets. 
*     int* list_of_cores   - List of cores in the same order as list of sockets. 
*     int camount          - Length of the list of cores. 
*     int type             - Type of binding ( set | env | pe ).
*
*  RESULT
*     bool - true when the current process was bound as specified by the
*            input parameters
*
*  NOTES
*     MT-NOTE: binding_explicit() is not MT safe 
*
*******************************************************************************/
static bool binding_explicit(const int* list_of_sockets, const int samount, 
   const int* list_of_cores, const int camount, const binding_type_t type)
{
   /* return value: successful bound or not */ 
   bool bound = false;

   /* check if we have exactly the same amount of sockets as cores */
   if (camount != samount) {
      shepherd_trace("binding_explicit: bug: amount of sockets != amount of cores");
      return false;
   }

   if (list_of_sockets == NULL || list_of_cores == NULL) {
      shepherd_trace("binding_explicit: wrong input values");
      /* NULL lists would be dereferenced below, so bail out here */
      return false;
   }
   
   /* do only on linux when we have core binding feature in kernel */
   if (has_core_binding() == true) {
      
      if (_has_topology_information()) {
         /* bitmask for processors to turn on and off */
         plpa_cpu_set_t cpuset;  
         /* turn off all processors */
         PLPA_CPU_ZERO(&cpuset);
         /* the internal processor ids selected for the binding mask */
         int* proc_id = NULL;
         int proc_id_size = 0;

         /* processor id counter */
         int pr_id_ctr;

         /* Fetch the processor id for each socket,core tuple.
            If this is not possible for one of them, do nothing and return false. */

         /* go through all socket,core tuples and get the processor id */
         for (pr_id_ctr = 0; pr_id_ctr < camount; pr_id_ctr++) { 

            /* get the processor id */
            /* get the OS internal processor ids */ 
            if (add_proc_ids_linux(list_of_sockets[pr_id_ctr], list_of_cores[pr_id_ctr], 
                                    &proc_id, &proc_id_size) != true) {
               sge_free(&proc_id);
               return false;
            }                       

         }
         /* generate the core binding mask out of the processor id array */
         set_processor_binding_mask(&cpuset, proc_id, proc_id_size); 

         if (type == BINDING_TYPE_PE) {
            
            /* rankfile is created */

         } else if (type == BINDING_TYPE_ENV) {
            /* set the environment variable */
            if (create_binding_env_linux(proc_id, proc_id_size) == true) {
               shepherd_trace("binding_explicit: SGE_BINDING env var created");
            } else {
               shepherd_trace("binding_explicit: problems while creating SGE_BINDING env");
            }
         } else {
            /* do the core binding for the current process with the mask */
            if (bind_process_to_mask((pid_t) 0, cpuset) == true) {
               /* binding was successful */
               bound = true;
            } else {
               /* couldn't be bound return false */
               shepherd_trace("binding_explicit: bind_process_to_mask was not successful");
            }   
         }

         sge_free(&proc_id);
          
      } else {
         /* has no topology information */
         shepherd_trace("binding_explicit: Linux does not offer topology information");
      }  

   } else {
      /* has no core binding ability */
      shepherd_trace("binding_explicit: host does not support core binding");
   }   

   return bound;
}
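A hypothetical call site for the function above, binding to the tuples (socket 0, core 0) and (socket 1, core 2). binding_explicit() is static to shepherd_binding.c, so a real caller lives in that file; the lists and the BINDING_TYPE_SET constant (the "set" case from the doc comment's set | env | pe) are shown purely for illustration:

/* Sketch: request binding to <0,0> and <1,2> with the "set" strategy. */
const int sockets[] = { 0, 1 };
const int cores[]   = { 0, 2 };

if (binding_explicit(sockets, 2, cores, 2, BINDING_TYPE_SET) == true) {
   shepherd_trace("bound to the requested <socket,core> tuples");
}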
Example #4
/****** shepherd_binding/binding_set_striding_linux() *************************************
*  NAME
*     binding_set_striding_linux() -- Binds current process to cores.  
*
*  SYNOPSIS
*     bool binding_set_striding_linux(int first_socket, int first_core, int 
*     amount_of_cores, int offset, int stepsize) 
*
*  FUNCTION
*     Performs a core binding for the calling process according to the
*     'striding' strategy. The first core used is specified by first_socket
*     (beginning with 0) and first_core (beginning with 0). If first_core is
*     greater than the number of cores available on first_socket, the next
*     socket is examined and first_core is reduced by the skipped cores. If
*     the first core cannot be found on the system (because it was too high),
*     no binding is done.
*
*     Once the first core is chosen, the next one is found by adding the step
*     size 'n' to the current core. If the socket does not have that core
*     (for example because it was the last core of the socket), the next
*     socket is examined.
*
*     If the system runs out of cores while there are still cores to select
*     (because of the amount_of_cores parameter), no core binding is performed.
*    
*  INPUTS
*     int first_socket    - first socket to begin with  
*     int first_core      - first core to start with  
*     int amount_of_cores - total amount of cores to be used 
*     int offset          - core offset for first core (increments first core used) 
*     int stepsize        - step size
*     int type            - type of binding (set or env or pe)
*
*  RESULT
*     bool - Returns true if the binding was performed, otherwise false.
*
*  NOTES
*     MT-NOTE: binding_set_striding() is MT safe 
*
*******************************************************************************/
bool binding_set_striding_linux(int first_socket, int first_core, int amount_of_cores,
                          int offset, int stepsize, const binding_type_t type)
{
   /* n := take every n-th core */ 
   bool bound = false;

   dstring error = DSTRING_INIT;

   if (_has_core_binding(&error) == true) {

      sge_dstring_free(&error);

         /* bitmask for processors to turn on and off */
         plpa_cpu_set_t cpuset;  
         /* turn off all processors */
         PLPA_CPU_ZERO(&cpuset);

         /* when library offers architecture: 
            - get virtual processor ids in the following manner:
              * on socket "first_socket" choose core number "first_core + offset"
              * then add n: if core is not available go to next socket
              * ...
         */
         if (_has_topology_information()) {
            /* amount of cores set in processor binding mask */ 
            int cores_set = 0;
            /* next socket to use */
            int next_socket = first_socket;
            /* next core to use */
            int next_core = first_core + offset;
            /* all the processor ids selected for the mask */
            int* proc_id = NULL; 
            int proc_id_size = 0;
            /* maximal amount of sockets on this system */
            int max_amount_of_sockets = get_amount_of_plpa_sockets();
            
            /* check if we are already out of range */
            if (next_socket >= max_amount_of_sockets) {
               shepherd_trace("binding_set_striding_linux: already out of sockets");
               return false;
            }   

            while (get_amount_of_plpa_cores(next_socket) <= next_core) {
               /* move on to next socket - could be that we have to deal only with cores 
                  instead of <socket><core> tuples */
               next_core -= get_amount_of_plpa_cores(next_socket); 
               next_socket++;
               if (next_socket >= max_amount_of_sockets) {
                  /* we are out of sockets - we do nothing */
                  shepherd_trace("binding_set_striding_linux: first core: out of sockets");
                  return false;
               }
            }  
            
            add_proc_ids_linux(next_socket, next_core, &proc_id, &proc_id_size);
            
            /* turn on processor id in mask */ 
            
            /* collect the rest of the processor ids */ 
            for (cores_set = 1; cores_set < amount_of_cores; cores_set++) {
               /* calculate next_core number */ 
               next_core += stepsize;
               
               /* check if we are already out of range */
               if (next_socket >= max_amount_of_sockets) {
                  shepherd_trace("binding_set_striding_linux: out of sockets");
                  sge_free(&proc_id);
                  return false;
               }   

               while (get_amount_of_plpa_cores(next_socket) <= next_core) {
                  /* move on to next socket - could be that we have to deal only with cores 
                     instead of <socket><core> tuples */
                  next_core -= get_amount_of_plpa_cores(next_socket); 
                  next_socket++;
                  if (next_socket >= max_amount_of_sockets) {
                     /* we are out of sockets - we do nothing */
                     shepherd_trace("binding_set_striding_linux: out of sockets!");
                     sge_free(&proc_id);
                     return false;
                  }
               }    

               /* add processor ids for core */
               add_proc_ids_linux(next_socket, next_core, &proc_id, &proc_id_size);
                
            } /* collecting processor ids */

            /* set the mask for all processor ids */ 
            set_processor_binding_mask(&cpuset, proc_id, proc_id_size);
           
            if (type == BINDING_TYPE_PE) {
            
               /* rankfile is created: do nothing */

            } else if (type == BINDING_TYPE_ENV) {

               /* set the environment variable */
               if (create_binding_env_linux(proc_id, proc_id_size) == true) {
                  shepherd_trace("binding_set_striding_linux: SGE_BINDING env var created");
               } else {
                  shepherd_trace("binding_set_striding_linux: problems while creating SGE_BINDING env");
               }

            } else {
               
               /* bind process to mask */ 
               if (bind_process_to_mask((pid_t) 0, cpuset) == true) {
                  /* binding was successful */
                  bound = true;
               }
            }
         
            sge_free(&proc_id);
            
         } else {
            /* without topology information we cannot build a correct bitmask */
            shepherd_trace("binding_set_striding_linux: bitmask without topology information");
            return false;
         }

   } else {
      /* has no core binding feature */
      sge_dstring_free(&error);
      
      return false;
   }
   
   
   return bound;
}
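The core of the striding strategy is easy to illustrate without PLPA: starting from first_core + offset, it selects every stepsize-th core until amount_of_cores cores have been chosen. The sketch below uses a flat core index, so the socket wrap-around handled by the real code is ignored; the helper name show_striding is illustrative:

#include <stdio.h>

/* Sketch: print the flat core indices a striding binding would pick. */
static void show_striding(int first_core, int offset, int amount_of_cores,
                          int stepsize, int total_cores)
{
   int core = first_core + offset;
   int i;

   for (i = 0; i < amount_of_cores && core < total_cores; i++) {
      printf("select core %d\n", core);
      core += stepsize;               /* take every stepsize-th core */
   }
}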
Example #5
/****** shepherd_binding/binding_set_linear_linux() ***************************************
*  NAME
*     binding_set_linear_linux() -- Bind current process linear to chunk of cores. 
*
*  SYNOPSIS
*     bool binding_set_linear(int first_socket, int first_core, int 
*     amount_of_cores, int offset) 
*
*  FUNCTION
*     Binds the current process (shepherd) to a set of cores. All processes
*     started by the current process inherit the core binding (Linux).
*
*     The core binding is done in a linear manner: the process is bound to
*     'amount_of_cores' cores, one core after another, starting at socket
*     'first_socket' (usually 0) and core = 'first_core' (usually 0) +
*     'offset'. If the core number is higher than the number of cores
*     provided by socket 'first_socket', the next socket is taken (the core
*     number defines how many cores are skipped).
*
*  INPUTS
*     int first_socket    - The first socket (starting at 0) to bind to. 
*     int first_core      - The first core to bind. 
*     int amount_of_cores - The amount of cores to bind to. 
*     int offset          - The user specified core number offset. 
*     binding_type_t type - The type of binding ONLY FOR EXECD ( set | env | pe )
*                           
*  RESULT
*     bool - true if binding for current process was done, false if not
*
*  NOTES
*     MT-NOTE: binding_set_linear() is not MT safe 
*
*******************************************************************************/
static bool binding_set_linear_linux(int first_socket, int first_core, 
               int amount_of_cores, int offset, const binding_type_t type)
{

   /* sets bitmask in a linear manner        */ 
   /* first core is on exclusive host 0      */ 
   /* first core could be set from scheduler */ 
   /* offset is the first core to start with (makes sense only on an exclusive host) */
   dstring error = DSTRING_INIT;

   if (_has_core_binding(&error) == true) {

      sge_dstring_clear(&error);
      
      /* bitmask for processors to turn on and off */
      plpa_cpu_set_t cpuset;
      /* turn off all processors */
      PLPA_CPU_ZERO(&cpuset);
         
      sge_dstring_free(&error);
         
      if (_has_topology_information()) {
         /* amount of cores set in processor binding mask */ 
         int cores_set;
         /* next socket to use */
         int next_socket = first_socket;
         /* the amount of cores of the next socket */
         int socket_amount_of_cores;
         /* next core to use */
         int next_core = first_core + offset;
         /* all the processor ids selected for the mask */
         int* proc_id = NULL; 
         /* size of proc_id array */
         int proc_id_size = 0;

         /* maximal amount of sockets on this system */
         int max_amount_of_sockets = get_amount_of_plpa_sockets();

         /* strategy: go to the first_socket and the first_core + offset and 
            fill up socket and go to the next one. */ 
               
         /* TODO maybe better to search for using a core exclusively? */
            
         while (get_amount_of_plpa_cores(next_socket) <= next_core) {
            /* TODO which kind of warning when first socket does not offer this? */
            /* move on to next socket - could be that we have to deal only with cores 
               instead of <socket><core> tuples */
            next_core -= get_amount_of_plpa_cores(next_socket); 
            next_socket++;
            if (next_socket >= max_amount_of_sockets) {
               /* we are out of sockets - we do nothing */
               return false;
            }
         }  
         
         add_proc_ids_linux(next_socket, next_core, &proc_id, &proc_id_size);

         /* collect the other processor ids with the strategy */
         for (cores_set = 1; cores_set < amount_of_cores; cores_set++) {
            next_core++;
            /* jump to next socket when it is needed */
            /* maybe the next socket could offer 0 cores (I can't see when,
               but just to be sure) */
            while ((socket_amount_of_cores = get_amount_of_plpa_cores(next_socket)) 
                        <= next_core) {
               next_socket++;
               next_core = next_core - socket_amount_of_cores;
               if (next_socket >= max_amount_of_sockets) {
                  /* we are out of sockets - we do nothing */
                  sge_free(&proc_id);
                  return false;
               }
            }
            /* get processor ids */
            add_proc_ids_linux(next_socket, next_core, &proc_id, &proc_id_size);
         }
            
         /* set the mask for all processor ids */
         set_processor_binding_mask(&cpuset, proc_id, proc_id_size);
            
         /* check what to do with the processor ids (set, env or pe) */
         if (type == BINDING_TYPE_PE) {
               
            /* is done outside */

         } else if (type == BINDING_TYPE_ENV) {
               
            /* set the environment variable                    */
            /* this does not show up in "environment" file !!! */
            if (create_binding_env_linux(proc_id, proc_id_size) == true) {
               shepherd_trace("binding_set_linear_linux: SGE_BINDING env var created");
            } else {
               shepherd_trace("binding_set_linear_linux: problems while creating SGE_BINDING env");
            }
             
         } else {

            /* binding type 'set': bind the process to the mask */
            if (bind_process_to_mask((pid_t) 0, cpuset) == false) {
               /* there was an error while binding */ 
               sge_free(&proc_id);
               return false;
            }
         }

         sge_free(&proc_id);

      } else {
            
         /* TODO DG strategy without topology information but with 
            working library? */
         shepherd_trace("binding_set_linear_linux: no information about topology");
         return false;
      }
         

   } else {

      shepherd_trace("binding_set_linear_linux: PLPA binding not supported: %s", 
                        sge_dstring_get_string(&error));

      sge_dstring_free(&error);
   }

   return true;
}
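Both the linear and the striding strategy share the same socket wrap-around logic: when next_core is past the end of the current socket, subtract that socket's core count and move on to the next socket, giving up when the sockets run out. A standalone sketch of just that step, with cores_per_socket standing in for get_amount_of_plpa_cores() (names are illustrative):

/* Sketch: normalize a <next_socket, next_core> pair so that next_core
 * is a valid core on next_socket; returns -1 when we run out of sockets. */
static int normalize_tuple(int *next_socket, int *next_core,
                           int max_sockets, int (*cores_per_socket)(int))
{
   while (cores_per_socket(*next_socket) <= *next_core) {
      *next_core -= cores_per_socket(*next_socket);
      (*next_socket)++;
      if (*next_socket >= max_sockets) {
         return -1;                   /* out of sockets: give up */
      }
   }
   return 0;
}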