Example #1
0
/*
 * Sysctl handler for the KTR trace CPU mask.  Formats the current
 * ktr_cpumask as a string for the reader and, when a new value is
 * written, parses and installs it.  Returns 0 on success, EINVAL if
 * the supplied string does not parse as a CPU set.
 */
static int
sysctl_debug_ktr_cpumask(SYSCTL_HANDLER_ARGS)
{
    char lktr_cpumask_str[CPUSETBUFSIZ];
    cpuset_t imask;
    int error;

    /* Render the live mask into a local buffer the handler can expose. */
    cpusetobj_strprint(lktr_cpumask_str, &ktr_cpumask);
    error = sysctl_handle_string(oidp, lktr_cpumask_str,
                                 sizeof(lktr_cpumask_str), req);
    /* Pure read (no new value supplied) or I/O error: done. */
    if (error != 0 || req->newptr == NULL)
        return (error);
    /* Writer supplied a new mask string; reject malformed input. */
    if (cpusetobj_strscan(&imask, lktr_cpumask_str) == -1)
        return (EINVAL);
    /* Commit the parsed mask (FreeBSD CPU_COPY is (source, dest)). */
    CPU_COPY(&imask, &ktr_cpumask);

    return (error);
}
/*
 * Store a processor affinity set in a thread attribute object.
 *
 * @param attr       attribute object to update
 * @param cpusetsize size in bytes of @a cpuset
 * @param cpuset     processor set to install
 *
 * @retval 0      success
 * @retval EFAULT @a attr or @a cpuset is NULL
 * @retval EINVAL @a cpuset is not valid for @a cpusetsize
 */
int pthread_attr_setaffinity_np(
  pthread_attr_t    *attr,
  size_t             cpusetsize,
  const cpu_set_t   *cpuset
)
{
  /* Both pointer arguments are mandatory. */
  if ( cpuset == NULL || attr == NULL )
    return EFAULT;

  /* The set must be well formed for the given size. */
  if ( !_CPU_set_Is_valid( cpuset, cpusetsize ) )
    return EINVAL;

  /* Copy the caller's set into the attribute object. */
  CPU_COPY( attr->affinityset, cpuset );

  return 0;
}
Example #3
0
File: init.c  Project: cloud-hot/rtems
/*
 * Verify that CPU_COPY replicates an all-clear set over an all-set set:
 * set1 is zeroed, set2 is filled, then set1 is copied into set2 and
 * every bit of set2 is checked to be clear afterwards.
 */
static void test_cpu_copy_case_1(void)
{
  size_t cpu;

  /*
   * CPU_EQUAL
   */
  puts( "Exercise CPU_ZERO, CPU_COPY, and CPU_ISET" );
  CPU_ZERO(&set1);
  CPU_FILL(&set2);

  CPU_COPY(&set2, &set1);

  /* Every bit of set2 must now be clear. */
  for ( cpu = 0; cpu < CPU_SETSIZE; cpu++ ) {
    rtems_test_assert( CPU_ISSET(cpu, &set2) == 0 );
  }
}
/*
 * Report the affinity set stored in the thread's scheduler node.
 *
 * Fails (returns false) when the caller's buffer size does not match the
 * stored set size exactly; otherwise the stored set is copied into
 * @a cpuset and true is returned.
 */
bool _Scheduler_priority_affinity_SMP_Get_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  Scheduler_priority_affinity_SMP_Node *the_node;

  (void) scheduler;

  the_node = _Scheduler_priority_affinity_Node_get( thread );

  /* The caller's buffer must match the stored set size exactly. */
  if ( cpusetsize != the_node->Affinity.setsize )
    return false;

  CPU_COPY( cpuset, the_node->Affinity.set );

  return true;
}
/*
 * Install a new affinity set for @a thread.  A ready thread is blocked
 * before the set is changed and unblocked afterwards so the scheduler
 * re-evaluates its placement under the new affinity.
 *
 * Returns false if the set is invalid for the given size, true otherwise
 * (including the no-op case where the new set equals the current one).
 */
bool _Scheduler_priority_affinity_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  Scheduler_priority_affinity_SMP_Node *node;
  States_Control                        current_state;

  /*
   * Validate that the cpuset meets basic requirements.
   */
  if ( !_CPU_set_Is_valid( cpuset, cpusetsize ) ) {
    return false;
  }

  node = _Scheduler_priority_affinity_SMP_Thread_get_node( thread );

  /*
   * The old and new set are the same, there is no point in
   * doing anything.
   */
  if ( CPU_EQUAL_S( cpusetsize, cpuset, node->Affinity.set ) )
    return true;

  /* Snapshot the state once: block/unblock must pair on the same view. */
  current_state = thread->current_state;

  if ( _States_Is_ready( current_state ) ) {
    _Scheduler_priority_affinity_SMP_Block( scheduler, thread );
  }

  /* Commit the new set while the thread is not scheduled. */
  CPU_COPY( node->Affinity.set, cpuset );

  if ( _States_Is_ready( current_state ) ) {
    /*
     * FIXME: Do not ignore threads in need for help.
     */
    (void) _Scheduler_priority_affinity_SMP_Unblock( scheduler, thread );
  }

  return true;
}
Example #6
0
/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 *
 * Returns 0 if the update is permissible, EPERM for a read-only set, or
 * EDEADLK if any set in the subtree would be left with no CPUs.
 * Caller must hold cpuset_lock.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	/* The intersection of this set's mask and the new mask must be
	 * non-empty, or the set would end up unable to run anywhere. */
	if (!CPU_OVERLAP(&set->cs_mask, mask))
		return (EDEADLK);
	/* Compute the effective mask this set would have, then test the
	 * children against that narrowed mask. */
	CPU_COPY(&set->cs_mask, &newmask);
	CPU_AND(&newmask, mask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings) 
		if ((error = cpuset_testupdate(nset, &newmask)) != 0)
			break;
	return (error);
}
/*
 * Default affinity getter: every thread has the system-default CPU set.
 *
 * Copies the default set into @a cpuset and returns true, unless the
 * caller's buffer size differs from the default set size, in which case
 * it returns false.
 */
bool _Scheduler_default_Get_affinity(
  Scheduler_Control *scheduler,
  Thread_Control    *thread,
  size_t             cpusetsize,
  cpu_set_t         *cpuset
)
{
  const CPU_set_Control *default_set;

  (void) scheduler;
  (void) thread;

  default_set = _CPU_set_Default();

  /* Size mismatch: refuse rather than truncate or over-read. */
  if ( default_set->setsize != cpusetsize )
    return false;

  CPU_COPY( cpuset, default_set->set );
  return true;
}
Example #8
0
/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, const cpuset_t *mask,
    cpusetid_t id)
{

	/* The requested mask must share at least one CPU with the parent. */
	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	/* Narrow to the parent's mask under the lock, since the parent's
	 * mask may change concurrently. */
	CPU_AND(&set->cs_mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	/* Anonymous sets (CPUSET_INVALID) are not entered in the global
	 * id list and cannot be looked up by id. */
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}
Example #9
0
/*
 * Set the processor affinity of the thread identified by @a id.
 *
 * Delegates to the thread's scheduler; on success the new set is also
 * mirrored into the POSIX API attribute object so later attribute
 * queries observe it.
 *
 * @retval 0      success
 * @retval EFAULT @a cpuset is NULL
 * @retval EINVAL the scheduler rejected the set
 * @retval ESRCH  @a id does not name a local thread
 */
int pthread_setaffinity_np(
  pthread_t          id,
  size_t             cpusetsize,
  const cpu_set_t   *cpuset)
{
  Objects_Locations        location;
  POSIX_API_Control       *api;
  Thread_Control          *the_thread;
  bool                     ok;

  if ( !cpuset )
    return EFAULT;

  /* _Thread_Get disables dispatch on success; _Objects_Put re-enables. */
  the_thread = _Thread_Get( id, &location );
  switch ( location ) {

    case OBJECTS_LOCAL:
      ok = _Scheduler_Set_affinity(
        _Scheduler_Get( the_thread ),
        the_thread,
        cpusetsize,
        cpuset
      );
      if ( ok ) {
        /* Keep the cached attribute copy in sync with the scheduler. */
        api = the_thread->API_Extensions[ THREAD_API_POSIX ];
        CPU_COPY( api->Attributes.affinityset, cpuset );
      }
      _Objects_Put( &the_thread->Object );
      return ok ? 0 : EINVAL;

#if defined(RTEMS_MULTIPROCESSING)
    case OBJECTS_REMOTE:
#endif
    case OBJECTS_ERROR:
      break;
  }

  return ESRCH;
}
Example #10
0
/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 *
 * Returns 0 on success or an errno value (EPERM, EINVAL, or an error
 * propagated from cpuset_testupdate()).
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	/* Changing CPU sets is a privileged operation. */
	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail
	 * we do not allow modifying the dedicated root
	 * cpuset of the jail but may still allow to
	 * change child sets.
	 */
	if (jailed(curthread->td_ucred) &&
	    set->cs_flags & CPU_SET_ROOT)
		return (EPERM);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	/* Dry-run the update over the whole subtree before committing. */
	error = cpuset_testupdate(set, mask);
	if (error)
		goto out;
	cpuset_update(set, mask);
	CPU_COPY(mask, &set->cs_mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}
Example #11
0
File: init.c  Project: gedare/rtems
/*
 * Exercise the Classic API task-affinity directives: verify inherited
 * affinity on new tasks, per-CPU pinning of low priority tasks, and the
 * error path when a task's affinity set is emptied.
 */
void Validate_affinity(void )
{
  cpu_set_t            cpuset0;   /* Init task's original affinity */
  cpu_set_t            cpuset1;   /* working set used for pin/clear */
  cpu_set_t            cpuset2;   /* readback buffer */
  uint32_t             i;
  int                  sc;
  int                  cpu_count;
  rtems_task_priority  priority;
  char                 ch[2];     /* one digit + NUL for task names */

  puts( "Init - Set Init priority to high");
  sc = rtems_task_set_priority( Init_id, 1, &priority );
  directive_failed( sc, "Set Init Priority" );

  sc = rtems_task_get_affinity( Init_id, sizeof(cpu_set_t), &cpuset0 );
  directive_failed( sc, "Get Affinity of Init Task" );

  /* Get the number of processors that we are using. */
  cpu_count = rtems_get_processor_count();

  /* Fill the remaining cpus with med priority tasks */
  puts( "Init - Create Medium priority tasks");
  for (i=0; i<(cpu_count-1); i++){
    sprintf(ch, "%01" PRId32, i+1 );
    sc = rtems_task_create(
      rtems_build_name( 'C', 'P', 'U', ch[0] ),
      2,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &Med_id[i]
    );
    directive_failed( sc, "task create" );

    sc = rtems_task_start( Med_id[i], Task_1, i+1 );
    directive_failed( sc, "task start" );

    /* New tasks must inherit the Init task's affinity. */
    sc = rtems_task_get_affinity( Med_id[i], sizeof(cpu_set_t), &cpuset2 );
    directive_failed( sc, "Get Affinity of Medium Priority Task" );
    rtems_test_assert( CPU_EQUAL(&cpuset0, &cpuset2) );
  }

  /*
   * Create low priority thread for each remaining cpu with the affinity
   * set to only run on one cpu.
   */
  puts( "Init - Create  Low priority tasks");
  for (i=0; i<cpu_count; i++){
    CPU_ZERO(&cpuset1);
    CPU_SET(i, &cpuset1);

    sprintf(ch, "%01" PRId32, (uint32_t) 0 );
    sc = rtems_task_create(
      rtems_build_name( 'X', 'T', 'R', ch[0] ),
      10,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &Low_id[i]
    );
    directive_failed( sc, "task create" );

    /* Pin task i to CPU i before it starts. */
    sc = rtems_task_set_affinity( Low_id[i], sizeof(cpu_set_t), &cpuset1 );
    directive_failed( sc, "Low priority task set affinity" );

    sc = rtems_task_start( Low_id[i], Task_1, i+1 );
    directive_failed( sc, "task start" );
  }


  /* Verify affinity on low priority tasks */
  puts("Init - Verify affinity on Low priority tasks");
  for (i=0; i<cpu_count; i++){
    CPU_ZERO(&cpuset1);
    CPU_SET(i, &cpuset1);

    sc = rtems_task_get_affinity( Low_id[i], sizeof(cpu_set_t), &cpuset2 );
    directive_failed( sc, "Low priority task get affinity" );
    rtems_test_assert( CPU_EQUAL(&cpuset1, &cpuset2) );
  }

  /* Change the affinity for each low priority task */
  puts("Init - Change affinity on Low priority tasks");
  /* NOTE(review): the loop below assumes cpuset1 now holds a copy of
   * cpuset0, i.e. CPU_COPY(source, destination) argument order here.
   * The POSIX variant of this test in this file uses the opposite
   * order — confirm against the <sys/cpuset.h> definition in use. */
  CPU_COPY(&cpuset0, &cpuset1);
  for (i=0; i<cpu_count; i++){

    CPU_CLR(i, &cpuset1);
    sc = rtems_task_set_affinity( Low_id[i], sizeof(cpu_set_t), &cpuset1 );

    /* Verify no cpu's are now set in the cpuset */
    if (i== (cpu_count-1)) {
      /* The last clear empties the set entirely; must be rejected. */
      rtems_test_assert( sc == RTEMS_INVALID_NUMBER );
      sc = rtems_task_set_affinity( Low_id[i], sizeof(cpu_set_t), &cpuset0 );
    }

    directive_failed( sc, "Low priority task set affinity" );
  }

  puts("Init - Validate affinity on Low priority tasks");
  CPU_COPY(&cpuset0, &cpuset1);
  for (i=0; i<cpu_count; i++){
    CPU_CLR(i, &cpuset1);

    sc = rtems_task_get_affinity( Low_id[i], sizeof(cpu_set_t), &cpuset2 );
    directive_failed( sc, "Low priority task get affinity" );
    /* The last task kept cpuset0 (the empty-set update was rejected). */
    if (i== (cpu_count-1))
      rtems_test_assert( CPU_EQUAL(&cpuset0, &cpuset2) );
    else
      rtems_test_assert( CPU_EQUAL(&cpuset1, &cpuset2) );
  }
}
Example #12
0
File: init.c  Project: Fyleo/rtems
/*
 * POSIX variant of the affinity validation test: exercise
 * pthread_{get,set}affinity_np and pthread_attr_setaffinity_np —
 * inherited affinity on new threads, per-CPU pinning, and the error
 * path when a thread's affinity set is emptied.
 */
void Validate_affinity(void )
{
  pthread_attr_t       attr;
  cpu_set_t            cpuset0;   /* Init thread's original affinity */
  cpu_set_t            cpuset1;   /* working set used for pin/clear */
  cpu_set_t            cpuset2;   /* readback buffer */
  uint32_t             i;
  int                  sc;
  int                  cpu_count;
  struct sched_param   param;


  puts( "Init - Set Init priority to high");
  sc = pthread_getattr_np( Init_id, &attr );
  rtems_test_assert( sc == 0 );
  sc = pthread_attr_getschedparam( &attr, &param );
  rtems_test_assert( sc == 0 );
  param.sched_priority = sched_get_priority_max( SCHED_FIFO );
  sc = pthread_setschedparam( Init_id, SCHED_FIFO, &param );
  rtems_test_assert( !sc );

  sc = pthread_getaffinity_np( Init_id, sizeof(cpu_set_t), &cpuset0 );
  rtems_test_assert( !sc );

  /* Get the number of processors that we are using. */
  cpu_count = rtems_get_processor_count();

  /* Fill the remaining cpus with med priority tasks */
  puts( "Init - Create Medium priority tasks");
  for (i=0; i<(cpu_count-1); i++){
    sc = pthread_create( &Med_id[i], &attr, Thread_1, NULL );
    rtems_test_assert( !sc );
  }

  /* New threads must inherit the Init thread's affinity. */
  puts( "Init - Verify Medium priority tasks");
  for (i=0; i<(cpu_count-1); i++){
    sc = pthread_getaffinity_np( Med_id[i], sizeof(cpu_set_t), &cpuset2 );
    rtems_test_assert( !sc );
    rtems_test_assert( CPU_EQUAL(&cpuset0, &cpuset2) );
  }

  /*
   * Create low priority thread for each remaining cpu with the affinity
   * set to only run on one cpu.
   */
  puts( "Init - Create  Low priority tasks");
  for (i=0; i<cpu_count; i++){
    CPU_ZERO(&cpuset1);
    CPU_SET(i, &cpuset1);

    /* Pin thread i to CPU i via the creation attributes. */
    sc = pthread_attr_setaffinity_np( &attr, sizeof(cpu_set_t), &cpuset1 );
    rtems_test_assert( !sc );

    sc = pthread_create( &Low_id[i], &attr, Thread_1, NULL );
    rtems_test_assert( !sc );
  }

  /* Verify affinity on low priority tasks */
  puts( "Init - Verify Low priority tasks");
  for (i=0; i<(cpu_count-1); i++){
    CPU_ZERO(&cpuset1);
    CPU_SET(i, &cpuset1);

    sc = pthread_getaffinity_np( Low_id[i], sizeof(cpu_set_t), &cpuset2 );
    rtems_test_assert( !sc );
    rtems_test_assert( CPU_EQUAL(&cpuset1, &cpuset2) );
  }

  /* Change the affinity for each low priority task */
  puts("Init - Change affinity on Low priority tasks");
  /* NOTE(review): the loop below assumes cpuset1 now holds a copy of
   * cpuset0, i.e. CPU_COPY(destination, source) argument order here.
   * The Classic API variant of this test in this file uses the opposite
   * order — confirm against the <sys/cpuset.h> definition in use. */
  CPU_COPY(&cpuset1, &cpuset0);
  for (i=0; i<cpu_count; i++){

    CPU_CLR(i, &cpuset1);
    sc = pthread_setaffinity_np( Low_id[i], sizeof(cpu_set_t), &cpuset1 );

    /* Verify no cpu's are now set in the cpuset */
    if (i== (cpu_count-1)) {
      /* The last clear empties the set entirely; must be rejected. */
      rtems_test_assert( sc == EINVAL );
      sc = pthread_setaffinity_np( Low_id[i], sizeof(cpu_set_t), &cpuset0 );
    }
    rtems_test_assert( !sc );
  }

  puts("Init - Validate affinity on Low priority tasks");
  CPU_COPY(&cpuset1, &cpuset0);
  for (i=0; i<cpu_count; i++){
    CPU_CLR(i, &cpuset1);

    sc = pthread_getaffinity_np( Low_id[i], sizeof(cpu_set_t), &cpuset2 );
    rtems_test_assert( !sc );
    /* The last thread kept cpuset0 (the empty-set update was rejected). */
    if (i== (cpu_count-1))
      rtems_test_assert( CPU_EQUAL(&cpuset0, &cpuset2) );
    else
      rtems_test_assert( CPU_EQUAL(&cpuset1, &cpuset2) );
  }
}
Example #13
0
int
sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	size = uap->cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
			error = intr_getaffinity(uap->id, mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}