Example #1
int
test_affinity5(void)
{
  unsigned int cpu;
  pthread_t tid;
  cpu_set_t threadCpus;
  DWORD_PTR vThreadMask;
  cpu_set_t keepCpus;
  pthread_t self = pthread_self();

  CPU_ZERO(&keepCpus);
  for (cpu = 1; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    {
	  CPU_SET(cpu, &keepCpus);					/* 0b10101010101010101010101010101010 */
    }

  assert(pthread_getaffinity_np(self, sizeof(cpu_set_t), &threadCpus) == 0);
  if (CPU_COUNT(&threadCpus) > 1)
    {
	  assert(pthread_create(&tid, NULL, mythread, (void*)&threadCpus) == 0);
	  assert(pthread_join(tid, NULL) == 0);
	  CPU_AND(&threadCpus, &threadCpus, &keepCpus);
	  assert(pthread_setaffinity_np(self, sizeof(cpu_set_t), &threadCpus) == 0);
	  vThreadMask = SetThreadAffinityMask(GetCurrentThread(), (*(PDWORD_PTR)&threadCpus) /* Violating Opacity */);
	  assert(vThreadMask != 0);
	  assert(memcmp(&vThreadMask, &threadCpus, sizeof(DWORD_PTR)) == 0);
	  assert(pthread_create(&tid, NULL, mythread, (void*)&threadCpus) == 0);
	  assert(pthread_join(tid, NULL) == 0);
    }

  return 0;
}
Example #2
File: vmm.c Project: vinceguogit/freebsd
static void
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{

	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

	mtx_lock(&vm->rendezvous_mtx);
	while (vm->rendezvous_func != NULL) {
		/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
		CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);

		if (vcpuid != -1 &&
		    CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
		    !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
			VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
			(*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
			CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
		}
		if (CPU_CMP(&vm->rendezvous_req_cpus,
		    &vm->rendezvous_done_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
			vm_set_rendezvous_func(vm, NULL);
			wakeup(&vm->rendezvous_func);
			break;
		}
		RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
		mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
		    "vmrndv", 0);
	}
	mtx_unlock(&vm->rendezvous_mtx);
}
Example #3
int
main()
{
  unsigned int cpu;
  int result;
  cpu_set_t newmask;
  cpu_set_t mask;
  cpu_set_t switchmask;
  cpu_set_t flipmask;

  CPU_ZERO(&mask);
  CPU_ZERO(&switchmask);
  CPU_ZERO(&flipmask);

  for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    {
	  CPU_SET(cpu, &switchmask);				/* 0b01010101010101010101010101010101 */
    }
  for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu++)
    {
	  CPU_SET(cpu, &flipmask);					/* 0b11111111111111111111111111111111 */
    }

  assert(sched_getaffinity(0, sizeof(cpu_set_t), &newmask) == 0);
  assert(!CPU_EQUAL(&newmask, &mask));

  result = sched_setaffinity(0, sizeof(cpu_set_t), &newmask);
  if (result != 0)
	{
	  int err =
#if defined (__PTW32_USES_SEPARATE_CRT)
	  GetLastError();
#else
      errno;
#endif

	  assert(err != ESRCH);
	  assert(err != EFAULT);
	  assert(err != EPERM);
	  assert(err != EINVAL);
	  assert(err != EAGAIN);
	  assert(err == ENOSYS);
	  assert(CPU_COUNT(&mask) == 1);
	}
  else
	{
	  if (CPU_COUNT(&mask) > 1)
		{
		  CPU_AND(&newmask, &mask, &switchmask); /* Remove every other CPU */
		  assert(sched_setaffinity(0, sizeof(cpu_set_t), &newmask) == 0);
		  assert(sched_getaffinity(0, sizeof(cpu_set_t), &mask) == 0);
		  CPU_XOR(&newmask, &mask, &flipmask);  /* Switch to all alternative CPUs */
		  assert(sched_setaffinity(0, sizeof(cpu_set_t), &newmask) == 0);
		  assert(sched_getaffinity(0, sizeof(cpu_set_t), &mask) == 0);
		  assert(!CPU_EQUAL(&newmask, &mask));
		}
	}

  return 0;
}
Example #4
/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings) 
		cpuset_update(nset, &set->cs_mask);

	return;
}
Example #5
	boost::uint32_t SetAffinity(boost::uint32_t cores_bitmask, bool hard)
	{
		if (cores_bitmask == 0) {
			return ~0;
		}

	#if defined(__APPLE__) || defined(__FreeBSD__)
		// no-op
		return 0;

	#elif defined(WIN32)
		// Create mask
		DWORD_PTR cpusWanted = (cores_bitmask & cpusSystem);

		// Set the affinity
		HANDLE thread = GetCurrentThread();
		DWORD_PTR result = 0;
		if (hard) {
			result = SetThreadAffinityMask(thread, cpusWanted);
		} else {
			result = SetThreadIdealProcessor(thread, (DWORD)cpusWanted);
		}

		// Return final mask
		return (result > 0) ? (boost::uint32_t)cpusWanted : 0;
	#else
		// Create mask
		cpu_set_t cpusWanted; CPU_ZERO(&cpusWanted);
		int numCpus = std::min(CPU_COUNT(&cpusSystem), 32); // w/o the min(.., 32) `(1 << n)` could overflow!
		for (int n = numCpus - 1; n >= 0; --n) {
			if ((cores_bitmask & (1 << n)) != 0) {
				CPU_SET(n, &cpusWanted);
			}
		}
		CPU_AND(&cpusWanted, &cpusWanted, &cpusSystem);

		// Set the affinity
		int result = sched_setaffinity(0, sizeof(cpu_set_t), &cpusWanted);

		// Return final mask
		uint32_t finalMask = 0;
		for (int n = numCpus - 1; n >= 0; --n) {
			if (CPU_ISSET(n, &cpusWanted)) {
				finalMask |= (1 << n);
			}
		}
		return (result == 0) ? finalMask : 0;
	#endif
	}
Example #6
static int mlx5_enable_stall_cq(struct mlx5_context *ctx, int only_sb)
{
	cpu_set_t my_cpus, dev_local_cpus, result_set;
	int stall_enable;
	int ret;
	int num_cores;

	if (only_sb && !mlx5_is_sandy_bridge(&num_cores))
		return 0;

	/* by default disable stall on sandy bridge arch */
	stall_enable = 0;

	/*
	 * check if app is bound to cpu set that is inside
	 * of device local cpu set. Disable stalling if true
	 */

	/* use static cpu set - up to CPU_SETSIZE (1024) cpus/node */
	CPU_ZERO(&my_cpus);
	CPU_ZERO(&dev_local_cpus);
	CPU_ZERO(&result_set);
	ret = sched_getaffinity(0, sizeof(my_cpus), &my_cpus);
	if (ret == -1) {
		if (errno == EINVAL)
			fprintf(stderr, PFX "Warning: my cpu set is too small\n");
		else
			fprintf(stderr, PFX "Warning: failed to get my cpu set\n");
		goto out;
	}

	/* get device local cpu set */
	mlx5_local_cpu_set(ctx, &dev_local_cpus);

	/* make sure result_set is not init to all 0 */
	CPU_SET(0, &result_set);
	/* Set stall_enable if my cpu set and dev cpu set are disjoint sets */
	CPU_AND(&result_set, &my_cpus, &dev_local_cpus);
	stall_enable = CPU_COUNT(&result_set) ? 0 : 1;

out:
	return stall_enable;
}
Example #7
int
main()
{
    unsigned int cpu;
    pthread_t tid;
    pthread_attr_t attr1, attr2;
    cpu_set_t threadCpus;
    cpu_set_t keepCpus;
    pthread_t self = pthread_self();

    if (pthread_getaffinity_np(self, sizeof(cpu_set_t), &threadCpus) == ENOSYS)
    {
        printf("pthread_get/set_affinity_np API not supported for this platform: skipping test.");
        return 0;
    }

    assert(pthread_attr_init(&attr1) == 0);
    assert(pthread_attr_init(&attr2) == 0);

    CPU_ZERO(&keepCpus);
    for (cpu = 1; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    {
        CPU_SET(cpu, &keepCpus);                                      /* 0b10101010101010101010101010101010 */
    }

    assert(pthread_getaffinity_np(self, sizeof(cpu_set_t), &threadCpus) == 0);

    if (CPU_COUNT(&threadCpus) > 1)
    {
        assert(pthread_attr_setaffinity_np(&attr1, sizeof(cpu_set_t), &threadCpus) == 0);
        CPU_AND(&threadCpus, &threadCpus, &keepCpus);
        assert(pthread_attr_setaffinity_np(&attr2, sizeof(cpu_set_t), &threadCpus) == 0);

        assert(pthread_create(&tid, &attr1, mythread, (void *) &attr1) == 0);
        assert(pthread_join(tid, NULL) == 0);
        assert(pthread_create(&tid, &attr2, mythread, (void *) &attr2) == 0);
        assert(pthread_join(tid, NULL) == 0);
    }
    assert(pthread_attr_destroy(&attr1) == 0);
    assert(pthread_attr_destroy(&attr2) == 0);
    return 0;
}
Example #8
/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (!CPU_OVERLAP(&set->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(&set->cs_mask, &newmask);
	CPU_AND(&newmask, mask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings) 
		if ((error = cpuset_testupdate(nset, &newmask)) != 0)
			break;
	return (error);
}
Example #9
/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, const cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(&set->cs_mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}
Example #10
int
main()
{
  unsigned int cpu;
  pthread_t tid;
  cpu_set_t threadCpus;
  DWORD_PTR vThreadMask;
  cpu_set_t keepCpus;
  pthread_t self = pthread_self();

  if (pthread_getaffinity_np(self, sizeof(cpu_set_t), &threadCpus) == ENOSYS)
    {
      printf("pthread_get/set_affinity_np API not supported for this platform: skipping test.");
      return 0;
    }

  CPU_ZERO(&keepCpus);
  for (cpu = 1; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    {
	  CPU_SET(cpu, &keepCpus);					/* 0b10101010101010101010101010101010 */
    }

  assert(pthread_getaffinity_np(self, sizeof(cpu_set_t), &threadCpus) == 0);
  if (CPU_COUNT(&threadCpus) > 1)
    {
	  assert(pthread_create(&tid, NULL, mythread, (void*)&threadCpus) == 0);
	  assert(pthread_join(tid, NULL) == 0);
	  CPU_AND(&threadCpus, &threadCpus, &keepCpus);
	  assert(pthread_setaffinity_np(self, sizeof(cpu_set_t), &threadCpus) == 0);
	  vThreadMask = SetThreadAffinityMask(GetCurrentThread(), (*(PDWORD_PTR)&threadCpus) /* Violating Opacity */);
	  assert(vThreadMask != 0);
	  assert(memcmp(&vThreadMask, &threadCpus, sizeof(DWORD_PTR)) == 0);
	  assert(pthread_create(&tid, NULL, mythread, (void*)&threadCpus) == 0);
	  assert(pthread_join(tid, NULL) == 0);
    }

  return 0;
}
Example #11
int
main()
{
  int result;
  unsigned int cpu;
  cpu_set_t newmask;
  cpu_set_t processCpus;
  cpu_set_t mask;
  cpu_set_t switchmask;
  cpu_set_t flipmask;
  pthread_t self = pthread_self();

  CPU_ZERO(&mask);
  CPU_ZERO(&switchmask);
  CPU_ZERO(&flipmask);

  if (pthread_getaffinity_np(self, sizeof(cpu_set_t), &processCpus) == ENOSYS)
    {
      printf("pthread_get/set_affinity_np API not supported for this platform: skipping test.");
      return 0;
    }
  assert(pthread_getaffinity_np(self, sizeof(cpu_set_t), &processCpus) == 0);
  printf("This thread has a starting affinity with %d CPUs\n", CPU_COUNT(&processCpus));
  assert(!CPU_EQUAL(&mask, &processCpus));

  for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    {
	  CPU_SET(cpu, &switchmask);				/* 0b01010101010101010101010101010101 */
    }
  for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu++)
    {
	  CPU_SET(cpu, &flipmask);					/* 0b11111111111111111111111111111111 */
    }

  result = pthread_setaffinity_np(self, sizeof(cpu_set_t), &processCpus);
  if (result != 0)
	{
	  assert(result != ESRCH);
	  assert(result != EFAULT);
	  assert(result != EPERM);
	  assert(result != EINVAL);
	  assert(result != EAGAIN);
	  assert(result == ENOSYS);
	  assert(CPU_COUNT(&mask) == 1);
	}
  else
	{
	  if (CPU_COUNT(&mask) > 1)
		{
		  CPU_AND(&newmask, &processCpus, &switchmask); /* Remove every other CPU */
		  assert(pthread_setaffinity_np(self, sizeof(cpu_set_t), &newmask) == 0);
		  assert(pthread_getaffinity_np(self, sizeof(cpu_set_t), &mask) == 0);
		  assert(CPU_EQUAL(&mask, &newmask));
		  CPU_XOR(&newmask, &mask, &flipmask);  /* Switch to all alternative CPUs */
		  assert(!CPU_EQUAL(&mask, &newmask));
		  assert(pthread_setaffinity_np(self, sizeof(cpu_set_t), &newmask) == 0);
		  assert(pthread_getaffinity_np(self, sizeof(cpu_set_t), &mask) == 0);
		  assert(CPU_EQUAL(&mask, &newmask));
		}
	}

  return 0;
}
Example #12
static int mlx4_enable_sandy_bridge_fix(struct ibv_context *context)
{
	cpu_set_t my_cpus, dev_local_cpus, result_set;
	int stall_enable;
	int ret;
	int num_cores;

	if (!mlx4_is_sandy_bridge(&num_cores))
		return 0;

	/* by default disable stall on sandy bridge arch */
	stall_enable = 0;

	/*
	 * check if app is bound to cpu set that is inside
	 * of device local cpu set. Disable stalling if true
	 */

	/* use static cpu set - up to CPU_SETSIZE (1024) cpus/node */
	CPU_ZERO(&my_cpus);
	CPU_ZERO(&dev_local_cpus);
	CPU_ZERO(&result_set);
	ret = sched_getaffinity(0, sizeof(my_cpus), &my_cpus);
	if (ret == -1) {
		if (errno == EINVAL)
			fprintf(stderr, PFX "Warning: my cpu set is too small\n");
		else
			fprintf(stderr, PFX "Warning: failed to get my cpu set\n");
		goto out;
	}

	if (mlx4_trace) {
		printf(PFX "Running on cpus: ");
		dump_cpu_set(&my_cpus);
		printf("\n");
	}

	/* get device local cpu set */
	mlx4_local_cpu_set(context, &dev_local_cpus);

	/* make sure result_set is not init to all 0 */
	CPU_SET(0, &result_set);
	/* Set stall_enable if my cpu set and dev cpu set are disjoint sets */
	CPU_AND(&result_set, &my_cpus, &dev_local_cpus);
	stall_enable = CPU_COUNT(&result_set) ? 0 : 1;

	if (mlx4_trace) {
		printf(PFX "HCA:%s local cpus: ", ibv_get_device_name(context->device));
		dump_cpu_set(&dev_local_cpus);
		printf("\n");
		if (CPU_COUNT(&my_cpus) == num_cores) {
			printf(PFX "Warning: CPU affinity wasn't used for this "
				   "process, if the system has more than one numa node, it might be using a remote one.\n");
			printf(PFX "         For achieving better performance, "
				   "please consider setting the CPU "
				   "affinity.\n");
		}
	}

out:
	if (mlx4_trace)
		printf(PFX "Sandy Bridge CPU was detected, cq_stall is %s\n",
		       stall_enable ? "enabled" : "disabled");

	return stall_enable;
}
Example #13
File: pisces.c Project: 8l/kitten
static int 
launch_job(int pisces_fd, struct pisces_job_spec * job_spec)
{

    u32 page_size = (job_spec->use_large_pages ? VM_PAGE_2MB : VM_PAGE_4KB);

    vaddr_t   file_addr = 0;
    cpu_set_t spec_cpus;
    cpu_set_t job_cpus;
    user_cpumask_t lwk_cpumask;

    int status = 0;


    /* Figure out which CPUs are being requested */
    {
	int i = 0;

	CPU_ZERO(&spec_cpus);
	
	for (i = 0; i < 64; i++) {
	    if ((job_spec->cpu_mask & (0x1ULL << i)) != 0) {
		CPU_SET(i, &spec_cpus);
	    }
	}
    }




    /* Check if we can host the job on the current CPUs */
    /* Create a kitten compatible cpumask */
    {
	int i = 0;

	CPU_AND(&job_cpus, &spec_cpus, &enclave_cpus);

	if (CPU_COUNT(&job_cpus) < job_spec->num_ranks) {
	    printf("Error: Could not find enough CPUs for job\n");
	    return -1;
	}
	

	cpus_clear(lwk_cpumask);
	
	for (i = 0; (i < CPU_MAX_ID) && (i < CPU_SETSIZE); i++) {
	    if (CPU_ISSET(i, &job_cpus)) {
		cpu_set(i, lwk_cpumask);
	    }
	}



    }


    /* Load exe file info memory */
    {
	struct pisces_user_file_info * file_info = NULL;
	int    path_len  = strlen(job_spec->exe_path) + 1;
	size_t file_size = 0;
	id_t   my_aspace_id;

	file_info = malloc(sizeof(struct pisces_user_file_info) + path_len);
	memset(file_info, 0, sizeof(struct pisces_user_file_info) + path_len);
    
	file_info->path_len = path_len;
	strncpy(file_info->path, job_spec->exe_path, path_len - 1);
    
	file_size = ioctl(pisces_fd, PISCES_STAT_FILE, file_info);
    
	status = aspace_get_myid(&my_aspace_id);
	if (status != 0) 
	    return status;


	{
	
	    paddr_t pmem = elf_dflt_alloc_pmem(file_size, page_size, 0);

	    printf("PMEM Allocated at %p (file_size=%lu) (page_size=0x%x) (pmem_size=%p)\n", 
		   (void *)pmem, 
		   file_size,
		   page_size, 
		   (void *)round_up(file_size, page_size));

	    if (pmem == 0) {
		printf("Could not allocate space for exe file\n");
		return -1;
	    }

	    /* Map the segment into this address space */
	    status =
		aspace_map_region_anywhere(
					   my_aspace_id,
					   &file_addr,
					   round_up(file_size, page_size),
					   (VM_USER|VM_READ|VM_WRITE),
					   page_size,
					   "File",
					   pmem
					   );
	    if (status)
		return status;
	

	    file_info->user_addr = file_addr;
	}
    
	printf("Loading EXE into memory\n");

	ioctl(pisces_fd, PISCES_LOAD_FILE, file_info);


	free(file_info);
    }


    printf("Job Launch Request (%s) [%s %s]\n", job_spec->name, job_spec->exe_path, job_spec->argv);


    /* Initialize start state for each rank */
    {
	start_state_t * start_state = NULL;
	int rank = 0; 

	/* Allocate start state for each rank */
	start_state = malloc(job_spec->num_ranks * sizeof(start_state_t));
	if (!start_state) {
	    printf("malloc of start_state[] failed\n");
	    return -1;
	}


	for (rank = 0; rank < job_spec->num_ranks; rank++) {
	    int cpu = 0;
	    int i   = 0;
	    
	    for (i = 0; i < CPU_SETSIZE; i++) {
		if (CPU_ISSET(i, &job_cpus)) {
		    CPU_CLR(i, &job_cpus);
		    cpu = i;
		    break;
		}
	    }


	    printf("Loading Rank %d on CPU %d\n", rank, cpu);
	    
	    start_state[rank].task_id  = ANY_ID;
	    start_state[rank].cpu_id   = ANY_ID;
	    start_state[rank].user_id  = 1;
	    start_state[rank].group_id = 1;
	    
	    sprintf(start_state[rank].task_name, job_spec->name);


	    status = elf_load((void *)file_addr,
			      job_spec->name,
			      ANY_ID,
			      page_size, 
			      job_spec->heap_size,   // heap_size 
			      job_spec->stack_size,  // stack_size 
			      job_spec->argv,        // argv_str
			      job_spec->envp,        // envp_str
			      &start_state[rank],
			      0,
			      &elf_dflt_alloc_pmem
			      );
	    

	    if (status) {
		printf("elf_load failed, status=%d\n", status);
	    }
	    

	    if ( aspace_update_user_cpumask(start_state[rank].aspace_id, &lwk_cpumask) != 0) {
		printf("Error updating CPU mask\n");
		return -1;
	    }
		 

	    /* Setup Smartmap regions if enabled */
	    if (job_spec->use_smartmap) {
		int src = 0;
		int dst = 0;

		printf("Creating SMARTMAP mappings...\n");
		for (dst = 0; dst < job_spec->num_ranks; dst++) {
		    for (src = 0; src < job_spec->num_ranks; src++) {
			status =
			    aspace_smartmap(
					    start_state[src].aspace_id,
					    start_state[dst].aspace_id,
					    SMARTMAP_ALIGN + (SMARTMAP_ALIGN * src),
					    SMARTMAP_ALIGN
					    );
			
			if (status) {
			    printf("smartmap failed, status=%d\n", status);
			    return -1;
			}
		    }
		}
		printf("    OK\n");
	    }

	    
	    printf("Creating Task\n");
	    
	    status = task_create(&start_state[rank], NULL);
	}
    }
    

    
    return 0;
}
Example #14
int
test_affinity1(void)
{
  unsigned int cpu;
  cpu_set_t newmask;
  cpu_set_t src1mask;
  cpu_set_t src2mask;
  cpu_set_t src3mask;

  CPU_ZERO(&newmask);
  CPU_ZERO(&src1mask);
  memset(&src2mask, 0, sizeof(cpu_set_t));
  assert(memcmp(&src1mask, &src2mask, sizeof(cpu_set_t)) == 0);
  assert(CPU_EQUAL(&src1mask, &src2mask));
  assert(CPU_COUNT(&src1mask) == 0);

  CPU_ZERO(&src1mask);
  CPU_ZERO(&src2mask);
  CPU_ZERO(&src3mask);

  for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    {
	  CPU_SET(cpu, &src1mask);					/* 0b01010101010101010101010101010101 */
    }
  for (cpu = 0; cpu < sizeof(cpu_set_t)*4; cpu++)
    {
	  CPU_SET(cpu, &src2mask);					/* 0b00000000000000001111111111111111 */
    }
  for (cpu = sizeof(cpu_set_t)*4; cpu < sizeof(cpu_set_t)*8; cpu += 2)
  {
	  CPU_SET(cpu, &src2mask);					/* 0b01010101010101011111111111111111 */
  }
  for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    {
	  CPU_SET(cpu, &src3mask);					/* 0b01010101010101010101010101010101 */
    }

  assert(CPU_COUNT(&src1mask) == (sizeof(cpu_set_t)*4));
  assert(CPU_COUNT(&src2mask) == ((sizeof(cpu_set_t)*4 + (sizeof(cpu_set_t)*2))));
  assert(CPU_COUNT(&src3mask) == (sizeof(cpu_set_t)*4));
  CPU_SET(0, &newmask);
  CPU_SET(1, &newmask);
  CPU_SET(3, &newmask);
  assert(CPU_ISSET(1, &newmask));
  CPU_CLR(1, &newmask);
  assert(!CPU_ISSET(1, &newmask));
  CPU_OR(&newmask, &src1mask, &src2mask);
  assert(CPU_EQUAL(&newmask, &src2mask));
  CPU_AND(&newmask, &src1mask, &src2mask);
  assert(CPU_EQUAL(&newmask, &src1mask));
  CPU_XOR(&newmask, &src1mask, &src3mask);
  memset(&src2mask, 0, sizeof(cpu_set_t));
  assert(memcmp(&newmask, &src2mask, sizeof(cpu_set_t)) == 0);

  /*
   * Need to confirm the bitwise logical right-shift in CpuCount().
   * i.e. zeros inserted into MSB on shift because cpu_set_t is
   * unsigned.
   */
  CPU_ZERO(&src1mask);
  for (cpu = 1; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    {
	  CPU_SET(cpu, &src1mask);					/* 0b10101010101010101010101010101010 */
    }
  assert(CPU_ISSET(sizeof(cpu_set_t)*8-1, &src1mask));
  assert(CPU_COUNT(&src1mask) == (sizeof(cpu_set_t)*4));

  return 0;
}
Example #15
int
pthread_setaffinity_np (pthread_t thread, size_t cpusetsize,
                                  const cpu_set_t *cpuset)
     /*
      * ------------------------------------------------------
      * DOCPUBLIC
      *   The pthread_setaffinity_np() function sets the CPU affinity mask
      *   of the thread thread to the CPU set pointed to by cpuset.  If the
      *   call is successful, and the thread is not currently running on one
      *   of the CPUs in cpuset, then it is migrated to one of those CPUs.
      *
      * PARAMETERS
      *		thread
      *					The target thread
      *
      *		cpusetsize
      *					Ignored in pthreads4w.
      *					Usually set to sizeof(cpu_set_t)
      *
      *		cpuset
      *					The new cpu set mask.
      *
      *   				The set of CPUs on which the thread will actually run
      *   				is the intersection of the set specified in the cpuset
      *   				argument and the set of CPUs actually present for
      *   				the process.
      *
      * DESCRIPTION
      *   The pthread_setaffinity_np() function sets the CPU affinity mask
      *   of the thread thread to the CPU set pointed to by cpuset.  If the
      *   call is successful, and the thread is not currently running on one
      *   of the CPUs in cpuset, then it is migrated to one of those CPUs.
      *
      * RESULTS
      * 				0		Success
      * 				ESRCH	Thread does not exist
      * 				EFAULT	cpuset is NULL
      * 				EAGAIN	The thread affinity could not be set
      * 				ENOSYS  The platform does not support this function
      *
      * ------------------------------------------------------
      */
{
#if ! defined(HAVE_CPU_AFFINITY)

  return ENOSYS;

#else

  int result = 0;
  ptw32_thread_t * tp;
  ptw32_mcs_local_node_t node;
  cpu_set_t processCpuset;

  ptw32_mcs_lock_acquire (&ptw32_thread_reuse_lock, &node);

  tp = (ptw32_thread_t *) thread.p;

  if (NULL == tp || thread.x != tp->ptHandle.x || NULL == tp->threadH)
    {
	  result = ESRCH;
    }
  else
	{
	  if (cpuset)
		{
		  if (sched_getaffinity(0, sizeof(cpu_set_t), &processCpuset))
		    {
			  result = PTW32_GET_ERRNO();
		    }
		  else
			{
			  /*
			   * Result is the intersection of available CPUs and the mask.
			   */
			  cpu_set_t newMask;

			  CPU_AND(&newMask, &processCpuset, cpuset);

			  if (((_sched_cpu_set_vector_*)&newMask)->_cpuset)
				{
				  if (SetThreadAffinityMask (tp->threadH, ((_sched_cpu_set_vector_*)&newMask)->_cpuset))
					{
					  /*
					   * We record the intersection of the process affinity
					   * and the thread affinity cpusets so that
					   * pthread_getaffinity_np() returns the actual thread
					   * CPU set.
					   */
					  tp->cpuset = ((_sched_cpu_set_vector_*)&newMask)->_cpuset;
					}
				  else
					{
					  result = EAGAIN;
					}
				}
			  else
				{
				  result = EINVAL;
				}
			}
		}
	  else
		{
		  result = EFAULT;
		}
	}

  ptw32_mcs_lock_release (&node);

  return result;

#endif
}
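The DOCPUBLIC block above documents that the effective affinity is the intersection of the requested cpuset and the process cpuset, and that unsupported platforms report ENOSYS from the call itself. A minimal caller sketch along those lines follows; the function name and the even-CPU mask are illustrative and not taken from pthreads4w.

#define _GNU_SOURCE   /* on glibc, needed for the *_np calls and CPU_* macros */
#include <errno.h>
#include <pthread.h>
#include <sched.h>

/*
 * Illustrative sketch only: restrict the calling thread to the
 * even-numbered CPUs it already owns, tolerating platforms where the
 * affinity API returns ENOSYS.
 */
static int restrict_to_even_cpus(void)
{
  unsigned int cpu;
  cpu_set_t current, wanted;
  pthread_t self = pthread_self();
  int rc = pthread_getaffinity_np(self, sizeof(cpu_set_t), &current);

  if (rc == ENOSYS)
    return 0;                 /* affinity not supported: leave the mask alone */
  if (rc != 0)
    return rc;                /* ESRCH/EFAULT/...: report to the caller */

  CPU_ZERO(&wanted);
  for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    CPU_SET(cpu, &wanted);

  CPU_AND(&wanted, &wanted, &current);  /* keep only CPUs we actually have */
  if (CPU_COUNT(&wanted) == 0)
    return 0;                 /* nothing would be left; keep the original mask */

  return pthread_setaffinity_np(self, sizeof(cpu_set_t), &wanted);
}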
示例#16
0
void odp_cpumask_and(odp_cpumask_t *dest, const odp_cpumask_t *src1,
		     const odp_cpumask_t *src2)
{
	CPU_AND(&dest->set, &src1->set, &src2->set);
}
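The ODP wrapper above simply forwards to the three-argument form of CPU_AND. A short usage sketch follows; it assumes the companion cpumask helpers odp_cpumask_zero(), odp_cpumask_set() and odp_cpumask_count() and the umbrella header odp_api.h, so treat the header name and the surrounding setup as assumptions rather than verified build instructions.

#include <odp_api.h>	/* assumed umbrella header for the ODP API */

/*
 * Sketch: intersect the CPUs a worker would like with the CPUs the
 * platform actually offers, then count what is left.
 */
static int count_usable_cpus(const odp_cpumask_t *available)
{
	odp_cpumask_t wanted;
	odp_cpumask_t usable;
	int cpu;

	odp_cpumask_zero(&wanted);
	for (cpu = 0; cpu < 4; cpu++)	/* ask for CPUs 0..3 */
		odp_cpumask_set(&wanted, cpu);

	odp_cpumask_and(&usable, &wanted, available);
	return odp_cpumask_count(&usable);
}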