void ObMergeServer::on_ioth_start()
{
   int64_t affinity_start_cpu = ms_config_.io_thread_start_cpu;
   int64_t affinity_end_cpu = ms_config_.io_thread_end_cpu;
   if (0 <= affinity_start_cpu
       && affinity_start_cpu <= affinity_end_cpu)
   {
     static volatile int64_t cpu = 0;
     int64_t local_cpu = __sync_fetch_and_add(&cpu, 1) % (affinity_end_cpu - affinity_start_cpu + 1) + affinity_start_cpu;
     cpu_set_t cpuset;
     CPU_ZERO(&cpuset);
     CPU_SET(local_cpu, &cpuset);
     int ret = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
     TBSYS_LOG(INFO, "io thread setaffinity tid=%ld ret=%d cpu=%ld start=%ld end=%ld",
               GETTID(), ret, local_cpu, affinity_start_cpu, affinity_end_cpu);
   }
 }
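For comparison, the same round-robin pinning idea can be written without the OceanBase helpers. The sketch below is ours (names like pin_self_round_robin are illustrative, not from the project) and assumes a Linux/glibc target:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

/* Pin the calling thread to one CPU in [start_cpu, end_cpu],
 * handing out CPUs round-robin across callers, as in the example above. */
static int pin_self_round_robin(int start_cpu, int end_cpu)
{
    static volatile int next = 0;
    int ncpus = end_cpu - start_cpu + 1;
    int cpu = start_cpu + __sync_fetch_and_add(&next, 1) % ncpus;

    cpu_set_t set;
    CPU_ZERO(&set);
    CPU_SET(cpu, &set);
    /* Returns 0 on success or a positive errno value on failure. */
    return pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
}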
Example #2
File: zmap.c Project: jacobgardiner/zmap
static void set_cpu(void)
{
    pthread_mutex_lock(&cpu_affinity_mutex);
    static int core=0;
    int num_cores = sysconf(_SC_NPROCESSORS_ONLN);
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(core, &cpuset);
    if (pthread_setaffinity_np(pthread_self(),
                               sizeof(cpu_set_t), &cpuset) != 0) {
        log_error("zmap", "can't set thread CPU affinity");
    }
    log_trace("zmap", "set thread %lu affinity to core %d",
              (unsigned long) pthread_self(), core);
    core = (core + 1) % num_cores;
    pthread_mutex_unlock(&cpu_affinity_mutex);
}
Example #3
static void
vm_loop(struct vmctx *ctx, int vcpu, uint64_t startrip)
{
	int error, rc, prevcpu;
	enum vm_exitcode exitcode;
	cpuset_t active_cpus;

	if (vcpumap[vcpu] != NULL) {
		error = pthread_setaffinity_np(pthread_self(),
		    sizeof(cpuset_t), vcpumap[vcpu]);
		assert(error == 0);
	}

	error = vm_active_cpus(ctx, &active_cpus);
	assert(CPU_ISSET(vcpu, &active_cpus));

	error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, startrip);
	assert(error == 0);

	while (1) {
		error = vm_run(ctx, vcpu, &vmexit[vcpu]);
		if (error != 0)
			break;

		prevcpu = vcpu;

		exitcode = vmexit[vcpu].exitcode;
		if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
			fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
			    exitcode);
			exit(1);
		}

		rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);

		switch (rc) {
		case VMEXIT_CONTINUE:
			break;
		case VMEXIT_ABORT:
			abort();
		default:
			exit(1);
		}
	}
	fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
}
Example #4
static void
vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip)
{
	cpuset_t mask;
	int error, rc, prevcpu;
	enum vm_exitcode exitcode;

	if (pincpu >= 0) {
		CPU_ZERO(&mask);
		CPU_SET(pincpu + vcpu, &mask);
		error = pthread_setaffinity_np(pthread_self(),
					       sizeof(mask), &mask);
		assert(error == 0);
	}

	while (1) {
		error = vm_run(ctx, vcpu, rip, &vmexit[vcpu]);
		if (error != 0)
			break;

		prevcpu = vcpu;

		exitcode = vmexit[vcpu].exitcode;
		if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
			fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
			    exitcode);
			exit(1);
		}

		rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);

		switch (rc) {
		case VMEXIT_CONTINUE:
			rip = vmexit[vcpu].rip + vmexit[vcpu].inst_length;
			break;
		case VMEXIT_RESTART:
			rip = vmexit[vcpu].rip;
			break;
		case VMEXIT_RESET:
			exit(0);
		default:
			exit(1);
		}
	}
	fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
}
Example #5
/* set the thread affinity. */
static int setaffinity(pthread_t me, int i)
{
    cpu_set_t cpumask;

    if (i == -1)
        return 0;

    /* Set thread affinity. */
    CPU_ZERO(&cpumask);
    CPU_SET(i, &cpumask);

    if (pthread_setaffinity_np(me, sizeof(cpu_set_t), &cpumask) != 0) {
        D("Unable to set affinity: %s", strerror(errno));
        return 1;
    }
    return 0;
}
Example #6
void AExecutable::SetThreadAffinity(boost::thread* daThread, int threadPriority, std::vector<short> CPUsToBind, int scheduler) {
#ifndef __APPLE__
	int policy;
	pthread_t threadID = (pthread_t) (daThread->native_handle());
	if (scheduler > 0) {

		sched_param param;
		if (pthread_getschedparam(threadID, &policy, &param) != 0) {
			perror("pthread_getschedparam");
			exit(EXIT_FAILURE);
		}
		//LOG_ERROR("Policy " << policy << ", priority " << param.sched_priority);
		/**
		 * Set scheduling algorithm
		 * Possible values: SCHED_FIFO(1), SCHED_RR(2), SCHED_OTHER(0)
		 */
		policy = scheduler;
		param.sched_priority = threadPriority; // portable field name; __sched_priority is glibc-internal
		if (pthread_setschedparam(threadID, policy, &param) != 0) {
			perror("pthread_setschedparam");
			exit(EXIT_FAILURE);
		}
	}

	if (CPUsToBind.size() > 0) {
		/**
		 * Bind the thread to CPUs from CPUsToBind
		 */
		cpu_set_t mask;
		CPU_ZERO(&mask);

		for (unsigned int i = 0; i < CPUsToBind.size(); i++) {
			if (CPUsToBind[i] == -1) {
				CPU_ZERO(&mask);
				break;
			}
			CPU_SET(CPUsToBind[i], &mask);
		}

		if (pthread_setaffinity_np(threadID, sizeof(mask), &mask) != 0) { // returns an errno value, never negative
			throw NA62Error("Unable to bind threads to specific CPUs!");
		}
	}
#endif
}
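The scheduling half of the routine above reduces to a short standalone pattern. A hedged sketch (the function name is ours; SCHED_FIFO/SCHED_RR typically require elevated privileges):

#include <pthread.h>
#include <sched.h>

/* Switch `tid` to the given policy and priority; returns 0 or an errno value. */
static int set_rt_priority(pthread_t tid, int policy, int prio)
{
    struct sched_param param;
    param.sched_priority = prio;   /* the portable field name */
    return pthread_setschedparam(tid, policy, &param);
}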
Example #7
File: fvl_main.c Project: Cai900205/test
static void fvl_ssd_write_t(void *arg)
{
    fvl_ssd_arg_t *priv=arg;
    fvl_queue_t *fqueue=priv->fqueue;
    int rvl;
    int fd ;
    uint32_t  index = priv->index;
    uint8_t count=0;
    char path1[20];
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(priv->cpu,&cpuset);
    rvl = pthread_setaffinity_np(pthread_self(),sizeof(cpu_set_t),&cpuset);
    if(rvl){
        printf("(%d)fail:pthread_setaffinity_np()\n",priv->cpu);
        return;
    }
    sprintf(path1,"/mnt/test%d",index);
    fd=fvl_ssd_open(path1);
    int dequeue_num=-1;
    void *buf=NULL;
/*    uint32_t test_data=0+index*0x1000000;
    uint32_t times=0,error_count=0;
*/
    while(1)
    {
        dequeue_num=fvl_dequeue(fqueue,4);
        if(dequeue_num != -1)
        {
            buf = fqueue->buf+dequeue_num*FVL_SRIO_DMA_BLKBYTES;
            fvl_ssd_write(fd,buf,4*FVL_SRIO_DMA_BLKBYTES);
            fvl_dequeue_complete(fqueue,4);
            count++;
            if(count == 16)
            {
                fvl_ssd_close(fd);
                index=index+4;
                sprintf(path1,"/mnt/test%d",index);
                fd=fvl_ssd_open(path1);
                count = 0;
            }
        }
    }

}
Example #8
File: main.c Project: Cai900205/test
void thread_channel_recv(void *arg)
{
    struct timeval tm_start,tm_end;
    fvl_read_rvl_t rlen;
    uint8_t i=0;
    int rvl=0;
    uint64_t total_count=0;
    gettimeofday(&tm_start,NULL);
    int fd=0;
    int *j=(int *)arg;
    fd=*j;
    printf("j:%d\n",*j);
    int cpu=*j+11;
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(cpu,&cpuset);
    rvl = pthread_setaffinity_np(pthread_self(),sizeof(cpu_set_t),&cpuset);
    if(rvl){
        printf("(%d)fail:pthread_setaffinity_np()\n",cpu);
        return;
    }
    while(1)
    {
        rlen.len=0x100000;
        rvl=fvl_srio_read(fd,&rlen);
        if(rlen.len!=0)
        {
            test_data(i,rlen.buf_virt,10485,0);
            i++;    
//            printf("##########:%d\n",i);
            gettimeofday(&tm_end,NULL);
            total_count++;        
            double diff=(tm_end.tv_sec-tm_start.tv_sec)+(tm_end.tv_usec-tm_start.tv_usec)/1000000.0;
            if(diff>5)
            {
                double da_lu=total_count/diff;
                printf("receive fd: %d length(byte): %-15u time(s): %-15f  avg MB/s: %-15f total_count:%lld \n",fd,rlen.len,diff,da_lu,total_count);
                fflush(stdout);
                total_count=0;
                gettimeofday(&tm_start,NULL);
            }       
        }
        fvl_srio_read_feedback(fd,rlen.num);
    }
}
Example #9
File: udp-send.c Project: Cai900205/test
void *t_send(void *arg)
{
	
	int z=0;
	char buf[40000];
	struct task_type *send=arg;
	int sockfd=send->fd;
	struct sockaddr_in adr_srvr=send->adr;
	int opt=1;
	int size = send->len;
	cpu_set_t cpuset;
    struct timeval tm_start,tm_end;
	
	CPU_ZERO(&cpuset);
	CPU_SET(send->cpu,&cpuset);

	if((z=pthread_setaffinity_np(pthread_self(),sizeof(cpu_set_t),&cpuset))>0)
	{
		printf("cpu error!\n");
		exit(1);
	}
    gettimeofday(&tm_start,NULL);
    uint64_t total_count=0;
	while(1)
	{
//		memset(buf,1,size);
		z=sendto(sockfd,buf,size,0,(struct sockaddr *)&adr_srvr,sizeof(adr_srvr));
		if(z<0)
		{
			printf("send error!\n");
			exit(1);
		}
        total_count++;
        gettimeofday(&tm_end,NULL);
        double diff = (tm_end.tv_sec-tm_start.tv_sec)+((tm_end.tv_usec-tm_start.tv_usec)/1000000.0);
        if(diff>5)
        {
            double du_la=((total_count*size)/diff)/1024/1024;
            printf("thread: %d length(byte):%-15u time(s):%-15f avg MB/s %-15f total_count:%lld\n",send->cpu,size,diff,du_la,total_count);
            total_count=0;
            gettimeofday(&tm_start,NULL);
        }
	}
	pthread_exit(NULL);
}
Example #10
bool SkThread::setProcessorAffinity(unsigned int processor) {
    SkThread_PThreadData* pthreadData = static_cast<SkThread_PThreadData*>(fData);
    if (!pthreadData->fValidPThread) {
        return false;
    }

    cpu_set_t parentCpuset;
    if (0 != pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), &parentCpuset)) {
        return false;
    }

    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(nth_set_cpu(processor, &parentCpuset), &cpuset);
    return 0 == pthread_setaffinity_np(pthreadData->fPThread,
                                       sizeof(cpu_set_t),
                                       &cpuset);
}
Example #11
void write_affinity(cpus_t *p) {
  cpu_set_t mask;
  int exists_pos = 0 ;

  CPU_ZERO(&mask) ;
  for (int k = 0 ; k < p->sz ; k++) {
    if (p->cpu[k] >= 0) {
      CPU_SET(p->cpu[k],&mask) ;
      exists_pos = 1 ;
    }
  }
  if  (exists_pos) {
    int r = pthread_setaffinity_np(pthread_self(),sizeof(mask),&mask) ;
    if (r != 0) {
      errexit("pthread_setaffinity_np",r) ;
    }
  }
}
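A matching read side, shown as a sketch of our own (not part of the original source), queries the current mask with pthread_getaffinity_np:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* Print the CPUs the calling thread is currently allowed to run on. */
static void read_affinity(void)
{
    cpu_set_t mask;
    int r = pthread_getaffinity_np(pthread_self(), sizeof(mask), &mask);
    if (r != 0) {
        fprintf(stderr, "pthread_getaffinity_np: error %d\n", r);
        return;
    }
    for (int k = 0; k < CPU_SETSIZE; k++)
        if (CPU_ISSET(k, &mask))
            printf("cpu %d\n", k);
}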
Example #12
			void setaffinity(std::vector<uint64_t> const & procs)
			{
				cpu_set_t cpuset;
				
				CPU_ZERO(&cpuset);
				for ( uint64_t i = 0; i < procs.size(); ++i )
					CPU_SET(procs[i],&cpuset);
					
				int const err = pthread_setaffinity_np(*thread, sizeof(cpu_set_t), &cpuset);
				
				if ( err != 0 )
				{
					::libmaus::exception::LibMausException se;
					se.getStream() << "pthread_setaffinity_np failed: " << strerror(err) << std::endl;
					se.finish();
					throw se;
				}
			}
Example #13
void HttpWorker::setAffinity(int cpu)
{
#ifdef HAVE_PTHREAD_SETAFFINITY_NP
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);

	TRACE(1, "setAffinity: %d", cpu);

	int rv = pthread_setaffinity_np(thread_, sizeof(set), &set);
	if (rv < 0) {
		log(Severity::error, "setting scheduler affinity on CPU %d failed for worker %u. %s", cpu, id_, strerror(errno));
	}
#else
	log(Severity::error, "setting scheduler affinity on CPU %d failed for worker %u. %s", cpu, id_, strerror(ENOTSUP));
#endif
}
Example #14
int bindthread2core(pthread_t thread_id, u_int core_id) {
#ifdef HAVE_PTHREAD_SETAFFINITY_NP
  cpu_set_t cpuset;
  int s;

  CPU_ZERO(&cpuset);
  CPU_SET(core_id, &cpuset);
  if((s = pthread_setaffinity_np(thread_id, sizeof(cpu_set_t), &cpuset)) != 0) {
    fprintf(stderr, "Error while binding to core %u: errno=%i\n", core_id, s);
    return(-1);
  } else {
    return(0);
  }
#else
  fprintf(stderr, "WARNING: your system lacks of pthread_setaffinity_np() (not core binding)\n");
  return(0);
#endif
}
Example #15
void bar(int x)
{
    cpu_set_t cpuset;
    pthread_t tid = pthread_self();
    
    CPU_ZERO(&cpuset);
    CPU_SET(2, &cpuset);
     
    pthread_setaffinity_np(tid, sizeof(cpuset), &cpuset);
    timespec time_val{}; 
    //time_val.tv_nsec = sleep_ns; // 10 micro sec
    // do stuff...
    for(int i = 0 ; !signal_received; i++)
    {
        std::cout << " bar: "<< tid << std::endl;
        //nanosleep( &time_val, NULL);
    }
}
Example #16
static int bind_cpu(thread_t *thread) {
    size_t setsize;
    cpu_set_t *cur_cpuset;
    cpu_set_t *new_cpuset;

    int ncpus = max_number_of_cpus();

    if (thread == NULL) {
        // if thread is NULL it means the emulator is disabled, return without setting CPU affinity
        //printf("thread self is null");
        return 0;
    }

    if (ncpus == 0) {
        return 1;
    }

    setsize = CPU_ALLOC_SIZE(ncpus);
    cur_cpuset = CPU_ALLOC(ncpus);
    new_cpuset = CPU_ALLOC(ncpus);
    CPU_ZERO_S(setsize, cur_cpuset);
    CPU_ZERO_S(setsize, new_cpuset);
    CPU_SET_S(thread->cpu_id, setsize, new_cpuset);

    if (pthread_getaffinity_np(thread->pthread, setsize, cur_cpuset) != 0) {
        DBG_LOG(ERROR, "Cannot get thread tid [%d] affinity, pthread: 0x%lx on processor %d\n",
        		thread->tid, thread->pthread, thread->cpu_id);
        return 1;
    }

    if (CPU_EQUAL(cur_cpuset, new_cpuset)) {
        //printf("No need to bind CPU\n");
        return 0;
    }

    DBG_LOG(INFO, "Binding thread tid [%d] pthread: 0x%lx on processor %d\n", thread->tid, thread->pthread, thread->cpu_id);

    if (pthread_setaffinity_np(thread->pthread, setsize, new_cpuset) != 0) {
        DBG_LOG(ERROR, "Cannot bind thread tid [%d] pthread: 0x%lx on processor %d\n", thread->tid, thread->pthread, thread->cpu_id);
        return 1;
    }

    return 0;
}
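One caveat with the CPU_ALLOC family used above: the sets are heap allocations and should be released with CPU_FREE on every path (the original returns without freeing). A minimal sketch of the pairing, with names of our choosing:

#define _GNU_SOURCE
#include <errno.h>
#include <pthread.h>
#include <sched.h>

/* Pin `tid` to `cpu` using dynamically sized sets; returns 0 or an errno value. */
static int pin_with_alloc(pthread_t tid, int ncpus, int cpu)
{
    size_t setsize = CPU_ALLOC_SIZE(ncpus);
    cpu_set_t *set = CPU_ALLOC(ncpus);
    if (set == NULL)
        return ENOMEM;
    CPU_ZERO_S(setsize, set);
    CPU_SET_S(cpu, setsize, set);
    int r = pthread_setaffinity_np(tid, setsize, set);
    CPU_FREE(set);   /* release whether the call succeeded or not */
    return r;
}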
Example #17
/* handle_error_en() as defined in the pthread_setaffinity_np(3) man page */
#define handle_error_en(en, msg) \
	do { errno = en; perror(msg); exit(EXIT_FAILURE); } while (0)

int
main(int argc, char *argv[])
{
	int s, j, nprocs;
	cpu_set_t cpuset;
	pthread_t thread;

	thread = pthread_self();
	nprocs = sysconf(_SC_NPROCESSORS_ONLN);

	/* Start with every online CPU in the affinity mask */

	CPU_ZERO(&cpuset);
	for (j = 0; j < nprocs; j++)
		CPU_SET(j, &cpuset);


	/* Remove CPUs 1-5, leaving CPU 0 and any CPUs above 5 */
	CPU_CLR(1, &cpuset);
	CPU_CLR(2, &cpuset);
	CPU_CLR(3, &cpuset);
	CPU_CLR(4, &cpuset);
	CPU_CLR(5, &cpuset);
	/* show which CPUs remain set in the mask */
	for (j = 0; j < nprocs; j++)
		fprintf(stdout, "CPU: %d, status: %d\n", j, CPU_ISSET(j, &cpuset));

	s = pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
	if (s != 0)
		handle_error_en(s, "pthread_setaffinity_np");

	/* Check the actual affinity mask assigned to the thread */

	s = pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
	if (s != 0)
		handle_error_en(s, "pthread_getaffinity_np");

	printf("Set returned by pthread_getaffinity_np() contained:\n");
	for (j = 0; j < CPU_SETSIZE; j++)
		if (CPU_ISSET(j, &cpuset))
			printf("    CPU %d\n", j);

	exit(EXIT_SUCCESS);
}
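After narrowing the mask as above, the effect can be spot-checked with sched_getcpu(). A small sketch under the same glibc assumptions:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* Report where the calling thread is running right now; the answer can
 * change between calls unless the affinity mask pins a single CPU. */
static void report_cpu(void)
{
    int cpu = sched_getcpu();
    if (cpu < 0)
        perror("sched_getcpu");
    else
        printf("currently on CPU %d\n", cpu);
}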
Example #18
void viterbi_stream_create_threads(DATA_STREAM* dstream)
{
	int i;
	pthread_barrier_init(&dstream->barrier, NULL, NTHREADS);

	for (i = 0; i < NTHREADS-1; i++)
		sem_init(&semsynch[i], 0, 0);

	if(NTHREADS == 1)
		return;

	pthread_t threads[NTHREADS];
	pthr_info_t args;
	args.dstream = dstream; args.thrid = 0;

#ifdef _GNU_SOURCE
	cpu_set_t cpuset;
	CPU_ZERO(&cpuset);
	CPU_SET(MAPIDCPU(NTHREADS-1), &cpuset);
	threads[NTHREADS-1] = pthread_self();
	pthread_setaffinity_np(threads[NTHREADS-1], sizeof(cpu_set_t), &cpuset);
#endif

	for (i = 0; i < NTHREADS-1; i++)
	{
		pthr_info_t *argscopy = calloc(1, sizeof(pthr_info_t));
		memcpy(argscopy, &args, sizeof(pthr_info_t));
		argscopy->thrid	= i;

		pthread_attr_t attr;
		pthread_attr_init(&attr);
#ifdef _GNU_SOURCE
		CPU_ZERO(&cpuset);
		CPU_SET(MAPIDCPU(i), &cpuset);
		pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
#endif
		if (pthread_create(&threads[i], &attr, viterbi_stream_thread_loop, argscopy))
			exit(fprintf(stderr, "ERROR could not create worker thread\n"));
	}

#ifdef _GNU_SOURCE
	printf("THR %d running on cpu %d\n", NTHREADS-1, sched_getcpu());
#endif
}
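The attribute-based variant used above can be reduced to a few lines. The following sketch (names are ours) pins a new thread to `cpu` before it ever runs, avoiding any window where it executes on the wrong core:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

/* Create a thread already bound to `cpu`; returns 0 or an errno value. */
static int spawn_pinned(pthread_t *tid, int cpu, void *(*fn)(void *), void *arg)
{
    cpu_set_t set;
    CPU_ZERO(&set);
    CPU_SET(cpu, &set);

    pthread_attr_t attr;
    pthread_attr_init(&attr);
    /* Affinity is applied at creation, so there is no race with the body. */
    pthread_attr_setaffinity_np(&attr, sizeof(set), &set);
    int r = pthread_create(tid, &attr, fn, arg);
    pthread_attr_destroy(&attr);
    return r;
}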
Example #19
File: threads.cpp Project: notaz/mesa
void bindThread(uint32_t threadId, uint32_t procGroupId = 0, bool bindProcGroup=false)
{
    // Only bind threads when MAX_WORKER_THREADS isn't set.
    if (KNOB_MAX_WORKER_THREADS && bindProcGroup == false)
    {
        return;
    }

#if defined(_WIN32)
    {
        GROUP_AFFINITY affinity = {};
        affinity.Group = procGroupId;

#if !defined(_WIN64)
        if (threadId >= 32)
        {
            // In a 32-bit process on Windows it is impossible to bind
            // to logical processors 32-63 within a processor group.
            // In this case set the mask to 0 and let the system assign
            // the processor.  Hopefully it will make smart choices.
            affinity.Mask = 0;
        }
        else
#endif
        {
            // If KNOB_MAX_WORKER_THREADS is set, only bind to the proc group,
            // Not the individual HW thread.
            if (!KNOB_MAX_WORKER_THREADS)
            {
                affinity.Mask = KAFFINITY(1) << threadId;
            }
        }

        SetThreadGroupAffinity(GetCurrentThread(), &affinity, nullptr);
    }
#else
    cpu_set_t cpuset;
    pthread_t thread = pthread_self();
    CPU_ZERO(&cpuset);
    CPU_SET(threadId, &cpuset);

    pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
#endif
}
Example #20
File: fvl_srio.c Project: Cai900205/test
//need change
void fvl_srio_recv_ctl(void *arg)
{
    fvl_ctl_thread_t  *priv=arg;
    volatile fvl_srio_ctl_info_t *pcnt;
    uint16_t ctl_count=0;
    pcnt  = (fvl_srio_ctl_info_t *)(priv->buf_virt);
    FVL_LOG("channel:%d Slave recv ctl !\n",priv->fd);
    int rvl=0;
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(priv->cpu,&cpuset);
    rvl = pthread_setaffinity_np(pthread_self(),sizeof(cpu_set_t),&cpuset);
    if(rvl)
    {
        FVL_LOG("(%d)fail:pthread_setaffinity_np()\n",priv->cpu);
        return;
    }
    while(1) 
    {
        uint32_t count=0;
        count=pcnt->com;
        if(pcnt->fla==1)
        {
            pcnt->fla=0;
           // FVL_LOG("recv buffer full!\n");
            continue;
        }
        else
        {
            if(count < ctl_count)
            {
                receive_num[priv->fd]=receive_num[priv->fd]+(head_port[priv->port_num].data_re_cluster[(priv->fd%FVL_PORT_CHAN_NUM_MAX)].buf_num -ctl_count);
                ctl_count=0;
            }
            else if(count >ctl_count)
            {
                receive_num[priv->fd]=receive_num[priv->fd]+(count-ctl_count);
                ctl_count=count;
            }
        }
    }
    pthread_exit(NULL);
    return;
}
Example #21
File: main.c Project: Cai900205/test
void thread_channel_send(void *arg)
{
    chan_send_t *param=(chan_send_t *)arg;
    fvl_dma_pool_t *port_data=param->port_data;
    int rvl=0;
    uint8_t i=0;
    int j=param->fd;
    int fd= j;
    struct timeval tm_start,tm_end;
    int cpu=j+14;
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(cpu,&cpuset);
    rvl = pthread_setaffinity_np(pthread_self(),sizeof(cpu_set_t),&cpuset);
    if(rvl){
        printf("(%d)fail:pthread_setaffinity_np()\n",cpu);
        return;
    }
    sleep(10);
    uint64_t total_count=0;
    gettimeofday(&tm_start,NULL);
    i=0;
    while(1)
    {
	memset(port_data->dma_virt_base,i,Buf_size);
        rvl=fvl_srio_write(fd,port_data->dma_phys_base,Buf_size);
        if(rvl!=0)
        {
            continue;
        }
        gettimeofday(&tm_end,NULL);
        i++;
        total_count++;        
        double diff=(tm_end.tv_sec-tm_start.tv_sec)+(tm_end.tv_usec-tm_start.tv_usec)/1000000.0;
        if(diff>5)
        {
            double da_lu=total_count*Buf_size/1048576/diff;
            printf("fd: %d length(byte): %-15u time(s): %-15f  avg MB/s: %-15f total_count:%lld \n",fd,Buf_size,diff,da_lu,total_count);
            fflush(stdout);
            total_count=0;
            gettimeofday(&tm_start,NULL);
        }
    }
}
Example #22
ThreadPool::ThreadPool(size_t numThreads, int cpuAffinityOffset,
    int cpuAffinityIncr) :
    m_stopped(false), m_stopping(false), m_queueLimit(0)
{
  size_t numCPU = sysconf(_SC_NPROCESSORS_ONLN);
  int cpuInd = cpuAffinityOffset % numCPU;

  for (size_t i = 0; i < numThreads; ++i) {
    boost::thread *thread = m_threads.create_thread(
        boost::bind(&ThreadPool::Execute, this));

#ifdef __linux
    if (cpuAffinityOffset >= 0) {
      int s;

      boost::thread::native_handle_type handle = thread->native_handle();

      //cerr << "numCPU=" << numCPU << endl;
      cpu_set_t cpuset;
      CPU_ZERO(&cpuset);

      CPU_SET(cpuInd, &cpuset);
      cpuInd += cpuAffinityIncr;
      cpuInd = cpuInd % numCPU;

      s = pthread_setaffinity_np(handle, sizeof(cpu_set_t), &cpuset);
      if (s != 0) {
        handle_error_en(s, "pthread_setaffinity_np");
        //cerr << "affinity error with thread " << i << endl;
      }

      // get affinity
      CPU_ZERO(&cpuset);
      s = pthread_getaffinity_np(handle, sizeof(cpu_set_t), &cpuset);
      cerr << "Set returned by pthread_getaffinity_np() contained:\n";
      for (int j = 0; j < CPU_SETSIZE; j++) {
        if (CPU_ISSET(j, &cpuset)) {
          cerr << "    CPU " << j << "\n";
        }
      }
    }
#endif
  }
}
Example #23
void* packet_consumer_thread(void* _id)
{
   long thread_id = (long)_id;

#ifdef HAVE_PTHREAD_SETAFFINITY_NP
   if(numCPU > 1)
   {
      /* Bind this thread to a specific core */
      cpu_set_t cpuset;
      u_long core_id;
      int s;

      if (thread_core_affinity[thread_id] != -1)
         core_id = thread_core_affinity[thread_id] % numCPU;
      else
         core_id = (thread_id + 1) % numCPU;

      CPU_ZERO(&cpuset);
      CPU_SET(core_id, &cpuset);
      if((s = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset)) != 0)
         fprintf(stderr, "Error while binding thread %ld to core %ld: errno=%i\n", 
                 thread_id, core_id, s);
      else {
         printf("Set thread %lu on core %lu/%u\n", thread_id, core_id, numCPU);
      }
   }
#endif

   while(!do_shutdown) {
      u_char *buffer = NULL;
      struct pfring_pkthdr hdr;

      if(pfring_recv(ring[thread_id], &buffer, 0, &hdr, wait_for_packet) > 0) {
         dummyProcesssPacket(&hdr, buffer, (u_char*)thread_id);

      } else {
         if(wait_for_packet == 0) sched_yield();
         //usleep(1);
      }
   }

   return(NULL);
}
Example #24
bool LqThreadBase::SetAffinity(uint64_t Mask) {
    bool Res = true;
    StartThreadLocker.LockWrite();
    if(AffinMask != Mask) {
        AffinMask = Mask;
        if(IsThreadRunning()) {
#if defined(LQPLATFORM_WINDOWS)
            SetThreadAffinityMask((HANDLE)NativeHandle(), Mask);
#elif !defined(LQPLATFORM_ANDROID)
            pthread_setaffinity_np(NativeHandle(), sizeof(Mask), (const cpu_set_t*)&Mask);
#endif
        } else {
            lq_errno_set(ENOENT);
            Res = false;
        }
    }
    StartThreadLocker.UnlockWrite();
    return Res;
}
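Casting a raw uint64_t to const cpu_set_t * as above relies on the two types sharing a layout. A more defensive conversion (our sketch, not from the original library) copies the bits into a real cpu_set_t first:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdint.h>

/* Translate a 64-bit affinity bitmask into a cpu_set_t before applying it. */
static int set_affinity_mask64(pthread_t tid, uint64_t mask)
{
    cpu_set_t set;
    CPU_ZERO(&set);
    for (int i = 0; i < 64; i++)
        if (mask & (UINT64_C(1) << i))
            CPU_SET(i, &set);
    return pthread_setaffinity_np(tid, sizeof(set), &set);
}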
Example #25
void* thread_rtin(void* param)
{
	int cpu = ((TRD_Param*)param)->cpu_num;
	cpu_set_t cpu_set;
	CPU_ZERO(&cpu_set);
	CPU_SET(cpu, &cpu_set);

	if( pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpu_set) != 0 ){
		fprintf(stderr, "Error: cpu[%d] bindind failed.\n", cpu);
		exit(0);
	}
	else{
		;
	}

	memr();

	return NULL;
}
Example #26
static void set_current_worker(int wid) {
    int err;
    if ((err = pthread_setspecific(ws_key, hc_context->workers[wid])) != 0) {
        log_die("Cannot set thread-local worker state");
    }

    /*
     * don't bother worrying about core affinity on Mac OS since no one will be
     * running performance tests there anyway and it doesn't support
     * pthread_setaffinity_np.
     */
#ifndef __MACH__
    /*
     * Using pthread_setaffinity_np can interfere with other tools trying to
     * control affinity (e.g. if you are using srun/aprun/taskset from outside
     * the HClib process). For now we disable this.
     */
#if 0
    cpu_set_t cpu_set;
    CPU_ZERO(&cpu_set);
    if (wid >= hc_context->ncores) {
        /*
         * If we are spawning more worker threads than there are cores, allow
         * the extras to float around.
         */
        int i;
        for (i = 0; i < hc_context->ncores; i++) {
            CPU_SET(i, &cpu_set);
        }
    } else {
        // Pin worker i to core i
        CPU_SET(wid, &cpu_set);
    }

    if ((err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set),
                &cpu_set)) != 0) {
        fprintf(stderr, "WARNING: Failed setting pthread affinity of worker "
                "thread %d, ncores=%d: %s\n", wid, hc_context->ncores,
                strerror(err));
    }
#endif
#endif
}
Example #27
void InternalThread::SetThreadAffinity() {
#define MAX_CORES 64

  static int count = 0;
  static int ncores = 0;
  static int affinity_cores[MAX_CORES];
  static boost::mutex internal_thread_mutex;

  boost::mutex::scoped_lock lock(internal_thread_mutex);
  if (count == 0) {
    char * pin_cores = getenv("INTERNAL_THREADS_PIN");
    if (pin_cores != NULL) {
      char * token = strtok(pin_cores, ",");
      while (token != NULL) {
        affinity_cores[ncores] = atoi(token);
        token = strtok(NULL, ",");
        ncores++;
        if (ncores >= MAX_CORES) {
          LOG(INFO) << "Too many cores used for internal threads. Just take first " << ncores << " cores.";
          break;
        }
      }
    }
  }

  if (ncores > 0) {
    int pin_core_id = count % ncores;
    cpu_set_t set;
    CPU_ZERO(&set);
    CPU_SET(affinity_cores[pin_core_id], &set);
    pthread_t thread = pthread_self();
    int s = pthread_setaffinity_np(thread, sizeof(cpu_set_t), &set);
    if (s != 0) {
      LOG(WARNING) << "Cannot set affinity for internal thread!";
    }
    for (int j=0; j<CPU_SETSIZE; j++) {
      if (CPU_ISSET(j, &set)) {
        LOG(INFO) << "Internal thread is affinitized to core " << j;
      }
    }
  }
  count++;
}
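The environment-driven pinning above suggests a reusable parser. A hedged sketch in C (helper name and behavior are our assumptions, not Caffe's) that turns a "0,2,4"-style list into a cpu_set_t:

#define _GNU_SOURCE
#include <sched.h>
#include <stdlib.h>

/* Parse a comma-separated CPU list into `set`; returns how many CPUs were added. */
static int parse_cpu_list(const char *list, cpu_set_t *set)
{
    int n = 0;
    CPU_ZERO(set);
    while (list && *list) {
        char *end;
        long cpu = strtol(list, &end, 10);
        if (end == list)
            break;                 /* not a number: stop parsing */
        if (cpu >= 0 && cpu < CPU_SETSIZE) {
            CPU_SET((int)cpu, set);
            n++;
        }
        list = (*end == ',') ? end + 1 : end;
    }
    return n;
}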
Example #28
void *SetRotine(void *arg)
{
	cpu_set_t mask;
	CPU_ZERO(&mask);
    CPU_SET(0, &mask);
    if (pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask) != 0) {
		fprintf(stderr, "set thread affinity failed\n");
    }
    int idx = 0;
    int pos = 0;
	while(1)
	{
		struct point p;
		++pos;
		p.x = p.y = p.z = pos+1;
		SetPoint(&g_points[idx],p);
		idx = (idx + 1)%1;
	}
}
Example #29
static void * pkt_cap_cb(void *arg)
{
	pkt_cap_ctx_p ctx = (pkt_cap_ctx_p)arg;
	char *data = NULL;
	int data_len = 0;
	int is_pkt_first = 1;
	cpu_set_t mask;
	struct timeval init_tm;
	memset(&init_tm, 0x00, sizeof(init_tm));
	memset(&ctx->tm_beg, 0x00, sizeof(ctx->tm_beg));

	if (ctx->id >= 0) {
		CPU_ZERO(&mask);
		CPU_SET(ctx->id, &mask);
		if (pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask) != 0) {
			fprintf(stderr, "%s pthread_setaffinity_np on %d failed!\n", __FUNCTION__, ctx->id);
			return NULL;
		}
	}

	while (1) {
		// todo: capture data from device.
		// 

		if (memcmp(&ctx->tm_beg, &init_tm, sizeof(init_tm)) == 0) {
			fprintf(stdout, "is pkt first!\n");
			gettimeofday(&ctx->tm_beg, NULL);
		}
		pkt_st_inc(ctx->p_pkt_info, PKT_PKT, data_len);

		// if packet is http get
		if (is_http_get_pkt(data, data_len)) {
			pkt_st_inc(ctx->p_pkt_info, PKT_HTTP_GET, data_len);
		}

		// todo: send packet.
		//

		gettimeofday(&ctx->tm_end, NULL);
	}

	return NULL;
}
Example #30
PacketChunker::PacketChunker(unsigned port, unsigned nAntennas, unsigned nSubbands, 
                             unsigned nSpectra, unsigned packetsPerHeap)
        : _port(port), _nsamp(nSpectra), _nchans(nSubbands), _npackets(packetsPerHeap), _nantennas(nAntennas)
{   
    // Set configuration options
    _startTime = _startBlockid = 0;
    _heapSize  = nAntennas * nSubbands * nSpectra * sizeof(char);

    // Initialise chunker
    connectDevice();

    // Set thread affinity
    pthread_t thread = pthread_self();
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(0, &cpuset);

    int s = pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
    if (s != 0)
        fprintf(stderr, "Cannot set pthread affinity: %s\n", strerror(s));
}