Code example #1
File: futexbm.c Project: bochf/testing
int main(int argc, char *argv[])
{
	/* int		err;  <------ Unused. Commenting out */
	int		N = 0;             //number of cycles
	int		ping_cpu=0;          //cpu number to pin the ping process
	int		pong_cpu=1;          //cpu number to pin the pong process 
	int		pingbusy = 0;
	int		pongbusy = 0;
   int     ncpus=0;

   struct optbase *ob;

 
   if(NULL == (ob = ReadOptions(argc, argv)))
      return(1);


   ncpus=sysconf( _SC_NPROCESSORS_ONLN );


   if ( ncpus >= 2) {   /* ping and pong each need their own CPU */

      VerboseMessage ("number of CPUs on the box: %d\n", ncpus);
   } 
   else{

      ErrorMessage ("Not enough CPUs to run the test\n");

      return -1;
   }
   

   /* 
      The cpu_set_t data structure represents a set of CPUs.  CPU sets are
      used by sched_setaffinity(2) and similar interfaces.  The cpu_set_t
      data type is implemented as a bitset, and a single set is large
      enough to describe every CPU the kernel supports.
   */

	cpu_set_t	mask;

   /* 
      initialize the mask to the empty set
   */
	CPU_ZERO(&mask);

   /* 
      parse the test options
   */


      int min_cpu=0;
      int max_cpu=ncpus-1;
      int busy=1;
      int nobusy=0;

      GetOptionValue(ob, "PING_CPU", GOV_INT16, &ping_cpu, &min_cpu, &max_cpu, NULL);
      GetOptionValue(ob, "PONG_CPU", GOV_INT16, &pong_cpu, &min_cpu, &max_cpu, NULL);
      GetOptionValue(ob, "N", GOV_INT16, &N, NULL, NULL, NULL);
      GetOptionValue(ob, "PING_BUSY", GOV_INT16, &pingbusy,&nobusy,&busy,NULL);
      GetOptionValue(ob, "PONG_BUSY", GOV_INT16, &pongbusy,&nobusy,&busy,NULL); 
      EvalOptions(ob);


if (0) {	/* legacy argv-based parsing, superseded by GetOptionValue() above */
	if(argc < 4) {
		usage(argv[0]);
		return -1;
	}

	ping_cpu = atoi(argv[1]);
	pong_cpu = atoi(argv[2]);
	N = atoi(argv[3]);

	if(argc > 4) {
		if(strcmp(argv[4], "pongbusy") == 0)
			pongbusy = 1;
		else
		if(strcmp(argv[4], "pingbusy") == 0)
			pingbusy = 1;
	}

	if(argc > 5) {
		if(strcmp(argv[5], "pongbusy") == 0)
			pongbusy = 1;
		else
		if(strcmp(argv[5], "pingbusy") == 0)
			pingbusy = 1;
	}
}





/*
Create the two semaphores with sem_open():
oflag is O_CREAT, mode is 0600 (S_IRUSR | S_IWUSR), subject to umask.
*/



	semInit(&g_semPing, g_pingName);
	semInit(&g_semPong, g_pongName);



	if(fork() == 0) {
		/* child:
		   CPU_SET adds the pong CPU to the mask.
		 */
		VerboseMessage("run child\n");
		CPU_SET(pong_cpu, &mask);
		sched_setaffinity(0, sizeof(mask), &mask); // If pid is zero, then the calling process is used.
	
        
    	if(pongbusy)
			pong_busy(N);
		else
			pong(N);
		TRY(sem_close(g_semPong), -1);
		TRY(sem_unlink(g_pongName), -1);
      VerboseMessage ("wrap child\n");
	  } else {
		/* parent */
		time_type start, finish;
		/* time_type start_one, finish_one; <---- Unused. Commenting out */
      VerboseMessage ("run parent\n");
		CPU_SET(ping_cpu, &mask);
		sched_setaffinity(0, sizeof(mask), &mask);

		getTime(&start);
		if(pingbusy)
			ping_busy(N);
		else
			ping(N);
        VerboseMessage("wrapping parent\n");
		getTime(&finish);

		const long int elapsed = timeDiff(&start, &finish);

		printf("avg RTT = %.1f nsec.\n", (elapsed * 1.0) / N);
		TRY(sem_close(g_semPing), -1);
		TRY(sem_unlink(g_pingName), -1);

	}

	return 0;
}
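
A minimal, self-contained sketch of the same pin-after-fork idiom, assuming Linux with glibc; the ping/pong loop bodies, option handling, and semaphore plumbing are elided:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

/* Pin the calling process to a single CPU. */
static void pin_to_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);                 /* always clear the set first */
	CPU_SET(cpu, &set);
	if (sched_setaffinity(0, sizeof(set), &set) != 0) {
		perror("sched_setaffinity");
		exit(1);
	}
}

int main(void)
{
	if (fork() == 0) {
		pin_to_cpu(1);          /* child: the "pong" side */
		/* ... pong loop would go here ... */
		return 0;
	}
	pin_to_cpu(0);                  /* parent: the "ping" side */
	/* ... ping loop and timing would go here ... */
	wait(NULL);
	return 0;
}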
Code example #2
File: fvl_srio.c Project: Cai900205/test
void fvl_srio_recv_head(void *arg)
{
    fvl_head_thread_t  *priv=arg;
    fvl_srio_ctrlblk_t *pscb;
    fvl_srio_portpool_t *ppool;
    volatile fvl_srio_head_info_t *pcnt;
    pcnt  = (fvl_srio_head_info_t *)(priv->buf_virt);
    uint32_t  port_num=priv->num;
    FVL_LOG("port:%d fvl_srio_recv_head!\n",port_num);
    int rvl=0;
    cpu_set_t cpuset;
    if(!priv->op_mode)
    {
        CPU_ZERO(&cpuset);
        CPU_SET(priv->cpu,&cpuset);
        rvl = pthread_setaffinity_np(pthread_self(),sizeof(cpu_set_t),&cpuset);
        if(rvl)
        {
	          FVL_LOG("(%d)fail:pthread_setaffinity_np()\n",priv->cpu);
	          return;
        }
    }
    while(1) 
    {
        if(!priv->op_mode)
        {
            if(!pcnt->re_flag)
            {
                continue;
            }

            fvl_srio_context_t  *psrio = &g_srio_context;
            uint32_t win_size=0,chan_size=0;
            int win_law=0,i,rvl=0;
            struct srio_dev *sriodev;
            uint32_t attr_read, attr_write;
            attr_read = srio_test_win_attrv[3];
            attr_write = srio_test_win_attrv[0];
            ppool = &psrio->portpool[port_num];
            pcnt->re_flag=0;
            head_port[port_num].re_flag=1;
            FVL_LOG("chan_num:%d\n",psrio->chan_num[port_num]);
            for(i=0;i<psrio->chan_num[port_num];i++)
            {
                head_port[port_num].data_re_cluster[i].buf_size=pcnt->data_re_cluster[i].buf_size;
                head_port[port_num].data_re_cluster[i].buf_num=pcnt->data_re_cluster[i].buf_num;
                head_port[port_num].data_re_cluster[i].cluster_addr=pcnt->data_re_cluster[i].cluster_addr;
                head_port[port_num].data_se_cluster[i].buf_size=pcnt->data_se_cluster[i].buf_size;
                head_port[port_num].data_se_cluster[i].buf_num=pcnt->data_se_cluster[i].buf_num;
                head_port[port_num].data_se_cluster[i].cluster_addr=pcnt->data_se_cluster[i].cluster_addr;
                chan_size =pcnt->data_se_cluster[i].buf_num*pcnt->data_se_cluster[i].buf_size;
                srio_channel_context[(FVL_PORT_CHAN_NUM_MAX*port_num+i)].chan_size=chan_size;
                win_size=win_size+chan_size;
                FVL_LOG("win_size:%08x\n",win_size);
            }
                 
            FVL_LOG("*************************************************************\n");
            sriodev=psrio->sriodev;
            win_law=FVL_BASE_LAW+fvl_get_law((win_size/FVL_BASE_LAW_SIZE)-1);
            FVL_LOG("WIN_LAW:%d, WIN_SIZE:%08x\n",win_law,win_size);
            fsl_srio_set_ibwin(sriodev, port_num, 1, ppool->write_result,
                               head_port[port_num].data_re_cluster[0].cluster_addr, win_law);
            uint32_t win_offset=FVL_BASE_LAW_SIZE;
            for(i=FVL_BASE_LAW;i<win_law;i++)
            {
                win_offset=win_offset*2;
            }
            FVL_LOG("WIN_OFFSET:%08x\n",win_offset);
            ppool->ctl_info_start=ppool->port_info.range_start+win_offset;

            if (fsl_srio_port_connected(sriodev) & (0x1 << port_num)) 
            {
//data
                fsl_srio_set_obwin(sriodev, port_num, 1,
                                   ppool->port_info.range_start,
                                   head_port[port_num].data_se_cluster[0].cluster_addr, win_law);
                fsl_srio_set_obwin_attr(sriodev, port_num, 1,
                                        attr_read, attr_write);
                fsl_srio_set_obwin(sriodev, port_num, 2,
                           ppool->port_info.range_start+win_offset,
                           FVL_SRIO_CTL_ADDR, 11);
                fsl_srio_set_obwin_attr(sriodev, port_num, 2,
                                attr_read, attr_write);
            } 
            else 
            {
                FVL_LOG("SRIO port %d error!\n", port_num + 1);
                return; /* void function: log the error and bail out */
            }
//add debug    
            rvl=fsl_srio_set_targetid(sriodev,port_num,1,target_id[port_num]);
            if(rvl!=0)
            {
                FVL_LOG("SRIO port %d set  target_id faile!\n",port_num);
                return -errno;
            }
            rvl=fsl_srio_set_targetid(sriodev,port_num,2,target_id[port_num]);
            if(rvl!=0)
            {
                FVL_LOG("SRIO port %d set  target_id faile!\n",port_num);
                return -errno;
            }
//end
            // reflag need set 
            memcpy(ppool->pwrite_ctl_data,&head_port[port_num],HEAD_SIZE);
            uint64_t dest_phys,src_phys;
            pscb=&psrio->ctrlblk[FVL_PORT_DMA_NUM*port_num];
            src_phys= ppool->write_ctl_data;
            dest_phys= ppool->ctl_info_start;
            fvl_srio_send(pscb->dmadev,src_phys,dest_phys,HEAD_SIZE);
            head_port[port_num].uflag=1;
            FVL_LOG("port_num:%d Receive Head info and send chan num info sucess!\n",port_num);
        }
        else if(priv->op_mode == 1)
        {
            if(!pcnt->re_flag)
            {
                continue;
            }
            pcnt->re_flag=0;
            head_channel[priv->num].re_flag=1;
            head_channel[priv->num].uflag=1;
            break;
        }
    }
    return;
}
Code example #3
File: cpu-miner.c Project: acimir/cpu-jackpotcoin
static inline void affine_to_cpu(int id, int cpu) {
	cpuset_t set;
	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_CPUSET, -1, sizeof(cpuset_t), &set);
}
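
This is the FreeBSD flavor of thread pinning; on Linux the same helper is usually written with pthread_setaffinity_np(). A hedged sketch of a portable wrapper follows; the FreeBSD branch assumes that CPU_WHICH_TID with an id of -1 addresses the calling thread, as cpuset_setaffinity(2) documents:

#define _GNU_SOURCE              /* for pthread_setaffinity_np() on glibc */
#ifdef __FreeBSD__
#include <sys/param.h>
#include <sys/cpuset.h>
typedef cpuset_t my_cpu_set_t;
#else
#include <sched.h>
#include <pthread.h>
typedef cpu_set_t my_cpu_set_t;
#endif

/* Bind the calling thread to one CPU; returns 0 on success. */
static int bind_this_thread(int cpu)
{
	my_cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
#ifdef __FreeBSD__
	/* CPU_WHICH_TID with id -1 targets the calling thread. */
	return cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	                          sizeof(set), &set);
#else
	return pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
#endif
}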
Code example #4
File: test_tcpnotify_user.c Project: xroumegue/linux
int main(int argc, char **argv)
{
	const char *file = "test_tcpnotify_kern.o";
	int prog_fd, map_fd, perf_event_fd;
	struct tcpnotify_globals g = {0};
	const char *cg_path = "/foo";
	int error = EXIT_FAILURE;
	struct bpf_object *obj;
	int cg_fd = -1;
	__u32 key = 0;
	int rv;
	char test_script[80];
	int pmu_fd;
	cpu_set_t cpuset;

	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);
	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);

	if (setup_cgroup_environment())
		goto err;

	cg_fd = create_and_get_cgroup(cg_path);
	if (!cg_fd)
		goto err;

	if (join_cgroup(cg_path))
		goto err;

	if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
		printf("FAILED: load_bpf_file failed for: %s\n", file);
		goto err;
	}

	rv = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_SOCK_OPS, 0);
	if (rv) {
		printf("FAILED: bpf_prog_attach: %d (%s)\n",
		       error, strerror(errno));
		goto err;
	}

	perf_event_fd = bpf_find_map(__func__, obj, "perf_event_map");
	if (perf_event_fd < 0)
		goto err;

	map_fd = bpf_find_map(__func__, obj, "global_map");
	if (map_fd < 0)
		goto err;

	pmu_fd = setup_bpf_perf_event(perf_event_fd);
	if (pmu_fd < 0 || perf_event_mmap(pmu_fd) < 0)
		goto err;

	pthread_create(&tid, NULL, poller_thread, (void *)&pmu_fd);

	sprintf(test_script,
		"/usr/sbin/iptables -A INPUT -p tcp --dport %d -j DROP",
		TESTPORT);
	system(test_script);

	sprintf(test_script,
		"/usr/bin/nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
		TESTPORT);
	system(test_script);

	sprintf(test_script,
		"/usr/sbin/iptables -D INPUT -p tcp --dport %d -j DROP",
		TESTPORT);
	system(test_script);

	rv = bpf_map_lookup_elem(map_fd, &key, &g);
	if (rv != 0) {
		printf("FAILED: bpf_map_lookup_elem returns %d\n", rv);
		goto err;
	}

	sleep(10);

	if (verify_result(&g)) {
		printf("FAILED: Wrong stats Expected %d calls, got %d\n",
			g.ncalls, rx_callbacks);
		goto err;
	}

	printf("PASSED!\n");
	error = 0;
err:
	bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS);
	close(cg_fd);
	cleanup_cgroup_environment();
	return error;
}
Code example #5
File: snake.c Project: ystk/debian-ltp
int main(int argc, char *argv[]) {
	unsigned long long num_nodes, ram_size;
	unsigned long num_forks = 1;
	struct sysinfo info;
	void *shm;
	int *cond;
	struct sigaction zig;
	int c, add_wait = -1, is_parent = 1;
#ifdef __cpu_set_t_defined
	int affinity = 0;
	cpu_set_t my_cpu_mask;
#endif

	/* By default we'll use 1/16th of total RAM, rounded
	 * down to the nearest page. */
	if (sysinfo(&info) != 0) {
		perror("sysinfo");
		return 1;
	}

	ram_size = info.totalram / 16;
	ram_size = ram_size & ~(getpagesize() - 1);
	num_nodes = ram_size / sizeof(void *);

	/* Parse command line args */
	while ( (c = getopt(argc, argv, "a:p:n:d:s:t")) != -1) {
		switch (c) {
			case 'p':
				num_forks = atoi(optarg);
				break;
			case 'd':
				ram_size = info.totalram / atoi(optarg);
				ram_size = ram_size & ~(getpagesize() - 1);
				num_nodes = ram_size / sizeof(void *);
				break;
			case 'n':
				num_nodes = atoi(optarg);
				ram_size = num_nodes * sizeof(void *);
				break;
			case 's':
				report_interval = atoi(optarg);
				break;
			case 'a':
				add_wait = atoi(optarg);
				break;
#ifdef __cpu_set_t_defined
			case 't':
				affinity = 1;
				break;
#endif
			default:
				print_help(argv[0]);
				return 0;
		}
	}

	/* Will we exceed half the address space size?  Use 1/4 of it at most.  */
	if (ram_size > ((unsigned long long)1 << ((sizeof(void *) * 8) - 1))) {
		printf("Was going to use %lluKB (%llu nodes) but that's too big.\n",
			ram_size / 1024, num_nodes);
		/* 1 << (bits - 2) is a quarter of the address space; shifting a
		 * value by its full bit width would be undefined behaviour. */
		ram_size = ((unsigned long long)1 << ((sizeof(void *) * 8) - 2));
		num_nodes = ram_size / sizeof(void *);
		printf("Clamping to %lluKB (%llu nodes) instead.\n",
			ram_size / 1024, num_nodes);
	}

	/* Talk about what we're going to do. */
	printf("Going to use %lluKB (%llu nodes).\n", ram_size / 1024,
		num_nodes);
	
	/* Make a shared anonymous map of the RAM */
	shm = mmap(NULL, ram_size, PROT_READ | PROT_WRITE,
		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (shm == MAP_FAILED) {
		perror("mmap");
		return 2;
	}
	printf("mmap region: %p (%llu nodes)\n", shm, num_nodes);

	/* Create an SHM condition variable.  Bogus, I know... */
	cond = mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE,
		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (cond == MAP_FAILED) {
		perror("mmap");
		return 4;
	}
	*cond = 1;

	/* Create a "graph" by populating it with random pointers. */
	printf("Populating nodes...");
	fflush(stdout);
	populate_graph(shm, num_nodes);
	printf("done.\n");

	printf("Creating %lu processes with reports every %lu seconds \
and %d seconds between adding children.\n",
		num_forks, report_interval, add_wait);

	/* Fork off separate processes.  The shared region is shared
	 * across all children.  If we only wanted one thread, we shouldn't
	 * fork anything.  Note that the "cond" mmap is a really crappy
	 * condition variable kludge that works well enough for HERE ONLY. */
	for (c = (add_wait >= 0 ? 0 : 1); c < num_forks; c++) {
		/* Child should wait for the condition and then break. */
		if (!fork()) {
#ifdef __cpu_set_t_defined
			if (affinity) {
				CPU_ZERO(&my_cpu_mask);
				CPU_SET(c, &my_cpu_mask);
				if (0 != sched_setaffinity(0,sizeof(cpu_set_t), &my_cpu_mask)) {
					perror("sched_setaffinity");
				}
			}
#endif

			is_parent = 0;
			while (*cond) {
				usleep(10000);
			}
			break;
		}
	}
	if (is_parent) {
#ifdef __cpu_set_t_defined
		if (affinity) {
			CPU_ZERO(&my_cpu_mask);
			CPU_SET(0, &my_cpu_mask);
			if (0 != sched_setaffinity(0,sizeof(cpu_set_t), &my_cpu_mask)) {
				perror("sched_setaffinity");
			}
		}
#endif
		printf("All threads created.  Launching!\n");
		*cond = 0;
	}

	/* now start the work */
	if (!is_parent) {
start_thread:
		/* Set up the alarm handler to print speed info. */
		memset(&zig, 0x00, sizeof(zig));
		zig.sa_handler = alarm_func;
		sigaction(SIGALRM, &zig, NULL);
		gettimeofday(&last, NULL);
		alarm(report_interval);

		/* Walk the graph. */
		walk_graph(shm);

		/* This function never returns */
	} else {
		/* Start the ramp-up.  The children will never die,
		 * so we don't need to wait() for 'em.
		 */
		while (add_wait != -1) {
			sleep(add_wait);
			if (fork() == 0) {
				/* goto is cheesy, but works. */
				goto start_thread;
			} else {
				printf("Added thread.\n");
			}
		}
		goto start_thread;
	}
	
	return 0;
}
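
Note that the fork loop above pins child c to CPU c, so running with -p larger than the number of online CPUs hands CPU_SET() an out-of-range index and sched_setaffinity() fails with EINVAL (the perror() above fires). A sketch of a wrap-around variant, assuming the online count from sysconf() is acceptable:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

/* Pin child number c, wrapping around the online CPUs so that
 * -p may exceed the core count without sched_setaffinity() failing. */
static void pin_child(long c)
{
	cpu_set_t mask;
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);

	if (ncpus < 1)
		return;                 /* give up quietly and stay unpinned */
	CPU_ZERO(&mask);
	CPU_SET((int)(c % ncpus), &mask);
	if (sched_setaffinity(0, sizeof(mask), &mask) != 0)
		perror("sched_setaffinity");
}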
Code example #6
File: en_tx.c Project: outbackdingo/uBSD
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring, u32 size,
			   u16 stride, int node, int queue_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	uint32_t x;
	int tmp;
	int err;

	ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed allocating TX ring\n");
			return -ENOMEM;
		}
	}

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,					/* any alignment */
	    0,					/* no boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MLX4_EN_TX_MAX_PAYLOAD_SIZE,	/* maxsize */
	    MLX4_EN_TX_MAX_MBUF_FRAGS,		/* nsegments */
	    MLX4_EN_TX_MAX_MBUF_SIZE,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &ring->dma_tag)))
		goto done;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->inline_thold = MAX(MIN_PKT_LEN, MIN(inline_thold, MAX_INLINE));
	mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
	mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);

	/* Allocate the buf ring */
	ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
		 M_WAITOK, &ring->tx_lock.m, queue_idx,
		 priv->tx_ring_num);
	if (ring->br == NULL) {
		en_err(priv, "Failed allocating tx_info ring\n");
		err = -ENOMEM;
		goto err_free_dma_tag;
	}

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = kzalloc_node(tmp, GFP_KERNEL, node);
	if (!ring->tx_info) {
		ring->tx_info = kzalloc(tmp, GFP_KERNEL);
		if (!ring->tx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	/* Create DMA descriptor MAPs */
	for (x = 0; x != size; x++) {
		err = -bus_dmamap_create(ring->dma_tag, 0,
		    &ring->tx_info[x].dma_map);
		if (err != 0) {
			while (x--) {
				bus_dmamap_destroy(ring->dma_tag,
				    ring->tx_info[x].dma_map);
			}
			goto err_info;
		}
	}

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
		 ring->tx_info, tmp);

	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	/* Allocate HW buffers on provided NUMA node */
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_dma_map;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
				    MLX4_RESERVE_BF_QP);
	if (err) {
		en_err(priv, "failed reserving qp for TX ring\n");
		goto err_map;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}
	ring->qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else
		ring->bf_enabled = true;
	ring->queue_index = queue_idx;
	if (queue_idx < priv->num_tx_rings_p_up )
		CPU_SET(queue_idx, &ring->affinity_mask);

	*pring = ring;
	return 0;

err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_dma_map:
	for (x = 0; x != size; x++)
		bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
err_info:
	vfree(ring->tx_info);
err_ring:
	buf_ring_free(ring->br, M_DEVBUF);
err_free_dma_tag:
	bus_dma_tag_destroy(ring->dma_tag);
done:
	kfree(ring);
	return err;
}
Code example #7
/*---------------------------------------------------------------------------*/
static void *worker_thread(void *data)
{
	struct thread_data		*tdata = (struct thread_data *)data;
	struct xio_connection_params	cparams;
	struct xio_iovec_ex		*sglist;
	cpu_set_t			cpuset;
	struct xio_msg			*msg;
	unsigned int			i;

	/* set affinity to thread */

	CPU_ZERO(&cpuset);
	CPU_SET(tdata->affinity, &cpuset);

	pthread_setaffinity_np(tdata->thread_id, sizeof(cpu_set_t), &cpuset);

	/* prepare data for the current thread */
	tdata->pool = msg_pool_alloc(tdata->user_param->queue_depth);

	/* create thread context for the client */
	tdata->ctx = xio_context_create(NULL, tdata->user_param->poll_timeout,
					tdata->affinity);

	memset(&cparams, 0, sizeof(cparams));
	cparams.session			= tdata->session;
	cparams.ctx			= tdata->ctx;
	cparams.conn_idx		= tdata->cid;
	cparams.conn_user_context	= tdata;

	/* connect the session  */
	tdata->conn = xio_connect(&cparams);

	if (tdata->data_len)
		tdata->xbuf = xio_alloc(tdata->data_len);

	for (i = 0;  i < tdata->user_param->queue_depth; i++) {
		/* create transaction */
		msg = msg_pool_get(tdata->pool);
		if (msg == NULL)
			break;

		/* get pointers to internal buffers */
		msg->in.header.iov_len = 0;

		sglist = vmsg_sglist(&msg->in);
		vmsg_sglist_set_nents(&msg->in, 0);

		msg->out.header.iov_len = 0;
		sglist = vmsg_sglist(&msg->out);
		if (tdata->data_len) {
			vmsg_sglist_set_nents(&msg->out, 1);
			sglist[0].iov_base	= tdata->xbuf->addr;
			sglist[0].iov_len	= tdata->xbuf->length;
			sglist[0].mr		= tdata->xbuf->mr;
		} else {
			vmsg_sglist_set_nents(&msg->out, 0);
		}
		msg->user_context = (void *)get_cycles();
		/* send first message */
		if (xio_send_request(tdata->conn, msg) == -1) {
			if (xio_errno() != EAGAIN)
				printf("**** [%p] Error - xio_send_request " \
				       "failed. %s\n",
					tdata->session,
					xio_strerror(xio_errno()));
			msg_pool_put(tdata->pool, msg);
			return 0;
		}
		if (tdata->do_stat)
			tdata->stat.scnt++;
		tdata->tx_nr++;
	}

	/* the default xio supplied main loop */
	xio_context_run_loop(tdata->ctx, XIO_INFINITE);

	/* normal exit phase */

	if (tdata->pool)
		msg_pool_free(tdata->pool);

	if (tdata->xbuf)
		xio_free(&tdata->xbuf);


	/* free the context */
	xio_context_destroy(tdata->ctx);

	return NULL;
}
Code example #8
File: process.c Project: whr4935/apue
int test_control_priority()
{
	int nzero = 0;
	int pri = 0;
	int ret;
	int pri_min, pri_max;
	int policy;
	pthread_attr_t attr;
	pid_t pid;
	struct sched_param s_param;
	cpu_set_t cpu_set;
	int cpu_total;
	int i;
	pthread_t thr;
	unsigned long long count = 0;

#if 0
	// set CPU affinity
	cpu_total = get_nprocs();
	printf("total cpu number: %d\n", cpu_total);

	CPU_ZERO(&cpu_set);
	CPU_SET(0, &cpu_set);
//	CPU_SET(1, &cpu_set);
	ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpu_set);
	if (ret < 0)
		err_sys("sched_setaffinity");

	CPU_ZERO(&cpu_set);
	ret = sched_getaffinity(0, sizeof(cpu_set_t), &cpu_set);
	for (i = 0; i < cpu_total; ++i) {
		if (CPU_ISSET(i, &cpu_set)) {
			printf("current process affinity cpu %d\n", i);
		}
	}
//	printf("sizeof(cpu_set_t) = %d\n", sizeof(cpu_set_t));
#endif
	
#if defined(NZERO)
	nzero = NZERO;
#elif defined(_SC_NZERO)
	nzero = sysconf(_SC_NZERO);
#else
#error NZERO undefined
#endif

	// set the priority of a non-real-time process
//	pri = getpriority(PRIO_PROCESS, 0);
//	printf("current priority: %d\n", pri);

// 	ret = setpriority(PRIO_PROCESS, 0, -1);		// a lower priority value gets a larger share of CPU time
// 	if (ret < 0)
// 		err_sys("setpriority");

//	pri = getpriority(PRIO_PROCESS, 0);
//	printf("current priority: %d\n", pri);

#if 1
	///////////////////////////////////
	// create the test thread
	pthread_mutex_init(&priority_mutex, NULL);
	pthread_cond_init(&priority_cond, NULL);

	ret = pthread_barrier_init(&barrier, NULL, 2);
	if (ret != 0)
		err_exit(ret, "pthread_barrier_init");

	ret = pthread_attr_init(&attr);
	if (ret != 0)
		err_exit(ret, "pthread_attr_init");
	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
	pthread_attr_setschedpolicy(&attr, SCHED_OTHER);
	s_param.__sched_priority = 0;//sched_get_priority_max(SCHED_RR)-11;
	pthread_attr_setschedparam(&attr, &s_param);

	gettimeofday(&end, NULL);
	end.tv_sec += 5;

	ret = pthread_create(&thr, &attr, priority_test_thread, NULL);
	if (ret != 0)
		err_exit(ret, "pthread_create");

	pthread_attr_destroy(&attr);


//	ret= pthread_barrier_wait(&barrier);
//	printf("pthread_barrier_wait return %d\n", ret);
//	printf("main thread continue...\n");

// 	pthread_mutex_lock(&priority_mutex);
// 	notify_signal = 1;
// 	pthread_mutex_unlock(&priority_mutex);
// 	ret = pthread_cond_signal(&priority_cond);
// 	if (ret != 0)
// 		err_exit(ret, "pthread_cond_signal");

	////////////////////////////
	// switch the process to a real-time scheduling policy
	s_param.__sched_priority = sched_get_priority_max(SCHED_RR) - 10;
	ret = sched_setscheduler(0, SCHED_RR, &s_param);
	if (ret < 0)
		err_sys("sched_setscheduler");

	policy = sched_getscheduler(0);
	if (policy < 0)
		err_sys("policy");

	printf("main thread ");
	switch (policy) {
	case SCHED_FIFO:
		printf("policy SCHED_FIFO!\n");
		break;

	case SCHED_RR:
		printf("policy SCHED_RR!\n");
		break;

	case SCHED_OTHER:
		printf("policy SCHED_OTHER!\n");
		break;

	default:
		printf("policy unknown!\n");
		break;
	}

	ret = sched_getparam(0, &s_param);
	if (ret < 0)
		err_sys("sched_getparam");
	printf("main thread priority: %d\n", s_param.__sched_priority);



	///////////////////////////
	printf("main begin test...\n");
	for (;;) {
		++count;
		if (count == 0)
			err_quit("main count wrap");
		if (checktime("main  ", count) < 0)
			break;
	}

	printf("main wait for exit...\n");
	pthread_join(thr, NULL);

	exit(0);
#endif

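	/* NOTE: with the "#if 1" block above ending in exit(0), nothing below
	 * this point is reachable; it is kept as reference code. */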
	////////////////////////////////////
	pri_max = sched_get_priority_max(SCHED_OTHER);		// pri_max and pri_min are both 0; under time-sharing scheduling the nice value drives the priority
	pri_min = sched_get_priority_min(SCHED_OTHER);

	pri_max = sched_get_priority_max(SCHED_RR);
	pri_min = sched_get_priority_min(SCHED_RR);

	pri_max = sched_get_priority_max(SCHED_FIFO);
	pri_min = sched_get_priority_min(SCHED_FIFO);


	gettimeofday(&end, NULL);
	end.tv_sec += 10;
	pid = fork();
	if (pid < 0)
		err_sys("fork");
	else if (pid == 0) {
		policy = sched_getscheduler(0);
		printf("child process policy = %d\n", policy);
		ret = sched_getparam(0, &s_param);
		printf("child prirotiy: %d\n", s_param.__sched_priority);
		s_param.__sched_priority--;
		ret = sched_setparam(0, &s_param);
		if (ret < 0)
			err_sys("sched_setparam");
		ret = sched_getparam(0, &s_param);
		printf("reset child prirotiy: %d\n", s_param.__sched_priority);



// #if 1
// 		ret = setpriority(PRIO_PROCESS, 0, 19);
// 		if (ret < 0)
// 			err_sys("setpriority");
// #else
// 		errno = 0;
// 		if ((pri = nice(19)) == -1 && errno != 0)		// nice() has been superseded by setpriority()
// 			err_sys("nice");
// #endif
// 
// 		errno = 0;
// 		pri = getpriority(PRIO_PROCESS, 0);
// 		if (pri == -1 && errno != 0) {
// 			err_sys("getpriority");
// 		}
// 		printf("child priority: %d\n", pri);

		for (;;) {
			++count;
			if (count == 0)
				err_quit("child count wrap");
			checktime("child ", count);
		}
	} else {
// 		errno = 0;
// 		pri = getpriority(PRIO_PROCESS, 0);
// 		if (pri == -1 && errno != 0) {
// 			err_sys("getpriority");
// 		}
// 		printf("parent priority: %d\n", pri);

		for (;;) {
			++count;
			if (count == 0)
				err_quit("parent count wrap");
			checktime("parent", count);
		}
	}


	return 0;
}
Code example #9
File: shim.c Project: jjeui0308/swift-nio
void CNIOLinux_CPU_SET(int cpu, cpu_set_t *set) {
    CPU_SET(cpu, set);
}
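
CPU_SET() is a macro, so it exports no symbol that Swift can link against; the shim wraps it in a real C function. Companion wrappers in the same style would plausibly look like the sketch below (names other than CNIOLinux_CPU_SET are illustrative, not confirmed swift-nio API):

#define _GNU_SOURCE
#include <sched.h>

/* Clear a CPU set so Swift code can start from an empty mask. */
void CNIOLinux_CPU_ZERO(cpu_set_t *set) {
    CPU_ZERO(set);
}

/* Test membership; returns nonzero if cpu is in the set. */
int CNIOLinux_CPU_ISSET(int cpu, cpu_set_t *set) {
    return CPU_ISSET(cpu, set);
}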
Code example #10
/*---------------------------------------------------------------------------*/
static void *statistics_thread_cb(void *data)
{
	uint64_t		start_time;
	uint64_t		tx_len = hdr_len + data_len;
	double			delta;
	uint64_t		scnt_start = 0;
	uint64_t		scnt_end = 0;
	uint64_t		rtt_start = 0;
	uint64_t		rtt_end = 0;
	uint64_t		min_rtt = -1;
	uint64_t		max_rtt = 0;
	struct session_data	*sess_data = (struct session_data *)data;
	cpu_set_t		cpuset;
	unsigned int		i;

	/* set affinity to thread */

	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);

	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);

	/* makes it hot */
	sleep(1);

	for (i = 0; i < threads_iter; i++) {
		scnt_start += sess_data->tdata[i].stat.scnt;
		rtt_start += sess_data->tdata[i].stat.tot_rtt;
		sess_data->tdata[i].stat.min_rtt  = -1;
		sess_data->tdata[i].stat.max_rtt  = 0;
	}

	/* test period */
	/* start collecting statistics data */
	start_time = get_cycles();
	for (i = 0; i < threads_iter; i++)
		sess_data->tdata[i].do_stat = 1;

	sleep(2);
	/* stop collecting statistics data */
	for (i = 0; i < threads_iter; i++)
		sess_data->tdata[i].do_stat = 0;

	delta = (get_cycles() - start_time)/g_mhz;

	for (i = 0; i < threads_iter; i++) {
		scnt_end += sess_data->tdata[i].stat.scnt;
		rtt_end += sess_data->tdata[i].stat.tot_rtt;
		if (min_rtt > sess_data->tdata[i].stat.min_rtt)
			min_rtt = sess_data->tdata[i].stat.min_rtt;
		if (max_rtt < sess_data->tdata[i].stat.max_rtt)
			max_rtt = sess_data->tdata[i].stat.max_rtt;
	}
	if ( scnt_end != scnt_start) {
		sess_data->avg_lat_us = (rtt_end - rtt_start)/g_mhz;
		sess_data->avg_lat_us /= (scnt_end - scnt_start);

		sess_data->min_lat_us = min_rtt/g_mhz;
		sess_data->max_lat_us = max_rtt/g_mhz;

		sess_data->tps    = ((scnt_end - scnt_start)*USECS_IN_SEC)/delta;
		sess_data->avg_bw = (1.0*sess_data->tps*tx_len/ONE_MB);
	}

	for (i = 0; i < threads_iter; i++)
		sess_data->tdata[i].disconnect = 1;

	return NULL;
}
Code example #11
File: qla_bench.c Project: 6twirl9/qla
int
main(int argc, char *argv[])
{
  QLA_Real sum, *r1;
  QLA_Complex *c1;
  QLA_ColorVector *v1, *v2, *v3, *v4, *v5;
  QLA_ColorVector **vp1, **vp2, **vp3, **vp4;
  QLA_HalfFermion *h1, *h2, **hp1;
  QLA_DiracFermion *d1, *d2, **dp1;
  QLA_ColorMatrix *m1, *m2, *m3, *m4, **mp1;
  double cf0, flop, mem, time1;
  int nmin, nmax, c, nthreads=1;

  nmin = 64;
  if(argc>1) nmin = atoi(argv[1]);
  nmax = 256*1024;
  if(argc>2) nmax = atoi(argv[2]);
  cf0 = 1e9;
  if(argc>3) cf0 *= atof(argv[3]);

  printf("QLA version %s (%i)\n", QLA_version_str(), QLA_version_int());
  printf("QLA_Precision = %c\n", QLA_Precision);
  printf("QLA_Nc = %i\n", QLA_Nc);

#ifdef _OPENMP
  nthreads = omp_get_max_threads();
  printf("OMP threads = %i\n", nthreads);
  printf("omp_get_wtick = %g\n", omp_get_wtick());
#ifdef CPU_ZERO
#pragma omp parallel
  {
    int tid = omp_get_thread_num();
    cpu_set_t set;
    CPU_ZERO(&set);
    CPU_SET(tid, &set);
    sched_setaffinity(0, sizeof(set), &set);
  }
#endif
#endif

  nmin *= nthreads;
  nmax *= nthreads;

  r1 = myalloc(QLA_Real, nmax);
  c1 = myalloc(QLA_Complex, nmax);
  v1 = myalloc(QLA_ColorVector, nmax);
  v2 = myalloc(QLA_ColorVector, nmax);
  v3 = myalloc(QLA_ColorVector, nmax);
  v4 = myalloc(QLA_ColorVector, nmax);
  v5 = myalloc(QLA_ColorVector, nmax);
  vp1 = myalloc(QLA_ColorVector *, nmax);
  vp2 = myalloc(QLA_ColorVector *, nmax);
  vp3 = myalloc(QLA_ColorVector *, nmax);
  vp4 = myalloc(QLA_ColorVector *, nmax);
  h1 = myalloc(QLA_HalfFermion, nmax);
  h2 = myalloc(QLA_HalfFermion, nmax);
  hp1 = myalloc(QLA_HalfFermion *, nmax);
  d1 = myalloc(QLA_DiracFermion, nmax);
  d2 = myalloc(QLA_DiracFermion, nmax);
  dp1 = myalloc(QLA_DiracFermion *, nmax);
  m1 = myalloc(QLA_ColorMatrix, nmax);
  m2 = myalloc(QLA_ColorMatrix, nmax);
  m3 = myalloc(QLA_ColorMatrix, nmax);
  m4 = myalloc(QLA_ColorMatrix, nmax);
  mp1 = myalloc(QLA_ColorMatrix *, nmax);
  //QLA_ColorVector *va[4] = { v2, v3, v4, v5 };
  QLA_ColorVector **vpa[4] = { vp1, vp2, vp3, vp4 };
  QLA_ColorMatrix *ma[4] = { m1, m2, m3, m4 };

  for(int n=nmin; n<=nmax; n*=2) {
    printf("len = %i\n", n);
    printf("len/thread = %i\n", n/nthreads);
    double cf = cf0*nthreads/n;

#include "benchfuncs.c"

  }

  return 0;
}
Code example #12
File: xio_mt_client_stat.c Project: accelio/JXIO
static void *worker_thread(void *data)
{
	struct thread_data	*tdata = data;
	cpu_set_t		cpuset;
	struct xio_session	**sessions = NULL;
	struct xio_context	*ctx;
	struct session_data	*session_data = NULL;
	int  			j = 0, n = 0;
	struct timespec		start, end;
	void 			*loop = NULL;
	double 			*sec =  NULL;
	/* set affinity to thread */

	CPU_ZERO(&cpuset);
	CPU_SET(tdata->affinity, &cpuset);

	pthread_setaffinity_np(tdata->thread_id, sizeof(cpu_set_t), &cpuset);

	/* open default event loop */
	loop = xio_ev_loop_init();
	if (loop == NULL) {
		fprintf(stderr, "Failed to allocate event loop\n");
		return (void*)(-1);
	}

	/* create thread context for the client */
	ctx= xio_ctx_open(NULL, loop, 0);
	if(ctx == NULL) {
		fprintf(stderr, "Failed to allocate thread context\n");
		xio_ev_loop_destroy(&loop);
		return (void*)-1;
	}

	session_data = malloc(tdata->num_sessions*sizeof(struct session_data));
	sessions     = malloc(tdata->num_sessions*sizeof(struct xio_session *));

	if( session_data == NULL || sessions == NULL ) {
		fprintf(stderr, "Allocation failed\n");
		xio_ctx_close(ctx);
		xio_ev_loop_destroy(&loop);
		if(session_data) {
			free(session_data);
		}
		if(sessions) {
			free(sessions);
		}
		return (void*)-1;
	}

	for(n = 0; n < tdata->num_sessions; n++)
	{
		tdata->loop = loop;
		session_data[n].tdata = tdata;
	}

	/* client session attributes */
	struct xio_session_attr attr = {
		&ses_ops, /* callbacks structure */
		NULL,	  /* no need to pass the server private data */
		0
	};

	if(clock_gettime(CLOCK_MONOTONIC, &start)) {
		fprintf(stderr, "clock_gettime() failed, errno = %d\n", errno);
	}

	for (j = 0; j< NUM_ITER; j++)
	{

		for(n = 0; n < tdata->num_sessions; n++)
		{
			sessions[n] = xio_session_open(XIO_SESSION_REQ,
					&attr, tdata->url, 0, 0, &session_data[n]);

			/* connect the session  */
			//fprintf(stderr, "Connect Session\n");
			session_data[n].conn = xio_connect(sessions[n], ctx, 0, &session_data[n]);
		}

		/* the default xio supplied main loop */
		xio_ev_loop_run(loop);
	}

	if(clock_gettime(CLOCK_MONOTONIC, &end)) {
			fprintf(stderr, "clock_gettime() failed, errno = %d\n", errno);
	}

	/* normal exit phase */
	fprintf(stdout, "exit signaled\n");

	/* free the context */
	xio_ctx_close(ctx);

	/* destroy the default loop */
	xio_ev_loop_destroy(&loop);

	sec = malloc(sizeof(double));
	*sec = (double)(((end.tv_sec * 1000000000 + end.tv_nsec) - (start.tv_sec * 1000000000 + start.tv_nsec))/NUM_ITER)/1000000000;
	fprintf(stdout, "THREAD [ %lu ] It took %lf sec for %d sessions\n",
				tdata->thread_id, *sec, tdata->num_sessions);

	free(session_data);
	free(sessions);
	return ((void *)sec);
}
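
Nanosecond bookkeeping like the *sec expression above is easy to get wrong; a small helper, as a sketch, keeps the arithmetic in one place:

#include <time.h>

/* Seconds between two CLOCK_MONOTONIC timestamps. */
static double elapsed_sec(const struct timespec *start,
                          const struct timespec *end)
{
	return (double)(end->tv_sec - start->tv_sec)
	     + (double)(end->tv_nsec - start->tv_nsec) / 1e9;
}

With that, the per-iteration figure simply becomes elapsed_sec(&start, &end) / NUM_ITER.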
Code example #13
File: main.c Project: danyongliu/the_silver_searcher
int main(int argc, char **argv) {
    char **base_paths = NULL;
    char **paths = NULL;
    int i;
    int pcre_opts = PCRE_MULTILINE;
    int study_opts = 0;
    double time_diff;
    worker_t *workers = NULL;
    int workers_len;
    int num_cores;

    set_log_level(LOG_LEVEL_WARN);

    work_queue = NULL;
    work_queue_tail = NULL;
    memset(&stats, 0, sizeof(stats));
    root_ignores = init_ignore(NULL, "", 0);
    out_fd = stdout;
#ifdef USE_PCRE_JIT
    int has_jit = 0;
    pcre_config(PCRE_CONFIG_JIT, &has_jit);
    if (has_jit) {
        study_opts |= PCRE_STUDY_JIT_COMPILE;
    }
#endif

    gettimeofday(&(stats.time_start), NULL);

    parse_options(argc, argv, &base_paths, &paths);
    log_debug("PCRE Version: %s", pcre_version());

#ifdef _WIN32
    {
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        num_cores = si.dwNumberOfProcessors;
    }
#else
    num_cores = (int)sysconf(_SC_NPROCESSORS_ONLN);
#endif

    workers_len = num_cores;
    if (opts.literal) {
        workers_len--;
    }
    if (opts.workers) {
        workers_len = opts.workers;
    }
    if (workers_len < 1) {
        workers_len = 1;
    }

    log_debug("Using %i workers", workers_len);
    done_adding_files = FALSE;
    workers = ag_calloc(workers_len, sizeof(worker_t));
    if (pthread_cond_init(&files_ready, NULL)) {
        die("pthread_cond_init failed!");
    }
    if (pthread_mutex_init(&print_mtx, NULL)) {
        die("pthread_mutex_init failed!");
    }
    if (pthread_mutex_init(&stats_mtx, NULL)) {
        die("pthread_mutex_init failed!");
    }
    if (pthread_mutex_init(&work_queue_mtx, NULL)) {
        die("pthread_mutex_init failed!");
    }

    if (opts.casing == CASE_SMART) {
        opts.casing = is_lowercase(opts.query) ? CASE_INSENSITIVE : CASE_SENSITIVE;
    }

    if (opts.literal) {
        if (opts.casing == CASE_INSENSITIVE) {
            /* Search routine needs the query to be lowercase */
            char *c = opts.query;
            for (; *c != '\0'; ++c) {
                *c = (char)tolower(*c);
            }
        }
        generate_alpha_skip(opts.query, opts.query_len, alpha_skip_lookup, opts.casing == CASE_SENSITIVE);
        find_skip_lookup = NULL;
        generate_find_skip(opts.query, opts.query_len, &find_skip_lookup, opts.casing == CASE_SENSITIVE);
        if (opts.word_regexp) {
            init_wordchar_table();
            opts.literal_starts_wordchar = is_wordchar(opts.query[0]);
            opts.literal_ends_wordchar = is_wordchar(opts.query[opts.query_len - 1]);
        }
    } else {
        if (opts.casing == CASE_INSENSITIVE) {
            pcre_opts |= PCRE_CASELESS;
        }
        if (opts.word_regexp) {
            char *word_regexp_query;
            ag_asprintf(&word_regexp_query, "\\b%s\\b", opts.query);
            free(opts.query);
            opts.query = word_regexp_query;
            opts.query_len = strlen(opts.query);
        }
        compile_study(&opts.re, &opts.re_extra, opts.query, pcre_opts, study_opts);
    }

    if (opts.search_stream) {
        search_stream(stdin, "");
    } else {
        for (i = 0; i < workers_len; i++) {
            workers[i].id = i;
            int rv = pthread_create(&(workers[i].thread), NULL, &search_file_worker, &(workers[i].id));
            if (rv != 0) {
                die("error in pthread_create(): %s", strerror(rv));
            }
#if defined(HAVE_PTHREAD_SETAFFINITY_NP) && defined(USE_CPU_SET)
            cpu_set_t cpu_set;
            CPU_ZERO(&cpu_set);
            CPU_SET(i % num_cores, &cpu_set);
            rv = pthread_setaffinity_np(workers[i].thread, sizeof(cpu_set), &cpu_set);
            if (rv != 0) {
                die("error in pthread_setaffinity_np(): %s", strerror(rv));
            }
            log_debug("Thread %i set to CPU %i", i, i);
#else
            log_debug("No CPU affinity.");
#endif
        }
        for (i = 0; paths[i] != NULL; i++) {
            log_debug("searching path %s for %s", paths[i], opts.query);
            symhash = NULL;
            ignores *ig = init_ignore(root_ignores, "", 0);
            search_dir(ig, base_paths[i], paths[i], 0);
            cleanup_ignore(ig);
        }
        pthread_mutex_lock(&work_queue_mtx);
        done_adding_files = TRUE;
        pthread_cond_broadcast(&files_ready);
        pthread_mutex_unlock(&work_queue_mtx);
        for (i = 0; i < workers_len; i++) {
            if (pthread_join(workers[i].thread, NULL)) {
                die("pthread_join failed!");
            }
        }
    }

    if (opts.stats) {
        gettimeofday(&(stats.time_end), NULL);
        time_diff = ((long)stats.time_end.tv_sec * 1000000 + stats.time_end.tv_usec) -
                    ((long)stats.time_start.tv_sec * 1000000 + stats.time_start.tv_usec);
        time_diff /= 1000000;

        printf("%ld matches\n%ld files searched\n%ld bytes searched\n%f seconds\n", stats.total_matches, stats.total_files, stats.total_bytes, time_diff);
    }

    if (opts.pager) {
        pclose(out_fd);
    }
    cleanup_options();
    pthread_cond_destroy(&files_ready);
    pthread_mutex_destroy(&work_queue_mtx);
    pthread_mutex_destroy(&stats_mtx);
    pthread_mutex_destroy(&print_mtx);
    cleanup_ignore(root_ignores);
    free(workers);
    for (i = 0; paths[i] != NULL; i++) {
        free(paths[i]);
        free(base_paths[i]);
    }
    free(base_paths);
    free(paths);
    if (find_skip_lookup) {
        free(find_skip_lookup);
    }
    return !opts.match_found;
}
Code example #14
File: main.c Project: RushOnline/iperf
int
main(int argc, char **argv)
{
    struct iperf_test *test;

    // XXX: Setting the process affinity requires root on most systems.
    //      Is this a feature we really need?
#ifdef TEST_PROC_AFFINITY
    /* didn't seem to work.... */
    /*
     * increasing the priority of the process to minimise packet generation
     * delay
     */
    int rc = setpriority(PRIO_PROCESS, 0, -15);

    if (rc < 0) {
        perror("setpriority:");
        fprintf(stderr, "setting priority to valid level\n");
        rc = setpriority(PRIO_PROCESS, 0, 0);
    }
    
    /* setting the affinity of the process  */
    cpu_set_t cpu_set;
    int affinity = 0;   /* must be a valid CPU index: CPU_SET() with -1 is undefined */
    int ncores = 1;

    if (sched_getaffinity(0, sizeof(cpu_set_t), &cpu_set) != 0)
        perror("couldn't get affinity:");

    if ((ncores = sysconf(_SC_NPROCESSORS_CONF)) <= 0)
        err("sysconf: couldn't get _SC_NPROCESSORS_CONF");

    CPU_ZERO(&cpu_set);
    CPU_SET(affinity, &cpu_set);
    if (sched_setaffinity(0, sizeof(cpu_set_t), &cpu_set) != 0)
        err("couldn't change CPU affinity");
#endif

    test = iperf_new_test();
    if (!test)
        iperf_errexit(NULL, "create new test error - %s", iperf_strerror(i_errno));
    iperf_defaults(test);	/* sets defaults */

    /* This main program doesn't use SIGALRM, so the iperf API may use it. */
    iperf_set_test_may_use_sigalrm(test, 1);

    if (iperf_parse_arguments(test, argc, argv) < 0) {
        iperf_err(test, "parameter error - %s", iperf_strerror(i_errno));
        fprintf(stderr, "\n");
        usage_long();
        exit(1);
    }

    if (run(test) < 0)
        iperf_errexit(test, "error - %s", iperf_strerror(i_errno));

    iperf_free_test(test);

    return 0;
}
Code example #15
File: nginx.c Project: cfsego/tengine
static char *
ngx_core_module_init_conf(ngx_cycle_t *cycle, void *conf)
{
    ngx_core_conf_t  *ccf = conf;

#if (NGX_HAVE_CPU_AFFINITY)
    ngx_int_t         i, n;
    CPU_SET_T        *mask;
#endif

    if (!ccf->worker_processes) {
        ccf->worker_processes = NGX_CONF_UNSET;
    }

    ngx_conf_init_value(ccf->worker_processes, ngx_ncpu);

    ngx_conf_init_value(ccf->daemon, 1);
    ngx_conf_init_value(ccf->master, 1);
    ngx_conf_init_msec_value(ccf->timer_resolution, 0);
    ngx_conf_init_value(ccf->debug_points, 0);

#if (NGX_HAVE_CPU_AFFINITY)

    if (ccf->cpu_affinity_n == 0) {

        ccf->cpu_affinity = NULL;
        n = ngx_ncpu - 1;

        if (ngx_ncpu > 0 && ngx_ncpu <= CPU_SETSIZE) {

            mask = ngx_palloc(cycle->pool,
                              ccf->worker_processes * sizeof(CPU_SET_T));
            if (mask == NULL) {
                return NGX_CONF_ERROR;
            }

            ccf->cpu_affinity_n = ccf->worker_processes;
            ccf->cpu_affinity = mask;

            /* RR for cpu assign */
            for (i = 0; i < ccf->worker_processes; i++) {
                CPU_ZERO(&mask[i]);
                CPU_SET(n, &mask[i]);
                if (--n < 0) {
                    n = ngx_ncpu - 1;
                }
            }

        } else {
            ccf->cpu_affinity_n = 0;
            ccf->cpu_affinity = NULL;
        }
    }

    if (ccf->cpu_affinity_n
        && ccf->cpu_affinity_n != 1
        && ccf->cpu_affinity_n != (ngx_uint_t) ccf->worker_processes)
    {
        ngx_log_error(NGX_LOG_WARN, cycle->log, 0,
                      "the number of \"worker_processes\" is not equal to "
                      "the number of \"worker_cpu_affinity\" masks, "
                      "using last mask for remaining worker processes");
    }

#endif

#if (NGX_THREADS)

    ngx_conf_init_value(ccf->worker_threads, 0);
    ngx_threads_n = ccf->worker_threads;
    ngx_conf_init_size_value(ccf->thread_stack_size, 2 * 1024 * 1024);

#endif


    if (ccf->pid.len == 0) {
        ngx_str_set(&ccf->pid, NGX_PID_PATH);
    }

    if (ngx_conf_full_name(cycle, &ccf->pid, 0) != NGX_OK) {
        return NGX_CONF_ERROR;
    }

    ccf->oldpid.len = ccf->pid.len + sizeof(NGX_OLDPID_EXT);

    ccf->oldpid.data = ngx_pnalloc(cycle->pool, ccf->oldpid.len);
    if (ccf->oldpid.data == NULL) {
        return NGX_CONF_ERROR;
    }

    ngx_memcpy(ngx_cpymem(ccf->oldpid.data, ccf->pid.data, ccf->pid.len),
               NGX_OLDPID_EXT, sizeof(NGX_OLDPID_EXT));


#if !(NGX_WIN32)

    if (ccf->user == (uid_t) NGX_CONF_UNSET_UINT && geteuid() == 0) {
        struct group   *grp;
        struct passwd  *pwd;

        ngx_set_errno(0);
        pwd = getpwnam(NGX_USER);
        if (pwd == NULL) {
            ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno,
                          "getpwnam(\"" NGX_USER "\") failed");
            return NGX_CONF_ERROR;
        }

        ccf->username = NGX_USER;
        ccf->user = pwd->pw_uid;

        ngx_set_errno(0);
        grp = getgrnam(NGX_GROUP);
        if (grp == NULL) {
            ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno,
                          "getgrnam(\"" NGX_GROUP "\") failed");
            return NGX_CONF_ERROR;
        }

        ccf->group = grp->gr_gid;
    }


    if (ccf->lock_file.len == 0) {
        ngx_str_set(&ccf->lock_file, NGX_LOCK_PATH);
    }

    if (ngx_conf_full_name(cycle, &ccf->lock_file, 0) != NGX_OK) {
        return NGX_CONF_ERROR;
    }

    {
    ngx_str_t  lock_file;

    lock_file = cycle->old_cycle->lock_file;

    if (lock_file.len) {
        lock_file.len--;

        if (ccf->lock_file.len != lock_file.len
            || ngx_strncmp(ccf->lock_file.data, lock_file.data, lock_file.len)
               != 0)
        {
            ngx_log_error(NGX_LOG_EMERG, cycle->log, 0,
                          "\"lock_file\" could not be changed, ignored");
        }

        cycle->lock_file.len = lock_file.len + 1;
        lock_file.len += sizeof(".accept");

        cycle->lock_file.data = ngx_pstrdup(cycle->pool, &lock_file);
        if (cycle->lock_file.data == NULL) {
            return NGX_CONF_ERROR;
        }

    } else {
        cycle->lock_file.len = ccf->lock_file.len + 1;
        cycle->lock_file.data = ngx_pnalloc(cycle->pool,
                                      ccf->lock_file.len + sizeof(".accept"));
        if (cycle->lock_file.data == NULL) {
            return NGX_CONF_ERROR;
        }

        ngx_memcpy(ngx_cpymem(cycle->lock_file.data, ccf->lock_file.data,
                              ccf->lock_file.len),
                   ".accept", sizeof(".accept"));
    }
    }

#endif

    return NGX_CONF_OK;
}
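
When no worker_cpu_affinity is configured, the block above hands out CPUs to workers round-robin starting from the highest index (ngx_ncpu - 1) and wrapping, so extra workers reuse CPUs. A tiny standalone sketch of the same assignment, with illustrative counts:

#include <stdio.h>

/* Round-robin CPU assignment as in the block above: worker 0 gets the
 * highest CPU index, counting down and wrapping. */
int main(void)
{
	int ncpu = 4;      /* illustrative */
	int workers = 6;   /* illustrative */
	int n = ncpu - 1;
	int i;

	for (i = 0; i < workers; i++) {
		printf("worker %d -> cpu %d\n", i, n);
		if (--n < 0)
			n = ncpu - 1;
	}
	return 0;
}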
Code example #16
File: Thread.cpp Project: Edimartin/edk-source
bool Thread::start(classID (threadFunction)(classID), classID parameter){
    //kill the previous thread
    this->kill();

    //test if the function is true
    if(threadFunction){
        //WINDOWS 32
#ifdef WIN32
        DWORD flag;
        this->threadID = CreateThread(NULL, //
                                      (DWORD)NULL,        //
                                      edkThreadFunc,     // thread function
                                      (void*)this,        // thread parameter
                                      (DWORD)NULL,        //
                                      &flag);
        //test if create the thread
        if(this->threadID!=(HANDLE)0u){
#elif defined WIN64
        //WINDOWS 64
        DWORD flag;
        this->threadID = CreateThread(NULL, //
                                      (DWORD)NULL,        //
                                      edkThreadFunc,     // thread function
                                      (void*)this,        // thread parameter
                                      (DWORD)NULL,        //
                                      &flag);
        //test if create the thread
        if(this->threadID!=(HANDLE)0u){
#elif defined __linux__
        //LINUX
        pthread_attr_t attr;
        pthread_attr_init(&attr);
        pthread_create(&threadID,
                       &attr,
                       edkThreadFunc,
                       (void*)this);
        //test if create the thread
        if(this->threadID!=(pthread_t)0u){
#elif defined __APPLE__
        //APPLE
#endif
            //copy the function
            this->threadFunc=threadFunction;
            //copy the parameter
            this->funcParameter=parameter;
            //then return true;
            return true;
        }
    }

    //clean
    this->cleanThread();
    //else he clean the func
    this->threadFunc=NULL;
    return false;
}

bool Thread::start(classID (threadFunction)(classID)){
    return this->start(threadFunction,(void*)NULL);
}

bool Thread::startIn(classID (threadFunction)(classID), classID parameter, edk::uint32 core){

    //kill the previous thread
    this->kill();

    //test if the function is true and if the core exist
    if(threadFunction && core<this->cores){
        //WINDOWS 32
#ifdef WIN32
        DWORD flag;
        this->threadID = CreateThread(NULL, //
                                      (DWORD)NULL,        //
                                      edkThreadFunc,     // thread function
                                      (void*)this,        // thread parameter
                                      (DWORD)NULL,        //
                                      &flag);
        //test if create the thread
        if(this->threadID!=(HANDLE)0u){
            DWORD_PTR mask = core;
            SetThreadAffinityMask(this->threadID, mask);
#elif defined WIN64
        //WINDOWS 64
        DWORD flag;
        this->threadID = CreateThread(NULL, //
                                      (DWORD)NULL,        //
                                      edkThreadFunc,     // thread function
                                      (void*)this,        // thread parameter
                                      (DWORD)NULL,        //
                                      &flag);
        //test if create the thread
        if(this->threadID!=(HANDLE)0u){
            DWORD_PTR mask = core;
            SetThreadAffinityMask(this->threadID, mask);
#elif defined __linux__
        //LINUX
        pthread_attr_t attr;
        CPU_ZERO(&this->cpus);
        CPU_SET(core, &this->cpus);
        //start the attribute
        pthread_attr_init(&attr);
        //set the core on the attribute
        pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &this->cpus);

        //set affinity
        pthread_create(&threadID,
                       &attr,
                       edkThreadFunc,
                       (void*)this);
        //test if create the thread
        if(this->threadID!=(pthread_t)0u){
#elif defined __APPLE__
        //APPLE
#endif
            //copy the function
            this->threadFunc=threadFunction;
            //copy the parameter
            this->funcParameter=parameter;
            //then return true;
            return true;
        }
    }

    //clean
    this->cleanThread();
    //else he clean the func
    this->threadFunc=NULL;
    return false;
}

bool Thread::startIn(classID (threadFunction)(classID), edk::uint32 core){
    return this->startIn(threadFunction, NULL, core);
}

//change the threadCore
bool Thread::changeCore(edk::uint32 core){
    //test if have the core
    if(core<this->cores){
#ifdef WIN32
        //test if create the thread
        if(this->threadID!=(HANDLE)0u){
            DWORD_PTR mask = core;
            if(SetThreadAffinityMask(this->threadID, mask)){
                return true;
#elif defined WIN64
        //WINDOWS 64
        //test if create the thread
        if(this->threadID!=(HANDLE)0u){
            DWORD_PTR mask = core;
            if(SetThreadAffinityMask(this->threadID, mask)){
                return true;
#elif defined __linux__
        //test if have the thread
        if(this->threadID!=(pthread_t)0u){
            CPU_ZERO(&this->cpus);
            CPU_SET(core, &this->cpus);
            //set the core
            if(!pthread_setaffinity_np(this->threadID,sizeof(cpu_set_t), &this->cpus)){
                return true;
            }
#elif defined __APPLE__
        //APPLE
#endif
        }
    }
return false;
}

bool Thread::runFunc(){
    if(this->threadFunc){
        //test if there is a parameter
        if(this->funcParameter){
            //run the function with the parameter
            this->threadFunc((void*)this->funcParameter);
        }
        else{
            //run the function without a parameter
            this->threadFunc((void*)NULL);
        }
        //clean the function
        this->threadFunc=NULL;
        this->funcParameter=NULL;

        //return true;
        return true;
    }
    //else return false
    return false;
}

bool Thread::isAlive(){
    //WINDOWS 32
#ifdef WIN32
    if(this->threadID){
        //Then wait for the thread
        if(WaitForSingleObject(threadID, 0u) == WAIT_TIMEOUT){
            //thread still alive return true
            return true;
        }
    }
#elif defined WIN64
    //WINDOWS 64
    if(this->threadID){
        //Then wait for the thread
        if(WaitForSingleObject(threadID, 0u) == WAIT_TIMEOUT){
            //thread still alive return true
            return true;
        }
    }
#elif defined __linux__
    //LINUX
    if(this->threadID){
        //Then wait for the thread
        if(pthread_kill(this->threadID, 0u)!=3u){ // 3 == ESRCH: no such thread
            //thread still alive return true
            return true;
        }
    }
#elif defined __APPLE__
    //APPLE
#endif
    //else return false;
    return false;
}

bool Thread::waitEnd(uint64 milliseconds){
    //WINDOWS 32
#ifdef WIN32
    if(this->threadID){
        //Then wait for the thread
        if(WaitForSingleObject(threadID, milliseconds) == WAIT_TIMEOUT){
            //thread still alive then
            return true;
        }
    }
#elif defined WIN64
    //WINDOWS 64
    if(this->threadID){
        //Then wait for the thread
        if(WaitForSingleObject(threadID, milliseconds) == WAIT_TIMEOUT){
            //thread still alive then
            return true;
        }
    }
#elif defined __linux__//Linux
    //first he sleep
    usleep(milliseconds*1000);
    //test if thread still alive
    if(this->isAlive()){
        //
        return true;
    }
#elif __APPLE__
    //APPLE
#endif

    //clean
    this->cleanThread();

    //else return false;
    return false;
}

bool Thread::waitEnd(){
    bool ret=false;
    //WINDOWS 32
#ifdef WIN32
    if(this->threadID){
        //Then wait for the thread
        WaitForSingleObject(threadID, INFINITE);
        //then return true
        ret = true;
    }
#elif defined WIN64
    //WINDOWS 64
    if(this->threadID){
        //Then wait for the thread
        WaitForSingleObject(threadID, INFINITE);
        //then return true
        ret = true;
    }
#elif defined __linux__
    //LINUX
    if(this->threadID){
        //then wait the end of the thread
        pthread_join(this->threadID,NULL);
        //then return true
        ret = true;
    }
#elif defined __APPLE__
    //APPLE
#endif
    //clean
    this->cleanThread();

    //return true or false
    return ret;
}

bool Thread::kill(){
    bool ret = false;
    //WINDOWS 32
#ifdef WIN32
    if(this->threadID){
        //Finish the thread
        TerminateThread(this->threadID
                        ,(DWORD)NULL
                        );
        ret=true;
    }
    //clean ID
    this->threadID=(HANDLE)0u;
#elif defined WIN64
    //WINDOWS 64
    if(this->threadID){
        //Finish the thread
        TerminateThread(this->threadID
                        ,(DWORD)NULL
                        );
        ret=true;
    }
#elif defined __linux__
    //LINUX
    if(this->threadID){
        //Cancel the thread
        pthread_cancel(this->threadID);
        //pthread_attr_destroy(&attr);
        //Finish the thread
        ret=true;
    }
#endif
    //clean
    this->cleanThread();

    //return true or false
    return ret;
}

void Thread::killThisThread(){
    //WINDOWS 32
#ifdef WIN32
    //Finish the thread
    TerminateThread(NULL
                    ,(DWORD)NULL
                    );
#elif defined WIN64
    //WINDOWS 64
    //Finish the thread
    TerminateThread(NULL
                    ,(DWORD)NULL
                    );
#elif defined __linux__
    //LINUX
    //Exit the process
    pthread_exit(NULL);
#elif defined __APPLE__
    //APPLE
    //Exit the process
    pthread_exit(NULL);
#endif
}

void Thread::killAllThreads(){
    //WINDOWS 32
#ifdef WIN32
    /*
    //Finish the thread
    TerminateThread(NULL
                    ,(DWORD)NULL
                    );
    */
#elif defined WIN64
    //WINDOWS 64
    /*
    //Finish the thread
    TerminateThread(NULL
                    ,(DWORD)NULL
                    );
    */
#elif defined __linux__
    //LINUX
    //Exit the process
    pthread_cancel((pthread_t)NULL);
#elif defined __APPLE__
    //APPLE
    //Exit the process
    pthread_cancel((pthread_t)NULL);
#endif
}
#if __x86_64__ || __ppc64__
//get the thread id
edk::uint64 Thread::getThisThreadID(){
#if WIN64
    return GetCurrentThreadId();
#elif __linux__
    return pthread_self();
#endif
}
#else
//get the thread id
edk::uint32 Thread::getThisThreadID(){
#if WIN32
    return GetCurrentThreadId();
#elif __linux__
    return pthread_self();
#endif
}
#endif

//return the thread core
edk::uint32 Thread::getThisThreadCore(){
#if defined(WIN32) || defined(WIN64)
    return 0;
#elif __linux__
    return sched_getcpu();
#endif
}

edk::uint32 Thread::numberOfCores(){
    return edk::multi::Thread::cores;
}
}
Code example #17
File: nginx.c Project: cfsego/tengine
static char *
ngx_set_cpu_affinity(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
#if (NGX_HAVE_CPU_AFFINITY)
    ngx_core_conf_t  *ccf = conf;

    u_char            ch;
    CPU_SET_T        *mask;
    ngx_str_t        *value;
    ngx_uint_t        i, j, n;

    if (ccf->cpu_affinity || ccf->cpu_affinity_n) {
        return "is duplicate";
    }

    value = cf->args->elts;

    if (ngx_strcasecmp((u_char *) "auto", value[1].data) == 0) {

        ccf->cpu_affinity = NGX_CONF_UNSET_PTR;

        return NGX_CONF_OK;
    }

    if (ngx_strcasecmp((u_char *) "off", value[1].data) == 0) {

        ccf->cpu_affinity_n = 1;

        return NGX_CONF_OK;
    }

    mask = ngx_palloc(cf->pool, (cf->args->nelts - 1) * sizeof(CPU_SET_T));
    if (mask == NULL) {
        return NGX_CONF_ERROR;
    }

    ccf->cpu_affinity_n = cf->args->nelts - 1;
    ccf->cpu_affinity = mask;

    for (n = 1; n < cf->args->nelts; n++) {

        if (value[n].len > CPU_SETSIZE) {
            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                         "\"worker_cpu_affinity\" supports up to %d CPUs only",
                         CPU_SETSIZE);
            return NGX_CONF_ERROR;
        }

        CPU_ZERO(&mask[n - 1]);

        for (i = 0, j = value[n].len - 1; i < value[n].len; i++, j--) {

            ch = value[n].data[i];

            if (ch == ' ' || ch == '0') {
                continue;
            }

            if (ch == '1') {
                CPU_SET(j, &mask[n - 1]);
                continue;
            }

            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                          "invalid character \"%c\" in \"worker_cpu_affinity\"",
                          ch);
            return NGX_CONF_ERROR;
        }
    }

#else

    ngx_conf_log_error(NGX_LOG_WARN, cf, 0,
                       "\"worker_cpu_affinity\" is not supported "
                       "on this platform, ignored");
#endif

    return NGX_CONF_OK;
}
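
The parsing loop above walks the mask string left to right while j counts bit positions down from the most significant character, so "0101" enables CPUs 0 and 2. A standalone sketch of the same right-to-left mapping without the nginx types (parse_affinity is an illustrative name, not tengine API):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <string.h>

/* parse a "worker_cpu_affinity"-style bitmask, e.g. "0101" -> CPUs 0 and 2 */
static int parse_affinity(const char *s, cpu_set_t *set)
{
    size_t len = strlen(s);
    CPU_ZERO(set);
    for (size_t i = 0; i < len; i++) {
        size_t j = len - 1 - i;         /* bit position, LSB on the right */
        if (s[i] == '1')
            CPU_SET(j, set);
        else if (s[i] != '0' && s[i] != ' ')
            return -1;                  /* invalid character */
    }
    return 0;
}

int main(void)
{
    cpu_set_t set;
    if (parse_affinity("0101", &set) == 0)
        printf("cpu0=%d cpu2=%d\n", CPU_ISSET(0, &set), CPU_ISSET(2, &set));
    return 0;
}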
コード例 #18
0
ファイル: CpuUsage.cpp プロジェクト: mwjcom/FaultInjection
// gcc main.c -o runatcpu -lpthread
void * CCpuUsage::ThreadProc_Linux( void* pvParameter )
{
    CPUThreadPar pTempPar = *(CPUThreadPar *)pvParameter;
    unsigned int uiWhichCPU = pTempPar.iCpuNo;
    CCpuUsage * pThis = pTempPar.pCpuUsage;
	//unsigned int uiCPUTotal = 0;
	unsigned int j          = 0; 
	unsigned int i          = 0;

	// CPU affinity: on Linux a process (or thread) can be bound to one
	// or more processors. Which CPU(s) it may run on is decided through
	// a CPU set, represented by the cpu_set_t structure.
	cpu_set_t sCPUSet;

	//// sysconf() returns the current value of a configurable system
	//// variable (subject to system limits). _SC_NPROCESSORS_CONF is the
	//// number of configured CPUs, numbered from 0 to
	//// sysconf( _SC_NPROCESSORS_CONF ) - 1.
	//uiCPUTotal = sysconf( _SC_NPROCESSORS_CONF );
	//printf( "Notice: current system has %i CPU(s). ~ thread: %lu\n",
	//	uiCPUTotal,
	//	pthread_self() );

	// Clear the CPU set
	CPU_ZERO( &sCPUSet );
	// Add the given CPU number to the set
	CPU_SET( uiWhichCPU, &sCPUSet );
	// To remove a CPU number from the set:
	// CPU_CLR( uiWhichCPU, &sCPUSet );

	// Bind the calling thread to the specified CPU.
	// Note: pthread_setaffinity_np() returns 0 on success and an error
	// number on failure -- it does not return -1.
	if ( 0 != pthread_setaffinity_np( pthread_self(),    // thread ID
		sizeof( sCPUSet ), // size of the CPU set structure
		&sCPUSet ) )       // pointer to the CPU set
	{
		printf( "!!! Error: bind current thread to specified CPU failed !!! ~ thread: %lu\n",
			pthread_self() );
	}
	else
	{
		int busyTime = 10;   //busy interval (ms)
		int idleTime = 0;    //idle interval (ms)
		long timeuse = 0;    //time actually spent in the busy loop (ms)
		//int cpucoe = 0;      //CPU usage ratio
		struct timeval tpstart,tpend;
		while(1)
		{
			if (true == pThis->m_bIsStop)
			{
				break;
			}
			timeuse = 0;                 //reset, or the busy loop runs only once
			gettimeofday(&tpstart,NULL); //current system time
			while (timeuse <= busyTime)
			{
				gettimeofday(&tpend,NULL);
				timeuse = 1000000 * (tpend.tv_sec - tpstart.tv_sec) + (tpend.tv_usec - tpstart.tv_usec);
				timeuse /= 1000;               //convert to ms
			}

			idleTime = ((100 * busyTime) / pThis->m_dRatio) - busyTime;

			sleep(idleTime / 1000);    //idleTime is in ms, sleep() takes seconds
		}
	}
	return NULL;    //a pthread start routine must return a value
}
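
The loop above approximates a target load by spinning for busyTime ms and then sleeping for the complementary share of the period: for a target ratio r in percent, idle = busy * (100 / r - 1). A self-contained sketch of that duty cycle at a fixed 50% target (constants are illustrative):

#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

int main(void)
{
    const long busy_ms = 10, ratio = 50;
    const long idle_ms = (100 * busy_ms) / ratio - busy_ms;   /* 10 ms */
    struct timeval t0, t1;

    for (int n = 0; n < 1000; n++) {
        long elapsed_ms = 0;
        gettimeofday(&t0, NULL);
        while (elapsed_ms <= busy_ms) {                       /* busy phase */
            gettimeofday(&t1, NULL);
            elapsed_ms = (t1.tv_sec - t0.tv_sec) * 1000
                       + (t1.tv_usec - t0.tv_usec) / 1000;
        }
        usleep(idle_ms * 1000);                               /* idle phase */
    }
    return 0;
}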
コード例 #19
0
ファイル: cpu-miner.c プロジェクト: acimir/cpu-jackpotcoin
static inline void affine_to_cpu(int id, int cpu) {
	cpu_set_t set;
	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	/* note: sizeof(set), not sizeof(&set) -- the original passed the
	   size of a pointer, truncating the mask */
	sched_setaffinity(0, sizeof(set), &set);
}
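
With the size argument fixed, the binding can be read back with sched_getaffinity(2); a quick standalone check (assumes the machine has at least two CPUs, so that CPU 1 exists):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
    cpu_set_t set;
    CPU_ZERO(&set);
    CPU_SET(1, &set);
    if (sched_setaffinity(0, sizeof(set), &set) != 0)
        return 1;
    CPU_ZERO(&set);
    sched_getaffinity(0, sizeof(set), &set);   /* read the mask back */
    printf("bound to cpu1: %d\n", CPU_ISSET(1, &set));
    return 0;
}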
コード例 #20
0
ファイル: testpi-4.c プロジェクト: ystk/debian-ltp
/*
 * Test pthread creation at different thread priorities.
 */
int main(int argc, char* argv[]) {
  pthread_mutexattr_t mutexattr;
  int i, retc, protocol, nopi = 0;
  cpu_set_t mask;
  CPU_ZERO(&mask);
  CPU_SET(0, &mask);
  setup();

  rt_init("h",parse_args,argc,argv);

  if ((retc = pthread_barrier_init(&barrier, NULL, 5))) {
    printf("pthread_barrier_init failed: %s\n", strerror(retc));
    exit(retc);
  }

  retc = sched_setaffinity(0, sizeof(mask), &mask);
  if (retc < 0) {
     printf("Main Thread: Can't set affinity: %d %s\n", retc, strerror(retc));
     exit(-1);
  }
  for (i=0;i<argc;i++) {
    if (strcmp(argv[i],"nopi") == 0) nopi = 1;
  }

  printf("Start %s\n", argv[0]);

  glob_mutex = malloc(sizeof(pthread_mutex_t));
  if (glob_mutex == NULL) {
     printf("Malloc failed\n");
     exit(errno);
  }

  if (!nopi) {
    if (pthread_mutexattr_init(&mutexattr) != 0) {
      printf("Failed to init mutexattr\n");
    }
    if (pthread_mutexattr_setprotocol(&mutexattr, PTHREAD_PRIO_INHERIT) != 0) {
      printf("Can't set protocol prio inherit\n");
    }
    if (pthread_mutexattr_getprotocol(&mutexattr, &protocol) != 0) {
      printf("Can't get mutexattr protocol\n");
    } else {
      printf("protocol in mutexattr is %d\n", protocol);
    }
    if ((retc = pthread_mutex_init(glob_mutex, &mutexattr)) != 0) {
      printf("Failed to init mutex: %d\n", retc);
    }
  }

  create_other_thread(func_nonrt, NULL);
  create_rr_thread(func_rt, NULL, 20);
  create_rr_thread(func_rt, NULL, 30);
  create_rr_thread(func_rt, NULL, 40);
  create_rr_thread(func_noise, NULL, 40);

  printf("Joining threads\n");
  join_threads();
  printf("Done\n");

  return 0;
}
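
The nopi flag above only skips the priority-inheritance setup; the mutexattr sequence itself reduces to the following minimal sketch (error checking trimmed; build with -pthread):

#include <pthread.h>
#include <stdio.h>

int main(void)
{
    pthread_mutexattr_t attr;
    pthread_mutex_t     mutex;
    int                 protocol;

    pthread_mutexattr_init(&attr);
    /* a lower-priority holder is boosted to the highest waiter's priority */
    pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
    pthread_mutexattr_getprotocol(&attr, &protocol);
    printf("protocol = %d (PTHREAD_PRIO_INHERIT = %d)\n",
           protocol, PTHREAD_PRIO_INHERIT);
    pthread_mutex_init(&mutex, &attr);
    pthread_mutexattr_destroy(&attr);
    pthread_mutex_destroy(&mutex);
    return 0;
}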
コード例 #21
0
ファイル: FactoryProcess.c プロジェクト: lsfress/swoole-src
/**
 * worker main loop
 */
static int swFactoryProcess_worker_loop(swFactory *factory, int worker_pti)
{
	swFactoryProcess *object = factory->object;
	swServer *serv = factory->ptr;

	struct
	{
		long pti;
		swEventData req;
	} rdata;
	int n;

	int pipe_rd = serv->workers[worker_pti].pipe_worker;

#ifdef HAVE_CPU_AFFINITY
	if (serv->open_cpu_affinity == 1)
	{
		cpu_set_t cpu_set;
		CPU_ZERO(&cpu_set);
		CPU_SET(worker_pti % SW_CPU_NUM, &cpu_set);
		if (0 != sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
		{
			swWarn("pthread_setaffinity_np set failed");
		}
	}
#endif

	//signal init
	swWorker_signal_init();

	//worker_id
	SwooleWG.id = worker_pti;

#ifndef SW_USE_RINGBUFFER
	int i;
	//for open_eof_check and open_length_check
	if (serv->open_eof_check || serv->open_length_check)
	{
		SwooleWG.buffer_input = sw_malloc(sizeof(swString*) * serv->reactor_num);
		if (SwooleWG.buffer_input == NULL)
		{
			swError("malloc for SwooleWG.buffer_input failed.");
			return SW_ERR;
		}
		for (i = 0; i < serv->reactor_num; i++)
		{
			SwooleWG.buffer_input[i] = swString_new(serv->buffer_input_size);
			if (SwooleWG.buffer_input[i] == NULL)
			{
				swError("buffer_input init failed.");
				return SW_ERR;
			}
		}
	}
#endif

	if (serv->ipc_mode == SW_IPC_MSGQUEUE)
	{
		//preemptive mode: all workers share the same queue type
		if (serv->dispatch_mode == SW_DISPATCH_QUEUE)
		{
			//must add 1 here (the message type must be non-zero)
			rdata.pti = serv->worker_num + 1;
		}
		else
		{
			//must add 1 so this worker's message type is non-zero
			rdata.pti = worker_pti + 1;
		}
	}
	else
	{
		SwooleG.main_reactor = sw_malloc(sizeof(swReactor));
		if (SwooleG.main_reactor == NULL)
		{
			swError("[Worker] malloc for reactor failed.");
			return SW_ERR;
		}
		if (swReactor_auto(SwooleG.main_reactor, SW_REACTOR_MAXEVENTS) < 0)
		{
			swError("[Worker] create worker_reactor failed.");
			return SW_ERR;
		}
		swSetNonBlock(pipe_rd);
		SwooleG.main_reactor->ptr = serv;
		SwooleG.main_reactor->add(SwooleG.main_reactor, pipe_rd, SW_FD_PIPE);
		SwooleG.main_reactor->setHandle(SwooleG.main_reactor, SW_FD_PIPE, swFactoryProcess_worker_onPipeReceive);

#ifdef HAVE_SIGNALFD
		if (SwooleG.use_signalfd)
		{
			swSignalfd_setup(SwooleG.main_reactor);
		}
#endif
	}

	if (factory->max_request < 1)
	{
		SwooleWG.run_always = 1;
	}
	else
	{
		worker_task_num = factory->max_request;
		worker_task_num += swRandom(worker_pti);
	}

	//worker start
	swServer_worker_onStart(serv);

	if (serv->ipc_mode == SW_IPC_MSGQUEUE)
	{
		while (SwooleG.running > 0)
		{
			n = object->rd_queue.out(&object->rd_queue, (swQueue_data *)&rdata, sizeof(rdata.req));
			if (n < 0)
			{
				if (errno == EINTR)
				{
					if (SwooleG.signal_alarm)
					{
						swTimer_select(&SwooleG.timer);
					}
				}
				else
				{
					swWarn("[Worker]rd_queue[%ld]->out wait failed. Error: %s [%d]", rdata.pti, strerror(errno), errno);
				}
				continue;
			}
			swFactoryProcess_worker_excute(factory, &rdata.req);
		}
	}
	else
	{
		struct timeval timeo;
		timeo.tv_sec = SW_REACTOR_TIMEO_SEC;
		timeo.tv_usec = SW_REACTOR_TIMEO_USEC;
		SwooleG.main_reactor->wait(SwooleG.main_reactor, &timeo);
	}

	//worker shutdown
	swServer_worker_onStop(serv);

	swTrace("[Worker]max request");
	return SW_OK;
}
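
The affinity block above spreads workers round-robin across cores via worker_pti % SW_CPU_NUM. The mapping on its own, as a sketch with SW_CPU_NUM replaced by a sysconf(3) lookup (bind_worker is an illustrative name, not swoole API):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

/* bind the calling process to core (worker_id % ncpus), round-robin */
static int bind_worker(int worker_id)
{
    long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
    cpu_set_t set;
    if (ncpus < 1)
        ncpus = 1;
    CPU_ZERO(&set);
    CPU_SET(worker_id % ncpus, &set);
    return sched_setaffinity(0, sizeof(set), &set);
}

int main(void)
{
    for (int id = 0; id < 4; id++) {
        if (fork() == 0) {               /* child acts as worker */
            bind_worker(id);
            printf("worker %d on cpu %d\n", id, sched_getcpu());
            _exit(0);
        }
    }
    while (wait(NULL) > 0)               /* reap the workers */
        ;
    return 0;
}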