Example No. 1
void llamadaHilo(int socket_fd){
	char buf[BUF_SIZE];
	int lectura;

	if(mostrarInfo) printf("Socket Operativo: %d, \t CPU: %d\n", socket_fd, sched_getcpu());

	int i;
	int paquetesParaAtender = MAX_PACKS/NTHREADS;

	// FTRACE marker
	if(enabledTrace) write(marker_fd, "MITRACE UDP: Nuevo Thread\n", 26);

	for(i = 0; i < paquetesParaAtender; i++) {
    if(enabledTrace) write(marker_fd, "MITRACE UDP: Comienza el read del socket\n", 41);
		//lectura = recv(socket_fd, buf, BUF_SIZE, 0);
		lectura = read(socket_fd, buf, BUF_SIZE);
		if(lectura <= 0) {
			fprintf(stderr, "Error en el read del socket (%d)\n", lectura);
			exit(1);
		}
		if(first_pack==0) {
			pthread_mutex_lock(&lock);
			if(first_pack == 0) {
				if(mostrarInfo)	printf("got first pack\n");
				first_pack = 1;
				// Measure start time
				gettimeofday(&dateInicio, NULL);
			}
			pthread_mutex_unlock(&lock);
		}
	}

	if(mostrarInfo) printf("Fin Socket Operativo: %d, \t CPU: %d\n", socket_fd, sched_getcpu());
}
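The worker above pulls MAX_PACKS/NTHREADS packets off its socket and tags its trace output with sched_getcpu(). For reference, a minimal self-contained sketch (hypothetical names, no socket I/O, not from the project above) of how such a worker is typically launched with pthread_create() and where sched_getcpu() is read:

#define _GNU_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <sched.h>

#define NTHREADS 4

/* Worker: report which CPU the thread is currently scheduled on. */
static void *worker(void *arg)
{
	int id = *(int *)arg;
	printf("worker %d running on CPU %d\n", id, sched_getcpu());
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];
	int ids[NTHREADS];

	for (int i = 0; i < NTHREADS; i++) {
		ids[i] = i;
		pthread_create(&tid[i], NULL, worker, &ids[i]);
	}
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}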
Example No. 2
void* viterbi_stream_thread_loop(void* argst)
{
	int i, execcount = 0;
	pthr_info_t* args = (pthr_info_t*) argst;
	DATA_STREAM *dstream = args->dstream;
	
#ifdef _GNU_SOURCE
	printf("THR %d running on cpu %d\n", args->thrid, sched_getcpu());
#endif

	for (i = 0; 1; i++)
	{	
		execcount++;
		/* spin (yielding the CPU) until the master advances synccontrol to this iteration */
		while (dstream->synccontrol != execcount) sched_yield();

//		while (syncflags[args->thrid] == 0) sched_yield(); 	syncflags[args->thrid] = 0;
		
//		sem_wait(&semsynch[args->thrid]);

		tprintf("THR %d entering\n", args->thrid);

		viterbi_stream_word_partitioned(dstream, NULL, args->thrid);
	}

#ifdef _GNU_SOURCE
	printf("THR %d running on cpu %d\n", args->thrid, sched_getcpu());
#endif
	return (void*) 0;
}
Example No. 3
void set_cpu_affinity(int cpu) {
	int ret;
	cpu_set_t cpuset;

	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	pprintf("current CPU %d\n", sched_getcpu());
	ret = sched_setaffinity(0, sizeof(cpuset), &cpuset);
	if (ret == -1)
		err("sched_setaffinity");
	pprintf("current CPU %d\n", sched_getcpu());
}
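set_cpu_affinity() pins the calling thread with sched_setaffinity(0, ...) and prints sched_getcpu() before and after to make the migration visible. A standalone sketch of the same pattern (Linux-only; names are illustrative, not from the project above):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sched.h>

/* Pin the calling thread to one CPU and verify with sched_getcpu(). */
static void pin_and_check(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	if (sched_setaffinity(0, sizeof(set), &set) == -1) {
		perror("sched_setaffinity");
		exit(EXIT_FAILURE);
	}
	/* The kernel migrates the thread as part of the call, so this
	 * normally reports the requested CPU. */
	printf("now on CPU %d (requested %d)\n", sched_getcpu(), cpu);
}

int main(void)
{
	pin_and_check(0);
	return 0;
}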
Example No. 4
File: pr13.c Project: kura-pl/priry
void *thread_func_1(void *ptr){
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	/* pthread_setaffinity_np() returns 0 on success or a positive error
	 * number, so compare against 0 rather than checking for -1 */
	if (pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask) != 0)
		perror("pthread_setaffinity_np");
	printf("Policzylem %f na CPU %d\n", waste_time(10000), sched_getcpu()); /* "Computed %f on CPU %d" */

	CPU_ZERO(&mask);
	CPU_SET(1, &mask);
	if (pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask) != 0)
		perror("pthread_setaffinity_np");
	printf("Policzylem %f na CPU %d\n", waste_time(10000), sched_getcpu());

	return NULL;
}
Example No. 5
resource_allocation_key ring_allocation_logic::get_res_key_by_logic()
{
	resource_allocation_key key = DEFAULT_RING_KEY;

	switch (m_ring_allocation_logic) {
	case RING_LOGIC_PER_INTERFACE:
		key = 0;
		break;
	case RING_LOGIC_PER_SOCKET:
		key = m_fd;
		break;
	case RING_LOGIC_PER_THREAD:
		key = pthread_self();
		break;
	case RING_LOGIC_PER_CORE:
	case RING_LOGIC_PER_CORE_ATTACH_THREADS:
		key = sched_getcpu();
		break;
	BULLSEYE_EXCLUDE_BLOCK_START
	default:
		// not supposed to get here
		ral_logdbg("non-valid ring logic = %d", m_ring_allocation_logic);
		break;
	BULLSEYE_EXCLUDE_BLOCK_END
	}

	return key;
}
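For RING_LOGIC_PER_CORE the key is simply the current CPU number, so all threads on one core map to the same ring. A minimal sketch of that per-core lookup idea (hypothetical names, not this library's API), with a fallback in case sched_getcpu() fails:

#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>

#define MAX_RINGS 64

struct ring { int id; /* ... per-core resource state ... */ };
static struct ring rings[MAX_RINGS];

/* Select a per-core resource keyed by the CPU the caller runs on. */
static struct ring *ring_for_this_core(void)
{
	int cpu = sched_getcpu();
	if (cpu < 0)		/* sched_getcpu() failed: fall back to slot 0 */
		cpu = 0;
	return &rings[cpu % MAX_RINGS];
}

int main(void)
{
	printf("using ring %ld\n", (long)(ring_for_this_core() - rings));
	return 0;
}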
Example No. 6
int main(int argc, const char* argv[]) {
    constexpr unsigned num_threads = 4;
    std::mutex iomutex;
    std::vector<std::thread> threads(num_threads);

    for(unsigned i=0;i<num_threads;i++) {
        threads[i] = std::thread([&iomutex, i] {
            std::this_thread::sleep_for(std::chrono::milliseconds(20));
            while(1) {
                {
                    std::lock_guard<std::mutex> iolock(iomutex);
                    std::cout<<"Thread #"<<i<<": on CPU"<<sched_getcpu()<<"\n";
                }
                std::this_thread::sleep_for(std::chrono::milliseconds(900));
            }
        });

        cpu_set_t cpuset;
        CPU_ZERO(&cpuset);
        CPU_SET(i, &cpuset);

        int rc = pthread_setaffinity_np(threads[i].native_handle(), sizeof(cpu_set_t), &cpuset);

        if(rc != 0) {
            std::cerr<< "Error calling pthread_setaffinity_np: "<<rc<<"\n";
        }
    }

    for(auto &t : threads) {
        t.join();
    }

    return 0;
}
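The same per-thread pinning can be done with plain pthreads. A small C sketch (hypothetical names; assumes the machine has at least NUM_THREADS CPUs) that, like the example above, sets each new thread's affinity from the creating thread and lets the thread report its CPU afterwards:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <pthread.h>
#include <sched.h>

#define NUM_THREADS 4

static void *report_cpu(void *arg)
{
	int i = *(int *)arg;
	usleep(20 * 1000);	/* give the creator time to set the affinity */
	printf("Thread #%d: on CPU %d\n", i, sched_getcpu());
	return NULL;
}

int main(void)
{
	pthread_t t[NUM_THREADS];
	int ids[NUM_THREADS];

	for (int i = 0; i < NUM_THREADS; i++) {
		cpu_set_t cpuset;

		ids[i] = i;
		pthread_create(&t[i], NULL, report_cpu, &ids[i]);

		/* pin thread i to CPU i from the creating thread */
		CPU_ZERO(&cpuset);
		CPU_SET(i, &cpuset);
		if (pthread_setaffinity_np(t[i], sizeof(cpu_set_t), &cpuset) != 0)
			fprintf(stderr, "pthread_setaffinity_np failed for thread %d\n", i);
	}
	for (int i = 0; i < NUM_THREADS; i++)
		pthread_join(t[i], NULL);
	return 0;
}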
Example No. 7
void *test(void *data)
{
  /* derive a per-thread seed from the current CPU so rand_r() streams differ */
  unsigned int mySeed = seed + sched_getcpu();

  long myOps = operations / nb_threads;
  long val = -1;
  int op;

  while (myOps > 0) {
    op = rand_r(&mySeed) % 100;
    if (op < update) {
      if (val == -1) {
        /* Add random value */  
        val = (rand_r(&mySeed) % range) + 1;
        if(set_add(val) == 0) {
          val = -1;
        }
      } else {
        /* Remove random value */
        set_remove(val);
        val = -1;
      }
    } else {
      /* Look for random value */
      long tmp = (rand_r(&mySeed) % range) + 1;
      set_contains(tmp);
    }

    myOps--;
  }

  return NULL;
}
Example No. 8
void* HiccupsInfo::runthread(void* vp) {
  ThrdStart* ts = (ThrdStart*)vp;
  int id = ts->thrdid_;
  int cpu;

  runoncpu(conf.cpus_[id]);
  if ((cpu = sched_getcpu()) < 0) {
    perror("sched_getcpu");
    exit(1);
  }
  if (cpu != conf.cpus_[id]) {
    fprintf(stderr, "Thread %d: tid: %d, running on wrong cpu: %d, expected: %d\n",
        id, gettid(), cpu, conf.cpus_[id]);
    exit(1);
  }
  unsigned long sz = HPGSZ(sizeof(HiccupsInfo));

  fprintf(stdout, "thread#: %3d tid: %d size %lu cpu[%d]: %d\n",
      id, gettid(), sz, id, conf.cpus_[id]);
  HiccupsInfo::hudatap_[id] = ts->hudata_ = (HiccupsInfo*)memzalloc(sz);
  ts->hudata_->cpuid_ = conf.cpus_[id];
  ts->hudata_->id_ = id;
  ts->hudata_->bins_ = (HUbin*)memzalloc(conf.bins_ * sizeof(bins_[0]));

  ts->rv_ = ts->hudata_->run();
  if (id)
    pthread_exit(0);
  return 0;
}
Example No. 9
	//----------------------------------------------------------------------------------
	//
	//----------------------------------------------------------------------------------
	void Profiler_Imp::Start(int id)
	{
		Profile::Ptr profile = nullptr;
		for (auto& x : m_profiles)
		{
			if (x->GetID() == id)
			{
				profile = x;
			}
		}

		if (profile == nullptr)
		{
			profile = make_shared<Profile>(id);
			m_profiles.push_back(profile);
		}

		profile->GetCurrent()->SetStartTime(asd::GetTime());

#if _WIN32
		profile->GetCurrent()->SetProcessorNumber(GetCurrentProcessorNumber());
#elif defined(__APPLE__)
		// sched_getcpu doesn't seem to exist here, so this is used as a substitute. Replace it with something better if one is available.
		profile->GetCurrent()->SetProcessorNumber(
			std::hash<std::thread::id>()(std::this_thread::get_id()));
#else
		profile->GetCurrent()->SetProcessorNumber(sched_getcpu());
#endif
	}
Example No. 10
static apr_status_t sononblock(int sd)
{
#ifndef BEOS
#ifdef HAVE_MTCP
    int cpu = sched_getcpu();
    if(mtcp_setsock_nonblock(g_mctx[cpu], sd)<0)
        return errno;
#else
    int fd_flags;

    fd_flags = fcntl(sd, F_GETFL, 0);
#if defined(O_NONBLOCK)
    fd_flags |= O_NONBLOCK;
#elif defined(O_NDELAY)
    fd_flags |= O_NDELAY;
#elif defined(FNDELAY)
    fd_flags |= FNDELAY;
#else
#error Please teach APR how to make sockets non-blocking on your platform.
#endif
    if (fcntl(sd, F_SETFL, fd_flags) == -1) {
        return errno;
    }
#endif
#else
    int on = 1;
    if (setsockopt(sd, SOL_SOCKET, SO_NONBLOCK, &on, sizeof(int)) < 0)
        return errno;
#endif /* BEOS */
    return APR_SUCCESS;
}
Example No. 11
void uv_inject(page_desc_t      *pd,
		page_desc_t      *pdbegin,
		page_desc_t      *pdend,
		unsigned long    pages,
		unsigned long    addr,
		unsigned long    addrend,
		unsigned int     pagesize,
		unsigned long    mattr,
		unsigned long    nodeid,
		unsigned long    paddr,
		char             *pte_str,
		unsigned long    nodeid_start,
		unsigned long    mattr_start,
		unsigned long    addr_start,
		int              mce_opt)
{
	int count = 0;
	eid.cpu = sched_getcpu();

	for (pd = pdbegin, pdend = pd + pages; pd < pdend && addr < addrend; pd++, addr += pagesize) {
		if (pd->flags & PD_HOLE) {
			pagesize = pd->pte;
			mattr = 0;
			nodeid = -1;
		} else {
			nodeid = get_pnodeid(*pd);
			paddr = get_paddr(*pd);
			if (nodeid == INVALID_NODE)
				nodeid = 0;

			mattr = get_memory_attr(*pd);
			pagesize = get_pagesize(*pd);
			if (mattr && paddr) {
				if ((pd_total / 2) == count) {
					sprintf(pte_str, "  0x%016lx  ", pd->pte);
					printf("\t[%012lx] -> 0x%012lx on %s %3s  %s%s\n",
							addr, paddr, idstr(), nodestr(nodeid),
							pte_str, get_memory_attr_str(nodeid, mattr));
					/* Set a known value at the memory location for
					 * recovery before injecting. */
					memset((void *)addr, 'A', pagesize);
					injecteddata = (char *)addr;
					printf("Data:%x\n", *injecteddata);
					eid.addr = paddr;
					eid.cpu = nodeid;
					break;	/* only allow injection once for now */
				}
			}
		}
		count++;
	} 
	if (delay){
		printf("Enter char to inject..");
		getchar();
	}	
	if(!manual){
		inject_uc(eid.addr, 0 /*int notrigger*/);
	}
}
Example No. 12
static void randroutine(const void *const arg)
{
	const idargument *const ia = (idargument *)arg;
	const workset *const ws = ia->tp->extra;
	const runconfig *const rc = ia->tp->rc;
	
	const unsigned id = ia->id;
	const unsigned sz = rc->size;

	const unsigned l = sz;
	const unsigned m = sz;
	const unsigned n = sz;

	const unsigned tr = tilerows;
	const unsigned tc = tilecols;

	const joblayout al = definejob(rc, id, l, m, tr, tc);
	const joblayout bl = definejob(rc, id, m, n, tc, tr);

	eltype *const a = ws->a + al.baseoffset / sizeof(eltype);
	eltype *const b = ws->b + bl.baseoffset / sizeof(eltype);

	matfill(id, al.absolutebaserow, a, al.baserow, al.nrows, m, tc,
		elrand);

	matfill(id * 5, bl.absolutebaserow, b, bl.baserow, bl.nrows, n, tr,
		elrand); 

	printf("rand %03u with %u rows is done on core %d\n", id, al.nrows,
		sched_getcpu());
}
Example No. 13
static void multroutine(const void *const arg)
{
	const idargument *const ia = (idargument *)arg;
	const workset *const ws = ia->tp->extra;
	const runconfig *const rc = ia->tp->rc;
	
	const unsigned id = ia->id;
	const unsigned sz = rc->size;

	const unsigned l = sz;
	const unsigned m = sz;
	const unsigned n = sz;

	const unsigned tr = tilerows;
	const unsigned tc = tilecols;

	const joblayout al = definejob(rc, id, l, m, tr, tc);
	const joblayout rl = definejob(rc, id, l, n, tr, tr);

	const eltype *const a = ws->a + al.baseoffset / sizeof(eltype);
	eltype *const r = ws->r + rl.baseoffset / sizeof(eltype);

	matmul(a, ws->b, al.baserow, al.nrows, m, n, r);

	printf("mult %03u with %u rows is done on core %d\n", id, al.nrows,
		sched_getcpu());
}
Example No. 14
/**
 *
 * @return the key that is part of a unique id in rings map
 */
uint64_t ring_allocation_logic::calc_res_key_by_logic()
{
	uint64_t res_key = 0;
	switch (m_res_key.get_ring_alloc_logic()) {
	case RING_LOGIC_PER_INTERFACE:
		res_key = 0;
		if (safe_mce_sys().tcp_ctl_thread > CTL_THREAD_DISABLE)
			res_key = 1;
		break;
	case RING_LOGIC_PER_IP:
		res_key = m_source.m_ip;
		break;
	case RING_LOGIC_PER_SOCKET:
		res_key = m_source.m_fd;
		break;
	case RING_LOGIC_PER_USER_ID:
		res_key = m_res_key.get_user_id_key();
		break;
	case RING_LOGIC_PER_THREAD:
		res_key = pthread_self();
		break;
	case RING_LOGIC_PER_CORE:
	case RING_LOGIC_PER_CORE_ATTACH_THREADS:
		res_key = sched_getcpu();
		break;
	BULLSEYE_EXCLUDE_BLOCK_START
	default:
		// not supposed to get here
		ral_logdbg("non-valid ring logic = %d", m_res_key.get_ring_alloc_logic());
		break;
	BULLSEYE_EXCLUDE_BLOCK_END
	}
	return res_key;
}
Example No. 15
int cpu_manager::reserve_cpu_for_thread(pthread_t tid, int suggested_cpu /* = NO_CPU */)
{
	lock();
	int cpu = g_n_thread_cpu_core;
	if (cpu != NO_CPU) { //already reserved
		unlock();
		return cpu;
	}

	cpu_set_t cpu_set;
	CPU_ZERO(&cpu_set);

	int ret = pthread_getaffinity_np(tid, sizeof(cpu_set_t), &cpu_set);
	if (ret) {
		unlock();
		__log_err("pthread_getaffinity_np failed for tid=%lu, ret=%d (errno=%d %m)", tid, ret, errno);
		return -1;
	}

	int avail_cpus = CPU_COUNT(&cpu_set);
	if (avail_cpus == 0) {
		unlock();
		__log_err("no cpu available for tid=%lu", tid);
		return -1;
	}

	if (avail_cpus == 1) { //already attached
		for (cpu = 0; cpu < MAX_CPU && !CPU_ISSET(cpu, &cpu_set); cpu++) {}
	} else { //need to choose one cpu to attach to
		int min_cpu_count = -1;
		for (int i = 0, j = 0; i < MAX_CPU && j < avail_cpus; i++) {
			if (!CPU_ISSET(i, &cpu_set)) continue;
			j++;
			if (min_cpu_count < 0 || m_cpu_thread_count[i] < min_cpu_count) {
				min_cpu_count = m_cpu_thread_count[i];
				cpu = i;
			}
		}
		if (suggested_cpu >= 0
			&& CPU_ISSET(suggested_cpu, &cpu_set)
			&& m_cpu_thread_count[suggested_cpu] <= min_cpu_count + 1 ) {
			cpu = suggested_cpu;
		}
		CPU_ZERO(&cpu_set);
		CPU_SET(cpu, &cpu_set);
		__log_dbg("attach tid=%lu running on cpu=%d to cpu=%d", tid, sched_getcpu(), cpu);
		ret = pthread_setaffinity_np(tid, sizeof(cpu_set_t), &cpu_set);
		if (ret) {
			unlock();
			__log_err("pthread_setaffinity_np failed for tid=%lu to cpu=%d, ret=%d (errno=%d %m)", tid, cpu, ret, errno);
			return -1;
		}
	}

	g_n_thread_cpu_core = cpu;
	if (cpu > NO_CPU && cpu < MAX_CPU)
		m_cpu_thread_count[cpu]++;
	unlock();
	return cpu;
}
Example No. 16
int odp_thread_init_local(odp_thread_type_t type)
{
	int id;
	int cpu;

	odp_spinlock_lock(&thread_globals->lock);
	id = alloc_id(type);
	odp_spinlock_unlock(&thread_globals->lock);

	if (id < 0) {
		ODP_ERR("Too many threads\n");
		return -1;
	}

	cpu = sched_getcpu();

	if (cpu < 0) {
		ODP_ERR("getcpu failed\n");
		return -1;
	}

	thread_globals->thr[id].thr  = id;
	thread_globals->thr[id].cpu  = cpu;
	thread_globals->thr[id].type = type;

	this_thread = &thread_globals->thr[id];
	return 0;
}
Example No. 17
static inline void
laysakura_log(char* s)
{
  struct timespec tp;
  clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &tp);
  fprintf(stderr, "%d.%09ld CPU:%d %s\n", (int)tp.tv_sec, (long int)tp.tv_nsec, sched_getcpu(), s);
}
Example No. 18
void counter_incr(struct counter *c)
{
	int cpu = sched_getcpu();

	nassert(cpu < c->cpus);
	c->per_cpu_counter[cpu]++;
}
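A per-CPU counter like this keeps writers on separate cache lines, but reading it means summing every slot, and an increment may land in a "stale" slot if the thread migrates right after sched_getcpu(). A self-contained sketch of both sides (illustrative re-implementation with a bounds check, not this project's code):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sched.h>
#include <sys/sysinfo.h>

struct counter {
	int cpus;
	unsigned long *per_cpu_counter;
};

static void counter_incr(struct counter *c)
{
	int cpu = sched_getcpu();
	if (cpu >= 0 && cpu < c->cpus)	/* guard against failure / too-small array */
		c->per_cpu_counter[cpu]++;
}

/* Read side: the total is the sum over every per-CPU slot. */
static unsigned long counter_read(const struct counter *c)
{
	unsigned long sum = 0;
	for (int i = 0; i < c->cpus; i++)
		sum += c->per_cpu_counter[i];
	return sum;
}

int main(void)
{
	struct counter c;
	c.cpus = get_nprocs();
	c.per_cpu_counter = calloc(c.cpus, sizeof(*c.per_cpu_counter));

	for (int i = 0; i < 1000; i++)
		counter_incr(&c);
	printf("total = %lu\n", counter_read(&c));
	free(c.per_cpu_counter);
	return 0;
}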
Example No. 19
ThreadLocation ThreadLocation_create() {
	ThreadLocation tl;
	tl.thread_id = omp_get_thread_num();
	tl.cpu_id = sched_getcpu();
	MPI_Comm_rank(MPI_COMM_WORLD, &(tl.rank));
	gethostname(tl.hostname, length_of_hostname);
	return tl;
}
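ThreadLocation_create() combines the OpenMP thread id, the CPU from sched_getcpu(), the MPI rank and the hostname. A reduced OpenMP-only sketch (hypothetical; MPI rank and hostname left out, compile with -fopenmp) that gathers the first two:

#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>
#include <omp.h>

int main(void)
{
	#pragma omp parallel
	{
		int tid = omp_get_thread_num();	/* OpenMP thread id */
		int cpu = sched_getcpu();	/* CPU the thread is currently on */
		printf("OpenMP thread %d on CPU %d\n", tid, cpu);
	}
	return 0;
}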
Example No. 20
/*
Get the cpu/core on which the calling thread is currently running
parameters: none
return: cpu/core
*/
int get_thread_cpu()
{
  int idxcpu=0;

  idxcpu = sched_getcpu();		
 
  return idxcpu;
}
Example No. 21
extern "C" int32_t CoreLibNative_SchedGetCpu()
{
#if HAVE_SCHED_GETCPU
    return sched_getcpu();
#else
    return -1;
#endif
}
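On glibc, sched_getcpu() is a thin wrapper around the getcpu(2) system call, so when HAVE_SCHED_GETCPU is not defined a raw-syscall fallback is sometimes an option. A Linux-only sketch (hypothetical wrapper, not part of the library above):

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Hypothetical fallback: call getcpu(2) directly when sched_getcpu()
 * is not provided by the C library. */
static int current_cpu(void)
{
#if defined(SYS_getcpu)
	unsigned int cpu = 0;
	if (syscall(SYS_getcpu, &cpu, NULL, NULL) == -1)
		return -1;	/* kernel too old or call failed */
	return (int)cpu;
#else
	return -1;		/* no way to tell on this platform */
#endif
}

int main(void)
{
	printf("current CPU: %d\n", current_cpu());
	return 0;
}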
Example No. 22
/**
 * tries to determine on which cpu the program is being run
 */
static int get_cpu()
{
    int cpu=-1;
    #if (defined(linux) || defined(__linux__)) && defined (SCHED_GETCPU)
        cpu = sched_getcpu();
    #endif
    return cpu;
}
Example No. 23
unsigned gpr_cpu_current_cpu(void) {
  int cpu = sched_getcpu();
  if (cpu < 0) {
    gpr_log(GPR_ERROR, "Error determining current CPU: %s\n", strerror(errno));
    return 0;
  }
  return (unsigned)cpu;
}
Example No. 24
/* Called by pthread_create() */
void* start_thread(int* rank)
{
  _thread_id = *rank;

  gsoc_setaffinity(*rank);
  co_vp_init(); /* Necessary to set initial value for "co_curr__" in pcl.c.
                   Without this, SEGV would happen because
                   swapcontext(co_curr__->context, co_next->context)
                   is called in pcl.c internally. */
  if (*rank == 0)
    fprintf(stderr, "Starting Master Thread on CPU%d. Scheduler is %p\n", sched_getcpu(), _workers[_thread_id].scheduler_task);
  else
    fprintf(stderr, "Starting Slave Thread on CPU%d. Scheduler is %p\n", sched_getcpu(), _workers[_thread_id].scheduler_task);

  co_call(_workers[_thread_id].scheduler_task);

  return NULL;
}
Example No. 25
void counter_incr(struct counter *c)
{
	int cpu = 1;
#ifdef __linux__
	cpu = sched_getcpu();
#endif
	nassert(cpu < c->cpus);
	c->per_cpu_counter[cpu]++;
}
Example No. 26
/*
* Get the core where the current thread is running
* return the core
*/
int hw_my_core()
{
  int core;

  // hwloc can't detect the thread's core - use the Linux-specific sched_getcpu() instead
  core = sched_getcpu();

  return core;
}
Example No. 27
static dynarray_t *do_create(unsigned long elem_size,
                             unsigned long alloc_grain,
                             unsigned long elems_nr,
                             int numa)
{
	struct dynarray *da;
	int node = 0;
	if (numa) {
		int cpu = sched_getcpu();
		/* Numa-aware allocation */
		if (cpu < 0) {
			perror("dynarray_create: sched_getcpu");
			exit(1);
		}

		node = numa_node_of_cpu(cpu);
		if (node < 0) {
			perror("dynarray_create: numa_node_of_cpu");
			exit(1);
		}

		da = numa_alloc_onnode(sizeof(*da), node);
	} else {
		da = malloc(sizeof(*da));
	}

	if ( !da ) {
		fprintf(stderr, "dynarray_create: malloc\n");
		exit(1);
	}
	
	da->numa = numa;
	da->next_idx = 0;
	da->elem_size = elem_size;
	if (elems_nr <= alloc_grain) {
	    da->elems_nr = alloc_grain;
	} else {
	    unsigned long rem = elems_nr % alloc_grain;
	    da->elems_nr = elems_nr;
	    if (rem)
	        da->elems_nr += alloc_grain - rem;
	}
	da->alloc_grain = alloc_grain;

	if (numa) {
		da->elems = numa_alloc_onnode(elem_size*da->elems_nr, node);
	} else {
		da->elems = malloc(elem_size*da->elems_nr);
	}

	if ( !da->elems ){
		fprintf(stderr, "dynarray_create: malloc\n");
		exit(1);
	}

	return da;
}
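The NUMA path above depends on libnuma (numa_node_of_cpu(), numa_alloc_onnode()), so the program must link with -lnuma and should check numa_available() first. A condensed sketch of the same allocation idea (hypothetical names, not the dynarray code):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sched.h>
#include <numa.h>	/* link with -lnuma */

/* Allocate a buffer on the NUMA node of the CPU the caller runs on,
 * falling back to malloc() when libnuma is unavailable. */
static void *alloc_local(size_t size)
{
	if (numa_available() < 0)
		return malloc(size);

	int cpu = sched_getcpu();
	int node = (cpu >= 0) ? numa_node_of_cpu(cpu) : -1;
	if (node < 0)
		return malloc(size);

	return numa_alloc_onnode(size, node);
}

int main(void)
{
	void *buf = alloc_local(4096);
	printf("allocation %s\n", buf ? "ok" : "failed");
	/* A real version would remember which allocator was used so it can
	 * call numa_free() or free() accordingly; omitted to keep the sketch short. */
	return 0;
}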
Example No. 28
File: main.c Project: adcastel/GLT
void final_func(void *arguments) {

#ifdef VERBOSE

    printf("#ULT: %d Thread: %d (CPU: %d)\n", (int)arguments,glt_get_thread_num(),sched_getcpu());

#endif


}
Example No. 29
// Get the number of the current processor
uint32_t GCToOSInterface::GetCurrentProcessorNumber()
{
#if HAVE_SCHED_GETCPU
    int processorNumber = sched_getcpu();
    assert(processorNumber != -1);
    return processorNumber;
#else
    return 0;
#endif
}
Example No. 30
static long dvfs_set_freq(int32_t frequency, int32_t cpu) {
#ifdef VERBOSE
    fprintf(stderr,"adapting 1 frequency to %" PRId32 " %" PRId32 "\n",frequency, cpu);
#endif
    assert(frequency);
    if (cpu < 0) {
        cpu = sched_getcpu();
    }
    return fcf_set_frequency(cpu, frequency);
}