Example #1
void *thr_reader(void *data)
{
	unsigned long tidx = (unsigned long)data;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"reader", pthread_self(), (unsigned long)gettid());

	set_affinity();

	while (!test_go)
	{
	}

	for (;;) {
		pthread_mutex_lock(&lock);
		assert(test_array.a == 8);
		if (unlikely(rduration))
			loop_sleep(rduration);
		pthread_mutex_unlock(&lock);
		nr_reads++;
		if (unlikely(!test_duration_read()))
			break;
	}

	tot_nr_reads[tidx] = nr_reads;
	printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
			"reader", pthread_self(), (unsigned long)gettid());
	return ((void*)1);

}
Example #2
void *thr_writer(void *_count)
{
	unsigned long long *count = _count;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"writer", (unsigned long) pthread_self(),
			(unsigned long) gettid());

	set_affinity();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		pthread_rwlock_wrlock(&lock);
		test_array.a = 0;
		test_array.a = 8;
		if (caa_unlikely(wduration))
			loop_sleep(wduration);
		pthread_rwlock_unlock(&lock);
		URCU_TLS(nr_writes)++;
		if (caa_unlikely(!test_duration_write()))
			break;
		if (caa_unlikely(wdelay))
			loop_sleep(wdelay);
	}

	printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
			"writer", (unsigned long) pthread_self(),
			(unsigned long) gettid());
	*count = URCU_TLS(nr_writes);
	return ((void*)2);
}
Example #3
File: init.c Project: gedare/rtems
static void reset(test_context *ctx)
{
  rtems_status_code sc;
  size_t i;

  for (i = 0; i < TASK_COUNT; ++i) {
    set_priority(ctx->task_ids[i], P(i));
    set_affinity(ctx->task_ids[i], A(1, 1));
  }

  for (i = CPU_COUNT; i < TASK_COUNT; ++i) {
    sc = rtems_task_suspend(ctx->task_ids[i]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_ALREADY_SUSPENDED);
  }

  for (i = 0; i < CPU_COUNT; ++i) {
    sc = rtems_task_resume(ctx->task_ids[i]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_INCORRECT_STATE);
  }

  /* Order the idle threads explicitly */
  for (i = 0; i < CPU_COUNT; ++i) {
    const Per_CPU_Control *c;
    const Thread_Control *h;

    c = _Per_CPU_Get_by_index(CPU_COUNT - 1 - i);
    h = c->heir;

    sc = rtems_task_suspend(h->Object.id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }
}
Example #4
void *thr_reader(void *data)
{
	unsigned long tidx = (unsigned long)data;

	printf_verbose("thread_begin %s, tid %lu\n",
			"reader", urcu_get_thread_id());

	set_affinity();

	while (!test_go)
	{
	}

	for (;;) {
		pthread_mutex_lock(&per_thread_lock[tidx].lock);
		assert(test_array.a == 8);
		if (caa_unlikely(rduration))
			loop_sleep(rduration);
		pthread_mutex_unlock(&per_thread_lock[tidx].lock);
		URCU_TLS(nr_reads)++;
		if (caa_unlikely(!test_duration_read()))
			break;
	}

	tot_nr_reads[tidx] = URCU_TLS(nr_reads);
	printf_verbose("thread_end %s, tid %lu\n",
			"reader", urcu_get_thread_id());
	return ((void*)1);

}
Example #5
void *thr_writer(void *data)
{
	unsigned long wtidx = (unsigned long)data;

	printf_verbose("thread_begin %s, tid %lu\n",
			"writer", urcu_get_thread_id());

	set_affinity();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		pthread_mutex_lock(&lock);
		test_array.a = 0;
		test_array.a = 8;
		if (caa_unlikely(wduration))
			loop_sleep(wduration);
		pthread_mutex_unlock(&lock);
		URCU_TLS(nr_writes)++;
		if (caa_unlikely(!test_duration_write()))
			break;
		if (caa_unlikely(wdelay))
			loop_sleep(wdelay);
	}

	printf_verbose("thread_end %s, tid %lu\n",
			"writer", urcu_get_thread_id());
	tot_nr_writes[wtidx] = URCU_TLS(nr_writes);
	return ((void*)2);
}
Example #6
void *thr_reader(void *_count)
{
	unsigned long long *count = _count;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"reader", (unsigned long) pthread_self(),
			(unsigned long) gettid());

	set_affinity();

	while (!test_go)
	{
	}

	for (;;) {
		pthread_rwlock_rdlock(&lock);
		assert(test_array.a == 8);
		if (caa_unlikely(rduration))
			loop_sleep(rduration);
		pthread_rwlock_unlock(&lock);
		URCU_TLS(nr_reads)++;
		if (caa_unlikely(!test_duration_read()))
			break;
	}

	*count = URCU_TLS(nr_reads);
	printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
			"reader", (unsigned long) pthread_self(),
			(unsigned long) gettid());
	return ((void*)1);

}
Example #7
/**
 * start_counting
 *
 * Arguments: <context_id> <event_set_id>
 *
 * Call the pfm_start system-call to start counting for a perfmon context
 * that was previously stopped.
 **/
static int start_counting(int argc, char **argv)
{
	pfarg_start_t start_arg;
	struct context *ctx;
	struct event_set *evt;
	cpu_set_t old_cpu_set;
	int ctx_id, event_set_id;
	int system_wide, rc;

	memset(&start_arg, 0, sizeof(start_arg));

	ctx_id = strtoul(argv[1], NULL, 0);
	event_set_id = strtoul(argv[2], NULL, 0);

	if (ctx_id <= 0 || event_set_id < 0) {
		LOG_ERROR("context ID and event-set ID must be "
			  "positive integers.");
		return EINVAL;
	}

	ctx = find_context(ctx_id);
	if (!ctx) {
		LOG_ERROR("Can't find context with ID %d.", ctx_id);
		return EINVAL;
	}

	evt = find_event_set(ctx, event_set_id);
	if (!evt) {
		LOG_ERROR("Can't find event-set with ID %d in context %d.",
			  event_set_id, ctx_id);
		return EINVAL;
	}

	start_arg.start_set = evt->id;

	system_wide = ctx->ctx_arg.ctx_flags & PFM_FL_SYSTEM_WIDE;
	if (system_wide && ctx->cpu >= 0) {
		rc = set_affinity(ctx->cpu, &old_cpu_set);
		if (rc) {
			return rc;
		}
	}

	rc = pfm_start(ctx->fd, &start_arg);
	if (rc) {
		rc = errno;
		LOG_ERROR("pfm_start system call returned an error: %d.", rc);
		return rc;
	}

	if (system_wide && ctx->cpu >= 0) {
		revert_affinity(&old_cpu_set);
	}

	LOG_INFO("Started counting for context %d, event-set %d.",
		 ctx_id, event_set_id);

	return 0;
}
Example #8
static void run_child(size_t cpu)
{
	struct child * self = &children[cpu];

	self->pid = getpid();
	self->sigusr1 = 0;
	self->sigusr2 = 0;
	self->sigterm = 0;

	inner_child = self;
	if (atexit(close_pipe)){
		close_pipe();
		exit(EXIT_FAILURE);
	}

	umask(0);
	/* Change directory to allow directory to be removed */
	if (chdir("/") < 0) {
		perror("Unable to chdir to \"/\"");
		exit(EXIT_FAILURE);
	}

	setup_signals();

	set_affinity(cpu);

	create_context(self);

	write_pmu(self);

	load_context(self);

	notify_parent(self, cpu);

	/* Redirect standard files to /dev/null */
	freopen( "/dev/null", "r", stdin);
	freopen( "/dev/null", "w", stdout);
	freopen( "/dev/null", "w", stderr);

	for (;;) {
		sigset_t sigmask;
		sigfillset(&sigmask);
		sigdelset(&sigmask, SIGUSR1);
		sigdelset(&sigmask, SIGUSR2);
		sigdelset(&sigmask, SIGTERM);

		if (self->sigusr1) {
			perfmon_start_child(self->ctx_fd);
			self->sigusr1 = 0;
		}

		if (self->sigusr2) {
			perfmon_stop_child(self->ctx_fd);
			self->sigusr2 = 0;
		}

		sigsuspend(&sigmask);
	}
}
Example #9
File: main.cpp Project: CCJY/coliru
void worker (unsigned name, unsigned mask) {
    std::lock_guard<std::mutex> lock(m);
    std::cout << "thread #" << name << "(" << mask << ")";
    if (!set_affinity (mask))
      std::cout << ": error set affinity\n";
    else
      std::cout << " is running with mask " << get_affinity () << std::endl;
}
Example #10
void *perftest_thread(void *arg)
{
    thread_data_t *thread_data = (thread_data_t *)arg;

    int thread_index = thread_data->thread_index;
    int write_elem = thread_data->write_elem;
    int read_elem;
    unsigned long random_seed = 1234;

    unsigned long *value_ptr;
    unsigned long *new_value_ptr;
    unsigned long value;

    unsigned long long reads = 0;
    unsigned long long writes = 0;

    set_affinity(thread_index);
    rcu_register_thread();
    rcu_defer_register_thread();
    lock_mb();

    while (goflag == GOFLAG_INIT)
        poll(NULL, 0, 10);

    switch (thread_data->mode)
    {
        case MODE_READONLY:
            while (goflag == GOFLAG_RUN) 
            {
                read_elem = get_random(&random_seed) % Tree_Scale;
                value = *Values[read_elem];
                reads++;
            }
            break;
        case MODE_WRITE:
            while (goflag == GOFLAG_RUN)
            {
                write_elem = get_random(&random_seed) % Tree_Size;
                value_ptr = Values[write_elem];
                value = get_random(&random_seed) % Tree_Scale + 1;

                new_value_ptr = (unsigned long *)malloc(sizeof(unsigned long));
                *new_value_ptr = value;

                rcu_assign_pointer(Values[write_elem], new_value_ptr);

                defer_rcu(free, value_ptr);
                writes++;
            }
            break;
    }

    rcu_unregister_thread();
    rcu_defer_unregister_thread();

    printf("thread %d reads %lld writes %lld\n", thread_index, reads, writes);
    return NULL;
}
Example #11
void *test_hash_rw_thr_reader(void *_count)
{
	unsigned long long *count = _count;
	struct lfht_test_node *node;
	struct cds_lfht_iter iter;

	printf_verbose("thread_begin %s, tid %lu\n",
			"reader", urcu_get_thread_id());

	URCU_TLS(rand_lookup) = urcu_get_thread_id() ^ time(NULL);

	set_affinity();

	rcu_register_thread();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		rcu_read_lock();
		cds_lfht_test_lookup(test_ht,
			(void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % lookup_pool_size) + lookup_pool_offset),
			sizeof(void *), &iter);
		node = cds_lfht_iter_get_test_node(&iter);
		if (node == NULL) {
			if (validate_lookup) {
				printf("[ERROR] Lookup cannot find initial node.\n");
				exit(-1);
			}
			URCU_TLS(lookup_fail)++;
		} else {
			URCU_TLS(lookup_ok)++;
		}
		rcu_debug_yield_read();
		if (caa_unlikely(rduration))
			loop_sleep(rduration);
		rcu_read_unlock();
		URCU_TLS(nr_reads)++;
		if (caa_unlikely(!test_duration_read()))
			break;
		if (caa_unlikely((URCU_TLS(nr_reads) & ((1 << 10) - 1)) == 0))
			rcu_quiescent_state();
	}

	rcu_unregister_thread();

	*count = URCU_TLS(nr_reads);
	printf_verbose("thread_end %s, tid %lu\n",
			"reader", urcu_get_thread_id());
	printf_verbose("read tid : %lx, lookupfail %lu, lookupok %lu\n",
			urcu_get_thread_id(),
			URCU_TLS(lookup_fail),
			URCU_TLS(lookup_ok));
	return ((void*)1);

}
Example #12
static int init_realtime(void)
{
	struct sched_param schedparm;
	memset(&schedparm, 0, sizeof(schedparm));
	schedparm.sched_priority = thread_info.thread_prio;
	sched_setscheduler(0, SCHED_FIFO, &schedparm);
	set_affinity();
	return 0;
}
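Most of the liburcu-style test snippets in this listing call a zero-argument set_affinity() that pins the calling thread to one of the CPUs passed on the test's command line. That helper is not reproduced by any example here; the following is only a minimal sketch of the idea, with the globals (use_affinity, cpu_affinities, next_aff) named purely for illustration:

#define _GNU_SOURCE
#include <sched.h>
#include <pthread.h>

/* Illustrative globals: CPUs chosen by the user, and the next slot to hand out. */
static int use_affinity;
static int cpu_affinities[256];
static int next_aff;
static pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;

static void set_affinity(void)
{
	cpu_set_t mask;
	int cpu;

	if (!use_affinity)
		return;

	/* Hand out the configured CPUs to threads in creation order. */
	pthread_mutex_lock(&affinity_mutex);
	cpu = cpu_affinities[next_aff++];
	pthread_mutex_unlock(&affinity_mutex);

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	sched_setaffinity(0, sizeof(mask), &mask);
}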
Example #13
/**
 * @brief Execute command (fork, exec, wait)
 *
 * @param [in] argc number of cmd args
 * @param [in] argv cmd args
 *
 * @return Operation status
 * @retval 0 on success
 * @retval -1 on error
 */
static int
execute_cmd(int argc, char **argv)
{
	int i = 0;

	if (0 >= argc || NULL == argv)
		return -1;

	if (g_cfg.verbose) {
		printf("Trying to execute ");
		for (i = 0; i < argc; i++)
			printf("%s ", argv[i]);

		printf("\n");
	}

	pid_t pid = fork();

	if (-1 == pid) {
		fprintf(stderr, "%s,%s:%d Failed to execute %s !"
				" fork failed\n", __FILE__, __func__, __LINE__,
				argv[0]);
		return -1;
	} else if (0 < pid) {
		int status = EXIT_FAILURE;
		/* Wait for child */
		waitpid(pid, &status, 0);

		if (EXIT_SUCCESS != status)
			return -1;
	} else {
		/* set cpu affinity */
		if (0 != set_affinity(0)) {
			fprintf(stderr, "%s,%s:%d Failed to set core "
				"affinity!\n", __FILE__, __func__,
				__LINE__);
			_Exit(EXIT_FAILURE);
		}

		/* drop elevated root privileges */
		if (0 == g_cfg.sudo_keep && 0 != sudo_drop())
			_Exit(EXIT_FAILURE);

		errno = 0;
		/* execute command */
		execvp(argv[0], argv);

		fprintf(stderr, "%s,%s:%d Failed to execute %s, %s (%i) !\n",
				__FILE__, __func__, __LINE__,
				argv[0], strerror(errno), errno);

		_Exit(EXIT_FAILURE);
	}

	return 0;
}
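For illustration only, a hypothetical call site for execute_cmd() (command and argument count are made up; argv must be NULL-terminated because the child calls execvp()) might look like:

	char *cmd[] = { "ls", "-l", NULL };

	if (execute_cmd(2, cmd) != 0)
		fprintf(stderr, "execute_cmd failed\n");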
Example #14
File: init.c Project: gedare/rtems
/*
 * Use a timer to execute the actions, since it runs with thread dispatching
 * disabled.  This is necessary to check the expected processor allocations.
 */
static void timer(rtems_id id, void *arg)
{
  test_context *ctx;
  rtems_status_code sc;
  size_t i;

  ctx = arg;
  i = ctx->action_index;

  if (i == 0) {
    sc = rtems_task_suspend(ctx->master_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  if (i < RTEMS_ARRAY_SIZE(test_actions)) {
    const test_action *action = &test_actions[i];
    rtems_id task;

    ctx->action_index = i + 1;

    task = ctx->task_ids[action->index];

    switch (action->kind) {
      case KIND_SET_PRIORITY:
        set_priority(task, action->data.priority);
        break;
      case KIND_SET_AFFINITY:
        set_affinity(task, action->data.cpu_set);
        break;
      case KIND_BLOCK:
        sc = rtems_task_suspend(task);
        rtems_test_assert(sc == RTEMS_SUCCESSFUL);
        break;
      case KIND_UNBLOCK:
        sc = rtems_task_resume(task);
        rtems_test_assert(sc == RTEMS_SUCCESSFUL);
        break;
      default:
        rtems_test_assert(action->kind == KIND_RESET);
        reset(ctx);
        break;
    }

    check_cpu_allocations(ctx, action);

    sc = rtems_timer_reset(id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  } else {
    sc = rtems_task_resume(ctx->master_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_event_transient_send(ctx->master_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }
}
Example #15
void *
Query_queue::threadInitQuery(void * This) {
	Query_queue * query_queue = (Query_queue *)This;
	uint32_t tid = ATOM_FETCH_ADD(_next_tid, 1);
	
	// set cpu affinity
	set_affinity(tid);

	query_queue->init_per_thread(tid);
	return NULL;
}
Example #16
void *thr_dequeuer(void *_count)
{
	unsigned long long *count = _count;
	int ret;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"dequeuer", pthread_self(), (unsigned long)gettid());

	set_affinity();

	ret = rcu_defer_register_thread();
	if (ret) {
		printf("Error in rcu_defer_register_thread\n");
		exit(-1);
	}
	rcu_register_thread();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		struct cds_lfq_node_rcu *qnode;
		struct test *node;

		rcu_read_lock();
		qnode = cds_lfq_dequeue_rcu(&q);
		node = caa_container_of(qnode, struct test, list);
		rcu_read_unlock();

		if (node) {
			call_rcu(&node->rcu, free_node_cb);
			nr_successful_dequeues++;
		}

		nr_dequeues++;
		if (caa_unlikely(!test_duration_dequeue()))
			break;
		if (caa_unlikely(rduration))
			loop_sleep(rduration);
	}

	rcu_unregister_thread();
	rcu_defer_unregister_thread();
	printf_verbose("dequeuer thread_end, thread id : %lx, tid %lu, "
		       "dequeues %llu, successful_dequeues %llu\n",
		       pthread_self(), (unsigned long)gettid(), nr_dequeues,
		       nr_successful_dequeues);
	count[0] = nr_dequeues;
	count[1] = nr_successful_dequeues;
	return ((void*)2);
}
Example #17
/**
 * unload_context
 *
 * Arguments: <context_id>
 *
 * Call the pfm_unload_context system-call to unload a perfmon context from
 * the system's performance monitoring unit.
 **/
static int unload_context(int argc, char **argv)
{
	struct context *ctx;
	cpu_set_t old_cpu_set;
	int system_wide;
	int ctx_id;
	int rc;

	ctx_id = strtoul(argv[1], NULL, 0);
	if (ctx_id <= 0) {
		LOG_ERROR("context ID must be a positive integer.");
		return EINVAL;
	}

	ctx = find_context(ctx_id);
	if (!ctx) {
		LOG_ERROR("Can't find context with ID %d.", ctx_id);
		return EINVAL;
	}

	system_wide = ctx->ctx_flags & PFM_FL_SYSTEM_WIDE;
	if (system_wide) {
		if (ctx->cpu < 0) {
			/* This context isn't loaded on any CPU. */
			LOG_ERROR("Trying to unload context %d that isn't "
				  "loaded.\n", ctx_id);
			return EINVAL;
		}

		rc = set_affinity(ctx->cpu, &old_cpu_set);
		if (rc) {
			return rc;
		}
	}

	rc = pfm_attach(ctx->fd, 0, PFM_NO_TARGET);
	if (rc) {
		rc = errno;
		LOG_ERROR("pfm_attach(detach) system call returned "
			  "an error: %d.", rc);
		return rc;
	}

	if (system_wide) {
		ctx->cpu = -1;
		revert_affinity(&old_cpu_set);
	}

	LOG_INFO("Unloaded context %d.", ctx_id);

	return 0;
}
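The perfmon examples in this listing (start_counting, unload_context, restart_counting) temporarily pin the calling thread to the context's CPU with set_affinity(cpu, &old_cpu_set) and later restore the saved mask with revert_affinity(). Those helpers are not shown anywhere in the listing; the code below is only a sketch of what such a save/restore pair could look like on Linux, not the project's actual implementation:

#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>

static int set_affinity(int cpu, cpu_set_t *old_cpu_set)
{
	cpu_set_t new_cpu_set;

	/* Save the current mask so revert_affinity() can restore it. */
	if (sched_getaffinity(0, sizeof(*old_cpu_set), old_cpu_set))
		return errno;

	CPU_ZERO(&new_cpu_set);
	CPU_SET(cpu, &new_cpu_set);
	if (sched_setaffinity(0, sizeof(new_cpu_set), &new_cpu_set))
		return errno;

	return 0;
}

static void revert_affinity(cpu_set_t *old_cpu_set)
{
	/* Best effort: put the original affinity mask back. */
	sched_setaffinity(0, sizeof(*old_cpu_set), old_cpu_set);
}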
Example #18
static void *thr_dequeuer(void *_count)
{
	unsigned long long *count = _count;
	unsigned int counter = 0;

	printf_verbose("thread_begin %s, tid %lu\n",
			"dequeuer", urcu_get_thread_id());

	set_affinity();

	rcu_register_thread();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	assert(test_pop || test_pop_all);

	for (;;) {
		if (test_pop && test_pop_all) {
			/* both pop and pop all */
			if (counter & 1)
				do_test_pop(test_sync);
			else
				do_test_pop_all(test_sync);
			counter++;
		} else {
			if (test_pop)
				do_test_pop(test_sync);
			else
				do_test_pop_all(test_sync);
		}

		if (caa_unlikely(!test_duration_dequeue()))
			break;
		if (caa_unlikely(rduration))
			loop_sleep(rduration);
	}

	rcu_unregister_thread();

	printf_verbose("dequeuer thread_end, tid %lu, "
			"dequeues %llu, successful_dequeues %llu\n",
			urcu_get_thread_id(),
			URCU_TLS(nr_dequeues),
			URCU_TLS(nr_successful_dequeues));
	count[0] = URCU_TLS(nr_dequeues);
	count[1] = URCU_TLS(nr_successful_dequeues);
	return ((void*)2);
}
Example #19
static int do_set_affinity(const char* protos, struct sockaddr_in la,
                           struct sockaddr_in ra, int rxq, int cpu)
{
  int proto = str_to_proto(protos);
  int i, ifindex;

  refresh_ip_list();

  if( la.sin_addr.s_addr == 0 ) {
    for( i = 0; i < ip_list_n; ++i )
      if( interface_driver_is(ip_list_name(i), "sfc") )
        set_affinity(interface_to_ifindex(ip_list_name(i)), proto,
                     ip_list_ip(i), la.sin_port,
                     ra.sin_addr.s_addr, ra.sin_port, rxq, cpu);
    return 1;
  }
  else if( CI_IP_IS_MULTICAST(la.sin_addr.s_addr) ) {
    for( i = 0; i < ip_list_n; ++i )
      if( interface_is(ip_list_name(i), "sfc") )
        set_affinity(interface_to_ifindex(ip_list_name(i)), proto,
                     la.sin_addr.s_addr, la.sin_port,
                     ra.sin_addr.s_addr, ra.sin_port, rxq, cpu);
    return 1;
  }
  else {
    ifindex = ip_to_ifindex(la.sin_addr.s_addr);
    if( ifindex < 0 ) {
      err("%s: ERROR: Can't find interface for IP %s\n",
          me, inet_ntoa(la.sin_addr));
      return 0;
    }
    set_affinity(ifindex, proto,
                 la.sin_addr.s_addr, la.sin_port,
                 ra.sin_addr.s_addr, ra.sin_port, rxq, cpu);
    return 1;
  }
}
Example #20
static void *thr_enqueuer(void *_count)
{
	unsigned long long *count = _count;
	bool was_nonempty;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"enqueuer", (unsigned long) pthread_self(),
			(unsigned long) gettid());

	set_affinity();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		struct cds_wfs_node *node = malloc(sizeof(*node));
		if (!node)
			goto fail;
		cds_wfs_node_init(node);
		was_nonempty = cds_wfs_push(&s, node);
		URCU_TLS(nr_successful_enqueues)++;
		if (!was_nonempty)
			URCU_TLS(nr_empty_dest_enqueues)++;

		if (caa_unlikely(wdelay))
			loop_sleep(wdelay);
fail:
		URCU_TLS(nr_enqueues)++;
		if (caa_unlikely(!test_duration_enqueue()))
			break;
	}

	uatomic_inc(&test_enqueue_stopped);
	count[0] = URCU_TLS(nr_enqueues);
	count[1] = URCU_TLS(nr_successful_enqueues);
	count[2] = URCU_TLS(nr_empty_dest_enqueues);
	printf_verbose("enqueuer thread_end, thread id : %lx, tid %lu, "
		       "enqueues %llu successful_enqueues %llu, "
		       "empty_dest_enqueues %llu\n",
		       pthread_self(),
			(unsigned long) gettid(),
		       URCU_TLS(nr_enqueues),
		       URCU_TLS(nr_successful_enqueues),
		       URCU_TLS(nr_empty_dest_enqueues));
	return ((void*)1);

}
Example #21
static void try_set_affinity_full(const char* caller, int fd)
{
  struct sockaddr_in sa_local, sa_peer;
  socklen_t sa_len;
  int errno_save = errno;
  int rc, type;

  T(fprintf(stderr, LPF "%s(%s(%d))\n", __FUNCTION__, caller, fd));

  if( is_inet_sock(fd, &type, &sa_local) )
    if( type == SOCK_STREAM || type == SOCK_DGRAM ) {
      sa_len = sizeof(sa_peer);
      rc = do_sys_getpeername(fd, (struct sockaddr*) &sa_peer, &sa_len);
      if( rc == 0 )
        set_affinity(type, sa_local.sin_addr.s_addr, sa_local.sin_port,
                     sa_peer.sin_addr.s_addr, sa_peer.sin_port);
      else if( type == SOCK_DGRAM )
        /* ?? TODO: We'll sometimes want a full-match for UDP. */
        set_affinity(type, sa_local.sin_addr.s_addr,
                     sa_local.sin_port, 0, 0);
    }

  errno = errno_save;
}
Example #22
pid_t fork(void) {
    pid_t ret;

    // When a new process is forked, the refcounter must be incremented
    pthread_mutex_lock(&get_shm()->pin_lock);
    get_shm()->refcount++;
    pthread_mutex_unlock(&get_shm()->pin_lock);

    ret = old_fork();
    if(ret > 0) {
        set_affinity(ret, get_next_core());
    }

    return ret;
}
Example #23
void *thr_dequeuer(void *_count)
{
    unsigned long long *count = _count;

    printf_verbose("thread_begin %s, tid %lu\n",
                   "dequeuer", urcu_get_thread_id());

    set_affinity();

    rcu_register_thread();

    while (!test_go)
    {
    }
    cmm_smp_mb();

    for (;;) {
        struct cds_lfq_node_rcu *qnode;

        rcu_read_lock();
        qnode = cds_lfq_dequeue_rcu(&q);
        rcu_read_unlock();

        if (qnode) {
            struct test *node;

            node = caa_container_of(qnode, struct test, list);
            call_rcu(&node->rcu, free_node_cb);
            URCU_TLS(nr_successful_dequeues)++;
        }

        URCU_TLS(nr_dequeues)++;
        if (caa_unlikely(!test_duration_dequeue()))
            break;
        if (caa_unlikely(rduration))
            loop_sleep(rduration);
    }

    rcu_unregister_thread();
    printf_verbose("dequeuer thread_end, tid %lu, "
                   "dequeues %llu, successful_dequeues %llu\n",
                   urcu_get_thread_id(),
                   URCU_TLS(nr_dequeues),
                   URCU_TLS(nr_successful_dequeues));
    count[0] = URCU_TLS(nr_dequeues);
    count[1] = URCU_TLS(nr_successful_dequeues);
    return ((void*)2);
}
Example #24
pid_t fork(void) {
   pid_t ret;

   // Increment refcount on fork to avoid parent dying before child and destroying the shm
   __sync_fetch_and_add(&get_shm()->refcount, 1);

   ret = old_fork();
   if(ret > 0) {
      set_affinity(ret, get_next_core());
   } else if (ret < 0) {
       // fork failed, decrement
       __sync_fetch_and_sub(&get_shm()->refcount, 1);
   }       

   return ret;
}
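The two fork() wrappers above pin each newly forked child to the next core in round-robin order via set_affinity(pid, core) and get_next_core(). Neither helper appears in the listing; the sketch below shows one plausible shape for them (the plain global counter and the use of sysconf() are assumptions; the real code presumably keeps its counter in the shared-memory segment it refcounts):

#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>
#include <sys/types.h>

static int next_core;

static int get_next_core(void)
{
	/* Round-robin over the online CPUs. */
	int core = __sync_fetch_and_add(&next_core, 1);

	return core % (int) sysconf(_SC_NPROCESSORS_ONLN);
}

static int set_affinity(pid_t pid, int core)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(core, &set);
	/* Pin the given process (here, the freshly forked child) to one core. */
	return sched_setaffinity(pid, sizeof(set), &set);
}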
Example #25
void *thr_enqueuer(void *_count)
{
    unsigned long long *count = _count;

    printf_verbose("thread_begin %s, tid %lu\n",
                   "enqueuer", urcu_get_thread_id());

    set_affinity();

    rcu_register_thread();

    while (!test_go)
    {
    }
    cmm_smp_mb();

    for (;;) {
        struct test *node = malloc(sizeof(*node));
        if (!node)
            goto fail;
        cds_lfq_node_init_rcu(&node->list);
        rcu_read_lock();
        cds_lfq_enqueue_rcu(&q, &node->list);
        rcu_read_unlock();
        URCU_TLS(nr_successful_enqueues)++;

        if (caa_unlikely(wdelay))
            loop_sleep(wdelay);
fail:
        URCU_TLS(nr_enqueues)++;
        if (caa_unlikely(!test_duration_enqueue()))
            break;
    }

    rcu_unregister_thread();

    count[0] = URCU_TLS(nr_enqueues);
    count[1] = URCU_TLS(nr_successful_enqueues);
    printf_verbose("enqueuer thread_end, tid %lu, "
                   "enqueues %llu successful_enqueues %llu\n",
                   urcu_get_thread_id(),
                   URCU_TLS(nr_enqueues),
                   URCU_TLS(nr_successful_enqueues));
    return ((void*)1);

}
Example #26
static void try_set_affinity_wild(const char* caller, int fd)
{
  struct sockaddr_in sa;
  int errno_save = errno;
  int type;

  T(fprintf(stderr, LPF "%s(%s(%d))\n", __FUNCTION__, caller, fd));

  if( is_inet_sock(fd, &type, &sa) ) {
    if( type == SOCK_STREAM )
      set_affinity_wild_tcp(type, sa.sin_addr.s_addr, sa.sin_port);
    else if( type == SOCK_DGRAM )
      set_affinity(type, sa.sin_addr.s_addr, sa.sin_port, 0, 0);
  }

  errno = errno_save;
}
Example #27
static void run_child(size_t cpu)
{
	struct child * self = &children[cpu];

	self->pid = getpid();
	self->sigusr1 = 0;
	self->sigusr2 = 0;
	self->sigterm = 0;

	setup_signals();

	set_affinity(cpu);

	create_context(self);

	write_pmu(self);

	load_context(self);

	notify_parent(self, cpu);

	for (;;) {
		sigset_t sigmask;
		sigfillset(&sigmask);
		sigdelset(&sigmask, SIGUSR1);
		sigdelset(&sigmask, SIGUSR2);
		sigdelset(&sigmask, SIGTERM);

		if (self->sigusr1) {
			printf("PFM_START on CPU%d\n", (int)cpu);
			fflush(stdout);
			perfmon_start_child(self->ctx_fd);
			self->sigusr1 = 0;
		}

		if (self->sigusr2) {
			printf("PFM_STOP on CPU%d\n", (int)cpu);
			fflush(stdout);
			perfmon_stop_child(self->ctx_fd);
			self->sigusr2 = 0;
		}

		sigsuspend(&sigmask);
	}
}
Example #28
File: stm.c Project: HPDCS/stmF2C2
_CALLCONV stm_tx_t *stm_pre_init_thread(int id){

	stm_tx_t *tx;
	tx=stm_init_thread();

	tx->thread_identifier=id;
	if (tx->thread_identifier<active_threads) {
		tx->thread_gate=0;
	} else {
		tx->thread_gate=1;
		if (scheduling_policy==2) {
			struct sembuf *sop = (struct sembuf *) malloc(sizeof(struct sembuf));
			sop[0].sem_num = tx->thread_identifier;
			sop[0].sem_op = 1; /* increment semaphore to become one */
			sop[0].sem_flg = SEM_UNDO | IPC_NOWAIT; /* take off semaphore */


			if (semop(semid, sop, 1) == -1) {
				printf("Semop failed on thread %i on stm_pre_init_thread",tx->thread_identifier);
				exit(0);
			}
		}
	}

	//printf("\nThread %i thread_gate % i", tx->thread_identifier, tx->thread_gate);

	tx->committed_transactions=0;

    set_affinity(id);

	char filename[512];
	int cpu_id=sched_getcpu();
	sprintf(filename, "/sys/devices/system/cpu/cpu%i/cpufreq/scaling_setspeed",cpu_id);
	//printf("Filename: %s", filename);
	tx->scaling_setspeed_fd=open(filename, O_WRONLY);
    if(tx->scaling_setspeed_fd==-1){
        printf("\nError opening file %s \n", filename);
        exit(1);
    }
	char target_freq[]="2000000";
	write(tx->scaling_setspeed_fd, &target_freq, sizeof(target_freq));

	return tx;
}
Example #29
/**
 * restart_counting
 *
 * Arguments: <context_id>
 *
 * Call the pfm_restart system-call to clear the data counters and start
 * counting from zero for a perfmon context that was previously loaded.
 **/
static int restart_counting(int argc, char **argv)
{
	struct context *ctx;
	cpu_set_t old_cpu_set;
	int system_wide;
	int ctx_id;
	int rc;

	ctx_id = strtoul(argv[1], NULL, 0);

	if (ctx_id <= 0) {
		LOG_ERROR("context ID must be a positive integer.");
		return EINVAL;
	}

	ctx = find_context(ctx_id);
	if (!ctx) {
		LOG_ERROR("Can't find context with ID %d.", ctx_id);
		return EINVAL;
	}

	system_wide = ctx->ctx_flags & PFM_FL_SYSTEM_WIDE;
	if (system_wide && ctx->cpu >= 0) {
		rc = set_affinity(ctx->cpu, &old_cpu_set);
		if (rc) {
			return rc;
		}
	}

	rc = pfm_set_state(ctx->fd, 0, PFM_ST_RESTART);
	if (rc) {
		rc = errno;
		LOG_ERROR("pfm_set_state(restart) system call returned an error: %d.", rc);
		return rc;
	}

	if (system_wide && ctx->cpu >= 0) {
		revert_affinity(&old_cpu_set);
	}

	LOG_INFO("Restarted counting for context %d.", ctx_id);

	return 0;
}
Example #30
void hash_init() 
{
	int i;

	hash.heads = calloc(sizeof(struct node_t*), nbbuckets);
	hash.locks = calloc(sizeof(lock_t), nbbuckets);

	for(i=0; i < nbbuckets; i++)
	{
		lock_init(&hash.locks[i]);
	} 

	//populate
	node_t **hd;
	lock_t *lck;

#ifdef POPULATE
	unsigned nbcores = get_nbcores();
	unsigned percorekeys = (nbkeys/nbcores)+1;
	unsigned nextcore = 1;
	//printf("npn %d %d %d\n", nbcores, percorekeys, nextcore);
	for(i=0; i< nbkeys; i++)
	{
		if(!(i%percorekeys))
		{
			//printf("switching to core %d at %d keys\n", nextcore, i);
			set_affinity(nextcore%nbcores);
			nextcore++;
		} 
		get_buckets(i, &hd, &lck);
		insert(i, hd, lck);
	}
	//printf("empty = %d, total = %d\n", empty, nbkeys);
#endif

#ifdef SET_TAIL
	printf("SET TAIL\n");
	//insert max key
	get_buckets(i, &hd, &lck);
	insert(nbkeys+1, hd, lck);
#endif
}