Example #1
static void *rcu_update_stress_test(void *arg)
{
    int i;
    struct rcu_stress *p;

    rcu_register_thread();

    *(struct rcu_reader_data **)arg = &rcu_reader;
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        i = rcu_stress_idx + 1;
        if (i >= RCU_STRESS_PIPE_LEN) {
            i = 0;
        }
        p = &rcu_stress_array[i];
        p->mbtest = 0;
        smp_mb();
        p->pipe_count = 0;
        p->mbtest = 1;
        atomic_rcu_set(&rcu_stress_current, p);
        rcu_stress_idx = i;
        for (i = 0; i < RCU_STRESS_PIPE_LEN; i++) {
            if (i != rcu_stress_idx) {
                rcu_stress_array[i].pipe_count++;
            }
        }
        synchronize_rcu();
        n_updates++;
    }

    rcu_unregister_thread();
    return NULL;
}
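Note: Example #1 is the update side of QEMU's RCU torture test: each iteration initializes a fresh array slot, publishes it with atomic_rcu_set(), and then waits out all pre-existing readers with synchronize_rcu(). Outside the torture harness, the same publish-then-reclaim pattern looks roughly like the liburcu sketch below (struct cfg, global_cfg and update_cfg() are invented for the illustration):

#include <stdlib.h>
#include <urcu.h>

struct cfg { int value; };
static struct cfg *global_cfg;

/* Sketch: swap in a new cfg and reclaim the old one. Caller must be a
 * registered RCU thread and must not be in a read-side critical section. */
static void update_cfg(int value)
{
    struct cfg *new_cfg = malloc(sizeof(*new_cfg));
    struct cfg *old_cfg = global_cfg;

    if (!new_cfg)
        return;
    new_cfg->value = value;
    rcu_assign_pointer(global_cfg, new_cfg); /* publish to readers */
    synchronize_rcu();                       /* wait for a grace period */
    free(old_cfg);                           /* no reader can still hold it */
}
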
Example #2
static void *
workerLoop(UA_Worker *worker) {
    UA_Server *server = worker->server;
    UA_UInt32 *counter = &worker->counter;
    volatile UA_Boolean *running = &worker->running;

    /* Initialize the (thread-local) random seed with the RAM address of the worker */
    UA_random_seed((uintptr_t)worker);
    rcu_register_thread();

    pthread_mutex_t mutex; // required for the condition variable
    pthread_mutex_init(&mutex, 0);
    pthread_mutex_lock(&mutex);

    while(*running) {
        struct DispatchJob *dj = (struct DispatchJob*)
            cds_wfcq_dequeue_blocking(&server->dispatchQueue_head, &server->dispatchQueue_tail);
        if(dj) {
            processJob(server, &dj->job);
            UA_free(dj);
        } else {
            /* nothing to do. sleep until a job is dispatched (and wakes up all worker threads) */
            pthread_cond_wait(&server->dispatchQueue_condition, &mutex);
        }
        uatomic_inc(counter);
    }

    pthread_mutex_unlock(&mutex);
    pthread_mutex_destroy(&mutex);
    UA_ASSERT_RCU_UNLOCKED();
    rcu_barrier(); // wait for all scheduled call_rcu work to complete
    rcu_unregister_thread();
    return NULL;
}
Example #3
static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    rcu_register_thread();

    my_iothread = iothread;
    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    while (iothread->running) {
        aio_poll(iothread->ctx, true);

        if (atomic_read(&iothread->worker_context)) {
            GMainLoop *loop;

            g_main_context_push_thread_default(iothread->worker_context);
            iothread->main_loop =
                g_main_loop_new(iothread->worker_context, TRUE);
            loop = iothread->main_loop;

            g_main_loop_run(iothread->main_loop);
            iothread->main_loop = NULL;
            g_main_loop_unref(loop);

            g_main_context_pop_thread_default(iothread->worker_context);
        }
    }

    rcu_unregister_thread();
    return NULL;
}
Example #4
static void *rcu_read_perf_test(void *arg)
{
    int i;
    long long n_reads_local = 0;

    rcu_register_thread();

    *(struct rcu_reader_data **)arg = &rcu_reader;
    atomic_inc(&nthreadsrunning);
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        for (i = 0; i < RCU_READ_RUN; i++) {
            rcu_read_lock();
            rcu_read_unlock();
        }
        n_reads_local += RCU_READ_RUN;
    }
    qemu_mutex_lock(&counts_mutex);
    n_reads += n_reads_local;
    qemu_mutex_unlock(&counts_mutex);

    rcu_unregister_thread();
    return NULL;
}
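Note: Example #4 measures the raw overhead of an empty read-side critical section. In real use the lock/unlock pair brackets the dereference of an RCU-protected pointer, roughly as in this liburcu sketch (struct cfg and global_cfg are the same invented names as in the note after Example #1):

#include <urcu.h>

struct cfg { int value; };
static struct cfg *global_cfg;

static int read_cfg_value(void)
{
    struct cfg *c;
    int v = 0;

    rcu_read_lock();
    c = rcu_dereference(global_cfg); /* safe snapshot of the pointer */
    if (c)
        v = c->value;
    rcu_read_unlock();               /* c must not be used past this point */
    return v;
}
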
Example #5
static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;
    bool blocking;

    rcu_register_thread();

    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    while (!iothread->stopping) {
        aio_context_acquire(iothread->ctx);
        blocking = true;
        while (!iothread->stopping && aio_poll(iothread->ctx, blocking)) {
            /* Progress was made, keep going */
            blocking = false;
        }
        aio_context_release(iothread->ctx);
    }

    rcu_unregister_thread();
    return NULL;
}
Example #6
void *test_hash_rw_thr_reader(void *_count)
{
	unsigned long long *count = _count;
	struct lfht_test_node *node;
	struct cds_lfht_iter iter;

	printf_verbose("thread_begin %s, tid %lu\n",
			"reader", urcu_get_thread_id());

	URCU_TLS(rand_lookup) = urcu_get_thread_id() ^ time(NULL);

	set_affinity();

	rcu_register_thread();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		rcu_read_lock();
		cds_lfht_test_lookup(test_ht,
			(void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % lookup_pool_size) + lookup_pool_offset),
			sizeof(void *), &iter);
		node = cds_lfht_iter_get_test_node(&iter);
		if (node == NULL) {
			if (validate_lookup) {
				printf("[ERROR] Lookup cannot find initial node.\n");
				exit(-1);
			}
			URCU_TLS(lookup_fail)++;
		} else {
			URCU_TLS(lookup_ok)++;
		}
		rcu_debug_yield_read();
		if (caa_unlikely(rduration))
			loop_sleep(rduration);
		rcu_read_unlock();
		URCU_TLS(nr_reads)++;
		if (caa_unlikely(!test_duration_read()))
			break;
		if (caa_unlikely((URCU_TLS(nr_reads) & ((1 << 10) - 1)) == 0))
			rcu_quiescent_state();
	}

	rcu_unregister_thread();

	*count = URCU_TLS(nr_reads);
	printf_verbose("thread_end %s, tid %lu\n",
			"reader", urcu_get_thread_id());
	printf_verbose("read tid : %lx, lookupfail %lu, lookupok %lu\n",
			urcu_get_thread_id(),
			URCU_TLS(lookup_fail),
			URCU_TLS(lookup_ok));
	return ((void*)1);
}
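Note: Example #6 calls rcu_quiescent_state() every 1024 reads, which marks it as a QSBR (quiescent-state-based reclamation) reader: with the urcu-qsbr flavor, rcu_read_lock()/rcu_read_unlock() compile to nothing, but every registered thread must announce quiescent states regularly or grace periods never complete. A minimal sketch of that discipline, assuming the urcu-qsbr flavor (do_lookup() is a placeholder):

#include <unistd.h>
#include <urcu-qsbr.h>

static void do_lookup(void)
{
    /* placeholder for an RCU-protected lookup */
}

static void *qsbr_reader(void *arg)
{
    rcu_register_thread();
    for (int i = 0; i < 1000000; i++) {
        rcu_read_lock();            /* no-op in QSBR, kept for clarity */
        do_lookup();
        rcu_read_unlock();
        if ((i & ((1 << 10) - 1)) == 0)
            rcu_quiescent_state();  /* announce a quiescent state */
    }
    rcu_thread_offline();           /* required before blocking */
    sleep(1);
    rcu_thread_online();
    rcu_unregister_thread();
    return arg;
}
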
Example #7
void *perftest_thread(void *arg)
{
    thread_data_t *thread_data = (thread_data_t *)arg;

    int thread_index = thread_data->thread_index;
    int write_elem = thread_data->write_elem;
    int read_elem;
    unsigned long random_seed = 1234;

    unsigned long *value_ptr;
    unsigned long *new_value_ptr;
    unsigned long value;

    unsigned long long reads = 0;
    unsigned long long writes = 0;

    set_affinity(thread_index);
    rcu_register_thread();
    rcu_defer_register_thread();
    lock_mb();

    while (goflag == GOFLAG_INIT)
        poll(NULL, 0, 10);

    switch (thread_data->mode)
    {
        case MODE_READONLY:
            while (goflag == GOFLAG_RUN) 
            {
                read_elem = get_random(&random_seed) % Tree_Scale;
                value = *Values[read_elem];
                reads++;
            }
            break;
        case MODE_WRITE:
            while (goflag == GOFLAG_RUN)
            {
                write_elem = get_random(&random_seed) % Tree_Size;
                value_ptr = Values[write_elem];
                value = get_random(&random_seed) % Tree_Scale + 1;

                new_value_ptr = (unsigned long *)malloc(sizeof(unsigned long));
                *new_value_ptr = value;

                rcu_assign_pointer(Values[write_elem], new_value_ptr);

                defer_rcu(free, value_ptr);
                writes++;
            }
            break;
    }

    rcu_unregister_thread();
    rcu_defer_unregister_thread();

    printf("thread %d reads %lld writes %lld\n", thread_index, reads, writes);
    return NULL;
}
Example #8
int test_mf_signal(void)
{
	rcu_register_thread();
	rcu_read_lock();
	rcu_read_unlock();
	synchronize_rcu();
	rcu_unregister_thread();
	return 0;
}
Example #9
void *thr_dequeuer(void *_count)
{
	unsigned long long *count = _count;
	int ret;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"dequeuer", pthread_self(), (unsigned long)gettid());

	set_affinity();

	ret = rcu_defer_register_thread();
	if (ret) {
		printf("Error in rcu_defer_register_thread\n");
		exit(-1);
	}
	rcu_register_thread();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		struct cds_lfq_node_rcu *qnode;
		struct test *node;

		rcu_read_lock();
		qnode = cds_lfq_dequeue_rcu(&q);
		/* list is the first member, so a NULL qnode yields a NULL node */
		node = caa_container_of(qnode, struct test, list);
		rcu_read_unlock();

		if (node) {
			call_rcu(&node->rcu, free_node_cb);
			nr_successful_dequeues++;
		}

		nr_dequeues++;
		if (caa_unlikely(!test_duration_dequeue()))
			break;
		if (caa_unlikely(rduration))
			loop_sleep(rduration);
	}

	rcu_unregister_thread();
	rcu_defer_unregister_thread();
	printf_verbose("dequeuer thread_end, thread id : %lx, tid %lu, "
		       "dequeues %llu, successful_dequeues %llu\n",
		       pthread_self(), (unsigned long)gettid(), nr_dequeues,
		       nr_successful_dequeues);
	count[0] = nr_dequeues;
	count[1] = nr_successful_dequeues;
	return ((void*)2);
}
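Note: Example #9 never frees a dequeued node directly; it hands the node to call_rcu() so that readers still traversing the queue stay safe. The free_node_cb callback is not shown; in the liburcu tests it has roughly the shape below (the struct test layout is a guess based on the caa_container_of() call above, with the queue node as first member so a NULL qnode maps back to a NULL node):

#include <stdlib.h>
#include <urcu.h>
#include <urcu/rculfqueue.h>

struct test {
    struct cds_lfq_node_rcu list; /* queue linkage, first member */
    struct rcu_head rcu;          /* reclamation linkage */
};

static void free_node_cb(struct rcu_head *head)
{
    struct test *node = caa_container_of(head, struct test, rcu);

    free(node); /* runs only after a full grace period */
}
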
Example #10
static void *thr_dequeuer(void *_count)
{
	unsigned long long *count = _count;
	unsigned int counter = 0;

	printf_verbose("thread_begin %s, tid %lu\n",
			"dequeuer", urcu_get_thread_id());

	set_affinity();

	rcu_register_thread();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	assert(test_pop || test_pop_all);

	for (;;) {
		if (test_pop && test_pop_all) {
			/* both pop and pop all */
			if (counter & 1)
				do_test_pop(test_sync);
			else
				do_test_pop_all(test_sync);
			counter++;
		} else {
			if (test_pop)
				do_test_pop(test_sync);
			else
				do_test_pop_all(test_sync);
		}

		if (caa_unlikely(!test_duration_dequeue()))
			break;
		if (caa_unlikely(rduration))
			loop_sleep(rduration);
	}

	rcu_unregister_thread();

	printf_verbose("dequeuer thread_end, tid %lu, "
			"dequeues %llu, successful_dequeues %llu\n",
			urcu_get_thread_id(),
			URCU_TLS(nr_dequeues),
			URCU_TLS(nr_successful_dequeues));
	count[0] = URCU_TLS(nr_dequeues);
	count[1] = URCU_TLS(nr_successful_dequeues);
	return ((void*)2);
}
Example #11
static void *regression1_fn(void *arg)
{
	rcu_register_thread();

	if (pthread_barrier_wait(&worker_barrier) ==
			PTHREAD_BARRIER_SERIAL_THREAD) {
		int j;

		for (j = 0; j < 1000000; j++) {
			struct page *p;

			p = page_alloc();
			pthread_mutex_lock(&mt_lock);
			radix_tree_insert(&mt_tree, 0, p);
			pthread_mutex_unlock(&mt_lock);

			p = page_alloc();
			pthread_mutex_lock(&mt_lock);
			radix_tree_insert(&mt_tree, 1, p);
			pthread_mutex_unlock(&mt_lock);

			pthread_mutex_lock(&mt_lock);
			p = radix_tree_delete(&mt_tree, 1);
			pthread_mutex_lock(&p->lock);
			p->count--;
			pthread_mutex_unlock(&p->lock);
			pthread_mutex_unlock(&mt_lock);
			page_free(p);

			pthread_mutex_lock(&mt_lock);
			p = radix_tree_delete(&mt_tree, 0);
			pthread_mutex_lock(&p->lock);
			p->count--;
			pthread_mutex_unlock(&p->lock);
			pthread_mutex_unlock(&mt_lock);
			page_free(p);
		}
	} else {
		int j;

		for (j = 0; j < 100000000; j++) {
			struct page *pages[10];

			find_get_pages(0, 10, pages);
		}
	}

	rcu_unregister_thread();

	return NULL;
}
Example #12
/** Waits until jobs arrive in the dispatch queue and processes them. */
static void * workerLoop(struct workerStartData *startInfo) {
    /* Initialize the (thread-local) random seed */
    UA_random_seed((uintptr_t)startInfo);

    rcu_register_thread();
    UA_UInt32 *c = UA_malloc(sizeof(UA_UInt32));
    uatomic_set(c, 0);
    *startInfo->workerCounter = c;
    UA_Server *server = startInfo->server;
    UA_free(startInfo);

    pthread_mutex_t mutex; // required for the condition variable
    pthread_mutex_init(&mutex,0);
    pthread_mutex_lock(&mutex);
    struct timespec to;

    while(*server->running) {
        struct DispatchJobsList *wln = (struct DispatchJobsList*)
            cds_wfcq_dequeue_blocking(&server->dispatchQueue_head, &server->dispatchQueue_tail);
        if(wln) {
            processJobs(server, wln->jobs, wln->jobsSize);
            UA_free(wln->jobs);
            UA_free(wln);
        } else {
            /* sleep until a work arrives (and wakes up all worker threads) */
            #if defined(__APPLE__) || defined(__MACH__) // OS X does not have clock_gettime, use clock_get_time
              clock_serv_t cclock;
              mach_timespec_t mts;
              host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
              clock_get_time(cclock, &mts);
              mach_port_deallocate(mach_task_self(), cclock);
              to.tv_sec = mts.tv_sec;
              to.tv_nsec = mts.tv_nsec;
            #else
              clock_gettime(CLOCK_REALTIME, &to);
            #endif
            to.tv_sec += 2;
            pthread_cond_timedwait(&server->dispatchQueue_condition, &mutex, &to);
        }
        uatomic_inc(c); // increment the workerCounter
    }
    pthread_mutex_unlock(&mutex);
    pthread_mutex_destroy(&mutex);

    rcu_barrier(); // wait for all scheduled call_rcu work to complete
    rcu_unregister_thread();

    /* we need to return _something_ for pthreads */
    return NULL;
}
Example #13
static void *rcu_read_stress_test(void *arg)
{
    int i;
    int itercnt = 0;
    struct rcu_stress *p;
    int pc;
    long long n_reads_local = 0;
    long long rcu_stress_local[RCU_STRESS_PIPE_LEN + 1] = { 0 };
    volatile int garbage = 0;

    rcu_register_thread();

    *(struct rcu_reader_data **)arg = &rcu_reader;
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        rcu_read_lock();
        p = atomic_rcu_read(&rcu_stress_current);
        if (p->mbtest == 0) {
            n_mberror++;
        }
        /* deliberately nested read-side critical section */
        rcu_read_lock();
        for (i = 0; i < 100; i++) {
            garbage++;
        }
        rcu_read_unlock();
        pc = p->pipe_count;
        rcu_read_unlock();
        if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0)) {
            pc = RCU_STRESS_PIPE_LEN;
        }
        rcu_stress_local[pc]++;
        n_reads_local++;
        if ((++itercnt % 0x1000) == 0) {
            synchronize_rcu();
        }
    }
    qemu_mutex_lock(&counts_mutex);
    n_reads += n_reads_local;
    for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
        rcu_stress_count[i] += rcu_stress_local[i];
    }
    qemu_mutex_unlock(&counts_mutex);

    rcu_unregister_thread();
    return NULL;
}
Example #14
void *thr_dequeuer(void *_count)
{
    unsigned long long *count = _count;

    printf_verbose("thread_begin %s, tid %lu\n",
                   "dequeuer", urcu_get_thread_id());

    set_affinity();

    rcu_register_thread();

    while (!test_go)
    {
    }
    cmm_smp_mb();

    for (;;) {
        struct cds_lfq_node_rcu *qnode;

        rcu_read_lock();
        qnode = cds_lfq_dequeue_rcu(&q);
        rcu_read_unlock();

        if (qnode) {
            struct test *node;

            node = caa_container_of(qnode, struct test, list);
            call_rcu(&node->rcu, free_node_cb);
            URCU_TLS(nr_successful_dequeues)++;
        }

        URCU_TLS(nr_dequeues)++;
        if (caa_unlikely(!test_duration_dequeue()))
            break;
        if (caa_unlikely(rduration))
            loop_sleep(rduration);
    }

    rcu_unregister_thread();
    printf_verbose("dequeuer thread_end, tid %lu, "
                   "dequeues %llu, successful_dequeues %llu\n",
                   urcu_get_thread_id(),
                   URCU_TLS(nr_dequeues),
                   URCU_TLS(nr_successful_dequeues));
    count[0] = URCU_TLS(nr_dequeues);
    count[1] = URCU_TLS(nr_successful_dequeues);
    return ((void*)2);
}
Example #15
static void *writer_fn(void *arg)
{
	int i;

	rcu_register_thread();
	pthread_barrier_wait(&worker_barrier);

	for (i = 0; i < 1000000; i++) {
		radix_tree_insert(&mt_tree, 1, &obj1);
		radix_tree_delete(&mt_tree, 1);
	}

	rcu_unregister_thread();

	return NULL;
}
Example #16
static void *rcu_fake_update_stress_test(void *arg)
{
    rcu_register_thread();

    *(struct rcu_reader_data **)arg = &rcu_reader;
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        synchronize_rcu();
        g_usleep(1000);
    }

    rcu_unregister_thread();
    return NULL;
}
Example #17
File: main.c Project: 020gzh/linux
int main(void)
{
	rcu_register_thread();
	radix_tree_init();

	regression1_test();
	regression2_test();
	regression3_test();
	single_thread_tests();

	sleep(1);
	printf("after sleep(1): %d allocated\n", nr_allocated);
	rcu_unregister_thread();

	exit(0);
}
Example #18
void *thr_enqueuer(void *_count)
{
    unsigned long long *count = _count;

    printf_verbose("thread_begin %s, tid %lu\n",
                   "enqueuer", urcu_get_thread_id());

    set_affinity();

    rcu_register_thread();

    while (!test_go)
    {
    }
    cmm_smp_mb();

    for (;;) {
        struct test *node = malloc(sizeof(*node));
        if (!node)
            goto fail;
        cds_lfq_node_init_rcu(&node->list);
        rcu_read_lock();
        cds_lfq_enqueue_rcu(&q, &node->list);
        rcu_read_unlock();
        URCU_TLS(nr_successful_enqueues)++;

        if (caa_unlikely(wdelay))
            loop_sleep(wdelay);
fail:
        URCU_TLS(nr_enqueues)++;
        if (caa_unlikely(!test_duration_enqueue()))
            break;
    }

    rcu_unregister_thread();

    count[0] = URCU_TLS(nr_enqueues);
    count[1] = URCU_TLS(nr_successful_enqueues);
    printf_verbose("enqueuer thread_end, tid %lu, "
                   "enqueues %llu successful_enqueues %llu\n",
                   urcu_get_thread_id(),
                   URCU_TLS(nr_enqueues),
                   URCU_TLS(nr_successful_enqueues));
    return ((void*)1);
}
Example #19
int main(int argc, char **argv)
{
	bool long_run = false;
	int opt;
	unsigned int seed = time(NULL);

	while ((opt = getopt(argc, argv, "ls:v")) != -1) {
		if (opt == 'l')
			long_run = true;
		else if (opt == 's')
			seed = strtoul(optarg, NULL, 0);
		else if (opt == 'v')
			test_verbose++;
	}

	printf("random seed %u\n", seed);
	srand(seed);

	printf("running tests\n");

	rcu_register_thread();
	radix_tree_init();

	xarray_tests();
	regression1_test();
	regression2_test();
	regression3_test();
	regression4_test();
	iteration_test(0, 10 + 90 * long_run);
	iteration_test(7, 10 + 90 * long_run);
	single_thread_tests(long_run);

	/* Free any remaining preallocated nodes */
	radix_tree_cpu_dead(0);

	benchmark();

	rcu_barrier();
	printv(2, "after rcu_barrier: %d allocated, preempt %d\n",
		nr_allocated, preempt_count);
	rcu_unregister_thread();

	printf("tests completed\n");

	exit(0);
}
Example #20
static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    rcu_register_thread();

    my_iothread = iothread;
    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    while (!atomic_read(&iothread->stopping)) {
        aio_poll(iothread->ctx, true);
    }

    rcu_unregister_thread();
    return NULL;
}
Example #21
static void *reader_fn(void *arg)
{
	int i;
	void *entry;

	rcu_register_thread();
	pthread_barrier_wait(&worker_barrier);

	for (i = 0; i < 1000000; i++) {
		rcu_read_lock();
		entry = radix_tree_lookup(&mt_tree, 0);
		rcu_read_unlock();
		if (entry != &obj0) {
			printf("iteration %d bad entry = %p\n", i, entry);
			abort();
		}
	}

	rcu_unregister_thread();

	return NULL;
}
Example #22
static void *rcu_update_perf_test(void *arg)
{
    long long n_updates_local = 0;

    rcu_register_thread();

    *(struct rcu_reader_data **)arg = &rcu_reader;
    atomic_inc(&nthreadsrunning);
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        synchronize_rcu();
        n_updates_local++;
    }
    qemu_mutex_lock(&counts_mutex);
    n_updates += n_updates_local;
    qemu_mutex_unlock(&counts_mutex);

    rcu_unregister_thread();
    return NULL;
}
Example #23
static
void *output_thread_fct(void *data)
{
	DBG("In user output thread.");
	rcu_register_thread();

	/* Refresh the output periodically until asked to exit */
	while (!CMM_LOAD_SHARED(exit_program)) {
		if (!CMM_LOAD_SHARED(hide_output)) {
			pthread_mutex_lock(&print_output_mutex);
			DBG("Refresh screen.");

			do_print_output();

			fflush(stdout);
			pthread_mutex_unlock(&print_output_mutex);
		}
		sleep(URCU_GAME_REFRESH_PERIOD);
	}

	rcu_unregister_thread();
	DBG("User output thread exiting.");
	return NULL;
}
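Note: Example #23 polls the exit_program and hide_output flags with CMM_LOAD_SHARED(), liburcu's primitive for deliberate lock-free reads of shared variables (it keeps the compiler from caching or tearing the load). The writer side pairs it with CMM_STORE_SHARED(); a sketch (request_exit() is invented for the illustration):

#include <urcu/system.h>

static int exit_program;

/* Sketch: writer side of the flag polled by the output thread. */
static void request_exit(void)
{
    CMM_STORE_SHARED(exit_program, 1);
}
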
Example #24
/*
 * Thread managing health check socket.
 */
void *thread_manage_health(void *data)
{
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct health_comm_msg msg;
	struct health_comm_reply reply;
	int is_root;

	DBG("[thread] Manage health check started");

	setup_health_path();

	rcu_register_thread();

	/* We might hit an error path before this is created. */
	lttng_poll_init(&events);

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(health_unix_sock_path);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		ret = -1;
		goto error;
	}

	is_root = !getuid();
	if (is_root) {
		/* lttng health client socket path permissions */
		ret = chown(health_unix_sock_path, 0,
				utils_get_group_id(tracing_group_name));
		if (ret < 0) {
			ERR("Unable to set group on %s", health_unix_sock_path);
			PERROR("chown");
			ret = -1;
			goto error;
		}

		ret = chmod(health_unix_sock_path,
				S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
		if (ret < 0) {
			ERR("Unable to set permissions on %s", health_unix_sock_path);
			PERROR("chmod");
			ret = -1;
			goto error;
		}
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	/* Poll set of size two: thread quit pipe and client socket */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto error;
	}

	ret = lttng_poll_add(&events, health_quit_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	/* Perform prior memory accesses before decrementing ready */
	cmm_smp_mb__before_uatomic_dec();
	uatomic_dec(&lttng_consumer_ready);

	while (1) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_health_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
						&& !(revents & LPOLLIN)) {
					ERR("Health socket poll error");
					goto error;
				}
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			new_sock = -1;
			continue;
		}

		rcu_thread_online();

		assert(msg.cmd == HEALTH_CMD_CHECK);

		memset(&reply, 0, sizeof(reply));
		for (i = 0; i < NR_HEALTH_CONSUMERD_TYPES; i++) {
			/*
			 * health_check_state() returns 0 if the thread
			 * is in error.
			 */
			if (!health_check_state(health_consumerd, i)) {
				reply.ret_code |= 1ULL << i;
			}
		}

		DBG("Health check return value %" PRIx64, reply.ret_code);

		ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
		new_sock = -1;
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(health_unix_sock_path);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);

	rcu_unregister_thread();
	return NULL;
}
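Note: Example #24 calls rcu_thread_online() only after the blocking accept()/recv() steps. The bracketing matters for the QSBR flavor, where a registered thread counts as a reader unless it is explicitly offline, so blocking while online would stall every grace period (the other flavors provide online/offline as no-ops for portability). The usual shape, sketched for urcu-qsbr (wait_for_event() is a placeholder for any blocking call):

#include <urcu-qsbr.h>

static int wait_for_event(void)
{
    return 0; /* placeholder for a blocking accept()/poll()/read() */
}

static void event_loop(void)
{
    for (;;) {
        rcu_thread_offline(); /* do not stall grace periods while blocked */
        int ev = wait_for_event();
        rcu_thread_online();  /* reader status restored */

        if (ev < 0)
            break;
        /* RCU read-side operations are legal again here */
    }
}
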
Example #25
File: colo.c Project: CTU-IIG/qemu
void *colo_process_incoming_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    QEMUFile *fb = NULL;
    QIOChannelBuffer *bioc = NULL; /* Cache incoming device state */
    uint64_t total_size;
    uint64_t value;
    Error *local_err = NULL;
    int ret;

    rcu_register_thread();
    qemu_sem_init(&mis->colo_incoming_sem, 0);

    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COLO);

    failover_init_state();

    mis->to_src_file = qemu_file_get_return_path(mis->from_src_file);
    if (!mis->to_src_file) {
        error_report("COLO incoming thread: Open QEMUFile to_src_file failed");
        goto out;
    }
    /*
     * Note: the communication between Primary side and Secondary side
     * should be sequential, we set the fd to unblocked in migration incoming
     * coroutine, and here we are in the COLO incoming thread, so it is ok to
     * set the fd back to blocked.
     */
    qemu_file_set_blocking(mis->from_src_file, true);

    bioc = qio_channel_buffer_new(COLO_BUFFER_BASE_SIZE);
    fb = qemu_fopen_channel_input(QIO_CHANNEL(bioc));
    object_unref(OBJECT(bioc));

    qemu_mutex_lock_iothread();
#ifdef CONFIG_REPLICATION
    replication_start_all(REPLICATION_MODE_SECONDARY, &local_err);
    if (local_err) {
        qemu_mutex_unlock_iothread();
        goto out;
    }
#else
    abort();
#endif
    vm_start();
    trace_colo_vm_state_change("stop", "run");
    qemu_mutex_unlock_iothread();

    colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_READY,
                      &local_err);
    if (local_err) {
        goto out;
    }

    while (mis->state == MIGRATION_STATUS_COLO) {
        int request = 0;

        colo_wait_handle_message(mis->from_src_file, &request, &local_err);
        if (local_err) {
            goto out;
        }
        assert(request);
        if (failover_get_state() != FAILOVER_STATUS_NONE) {
            error_report("failover request");
            goto out;
        }

        qemu_mutex_lock_iothread();
        vm_stop_force_state(RUN_STATE_COLO);
        trace_colo_vm_state_change("run", "stop");
        qemu_mutex_unlock_iothread();

        /* FIXME: This is unnecessary for periodic checkpoint mode */
        colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_REPLY,
                     &local_err);
        if (local_err) {
            goto out;
        }

        colo_receive_check_message(mis->from_src_file,
                           COLO_MESSAGE_VMSTATE_SEND, &local_err);
        if (local_err) {
            goto out;
        }

        qemu_mutex_lock_iothread();
        cpu_synchronize_all_pre_loadvm();
        ret = qemu_loadvm_state_main(mis->from_src_file, mis);
        qemu_mutex_unlock_iothread();

        if (ret < 0) {
            error_report("Load VM's live state (ram) error");
            goto out;
        }

        value = colo_receive_message_value(mis->from_src_file,
                                 COLO_MESSAGE_VMSTATE_SIZE, &local_err);
        if (local_err) {
            goto out;
        }

        /*
         * Read VM device state data into channel buffer,
         * It's better to re-use the memory allocated.
         * Here we need to handle the channel buffer directly.
         */
        if (value > bioc->capacity) {
            bioc->capacity = value;
            bioc->data = g_realloc(bioc->data, bioc->capacity);
        }
        total_size = qemu_get_buffer(mis->from_src_file, bioc->data, value);
        if (total_size != value) {
            error_report("Got %" PRIu64 " VMState data, less than expected"
                        " %" PRIu64, total_size, value);
            goto out;
        }
        bioc->usage = total_size;
        qio_channel_io_seek(QIO_CHANNEL(bioc), 0, 0, NULL);

        colo_send_message(mis->to_src_file, COLO_MESSAGE_VMSTATE_RECEIVED,
                     &local_err);
        if (local_err) {
            goto out;
        }

        qemu_mutex_lock_iothread();
        vmstate_loading = true;
        ret = qemu_load_device_state(fb);
        if (ret < 0) {
            error_report("COLO: load device state failed");
            qemu_mutex_unlock_iothread();
            goto out;
        }

#ifdef CONFIG_REPLICATION
        replication_get_error_all(&local_err);
        if (local_err) {
            qemu_mutex_unlock_iothread();
            goto out;
        }

        /* discard colo disk buffer */
        replication_do_checkpoint_all(&local_err);
        if (local_err) {
            qemu_mutex_unlock_iothread();
            goto out;
        }
#else
        abort();
#endif
        /* Notify all filters of all NIC to do checkpoint */
        colo_notify_filters_event(COLO_EVENT_CHECKPOINT, &local_err);

        if (local_err) {
            qemu_mutex_unlock_iothread();
            goto out;
        }

        vmstate_loading = false;
        vm_start();
        trace_colo_vm_state_change("stop", "run");
        qemu_mutex_unlock_iothread();

        if (failover_get_state() == FAILOVER_STATUS_RELAUNCH) {
            failover_set_state(FAILOVER_STATUS_RELAUNCH,
                            FAILOVER_STATUS_NONE);
            failover_request_active(NULL);
            goto out;
        }

        colo_send_message(mis->to_src_file, COLO_MESSAGE_VMSTATE_LOADED,
                     &local_err);
        if (local_err) {
            goto out;
        }
    }

out:
    vmstate_loading = false;
    /* Report any unreported error after exiting the loop */
    if (local_err) {
        error_report_err(local_err);
    }

    switch (failover_get_state()) {
    case FAILOVER_STATUS_NONE:
        qapi_event_send_colo_exit(COLO_MODE_SECONDARY,
                                  COLO_EXIT_REASON_ERROR);
        break;
    case FAILOVER_STATUS_REQUIRE:
        qapi_event_send_colo_exit(COLO_MODE_SECONDARY,
                                  COLO_EXIT_REASON_REQUEST);
        break;
    default:
        abort();
    }

    if (fb) {
        qemu_fclose(fb);
    }

    /* Hopefully this wait will not take too long */
    qemu_sem_wait(&mis->colo_incoming_sem);
    qemu_sem_destroy(&mis->colo_incoming_sem);
    /* Must be called after failover BH is completed */
    if (mis->to_src_file) {
        qemu_fclose(mis->to_src_file);
    }
    migration_incoming_disable_colo();

    rcu_unregister_thread();
    return NULL;
}
Example #26
void* run(void* arg)
{
	unsigned long i;
	char iamreader;
	int key; long t = (long) arg; long nbn;//same cache line for the best rand!
	double start, end;
	node_t **hd;
	lock_t *lck;

	set_affinity(t);

	/************ init *****************/
	thd_ins = 0;
	thd_del = 0;
	thd_sea = 0;
	nbmallocs = 0;
	nbretry = 0;
	nbrelink = 0;
	nbreprev = 0;
	nbfl = 0;
	thread = t;
	thd_nbupdaters = nbupdaters;
	setsignals();
	iamreader = t >= nbupdaters ? 1 : 0;
	if(!iamreader)	prealloc();
	mysrand(t^time(NULL));
	/************** init done **************/

	/************** barrier ****************/
	atomic_xadd4(&ready, 1);
	while(!go) memory_barrier();

	/******************* START ****************/
	start = d_gettimeofday();
#ifdef RCU
	rcu_register_thread();
#endif

	i=0;
	do{
		key = myrand()%nbkeys;
		//key = rand_r(&thd_seed)%nbthreads;
		//key = (t+key)%nbkeys;
		//key = random() % nbkeys;
		//if(i%100000) printf("%d %d\n", thread, key);

		get_buckets(key, &hd, &lck);
		if(iamreader)
		{
			thd_sea += search(key, hd, lck);
			if(i>= NB_TEST) break;
		}
		else
		{
			if(i%2)
				thd_ins += insert(key, hd, lck);
			else
				thd_del += delete(key, hd, lck);
			if(done) {
				//printf("writer stopped\n");
				break;
			}
		}
		//if(!(i%10000)) 
			//printf("%d loop %d\n", t, i);
#ifdef RCU_QSBR
#if ((defined RCU_QSBR_ACCELERATE) || (defined RCU_Q10))
#ifdef RCU_Q10
		if(!(i%10)) 
#else
		if(!(i%100)) 
#endif
#endif
			rcu_quiescent_state();
#endif
	}while(++i);

#ifdef RCU
	//if(!iamreader) rcu_barrier();
	//printf("iamreader %d, ops %d\n", iamreader, i);
	rcu_unregister_thread();
#endif

	end = d_gettimeofday(); 
	/******************* END ****************/

	
	//number of ops done
	thd_ops[t] = i;
	
	//printf("nbmallocs %g\n", nbmallocs);
	thd_mallocs[t] = nbmallocs;
	thd_retry[t] = nbretry;
	thd_relink[t] = nbrelink;
	thd_reprev[t] = nbreprev;

	//if(t==0) printf("%lu | nbblocked %g\n", t, nbblockeds);
#ifdef RCU
	thd_blockeds[t] = atomic_read_ptr(&rcublockeds);
#else
	thd_blockeds[t] = nbblockeds;
#endif

	//average time per ops
	avg_time[t] = (((end - start))/i);

	//total time
	thd_time[t] = (end - start);

	suc_ins[t] = thd_ins;
	suc_sea[t] = thd_sea;
	suc_del[t] = thd_del;

	return NULL;
}
Example #27
/*!
 * \brief Thread entrypoint function.
 *
 * When a thread is created and started, it immediately enters this function.
 * Depending on the thread state, it either enters its runnable or
 * blocks until it is awakened.
 *
 * This function also handles the "ThreadIdle" state, which allows threads to
 * be suspended and resumed quickly to mitigate thread creation costs. The
 * thread's runnable may also be changed at runtime to alter its behavior.
 */
static void *thread_ep(void *data)
{
	// Check data
	dthread_t *thread = (dthread_t *)data;
	if (thread == 0) {
		return 0;
	}

	// Check if is a member of unit
	dt_unit_t *unit = thread->unit;
	if (unit == 0) {
		return 0;
	}

	// Unblock SIGALRM
	sigset_t mask;
	sigemptyset(&mask);
	sigaddset(&mask, SIGALRM);
	pthread_sigmask(SIG_UNBLOCK, &mask, NULL);

	rcu_register_thread();
	dbg_dt("dthreads: [%p] entered ep\n", thread);

	/* Drop capabilities except FS access. */
#ifdef HAVE_CAP_NG_H
	if (capng_have_capability(CAPNG_EFFECTIVE, CAP_SETPCAP)) {
		capng_type_t tp = CAPNG_EFFECTIVE|CAPNG_PERMITTED;
		capng_clear(CAPNG_SELECT_BOTH);
		capng_update(CAPNG_ADD, tp, CAP_DAC_OVERRIDE);
		capng_apply(CAPNG_SELECT_BOTH);
	}
#endif /* HAVE_CAP_NG_H */

	// Run loop
	for (;;) {

		// Check thread state
		lock_thread_rw(thread);
		if (thread->state == ThreadDead) {
			dbg_dt("dthreads: [%p] marked as dead\n", thread);
			unlock_thread_rw(thread);
			break;
		}

		// Update data
		thread->data = thread->_adata;
		runnable_t _run = thread->run;

		// Start runnable if thread is marked Active
		if ((thread->state == ThreadActive) && (thread->run != 0)) {
			unlock_thread_rw(thread);
			dbg_dt("dthreads: [%p] entering runnable\n", thread);
			_run(thread);
			dbg_dt("dthreads: [%p] exited runnable\n", thread);
		} else {
			unlock_thread_rw(thread);
		}

		// If the runnable was cancelled, start new iteration
		lock_thread_rw(thread);
		if (thread->state & ThreadCancelled) {
			dbg_dt("dthreads: [%p] cancelled\n", thread);
			thread->state &= ~ThreadCancelled;
			unlock_thread_rw(thread);
			continue;
		}
		unlock_thread_rw(thread);

		// Runnable finished without interruption, mark as Idle
		pthread_mutex_lock(&unit->_notify_mx);
		lock_thread_rw(thread);
		if (thread->state & ThreadActive) {
			thread->state &= ~ThreadActive;
			thread->state |= ThreadIdle;
		}

		// Go to sleep if idle
		if (thread->state & ThreadIdle) {
			unlock_thread_rw(thread);

			// Signalize state change
			unit_signalize_change(unit);

			// Wait for notification from unit
			dbg_dt("dthreads: [%p] going idle\n", thread);
			pthread_cond_wait(&unit->_notify, &unit->_notify_mx);
			pthread_mutex_unlock(&unit->_notify_mx);
			dbg_dt("dthreads: [%p] resumed from idle\n", thread);
		} else {
			unlock_thread_rw(thread);
			pthread_mutex_unlock(&unit->_notify_mx);
		}
	}

	// Thread destructor
	if (thread->destruct) {
		dbg_dt("dthreads: [%p] entering destructor\n", thread);
		thread->destruct(thread);
		dbg_dt("dthreads: [%p] exited destructor\n", thread);
	}

	// Report thread state change
	dbg_dt("dthreads: [%p] thread finished\n", thread);
	unit_signalize_change(unit);
	dbg_dt("dthreads: [%p] thread exited ep\n", thread);
	lock_thread_rw(thread);
	thread->state |= ThreadJoinable;
	unlock_thread_rw(thread);
	rcu_unregister_thread();

	// Return
	return 0;
}
Example #28
/*
 * This thread manages application notify communication.
 */
void *ust_thread_manage_notify(void *data)
{
	int i, ret, pollfd, err = -1;
	ssize_t size_ret;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[ust-thread] Manage application notify command");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond,
		HEALTH_SESSIOND_TYPE_APP_MANAGE_NOTIFY);

	if (testpoint(sessiond_thread_app_manage_notify)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	/* Add notify pipe to the pollset. */
	ret = lttng_poll_add(&events, apps_cmd_notify_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG3("[ust-thread] Manage notify polling on %d fds",
				LTTNG_POLL_GETNB(&events));

		/* Infinite blocking call, waiting for transmission */
restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_notify_pipe[0]) {
				int sock;

				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps notify command pipe error");
					goto error;
				} else if (!(revents & LPOLLIN)) {
					/* No POLLIN and no caught error; stop the thread. */
					ERR("Notify command pipe failed. revent: %u", revents);
					goto error;
				}

				/* Get socket from dispatch thread. */
				size_ret = lttng_read(apps_cmd_notify_pipe[0],
						&sock, sizeof(sock));
				if (size_ret < sizeof(sock)) {
					PERROR("read apps notify pipe");
					goto error;
				}
				health_code_update();

				ret = lttng_poll_add(&events, sock,
						LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP);
				if (ret < 0) {
					/*
					 * It's possible we've reached the max poll fd allowed.
					 * Let's close the socket but continue normal execution.
					 */
					ret = close(sock);
					if (ret) {
						PERROR("close notify socket %d", sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					continue;
				}
				DBG3("UST thread notify added sock %d to pollset", sock);
			} else {
				/*
				 * At this point, we know that a registered application
				 * triggered the event.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* The socket is closed after a grace period here. */
					ust_app_notify_sock_unregister(pollfd);
				} else if (revents & (LPOLLIN | LPOLLPRI)) {
					ret = ust_app_recv_notify(pollfd);
					if (ret < 0) {
						/*
						 * If the notification failed either the application is
						 * dead or an internal error happened. In both cases,
						 * we can only continue here. If the application is
						 * dead, an unregistration will follow or else the
						 * application will notice that we are not responding
						 * on that socket and will close it.
						 */
						continue;
					}
				} else {
					ERR("Unknown poll events %u for sock %d", revents, pollfd);
					continue;
				}
				health_code_update();
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(apps_cmd_notify_pipe);
	apps_cmd_notify_pipe[0] = apps_cmd_notify_pipe[1] = -1;
	DBG("Application notify communication apps thread cleanup complete");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}
Example #29
/*
 * Thread managing health check socket.
 */
static void *thread_manage_health(void *data)
{
	const bool is_root = (getuid() == 0);
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct health_comm_msg msg;
	struct health_comm_reply reply;
	/* Thread-specific quit pipe. */
	struct thread_notifiers *notifiers = data;
	const int quit_pipe_read_fd = lttng_pipe_get_readfd(
			notifiers->quit_pipe);

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/*
	 * Created with a size of two for:
	 *   - client socket
	 *   - thread quit pipe
	 */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(config.health_unix_sock_path.value);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		goto error;
	}

	if (is_root) {
		/* lttng health client socket path permissions */
		gid_t gid;

		ret = utils_get_group_id(config.tracing_group_name.value, true, &gid);
		if (ret) {
			/* Default to root group. */
			gid = 0;
		}

		ret = chown(config.health_unix_sock_path.value, 0, gid);
		if (ret < 0) {
			ERR("Unable to set group on %s", config.health_unix_sock_path.value);
			PERROR("chown");
			goto error;
		}

		ret = chmod(config.health_unix_sock_path.value,
				S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
		if (ret < 0) {
			ERR("Unable to set permissions on %s", config.health_unix_sock_path.value);
			PERROR("chmod");
			goto error;
		}
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	ret = lttng_poll_add(&events, quit_pipe_read_fd, LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	mark_thread_as_ready(notifiers);
	while (1) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Event on the registration socket */
			if (pollfd == sock) {
				if (revents & LPOLLIN) {
					continue;
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Health socket poll error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			} else {
				/* Event on the thread's quit pipe. */
				err = 0;
				goto exit;
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			continue;
		}

		rcu_thread_online();

		memset(&reply, 0, sizeof(reply));
		for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
			/*
			 * health_check_state returns 0 if health is
			 * bad.
			 */
			if (!health_check_state(health_sessiond, i)) {
				reply.ret_code |= 1ULL << i;
			}
		}

		DBG2("Health check return value %" PRIx64, reply.ret_code);

		ret = lttcomm_send_unix_sock(new_sock, (void *) &reply,
				sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(config.health_unix_sock_path.value);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);
	rcu_unregister_thread();
	return NULL;
}
Example #30
void *thread_ht_cleanup(void *data)
{
	int ret, i, pollfd, err = -1;
	ssize_t size_ret;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[ht-thread] startup.");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_HT_CLEANUP);

	health_code_update();

	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	/* Add pipe to the pollset. */
	ret = lttng_poll_add(&events, ht_cleanup_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG3("[ht-thread] Polling on %d fds.",
			LTTNG_POLL_GETNB(&events));

		/* Infinite blocking call, waiting for transmission */
restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			struct lttng_ht *ht;

			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}
			assert(pollfd == ht_cleanup_pipe[0]);

			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("ht cleanup pipe error");
				goto error;
			} else if (!(revents & LPOLLIN)) {
				/* No POLLIN and no caught error; stop the thread. */
				ERR("ht cleanup failed. revent: %u", revents);
				goto error;
			}

			/* Get socket from dispatch thread. */
			size_ret = lttng_read(ht_cleanup_pipe[0], &ht,
					sizeof(ht));
			if (size_ret < sizeof(ht)) {
				PERROR("ht cleanup notify pipe");
				goto error;
			}
			health_code_update();
			/*
			 * The whole point of this thread is to call
			 * lttng_ht_destroy from a context that is NOT:
			 * 1) a read-side RCU lock,
			 * 2) a call_rcu thread.
			 */
			lttng_ht_destroy(ht);

			health_code_update();
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
	utils_close_pipe(ht_cleanup_pipe);
	ht_cleanup_pipe[0] = ht_cleanup_pipe[1] = -1;
	DBG("[ust-thread] cleanup complete.");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}