Example #1
static void * workerLoop(UA_Worker *worker) {
    UA_Server *server = worker->server;
    UA_UInt32 *counter = &worker->counter;
    volatile UA_Boolean *running = &worker->running;
    
    /* Initialize the (thread-local) random seed with the memory address of the worker */
    UA_random_seed((uintptr_t)worker);
    rcu_register_thread();

    pthread_mutex_t mutex; // required for the condition variable
    pthread_mutex_init(&mutex,0);
    pthread_mutex_lock(&mutex);

    while(*running) {
        struct DispatchJobsList *wln = (struct DispatchJobsList*)
            cds_wfcq_dequeue_blocking(&server->dispatchQueue_head, &server->dispatchQueue_tail);
        if(!wln) {
            uatomic_inc(counter);
            /* sleep until work arrives (and wakes up all worker threads) */
            pthread_cond_wait(&server->dispatchQueue_condition, &mutex);
            continue;
        }
        processJobs(server, wln->jobs, wln->jobsSize);
        UA_free(wln->jobs);
        UA_free(wln);
        uatomic_inc(counter);
    }

    pthread_mutex_unlock(&mutex);
    pthread_mutex_destroy(&mutex);
    UA_ASSERT_RCU_UNLOCKED();
    rcu_barrier(); // wait for all scheduled call_rcu work to complete
    rcu_unregister_thread();
    return NULL;
}
Example #2
/*
 * Grab an additional reference to the passed in vnode info.
 *
 * The caller must already hold a reference to vnode_info; this function must
 * only be used to take an additional reference from code that needs the
 * vnode information to outlive the request structure.
 */
struct vnode_info *grab_vnode_info(struct vnode_info *vnode_info)
{
	assert(uatomic_read(&vnode_info->refcnt) > 0);

	uatomic_inc(&vnode_info->refcnt);
	return vnode_info;
}
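
Every reference taken by grab_vnode_info() has to be dropped again with put_vnode_info() (called in Example #8 below). The release side is not part of this listing; a minimal sketch of what it might look like, assuming the structure is heap-allocated and refcnt is a plain int reference count, is:

/* Hypothetical sketch, not taken from the example's source. */
#include <stdlib.h>
#include <urcu/uatomic.h>

struct vnode_info {
	int refcnt;
	/* ... node table etc. ... */
};

void put_vnode_info(struct vnode_info *vnode_info)
{
	if (!vnode_info)
		return;

	/* Drop one reference; free the structure once the last user is gone. */
	if (uatomic_sub_return(&vnode_info->refcnt, 1) == 0)
		free(vnode_info);
}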
Example #3
/** Waits until jobs arrive in the dispatch queue and processes them. */
static void * workerLoop(struct workerStartData *startInfo) {
    /* Initialize the (thread-local) random seed */
    UA_random_seed((uintptr_t)startInfo);

    rcu_register_thread();
    UA_UInt32 *c = UA_malloc(sizeof(UA_UInt32));
    uatomic_set(c, 0);
    *startInfo->workerCounter = c;
    UA_Server *server = startInfo->server;
    UA_free(startInfo);

    pthread_mutex_t mutex; // required for the condition variable
    pthread_mutex_init(&mutex,0);
    pthread_mutex_lock(&mutex);
    struct timespec to;

    while(*server->running) {
        struct DispatchJobsList *wln = (struct DispatchJobsList*)
            cds_wfcq_dequeue_blocking(&server->dispatchQueue_head, &server->dispatchQueue_tail);
        if(wln) {
            processJobs(server, wln->jobs, wln->jobsSize);
            UA_free(wln->jobs);
            UA_free(wln);
        } else {
            /* sleep until work arrives (and wakes up all worker threads) */
            #if defined(__APPLE__) || defined(__MACH__) // OS X does not have clock_gettime, use clock_get_time
              clock_serv_t cclock;
              mach_timespec_t mts;
              host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
              clock_get_time(cclock, &mts);
              mach_port_deallocate(mach_task_self(), cclock);
              to.tv_sec = mts.tv_sec;
              to.tv_nsec = mts.tv_nsec;
            #else
              clock_gettime(CLOCK_REALTIME, &to);
            #endif
            to.tv_sec += 2;
            pthread_cond_timedwait(&server->dispatchQueue_condition, &mutex, &to);
        }
        uatomic_inc(c); /* increment the worker counter */
    }
    pthread_mutex_unlock(&mutex);
    pthread_mutex_destroy(&mutex);

    rcu_barrier(); // wait for all scheduled call_rcu work to complete
    rcu_unregister_thread();

    /* we need to return _something_ for pthreads */
    return NULL;
}
Example #4
static void *thr_enqueuer(void *_count)
{
	unsigned long long *count = _count;
	bool was_nonempty;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"enqueuer", (unsigned long) pthread_self(),
			(unsigned long) gettid());

	set_affinity();

	/* busy-wait until the test start flag is raised by the main thread */
	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		struct cds_wfs_node *node = malloc(sizeof(*node));
		if (!node)
			goto fail;
		cds_wfs_node_init(node);
		was_nonempty = cds_wfs_push(&s, node);
		URCU_TLS(nr_successful_enqueues)++;
		if (!was_nonempty)
			URCU_TLS(nr_empty_dest_enqueues)++;

		if (caa_unlikely(wdelay))
			loop_sleep(wdelay);
fail:
		URCU_TLS(nr_enqueues)++;
		if (caa_unlikely(!test_duration_enqueue()))
			break;
	}

	uatomic_inc(&test_enqueue_stopped);
	count[0] = URCU_TLS(nr_enqueues);
	count[1] = URCU_TLS(nr_successful_enqueues);
	count[2] = URCU_TLS(nr_empty_dest_enqueues);
	printf_verbose("enqueuer thread_end, thread id : %lx, tid %lu, "
		       "enqueues %llu successful_enqueues %llu, "
		       "empty_dest_enqueues %llu\n",
		       (unsigned long) pthread_self(),
			(unsigned long) gettid(),
		       URCU_TLS(nr_enqueues),
		       URCU_TLS(nr_successful_enqueues),
		       URCU_TLS(nr_empty_dest_enqueues));
	return ((void*)1);
}
Example #5
void UA_NodeStore_iterate(const UA_NodeStore *ns, UA_NodeStore_nodeVisitor visitor) {
    struct cds_lfht     *ht = ns->ht;
    struct cds_lfht_iter iter;

    rcu_read_lock();
    cds_lfht_first(ht, &iter);
    while(iter.node != NULL) {
        struct nodeEntry *found_entry = (struct nodeEntry *)cds_lfht_iter_get_node(&iter);
        /* Take a reference so the entry stays valid for the visitor
           outside of the read-side critical section. */
        uatomic_inc(&found_entry->refcount);
        const UA_Node *node = &found_entry->node;
        rcu_read_unlock();
        visitor(node);
        UA_NodeStore_release((const UA_Node *)node);
        rcu_read_lock();
        cds_lfht_next(ht, &iter);
    }
    rcu_read_unlock();
}
Example #6
const UA_Node * UA_NodeStore_get(const UA_NodeStore *ns, const UA_NodeId *nodeid) {
    hash_t nhash = hash(nodeid);
    struct cds_lfht_iter iter;

    rcu_read_lock();
    cds_lfht_lookup(ns->ht, nhash, compare, nodeid, &iter);
    struct nodeEntry *found_entry = (struct nodeEntry *)cds_lfht_iter_get_node(&iter);

    if(!found_entry) {
        rcu_read_unlock();
        return NULL;
    }

    /* This is done inside the read-side critical section;
       the node cannot be marked dead while the read lock is held. */
    uatomic_inc(&found_entry->refcount);
    rcu_read_unlock();
    return &found_entry->node;
}
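
A caller that obtains a node this way has to hand the reference back with UA_NodeStore_release(), as Example #5 does after the visitor returns. A hypothetical caller sketch (visitSingleNode and its arguments are illustrative names, not taken from the library's sources):

static void visitSingleNode(const UA_NodeStore *ns, const UA_NodeId *nodeid) {
    const UA_Node *node = UA_NodeStore_get(ns, nodeid);
    if(!node)
        return;
    /* ... read from the node; the refcount taken by UA_NodeStore_get keeps it alive ... */
    UA_NodeStore_release(node);
}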
Example #7
/* Atomically increments the member counter without taking a lock. */
void inc() {
    uatomic_inc(&m_counter);
}
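
The snippet above is just a thin wrapper around the shared-counter idiom. A self-contained sketch, independent of the projects quoted here, that exercises the same liburcu primitive from plain C: four threads bump one counter with uatomic_inc() and the final value is read back with uatomic_read().

#include <pthread.h>
#include <stdio.h>
#include <urcu/uatomic.h>

static unsigned long counter;

static void *bump(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		uatomic_inc(&counter); /* lock-free atomic increment */
	return NULL;
}

int main(void)
{
	pthread_t threads[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, bump, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);

	/* Expected output: counter = 400000 */
	printf("counter = %lu\n", uatomic_read(&counter));
	return 0;
}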
Example #8
static void update_cluster_info(struct join_message *msg,
				struct sd_node *joined, struct sd_node *nodes,
				size_t nr_nodes)
{
	struct vnode_info *old_vnode_info = NULL;

	eprintf("status = %d, epoch = %d, finished: %d\n", msg->cluster_status,
		msg->epoch, sys->join_finished);

	sys->disable_recovery = msg->disable_recovery;

	if (!sys->join_finished)
		finish_join(msg, joined, nodes, nr_nodes);

	if (!sys->disable_recovery) {
		old_vnode_info = current_vnode_info;
		current_vnode_info = alloc_vnode_info(nodes, nr_nodes);
	}

	switch (msg->cluster_status) {
	case SD_STATUS_OK:
	case SD_STATUS_HALT:
		switch (sys->status) {
		case SD_STATUS_WAIT_FOR_FORMAT:
			sys->nr_copies = msg->nr_copies;
			sys->flags = msg->cluster_flags;

			set_cluster_copies(sys->nr_copies);
			set_cluster_flags(sys->flags);
			set_cluster_ctime(msg->ctime);
			/*FALLTHROUGH*/
		case SD_STATUS_WAIT_FOR_JOIN:
			get_vdi_bitmap(nodes, nr_nodes);
			break;
		default:
			break;
		}

		sys->status = msg->cluster_status;

		if (msg->inc_epoch) {
			if (!sys->disable_recovery) {
				uatomic_inc(&sys->epoch);
				log_current_epoch();
				clear_exceptional_node_lists();

				if (!old_vnode_info) {
					old_vnode_info =
						alloc_old_vnode_info(joined,
							nodes, nr_nodes);
				}

				start_recovery(current_vnode_info,
					       old_vnode_info);
			} else
				prepare_recovery(joined, nodes, nr_nodes);
		}

		if (have_enough_zones())
			sys->status = SD_STATUS_OK;
		break;
	default:
		sys->status = msg->cluster_status;
		break;
	}

	put_vnode_info(old_vnode_info);

	sockfd_cache_add(&joined->nid);
}