int update_futex(int fd, int active)
{
    size_t mmap_size = sysconf(_SC_PAGE_SIZE);
    char *wait_shm_mmap;
    int ret;

    wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
            MAP_SHARED, fd, 0);
    if (wait_shm_mmap == MAP_FAILED) {
        perror("mmap");
        goto error;
    }

    if (active) {
        uatomic_set((int32_t *) wait_shm_mmap, 1);
        futex_async((int32_t *) wait_shm_mmap, FUTEX_WAKE,
                INT_MAX, NULL, NULL, 0);
    } else {
        uatomic_set((int32_t *) wait_shm_mmap, 0);
    }
    ret = munmap(wait_shm_mmap, mmap_size);
    if (ret) {
        perror("Error unmapping wait shm");
        goto error;
    }

    return 0;

error:
    return -1;
}
/*
 * Update futex according to active or not. This scheme is used to wake every
 * libust waiting on the shared memory map futex, hence the INT_MAX used in
 * the futex() call. If active, we set the value and wake everyone; otherwise
 * we indicate that we are gone (cleanup() case).
 */
LTTNG_HIDDEN
void futex_wait_update(int32_t *futex, int active)
{
    if (active) {
        uatomic_set(futex, 1);
        futex_async(futex, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
    } else {
        uatomic_set(futex, 0);
    }

    DBG("Futex wait update active %d", active);
}
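futex_wait_update() above is only the wake side of the protocol. For context, here is a minimal sketch of a matching waiter, assuming the same futex_async() wrapper and a shared 32-bit word visible to both sides; this helper is illustrative and not part of the original source:

/* Hypothetical waiter counterpart: block until the shared word
 * becomes non-zero. Not part of the original source. */
static void futex_wait_until_active(int32_t *futex)
{
    /* Recheck after every wakeup: FUTEX_WAIT can return spuriously. */
    while (uatomic_read(futex) == 0) {
        /* Sleeps only if *futex still equals 0 (the compared value). */
        futex_async(futex, FUTEX_WAIT, 0, NULL, NULL, 0);
    }
}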
/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
            struct cds_list_head *cur_snap_readers,
            struct cds_list_head *qsreaders)
{
    unsigned int wait_loops = 0;
    struct rcu_reader *index, *tmp;

    /*
     * Wait for each thread URCU_TLS(rcu_reader).ctr to either
     * indicate quiescence (offline), or for them to observe the
     * current rcu_gp.ctr value.
     */
    for (;;) {
        if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
            wait_loops++;
        if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
            uatomic_set(&rcu_gp.futex, -1);
            /*
             * Write futex before write waiting (the other side
             * reads them in the opposite order).
             */
            cmm_smp_wmb();
            cds_list_for_each_entry(index, input_readers, node) {
                _CMM_STORE_SHARED(index->waiting, 1);
            }
            /* Write futex before read reader_gp */
            cmm_smp_mb();
        }
        /* ... remainder of the iteration elided ... */
    }
}
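The -1 stored into rcu_gp.futex above marks the grace-period thread as about to block. A sketch of the blocking helper it pairs with, modeled on liburcu's wait_gp(); the exact body differs between liburcu versions, so treat this as an approximation:

/* Sketch of the blocking helper paired with the -1 store above,
 * modeled on liburcu's wait_gp(); details may differ by version. */
static void wait_gp(void)
{
    /* Read reader_gp before reading the futex word. */
    cmm_smp_rmb();
    /* Sleep only if rcu_gp.futex still holds the "waiting" value -1. */
    if (uatomic_read(&rcu_gp.futex) == -1)
        futex_async(&rcu_gp.futex, FUTEX_WAIT, -1, NULL, NULL, 0);
}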
/*
 * Prepare futex.
 */
LTTNG_HIDDEN
void futex_nto1_prepare(int32_t *futex)
{
    uatomic_set(futex, -1);
    cmm_smp_mb();

    DBG("Futex n to 1 prepare done");
}
/*
 * Wake 1 futex.
 */
LTTNG_HIDDEN
void futex_nto1_wake(int32_t *futex)
{
    if (caa_unlikely(uatomic_read(futex) == -1)) {
        uatomic_set(futex, 0);
        futex_async(futex, FUTEX_WAKE, 1, NULL, NULL, 0);
    }

    DBG("Futex n to 1 wake done");
}
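futex_nto1_prepare() and futex_nto1_wake() only make sense together with a wait side: prepare arms the futex with -1, the waiter blocks while it reads -1, and wake resets it to 0 before issuing FUTEX_WAKE. A sketch of that third function, in the style of lttng-tools' futex_nto1_wait(), which is not among the excerpts above:

/* Sketch of the wait side completing the prepare/wake pair; written in
 * the style of lttng-tools' futex_nto1_wait(), assumed rather than quoted. */
LTTNG_HIDDEN
void futex_nto1_wait(int32_t *futex)
{
    cmm_smp_mb();
    /* Blocks only while *futex still reads -1 (set by prepare). */
    futex_async(futex, FUTEX_WAIT, -1, NULL, NULL, 0);
    uatomic_set(futex, 0);
    DBG("Futex n to 1 wait done");
}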
int update_futex(int fd, int active)
{
    long page_size;
    char *wait_shm_mmap;
    int ret;

    page_size = sysconf(_SC_PAGE_SIZE);
    if (page_size <= 0) {
        if (!page_size) {
            errno = EINVAL;
        }
        perror("Error in sysconf(_SC_PAGE_SIZE)");
        goto error;
    }

    wait_shm_mmap = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
            MAP_SHARED, fd, 0);
    if (wait_shm_mmap == MAP_FAILED) {
        perror("mmap");
        goto error;
    }

    if (active) {
        uatomic_set((int32_t *) wait_shm_mmap, 1);
        if (futex_async((int32_t *) wait_shm_mmap, FUTEX_WAKE,
                INT_MAX, NULL, NULL, 0) < 0) {
            perror("futex_async");
            goto error;
        }
    } else {
        uatomic_set((int32_t *) wait_shm_mmap, 0);
    }
    ret = munmap(wait_shm_mmap, page_size);
    if (ret) {
        perror("Error unmapping wait shm");
        goto error;
    }

    return 0;

error:
    return -1;
}
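Both update_futex() variants above are the producer side: they flip a 32-bit flag at offset 0 of a shared-memory file and FUTEX_WAKE every waiter. A sketch of what the consumer side plausibly looks like, assuming the same fd-backed mapping and futex_async() wrapper; the function name and details here are illustrative, not taken from the original source:

/* Illustrative application-side waiter for the shm word that
 * update_futex() flips; name and error handling are hypothetical. */
static int wait_for_active(int fd)
{
    long page_size = sysconf(_SC_PAGE_SIZE);
    int32_t *state;

    if (page_size <= 0)
        return -1;
    /* The waiter only reads the flag; the producer owns the writes. */
    state = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
    if (state == MAP_FAILED)
        return -1;
    /* Sleep while the flag is 0; recheck after each wakeup. */
    while (uatomic_read(state) == 0)
        futex_async(state, FUTEX_WAIT, 0, NULL, NULL, 0);
    munmap(state, page_size);
    return 0;
}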
/*
 * Wake 1 futex.
 */
LTTNG_HIDDEN
void futex_nto1_wake(int32_t *futex)
{
    if (caa_unlikely(uatomic_read(futex) != -1))
        goto end;
    uatomic_set(futex, 0);
    if (futex_async(futex, FUTEX_WAKE, 1, NULL, NULL, 0) < 0) {
        PERROR("futex_async");
        abort();
    }
end:
    DBG("Futex n to 1 wake done");
}
/** Waits until jobs arrive in the dispatch queue and processes them. */
static void *workerLoop(struct workerStartData *startInfo)
{
    /* Initialize the (thread-local) random seed */
    UA_random_seed((uintptr_t)startInfo);
    rcu_register_thread();

    UA_UInt32 *c = UA_malloc(sizeof(UA_UInt32));
    uatomic_set(c, 0);
    *startInfo->workerCounter = c;
    UA_Server *server = startInfo->server;
    UA_free(startInfo);

    pthread_mutex_t mutex; // required for the condition variable
    pthread_mutex_init(&mutex, 0);
    pthread_mutex_lock(&mutex);
    struct timespec to;

    while(*server->running) {
        struct DispatchJobsList *wln = (struct DispatchJobsList*)
            cds_wfcq_dequeue_blocking(&server->dispatchQueue_head,
                                      &server->dispatchQueue_tail);
        if(wln) {
            processJobs(server, wln->jobs, wln->jobsSize);
            UA_free(wln->jobs);
            UA_free(wln);
        } else {
            /* sleep until work arrives (and wakes up all worker threads) */
#if defined(__APPLE__) || defined(__MACH__)
            // OS X does not have clock_gettime, use clock_get_time
            clock_serv_t cclock;
            mach_timespec_t mts;
            host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
            clock_get_time(cclock, &mts);
            mach_port_deallocate(mach_task_self(), cclock);
            to.tv_sec = mts.tv_sec;
            to.tv_nsec = mts.tv_nsec;
#else
            clock_gettime(CLOCK_REALTIME, &to);
#endif
            to.tv_sec += 2;
            pthread_cond_timedwait(&server->dispatchQueue_condition, &mutex, &to);
        }
        uatomic_inc(c); // increment the worker counter
    }

    pthread_mutex_unlock(&mutex);
    pthread_mutex_destroy(&mutex);
    rcu_barrier(); // wait for all scheduled call_rcu work to complete
    rcu_unregister_thread();

    /* we need to return _something_ for pthreads */
    return NULL;
}
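workerLoop() is the consumer half of the dispatch queue. A sketch of the producer half that would feed it, assuming the wfcq queue and condition variable used above and a DispatchJobsList whose first member is a cds_wfcq_node (the consumer's cast implies this); the function name is illustrative:

/* Illustrative producer counterpart feeding workerLoop(); the name and
 * the node-first layout of DispatchJobsList are assumptions based on
 * the consumer code above. */
static void dispatchJobsList(UA_Server *server, struct DispatchJobsList *wln)
{
    cds_wfcq_node_init(&wln->node);
    /* Multi-producer-safe enqueue onto the lock-free queue. */
    cds_wfcq_enqueue(&server->dispatchQueue_head,
                     &server->dispatchQueue_tail, &wln->node);
    /* Wake every worker blocked in pthread_cond_timedwait(). */
    pthread_cond_broadcast(&server->dispatchQueue_condition);
}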
struct vnode_info *alloc_vnode_info(struct sd_node *nodes, size_t nr_nodes)
{
    struct vnode_info *vnode_info;

    vnode_info = xzalloc(sizeof(*vnode_info));

    vnode_info->nr_nodes = nr_nodes;
    memcpy(vnode_info->nodes, nodes, sizeof(*nodes) * nr_nodes);
    qsort(vnode_info->nodes, nr_nodes, sizeof(*nodes), node_id_cmp);

    recalculate_vnodes(vnode_info->nodes, nr_nodes);

    vnode_info->nr_vnodes = nodes_to_vnodes(vnode_info->nodes, nr_nodes,
                                            vnode_info->vnodes);
    vnode_info->nr_zones = get_zones_nr_from(nodes, nr_nodes);
    uatomic_set(&vnode_info->refcnt, 1);

    return vnode_info;
}
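The uatomic_set(&vnode_info->refcnt, 1) establishes the initial reference on the freshly allocated structure. A sketch of the grab/put pair that balances it, written in the style of sheepdog's refcounting rather than quoted from its source:

/* Sketch of the grab/put pair balancing the initial refcnt of 1;
 * modeled on sheepdog's style, not quoted from its source. */
struct vnode_info *grab_vnode_info(struct vnode_info *vnode_info)
{
    uatomic_inc(&vnode_info->refcnt);
    return vnode_info;
}

void put_vnode_info(struct vnode_info *vnode_info)
{
    /* Free only when the last reference is dropped. */
    if (vnode_info && uatomic_sub_return(&vnode_info->refcnt, 1) == 0)
        free(vnode_info);
}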
void *rcu_set_pointer_sym(void **p, void *v)
{
    cmm_wmb();
    return uatomic_set(p, v);
}
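rcu_set_pointer_sym() is the publish side of the RCU pointer protocol: the cmm_wmb() orders the object's initialization before the pointer store, so readers that dereference the pointer see a fully initialized object. A minimal usage sketch of both sides; the struct, variable, and function names are made up for the example:

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

/* Illustrative publish/read pairing; names are hypothetical. */
struct foo {
    int a;
};

static struct foo *global_foo;

static void publisher(void)
{
    struct foo *p = malloc(sizeof(*p));

    p->a = 42;                       /* initialize first ... */
    rcu_set_pointer(&global_foo, p); /* ... then publish (wmb before store) */
}

static void reader(void)
{
    struct foo *p;

    rcu_read_lock();
    p = rcu_dereference(global_foo); /* ordered read of the published pointer */
    if (p)
        printf("%d\n", p->a);
    rcu_read_unlock();
}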