lagopus_result_t
lagopus_callout_start_main_loop(void) {
  lagopus_result_t ret = LAGOPUS_RESULT_ANY_FAILURES;

  if (likely(s_is_handler_inited == true)) {
    if (s_n_workers > 0) {
      ret = s_start_callout_stage();
      if (likely(ret == LAGOPUS_RESULT_OK)) {
        s_do_loop = true;
        mbar();
        ret = s_start_callout_main_loop();
      }
    } else {
      s_do_loop = true;
      mbar();
      ret = s_start_callout_main_loop();
    }
  } else {
    ret = LAGOPUS_RESULT_NOT_OPERATIONAL;
  }

  return ret;
}
static void
s_once_proc(void) {
  gallus_result_t r;

  s_gstate = MODULE_GLOBAL_STATE_UNKNOWN;
  s_n_modules = 0;
  s_n_finalized_modules = 0;
  s_is_unloading = false;
  s_cur_module_idx = 0;
  s_initializer_tid = pthread_self();
  (void)__sync_add_and_fetch(&s_is_exit_handler_called, 0);
  (void)memset((void *)s_modules, 0, sizeof(s_modules));
  mbar();

  if ((r = gallus_mutex_create(&s_lck)) != GALLUS_RESULT_OK) {
    gallus_perror(r);
    gallus_exit_fatal("can't initialize a mutex.\n");
  }
  if ((r = gallus_cond_create(&s_cnd)) != GALLUS_RESULT_OK) {
    gallus_perror(r);
    gallus_exit_fatal("can't initialize a cond.\n");
  }

  if (atexit(s_atexit_handler) != 0) {
    gallus_perror(GALLUS_RESULT_POSIX_API_ERROR);
    gallus_exit_fatal("can't add an exit handler.\n");
  }
}
static inline lagopus_result_t
s_stop_callout_main_loop(void) {
  lagopus_result_t ret = LAGOPUS_RESULT_ANY_FAILURES;

  if (s_is_stopped == false) {
    if (s_do_loop == true) {
      /*
       * Stop the main loop first.
       */
      s_do_loop = false;
      mbar();

      (void)lagopus_bbq_wakeup(&s_urgent_tsk_q, -1LL);

      s_lock_sched();
      {
        while (s_is_stopped == false) {
          s_wait_sched(-1LL);
        }
      }
      s_unlock_sched();

      ret = LAGOPUS_RESULT_OK;
    } else {
      ret = LAGOPUS_RESULT_OK;
    }
  } else {
    ret = LAGOPUS_RESULT_OK;
  }

  return ret;
}
void UPC_L2_Reset() {
  int i;
  int j;

  // Switch to using UPC_C Sync Control
  uint64_t saveRunState = DCRReadUser(UPC_C_DCR(COUNTER_START));
  UPC_C_Stop_Sync_Counting();

  // No need to wait for counts to flush on L2:
  // low-granularity counts are cleared every 800 cycles
  // and residuals are considered low enough to ignore.

  // check if combining counts
  if (UPC_C_DCR__CONFIG__L2_COMBINE_get(upc_c->c_config)) {
    // clear only combined upc_c counts
    for (j = 0; j < UPC_L2_NUM_COUNTERS; j++) {
      upc_c->data16.grp[UPC_C_SRAM_BASE_L2_GROUP].counter[j] = 0;
    }
  } else {
    // clear all upc_c counts
    for (i = 0; i < L2_DCR_num; i++) {
      for (j = 0; j < UPC_L2_NUM_COUNTERS; j++) {
        upc_c->data16.grp[UPC_C_SRAM_BASE_L2_GROUP + i].counter[j] = 0;
      }
    }
  }

  if (saveRunState) {
    UPC_C_Start_Sync_Counting();
  }

  mbar();
}
//! \brief: UPC_L2_DisableUPC
void UPC_L2_DisableUPC() {
  int i;

  for (i = 0; i < L2_DCR_num; i++) {
    DCRWriteUser(L2_DCR(i, UPC_L2_CONFIG),
                 L2_DCR__UPC_L2_CONFIG__UPC_L2_RING_ENABLE_set(0));
    DCRWriteUser(L2_DCR(i, UPC_L2_COUNTER_CONTROL_W1C),
                 L2_DCR__UPC_L2_COUNTER_CONTROL_RW__ENABLE_set(0));
  }
  mbar();
}
// flush the cache
inline void cache_flush() {
#if defined BGQ
#if ! defined BGQ_EMU
  mbar();
#else
  __sync_synchronize();
#endif
#else
#pragma omp flush
#endif
}
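/*
 * A minimal usage sketch (not from the original source): cache_flush()
 * behaves as a full barrier/flush on every supported target, so a typical
 * pattern is to publish data before raising a ready flag, and to flush
 * again on the consumer side before reading.  The names shared_payload,
 * shared_ready, publish_payload and consume_payload below are hypothetical
 * placeholders, not part of the original code.
 */
static volatile int shared_payload;
static volatile int shared_ready;

static inline void publish_payload(int v) {
  shared_payload = v;   /* write the data first                 */
  cache_flush();        /* make the write visible to other threads */
  shared_ready = 1;     /* then raise the flag                  */
}

static inline int consume_payload(void) {
  while (shared_ready == 0) {
    cache_flush();      /* re-read the flag from memory         */
  }
  return shared_payload;
}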
static gallus_result_t
s_ingress_main(const gallus_pipeline_stage_t *sptr,
               size_t idx,
               void *evbuf,
               size_t n_evs) {
  gallus_result_t ret = GALLUS_RESULT_ANY_FAILURES;

  (void)evbuf;
  (void)n_evs;

  if (likely(sptr != NULL && *sptr != NULL)) {
    test_stage_t ts = (test_stage_t)(*sptr);
    base_stage_t bs = (base_stage_t)ts;

    if (likely(bs != NULL && bs->m_next_stg != NULL)) {
      if (likely(ts->m_states[idx] != test_stage_state_done)) {
        gallus_chrono_t start_time;
        uint64_t start_clock;
        uint64_t end_clock;
        uint64_t *addr = ts->m_data + ts->m_enq_infos[idx].m_offset;
        size_t len = ts->m_enq_infos[idx].m_length;

        WHAT_TIME_IS_IT_NOW_IN_NSEC(start_time);
        start_clock = gallus_rdtsc();
        ret = gallus_pipeline_stage_submit((gallus_pipeline_stage_t *)
                                           &(bs->m_next_stg),
                                           (void *)addr, len, (void *)idx);
        end_clock = gallus_rdtsc();

        gallus_atomic_update_min(uint64_t, &(ts->m_start_clock),
                                 0, start_clock);
        gallus_atomic_update_max(uint64_t, &(ts->m_end_clock),
                                 0, end_clock);
        gallus_atomic_update_min(gallus_chrono_t, &(ts->m_start_time),
                                 -1LL, start_time);

        ts->m_states[idx] = test_stage_state_done;
        mbar();
      } else {
        ret = s_test_stage_wait(ts);
      }
    } else {
      ret = GALLUS_RESULT_INVALID_ARGS;
    }
  } else {
    ret = GALLUS_RESULT_INVALID_ARGS;
  }

  return ret;
}
static void init_global(global_t *g,int id) {
  if (id == 0) {
#ifdef TIMELIMIT
    /* Starting time */
    g->start = timeofday() ;
#endif
    /* Global barrier */
    barrier_init(&g->gb,AVAIL) ;
    /* Align to cache line */
    uintptr_t x = (uintptr_t)(g->mem) ;
    x += LINE-1 ;
    x /= LINE ;
    x *= LINE ;
    intmax_t *m = (intmax_t *)x ;
    /* Instance contexts */
    for (int k = 0 ; k < NEXE ; k++) {
      instance_init(&g->ctx[k],k,m) ;
      m += NVARS*LINESZ ;
    }
    mbar() ;
    g->go = 1 ;
  } else {
    while (g->go == 0) ;
    mbar() ;
  }
}
void UPC_L2_EnableUPC(UpciBool_t indepCtl, UpciBool_t combine) {
  int i;

  for (i = 0; i < L2_DCR_num; i++) {
    DCRWriteUser(L2_DCR(i, UPC_L2_COUNTER_CONTROL_W1S),
                 L2_DCR__UPC_L2_COUNTER_CONTROL_RW__RESET_set(0xFFFFUL) |
                 L2_DCR__UPC_L2_COUNTER_CONTROL_RW__ENABLE_set(
                     (indepCtl ? 0 : 0xFFFFUL)));
    DCRWriteUser(L2_DCR(i, UPC_L2_CONFIG),
                 L2_DCR__UPC_L2_CONFIG__UPC_L2_RING_ENABLE_set(1));
    //| L2_DCR__UPC_L2_CONFIG__SYNC_OVERRIDE_set( (indepCtl ? 0xFFFFUL : 0) ) );
    // Switch to rely on sync start - rely on other counter types having override set.
  }
  UPC_C_DCR__CONFIG__L2_COMBINE_insert(upc_c->c_config,
                                       (combine == UpciTrue ? 1 : 0));
  mbar();
}
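/*
 * A minimal usage sketch (an assumption, not from the original source):
 * enable the L2 UPC units under central sync control, clear any residual
 * counts, run the region of interest, then disable the L2 count rings.
 * do_measured_work() is a hypothetical placeholder, and UpciFalse is
 * assumed to be the counterpart of UpciTrue in UpciBool_t.
 */
void l2_counting_example(void) {
  UPC_L2_EnableUPC(UpciFalse, UpciFalse);  /* sync-controlled, not combined */
  UPC_L2_Reset();                          /* clear residual counts         */

  do_measured_work();                      /* region of interest            */

  UPC_L2_DisableUPC();                     /* stop the L2 count rings       */
}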
void UPC_P_Clear_Unit( int unit ) {
#ifdef __HWSTUBS__
  memset(dummyUPC_P_Array, 0, 0x5000);
#endif
  upc_p_mmio_t *const upc_p = UPC_P_Addr(unit);

  // make sure it's quiet first
  upc_p->p_config = 0;

  // clear counter config
  int i;
  for (i = 0; i < UPC_P_NUM_COUNTERS; i++) {
    // counter config should always default to sync-override.
    // Otherwise a start signal from UPC_C will start counting regardless
    // of control run bit settings.
    upc_p->counter_cfg[i] = UPC_P__COUNTER_CFG__RESET;
  }

  // Reset counters and disable run
  upc_p->control = UPC_P__CONTROL__RESET_MASK;
  mbar();

  // Clear any counter inversions or edge detect configurations.
  upc_p->cfg_invert = 0;
  upc_p->cfg_edge = 0;

  // Ensure any interrupt status is clear
  upc_p->int_status_w1c = UPC_P__INT_STATUS__ALL_STATUS;

  // Clear opcode matches
  upc_p->opcode_cfg = 0;

  // Clear thread combining masks
  upc_p->lp1_tc_config0 = 0;
  upc_p->lp1_tc_config1 = 0;
  upc_p->a2_tc_config = 0;
}
bool
gallus_module_is_unloading(void) {
  mbar();
  return (s_n_modules > 0) ? s_is_unloading : true;
}
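/*
 * A minimal usage sketch (an assumption, not from the original source):
 * a module's static destructor can consult gallus_module_is_unloading()
 * to skip per-module cleanup while the whole process is being torn down.
 * my_module_dtor() and my_module_resources_destroy() are hypothetical
 * placeholders.
 */
static void __attribute__((destructor)) my_module_dtor(void) {
  if (gallus_module_is_unloading() == false) {
    my_module_resources_destroy();  /* safe to clean up individually */
  }
  /* Otherwise the process is exiting; let the OS reclaim everything. */
}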
static void
s_atexit_handler(void) {
  if (likely(__sync_fetch_and_add(&s_is_exit_handler_called, 1) == 0)) {
    gallus_result_t r;

    r = s_trylock();
    if (likely(r == GALLUS_RESULT_OK)) {
      bool is_finished_cleanly = false;

      if (s_n_modules > 0) {

     recheck:
        mbar();

        if (s_gstate == MODULE_GLOBAL_STATE_UNKNOWN) {
          is_finished_cleanly = true;
        } else if (s_gstate == MODULE_GLOBAL_STATE_STARTED) {
          (void)global_state_request_shutdown(SHUTDOWN_RIGHT_NOW);
        } else if (s_gstate != MODULE_GLOBAL_STATE_FINALIZED) {
          r = s_wait(100LL * 1000LL * 1000LL);
          if (r == GALLUS_RESULT_OK) {
            goto recheck;
          } else if (r == GALLUS_RESULT_TIMEDOUT) {
            gallus_msg_warning("Module finalization seems not completed.\n");
          } else {
            gallus_perror(r);
            gallus_msg_error("module finalization wait failed.\n");
          }
        } else {
          is_finished_cleanly = true;
        }
      }

      if (is_finished_cleanly == true) {
        s_is_unloading = true;
        mbar();
      }

      s_unlock();

    } else if (r == GALLUS_RESULT_BUSY) {
      /*
       * The lock acquisition failed.  Snoop s_gstate anyway.  Note that
       * it's safe since the modules are always accessed only by a single
       * thread and that thread is calling exit(3) at this moment.
       */
      if (s_gstate == MODULE_GLOBAL_STATE_UNKNOWN) {
        /*
         * No modules are initialized.  Just exit cleanly and let all
         * the static destructors run.
         */
        s_is_unloading = true;
        mbar();
      } else {
        if (pthread_self() == s_initializer_tid) {
          /*
           * Made sure that this very thread is the module
           * initializer, so we can safely unlock the lock.
           */
          switch (s_gstate) {
            case MODULE_GLOBAL_STATE_FINALIZING:
            case MODULE_GLOBAL_STATE_FINALIZED:
            case MODULE_GLOBAL_STATE_UNKNOWN: {
              s_unlock();
              /*
               * Nothing needs to be done.
               */
              break;
            }
            case MODULE_GLOBAL_STATE_INITIALIZING:
            case MODULE_GLOBAL_STATE_INITIALIZED:
            case MODULE_GLOBAL_STATE_STARTING: {
              s_unlock();
              /*
               * With this, only the modules that are safely finalizable
               * so far are finalized.
               */
              gallus_module_finalize_all();
              break;
            }
            case MODULE_GLOBAL_STATE_STARTED: {
              s_unlock();
              (void)global_state_request_shutdown(SHUTDOWN_RIGHT_NOW);
              break;
            }
            case MODULE_GLOBAL_STATE_SHUTTINGDOWN:
            case MODULE_GLOBAL_STATE_STOPPING:
            case MODULE_GLOBAL_STATE_WAITING:
            case MODULE_GLOBAL_STATE_SHUTDOWN: {
              s_unlock();
              /*
               * There's nothing we can do at this moment.
               */
              break;
            }
            default: {
              s_unlock();
              break;
            }
          }
        } else {  /* (pthread_self() == s_initializer_tid) */
          /*
           * This means that a thread other than the module initializer
           * is holding the lock.  There's nothing we can do at this
           * moment.
           */
          return;
        }
      }  /* (s_gstate == MODULE_GLOBAL_STATE_UNKNOWN) */
    }
  }
}
static inline lagopus_result_t
s_start_callout_main_loop(void) {
  lagopus_result_t ret = LAGOPUS_RESULT_ANY_FAILURES;
  global_state_t s;
  shutdown_grace_level_t l;

  ret = global_state_wait_for(GLOBAL_STATE_STARTED, &s, &l, -1LL);
  if (likely(ret == LAGOPUS_RESULT_OK)) {
    if (likely(s == GLOBAL_STATE_STARTED)) {

#ifdef CO_MSG_DEBUG
      lagopus_chrono_t timeout = s_idle_interval;
#else
      lagopus_chrono_t timeout;
#endif /* CO_MSG_DEBUG */

      lagopus_callout_task_t out_tasks[CALLOUT_TASK_MAX * 3];
      size_t n_out_tasks;

      lagopus_callout_task_t urgent_tasks[CALLOUT_TASK_MAX];
      lagopus_result_t sn_urgent_tasks;

      lagopus_callout_task_t idle_tasks[CALLOUT_TASK_MAX];
      lagopus_result_t sn_idle_tasks;

      lagopus_callout_task_t timed_tasks[CALLOUT_TASK_MAX];
      lagopus_result_t sn_timed_tasks;

      lagopus_result_t r;

      lagopus_chrono_t now;
      lagopus_chrono_t next_wakeup;
      lagopus_chrono_t prev_wakeup;

      int cstate = 0;

      WHAT_TIME_IS_IT_NOW_IN_NSEC(prev_wakeup);

      (void)lagopus_mutex_enter_critical(&s_sched_lck, &cstate);
      {
        s_is_stopped = false;
        mbar();

        while (s_do_loop == true) {

          n_out_tasks = 0;

          /*
           * Get the current time.
           */
          WHAT_TIME_IS_IT_NOW_IN_NSEC(now);

#ifdef CO_MSG_DEBUG
          lagopus_msg_debug(3, "now: " PF64(d) "\n", now);
          lagopus_msg_debug(3, "prv: " PF64(d) "\n", prev_wakeup);
          lagopus_msg_debug(3, "to: " PF64(d) "\n", timeout);
#endif /* CO_MSG_DEBUG */

          s_lock_global();
          {
            /*
             * Acquire the global lock to make the task
             * submission/fetch atomic.
             */
            sn_urgent_tasks =
                lagopus_bbq_get_n(&s_urgent_tsk_q, (void **)urgent_tasks,
                                  CALLOUT_TASK_MAX, 1LL,
                                  lagopus_callout_task_t, 0LL, NULL);
            sn_idle_tasks =
                lagopus_bbq_get_n(&s_idle_tsk_q, (void **)idle_tasks,
                                  CALLOUT_TASK_MAX, 1LL,
                                  lagopus_callout_task_t, 0LL, NULL);
          }
          s_unlock_global();

          /*
           * Pack the tasks into a buffer.
           */
          sn_timed_tasks = s_get_runnable_timed_task(now, timed_tasks,
                                                     CALLOUT_TASK_MAX,
                                                     &next_wakeup);
          if (sn_timed_tasks > 0) {
            /*
             * Pack the timed tasks.
             */
            (void)memcpy((void *)(out_tasks + n_out_tasks), timed_tasks,
                         (size_t)(sn_timed_tasks) *
                         sizeof(lagopus_callout_task_t));
            n_out_tasks += (size_t)sn_timed_tasks;

#ifdef CO_MSG_DEBUG
            lagopus_msg_debug(3, "timed task " PF64(u) ".\n",
                              sn_timed_tasks);
            lagopus_msg_debug(3, "nw: " PF64(d) ".\n", next_wakeup);
#endif /* CO_MSG_DEBUG */
          } else if (sn_timed_tasks < 0) {
            /*
             * We can't treat this as a fatal error.  Carry on.
             */
            lagopus_perror(sn_timed_tasks);
            lagopus_msg_error("timed tasks fetch failed.\n");
          }

          if (sn_urgent_tasks > 0) {
            /*
             * Pack the urgent tasks.
             */
            (void)memcpy((void *)(out_tasks + n_out_tasks), urgent_tasks,
                         (size_t)(sn_urgent_tasks) *
                         sizeof(lagopus_callout_task_t));
            n_out_tasks += (size_t)sn_urgent_tasks;
          } else if (sn_urgent_tasks < 0) {
            /*
             * We can't treat this as a fatal error.  Carry on.
             */
            lagopus_perror(sn_urgent_tasks);
            lagopus_msg_error("urgent tasks fetch failed.\n");
          }

          if (sn_idle_tasks > 0) {
            /*
             * Pack the idle tasks.
             */
            (void)memcpy((void *)(out_tasks + n_out_tasks), idle_tasks,
                         (size_t)(sn_idle_tasks) *
                         sizeof(lagopus_callout_task_t));
            n_out_tasks += (size_t)sn_idle_tasks;
          } else if (sn_idle_tasks < 0) {
            /*
             * We can't treat this as a fatal error.  Carry on.
             */
            lagopus_perror(sn_idle_tasks);
            lagopus_msg_error("idle tasks fetch failed.\n");
          }

          if (n_out_tasks > 0) {
            /*
             * Run/Submit the tasks.
             */
            r = (s_final_task_sched_proc)(out_tasks, now, n_out_tasks);
            if (unlikely(r <= 0)) {
              /*
               * We can't treat this as a fatal error.  Carry on.
               */
              lagopus_perror(r);
              lagopus_msg_error("failed to submit " PFSZ(u)
                                " urgent/timed tasks.\n", n_out_tasks);
            }
          }

          if (s_idle_proc != NULL &&
              s_next_idle_abstime < (now + CALLOUT_TASK_SCHED_JITTER)) {
            if (likely(s_idle_proc(s_idle_proc_arg) == LAGOPUS_RESULT_OK)) {
              s_next_idle_abstime = now + s_idle_interval;
            } else {
              /*
               * Stop the main loop and return (clean finish.)
               */
              s_do_loop = false;
              goto critical_end;
            }
          }

          /*
           * Fetch the start time of the timed task at the queue head.
           */
          next_wakeup = s_peek_current_wakeup_time();
          if (next_wakeup <= 0LL) {
            /*
             * Nothing in the timed Q.
             */
            if (s_next_idle_abstime <= 0LL) {
              s_next_idle_abstime = now + s_idle_interval;
            }
            next_wakeup = s_next_idle_abstime;
          }

          /*
           * TODO
           *
           * Re-optimize forcible wake up by timed task submission
           * timing and times.  See also
           * callout_queue.c:s_do_sched().
           */

          /*
           * Calculate the timeout and sleep.
           */
          timeout = next_wakeup - now;
          if (likely(timeout > 0LL)) {
            if (timeout > s_idle_interval) {
              timeout = s_idle_interval;
              next_wakeup = now + timeout;
            }

#ifdef CO_MSG_DEBUG
            lagopus_msg_debug(4, "about to sleep, timeout " PF64(d)
                              " nsec.\n", timeout);
#endif /* CO_MSG_DEBUG */

            prev_wakeup = next_wakeup;

            r = lagopus_bbq_wait_gettable(&s_urgent_tsk_q, timeout);
            if (unlikely(r <= 0 &&
                         r != LAGOPUS_RESULT_TIMEDOUT &&
                         r != LAGOPUS_RESULT_WAKEUP_REQUESTED)) {
              lagopus_perror(r);
              lagopus_msg_error("Event wait failure.\n");
              ret = r;
              goto critical_end;
            } else {
              if (r == LAGOPUS_RESULT_WAKEUP_REQUESTED) {
#ifdef CO_MSG_DEBUG
                lagopus_msg_debug(4, "woke up.\n");
#endif /* CO_MSG_DEBUG */
              }
            }
          } else {
            WHAT_TIME_IS_IT_NOW_IN_NSEC(next_wakeup);
            prev_wakeup = next_wakeup;

#ifdef CO_MSG_DEBUG
            lagopus_msg_debug(4, "timeout zero. continue.\n");
#endif /* CO_MSG_DEBUG */
          }

          /*
           * The end of the desired portion of the loop.
           */

        } /* while (s_do_loop == true) */
      }
   critical_end:
      s_is_stopped = true;
      s_wakeup_sched();
      (void)lagopus_mutex_leave_critical(&s_sched_lck, cstate);

      if (s_do_loop == false) {
        /*
         * The clean finish.
         */
        ret = LAGOPUS_RESULT_OK;
      }

    } else { /* s == GLOBAL_STATE_STARTED */
      s_is_stopped = true;
      ret = LAGOPUS_RESULT_INVALID_STATE_TRANSITION;
    }
  } else {
    s_is_stopped = true;
  }

  return ret;
}
lagopus_result_t
lagopus_cbuffer_wakeup(lagopus_cbuffer_t *cbptr, lagopus_chrono_t nsec) {
  lagopus_result_t ret = LAGOPUS_RESULT_ANY_FAILURES;

  if (cbptr != NULL && *cbptr != NULL) {
    size_t n_waiters;

    s_lock(*cbptr);
    {
      n_waiters = __sync_fetch_and_add(&((*cbptr)->m_n_waiters), 0);
      if (n_waiters > 0) {
        if ((*cbptr)->m_is_operational == true) {
          if ((*cbptr)->m_is_awakened == false) {
            /*
             * Wake all the waiters up.
             */
            (*cbptr)->m_is_awakened = true;
            (void)lagopus_cond_notify(&((*cbptr)->m_cond_get), true);
            (void)lagopus_cond_notify(&((*cbptr)->m_cond_put), true);

            if (nsec != 0LL) {
              /*
               * Then wait until one of the waiters wakes this thread up.
               */
           recheck:
              mbar();
              if ((*cbptr)->m_is_operational == true) {
                if ((*cbptr)->m_is_awakened == true) {
                  lagopus_msg_debug(5, "sync wait a waiter wake me up...\n");
                  if ((ret = lagopus_cond_wait(&((*cbptr)->m_cond_awakened),
                                               &((*cbptr)->m_lock),
                                               nsec)) == LAGOPUS_RESULT_OK) {
                    goto recheck;
                  }
                  lagopus_msg_debug(5, "a waiter woke me up.\n");
                } else {
                  ret = LAGOPUS_RESULT_OK;
                }
              } else {
                ret = LAGOPUS_RESULT_NOT_OPERATIONAL;
              }
            } else {
              ret = LAGOPUS_RESULT_OK;
            }
          } else {
            ret = LAGOPUS_RESULT_OK;
          }
        } else {
          ret = LAGOPUS_RESULT_NOT_OPERATIONAL;
        }
      } else {
        ret = LAGOPUS_RESULT_OK;
      }
    }
    s_unlock(*cbptr);
  } else {
    ret = LAGOPUS_RESULT_INVALID_ARGS;
  }

  return ret;
}
static inline lagopus_result_t
s_get_n(lagopus_cbuffer_t *cbptr,
        void *valptr,
        size_t n_vals_max,
        size_t n_at_least,
        size_t valsz,
        lagopus_chrono_t nsec,
        size_t *n_actual_get,
        bool do_incr) {
  lagopus_result_t ret = LAGOPUS_RESULT_ANY_FAILURES;
  lagopus_cbuffer_t cb = NULL;

  if (cbptr != NULL && (cb = *cbptr) != NULL &&
      valptr != NULL &&
      valsz == cb->m_element_size) {

    if (n_vals_max > 0) {
      int64_t n_copyout = 0LL;

      if (nsec == 0LL) {

        s_lock(cb);
        {
          /*
           * Just get and return.
           */
          if (cb->m_is_operational == true) {
            n_copyout = ret = s_copyout(cb, valptr, n_vals_max, do_incr);
          } else {
            ret = LAGOPUS_RESULT_NOT_OPERATIONAL;
          }
        }
        s_unlock(cb);

      } else if (nsec < 0LL) {

        s_lock(cb);
        {
          /*
           * Repeat getting until the required number of elements is got.
           */
       check_inf:
          mbar();
          if (cb->m_is_operational == true) {
            n_copyout += s_copyout(cb,
                                   (void *)((char *)valptr +
                                            ((size_t)n_copyout * valsz)),
                                   n_vals_max - (size_t)n_copyout,
                                   do_incr);
            if ((size_t)n_copyout < n_vals_max) {
              /*
               * Need to repeat.
               */
              if (cb->m_n_elements < 1LL) {
                /*
                 * No data.  Need to wait until someone puts data into
                 * the buffer.
                 */
                if ((ret = s_wait_gettable(cb, -1LL)) == LAGOPUS_RESULT_OK) {
                  goto check_inf;
                } else {
                  /*
                   * An error occurred while waiting.
                   */
                  if (ret == LAGOPUS_RESULT_TIMEDOUT) {
                    /*
                     * Must not happen.
                     */
                    lagopus_msg_fatal("Timed out must not happen here.\n");
                  }
                }
              } else {
                /*
                 * The buffer still has data but it couldn't get all
                 * the data??  Must not happen??
                 */
                lagopus_msg_fatal("Couldn't get all the data even the data "
                                  "available. Must not happen.\n");
              }
            } else {
              /*
               * Succeeded.
               */
              ret = n_copyout;
            }
          } else {
            ret = LAGOPUS_RESULT_NOT_OPERATIONAL;
          }
        }
        s_unlock(cb);

      } else {

        s_lock(cb);
        {
          /*
           * Repeat getting until the required number of elements is got
           * or the specified time limit expires.
           */
          lagopus_chrono_t copy_start;
          lagopus_chrono_t wait_end;
          lagopus_chrono_t to = nsec;

       check_to:
          mbar();
          if (cb->m_is_operational == true) {
            WHAT_TIME_IS_IT_NOW_IN_NSEC(copy_start);
            n_copyout += s_copyout(cb,
                                   (void *)((char *)valptr +
                                            ((size_t)n_copyout * valsz)),
                                   n_vals_max - (size_t)n_copyout,
                                   do_incr);
            if ((size_t)n_copyout < n_at_least) {
              /*
               * Need to repeat.
               */
              if (cb->m_n_elements < 1LL) {
                /*
                 * No data.  Need to wait until someone puts data into
                 * the buffer.
                 */
                if ((ret = s_wait_gettable(cb, to)) == LAGOPUS_RESULT_OK) {
                  WHAT_TIME_IS_IT_NOW_IN_NSEC(wait_end);
                  to -= (wait_end - copy_start);
                  if (to > 0LL) {
                    goto check_to;
                  }
                  ret = LAGOPUS_RESULT_TIMEDOUT;
                }
              } else {
                /*
                 * The buffer still has data but it couldn't get all
                 * the data??  Must not happen??
                 */
                lagopus_msg_fatal("Couldn't get all the data even the data "
                                  "available. Must not happen.\n");
              }
            } else {
              /*
               * Succeeded.
               */
              ret = n_copyout;
            }
          } else {
            ret = LAGOPUS_RESULT_NOT_OPERATIONAL;
          }
        }
        s_unlock(cb);
      }

      if (n_actual_get != NULL) {
        *n_actual_get = (size_t)n_copyout;
      }

    } else {
      if (n_actual_get != NULL) {
        *n_actual_get = 0LL;
      }
      ret = LAGOPUS_RESULT_OK;
    }

  } else {
    if (n_actual_get != NULL) {
      *n_actual_get = 0LL;
    }
    ret = LAGOPUS_RESULT_INVALID_ARGS;
  }

  return ret;
}
static gallus_result_t
s_intermediate_main(const gallus_pipeline_stage_t *sptr,
                    size_t idx,
                    void *evbuf,
                    size_t n_evs) {
  gallus_result_t ret = GALLUS_RESULT_ANY_FAILURES;

  if (likely(sptr != NULL && *sptr != NULL)) {
    test_stage_t ts = (test_stage_t)(*sptr);
    size_t n_cur_evs = __sync_add_and_fetch(&(ts->m_n_events), 0);

    if (likely(n_cur_evs < ts->m_n_data)) {
      uint64_t *data = (uint64_t *)evbuf;
      size_t i;
      size_t j;
      uint64_t sum = 0;

      if (unlikely(ts->m_states[idx] == test_stage_state_initialized)) {
        uint64_t cur_clock = gallus_rdtsc();
        gallus_atomic_update_min(uint64_t, &(ts->m_start_clock),
                                 0, cur_clock);
        ts->m_states[idx] = test_stage_state_running;
      }

      for (i = 0; i < ts->m_weight; i++) {
        sum = 0;
        for (j = 0; j < n_evs; j++) {
          sum += data[j];
        }
      }
      (void)__sync_add_and_fetch(&(ts->m_sum), sum);

      n_cur_evs = __sync_add_and_fetch(&(ts->m_n_events), n_evs);
      if (unlikely(ts->m_n_data == n_cur_evs)) {
        uint64_t cur_clock = gallus_rdtsc();
        gallus_chrono_t end_time;

        WHAT_TIME_IS_IT_NOW_IN_NSEC(end_time);

        gallus_msg_debug(1, "got " PFSZ(u) " / " PFSZ(u) " events.\n",
                         n_cur_evs, ts->m_n_data);

        gallus_atomic_update_min(uint64_t, &(ts->m_end_clock),
                                 0, cur_clock);
        gallus_atomic_update_max(gallus_chrono_t, &(ts->m_end_time),
                                 -1, end_time);

        ts->m_states[idx] = test_stage_state_done;
        mbar();

        if (ts->m_type == test_stage_type_egress) {
          /*
           * Wake the master up.
           */
          (void)s_test_stage_wakeup(ts);
        }
      }

      ret = (gallus_result_t)n_evs;
    } else {
      ret = s_test_stage_wait(ts);
    }
  } else {
    ret = GALLUS_RESULT_INVALID_ARGS;
  }

  return ret;
}