/*
 * Key-state callback for tuples that carry an explicit numeric value:
 * folds the uint64 value into the per-key counter for the current epoch.
 * When the current epoch has moved past the key's recorded epoch, the
 * finished epoch is reported via report_key_rate() and the counter is
 * restarted at this tuple's value.
 *
 * Returns the result of report_key_rate() on an epoch rollover,
 * otherwise 0.  Tuples whose value cannot be read as uint64 are ignored.
 */
int prockeystate_update_value(void * vproc, void * vstate, wsdata_t * tuple,
                              wsdata_t *key, wsdata_t *value) {
     proc_instance_t * proc = (proc_instance_t *)vproc;
     key_data_t * kdata = (key_data_t *)vstate;

     uint64_t val = 0;
     if (!dtype_get_uint64(value, &val)) {
          /* non-numeric value: nothing to accumulate */
          return 0;
     }

     /* refresh proc->epoch from this tuple before comparing */
     update_epoch(proc, tuple);

     if (proc->epoch != kdata->epoch) {
          /* epoch boundary crossed: emit the completed epoch ... */
          int emitted = report_key_rate(proc, tuple, kdata);
          /* ... then restart the count for the new epoch */
          kdata->epoch = proc->epoch;
          kdata->cnt = val;
          return emitted;
     }

     /* same epoch: keep accumulating */
     kdata->cnt += val;
     return 0;
}
/*
 * Key-state callback for tuples without an explicit value: counts one
 * occurrence of the key in the current epoch.  When the current epoch
 * has moved past the key's recorded epoch, the finished epoch is
 * reported via report_key_rate() and the counter restarts at 1.
 *
 * Returns the result of report_key_rate() on an epoch rollover,
 * otherwise 0.
 */
int prockeystate_update(void * vproc, void * vstate, wsdata_t * tuple,
                        wsdata_t *key) {
     proc_instance_t * proc = (proc_instance_t *)vproc;
     key_data_t * kdata = (key_data_t *)vstate;

     /* look up time from the tuple, otherwise poll for time */
     update_epoch(proc, tuple);

     if (proc->epoch != kdata->epoch) {
          /* epoch boundary crossed: emit the completed epoch ... */
          int emitted = report_key_rate(proc, tuple, kdata);
          /* ... then restart the count for the new epoch */
          kdata->epoch = proc->epoch;
          kdata->cnt = 1;
          return emitted;
     }

     /* same epoch: one more occurrence of this key */
     kdata->cnt++;
     return 0;
}
/*
 * Informs other threads that this thread has passed through a quiescent
 * state.
 * If all threads have passed through a quiescent state since the last time
 * this thread processed it's callbacks, proceed to process pending callbacks.
 *
 * blocking: when non-zero, spin (yielding the CPU) until the global epoch
 * can be advanced; when zero, make a single attempt and return.
 *
 * NOTE(review): relies on globals not visible here — `ltd` (thread-local
 * data with this thread's index and per-epoch limbo lists), `shtd`
 * (shared per-thread state array), `qg` (global epoch), and the
 * MEM_BARRIER macro.  The exact memory-ordering guarantees of MEM_BARRIER
 * are assumed sufficient to publish the in_critical store before
 * update_epoch()'s reads — confirm against the barrier's definition.
 */
void quiescent_state (int blocking) {
    uint8_t my_index = ltd.thread_index;
    int epoch;
    int orig;
retry:
    /* Snapshot the global epoch and compare with our last-seen epoch. */
    epoch = qg->global_epoch;
    if (shtd[my_index].epoch != epoch) {
        /* New epoch. */
        /* Process callbacks for old 'incarnation' of this epoch. */
        process_callbacks(ltd.limbo_list[epoch]);
        shtd[my_index].epoch = epoch;
    } else {
        /* We are up to date with the global epoch: try to advance it.
         * Temporarily drop out of the critical section so that
         * update_epoch() can observe this thread as quiescent; the
         * original value is restored on every exit path below. */
        orig = shtd[my_index].in_critical;
        shtd[my_index].in_critical = 0;
        int res = update_epoch();
        if (res) {
            /* Epoch advanced (presumably by us): re-enter the critical
             * section, re-read the global epoch, and process the limbo
             * list for the newly retired epoch if it changed. */
            shtd[my_index].in_critical = orig;
            MEM_BARRIER;
            epoch = qg->global_epoch;
            if (shtd[my_index].epoch != epoch) {
                process_callbacks(ltd.limbo_list[epoch]);
                shtd[my_index].epoch = epoch;
            }
            return;
        } else if (blocking) {
            /* Could not advance the epoch (some thread has not yet
             * passed a quiescent state); restore state, yield, and
             * retry until it succeeds. */
            shtd[my_index].in_critical = orig;
            MEM_BARRIER;
            sched_yield();
            goto retry;
        }
        /* Non-blocking attempt failed: restore state and give up. */
        shtd[my_index].in_critical = orig;
        MEM_BARRIER;
    }
    return;
}