/* Reverse handler for a testsvr request event.  Only the server with
 * idx == 1 ever receives requests in this model, so assert that and
 * roll back the local latency sample drawn by the forward handler. */
void handle_testsvr_req_rev(
        testsvr_state * ns,
        testsvr_msg * m,
        tw_lp * lp)
{
    assert(ns->idx == 1);
    codes_local_latency_reverse(lp);
}
/* handle_rev_io_completion
 *
 * Reverse an io completion event: when the scheduler is in use, first
 * undo the scheduler's completion bookkeeping, then roll back the
 * local latency sample drawn by the forward handler. */
static void handle_rev_io_completion(
        lsm_state_t *ns,
        tw_bf *b,
        lsm_message_t *m_in,
        tw_lp *lp)
{
    if (ns->use_sched) {
        handle_rev_io_sched_compl(ns, b, m_in, lp);
    }
    codes_local_latency_reverse(lp);
}
/* Reverse handler for the s LP: undoes the state mutations and resource
 * LP calls made by the corresponding forward event.  Note the deliberate
 * fallthrough from S_ALLOC_ACK into S_FREE for failed allocations. */
static void s_event_rc(s_state *ns, tw_bf * b, s_msg *m, tw_lp *lp){
    assert(m->h.magic == s_magic);
    switch(m->h.event_type){
        case S_KICKOFF:
            /* forward handler issued a resource get request; undo it */
            resource_lp_get_rc(lp);
            break;
        case S_ALLOC_ACK:
            if (m->c.ret == 0){
                /* successful allocation: roll back the memory accounting
                 * (bsize is a file-level constant) and the follow-up
                 * resource get issued by the forward handler */
                ns->mem -= bsize;
                ns->mem_max = m->mem_max_prev;
                resource_lp_get_rc(lp);
                break;
            }
            /* else fall into the free stmt */
        case S_FREE:
            /* undoing is unconditional given this lps logic */
            resource_lp_free_rc(lp);
            if (ns->mem > 0){
                /* NOTE(review): forward handler presumably only drew a
                 * local latency sample when memory was outstanding —
                 * confirm against the forward S_FREE path */
                codes_local_latency_reverse(lp);
            }
            ns->mem += bsize;
    }
}
/* reverse for collective operation of the torus network
 * (NOTE(review): original comment said "dragonfly", but this is the
 * torus model's handler; message_size is unused here) */
void torus_collective_rc(int message_size, tw_lp* sender)
{
    codes_local_latency_reverse(sender);
    return;
}
/* Reverse a torus packet event by rolling back the local latency
 * sample drawn when the packet event was generated. */
static void torus_packet_event_rc(tw_lp *sender)
{
    codes_local_latency_reverse(sender);
}
/* Reverse handler for an lsm io event: undo the local latency sample
 * drawn by the forward path on this LP. */
void lsm_io_event_rc(tw_lp *sender)
{
    codes_local_latency_reverse(sender);
}