/* Kick off the AWE server LP: zero its state, create the global work and
 * client-request queues, and schedule an initial KICK_OFF event to self
 * at time offset 0 (the time argument is an offset, not absolute time). */
void lpf_awe_server_init(
    awe_server_state * ns,
    tw_lp * lp)
{
    /* start from a clean state */
    memset(ns, 0, sizeof(*ns));

    /* global queues: pending work ids and clients waiting for work */
    work_queue = g_queue_new();
    client_req_queue = g_queue_new();

    /* schedule the kickoff immediately */
    tw_stime kickoff_time = 0;
    tw_event *e = codes_event_new(lp->gid, kickoff_time, lp);

    /* fill in the message carried by the event */
    awe_msg *m = tw_event_data(e);
    m->event_type = KICK_OFF;
    m->src = lp->gid;

    /* event is ready to be processed, send it off */
    tw_event_send(e);
}
/* Initialize a server LP.
 *
 * All servers except the last (index NUM_SERVERS-1) build a random
 * permutation of [0, NUM_PRIOS) in ns->random_order via rejection
 * sampling and then schedule a KICKOFF event to themselves.  The last
 * server acts as the receiver and only zeroes its per-sender counters.
 *
 * NOTE(review): the rejection loop consumes a variable number of RNG
 * draws per slot; if this model runs under optimistic scheduling, the
 * reverse handler must account for that draw count — verify. */
static void svr_init(
    svr_state * ns,
    tw_lp * lp)
{
    /* logical server index — assumes 2 LPs per repetition (server + net) */
    ns->server_idx = lp->gid / 2;
    if (ns->server_idx < NUM_SERVERS-1){
        /* mark every permutation slot as unassigned */
        for (int i = 0; i < NUM_PRIOS; i++){
            ns->random_order[i] = -1;
        }
        /* place each priority i into a uniformly random empty slot
         * (redraw until an unoccupied slot is hit) */
        for (int i = 0; i < NUM_PRIOS; i++){
            for (;;){
                int idx = tw_rand_integer(lp->rng, 0, NUM_PRIOS-1);
                // not sure whether rand_integer is inclusive or not...
                assert(idx < NUM_PRIOS);
                if (ns->random_order[idx] == -1){
                    ns->random_order[idx] = i;
                    break;
                }
            }
        }
        /* schedule a self-kickoff after a small local latency */
        tw_event *e = codes_event_new(lp->gid, codes_local_latency(lp), lp);
        svr_msg * m = tw_event_data(e);
        msg_set_header(666, KICKOFF, lp->gid, &m->h);
        tw_event_send(e);
    }
    else {
        /* last server: receiver role — zero per-sender receive counts */
        memset(ns->num_recv, 0, NUM_SERVERS*sizeof(*ns->num_recv));
    }
}
/* Handle the initial KICK_OFF event: walk the global job_map and
 * schedule a JOB_SUBMIT event for every job at its (optionally
 * compressed) recorded creation time. */
void handle_kick_off_event(
    awe_server_state * ns,
    tw_bf * b,
    awe_msg * m,
    tw_lp * lp)
{
    printf("%lf;awe_server;%lu]Start serving\n", now_sec(lp), lp->gid);

    GHashTableIter iter;
    gpointer key, value;
    g_hash_table_iter_init(&iter, job_map);

    while (g_hash_table_iter_next(&iter, &key, &value)) {
        Job *job = (Job*)value;

        /* submission offset from the job's creation timestamp, padded by
         * the lookahead so the event is strictly in the future */
        tw_stime submit_time =
            s_to_ns(etime_to_stime(job->stats.created)) + ns_tw_lookahead;
        if (fraction < 1.0) {
            /* compress the submission timeline when fraction < 1 */
            submit_time = submit_time * fraction;
        }

        tw_event *e = codes_event_new(lp->gid, submit_time, lp);
        awe_msg *msg = tw_event_data(e);
        msg->event_type = JOB_SUBMIT;
        strcpy(msg->object_id, job->id);
        tw_event_send(e);
    }
}
/**
 * Initialize a node LP: compute the global burst-buffer capacity,
 * determine which cluster (client / server / burst-buffer) this LP
 * belongs to from its relative id, then send a NODE_KICKOFF to self.
 * @param ns node state
 * @param lp LP
 */
void node_lp_init(node_state * ns, tw_lp * lp)
{
    /* configured capacity scaled by 1e9 — presumably GB to bytes; confirm */
    burst_buffer_capacity = ((long) (burst_buffer_max_capacity)) * 1000000000;
    printf("In node_lp_init\n");

    ns->num_processed = 0;

    /* nodes are addressed in their logical id space
     * (0...num_client_nodes-1 and 0...num_svr_nodes-1, respectively);
     * LPs are computed upon use with model-net, other events */
    ns->id_clust = codes_mapping_get_lp_relative_id(lp->gid, 1, 0);
    int id_all = codes_mapping_get_lp_relative_id(lp->gid, 0, 0);

    /* the global relative id is partitioned into contiguous ranges:
     * [0, clients) | [clients, clients+servers) | [..., ...+bb nodes) */
    ns->is_in_client = (id_all < num_client_nodes);
    ns->is_in_server = (id_all < (num_svr_nodes + num_client_nodes)
            && (id_all >= num_client_nodes));
    ns->is_in_bb = (id_all <
            (num_svr_nodes + num_client_nodes + num_burst_buffer_nodes)
            && (id_all >= num_svr_nodes + num_client_nodes));

    printf("is_in_client=%d\nis_in_svr=%d\nis_in_bb=%d\n",
            ns->is_in_client, ns->is_in_server, ns->is_in_bb);
    printf("id_all= %d\nnum_client_nodes= %d\n", id_all, num_client_nodes);

    /* send a self kickoff event */
    tw_event *e = codes_event_new(lp->gid, codes_local_latency(lp), lp);
    node_msg *m = tw_event_data(e);
    msg_set_header(node_magic, NODE_KICKOFF, lp->gid, &m->h);
    tw_event_send(e);
}
/* Initialize a server LP: zero its state and send a dummy KICKOFF event
 * to self that starts the real simulation.  The kickoff time is skewed
 * by a random amount past the lookahead to help avoid event ties. */
static void svr_init(
    svr_state * ns,
    tw_lp * lp)
{
    memset(ns, 0, sizeof(*ns));

    tw_stime kickoff_time = g_tw_lookahead + tw_rand_unif(lp->rng);
    tw_event *e = codes_event_new(lp->gid, kickoff_time, lp);
    svr_msg *m = tw_event_data(e);
    m->svr_event_type = KICKOFF;
    tw_event_send(e);
}
/* Initialize a test server LP.
 *
 * Mapping is the simple "1 svr / 1 modelnet" scheme, so the logical
 * server index is gid/2; exactly three servers (0, 1, 2) are expected.
 * Servers 0 and 2 issue requests and therefore kick themselves off;
 * server 1 only reacts to incoming requests.
 *
 * Fixes: the debug-file stream is validated BEFORE being handed to
 * setvbuf (previously setvbuf(NULL, ...) — undefined behavior — ran
 * ahead of the assert if fopen failed), sprintf is replaced with a
 * bounded snprintf, and the memset size uses the element type of
 * req_stat rather than a hard-coded sizeof(int). */
void testsvr_lp_init(
    testsvr_state * ns,
    tw_lp * lp){
    /* for test, just use dummy way (assume 1 svr / 1 modelnet) */
    ns->idx = lp->gid / 2;

    /* expect exactly three servers */
    assert(ns->idx <= 2);

    /* zero the per-request status table */
    memset(ns->req_stat, 0x0, NUM_REQS*sizeof(*ns->req_stat));

    /* create kickoff event only if we're a request server */
    if (ns->idx == 0 || ns->idx == 2){
        tw_event *e = codes_event_new(lp->gid, codes_local_latency(lp), lp);
        testsvr_msg *m_local = tw_event_data(e);
        m_local->magic = testsvr_magic;
        m_local->event_type = KICKOFF;
        /* dummy values for kickoff */
        m_local->idx_src = INT_MAX;
        m_local->lp_src = INT_MAX;
        m_local->req_num = INT_MAX;
        tw_event_send(e);
    }

#if TEST_DEBUG
    char name[32];
    /* bounded format — cannot overflow the fixed-size name buffer */
    snprintf(name, sizeof(name), "testsvr.%d.%lu", ns->idx, lp->gid);
    ns->fdebug = fopen(name, "w");
    /* validate the stream before configuring it */
    assert(ns->fdebug != NULL);
    setvbuf(ns->fdebug, NULL, _IONBF, 0);
    ns->event_ctr = 0;
#endif
}
/* Event handler for the memory-stress server LP.
 *
 * S_KICKOFF:   ask the resource LP for the first block.
 * S_ALLOC_ACK: on success (ret == 0) record the allocation, update the
 *              high-water mark, and immediately request another block;
 *              on failure fall through to S_FREE and start releasing.
 * S_FREE:      release one block; while allocated memory remains,
 *              schedule another S_FREE to self. */
static void s_event(s_state *ns, tw_bf *bf, s_msg *m, tw_lp *lp){
    assert(m->h.magic == s_magic);

    switch(m->h.event_type){
        case S_KICKOFF: {
            msg_header h;
            msg_set_header(s_magic, S_ALLOC_ACK, lp->gid, &h);
            resource_lp_get(bsize, 0, lp, CODES_MCTX_DEFAULT, 0, &h, &ns->cb);
            break;
        }
        case S_ALLOC_ACK:
            if (m->c.ret == 0){
                /* allocation succeeded: account for it, then ask for more */
                ns->mem += bsize;
                m->mem_max_prev = ns->mem_max;
                ns->mem_max = maxu64(ns->mem, ns->mem_max);
                msg_header h;
                msg_set_header(s_magic, S_ALLOC_ACK, lp->gid, &h);
                resource_lp_get(bsize, 0, lp, CODES_MCTX_DEFAULT, 0, &h,
                        &ns->cb);
                break;
            }
            /* allocation failed: begin tearing the memory back down */
            /* fallthrough */
        case S_FREE: {
            resource_lp_free(bsize, lp, CODES_MCTX_DEFAULT);
            ns->mem -= bsize;
            if (ns->mem > 0){
                /* blocks still outstanding — schedule the next free */
                tw_event *e =
                    codes_event_new(lp->gid, codes_local_latency(lp), lp);
                s_msg *m_free = tw_event_data(e);
                msg_set_header(s_magic, S_FREE, lp->gid, &m_free->h);
                tw_event_send(e);
            }
            break;
        }
    }
}
/* Handle a WORK_CHECKOUT request from a client.
 *
 * Tries to find an eligible unit of work for the requesting client:
 *  - clients in remote group 1 (when sched_policy > 0) are limited to
 *    stage-5 work (policy 1) or greedy-ordered work (policy 2);
 *  - all other clients simply take the head of work_queue.
 *
 * If work is found, a WORK_CHECKOUT reply carrying the work id goes
 * back to the client; otherwise the client id is parked on
 * client_req_queue until matching work is enqueued.
 *
 * Fixes over the previous version:
 *  - the reply event is allocated only once work is actually found, so
 *    no tw_event is created and then silently abandoned on the no-work
 *    path;
 *  - the string popped from work_queue (heap-allocated by the enqueue
 *    path) is freed after copying, plugging a per-checkout leak;
 *  - the malloc for the parked client id is checked before use. */
void handle_work_checkout_event(
    awe_server_state * ns,
    tw_bf * b,
    awe_msg * m,
    tw_lp * lp)
{
    tw_lpid client_id = m->src;
    int group_id = get_group_id(client_id);

    /* if eligible work exists, workid receives the dequeued work id */
    int got_work = 0;
    char workid[MAX_LENGTH_ID];

    if (!g_queue_is_empty(work_queue)) {
        if (group_id == 1 && sched_policy > 0) {
            /* client from remote site */
            char *work = NULL;
            if (sched_policy == 1) {
                /* checkout task 5 (blat) only for remote site */
                work = get_first_work_by_stage(5);
            } else if (sched_policy == 2) {
                work = get_first_work_by_greedy(WorkOrder);
            }
            if (work) {
                /* NOTE(review): ownership of 'work' is unclear — if the
                 * helper returns a heap copy this still leaks; verify. */
                strcpy(workid, work);
                got_work = 1;
            }
        } else {
            char *head = g_queue_pop_head(work_queue);
            strcpy(workid, head);
            free(head); /* the enqueue path malloc'd this string */
            got_work = 1;
        }
    }

    if (got_work) {
        /* eligible work found, send back to the requesting client */
        fprintf(event_log, "%lf;awe_server;%lu;WC;work=%s client=%lu\n",
                now_sec(lp), lp->gid, workid, m->src);
        assert (strlen(workid) > 10);

        tw_event *e = codes_event_new(m->src, ns_tw_lookahead, lp);
        awe_msg *msg = tw_event_data(e);
        msg->event_type = WORK_CHECKOUT;
        memset(msg->object_id, 0, sizeof(msg->object_id));
        strcpy(msg->object_id, workid);
        tw_event_send(e);
    } else {
        /* no eligible work found, park the client request */
        tw_lpid *clientid = malloc(sizeof(*clientid));
        assert(clientid);
        *clientid = m->src;
        g_queue_push_tail(client_req_queue, clientid);
    }
}
/* Schedule a WORK_ENQUEUE event to self carrying the given work id. */
void plan_work_enqueue_event(char* work_id, tw_lp *lp)
{
    tw_event *e = codes_event_new(lp->gid, ns_tw_lookahead, lp);
    awe_msg *msg = tw_event_data(e);
    msg->event_type = WORK_ENQUEUE;
    strcpy(msg->object_id, work_id);
    tw_event_send(e);
}
/* Initialize the memory-stress LP: zero the counters, register the
 * callback info, record the LP's relative id, and send an S_KICKOFF
 * event to self after a small local latency. */
static void s_init(s_state *ns, tw_lp *lp){
    ns->mem = 0;
    ns->mem_max = 0;
    INIT_CODES_CB_INFO(&ns->cb, s_msg, h, tag, c);
    ns->id = codes_mapping_get_lp_relative_id(lp->gid, 0, 0);

    tw_event *e = codes_event_new(lp->gid, codes_local_latency(lp), lp);
    s_msg *msg = tw_event_data(e);
    msg_set_header(s_magic, S_KICKOFF, lp->gid, &msg->h);
    tw_event_send(e);
}
/* Handle WORK_ENQUEUE: if a parked client request matches this work,
 * ship it straight out as a WORK_CHECKOUT reply; otherwise push the
 * work id onto work_queue for a later checkout.
 *
 * Ownership: workid is a heap copy of the incoming id.  It is freed
 * here when delivered directly, or handed to work_queue otherwise
 * (the checkout path is then responsible for it).
 *
 * Fix: the malloc result is validated BEFORE strcpy writes through it
 * (previously the assert came after the copy, so an allocation failure
 * crashed on the strcpy instead of tripping the assert). */
void handle_work_enqueue_event(
    awe_server_state * ns,
    tw_bf * b,
    awe_msg * m,
    tw_lp * lp)
{
    fprintf(event_log, "%lf;awe_server;%lu;WQ;work=%s\n",
            now_sec(lp), lp->gid, m->object_id);

    char *workid = malloc(sizeof(char[MAX_LENGTH_ID]));
    assert(workid);
    strcpy(workid, m->object_id);

    /* scan the parked requests for the first client that matches */
    tw_lpid *clientid = NULL;
    int has_match = 0;
    int len = g_queue_get_length(client_req_queue);
    if (len > 0) {
        int n = -1;
        for (int i = 0; i < len; i++) {
            clientid = g_queue_peek_nth(client_req_queue, i);
            if (client_match_work(*clientid, workid)) {
                n = i;
                break;
            }
        }
        if (n >= 0) {
            clientid = g_queue_pop_nth(client_req_queue, n);
            has_match = 1;
        }
    }

    if (has_match) {
        /* deliver the work directly to the waiting client */
        tw_event *e = codes_event_new(*clientid, ns_tw_lookahead, lp);
        awe_msg *msg = tw_event_data(e);
        msg->event_type = WORK_CHECKOUT;
        strcpy(msg->object_id, workid);
        tw_event_send(e);
        fprintf(event_log, "%lf;awe_server;%lu;WC;work=%s client=%lu\n",
                now_sec(lp), lp->gid, workid, *clientid);
        free(workid);
        free(clientid);
    } else {
        /* no waiting client: queue the work (queue takes ownership) */
        g_queue_push_tail(work_queue, workid);
    }
}
/* Initialize a node LP: zero its state and schedule an INIT event to
 * self, skewed past the lookahead by a random amount to avoid ties. */
static void node_init(node_state *ns, tw_lp *lp)
{
    memset(ns, 0, sizeof(*ns));

    tw_stime init_time = g_tw_lookahead + tw_rand_unif(lp->rng);
    tw_event *e = codes_event_new(lp->gid, init_time, lp);
    node_msg *m = tw_event_data(e);
    m->node_event_type = INIT;
    tw_event_send(e);
}
/* Forward a download request one hop toward its destination: build a
 * DNLOAD_REQ event addressed to m->next_hop, recording this router as
 * the new source and the previous sender as last_hop. */
void handle_data_download_req_event(
    shock_router_state * ns,
    tw_bf * b,
    awe_msg * m,
    tw_lp * lp)
{
    tw_lpid dest_id = m->next_hop;
    tw_event *e = codes_event_new(dest_id, ns_tw_lookahead, lp);
    awe_msg *msg = tw_event_data(e);

    msg->event_type = DNLOAD_REQ;
    msg->src = lp->gid;
    msg->last_hop = m->src;
    msg->size = m->size;
    strcpy(msg->object_id, m->object_id);

    tw_event_send(e);
}
/* Forward an upload acknowledgement one hop toward its destination,
 * accumulating the uploaded byte count in this router's state. */
void handle_data_upload_ack_event(
    shock_router_state * ns,
    tw_bf * b,
    awe_msg * m,
    tw_lp * lp)
{
    /* track total bytes acknowledged through this router */
    ns->size_upload += m->size;

    tw_lpid dest_id = m->next_hop;
    tw_event *e = codes_event_new(dest_id, ns_tw_lookahead, lp);
    awe_msg *msg = tw_event_data(e);

    msg->event_type = UPLOAD_ACK;
    msg->src = lp->gid;
    msg->size = m->size;
    strcpy(msg->object_id, m->object_id);

    tw_event_send(e);
}
/* If the arriving message carries a remote-event payload, deliver that
 * payload to the origin server after a bandwidth-proportional delay. */
static void send_remote_event(nodes_state * s, tw_bf * bf, nodes_message * msg, tw_lp * lp)
{
    /* nothing to trigger when no payload was attached */
    if (!msg->remote_event_size_bytes)
        return;

    /* delay = bytes / link bandwidth */
    tw_stime ts = (1/s->params->link_bandwidth) * msg->remote_event_size_bytes;
    tw_event *e = codes_event_new(s->origin_svr, ts, lp);
    nodes_message *m = tw_event_data(e);

    /* the payload sits immediately after the torus header inside msg */
    char *payload = (char*)msg + torus_get_msg_sz();
    memcpy(m, payload, msg->remote_event_size_bytes);
    tw_event_send(e);
}
/* Initialize a server LP: zero its state and send a dummy KICKOFF
 * event to itself, with the send time randomly skewed past the
 * lookahead to avoid event ties. */
static void svr_init(
    svr_state * ns,
    tw_lp * lp)
{
    memset(ns, 0, sizeof(*ns));

    tw_stime kickoff_time = g_tw_lookahead + tw_rand_unif(lp->rng);
    tw_event *e = codes_event_new(lp->gid, kickoff_time, lp);
    svr_msg *m = tw_event_data(e);
    m->event_type = KICKOFF;
    tw_event_send(e);
}
/* Server 1 (the only server that processes requests): copy the incoming
 * request, retag it as LOCAL, and echo it back to self after a small
 * random latency. */
void handle_testsvr_req(
    testsvr_state * ns,
    testsvr_msg * m,
    tw_lp * lp){
    /* only server 1 processes requests */
    assert(ns->idx == 1);

    tw_event *e = codes_event_new(lp->gid, codes_local_latency(lp), lp);
    testsvr_msg *m_local = tw_event_data(e);

    /* clone the request and retag it */
    *m_local = *m;
    m_local->event_type = LOCAL;
#if TEST_DEBUG
    m_local->src_event_ctr = ns->event_ctr;
#endif
    tw_event_send(e);
#if TEST_DEBUG
    ns->event_ctr++;
#endif
}