// Master node has received a response from one of its workers.
// Forward the response to the owning client, cache it, update the
// worker's bookkeeping, then try to re-dispatch queued requests.
void handle_worker_response(Worker_handle worker_handle, const Response_msg& resp) {
    DLOG(INFO) << ">>>Master received a response from a worker: [" << resp.get_tag() << ":" << resp.get_response() << "]" << std::endl;

    // Route the response back to the client that issued the request.
    // NOTE(review): operator[] default-constructs an entry if the tag is
    // unknown — presumably every tag was registered at dispatch; verify.
    Client_handle client = mstate.tagClientMap[resp.get_tag()];
    send_client_response(client, resp);
    DLOG(INFO) << "<<Master send response back to client: " << client << std::endl;

    mstate.num_pending_client_requests --;
    DLOG(INFO) << "pending_client_requests: " << mstate.num_pending_client_requests << std::endl;

    // Store the response in the cache, keyed by the original request string
    int tag = resp.get_tag();
    std::string req_string = mstate.tagReqStringMap[tag];
    cache_manager.cacheMap[req_string] = resp.get_response();

    // Find the worker that produced this response and update its state
    for (unsigned int i = 0; i < mstate.my_workers.size(); i++) {
        if (mstate.my_workers[i].worker_handle == worker_handle) {
            // update mstate
            mstate.handle_work_done(resp, i);
            break;
        }
    }

    // Re-dispatch queued requests now that a slot may have freed up.
    // BUGFIX: snapshot the queue size before draining. The old condition
    // `i < queue.size()` re-evaluated a size that shrinks on every pop
    // while i grows, so only about half of the queued requests were ever
    // re-dispatched per worker response.
    size_t vip_count = mstate.requests_queue_vip.size();
    for (size_t i = 0; i < vip_count; i++) {
        Request_info req = mstate.requests_queue_vip.front();
        DLOG(INFO) << "deque(vip):" << req.client_req.get_request_string() << std::endl;
        mstate.requests_queue_vip.pop();
        handle_client_request(req.client_handle, req.client_req);
    }
    size_t normal_count = mstate.requests_queue.size();
    for (size_t i = 0; i < normal_count; i++) {
        Request_info req = mstate.requests_queue.front();
        DLOG(INFO) << "deque:" << req.client_req.get_request_string() << std::endl;
        mstate.requests_queue.pop();
        handle_client_request(req.client_handle, req.client_req);
    }
    return;
}
/**
 * Handles the LIST command: emits one line per filter between
 * START/END sentinel lines, sent as a single vectored write.
 */
static void handle_list_cmd(bloom_conn_handler *handle, char *args, int args_len) {
    // cheat gcc to compile without warnings
    (void)args_len;

    // List all the filters (args is an optional prefix filter)
    bloom_filter_list_head *head;
    int res = filtmgr_list_filters(handle->mgr, args, &head);
    if (res != 0) {
        INTERNAL_ERROR();
        return;
    }

    // Allocate buffers for the responses: one per filter plus the
    // START/END wrapper lines.
    int num_out = (head->size+2);
    char** output_bufs = malloc(num_out * sizeof(char*));
    int* output_bufs_len = malloc(num_out * sizeof(int));
    // BUGFIX: both malloc results were used unchecked; a failed
    // allocation would have crashed on a NULL dereference.
    if (!output_bufs || !output_bufs_len) {
        free(output_bufs);
        free(output_bufs_len);
        filtmgr_cleanup_list(head);
        INTERNAL_ERROR();
        return;
    }

    // Setup the START/END lines
    output_bufs[0] = (char*)&START_RESP;
    output_bufs_len[0] = START_RESP_LEN;
    output_bufs[head->size+1] = (char*)&END_RESP;
    output_bufs_len[head->size+1] = END_RESP_LEN;

    // Generate one response line per filter
    char *resp;
    bloom_filter_list *node = head->head;
    for (int i=0; i < head->size; i++) {
        res = filtmgr_filter_cb(handle->mgr, node->filter_name, list_filter_cb, &resp);
        if (res == 0) {
            output_bufs[i+1] = resp;
            output_bufs_len[i+1] = strlen(resp);
        } else {
            // Filter vanished since listing; skip this output slot
            output_bufs[i+1] = NULL;
            output_bufs_len[i+1] = 0;
        }
        node = node->next;
    }

    // Write the response
    send_client_response(handle->conn, output_bufs, output_bufs_len, num_out);

    // Cleanup: the per-filter lines were heap-allocated by the callback
    for (int i=1; i <= head->size; i++)
        if (output_bufs[i]) free(output_bufs[i]);
    free(output_bufs);
    free(output_bufs_len);
    filtmgr_cleanup_list(head);
}
/**
 * Helper to send the response for the multi commands (multi or bulk).
 * @arg handle The conn handle
 * @arg cmd_res The result of the command
 * @arg num_keys The number of keys in the result buffer. This should NOT be
 * more than MULTI_OP_SIZE.
 * @arg res_buf The result buffer
 * @arg end_of_input Should the last result include a new line
 * @return 0 on success, 1 if we should stop.
 */
static int handle_multi_response(bloom_conn_handler *handle, int cmd_res, int num_keys, char *res_buf, int end_of_input) {
    // Reject key counts outside (0, MULTI_OP_SIZE]
    if (num_keys <= 0 || num_keys > MULTI_OP_SIZE)
        return 1;

    // A failed command terminates the multi sequence
    if (cmd_res != 0) {
        if (cmd_res == -1) {
            handle_client_resp(handle->conn, (char*)FILT_NOT_EXIST, FILT_NOT_EXIST_LEN);
        } else {
            INTERNAL_ERROR();
        }
        return 1;
    }

    // Per-key output buffers; each slot points at a static yes/no token
    char *out_bufs[MULTI_OP_SIZE];
    int out_lens[MULTI_OP_SIZE];

    // Map each result byte to its response token. Only the very last key
    // of the whole input gets the newline-terminated variant.
    for (int idx = 0; idx < num_keys; idx++) {
        int is_final = end_of_input && (idx == (num_keys - 1));
        char val = res_buf[idx];
        if (val == 0) {
            out_bufs[idx] = (char*)(is_final ? NO_RESP : NO_SPACE);
            out_lens[idx] = is_final ? NO_RESP_LEN : NO_SPACE_LEN;
        } else if (val == 1) {
            out_bufs[idx] = (char*)(is_final ? YES_RESP : YES_SPACE);
            out_lens[idx] = is_final ? YES_RESP_LEN : YES_SPACE_LEN;
        } else {
            INTERNAL_ERROR();
            return 1;
        }
    }

    // Write out!
    send_client_response(handle->conn, (char**)&out_bufs, (int*)&out_lens, num_keys);
    return 0;
}
/**
 * Handles the INFO command: looks up a single filter by name and
 * streams its stats between START/END sentinel lines.
 */
static void handle_info_cmd(bloom_conn_handler *handle, char *args, int args_len) {
    // A filter name is mandatory
    if (args == NULL) {
        handle_client_err(handle->conn, (char*)&FILT_NEEDED, FILT_NEEDED_LEN);
        return;
    }

    // A space terminator means there are trailing arguments we don't
    // accept (buffer_after_terminator returns 0 when it finds one).
    char *trailing;
    int trailing_len;
    if (buffer_after_terminator(args, args_len, ' ', &trailing, &trailing_len) == 0) {
        handle_client_err(handle->conn, (char*)&UNEXPECTED_ARGS, UNEXPECTED_ARGS_LEN);
        return;
    }

    // Output vector: START line, stats body (filled by callback), END line
    char *output[3] = {(char*)&START_RESP, NULL, (char*)&END_RESP};
    int lens[3] = {START_RESP_LEN, 0, END_RESP_LEN};

    // Invoke the callback to get the filter stats
    int res = filtmgr_filter_cb(handle->mgr, args, info_filter_cb, &output[1]);
    if (res != 0) {
        if (res == -1) {
            // Filter does not exist
            handle_client_resp(handle->conn, (char*)FILT_NOT_EXIST, FILT_NOT_EXIST_LEN);
        } else {
            INTERNAL_ERROR();
        }
        return;
    }

    // Size the stats body, write everything out, free the callback's buffer
    lens[1] = strlen(output[1]);
    send_client_response(handle->conn, (char**)&output, (int*)&lens, 3);
    free(output[1]);
}
/* * This function could be called periodically, * until log_last_applied < s->commit_index */ void commit_unapplied_entries(struct server_context_t *s, uint64_t commit_index) { //apply commited changes to state machine until commit_index if(s->commit_index < commit_index) { uint64_t last_index = s->log->tail ? log_index(s->log->tail) : 0; s->commit_index = commit_index <= last_index ? commit_index : last_index; } DBG_LOG(LOG_DEBUG, "[%s][%d] Last Applied = %d, Commit Index = %d", ss[s->state], s->current_term, s->log_last_applied, s->commit_index); if(s->log_last_applied < s->commit_index) { DBG_LOG(LOG_INFO, "[%s][%d] Commit unapplied entries", ss[s->state], s->current_term); struct p_log *log = s->log; //get the next entry after last commit struct p_log_entry_t *iter = get_iterator_at(s->log, s->log_last_applied + 1); if(iter) { while(iter && log_index(iter) <= commit_index) { int outlen = 0; void *r = stm_apply(s->stm, (void *)iter->e->buffer, iter->e->bufsize, &outlen); if(r) { if(s->state == LEADER) { send_client_response(s, iter->e->req_id, r, outlen); } else { //TODO: send response that its no more client } free(r); } else { //TODO: what to do with STM error? crash? //let's just break for now! break; } s->log_last_applied = log_index(iter); iter = iter->next; } } } }
/**
 * Sends a client error message back. Builds a three-segment output
 * vector (error prefix, message, newline) so the whole line goes out
 * as a single write with no buffer copying.
 */
static void handle_client_err(bloom_conn_info *conn, char* err_msg, int msg_len) {
    char *segs[3];
    int seg_lens[3];
    segs[0] = (char*)&CLIENT_ERR;  seg_lens[0] = CLIENT_ERR_LEN;
    segs[1] = err_msg;             seg_lens[1] = msg_len;
    segs[2] = (char*)&NEW_LINE;    seg_lens[2] = NEW_LINE_LEN;
    send_client_response(conn, (char**)&segs, (int*)&seg_lens, 3);
}
/**
 * Sends a single response buffer back to the client. Thin convenience
 * wrapper around send_client_response.
 */
static inline void handle_client_resp(bloom_conn_info *conn, char* resp_mesg, int resp_len) {
    char *buf[1];
    int len[1];
    buf[0] = resp_mesg;
    len[0] = resp_len;
    send_client_response(conn, (char**)&buf, (int*)&len, 1);
}
// Handles a new client request on the master: answers from the cache
// when possible, otherwise dispatches to a worker chosen by request
// type, or queues the request when every worker is saturated.
void handle_client_request(Client_handle client_handle, const Request_msg& client_req) {
    std::string request_string = client_req.get_request_string();
    std::string request_arg = client_req.get_arg("cmd");
    DLOG(INFO) << ">>Received request: " << request_string << std::endl;

    // You can assume that traces end with this special message. It
    // exists because it might be useful for debugging to dump
    // information about the entire run here: statistics, etc.
    if (request_arg == "lastrequest") {
        Response_msg resp(0);
        resp.set_response("ack");
        send_client_response(client_handle, resp);
        return;
    }

    // Cache hit: answer immediately without touching a worker
    if (cache_manager.cacheMap.find(request_string) != cache_manager.cacheMap.end()) {
        std::string response_string = cache_manager.cacheMap[request_string];
        // BUGFIX: the response must carry the request's tag (every other
        // path tags its responses); a default-constructed Response_msg
        // carried no tag, so the client could not match it.
        Response_msg resp(client_req.get_tag());
        resp.set_response(response_string);
        send_client_response(client_handle, resp);
        // hit and return
        return;
    }

    bool is_assigned = false;
    mstate.num_pending_client_requests++;  // decremented in handle_worker_response

    // Assign to worker base on its work_status
    // 418wisdom: assign to a low-workload node, level 1 limit first
    if (request_arg == "418wisdom") {
        // Level 1
        for (unsigned int i = 0; i < mstate.my_workers.size(); i++) {
            // presumably reserves slack contexts for projectidea — TODO confirm macro semantics
            RESERVE_CONTEXT_FOR_PI_LEVEL1(1);
            if (mstate.my_workers[i].num_running_task <= MAX_EXEC_CONTEXT_LEVEL1) {
                send_and_update(client_handle, mstate.my_workers[i].worker_handle, client_req, i, WISDOM);
                is_assigned = true;
                break;
            }
        }
        // Level 2 (looser limit) if no level-1 slot was free
        if (!is_assigned) {
            for (unsigned int i = 0; i < mstate.my_workers.size(); i++) {
                RESERVE_CONTEXT_FOR_PI_LEVEL2(1);
                if (mstate.my_workers[i].num_running_task <= MAX_EXEC_CONTEXT_LEVEL2) {
                    send_and_update(client_handle, mstate.my_workers[i].worker_handle, client_req, i, WISDOM);
                    is_assigned = true;
                    break;
                }
            }
        }
    }

    // projectidea: assign to a node not already running a projectidea
    if (request_arg == "projectidea") {
        if (!is_assigned) {
            for (unsigned int i = 0; i < mstate.my_workers.size(); i++) {
                if (mstate.my_workers[i].num_running_task <= MAX_EXEC_CONTEXT_LEVEL2 + 1 &&
                    mstate.my_workers[i].num_work_type[PROJECTIDEA] == 0) {
                    send_and_update(client_handle, mstate.my_workers[i].worker_handle, client_req, i, PROJECTIDEA);
                    is_assigned = true;
                    break;
                }
            }
        }
    }

    // tellmenow: find any possible spot
    if (request_arg == "tellmenow") {
        // Level 1
        for (unsigned int i = 0; i < mstate.my_workers.size(); i++) {
            if (mstate.my_workers[i].num_running_task < MAX_EXEC_CONTEXT_LEVEL1) {
                send_and_update(client_handle, mstate.my_workers[i].worker_handle, client_req, i, TELLMENOW);
                is_assigned = true;
                break;
            }
        }
        // Level 2
        if (!is_assigned) {
            for (unsigned int i = 0; i < mstate.my_workers.size(); i++) {
                if (mstate.my_workers[i].num_running_task < MAX_EXEC_CONTEXT_LEVEL2) {
                    send_and_update(client_handle, mstate.my_workers[i].worker_handle, client_req, i, TELLMENOW);
                    is_assigned = true;
                    break;
                }
            }
        }
    }

    // countprimes: find node that has low workload
    if (request_arg == "countprimes") {
        // Level 1
        for (unsigned int i = 0; i < mstate.my_workers.size(); i++) {
            RESERVE_CONTEXT_FOR_PI_LEVEL1(1);
            if (mstate.my_workers[i].num_running_task < MAX_EXEC_CONTEXT_LEVEL1) {
                send_and_update(client_handle, mstate.my_workers[i].worker_handle, client_req, i, COUNTPRIMES);
                is_assigned = true;
                break;
            }
        }
        // Level 2
        if (!is_assigned) {
            for (unsigned int i = 0; i < mstate.my_workers.size(); i++) {
                RESERVE_CONTEXT_FOR_PI_LEVEL2(1);
                if (mstate.my_workers[i].num_running_task < MAX_EXEC_CONTEXT_LEVEL2) {
                    send_and_update(client_handle, mstate.my_workers[i].worker_handle, client_req, i, COUNTPRIMES);
                    is_assigned = true;
                    break;
                }
            }
        }
    }

    // compareprimes: needs 4 free contexts (it fans out into sub-requests)
    if (request_arg == "compareprimes") {
        // Level 1
        for (unsigned int i = 0; i < mstate.my_workers.size(); i++) {
            RESERVE_CONTEXT_FOR_PI_LEVEL1(4);
            // only execute if remaining contexts >= 4
            if (MAX_EXEC_CONTEXT_LEVEL1 - mstate.my_workers[i].num_running_task < 4)
                continue;
            send_and_update(client_handle, mstate.my_workers[i].worker_handle, client_req, i, COMPAREPRIMES);
            is_assigned = true;
            break;
        }
        // Level 2
        if (!is_assigned) {
            for (unsigned int i = 0; i < mstate.my_workers.size(); i++) {
                RESERVE_CONTEXT_FOR_PI_LEVEL2(4);
                // only execute if remaining contexts >= 4
                if (MAX_EXEC_CONTEXT_LEVEL2 - mstate.my_workers[i].num_running_task < 4)
                    continue;
                send_and_update(client_handle, mstate.my_workers[i].worker_handle, client_req, i, COMPAREPRIMES);
                is_assigned = true;
                break;
            }
        }
    }

    // If all workers are busy, push the request to a queue; latency-
    // sensitive requests go to the VIP queue, drained first in
    // handle_worker_response.
    if (!is_assigned) {
        if (request_arg == "tellmenow" || request_arg == "projectidea") {
            DLOG(INFO) << "enque vip:" << client_req.get_tag() << ":" << client_req.get_request_string() << std::endl;
            mstate.requests_queue_vip.push(Request_info(client_handle, client_req));
        } else {
            DLOG(INFO) << "enque:" << client_req.get_tag() << ":" << client_req.get_request_string() << std::endl;
            mstate.requests_queue.push(Request_info(client_handle, client_req));
        }
    }

    // We're done! This event handler now returns, and the master
    // process calls another one of your handlers when action is
    // required.
    return;
}