/*
 * Thread main loop.
 */
void *thread_main(void *arg) {
  (void)arg;  /* unused */

  while (1) {
    /* Randomly choose between a GET and a DELETE according to probability_get. */
    int want_get = (random() % 100) < probability_get;
    int fd = unused_fd();

    if (want_get)
      send_get(fd);
    else
      send_delete(fd);

    release_fd(fd);

    /* Update the shared iteration counter under the stats lock. */
    pthread_mutex_lock(&stats_mutex);
    iteration_count++;
    pthread_mutex_unlock(&stats_mutex);
  }
}
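/*
 * NOTE: the following is a hypothetical sketch, not part of the original
 * harness.  It only illustrates how a loop like thread_main() above is
 * typically driven: spawn a pool of workers, let them run, then read the
 * shared iteration_count.  NUM_THREADS and the 60-second run length are
 * assumptions for illustration.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NUM_THREADS 8

void run_workers(void) {
  pthread_t workers[NUM_THREADS];

  for (int i = 0; i < NUM_THREADS; i++) {
    pthread_create(&workers[i], NULL, thread_main, NULL);
  }

  /* Let the workers run for a fixed period, then report progress. */
  sleep(60);

  pthread_mutex_lock(&stats_mutex);
  fprintf(stderr, "Completed %d iterations\n", iteration_count);
  pthread_mutex_unlock(&stats_mutex);
}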
int Cache(int csd, char *url) {
  FILE *infile;
  long MAX_LENGTH;

  infile = fopen("website", "ab+");
  if (infile == NULL) {
    perror("Could not read cache!");
    return 0;
    /* char new[strlen(url)+1];*/
    /* memset(&new, 0, sizeof(new));*/
    /* memcpy(&new, url, strlen(url));*/
    /* new[strlen(url)]='\n';*/
    /* */
    /* if (fputs(new, infile) == -1) {*/
    /*   return 0;*/
    /* }*/
  } else {
    /* Work out how big the cache file is, then read it all into buf. */
    fseek(infile, 0, SEEK_END);
    MAX_LENGTH = ftell(infile);
    rewind(infile);

    char buf[MAX_LENGTH + 1];
    if (fread(buf, 1, MAX_LENGTH, infile) != (size_t)MAX_LENGTH) {
      perror("Could not copy data from cache!");
      fclose(infile);
    } else {
      fclose(infile);
      buf[MAX_LENGTH] = '\0';  /* NUL-terminate so strstr() below is safe */

      if (strstr(buf, url) != NULL) {
        /* The URL is already cached - serve the response from the cache. */
        if (ReadCache(csd, url) != 1) {
          perror("send cache error!");
          return 0;
        }
      } else {
        /* Not cached - split the URL into host and page and query the web
         * server.  The URL is expected to use '_' to separate the host from
         * the page. */
        char host[256];
        char page[256];
        char *get;
        char *tran = url;
        int k = 0;

        while (url[k] != '_') {
          k++;
          if (url[k] == '\0')
            break;
        }

        memset(host, 0, sizeof(host));
        memcpy(host, tran, k);
        host[k] = '\0';

        memset(page, 0, sizeof(page));
        page[0] = '/';
        memcpy(page + 1, tran + k + 1, strlen(tran) - k);
        page[strlen(tran) - k] = '\0';

        get = build_get(host, page);
        fprintf(stderr, "Query is:\n<<START>>\n%s<<END>>\n", get);

        if (send_get(csd, get) != 1) {
          perror("Send ERROR!");
          exit(EXIT_FAILURE);
        }
      }
    }
  }

  if (ModifyCache(url) != 1) {
    perror("ModifyCache() error!");
    return 0;
  }

  return 1;
}
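/*
 * NOTE: build_get() is called above but not shown.  The following is a
 * hypothetical sketch of such a helper, assuming it simply formats a plain
 * HTTP/1.0 GET request for the given host and page; the project's actual
 * implementation may differ.  The caller is assumed to free() the result.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

char *build_get(const char *host, const char *page) {
  /* "GET <page> HTTP/1.0\r\nHost: <host>\r\nConnection: close\r\n\r\n" */
  size_t len = strlen(page) + strlen(host) + 64;
  char *req = (char *)malloc(len);

  if (req == NULL) {
    return NULL;
  }

  snprintf(req, len,
           "GET %s HTTP/1.0\r\n"
           "Host: %s\r\n"
           "Connection: close\r\n\r\n",
           page, host);
  return req;
}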
HTTPCode ChronosInternalConnection::resynchronise_with_single_node(
                             const std::string server_to_sync,
                             std::vector<std::string> cluster_nodes,
                             std::string localhost)
{
  TRC_DEBUG("Querying %s for timers", server_to_sync.c_str());

  // Get the cluster view ID from the global configuration
  std::string cluster_view_id;
  __globals->get_cluster_view_id(cluster_view_id);

  std::string response;
  HTTPCode rc;

  // Loop sending GETs to the server while the response is a 206
  do
  {
    std::map<TimerID, int> delete_map;

    rc = send_get(server_to_sync,
                  localhost,
                  PARAM_SYNC_MODE_VALUE_SCALE,
                  cluster_view_id,
                  MAX_TIMERS_IN_RESPONSE,
                  response);

    if ((rc == HTTP_PARTIAL_CONTENT) ||
        (rc == HTTP_OK))
    {
      // Parse the GET response
      rapidjson::Document doc;
      doc.Parse<0>(response.c_str());

      if (doc.HasParseError())
      {
        // We've failed to parse the document as JSON. This suggests that
        // there's something seriously wrong with the node we're trying
        // to query so don't retry
        TRC_WARNING("Failed to parse document as JSON");
        rc = HTTP_BAD_REQUEST;
        break;
      }

      try
      {
        JSON_ASSERT_CONTAINS(doc, JSON_TIMERS);
        JSON_ASSERT_ARRAY(doc[JSON_TIMERS]);
        const rapidjson::Value& ids_arr = doc[JSON_TIMERS];
        int total_timers = ids_arr.Size();
        int count_invalid_timers = 0;

        for (rapidjson::Value::ConstValueIterator ids_it = ids_arr.Begin();
             ids_it != ids_arr.End();
             ++ids_it)
        {
          try
          {
            const rapidjson::Value& id_arr = *ids_it;
            JSON_ASSERT_OBJECT(id_arr);

            // Get the timer ID
            TimerID timer_id;
            JSON_GET_INT_MEMBER(id_arr, JSON_TIMER_ID, timer_id);

            // Get the old replicas
            std::vector<std::string> old_replicas;
            JSON_ASSERT_CONTAINS(id_arr, JSON_OLD_REPLICAS);
            JSON_ASSERT_ARRAY(id_arr[JSON_OLD_REPLICAS]);
            const rapidjson::Value& old_repl_arr = id_arr[JSON_OLD_REPLICAS];

            for (rapidjson::Value::ConstValueIterator repl_it = old_repl_arr.Begin();
                 repl_it != old_repl_arr.End();
                 ++repl_it)
            {
              JSON_ASSERT_STRING(*repl_it);
              old_replicas.push_back(repl_it->GetString());
            }

            // Get the timer.
            JSON_ASSERT_CONTAINS(id_arr, JSON_TIMER);
            JSON_ASSERT_OBJECT(id_arr[JSON_TIMER]);
            const rapidjson::Value& timer_obj = id_arr[JSON_TIMER];

            bool store_timer = false;
            std::string error_str;
            bool replicated_timer;
            Timer* timer = Timer::from_json_obj(timer_id,
                                                0,
                                                error_str,
                                                replicated_timer,
                                                (rapidjson::Value&)timer_obj);

            if (!timer)
            {
              count_invalid_timers++;
              TRC_INFO("Unable to create timer - error: %s", error_str.c_str());
              continue;
            }
            else if (!replicated_timer)
            {
              count_invalid_timers++;
              TRC_INFO("Unreplicated timer in response - ignoring");
              delete timer; timer = NULL;
              continue;
            }

            // Decide what we're going to do with this timer.
            int old_level = 0;
            bool in_old_replica_list = get_replica_level(old_level,
                                                         localhost,
                                                         old_replicas);
            int new_level = 0;
            bool in_new_replica_list = get_replica_level(new_level,
                                                         localhost,
                                                         timer->replicas);

            // Add the timer to the delete map we're building up
            delete_map.insert(std::pair<TimerID, int>(timer_id, new_level));

            if (in_new_replica_list)
            {
              // Add the timer to my store if I can.
              if (in_old_replica_list)
              {
                if (old_level >= new_level)
                {
                  // Add/update timer
                  // LCOV_EXCL_START - Adding timer paths are tested elsewhere
                  store_timer = true;
                  // LCOV_EXCL_STOP
                }
              }
              else
              {
                // Add/update timer
                store_timer = true;
              }

              // Now loop through the new replicas.
              int index = 0;
              for (std::vector<std::string>::iterator it = timer->replicas.begin();
                   it != timer->replicas.end();
                   ++it, ++index)
              {
                if (index <= new_level)
                {
                  // Do nothing. We've covered adding the timer to the store above
                }
                else
                {
                  // We can potentially replicate the timer to one of these nodes.
                  // Check whether the new replica was involved previously
                  int old_rep_level = 0;
                  bool is_new_rep_in_old_rep = get_replica_level(old_rep_level,
                                                                 *it,
                                                                 old_replicas);
                  if (is_new_rep_in_old_rep)
                  {
                    if (old_rep_level >= new_level)
                    {
                      _replicator->replicate_timer_to_node(timer, *it);
                    }
                  }
                  else
                  {
                    _replicator->replicate_timer_to_node(timer, *it);
                  }
                }
              }

              // Now loop through the old replicas. We can send a tombstone
              // replication to any node that used to be a replica and was
              // higher in the replica list than the new replica.
              index = 0;
              for (std::vector<std::string>::iterator it = old_replicas.begin();
                   it != old_replicas.end();
                   ++it, ++index)
              {
                if (index >= new_level)
                {
                  // We can potentially tombstone the timer to one of these nodes.
                  bool old_rep_in_new_rep = get_replica_presence(*it, timer->replicas);

                  if (!old_rep_in_new_rep)
                  {
                    Timer* timer_copy = new Timer(*timer);
                    timer_copy->become_tombstone();
                    _replicator->replicate_timer_to_node(timer_copy, *it);
                    delete timer_copy; timer_copy = NULL;
                  }
                }
              }
            }

            // Add the timer to the store if we can. This is done
            // last so we don't invalidate the pointer to the timer.
            if (store_timer)
            {
              _handler->add_timer(timer);
            }
            else
            {
              delete timer; timer = NULL;
            }

            // Finally, note that we processed the timer
            _timers_processed_stat->increment();
          }
          catch (JsonFormatError err)
          {
            // A single entry is badly formatted. This is unexpected but we'll try
            // to keep going and process the rest of the timers.
            count_invalid_timers++;
            _invalid_timers_processed_stat->increment();
            TRC_INFO("JSON entry was invalid (hit error at %s:%d)",
                     err._file, err._line);
          }
        }

        // Check if we were able to successfully process any timers - if not
        // then bail out as there's something wrong with the node we're
        // querying
        if ((total_timers != 0) &&
            (count_invalid_timers == total_timers))
        {
          TRC_WARNING("Unable to process any timer entries in GET response");
          rc = HTTP_BAD_REQUEST;
        }
      }
      catch (JsonFormatError err)
      {
        // We've failed to find the Timers array. This suggests that
        // there's something seriously wrong with the node we're trying
        // to query so don't retry
        TRC_WARNING("JSON body didn't contain the Timers array");
        rc = HTTP_BAD_REQUEST;
      }

      // Send a DELETE to all the nodes to update their timer references
      if (delete_map.size() > 0)
      {
        std::string delete_body = create_delete_body(delete_map);

        for (std::vector<std::string>::iterator it = cluster_nodes.begin();
             it != cluster_nodes.end();
             ++it)
        {
          HTTPCode delete_rc = send_delete(*it, delete_body);

          if (delete_rc != HTTP_ACCEPTED)
          {
            // We've received an error response to the DELETE request. There's
            // not much more we can do here (a timeout will have already
            // been retried). A failed DELETE won't prevent the scaling operation
            // from finishing, it just means that we'll tell other nodes
            // about timers inefficiently.
            TRC_INFO("Error response (%d) to DELETE request to %s",
                     delete_rc,
                     (*it).c_str());
          }
        }
      }
    }
    else
    {
      // We've received an error response to the GET request. A timeout
      // will already have been retried by the underlying HTTPConnection,
      // so don't retry again
      TRC_WARNING("Error response (%d) to GET request to %s",
                  rc,
                  server_to_sync.c_str());
    }
  } while (rc == HTTP_PARTIAL_CONTENT);

  return rc;
}
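// NOTE: get_replica_level() and get_replica_presence() are used above but not
// shown.  The sketch below is a plausible standalone illustration, assuming
// they simply search a replica list: get_replica_level() reports whether a
// node appears in the list and at what index ("level"), and
// get_replica_presence() is the presence check alone.  The real Chronos
// helpers (which are members of ChronosInternalConnection) may differ.
#include <string>
#include <vector>

static bool get_replica_level(int& level,
                              const std::string& node,
                              const std::vector<std::string>& replicas)
{
  for (size_t ii = 0; ii < replicas.size(); ++ii)
  {
    if (replicas[ii] == node)
    {
      level = (int)ii;
      return true;
    }
  }

  return false;
}

static bool get_replica_presence(const std::string& node,
                                 const std::vector<std::string>& replicas)
{
  int unused_level;
  return get_replica_level(unused_level, node, replicas);
}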