AuthStore::Digest* AuthStore::JsonSerializerDeserializer:: deserialize_digest(const std::string& digest_s)
{
  TRC_DEBUG("Deserialize JSON document: %s", digest_s.c_str());

  rapidjson::Document json_doc;
  json_doc.Parse<0>(digest_s.c_str());

  if (json_doc.HasParseError())
  {
    TRC_DEBUG("Failed to parse document");
    return NULL;
  }

  // Allocate the digest up front; it is freed again below if any mandatory
  // field is missing or has the wrong type.
  Digest* result = new Digest();

  try
  {
    JSON_ASSERT_OBJECT(json_doc);
    JSON_ASSERT_CONTAINS(json_doc, JSON_DIGEST);
    JSON_ASSERT_OBJECT(json_doc[JSON_DIGEST]);
    const rapidjson::Value& digest_block = json_doc[JSON_DIGEST];

    // Pull the credential fields out of the nested digest object.
    JSON_GET_STRING_MEMBER(digest_block, JSON_HA1, result->_ha1);
    // The QoP is assumed to always be 'auth'.
    JSON_GET_STRING_MEMBER(digest_block, JSON_REALM, result->_realm);

    // The remaining fields live at the top level of the document.
    JSON_GET_STRING_MEMBER(json_doc, JSON_OPAQUE, result->_opaque);
    JSON_GET_STRING_MEMBER(json_doc, JSON_IMPU, result->_impu);
    JSON_GET_INT_MEMBER(json_doc, JSON_NC, result->_nonce_count);
  }
  catch(JsonFormatError err)
  {
    TRC_INFO("Failed to deserialize JSON document (hit error at %s:%d)", err._file, err._line);
    delete result;
    result = NULL;
  }

  return result;
}
Timer* Timer::from_json_obj(TimerID id, uint64_t replica_hash, std::string& error, bool& replicated, rapidjson::Value& doc) { Timer* timer = NULL; try { JSON_ASSERT_CONTAINS(doc, "timing"); JSON_ASSERT_CONTAINS(doc, "callback"); // Parse out the timing block rapidjson::Value& timing = doc["timing"]; JSON_ASSERT_OBJECT(timing); JSON_ASSERT_CONTAINS(timing, "interval"); rapidjson::Value& interval = timing["interval"]; JSON_ASSERT_INT(interval); // Extract the repeat-for parameter, if it's absent, set it to the interval // instead. int repeat_for_int; if (timing.HasMember("repeat-for")) { JSON_GET_INT_MEMBER(timing, "repeat-for", repeat_for_int); } else { repeat_for_int = interval.GetInt(); } if ((interval.GetInt() == 0) && (repeat_for_int != 0)) { // If the interval time is 0 and the repeat_for_int isn't then reject the timer. error = "Can't have a zero interval time with a non-zero (%s) repeat-for time", std::to_string(repeat_for_int); return NULL; } timer = new Timer(id, (interval.GetInt() * 1000), (repeat_for_int * 1000)); if (timing.HasMember("start-time-delta")) { // Timer JSON specified a time offset, use that to determine the true // start time. uint64_t start_time_delta; JSON_GET_INT_64_MEMBER(timing, "start-time-delta", start_time_delta); // This cast is safe as this sum is deliberately designed to wrap over UINT_MAX. timer->start_time_mono_ms = (uint32_t)(clock_gettime_ms(CLOCK_MONOTONIC) + start_time_delta); } else if (timing.HasMember("start-time")) { // Timer JSON specifies a start-time, use that instead of now. uint64_t real_start_time; JSON_GET_INT_64_MEMBER(timing, "start-time", real_start_time); uint64_t real_time = clock_gettime_ms(CLOCK_REALTIME); uint64_t mono_time = clock_gettime_ms(CLOCK_MONOTONIC); // This cast is safe as this sum is deliberately designed to wrap over UINT_MAX. 
timer->start_time_mono_ms = (uint32_t)(mono_time + real_start_time - real_time); } if (timing.HasMember("sequence-number")) { JSON_GET_INT_MEMBER(timing, "sequence-number", timer->sequence_number); } // Parse out the 'callback' block rapidjson::Value& callback = doc["callback"]; JSON_ASSERT_OBJECT(callback); JSON_ASSERT_CONTAINS(callback, "http"); rapidjson::Value& http = callback["http"]; JSON_ASSERT_OBJECT(http); JSON_GET_STRING_MEMBER(http, "uri", timer->callback_url); JSON_GET_STRING_MEMBER(http, "opaque", timer->callback_body); if (doc.HasMember("reliability")) { // Parse out the 'reliability' block rapidjson::Value& reliability = doc["reliability"]; JSON_ASSERT_OBJECT(reliability); if (reliability.HasMember("cluster-view-id")) { JSON_GET_STRING_MEMBER(reliability, "cluster-view-id", timer->cluster_view_id); } if (reliability.HasMember("replicas")) { rapidjson::Value& replicas = reliability["replicas"]; JSON_ASSERT_ARRAY(replicas); if (replicas.Size() == 0) { error = "If replicas is specified it must be non-empty"; delete timer; timer = NULL; return NULL; } timer->_replication_factor = replicas.Size(); for (rapidjson::Value::ConstValueIterator it = replicas.Begin(); it != replicas.End(); ++it) { JSON_ASSERT_STRING(*it); timer->replicas.push_back(std::string(it->GetString(), it->GetStringLength())); } } else { if (reliability.HasMember("replication-factor")) { JSON_GET_INT_MEMBER(reliability, "replication-factor", timer->_replication_factor); } else { // Default replication factor is 2. timer->_replication_factor = 2; } } } else { // Default to 2 replicas timer->_replication_factor = 2; } timer->_replica_tracker = pow(2, timer->_replication_factor) - 1; if (timer->replicas.empty()) { // Replicas not determined above, determine them now. Note that this implies // the request is from a client, not another replica. 
replicated = false; timer->calculate_replicas(replica_hash); } else { // Replicas were specified in the request, must be a replication message // from another cluster node. replicated = true; } } catch (JsonFormatError err) { error = "Badly formed Timer entry - hit error on line " + std::to_string(err._line); delete timer; timer = NULL; return NULL; } return timer; }
/// Resynchronise timers with a single node during a scaling operation.
///
/// Repeatedly GETs timers from server_to_sync (looping while the server
/// returns 206 Partial Content), and for each returned timer decides whether
/// to store it locally, replicate it to new replicas, and/or tombstone it on
/// old replicas. After each page of timers, sends a DELETE to every node in
/// cluster_nodes so they can update their timer references.
///
/// @param server_to_sync The node to pull timers from.
/// @param cluster_nodes  All nodes in the cluster (sent the DELETE updates).
/// @param localhost      This node's own address (used to find our position
///                       in the old/new replica lists).
/// @return The HTTP result of the final GET (or HTTP_BAD_REQUEST if the
///         response body was unusable).
HTTPCode ChronosInternalConnection::resynchronise_with_single_node(
                           const std::string server_to_sync,
                           std::vector<std::string> cluster_nodes,
                           std::string localhost)
{
  TRC_DEBUG("Querying %s for timers", server_to_sync.c_str());

  // Get the cluster view ID from the global configuration
  std::string cluster_view_id;
  __globals->get_cluster_view_id(cluster_view_id);

  std::string response;
  HTTPCode rc;

  // Loop sending GETs to the server while the response is a 206
  do
  {
    // Map of timer ID -> this node's level in the timer's new replica list;
    // accumulated per page and sent to all nodes as a DELETE body below.
    std::map<TimerID, int> delete_map;

    rc = send_get(server_to_sync, localhost, PARAM_SYNC_MODE_VALUE_SCALE, cluster_view_id, MAX_TIMERS_IN_RESPONSE, response);

    if ((rc == HTTP_PARTIAL_CONTENT) || (rc == HTTP_OK))
    {
      // Parse the GET response
      rapidjson::Document doc;
      doc.Parse<0>(response.c_str());

      if (doc.HasParseError())
      {
        // We've failed to parse the document as JSON. This suggests that
        // there's something seriously wrong with the node we're trying
        // to query so don't retry
        TRC_WARNING("Failed to parse document as JSON");
        rc = HTTP_BAD_REQUEST;
        break;
      }

      try
      {
        JSON_ASSERT_CONTAINS(doc, JSON_TIMERS);
        JSON_ASSERT_ARRAY(doc[JSON_TIMERS]);
        const rapidjson::Value& ids_arr = doc[JSON_TIMERS];
        int total_timers = ids_arr.Size();
        int count_invalid_timers = 0;

        // Each array entry is an object holding the timer's ID, its old
        // replica list, and the timer body itself.
        for (rapidjson::Value::ConstValueIterator ids_it = ids_arr.Begin();
             ids_it != ids_arr.End();
             ++ids_it)
        {
          try
          {
            const rapidjson::Value& id_arr = *ids_it;
            JSON_ASSERT_OBJECT(id_arr);

            // Get the timer ID
            TimerID timer_id;
            JSON_GET_INT_MEMBER(id_arr, JSON_TIMER_ID, timer_id);

            // Get the old replicas
            std::vector<std::string> old_replicas;
            JSON_ASSERT_CONTAINS(id_arr, JSON_OLD_REPLICAS);
            JSON_ASSERT_ARRAY(id_arr[JSON_OLD_REPLICAS]);
            const rapidjson::Value& old_repl_arr = id_arr[JSON_OLD_REPLICAS];

            for (rapidjson::Value::ConstValueIterator repl_it = old_repl_arr.Begin();
                 repl_it != old_repl_arr.End();
                 ++repl_it)
            {
              JSON_ASSERT_STRING(*repl_it);
              old_replicas.push_back(repl_it->GetString());
            }

            // Get the timer.
            JSON_ASSERT_CONTAINS(id_arr, JSON_TIMER);
            JSON_ASSERT_OBJECT(id_arr[JSON_TIMER]);
            const rapidjson::Value& timer_obj = id_arr[JSON_TIMER];

            bool store_timer = false;
            std::string error_str;
            bool replicated_timer;
            // NOTE(review): the C-style cast strips const from timer_obj;
            // from_json_obj takes a non-const Value& — confirm it doesn't
            // actually mutate the document.
            Timer* timer = Timer::from_json_obj(timer_id,
                                                0,
                                                error_str,
                                                replicated_timer,
                                                (rapidjson::Value&)timer_obj);

            if (!timer)
            {
              count_invalid_timers++;
              TRC_INFO("Unable to create timer - error: %s", error_str.c_str());
              continue;
            }
            else if (!replicated_timer)
            {
              // A timer in a resync response should always carry an explicit
              // replica list; one without is unusable here.
              count_invalid_timers++;
              TRC_INFO("Unreplicated timer in response - ignoring");
              delete timer; timer = NULL;
              continue;
            }

            // Decide what we're going to do with this timer.
            // "Level" is this node's index in the respective replica list;
            // presence/absence is returned as the bool.
            int old_level = 0;
            bool in_old_replica_list = get_replica_level(old_level, localhost, old_replicas);
            int new_level = 0;
            bool in_new_replica_list = get_replica_level(new_level, localhost, timer->replicas);

            // Add the timer to the delete map we're building up
            delete_map.insert(std::pair<TimerID, int>(timer_id, new_level));

            if (in_new_replica_list)
            {
              // Add the timer to my store if I can.
              if (in_old_replica_list)
              {
                if (old_level >= new_level)
                {
                  // Add/update timer
                  // LCOV_EXCL_START - Adding timer paths are tested elsewhere
                  store_timer = true;
                  // LCOV_EXCL_STOP
                }
              }
              else
              {
                // Add/update timer
                store_timer = true;
              }

              // Now loop through the new replicas.
              int index = 0;
              for (std::vector<std::string>::iterator it = timer->replicas.begin();
                   it != timer->replicas.end();
                   ++it, ++index)
              {
                if (index <= new_level)
                {
                  // Do nothing. We've covered adding the timer to the store above
                }
                else
                {
                  // We can potentially replicate the timer to one of these nodes.
                  // Check whether the new replica was involved previously
                  int old_rep_level = 0;
                  bool is_new_rep_in_old_rep = get_replica_level(old_rep_level,
                                                                 *it,
                                                                 old_replicas);

                  // Replicate if the node is new to this timer, or if it was
                  // previously at the same level or lower in the list than
                  // this node's new position.
                  if (is_new_rep_in_old_rep)
                  {
                    if (old_rep_level >= new_level)
                    {
                      _replicator->replicate_timer_to_node(timer, *it);
                    }
                  }
                  else
                  {
                    _replicator->replicate_timer_to_node(timer, *it);
                  }
                }
              }

              // Now loop through the old replicas. We can send a tombstone
              // replication to any node that used to be a replica and was
              // higher in the replica list than the new replica.
              index = 0;
              for (std::vector<std::string>::iterator it = old_replicas.begin();
                   it != old_replicas.end();
                   ++it, ++index)
              {
                if (index >= new_level)
                {
                  // We can potentially tombstone the timer to one of these nodes.
                  bool old_rep_in_new_rep = get_replica_presence(*it, timer->replicas);

                  if (!old_rep_in_new_rep)
                  {
                    // The tombstone is replicated from a short-lived copy so
                    // the original timer stays live for add_timer below.
                    Timer* timer_copy = new Timer(*timer);
                    timer_copy->become_tombstone();
                    _replicator->replicate_timer_to_node(timer_copy, *it);
                    delete timer_copy; timer_copy = NULL;
                  }
                }
              }
            }

            // Add the timer to the store if we can. This is done
            // last so we don't invalidate the pointer to the timer.
            if (store_timer)
            {
              // add_timer takes ownership of the timer.
              _handler->add_timer(timer);
            }
            else
            {
              delete timer; timer = NULL;
            }

            // Finally, note that we processed the timer
            _timers_processed_stat->increment();
          }
          catch (JsonFormatError err)
          {
            // A single entry is badly formatted. This is unexpected but we'll try
            // to keep going and process the rest of the timers.
            count_invalid_timers++;
            _invalid_timers_processed_stat->increment();
            TRC_INFO("JSON entry was invalid (hit error at %s:%d)",
                     err._file, err._line);
          }
        }

        // Check if we were able to successfully process any timers - if not
        // then bail out as there's something wrong with the node we're
        // querying
        if ((total_timers != 0) &&
            (count_invalid_timers == total_timers))
        {
          TRC_WARNING("Unable to process any timer entries in GET response");
          rc = HTTP_BAD_REQUEST;
        }
      }
      catch (JsonFormatError err)
      {
        // We've failed to find the Timers array. This suggests that
        // there's something seriously wrong with the node we're trying
        // to query so don't retry
        TRC_WARNING("JSON body didn't contain the Timers array");
        rc = HTTP_BAD_REQUEST;
      }

      // Send a DELETE to all the nodes to update their timer references
      if (delete_map.size() > 0)
      {
        std::string delete_body = create_delete_body(delete_map);
        for (std::vector<std::string>::iterator it = cluster_nodes.begin();
             it != cluster_nodes.end();
             ++it)
        {
          HTTPCode delete_rc = send_delete(*it, delete_body);

          if (delete_rc != HTTP_ACCEPTED)
          {
            // We've received an error response to the DELETE request. There's
            // not much more we can do here (a timeout will have already
            // been retried). A failed DELETE won't prevent the scaling operation
            // from finishing, it just means that we'll tell other nodes
            // about timers inefficiently.
            TRC_INFO("Error response (%d) to DELETE request to %s",
                     delete_rc,
                     (*it).c_str());
          }
        }
      }
    }
    else
    {
      // We've received an error response to the GET request. A timeout
      // will already have been retried by the underlying HTTPConnection,
      // so don't retry again
      TRC_WARNING("Error response (%d) to GET request to %s",
                  rc,
                  server_to_sync.c_str());
    }
  }
  while (rc == HTTP_PARTIAL_CONTENT);

  return rc;
}
void SasService::extract_config() { // Check whether the file exists. struct stat s; TRC_DEBUG("stat(%s) returns %d", _configuration.c_str(), stat(_configuration.c_str(), &s)); if ((stat(_configuration.c_str(), &s) != 0) && (errno == ENOENT)) { TRC_STATUS("No SAS configuration (file %s does not exist)", _configuration.c_str()); CL_SAS_FILE_MISSING.log(); return; } TRC_STATUS("Loading SAS configuration from %s", _configuration.c_str()); // Read from the file std::ifstream fs(_configuration.c_str()); std::string sas_str((std::istreambuf_iterator<char>(fs)), std::istreambuf_iterator<char>()); if (sas_str == "") { TRC_ERROR("Failed to read SAS configuration data from %s", _configuration.c_str()); CL_SAS_FILE_EMPTY.log(); return; } // Now parse the document rapidjson::Document doc; doc.Parse<0>(sas_str.c_str()); if (doc.HasParseError()) { TRC_ERROR("Failed to read SAS configuration data: %s\nError: %s", sas_str.c_str(), rapidjson::GetParseError_En(doc.GetParseError())); CL_SAS_FILE_INVALID.log(); return; } try { JSON_ASSERT_CONTAINS(doc, "sas_servers"); JSON_ASSERT_ARRAY(doc["sas_servers"]); rapidjson::Value& sas_servers = doc["sas_servers"]; for (rapidjson::Value::ValueIterator sas_it = sas_servers.Begin(); sas_it != sas_servers.End(); ++sas_it) { JSON_ASSERT_OBJECT(*sas_it); JSON_ASSERT_CONTAINS(*sas_it, "ip"); JSON_ASSERT_STRING((*sas_it)["ip"]); boost::lock_guard<boost::shared_mutex> write_lock(_sas_server_lock); _single_sas_server = (*sas_it)["ip"].GetString(); } // We have a valid rapidjson object. Write this to the _sas_servers member rapidjson::StringBuffer buffer; rapidjson::Writer<rapidjson::StringBuffer> writer(buffer); sas_servers.Accept(writer); TRC_DEBUG("New _sas_servers config: %s", buffer.GetString()); boost::lock_guard<boost::shared_mutex> write_lock(_sas_server_lock); _sas_servers = buffer.GetString(); } catch (JsonFormatError err) { TRC_ERROR("Badly formed SAS configuration file"); CL_SAS_FILE_INVALID.log(); } }