SessionStore::Session* SessionStore::JsonSerializerDeserializer::
  deserialize_session(const std::string& data)
{
  TRC_DEBUG("Deserialize JSON document: %s", data.c_str());

  // Parse the serialized session as JSON.
  rapidjson::Document json;
  json.Parse<0>(data.c_str());

  if (json.HasParseError())
  {
    TRC_DEBUG("Failed to parse document");
    return NULL;
  }

  Session* result = new Session();

  try
  {
    // Pull out the mandatory members. Any missing or mistyped member
    // raises a JsonFormatError, handled below.
    JSON_GET_STRING_MEMBER(json, JSON_SESSION_ID, result->session_id);

    JSON_ASSERT_CONTAINS(json, JSON_CCFS);
    JSON_ASSERT_ARRAY(json[JSON_CCFS]);
    const rapidjson::Value& ccf_array = json[JSON_CCFS];

    for (rapidjson::Value::ConstValueIterator it = ccf_array.Begin();
         it != ccf_array.End();
         ++it)
    {
      JSON_ASSERT_STRING(*it);
      result->ccf.push_back(it->GetString());
    }

    JSON_GET_INT_MEMBER(json, JSON_ACCT_RECORD_NUM, result->acct_record_number);
    JSON_GET_STRING_MEMBER(json, JSON_TIMER_ID, result->timer_id);
    JSON_GET_INT_MEMBER(json, JSON_REFRESH_TIME, result->session_refresh_time);
    JSON_GET_INT_MEMBER(json, JSON_INTERIM_INTERVAL, result->interim_interval);
  }
  catch(JsonFormatError err)
  {
    // Malformed document - discard the partially built session.
    TRC_INFO("Failed to deserialize JSON document (hit error at %s:%d)",
             err._file, err._line);
    delete result;
    result = NULL;
  }

  return result;
}
AuthStore::Digest* AuthStore::JsonSerializerDeserializer::
  deserialize_digest(const std::string& digest_s)
{
  TRC_DEBUG("Deserialize JSON document: %s", digest_s.c_str());

  // Parse the serialized digest as JSON.
  rapidjson::Document json;
  json.Parse<0>(digest_s.c_str());

  if (json.HasParseError())
  {
    TRC_DEBUG("Failed to parse document");
    return NULL;
  }

  Digest* result = new Digest();

  try
  {
    // The top level must be an object containing a nested digest object.
    JSON_ASSERT_OBJECT(json);
    JSON_ASSERT_CONTAINS(json, JSON_DIGEST);
    JSON_ASSERT_OBJECT(json[JSON_DIGEST]);
    const rapidjson::Value& digest_obj = json[JSON_DIGEST];

    JSON_GET_STRING_MEMBER(digest_obj, JSON_HA1, result->_ha1);
    // The QoP is assumed to always be 'auth'.
    JSON_GET_STRING_MEMBER(digest_obj, JSON_REALM, result->_realm);

    // Remaining members live at the top level of the document.
    JSON_GET_STRING_MEMBER(json, JSON_OPAQUE, result->_opaque);
    JSON_GET_STRING_MEMBER(json, JSON_IMPU, result->_impu);
    JSON_GET_INT_MEMBER(json, JSON_NC, result->_nonce_count);
  }
  catch(JsonFormatError err)
  {
    // Malformed document - discard the partially built digest.
    TRC_INFO("Failed to deserialize JSON document (hit error at %s:%d)",
             err._file, err._line);
    delete result;
    result = NULL;
  }

  return result;
}
JSONEnumService::JSONEnumService(std::string configuration)
{
  // If there is no configuration file at all, log it and run with no
  // number prefixes configured.
  struct stat file_stat;
  if ((stat(configuration.c_str(), &file_stat) != 0) && (errno == ENOENT))
  {
    TRC_STATUS("No ENUM configuration (file %s does not exist)",
               configuration.c_str());
    CL_SPROUT_ENUM_FILE_MISSING.log(configuration.c_str());
    return;
  }

  TRC_STATUS("Loading ENUM configuration from %s", configuration.c_str());

  // Slurp the whole file into a string.
  std::ifstream file_stream(configuration.c_str());
  std::string contents((std::istreambuf_iterator<char>(file_stream)),
                       std::istreambuf_iterator<char>());

  if (contents == "")
  {
    // LCOV_EXCL_START
    TRC_ERROR("Failed to read ENUM configuration data from %s",
              configuration.c_str());
    CL_SPROUT_ENUM_FILE_EMPTY.log(configuration.c_str());
    return;
    // LCOV_EXCL_STOP
  }

  // Parse the file contents as JSON.
  rapidjson::Document json;
  json.Parse<0>(contents.c_str());

  if (json.HasParseError())
  {
    TRC_ERROR("Failed to read ENUM configuration data: %s\nError: %s",
              contents.c_str(),
              rapidjson::GetParseError_En(json.GetParseError()));
    CL_SPROUT_ENUM_FILE_INVALID.log(configuration.c_str());
    return;
  }

  try
  {
    JSON_ASSERT_CONTAINS(json, "number_blocks");
    JSON_ASSERT_ARRAY(json["number_blocks"]);
    const rapidjson::Value& blocks = json["number_blocks"];

    for (rapidjson::Value::ConstValueIterator block_it = blocks.Begin();
         block_it != blocks.End();
         ++block_it)
    {
      try
      {
        std::string prefix;
        JSON_GET_STRING_MEMBER(*block_it, "prefix", prefix);
        std::string regex;
        JSON_GET_STRING_MEMBER(*block_it, "regex", regex);

        // Entry is well-formed, so add it.
        TRC_DEBUG("Found valid number prefix block %s", prefix.c_str());
        NumberPrefix* entry = new NumberPrefix;
        entry->prefix = prefix;

        if (parse_regex_replace(regex, entry->match, entry->replace))
        {
          _number_prefixes.push_back(entry);
          TRC_STATUS(" Adding number prefix %s, regex=%s",
                     entry->prefix.c_str(), regex.c_str());
        }
        else
        {
          TRC_WARNING("Badly formed regular expression in ENUM number block %s",
                      regex.c_str());
          delete entry;
        }
      }
      catch (JsonFormatError err)
      {
        // This number block is malformed - log it and carry on with the rest.
        TRC_WARNING("Badly formed ENUM number block (hit error at %s:%d)",
                    err._file, err._line);
        CL_SPROUT_ENUM_FILE_INVALID.log(configuration.c_str());
      }
    }
  }
  catch (JsonFormatError err)
  {
    TRC_ERROR("Badly formed ENUM configuration data - missing number_blocks object");
    CL_SPROUT_ENUM_FILE_INVALID.log(configuration.c_str());
  }
}
void ControllerTask::handle_delete() { // Check the request has a valid JSON body std::string body = _req.get_rx_body(); rapidjson::Document doc; doc.Parse<0>(body.c_str()); if (doc.HasParseError()) { TRC_INFO("Failed to parse document as JSON"); send_http_reply(HTTP_BAD_REQUEST); return; } // Now loop through the body, pulling out the IDs/replica numbers // The JSON body should have the format: // {"IDs": [{"ID": 123, "ReplicaIndex": 0}, // {"ID": 456, "ReplicaIndex": 2}, // ...] // The replica_index is zero-indexed (so the primary replica has an // index of 0). try { JSON_ASSERT_CONTAINS(doc, JSON_IDS); JSON_ASSERT_ARRAY(doc[JSON_IDS]); const rapidjson::Value& ids_arr = doc[JSON_IDS]; // The request is valid, so respond with a 202. Now loop through the // the body and update the replica trackers. send_http_reply(HTTP_ACCEPTED); for (rapidjson::Value::ConstValueIterator ids_it = ids_arr.Begin(); ids_it != ids_arr.End(); ++ids_it) { try { TimerID timer_id; int replica_index; JSON_GET_INT_64_MEMBER(*ids_it, JSON_ID, timer_id); JSON_GET_INT_MEMBER(*ids_it, JSON_REPLICA_INDEX, replica_index); // Update the timer's replica_tracker to show that the replicas // at level 'replica_index' and higher have been informed // about the timer. This will tombstone the timer if all // replicas have been informed. _cfg->_handler->update_replica_tracker_for_timer(timer_id, replica_index); } catch (JsonFormatError err) { TRC_INFO("JSON entry was invalid (hit error at %s:%d)", err._file, err._line); } } } catch (JsonFormatError err) { TRC_INFO("JSON body didn't contain the IDs array"); send_http_reply(HTTP_BAD_REQUEST); } }
/// Construct a Timer from a parsed JSON object.
///
/// @param id           ID to assign to the new timer.
/// @param replica_hash Hash used to calculate replicas when the JSON does
///                     not specify any.
/// @param error        (out) Human-readable error string, set on failure.
/// @param replicated   (out) True if the JSON specified replicas (so this
///                     is a replication message from another cluster node),
///                     false if we calculated the replicas ourselves.
/// @param doc          The JSON object describing the timer.
///
/// @return A newly allocated Timer (caller owns it), or NULL on error.
Timer* Timer::from_json_obj(TimerID id,
                            uint64_t replica_hash,
                            std::string& error,
                            bool& replicated,
                            rapidjson::Value& doc)
{
  Timer* timer = NULL;

  try
  {
    JSON_ASSERT_CONTAINS(doc, "timing");
    JSON_ASSERT_CONTAINS(doc, "callback");

    // Parse out the timing block
    rapidjson::Value& timing = doc["timing"];
    JSON_ASSERT_OBJECT(timing);
    JSON_ASSERT_CONTAINS(timing, "interval");
    rapidjson::Value& interval = timing["interval"];
    JSON_ASSERT_INT(interval);

    // Extract the repeat-for parameter, if it's absent, set it to the
    // interval instead.
    int repeat_for_int;
    if (timing.HasMember("repeat-for"))
    {
      JSON_GET_INT_MEMBER(timing, "repeat-for", repeat_for_int);
    }
    else
    {
      repeat_for_int = interval.GetInt();
    }

    if ((interval.GetInt() == 0) && (repeat_for_int != 0))
    {
      // If the interval time is 0 and the repeat-for isn't then reject the
      // timer.
      //
      // Fix: the original code used the comma operator here
      // (`error = "...(%s)...", std::to_string(repeat_for_int);`), which
      // discarded the value and left a literal "%s" in the error string.
      // Build the message by concatenation instead.
      error = "Can't have a zero interval time with a non-zero (" +
              std::to_string(repeat_for_int) + ") repeat-for time";
      return NULL;
    }

    // Durations are given in seconds in the JSON but stored in milliseconds.
    timer = new Timer(id, (interval.GetInt() * 1000), (repeat_for_int * 1000));

    if (timing.HasMember("start-time-delta"))
    {
      // Timer JSON specified a time offset, use that to determine the true
      // start time.
      uint64_t start_time_delta;
      JSON_GET_INT_64_MEMBER(timing, "start-time-delta", start_time_delta);

      // This cast is safe as this sum is deliberately designed to wrap over
      // UINT_MAX.
      timer->start_time_mono_ms =
        (uint32_t)(clock_gettime_ms(CLOCK_MONOTONIC) + start_time_delta);
    }
    else if (timing.HasMember("start-time"))
    {
      // Timer JSON specifies a start-time, use that instead of now.
      uint64_t real_start_time;
      JSON_GET_INT_64_MEMBER(timing, "start-time", real_start_time);
      uint64_t real_time = clock_gettime_ms(CLOCK_REALTIME);
      uint64_t mono_time = clock_gettime_ms(CLOCK_MONOTONIC);

      // This cast is safe as this sum is deliberately designed to wrap over
      // UINT_MAX.
      timer->start_time_mono_ms =
        (uint32_t)(mono_time + real_start_time - real_time);
    }

    if (timing.HasMember("sequence-number"))
    {
      JSON_GET_INT_MEMBER(timing, "sequence-number", timer->sequence_number);
    }

    // Parse out the 'callback' block
    rapidjson::Value& callback = doc["callback"];
    JSON_ASSERT_OBJECT(callback);
    JSON_ASSERT_CONTAINS(callback, "http");
    rapidjson::Value& http = callback["http"];
    JSON_ASSERT_OBJECT(http);
    JSON_GET_STRING_MEMBER(http, "uri", timer->callback_url);
    JSON_GET_STRING_MEMBER(http, "opaque", timer->callback_body);

    if (doc.HasMember("reliability"))
    {
      // Parse out the 'reliability' block
      rapidjson::Value& reliability = doc["reliability"];
      JSON_ASSERT_OBJECT(reliability);

      if (reliability.HasMember("cluster-view-id"))
      {
        JSON_GET_STRING_MEMBER(reliability,
                               "cluster-view-id",
                               timer->cluster_view_id);
      }

      if (reliability.HasMember("replicas"))
      {
        rapidjson::Value& replicas = reliability["replicas"];
        JSON_ASSERT_ARRAY(replicas);

        if (replicas.Size() == 0)
        {
          error = "If replicas is specified it must be non-empty";
          delete timer; timer = NULL;
          return NULL;
        }

        timer->_replication_factor = replicas.Size();

        for (rapidjson::Value::ConstValueIterator it = replicas.Begin();
             it != replicas.End();
             ++it)
        {
          JSON_ASSERT_STRING(*it);
          timer->replicas.push_back(std::string(it->GetString(),
                                                it->GetStringLength()));
        }
      }
      else
      {
        if (reliability.HasMember("replication-factor"))
        {
          JSON_GET_INT_MEMBER(reliability,
                              "replication-factor",
                              timer->_replication_factor);
        }
        else
        {
          // Default replication factor is 2.
          timer->_replication_factor = 2;
        }
      }
    }
    else
    {
      // Default to 2 replicas
      timer->_replication_factor = 2;
    }

    // One bit per replica in the tracker.
    timer->_replica_tracker = pow(2, timer->_replication_factor) - 1;

    if (timer->replicas.empty())
    {
      // Replicas not determined above, determine them now. Note that this
      // implies the request is from a client, not another replica.
      replicated = false;
      timer->calculate_replicas(replica_hash);
    }
    else
    {
      // Replicas were specified in the request, must be a replication
      // message from another cluster node.
      replicated = true;
    }
  }
  catch (JsonFormatError err)
  {
    error = "Badly formed Timer entry - hit error on line " +
            std::to_string(err._line);
    delete timer; timer = NULL;
    return NULL;
  }

  return timer;
}
void BgcfService::update_routes() { // Check whether the file exists. struct stat s; TRC_DEBUG("stat(%s) returns %d", _configuration.c_str(), stat(_configuration.c_str(), &s)); if ((stat(_configuration.c_str(), &s) != 0) && (errno == ENOENT)) { TRC_STATUS("No BGCF configuration (file %s does not exist)", _configuration.c_str()); CL_SPROUT_BGCF_FILE_MISSING.log(); return; } TRC_STATUS("Loading BGCF configuration from %s", _configuration.c_str()); // Read from the file std::ifstream fs(_configuration.c_str()); std::string bgcf_str((std::istreambuf_iterator<char>(fs)), std::istreambuf_iterator<char>()); if (bgcf_str == "") { // LCOV_EXCL_START TRC_ERROR("Failed to read BGCF configuration data from %s", _configuration.c_str()); CL_SPROUT_BGCF_FILE_EMPTY.log(); return; // LCOV_EXCL_STOP } // Now parse the document rapidjson::Document doc; doc.Parse<0>(bgcf_str.c_str()); if (doc.HasParseError()) { TRC_ERROR("Failed to read BGCF configuration data: %s\nError: %s", bgcf_str.c_str(), rapidjson::GetParseError_En(doc.GetParseError())); CL_SPROUT_BGCF_FILE_INVALID.log(); return; } try { std::map<std::string, std::vector<std::string>> new_domain_routes; std::map<std::string, std::vector<std::string>> new_number_routes; JSON_ASSERT_CONTAINS(doc, "routes"); JSON_ASSERT_ARRAY(doc["routes"]); const rapidjson::Value& routes_arr = doc["routes"]; for (rapidjson::Value::ConstValueIterator routes_it = routes_arr.Begin(); routes_it != routes_arr.End(); ++routes_it) { // An entry is valid if it has either a domain (string) OR a // number (string) AND an array of routes if ((((((*routes_it).HasMember("domain")) && ((*routes_it)["domain"].IsString())) && (!(*routes_it).HasMember("number"))) || ((!(*routes_it).HasMember("domain")) && (((*routes_it).HasMember("number")) && ((*routes_it)["number"].IsString())))) && ((*routes_it).HasMember("route") && (*routes_it)["route"].IsArray())) { std::vector<std::string> route_vec; const rapidjson::Value& route_arr = (*routes_it)["route"]; for 
(rapidjson::Value::ConstValueIterator route_it = route_arr.Begin(); route_it != route_arr.End(); ++route_it) { std::string route_uri = (*route_it).GetString(); TRC_DEBUG(" %s", route_uri.c_str()); route_vec.push_back(route_uri); } std::string routing_value; if ((*routes_it).HasMember("domain")) { routing_value = (*routes_it)["domain"].GetString(); new_domain_routes.insert(std::make_pair(routing_value, route_vec)); } else { routing_value = (*routes_it)["number"].GetString(); new_number_routes.insert( std::make_pair(PJUtils::remove_visual_separators(routing_value), route_vec)); } route_vec.clear(); TRC_DEBUG("Add route for %s", routing_value.c_str()); } else { TRC_WARNING("Badly formed BGCF route entry"); CL_SPROUT_BGCF_FILE_INVALID.log(); } } // Take a write lock on the mutex in RAII style boost::lock_guard<boost::shared_mutex> write_lock(_routes_rw_lock); _domain_routes = new_domain_routes; _number_routes = new_number_routes; } catch (JsonFormatError err) { TRC_ERROR("Badly formed BGCF configuration file - missing routes object"); CL_SPROUT_BGCF_FILE_INVALID.log(); } }
void JSONEnumService::update_enum() { // Check whether the file exists. struct stat s; if ((stat(_configuration.c_str(), &s) != 0) && (errno == ENOENT)) { TRC_STATUS("No ENUM configuration (file %s does not exist)", _configuration.c_str()); CL_SPROUT_ENUM_FILE_MISSING.log(_configuration.c_str()); return; } TRC_STATUS("Loading ENUM configuration from %s", _configuration.c_str()); // Read from the file std::ifstream fs(_configuration.c_str()); std::string enum_str((std::istreambuf_iterator<char>(fs)), std::istreambuf_iterator<char>()); if (enum_str == "") { // LCOV_EXCL_START TRC_ERROR("Failed to read ENUM configuration data from %s", _configuration.c_str()); CL_SPROUT_ENUM_FILE_EMPTY.log(_configuration.c_str()); return; // LCOV_EXCL_STOP } // Now parse the document rapidjson::Document doc; doc.Parse<0>(enum_str.c_str()); if (doc.HasParseError()) { TRC_ERROR("Failed to read ENUM configuration data: %s\nError: %s", enum_str.c_str(), rapidjson::GetParseError_En(doc.GetParseError())); CL_SPROUT_ENUM_FILE_INVALID.log(_configuration.c_str()); return; } try { std::vector<NumberPrefix> new_number_prefixes; std::map<std::string, NumberPrefix> new_prefix_regex_map; JSON_ASSERT_CONTAINS(doc, "number_blocks"); JSON_ASSERT_ARRAY(doc["number_blocks"]); const rapidjson::Value& nb_arr = doc["number_blocks"]; for (rapidjson::Value::ConstValueIterator nb_it = nb_arr.Begin(); nb_it != nb_arr.End(); ++nb_it) { try { std::string prefix; JSON_GET_STRING_MEMBER(*nb_it, "prefix", prefix); std::string regex; JSON_GET_STRING_MEMBER(*nb_it, "regex", regex); // Entry is well-formed, so strip off visual separators and add it. 
TRC_DEBUG("Found valid number prefix block %s", prefix.c_str()); NumberPrefix pfix; prefix = PJUtils::remove_visual_separators(prefix); pfix.prefix = prefix; if (parse_regex_replace(regex, pfix.match, pfix.replace)) { // Create an array in order of entries in json file, and a map // (automatically sorted in order of key length) so we can later // match numbers to the most specific prefixes new_number_prefixes.push_back(pfix); new_prefix_regex_map.insert(std::make_pair(prefix, pfix)); TRC_STATUS(" Adding number prefix %s, regex=%s", pfix.prefix.c_str(), regex.c_str()); } else { TRC_WARNING("Badly formed regular expression in ENUM number block %s", regex.c_str()); } } catch (JsonFormatError err) { // Badly formed number block. TRC_WARNING("Badly formed ENUM number block (hit error at %s:%d)", err._file, err._line); CL_SPROUT_ENUM_FILE_INVALID.log(_configuration.c_str()); } } // Take a write lock on the mutex in RAII style boost::lock_guard<boost::shared_mutex> write_lock(_number_prefixes_rw_lock); _number_prefixes = new_number_prefixes; _prefix_regex_map = new_prefix_regex_map; } catch (JsonFormatError err) { TRC_ERROR("Badly formed ENUM configuration data - missing number_blocks object"); CL_SPROUT_ENUM_FILE_INVALID.log(_configuration.c_str()); } }
void SCSCFSelector::update_scscf() { // Check whether the file exists. struct stat s; if ((stat(_configuration.c_str(), &s) != 0) && (errno == ENOENT)) { TRC_STATUS("No S-CSCF configuration data (file %s does not exist)", _configuration.c_str()); CL_SPROUT_SCSCF_FILE_MISSING.log(); return; } TRC_STATUS("Loading S-CSCF configuration from %s", _configuration.c_str()); // Read from the file std::ifstream fs(_configuration.c_str()); std::string scscf_str((std::istreambuf_iterator<char>(fs)), std::istreambuf_iterator<char>()); if (scscf_str == "") { // LCOV_EXCL_START TRC_ERROR("Failed to read S-CSCF configuration data from %s", _configuration.c_str()); CL_SPROUT_SCSCF_FILE_EMPTY.log(); return; // LCOV_EXCL_STOP } // Now parse the document rapidjson::Document doc; doc.Parse<0>(scscf_str.c_str()); if (doc.HasParseError()) { TRC_ERROR("Failed to read S-CSCF configuration data: %s\nError: %s", scscf_str.c_str(), rapidjson::GetParseError_En(doc.GetParseError())); CL_SPROUT_SCSCF_FILE_INVALID.log(); return; } try { std::vector<scscf_t> new_scscfs; JSON_ASSERT_CONTAINS(doc, "s-cscfs"); JSON_ASSERT_ARRAY(doc["s-cscfs"]); const rapidjson::Value& scscfs_arr = doc["s-cscfs"]; for (rapidjson::Value::ConstValueIterator scscfs_it = scscfs_arr.Begin(); scscfs_it != scscfs_arr.End(); ++scscfs_it) { try { scscf_t new_scscf; JSON_GET_STRING_MEMBER(*scscfs_it, "server", new_scscf.server); JSON_GET_INT_MEMBER(*scscfs_it, "priority", new_scscf.priority); JSON_GET_INT_MEMBER(*scscfs_it, "weight", new_scscf.weight); JSON_ASSERT_CONTAINS(*scscfs_it, "capabilities"); JSON_ASSERT_ARRAY((*scscfs_it)["capabilities"]); const rapidjson::Value& cap_arr = (*scscfs_it)["capabilities"]; std::vector<int> capabilities_vec; for (rapidjson::Value::ConstValueIterator cap_it = cap_arr.Begin(); cap_it != cap_arr.End(); ++cap_it) { capabilities_vec.push_back((*cap_it).GetInt()); } // Sort the capabilities and remove duplicates std::sort(capabilities_vec.begin(), capabilities_vec.end()); 
capabilities_vec.erase(unique(capabilities_vec.begin(), capabilities_vec.end()), capabilities_vec.end() ); new_scscf.capabilities = capabilities_vec; new_scscfs.push_back(new_scscf); capabilities_vec.clear(); } catch (JsonFormatError err) { // Badly formed number block. TRC_WARNING("Badly formed S-CSCF entry (hit error at %s:%d)", err._file, err._line); CL_SPROUT_SCSCF_FILE_INVALID.log(); } } // Take a write lock on the mutex in RAII style boost::lock_guard<boost::shared_mutex> write_lock(_scscfs_rw_lock); _scscfs = new_scscfs; } catch (JsonFormatError err) { TRC_ERROR("Badly formed S-CSCF configuration file - missing s-cscfs object"); CL_SPROUT_SCSCF_FILE_INVALID.log(); } }
/// Resynchronise timers with a single node during a scale operation.
///
/// Repeatedly GETs batches of timers from server_to_sync (looping while the
/// response is 206 Partial Content), works out for each timer whether to
/// store it locally, replicate it to new replicas and/or tombstone it on old
/// replicas, then DELETEs the processed IDs from every node in the cluster.
///
/// @param server_to_sync node to pull timers from.
/// @param cluster_nodes  all nodes in the cluster (sent the DELETE updates).
/// @param localhost      this node's identity, used to find our own
///                       position in replica lists.
/// @return the HTTP result of the final GET (or HTTP_BAD_REQUEST if the
///         response could not be processed).
HTTPCode ChronosInternalConnection::resynchronise_with_single_node(
                             const std::string server_to_sync,
                             std::vector<std::string> cluster_nodes,
                             std::string localhost)
{
  TRC_DEBUG("Querying %s for timers", server_to_sync.c_str());

  // Get the cluster view ID from the global configuration
  std::string cluster_view_id;
  __globals->get_cluster_view_id(cluster_view_id);

  std::string response;
  HTTPCode rc;

  // Loop sending GETs to the server while the response is a 206
  do
  {
    // delete_map is rebuilt per batch; it collects (timer ID -> this node's
    // new replica level) for the DELETE body sent at the end of the batch.
    std::map<TimerID, int> delete_map;

    rc = send_get(server_to_sync,
                  localhost,
                  PARAM_SYNC_MODE_VALUE_SCALE,
                  cluster_view_id,
                  MAX_TIMERS_IN_RESPONSE,
                  response);

    if ((rc == HTTP_PARTIAL_CONTENT) ||
        (rc == HTTP_OK))
    {
      // Parse the GET response
      rapidjson::Document doc;
      doc.Parse<0>(response.c_str());

      if (doc.HasParseError())
      {
        // We've failed to parse the document as JSON. This suggests that
        // there's something seriously wrong with the node we're trying
        // to query so don't retry
        TRC_WARNING("Failed to parse document as JSON");
        rc = HTTP_BAD_REQUEST;
        break;
      }

      try
      {
        JSON_ASSERT_CONTAINS(doc, JSON_TIMERS);
        JSON_ASSERT_ARRAY(doc[JSON_TIMERS]);
        const rapidjson::Value& ids_arr = doc[JSON_TIMERS];
        // Track how many entries fail so we can detect a wholly-bad batch.
        int total_timers = ids_arr.Size();
        int count_invalid_timers = 0;

        for (rapidjson::Value::ConstValueIterator ids_it = ids_arr.Begin();
             ids_it != ids_arr.End();
             ++ids_it)
        {
          try
          {
            const rapidjson::Value& id_arr = *ids_it;
            JSON_ASSERT_OBJECT(id_arr);

            // Get the timer ID
            TimerID timer_id;
            JSON_GET_INT_MEMBER(id_arr, JSON_TIMER_ID, timer_id);

            // Get the old replicas
            std::vector<std::string> old_replicas;
            JSON_ASSERT_CONTAINS(id_arr, JSON_OLD_REPLICAS);
            JSON_ASSERT_ARRAY(id_arr[JSON_OLD_REPLICAS]);
            const rapidjson::Value& old_repl_arr = id_arr[JSON_OLD_REPLICAS];
            for (rapidjson::Value::ConstValueIterator repl_it = old_repl_arr.Begin();
                 repl_it != old_repl_arr.End();
                 ++repl_it)
            {
              JSON_ASSERT_STRING(*repl_it);
              old_replicas.push_back(repl_it->GetString());
            }

            // Get the timer.
            JSON_ASSERT_CONTAINS(id_arr, JSON_TIMER);
            JSON_ASSERT_OBJECT(id_arr[JSON_TIMER]);
            const rapidjson::Value& timer_obj = id_arr[JSON_TIMER];

            bool store_timer = false;
            std::string error_str;
            bool replicated_timer;
            Timer* timer = Timer::from_json_obj(timer_id,
                                                0,
                                                error_str,
                                                replicated_timer,
                                                (rapidjson::Value&)timer_obj);

            if (!timer)
            {
              count_invalid_timers++;
              TRC_INFO("Unable to create timer - error: %s", error_str.c_str());
              continue;
            }
            else if (!replicated_timer)
            {
              // A timer in a resync response should always carry its
              // replica list; treat an unreplicated one as invalid.
              count_invalid_timers++;
              TRC_INFO("Unreplicated timer in response - ignoring");
              delete timer; timer = NULL;
              continue;
            }

            // Decide what we're going to do with this timer.
            int old_level = 0;
            bool in_old_replica_list = get_replica_level(old_level,
                                                         localhost,
                                                         old_replicas);
            int new_level = 0;
            bool in_new_replica_list = get_replica_level(new_level,
                                                         localhost,
                                                         timer->replicas);

            // Add the timer to the delete map we're building up
            delete_map.insert(std::pair<TimerID, int>(timer_id, new_level));

            if (in_new_replica_list)
            {
              // Add the timer to my store if I can.
              if (in_old_replica_list)
              {
                if (old_level >= new_level)
                {
                  // Add/update timer
                  // LCOV_EXCL_START - Adding timer paths are tested elsewhere
                  store_timer = true;
                  // LCOV_EXCL_STOP
                }
              }
              else
              {
                // Add/update timer
                store_timer = true;
              }

              // Now loop through the new replicas.
              int index = 0;
              for (std::vector<std::string>::iterator it = timer->replicas.begin();
                   it != timer->replicas.end();
                   ++it, ++index)
              {
                if (index <= new_level)
                {
                  // Do nothing. We've covered adding the timer to the store above
                }
                else
                {
                  // We can potentially replicate the timer to one of these nodes.
                  // Check whether the new replica was involved previously
                  int old_rep_level = 0;
                  bool is_new_rep_in_old_rep = get_replica_level(old_rep_level,
                                                                 *it,
                                                                 old_replicas);
                  if (is_new_rep_in_old_rep)
                  {
                    if (old_rep_level >= new_level)
                    {
                      _replicator->replicate_timer_to_node(timer, *it);
                    }
                  }
                  else
                  {
                    _replicator->replicate_timer_to_node(timer, *it);
                  }
                }
              }

              // Now loop through the old replicas. We can send a tombstone
              // replication to any node that used to be a replica and was
              // higher in the replica list than the new replica.
              index = 0;
              for (std::vector<std::string>::iterator it = old_replicas.begin();
                   it != old_replicas.end();
                   ++it, ++index)
              {
                if (index >= new_level)
                {
                  // We can potentially tombstone the timer to one of these nodes.
                  bool old_rep_in_new_rep = get_replica_presence(*it,
                                                                 timer->replicas);
                  if (!old_rep_in_new_rep)
                  {
                    // Copy the timer so that tombstoning it doesn't affect
                    // the (possibly stored) original.
                    Timer* timer_copy = new Timer(*timer);
                    timer_copy->become_tombstone();
                    _replicator->replicate_timer_to_node(timer_copy, *it);
                    delete timer_copy; timer_copy = NULL;
                  }
                }
              }
            }

            // Add the timer to the store if we can. This is done
            // last so we don't invalidate the pointer to the timer.
            if (store_timer)
            {
              _handler->add_timer(timer);
            }
            else
            {
              delete timer; timer = NULL;
            }

            // Finally, note that we processed the timer
            _timers_processed_stat->increment();
          }
          catch (JsonFormatError err)
          {
            // A single entry is badly formatted. This is unexpected but we'll try
            // to keep going and process the rest of the timers.
            count_invalid_timers++;
            _invalid_timers_processed_stat->increment();
            TRC_INFO("JSON entry was invalid (hit error at %s:%d)",
                     err._file, err._line);
          }
        }

        // Check if we were able to successfully process any timers - if not
        // then bail out as there's something wrong with the node we're
        // querying
        if ((total_timers != 0) &&
            (count_invalid_timers == total_timers))
        {
          TRC_WARNING("Unable to process any timer entries in GET response");
          rc = HTTP_BAD_REQUEST;
        }
      }
      catch (JsonFormatError err)
      {
        // We've failed to find the Timers array. This suggests that
        // there's something seriously wrong with the node we're trying
        // to query so don't retry
        TRC_WARNING("JSON body didn't contain the Timers array");
        rc = HTTP_BAD_REQUEST;
      }

      // Send a DELETE to all the nodes to update their timer references
      if (delete_map.size() > 0)
      {
        std::string delete_body = create_delete_body(delete_map);
        for (std::vector<std::string>::iterator it = cluster_nodes.begin();
             it != cluster_nodes.end();
             ++it)
        {
          HTTPCode delete_rc = send_delete(*it, delete_body);
          if (delete_rc != HTTP_ACCEPTED)
          {
            // We've received an error response to the DELETE request. There's
            // not much more we can do here (a timeout will have already
            // been retried). A failed DELETE won't prevent the scaling operation
            // from finishing, it just means that we'll tell other nodes
            // about timers inefficiently.
            TRC_INFO("Error response (%d) to DELETE request to %s",
                     delete_rc, (*it).c_str());
          }
        }
      }
    }
    else
    {
      // We've received an error response to the GET request. A timeout
      // will already have been retried by the underlying HTTPConnection,
      // so don't retry again
      TRC_WARNING("Error response (%d) to GET request to %s",
                  rc, server_to_sync.c_str());
    }
  } while (rc == HTTP_PARTIAL_CONTENT);

  return rc;
}
/// (Re)load the SAS server configuration from _configuration, updating
/// _single_sas_server and the serialized _sas_servers string under the
/// server lock. On any error the existing values are left untouched.
void SasService::extract_config()
{
  // Check whether the file exists.
  // NOTE: this TRC_DEBUG calls stat() itself purely for logging; the real
  // check below calls stat() again.
  struct stat s;
  TRC_DEBUG("stat(%s) returns %d", _configuration.c_str(),
            stat(_configuration.c_str(), &s));
  if ((stat(_configuration.c_str(), &s) != 0) &&
      (errno == ENOENT))
  {
    TRC_STATUS("No SAS configuration (file %s does not exist)",
               _configuration.c_str());
    CL_SAS_FILE_MISSING.log();
    return;
  }

  TRC_STATUS("Loading SAS configuration from %s", _configuration.c_str());

  // Read from the file
  std::ifstream fs(_configuration.c_str());
  std::string sas_str((std::istreambuf_iterator<char>(fs)),
                       std::istreambuf_iterator<char>());

  if (sas_str == "")
  {
    TRC_ERROR("Failed to read SAS configuration data from %s",
              _configuration.c_str());
    CL_SAS_FILE_EMPTY.log();
    return;
  }

  // Now parse the document
  rapidjson::Document doc;
  doc.Parse<0>(sas_str.c_str());

  if (doc.HasParseError())
  {
    TRC_ERROR("Failed to read SAS configuration data: %s\nError: %s",
              sas_str.c_str(),
              rapidjson::GetParseError_En(doc.GetParseError()));
    CL_SAS_FILE_INVALID.log();
    return;
  }

  try
  {
    JSON_ASSERT_CONTAINS(doc, "sas_servers");
    JSON_ASSERT_ARRAY(doc["sas_servers"]);
    rapidjson::Value& sas_servers = doc["sas_servers"];

    // Validate each server entry and record its "ip" member. The lock is
    // taken (and released) once per iteration because the guard is scoped
    // to the loop body.
    // NOTE(review): with multiple entries, _single_sas_server ends up as
    // the LAST entry's "ip" - confirm that last-wins is intended.
    for (rapidjson::Value::ValueIterator sas_it = sas_servers.Begin();
         sas_it != sas_servers.End();
         ++sas_it)
    {
      JSON_ASSERT_OBJECT(*sas_it);
      JSON_ASSERT_CONTAINS(*sas_it, "ip");
      JSON_ASSERT_STRING((*sas_it)["ip"]);
      boost::lock_guard<boost::shared_mutex> write_lock(_sas_server_lock);
      _single_sas_server = (*sas_it)["ip"].GetString();
    }

    // We have a valid rapidjson object. Write this to the _sas_servers member
    rapidjson::StringBuffer buffer;
    rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);
    sas_servers.Accept(writer);
    TRC_DEBUG("New _sas_servers config: %s", buffer.GetString());

    // Re-acquire the (exclusive) lock to publish the serialized list. This
    // is a second, separate lock scope from the one in the loop above.
    boost::lock_guard<boost::shared_mutex> write_lock(_sas_server_lock);
    _sas_servers = buffer.GetString();
  }
  catch (JsonFormatError err)
  {
    // Any missing/mistyped member above lands here; keep the old config.
    TRC_ERROR("Badly formed SAS configuration file");
    CL_SAS_FILE_INVALID.log();
  }
}