/// Remove one binding (or all bindings, if binding_id is "*") for the given
/// AoR from the registration store.  If this leaves the IMPU with no
/// registered bindings at all, complete the deregistration by updating the
/// HSS and notifying any third-party application servers.
///
/// @param store       registration store holding the AoR's bindings.
/// @param hss         connection used to update registration state at the HSS.
/// @param aor         the address-of-record (IMPU) being deregistered.
/// @param binding_id  the specific binding to remove, or "*" for all.
/// @param dereg_type  deregistration type string passed through to the HSS.
/// @param trail       SAS trail ID for logging correlation.
void RegistrationUtils::remove_bindings(RegStore* store,
                                        HSSConnection* hss,
                                        const std::string& aor,
                                        const std::string& binding_id,
                                        const std::string& dereg_type,
                                        SAS::TrailId trail)
{
  TRC_INFO("Remove binding(s) %s from IMPU %s", binding_id.c_str(), aor.c_str());

  if (expire_bindings(store, aor, binding_id, trail))
  {
    // All bindings have been expired, so do deregistration processing for the
    // IMPU.
    TRC_INFO("All bindings for %s expired, so deregister at HSS and ASs",
             aor.c_str());
    std::vector<std::string> uris;
    std::map<std::string, Ifcs> ifc_map;

    // Tell the HSS the IMPU is no longer registered; on success it returns
    // the iFCs we need in order to notify the application servers.
    HTTPCode http_code = hss->update_registration_state(aor,
                                                        "",
                                                        dereg_type,
                                                        ifc_map,
                                                        uris,
                                                        trail);

    if (http_code == HTTP_OK)
    {
      // Note that 3GPP TS 24.229 V12.0.0 (2013-03) 5.4.1.7 doesn't specify that any binding information
      // should be passed on the REGISTER message, so we don't need the binding ID.
      deregister_with_application_servers(ifc_map[aor],
                                          store,
                                          aor,
                                          third_party_reg_stats_tables,
                                          trail);
      notify_application_servers();
    }
  }
};
std::vector<std::string> BgcfService::get_route_from_domain( const std::string &domain, SAS::TrailId trail) const { TRC_DEBUG("Getting route for URI domain %s via BGCF lookup", domain.c_str()); // Take a read lock on the mutex in RAII style boost::shared_lock<boost::shared_mutex> read_lock(_routes_rw_lock); // First try the specified domain. std::map<std::string, std::vector<std::string>>::const_iterator i = _domain_routes.find(domain); if (i != _domain_routes.end()) { TRC_INFO("Found route to domain %s", domain.c_str()); SAS::Event event(trail, SASEvent::BGCF_FOUND_ROUTE_DOMAIN, 0); event.add_var_param(domain); std::string route_string; for (std::vector<std::string>::const_iterator ii = i->second.begin(); ii != i->second.end(); ++ii) { route_string = route_string + *ii + ";"; } event.add_var_param(route_string); SAS::report_event(event); return i->second; } // Then try the default domain (*). i = _domain_routes.find("*"); if (i != _domain_routes.end()) { TRC_INFO("Found default route"); SAS::Event event(trail, SASEvent::BGCF_DEFAULT_ROUTE_DOMAIN, 0); event.add_var_param(domain); std::string route_string; for (std::vector<std::string>::const_iterator ii = i->second.begin(); ii != i->second.end(); ++ii) { route_string = route_string + *ii + ";"; } event.add_var_param(route_string); SAS::report_event(event); return i->second; } SAS::Event event(trail, SASEvent::BGCF_NO_ROUTE_DOMAIN, 0); event.add_var_param(domain); SAS::report_event(event); return std::vector<std::string>(); }
std::string JSONEnumService::lookup_uri_from_user(const std::string &user, SAS::TrailId trail) const { std::string uri; TRC_DEBUG("Translating URI via JSON ENUM lookup"); if (user.empty()) { TRC_INFO("No dial string supplied, so don't do ENUM lookup"); return std::string(); } std::string aus = user_to_aus(user); // Take a read lock on the mutex in RAII style boost::shared_lock<boost::shared_mutex> read_lock(_number_prefixes_rw_lock); const struct NumberPrefix* pfix = prefix_match(aus); if (pfix == NULL) { TRC_WARNING("No matching number range %s from ENUM lookup", user.c_str()); SAS::Event event(trail, SASEvent::ENUM_INCOMPLETE, 0); event.add_var_param(user); SAS::report_event(event); return uri; } // Apply the regular expression to the user string to generate a new // URI. try { uri = boost::regex_replace(aus, pfix->match, pfix->replace); } catch(...) // LCOV_EXCL_START Only throws if expression too complex or similar hard-to-hit conditions { TRC_ERROR("Failed to translate number with regex"); SAS::Event event(trail, SASEvent::ENUM_INCOMPLETE, 1); event.add_var_param(user); SAS::report_event(event); return uri; // LCOV_EXCL_STOP } TRC_INFO("Number %s found, translated URI = %s", user.c_str(), uri.c_str()); SAS::Event event(trail, SASEvent::ENUM_COMPLETE, 0); event.add_var_param(user); event.add_var_param(uri); SAS::report_event(event); return uri; }
/// Retrieve a JSON object from a path on the server. Caller is responsible for deleting. HTTPCode HSSConnection::get_json_object(const std::string& path, rapidjson::Document*& json_object, SAS::TrailId trail) { std::string json_data; HTTPCode rc = _http->send_get(path, json_data, "", trail); if (rc == HTTP_OK) { json_object = new rapidjson::Document; json_object->Parse<0>(json_data.c_str()); if (json_object->HasParseError()) { TRC_INFO("Failed to parse Homestead response:\nPath: %s\nData: %s\nError: %s\n", path.c_str(), json_data.c_str(), rapidjson::GetParseError_En(json_object->GetParseError())); delete json_object; json_object = NULL; } } else { json_object = NULL; } return rc; }
static void send_register_cb(void* token, pjsip_event *event) { ThirdPartyRegData* tsxdata = (ThirdPartyRegData*)token; pjsip_transaction* tsx = event->body.tsx_state.tsx; if ((tsxdata->default_handling == SESSION_TERMINATED) && ((tsx->status_code == 408) || (PJSIP_IS_STATUS_IN_CLASS(tsx->status_code, 500)))) { std::string error_msg = "Third-party REGISTER transaction failed with code " + std::to_string(tsx->status_code); TRC_INFO(error_msg.c_str()); SAS::Event event(tsxdata->trail, SASEvent::REGISTER_AS_FAILED, 0); event.add_var_param(error_msg); SAS::report_event(event); third_party_register_failed(tsxdata->public_id, tsxdata->trail); } // printf("Expiry: %d, Is_initial_registration: %d\n", tsxdata->expires, tsxdata->is_initial_registration); if (tsx->status_code == 200) { if (tsxdata->expires == 0) { third_party_reg_stats_tables->de_reg_tbl->increment_successes(); } else if (tsxdata->is_initial_registration) { third_party_reg_stats_tables->init_reg_tbl->increment_successes(); } else { third_party_reg_stats_tables->re_reg_tbl->increment_successes(); } } else // Count all failed registration attempts, not just ones that result in user // being unsubscribed. { if (tsxdata->expires == 0) { third_party_reg_stats_tables->de_reg_tbl->increment_failures(); } else if (tsxdata->is_initial_registration) { third_party_reg_stats_tables->init_reg_tbl->increment_failures(); } else { third_party_reg_stats_tables->re_reg_tbl->increment_failures(); } } delete tsxdata; tsxdata = NULL; }
/// Tear down the table: log, release the row lock, and free every cached
/// five-second and five-minute view owned by this object.
~TimeAndStringBasedEventTableImpl()
{
  TRC_INFO("Destroying table with name %s", _name.c_str());

  pthread_rwlock_destroy(&_table_lock);

  // Walk both view maps and release the heap-allocated views they own.
  for (auto it = _five_second.begin(); it != _five_second.end(); ++it)
  {
    delete it->second;
  }

  for (auto it = _five_minute.begin(); it != _five_minute.end(); ++it)
  {
    delete it->second;
  }
}
/// Route the request to the BGCF. void ICSCFSproutletTsx::route_to_bgcf(pjsip_msg* req) { TRC_INFO("Routing to BGCF %s", PJUtils::uri_to_string(PJSIP_URI_IN_ROUTING_HDR, _icscf->bgcf_uri()).c_str()); PJUtils::add_route_header(req, (pjsip_sip_uri*)pjsip_uri_clone(get_pool(req), _icscf->bgcf_uri()), get_pool(req)); send_request(req); _routed_to_bgcf = true; }
/// Fetch and deserialize the digest stored for the given IMPI/nonce pair.
///
/// @param impi    private identity the digest was stored under.
/// @param nonce   nonce the digest was stored under.
/// @param digest  [out] newly allocated digest on success (caller owns), or
///                NULL on failure.
/// @param trail   SAS trail ID for logging correlation.
///
/// @returns the store result code; NOT_FOUND is also returned when the record
///          existed but could not be deserialized.
Store::Status AuthStore::get_digest(const std::string& impi,
                                    const std::string& nonce,
                                    AuthStore::Digest*& digest,
                                    SAS::TrailId trail)
{
  // Compose the store key from the IMPI and the nonce, separated by a
  // backslash.
  std::string key = impi + '\\' + nonce;
  std::string data;
  uint64_t cas;
  Store::Status status = _data_store->get_data("AuthStore", key, data, cas, trail);
  TRC_DEBUG("Get digest for %s", key.c_str());

  if (status != Store::Status::OK)
  {
    TRC_DEBUG("Failed to retrieve digest for %s", key.c_str());
    SAS::Event event(trail, SASEvent::AUTHSTORE_GET_FAILURE, 0);
    event.add_var_param(key);
    SAS::report_event(event);
    digest = NULL;
  }
  else
  {
    TRC_DEBUG("Retrieved Digest for %s\n%s", key.c_str(), data.c_str());
    digest = deserialize_digest(data);

    if (digest != NULL)
    {
      // Record the CAS value and the key parts on the digest so a subsequent
      // write can be performed atomically against this version.
      digest->_cas = cas;
      digest->_impi = impi;
      digest->_nonce = nonce;
      SAS::Event event(trail, SASEvent::AUTHSTORE_GET_SUCCESS, 0);
      event.add_var_param(key);
      event.add_var_param(data);
      SAS::report_event(event);
    }
    else
    {
      TRC_INFO("Failed to deserialize record");
      SAS::Event event(trail, SASEvent::AUTHSTORE_DESERIALIZATION_FAILURE, 0);
      event.add_var_param(key);
      event.add_var_param(data);
      SAS::report_event(event);

      // Handle as if the digest was not found.
      status = Store::NOT_FOUND;
    }
  }

  return status;
}
/// Translate a dialled user string into a URI via the JSON-configured ENUM
/// number prefixes.  Returns the empty string if no translation is possible.
///
/// NOTE(review): unlike the SAS-reporting variant of this function elsewhere
/// in the codebase, this version takes no read lock on the prefix data and
/// raises no SAS events; the trail parameter is unused here.
///
/// @param user   the dialled string to translate.
/// @param trail  SAS trail ID (currently unused in this variant).
std::string JSONEnumService::lookup_uri_from_user(const std::string &user,
                                                  SAS::TrailId trail) const
{
  std::string uri;

  TRC_DEBUG("Translating URI via JSON ENUM lookup");

  if (user.empty())
  {
    TRC_INFO("No dial string supplied, so don't do ENUM lookup");
    return std::string();
  }

  std::string aus = user_to_aus(user);

  struct NumberPrefix* pfix = prefix_match(aus);

  if (pfix == NULL)
  {
    // No configured number range covers this user - return the empty string.
    TRC_INFO("No matching number range %s from ENUM lookup", user.c_str());
    return uri;
  }

  // Apply the regular expression to the user string to generate a new
  // URI.
  try
  {
    uri = boost::regex_replace(aus, pfix->match, pfix->replace);
  }
  catch(...) // LCOV_EXCL_START Only throws if expression too complex or similar hard-to-hit conditions
  {
    TRC_ERROR("Failed to translate number with regex");
    return uri;
    // LCOV_EXCL_STOP
  }

  TRC_INFO("Number %s found, translated URI = %s", user.c_str(), uri.c_str());

  return uri;
}
/// Deserialize a JSON-encoded session record.
///
/// @param data  the serialized JSON document.
///
/// @returns a newly allocated Session (caller owns), or NULL if the document
///          does not parse or is missing required members.
SessionStore::Session* SessionStore::JsonSerializerDeserializer::
  deserialize_session(const std::string& data)
{
  TRC_DEBUG("Deserialize JSON document: %s", data.c_str());

  rapidjson::Document doc;
  doc.Parse<0>(data.c_str());

  if (doc.HasParseError())
  {
    TRC_DEBUG("Failed to parse document");
    return NULL;
  }

  Session* session = new Session();

  try
  {
    JSON_GET_STRING_MEMBER(doc, JSON_SESSION_ID, session->session_id);

    JSON_ASSERT_CONTAINS(doc, JSON_CCFS);
    JSON_ASSERT_ARRAY(doc[JSON_CCFS]);

    for (rapidjson::Value::ConstValueIterator ccfs_it = doc[JSON_CCFS].Begin();
         ccfs_it != doc[JSON_CCFS].End();
         ++ccfs_it)
    {
      JSON_ASSERT_STRING(*ccfs_it);
      session->ccf.push_back(ccfs_it->GetString());
    }

    JSON_GET_INT_MEMBER(doc, JSON_ACCT_RECORD_NUM, session->acct_record_number);
    JSON_GET_STRING_MEMBER(doc, JSON_TIMER_ID, session->timer_id);
    JSON_GET_INT_MEMBER(doc, JSON_REFRESH_TIME, session->session_refresh_time);
    JSON_GET_INT_MEMBER(doc, JSON_INTERIM_INTERVAL, session->interim_interval);
  }
  // Catch by const reference (previously caught by value) to avoid slicing
  // and a needless copy of the error object.
  catch (const JsonFormatError& err)
  {
    TRC_INFO("Failed to deserialize JSON document (hit error at %s:%d)",
             err._file, err._line);
    delete session; session = NULL;
  }

  return session;
}
/// Construct a time-and-string indexed SNMP event table.
///
/// @param name     human-readable table name (used in logs).
/// @param tbl_oid  the SNMP OID this table is registered under.
///
/// NOTE(review): the 3 and 7 arguments are passed straight through to
/// ManagedTable along with the (ASN_INTEGER, ASN_OCTET_STR) index types -
/// presumably the min/max index columns; confirm against ManagedTable's
/// constructor signature.
TimeAndStringBasedEventTableImpl(std::string name, std::string tbl_oid):
  ManagedTable<TimeAndStringBasedEventRow, int>(name,
                                                tbl_oid,
                                                3,
                                                7,
                                                { ASN_INTEGER , ASN_OCTET_STR })
{
  TRC_INFO("Created table with name %s, OID %s", name.c_str(), tbl_oid.c_str());
  _table_rows = 0;

  // Create a lock to protect the maps in this table. Note that our policy
  // for stats is to minimize locking. We are prepared to tolerate some
  // invalid statistics readings in order to avoid locking on the call path.
  // However, we don't want to risk conflicting mutates of the maps that
  // track all of the underlying data from completely breaking all stats.
  pthread_rwlock_init(&_table_lock, NULL);
}
/// Deserialize a JSON-encoded digest record.
///
/// @param digest_s  the serialized JSON document.
///
/// @returns a newly allocated Digest (caller owns), or NULL if the document
///          does not parse or is missing required members.
AuthStore::Digest* AuthStore::JsonSerializerDeserializer::
  deserialize_digest(const std::string& digest_s)
{
  TRC_DEBUG("Deserialize JSON document: %s", digest_s.c_str());

  rapidjson::Document doc;
  doc.Parse<0>(digest_s.c_str());

  if (doc.HasParseError())
  {
    TRC_DEBUG("Failed to parse document");
    return NULL;
  }

  Digest* digest = new Digest();

  try
  {
    JSON_ASSERT_OBJECT(doc);
    JSON_ASSERT_CONTAINS(doc, JSON_DIGEST);
    JSON_ASSERT_OBJECT(doc[JSON_DIGEST]);
    const rapidjson::Value& digest_block = doc[JSON_DIGEST];

    {
      JSON_GET_STRING_MEMBER(digest_block, JSON_HA1, digest->_ha1);
      // The QoP is assumed to always be 'auth'.
      JSON_GET_STRING_MEMBER(digest_block, JSON_REALM, digest->_realm);
    }

    JSON_GET_STRING_MEMBER(doc, JSON_OPAQUE, digest->_opaque);
    JSON_GET_STRING_MEMBER(doc, JSON_IMPU, digest->_impu);
    JSON_GET_INT_MEMBER(doc, JSON_NC, digest->_nonce_count);
  }
  // Catch by const reference (previously caught by value) to avoid slicing
  // and a needless copy of the error object.
  catch (const JsonFormatError& err)
  {
    TRC_INFO("Failed to deserialize JSON document (hit error at %s:%d)",
             err._file, err._line);
    delete digest; digest = NULL;
  }

  return digest;
}
/// Expire the specified binding (or all bindings, if binding_id is "*") for
/// an AoR, retrying on store contention.
///
/// @param store       registration store holding the AoR.
/// @param aor         the address-of-record to update.
/// @param binding_id  the binding to remove, or "*" for all bindings.
/// @param trail       SAS trail ID for logging correlation.
///
/// @returns true if (and only if) the store update succeeded and left the
///          AoR with no remaining bindings.
static bool expire_bindings(RegStore *store,
                            const std::string& aor,
                            const std::string& binding_id,
                            SAS::TrailId trail)
{
  // We need the retry loop to handle the store's compare-and-swap.
  bool all_bindings_expired = false;

  // Initialize explicitly (previously left uninitialized) so set_rc has a
  // defined value even if we break out of the loop before the first
  // set_aor_data call.
  Store::Status set_rc = Store::OK;

  do
  {
    RegStore::AoR* aor_data = store->get_aor_data(aor, trail);

    if (aor_data == NULL)
    {
      break; // LCOV_EXCL_LINE No UT for lookup failure.
    }

    if (binding_id == "*")
    {
      // We only use this when doing some network-initiated deregistrations;
      // when the user deregisters all bindings another code path clears them
      TRC_INFO("Clearing all bindings!");
      aor_data->clear(false);
    }
    else
    {
      aor_data->remove_binding(binding_id); // LCOV_EXCL_LINE No UT for network
                                            // initiated deregistration of a
                                            // single binding (flow failed).
    }

    set_rc = store->set_aor_data(aor, aor_data, false, trail, all_bindings_expired);
    delete aor_data; aor_data = NULL;

    // We can only say for sure that the bindings were expired if we were able
    // to update the store.
    all_bindings_expired = (all_bindings_expired && (set_rc == Store::OK));

  } while (set_rc == Store::DATA_CONTENTION);

  return all_bindings_expired;
}
/// Retrieve and deserialize the session record for a call.
///
/// @param call_id   the call's Call-ID.
/// @param role      role of this node for the session.
/// @param function  node functionality for the session.
/// @param trail     SAS trail ID for logging correlation.
///
/// @returns a newly allocated Session (caller owns) with its CAS value
///          filled in, or NULL if no record was found or it could not be
///          deserialized.
SessionStore::Session* SessionStore::get_session_data(const std::string& call_id,
                                                      const role_of_node_t role,
                                                      const node_functionality_t function,
                                                      SAS::TrailId trail)
{
  const std::string key = create_key(call_id, role, function);
  TRC_DEBUG("Retrieving session data for %s", key.c_str());

  std::string data;
  uint64_t cas;
  Store::Status rc = _store->get_data("session", key, data, cas, trail);

  if ((rc != Store::Status::OK) || data.empty())
  {
    // Nothing usable in the store for this key.
    return NULL;
  }

  // Retrieved the data, so deserialize it.
  TRC_DEBUG("Retrieved record, CAS = %ld", cas);
  Session* session = deserialize_session(data);

  if (session != NULL)
  {
    session->_cas = cas;
  }
  else
  {
    // Could not deserialize the record. Treat it as not found.
    TRC_INFO("Failed to deserialize record");
    SAS::Event event(trail, SASEvent::SESSION_DESERIALIZATION_FAILED, 0);
    event.add_var_param(call_id);
    event.add_var_param(data);
    SAS::report_event(event);
  }

  return session;
}
int init_options(int argc, char**argv, struct options& options) { int opt; int long_opt_ind; optind = 0; while ((opt = getopt_long(argc, argv, options_description.c_str(), long_opt, &long_opt_ind)) != -1) { switch (opt) { case 'l': TRC_INFO("Local host: %s", optarg); options.local_host = std::string(optarg); break; case 'r': TRC_INFO("Home domain: %s", optarg); options.home_domain = std::string(optarg); break; case 'c': TRC_INFO("Diameter configuration file: %s", optarg); options.diameter_conf = std::string(optarg); break; case 'H': TRC_INFO("HTTP address: %s", optarg); options.http_address = std::string(optarg); break; case 't': TRC_INFO("HTTP threads: %s", optarg); options.http_threads = atoi(optarg); break; case 'u': TRC_INFO("Cache threads: %s", optarg); options.cache_threads = atoi(optarg); break; case 'S': TRC_INFO("Cassandra host: %s", optarg); options.cassandra = std::string(optarg); break; case 'D': TRC_INFO("Destination realm: %s", optarg); options.dest_realm = std::string(optarg); break; case 'd': TRC_INFO("Destination host: %s", optarg); options.dest_host = std::string(optarg); break; case 'p': TRC_INFO("Maximum peers: %s", optarg); options.max_peers = atoi(optarg); break; case 's': TRC_INFO("Server name: %s", optarg); options.server_name = std::string(optarg); break; case 'i': TRC_INFO("IMPU cache TTL: %s", optarg); options.impu_cache_ttl = atoi(optarg); break; case 'I': TRC_INFO("HSS reregistration time: %s", optarg); options.hss_reregistration_time = atoi(optarg); break; case 'j': TRC_INFO("Sprout HTTP name: %s", optarg); options.sprout_http_name = std::string(optarg); break; case SCHEME_UNKNOWN: TRC_INFO("Scheme unknown: %s", optarg); options.scheme_unknown = std::string(optarg); break; case SCHEME_DIGEST: TRC_INFO("Scheme digest: %s", optarg); options.scheme_digest = std::string(optarg); break; case SCHEME_AKA: TRC_INFO("Scheme AKA: %s", optarg); options.scheme_aka = std::string(optarg); break; case 'a': TRC_INFO("Access log: %s", optarg); 
options.access_log_enabled = true; options.access_log_directory = std::string(optarg); break; case SAS_CONFIG: { std::vector<std::string> sas_options; Utils::split_string(std::string(optarg), ',', sas_options, 0, false); if (sas_options.size() == 2) { options.sas_server = sas_options[0]; options.sas_system_name = sas_options[1]; TRC_INFO("SAS set to %s\n", options.sas_server.c_str()); TRC_INFO("System name is set to %s\n", options.sas_system_name.c_str()); } else { CL_HOMESTEAD_INVALID_SAS_OPTION.log(); TRC_WARNING("Invalid --sas option, SAS disabled\n"); } } break; case DIAMETER_TIMEOUT_MS: TRC_INFO("Diameter timeout: %s", optarg); options.diameter_timeout_ms = atoi(optarg); break; case ALARMS_ENABLED: TRC_INFO("SNMP alarms are enabled"); options.alarms_enabled = true; break; case DNS_SERVER: options.dns_servers.clear(); Utils::split_string(std::string(optarg), ',', options.dns_servers, 0, false); TRC_INFO("%d DNS servers passed on the command line", options.dns_servers.size()); break; case TARGET_LATENCY_US: options.target_latency_us = atoi(optarg); if (options.target_latency_us <= 0) { TRC_ERROR("Invalid --target-latency-us option %s", optarg); return -1; } break; case MAX_TOKENS: options.max_tokens = atoi(optarg); if (options.max_tokens <= 0) { TRC_ERROR("Invalid --max-tokens option %s", optarg); return -1; } break; case INIT_TOKEN_RATE: options.init_token_rate = atoi(optarg); if (options.init_token_rate <= 0) { TRC_ERROR("Invalid --init-token-rate option %s", optarg); return -1; } break; case MIN_TOKEN_RATE: options.min_token_rate = atoi(optarg); if (options.min_token_rate <= 0) { TRC_ERROR("Invalid --min-token-rate option %s", optarg); return -1; } break; case EXCEPTION_MAX_TTL: options.exception_max_ttl = atoi(optarg); TRC_INFO("Max TTL after an exception set to %d", options.exception_max_ttl); break; case HTTP_BLACKLIST_DURATION: options.http_blacklist_duration = atoi(optarg); TRC_INFO("HTTP blacklist duration set to %d", options.http_blacklist_duration); 
break; case DIAMETER_BLACKLIST_DURATION: options.diameter_blacklist_duration = atoi(optarg); TRC_INFO("Diameter blacklist duration set to %d", options.diameter_blacklist_duration); break; case 'F': case 'L': // Ignore F and L - these are handled by init_logging_options break; case 'h': usage(); return -1; default: CL_HOMESTEAD_INVALID_OPTION_C.log(opt); TRC_ERROR("Unknown option. Run with --help for options.\n"); return -1; } } return 0; }
/// Resynchronise timers with a single Chronos node during a scale operation.
///
/// Repeatedly GETs batches of timers from the target node (looping while it
/// responds 206 Partial Content), and for each timer decides - based on this
/// node's position in the timer's old and new replica lists - whether to
/// store it locally, replicate it to other new replicas, and/or tombstone it
/// on old replicas that are no longer responsible for it.  After each batch,
/// DELETEs are sent to all cluster nodes to update their timer references.
///
/// @param server_to_sync  address of the node to pull timers from.
/// @param cluster_nodes   all nodes in the cluster (sent the DELETE updates).
/// @param localhost       this node's own address.
///
/// @returns the final HTTP result code of the GET loop.
HTTPCode ChronosInternalConnection::resynchronise_with_single_node(
                             const std::string server_to_sync,
                             std::vector<std::string> cluster_nodes,
                             std::string localhost)
{
  TRC_DEBUG("Querying %s for timers", server_to_sync.c_str());

  // Get the cluster view ID from the global configuration
  std::string cluster_view_id;
  __globals->get_cluster_view_id(cluster_view_id);

  std::string response;
  HTTPCode rc;

  // Loop sending GETs to the server while the response is a 206
  do
  {
    std::map<TimerID, int> delete_map;

    rc = send_get(server_to_sync,
                  localhost,
                  PARAM_SYNC_MODE_VALUE_SCALE,
                  cluster_view_id,
                  MAX_TIMERS_IN_RESPONSE,
                  response);

    if ((rc == HTTP_PARTIAL_CONTENT) || (rc == HTTP_OK))
    {
      // Parse the GET response
      rapidjson::Document doc;
      doc.Parse<0>(response.c_str());

      if (doc.HasParseError())
      {
        // We've failed to parse the document as JSON. This suggests that
        // there's something seriously wrong with the node we're trying
        // to query so don't retry
        TRC_WARNING("Failed to parse document as JSON");
        rc = HTTP_BAD_REQUEST;
        break;
      }

      try
      {
        JSON_ASSERT_CONTAINS(doc, JSON_TIMERS);
        JSON_ASSERT_ARRAY(doc[JSON_TIMERS]);
        const rapidjson::Value& ids_arr = doc[JSON_TIMERS];
        int total_timers = ids_arr.Size();
        int count_invalid_timers = 0;

        for (rapidjson::Value::ConstValueIterator ids_it = ids_arr.Begin();
             ids_it != ids_arr.End();
             ++ids_it)
        {
          try
          {
            const rapidjson::Value& id_arr = *ids_it;
            JSON_ASSERT_OBJECT(id_arr);

            // Get the timer ID
            TimerID timer_id;
            JSON_GET_INT_MEMBER(id_arr, JSON_TIMER_ID, timer_id);

            // Get the old replicas
            std::vector<std::string> old_replicas;
            JSON_ASSERT_CONTAINS(id_arr, JSON_OLD_REPLICAS);
            JSON_ASSERT_ARRAY(id_arr[JSON_OLD_REPLICAS]);
            const rapidjson::Value& old_repl_arr = id_arr[JSON_OLD_REPLICAS];

            for (rapidjson::Value::ConstValueIterator repl_it = old_repl_arr.Begin();
                 repl_it != old_repl_arr.End();
                 ++repl_it)
            {
              JSON_ASSERT_STRING(*repl_it);
              old_replicas.push_back(repl_it->GetString());
            }

            // Get the timer.
            JSON_ASSERT_CONTAINS(id_arr, JSON_TIMER);
            JSON_ASSERT_OBJECT(id_arr[JSON_TIMER]);
            const rapidjson::Value& timer_obj = id_arr[JSON_TIMER];

            bool store_timer = false;
            std::string error_str;
            bool replicated_timer;
            Timer* timer = Timer::from_json_obj(timer_id,
                                                0,
                                                error_str,
                                                replicated_timer,
                                                (rapidjson::Value&)timer_obj);

            if (!timer)
            {
              count_invalid_timers++;
              TRC_INFO("Unable to create timer - error: %s", error_str.c_str());
              continue;
            }
            else if (!replicated_timer)
            {
              count_invalid_timers++;
              TRC_INFO("Unreplicated timer in response - ignoring");
              delete timer; timer = NULL;
              continue;
            }

            // Decide what we're going to do with this timer.  Work out where
            // this node sits in the timer's old and new replica lists.
            int old_level = 0;
            bool in_old_replica_list = get_replica_level(old_level,
                                                         localhost,
                                                         old_replicas);
            int new_level = 0;
            bool in_new_replica_list = get_replica_level(new_level,
                                                         localhost,
                                                         timer->replicas);

            // Add the timer to the delete map we're building up
            delete_map.insert(std::pair<TimerID, int>(timer_id, new_level));

            if (in_new_replica_list)
            {
              // Add the timer to my store if I can.
              if (in_old_replica_list)
              {
                if (old_level >= new_level)
                {
                  // Add/update timer
                  // LCOV_EXCL_START - Adding timer paths are tested elsewhere
                  store_timer = true;
                  // LCOV_EXCL_STOP
                }
              }
              else
              {
                // Add/update timer
                store_timer = true;
              }

              // Now loop through the new replicas.
              int index = 0;
              for (std::vector<std::string>::iterator it = timer->replicas.begin();
                   it != timer->replicas.end();
                   ++it, ++index)
              {
                if (index <= new_level)
                {
                  // Do nothing. We've covered adding the timer to the store above
                }
                else
                {
                  // We can potentially replicate the timer to one of these nodes.
                  // Check whether the new replica was involved previously
                  int old_rep_level = 0;
                  bool is_new_rep_in_old_rep = get_replica_level(old_rep_level,
                                                                 *it,
                                                                 old_replicas);
                  if (is_new_rep_in_old_rep)
                  {
                    if (old_rep_level >= new_level)
                    {
                      _replicator->replicate_timer_to_node(timer, *it);
                    }
                  }
                  else
                  {
                    _replicator->replicate_timer_to_node(timer, *it);
                  }
                }
              }

              // Now loop through the old replicas.  We can send a tombstone
              // replication to any node that used to be a replica and was
              // higher in the replica list than the new replica.
              index = 0;
              for (std::vector<std::string>::iterator it = old_replicas.begin();
                   it != old_replicas.end();
                   ++it, ++index)
              {
                if (index >= new_level)
                {
                  // We can potentially tombstone the timer to one of these nodes.
                  bool old_rep_in_new_rep = get_replica_presence(*it,
                                                                 timer->replicas);

                  if (!old_rep_in_new_rep)
                  {
                    Timer* timer_copy = new Timer(*timer);
                    timer_copy->become_tombstone();
                    _replicator->replicate_timer_to_node(timer_copy, *it);
                    delete timer_copy; timer_copy = NULL;
                  }
                }
              }
            }

            // Add the timer to the store if we can.  This is done
            // last so we don't invalidate the pointer to the timer.
            if (store_timer)
            {
              _handler->add_timer(timer);
            }
            else
            {
              delete timer; timer = NULL;
            }

            // Finally, note that we processed the timer
            _timers_processed_stat->increment();
          }
          catch (JsonFormatError err)
          {
            // A single entry is badly formatted. This is unexpected but we'll try
            // to keep going and process the rest of the timers.
            count_invalid_timers++;
            _invalid_timers_processed_stat->increment();
            TRC_INFO("JSON entry was invalid (hit error at %s:%d)",
                     err._file, err._line);
          }
        }

        // Check if we were able to successfully process any timers - if not
        // then bail out as there's something wrong with the node we're
        // querying
        if ((total_timers != 0) && (count_invalid_timers == total_timers))
        {
          TRC_WARNING("Unable to process any timer entries in GET response");
          rc = HTTP_BAD_REQUEST;
        }
      }
      catch (JsonFormatError err)
      {
        // We've failed to find the Timers array.  This suggests that
        // there's something seriously wrong with the node we're trying
        // to query so don't retry
        TRC_WARNING("JSON body didn't contain the Timers array");
        rc = HTTP_BAD_REQUEST;
      }

      // Send a DELETE to all the nodes to update their timer references
      if (delete_map.size() > 0)
      {
        std::string delete_body = create_delete_body(delete_map);

        for (std::vector<std::string>::iterator it = cluster_nodes.begin();
             it != cluster_nodes.end();
             ++it)
        {
          HTTPCode delete_rc = send_delete(*it, delete_body);

          if (delete_rc != HTTP_ACCEPTED)
          {
            // We've received an error response to the DELETE request. There's
            // not much more we can do here (a timeout will have already
            // been retried). A failed DELETE won't prevent the scaling operation
            // from finishing, it just means that we'll tell other nodes
            // about timers inefficiently.
            TRC_INFO("Error response (%d) to DELETE request to %s",
                     delete_rc,
                     (*it).c_str());
          }
        }
      }
    }
    else
    {
      // We've received an error response to the GET request. A timeout
      // will already have been retried by the underlying HTTPConnection,
      // so don't retry again
      TRC_WARNING("Error response (%d) to GET request to %s",
                  rc,
                  server_to_sync.c_str());
    }
  } while (rc == HTTP_PARTIAL_CONTENT);

  return rc;
}
int main(int argc, char**argv) { // Set up our exception signal handler for asserts and segfaults. signal(SIGABRT, signal_handler); signal(SIGSEGV, signal_handler); sem_init(&term_sem, 0, 0); signal(SIGTERM, terminate_handler); AstaireResolver* astaire_resolver = NULL; struct options options; options.local_host = "127.0.0.1"; options.http_address = "0.0.0.0"; options.http_port = 11888; options.http_threads = 1; options.http_worker_threads = 50; options.homestead_http_name = "homestead-http-name.unknown"; options.digest_timeout = 300; options.home_domain = "home.domain"; options.sas_system_name = ""; options.access_log_enabled = false; options.log_to_file = false; options.log_level = 0; options.astaire = ""; options.cassandra = ""; options.memcached_write_format = MemcachedWriteFormat::JSON; options.target_latency_us = 100000; options.max_tokens = 1000; options.init_token_rate = 100.0; options.min_token_rate = 10.0; options.min_token_rate = 0.0; options.exception_max_ttl = 600; options.astaire_blacklist_duration = AstaireResolver::DEFAULT_BLACKLIST_DURATION; options.http_blacklist_duration = HttpResolver::DEFAULT_BLACKLIST_DURATION; options.pidfile = ""; options.daemon = false; if (init_logging_options(argc, argv, options) != 0) { return 1; } Utils::daemon_log_setup(argc, argv, options.daemon, options.log_directory, options.log_level, options.log_to_file); std::stringstream options_ss; for (int ii = 0; ii < argc; ii++) { options_ss << argv[ii]; options_ss << " "; } std::string options_str = "Command-line options were: " + options_ss.str(); TRC_INFO(options_str.c_str()); if (init_options(argc, argv, options) != 0) { return 1; } if (options.pidfile != "") { int rc = Utils::lock_and_write_pidfile(options.pidfile); if (rc == -1) { // Failure to acquire pidfile lock TRC_ERROR("Could not write pidfile - exiting"); return 2; } } start_signal_handlers(); AccessLogger* access_logger = NULL; if (options.access_log_enabled) { TRC_STATUS("Access logging enabled to %s", 
options.access_log_directory.c_str()); access_logger = new AccessLogger(options.access_log_directory); } HealthChecker* hc = new HealthChecker(); hc->start_thread(); // Create an exception handler. The exception handler doesn't need // to quiesce the process before killing it. exception_handler = new ExceptionHandler(options.exception_max_ttl, false, hc); // Initialise the SasService, to read the SAS config to pass into SAS::Init SasService* sas_service = new SasService(options.sas_system_name, "memento", false); // Ensure our random numbers are unpredictable. unsigned int seed; seed = time(NULL) ^ getpid(); srand(seed); // Create a DNS resolver. int af = AF_INET; struct in6_addr dummy_addr; if (inet_pton(AF_INET6, options.local_host.c_str(), &dummy_addr) == 1) { TRC_DEBUG("Local host is an IPv6 address"); af = AF_INET6; } DnsCachedResolver* dns_resolver = new DnsCachedResolver("127.0.0.1"); // Create alarm and communication monitor objects for the conditions // reported by memento. AlarmManager* alarm_manager = new AlarmManager(); CommunicationMonitor* astaire_comm_monitor = new CommunicationMonitor(new Alarm(alarm_manager, "memento", AlarmDef::MEMENTO_ASTAIRE_COMM_ERROR, AlarmDef::CRITICAL), "Memento", "Astaire"); CommunicationMonitor* hs_comm_monitor = new CommunicationMonitor(new Alarm(alarm_manager, "memento", AlarmDef::MEMENTO_HOMESTEAD_COMM_ERROR, AlarmDef::CRITICAL), "Memento", "Homestead"); CommunicationMonitor* cass_comm_monitor = new CommunicationMonitor(new Alarm(alarm_manager, "memento", AlarmDef::MEMENTO_CASSANDRA_COMM_ERROR, AlarmDef::CRITICAL), "Memento", "Cassandra"); astaire_resolver = new AstaireResolver(dns_resolver, af, options.astaire_blacklist_duration); // Default the astaire hostname to the loopback IP if (options.astaire == "") { if (af == AF_INET6) { options.astaire = "[::1]"; } else { options.astaire = "127.0.0.1"; } } memcached_store = (Store*)new TopologyNeutralMemcachedStore(options.astaire, astaire_resolver, false, 
astaire_comm_monitor); AuthStore::SerializerDeserializer* serializer; std::vector<AuthStore::SerializerDeserializer*> deserializers; if (options.memcached_write_format == MemcachedWriteFormat::JSON) { serializer = new AuthStore::JsonSerializerDeserializer(); } else { serializer = new AuthStore::BinarySerializerDeserializer(); } deserializers.push_back(new AuthStore::JsonSerializerDeserializer()); deserializers.push_back(new AuthStore::BinarySerializerDeserializer()); AuthStore* auth_store = new AuthStore(memcached_store, serializer, deserializers, options.digest_timeout); LoadMonitor* load_monitor = new LoadMonitor(options.target_latency_us, options.max_tokens, options.init_token_rate, options.min_token_rate, options.max_token_rate); LastValueCache* stats_aggregator = new MementoLVC(); // Create a HTTP specific resolver. HttpResolver* http_resolver = new HttpResolver(dns_resolver, af, options.http_blacklist_duration); HttpClient* http_client = new HttpClient(false, http_resolver, nullptr, load_monitor, SASEvent::HttpLogLevel::PROTOCOL, hs_comm_monitor); HttpConnection* http_connection = new HttpConnection(options.homestead_http_name, http_client); HomesteadConnection* homestead_conn = new HomesteadConnection(http_connection); // Default to a 30s blacklist/graylist duration and port 9160 CassandraResolver* cass_resolver = new CassandraResolver(dns_resolver, af, 30, 30, 9160); // Default the cassandra hostname to the loopback IP if (options.cassandra == "") { if (af == AF_INET6) { options.cassandra = "[::1]"; } else { options.cassandra = "127.0.0.1"; } } // Create and start the call list store. CallListStore::Store* call_list_store = new CallListStore::Store(); call_list_store->configure_connection(options.cassandra, 9160, cass_comm_monitor, cass_resolver); // Test Cassandra connectivity. CassandraStore::ResultCode store_rc = call_list_store->connection_test(); if (store_rc == CassandraStore::OK) { // Store can connect to Cassandra, so start it. 
store_rc = call_list_store->start(); } if (store_rc != CassandraStore::OK) { TRC_ERROR("Unable to create call list store (RC = %d)", store_rc); exit(3); } HttpStackUtils::SimpleStatsManager stats_manager(stats_aggregator); HttpStack* http_stack = new HttpStack(options.http_threads, exception_handler, access_logger, load_monitor, &stats_manager); CallListTask::Config call_list_config(auth_store, homestead_conn, call_list_store, options.home_domain, stats_aggregator, hc, options.api_key); MementoSasLogger sas_logger; HttpStackUtils::PingHandler ping_handler; HttpStackUtils::SpawningHandler<CallListTask, CallListTask::Config> call_list_handler(&call_list_config, &sas_logger); HttpStackUtils::HandlerThreadPool pool(options.http_worker_threads, exception_handler); try { http_stack->initialize(); http_stack->bind_tcp_socket(options.http_address, options.http_port); http_stack->register_handler("^/ping$", &ping_handler); http_stack->register_handler("^/org.projectclearwater.call-list/users/[^/]*/call-list.xml$", pool.wrap(&call_list_handler)); http_stack->start(); } catch (HttpStack::Exception& e) { TRC_ERROR("Failed to initialize HttpStack stack - function %s, rc %d", e._func, e._rc); exit(2); } TRC_STATUS("Start-up complete - wait for termination signal"); sem_wait(&term_sem); TRC_STATUS("Termination signal received - terminating"); try { http_stack->stop(); http_stack->wait_stopped(); } catch (HttpStack::Exception& e) { TRC_ERROR("Failed to stop HttpStack stack - function %s, rc %d", e._func, e._rc); } call_list_store->stop(); call_list_store->wait_stopped(); hc->stop_thread(); delete homestead_conn; homestead_conn = NULL; delete http_connection; http_connection = NULL; delete http_client; http_client = NULL; delete call_list_store; call_list_store = NULL; delete http_resolver; http_resolver = NULL; delete cass_resolver; cass_resolver = NULL; delete dns_resolver; dns_resolver = NULL; delete load_monitor; load_monitor = NULL; delete auth_store; auth_store = NULL; delete 
call_list_store; call_list_store = NULL; delete astaire_resolver; astaire_resolver = NULL; delete memcached_store; memcached_store = NULL; delete exception_handler; exception_handler = NULL; delete hc; hc = NULL; delete http_stack; http_stack = NULL; delete astaire_comm_monitor; astaire_comm_monitor = NULL; delete hs_comm_monitor; hs_comm_monitor = NULL; delete cass_comm_monitor; cass_comm_monitor = NULL; delete alarm_manager; alarm_manager = NULL; delete sas_service; sas_service = NULL; signal(SIGTERM, SIG_DFL); sem_destroy(&term_sem); }
/// Update the data for the specified namespace and key. Writes the data
/// atomically, so if the underlying data has changed since it was last
/// read, the update is rejected and this returns Store::Status::CONTENTION.
///
/// @param table   Namespace (table) the key lives in.
/// @param key     Key within the table.
/// @param data    Value to write.
/// @param cas     CAS value from the preceding read; 0 means "new record".
/// @param expiry  Requested expiry in seconds; 0 requests immediate expiry.
/// @param trail   SAS trail to log against (0 = no SAS logging).
/// @return        OK on success, DATA_CONTENTION on a concurrent-write
///                clash, ERROR if all replicas failed.
Store::Status BaseMemcachedStore::set_data(const std::string& table,
                                           const std::string& key,
                                           const std::string& data,
                                           uint64_t cas,
                                           int expiry,
                                           SAS::TrailId trail)
{
  Store::Status status = Store::Status::OK;

  TRC_DEBUG("Writing %d bytes to table %s key %s, CAS = %ld, expiry = %d",
            data.length(), table.c_str(), key.c_str(), cas, expiry);

  // Construct the fully qualified key (table and key joined by a literal
  // backslash separator).
  std::string fqkey = table + "\\\\" + key;
  const char* key_ptr = fqkey.data();
  const size_t key_len = fqkey.length();

  // The vbucket determines which set of replica connections serves this key.
  int vbucket = vbucket_for_key(fqkey);
  const std::vector<memcached_st*>& replicas = get_replicas(vbucket, Op::WRITE);

  if (trail != 0)
  {
    SAS::Event start(trail, SASEvent::MEMCACHED_SET_START, 0);
    start.add_var_param(fqkey);
    start.add_var_param(data);
    start.add_static_param(cas);
    start.add_static_param(expiry);
    SAS::report_event(start);
  }

  TRC_DEBUG("%d write replicas for key %s", replicas.size(), fqkey.c_str());

  // Calculate a timestamp (least-significant 32 bits of milliseconds since the
  // epoch) for the current time. We store this in the flags field to allow us
  // to resolve conflicts when resynchronizing between memcached servers.
  struct timespec ts;
  (void)clock_gettime(CLOCK_REALTIME, &ts);
  uint32_t flags = (uint32_t)((ts.tv_sec * 1000) + (ts.tv_nsec / 1000000));

  // Memcached uses a flexible mechanism for specifying expiration.
  // - 0 indicates never expire.
  // - <= MEMCACHED_EXPIRATION_MAXDELTA indicates a relative (delta) time.
  // - > MEMCACHED_EXPIRATION_MAXDELTA indicates an absolute time.
  // Absolute time is the only way to force immediate expiry. Unfortunately,
  // it's not reliable - see https://github.com/Metaswitch/cpp-common/issues/160
  // for details. Instead, we use relative time for future times (expiry > 0)
  // and the earliest absolute time for immediate expiry (expiry == 0).
  time_t memcached_expiration =
    (time_t)((expiry > 0) ? expiry : MEMCACHED_EXPIRATION_MAXDELTA + 1);

  // First try to write the primary data record to the first responding
  // server.
  memcached_return_t rc = MEMCACHED_ERROR;
  size_t ii;
  // NOTE(review): replica_idx is only assigned inside the loop; the code
  // assumes get_replicas() never returns an empty vector - confirm.
  size_t replica_idx;

  // If we only have one replica, we should try it twice -
  // libmemcached won't notice a dropped TCP connection until it tries
  // to make a request on it, and will fail the request then
  // reconnect, so the second attempt could still work.
  size_t attempts = (replicas.size() == 1) ? 2: replicas.size();

  for (ii = 0; ii < attempts; ++ii)
  {
    if ((replicas.size() == 1) && (ii == 1))
    {
      // Second attempt against a sole replica - only worth retrying if the
      // first attempt failed because of a dropped connection.
      if (rc != MEMCACHED_CONNECTION_FAILURE)
      {
        // This is a legitimate error, not a transient server failure, so we
        // shouldn't retry.
        break;
      }
      replica_idx = 0;
      TRC_WARNING("Failed to write to sole memcached replica: retrying once");
    }
    else
    {
      replica_idx = ii;
    }

    TRC_DEBUG("Attempt conditional write to vbucket %d on replica %d (connection %p), CAS = %ld, expiry = %d",
              vbucket,
              replica_idx,
              replicas[replica_idx],
              cas,
              expiry);

    if (cas == 0)
    {
      // New record, so attempt to add (but overwrite any tombstones we
      // encounter). This will fail if someone else got there first and some
      // data already exists in memcached for this key.
      rc = add_overwriting_tombstone(replicas[replica_idx],
                                     key_ptr,
                                     key_len,
                                     vbucket,
                                     data,
                                     memcached_expiration,
                                     flags,
                                     trail);
    }
    else
    {
      // This is an update to an existing record, so use memcached_cas
      // to make sure it is atomic.
      rc = memcached_cas_vb(replicas[replica_idx],
                            key_ptr,
                            key_len,
                            _binary ? vbucket : 0,
                            data.data(),
                            data.length(),
                            memcached_expiration,
                            flags,
                            cas);

      if (!memcached_success(rc))
      {
        TRC_DEBUG("memcached_cas command failed, rc = %d (%s)\n%s",
                  rc,
                  memcached_strerror(replicas[replica_idx], rc),
                  memcached_last_error_message(replicas[replica_idx]));
      }
    }

    if (memcached_success(rc))
    {
      TRC_DEBUG("Conditional write succeeded to replica %d", replica_idx);
      break;
    }
    else if ((rc == MEMCACHED_NOTSTORED) ||
             (rc == MEMCACHED_DATA_EXISTS))
    {
      if (trail != 0)
      {
        SAS::Event err(trail, SASEvent::MEMCACHED_SET_CONTENTION, 0);
        err.add_var_param(fqkey);
        SAS::report_event(err);
      }

      // A NOT_STORED or EXISTS response indicates a concurrent write failure,
      // so return this to the application immediately - don't go on to
      // other replicas.
      TRC_INFO("Contention writing data for %s to store", fqkey.c_str());
      status = Store::Status::DATA_CONTENTION;
      break;
    }
  }

  if ((rc == MEMCACHED_SUCCESS) &&
      (replica_idx < replicas.size()))
  {
    // Write has succeeded, so write unconditionally (and asynchronously)
    // to the replicas.
    for (size_t jj = replica_idx + 1; jj < replicas.size(); ++jj)
    {
      TRC_DEBUG("Attempt unconditional write to replica %d", jj);
      // NOREPLY makes the set fire-and-forget so secondary replicas do not
      // add latency to the write path.
      memcached_behavior_set(replicas[jj], MEMCACHED_BEHAVIOR_NOREPLY, 1);
      memcached_set_vb(replicas[jj],
                       key_ptr,
                       key_len,
                       _binary ? vbucket : 0,
                       data.data(),
                       data.length(),
                       memcached_expiration,
                       flags);
      memcached_behavior_set(replicas[jj], MEMCACHED_BEHAVIOR_NOREPLY, 0);
    }
  }

  if ((!memcached_success(rc)) &&
      (rc != MEMCACHED_NOTSTORED) &&
      (rc != MEMCACHED_DATA_EXISTS))
  {
    // Hard failure (not contention) - report it, mark the vbucket as failed
    // and tell the communication monitor.
    if (trail != 0)
    {
      SAS::Event err(trail, SASEvent::MEMCACHED_SET_FAILED, 0);
      err.add_var_param(fqkey);
      SAS::report_event(err);
    }

    update_vbucket_comm_state(vbucket, FAILED);

    if (_comm_monitor)
    {
      _comm_monitor->inform_failure();
    }

    TRC_ERROR("Failed to write data for %s to %d replicas",
              fqkey.c_str(), replicas.size());
    status = Store::Status::ERROR;
  }
  else
  {
    update_vbucket_comm_state(vbucket, OK);

    if (_comm_monitor)
    {
      _comm_monitor->inform_success();
    }
  }

  return status;
}
// Determine whether this request should be challenged (and SAS log appropriately).
//
// Returns PJ_TRUE if the request must be authenticated, PJ_FALSE if it can be
// let through. Only REGISTERs on the S-CSCF port are challenged by default;
// non-REGISTER requests are challenged according to non_register_auth_mode.
static pj_bool_t needs_authentication(pjsip_rx_data* rdata, SAS::TrailId trail)
{
  if (rdata->tp_info.transport->local_name.port != stack_data.scscf_port)
  {
    TRC_DEBUG("Request does not need authentication - not on S-CSCF port");
    // Request not received on S-CSCF port, so don't authenticate it.
    SAS::Event event(trail, SASEvent::AUTHENTICATION_NOT_NEEDED_NOT_SCSCF_PORT, 0);
    SAS::report_event(event);
    return PJ_FALSE;
  }

  if (rdata->msg_info.msg->line.req.method.id == PJSIP_REGISTER_METHOD)
  {
    // Authentication isn't required for emergency registrations. An emergency
    // registration is one where each Contact header contains 'sos' as the SIP
    // URI parameter.
    bool emergency_reg = true;

    pjsip_contact_hdr* contact_hdr = (pjsip_contact_hdr*)
                 pjsip_msg_find_hdr(rdata->msg_info.msg, PJSIP_H_CONTACT, NULL);

    // Walk every Contact header; all must be emergency registrations for the
    // request to be exempt.
    while ((contact_hdr != NULL) && (emergency_reg))
    {
      emergency_reg = PJUtils::is_emergency_registration(contact_hdr);
      contact_hdr = (pjsip_contact_hdr*) pjsip_msg_find_hdr(rdata->msg_info.msg,
                                                            PJSIP_H_CONTACT,
                                                            contact_hdr->next);
    }

    if (emergency_reg)
    {
      SAS::Event event(trail, SASEvent::AUTHENTICATION_NOT_NEEDED_EMERGENCY_REGISTER, 0);
      SAS::report_event(event);
      return PJ_FALSE;
    }

    // Check to see if the request has already been integrity protected?
    pjsip_authorization_hdr* auth_hdr = (pjsip_authorization_hdr*)
           pjsip_msg_find_hdr(rdata->msg_info.msg, PJSIP_H_AUTHORIZATION, NULL);

    if (auth_hdr != NULL)
    {
      // There is an authorization header, so check for the integrity-protected
      // indication.
      TRC_DEBUG("Authorization header in request");
      pjsip_param* integrity =
             pjsip_param_find(&auth_hdr->credential.digest.other_param,
                              &STR_INTEGRITY_PROTECTED);

      if ((integrity != NULL) &&
          ((pj_stricmp(&integrity->value, &STR_TLS_YES) == 0) ||
           (pj_stricmp(&integrity->value, &STR_IP_ASSOC_YES) == 0)))
      {
        // The integrity protected indicator is included and set to tls-yes or
        // ip-assoc-yes. This indicates the client has already been authenticated
        // so we will accept this REGISTER even if there is a challenge response.
        // (Values of tls-pending or ip-assoc-pending indicate the challenge
        // should be checked.)
        TRC_INFO("SIP Digest authenticated request integrity protected by edge proxy");
        SAS::Event event(trail, SASEvent::AUTHENTICATION_NOT_NEEDED_INTEGRITY_PROTECTED, 0);
        SAS::report_event(event);
        return PJ_FALSE;
      }
      else if ((integrity != NULL) &&
               (pj_stricmp(&integrity->value, &STR_YES) == 0) &&
               (auth_hdr->credential.digest.response.slen == 0))
      {
        // The integrity protected indicator is include and set to yes. This
        // indicates that AKA authentication is in use and the REGISTER was
        // received on an integrity protected channel, so we will let the
        // request through if there is no challenge response, but must check
        // the challenge response if included.
        TRC_INFO("AKA authenticated request integrity protected by edge proxy");
        SAS::Event event(trail, SASEvent::AUTHENTICATION_NOT_NEEDED_INTEGRITY_PROTECTED, 1);
        SAS::report_event(event);
        return PJ_FALSE;
      }
    }

    return PJ_TRUE;
  }
  else
  {
    if (PJSIP_MSG_TO_HDR(rdata->msg_info.msg)->tag.slen != 0)
    {
      // This is an in-dialog request which needs no authentication.
      return PJ_FALSE;
    }

    // Check to see if we should authenticate this non-REGISTER message - this
    // is controlled by the configured non-REGISTER authentication mode.
    if (non_register_auth_mode == NonRegisterAuthentication::NEVER)
    {
      // Configured to never authenticate non-REGISTER requests.
      SAS::Event event(trail, SASEvent::AUTHENTICATION_NOT_NEEDED_NEVER_AUTH_NON_REG, 0);
      SAS::report_event(event);
      return PJ_FALSE;
    }
    else if (non_register_auth_mode == NonRegisterAuthentication::IF_PROXY_AUTHORIZATION_PRESENT)
    {
      // Only authenticate the request if it has a Proxy-Authorization header.
      pjsip_proxy_authorization_hdr* auth_hdr = (pjsip_proxy_authorization_hdr*)
           pjsip_msg_find_hdr(rdata->msg_info.msg, PJSIP_H_PROXY_AUTHORIZATION, NULL);

      if (auth_hdr != NULL)
      {
        // Edge proxy has explicitly asked us to authenticate this non-REGISTER
        // message
        SAS::Event event(trail, SASEvent::AUTHENTICATION_NEEDED_PROXY_AUTHORIZATION, 0);
        SAS::report_event(event);
        return PJ_TRUE;
      }
      else
      {
        // No Proxy-Authorization header - this indicates the P-CSCF trusts this
        // message so we don't need to perform further authentication.
        SAS::Event event(trail, SASEvent::AUTHENTICATION_NOT_NEEDED_PROXY_AUTHORIZATION, 0);
        SAS::report_event(event);
        return PJ_FALSE;
      }
    }
    else
    {
      // Unrecognized authentication mode - should never happen.
      // LCOV_EXCL_START
      assert(!"Unrecognized authentication mode");
      return PJ_FALSE;
      // LCOV_EXCL_STOP
    }
  }
}
/// Invokes third-party REGISTERs to the application servers selected by the
/// subscriber's iFCs.
///
/// @param ifcs                       iFCs to evaluate for AS invocations.
/// @param store                      Registration store (passed through).
/// @param received_register          Original REGISTER, or NULL to fabricate
///                                   one (e.g. network-initiated dereg).
/// @param ok_response                200 OK sent for the REGISTER; NULL iff
///                                   received_register is NULL.
/// @param expires                    Registration expiry; 0 = deregistration.
/// @param is_initial_registration    Whether this is an initial registration.
/// @param served_user                The served user (AOR) being registered.
/// @param third_party_reg_stats_tbls If non-NULL, replaces the file-scope
///                                   stats tables used below.
/// @param trail                      SAS trail for logging.
void RegistrationUtils::register_with_application_servers(Ifcs& ifcs,
                                                          RegStore* store,
                                                          pjsip_rx_data *received_register,
                                                          pjsip_tx_data *ok_response, // Can only be NULL if received_register is
                                                          int expires,
                                                          bool is_initial_registration,
                                                          const std::string& served_user,
                                                          SNMP::RegistrationStatsTables* third_party_reg_stats_tbls,
                                                          SAS::TrailId trail)
{
  // Function preconditions
  if (received_register == NULL)
  {
    // We should have both messages or neither
    assert(ok_response == NULL);
  }
  else
  {
    // We should have both messages or neither
    assert(ok_response != NULL);
  }

  // NOTE(review): if the caller passes NULL here and the file-scope
  // third_party_reg_stats_tables was never set, the stats increments in the
  // loop below dereference NULL - confirm initialization order.
  if (third_party_reg_stats_tbls != NULL)
  {
    third_party_reg_stats_tables = third_party_reg_stats_tbls;
  }

  std::vector<AsInvocation> as_list;
  // Choice of SessionCase::Originating is not arbitrary - we don't expect iFCs to specify SessionCase
  // constraints for REGISTER messages, but we only get the served user from the From address in an
  // Originating message, otherwise we use the Request-URI. We need to use the From for REGISTERs.
  // See 3GPP TS 23.218 s5.2.1 note 2: "REGISTER is considered part of the UE-originating".
  if (received_register == NULL)
  {
    // No original REGISTER available (e.g. network-initiated deregistration),
    // so fabricate one purely for iFC evaluation.
    pj_status_t status;
    pjsip_method method;
    pjsip_method_set(&method, PJSIP_REGISTER_METHOD);
    pjsip_tx_data *tdata;

    // Keep the URI string alive for the lifetime of the pj_str_t view.
    std::string served_user_uri_string = "<"+served_user+">";
    const pj_str_t served_user_uri = pj_str(const_cast<char *>(served_user_uri_string.c_str()));

    TRC_INFO("Generating a fake REGISTER to send to IfcHandler using AOR %s", served_user.c_str());

    SAS::Event event(trail, SASEvent::REGISTER_AS_START, 0);
    event.add_var_param(served_user);
    SAS::report_event(event);

    status = pjsip_endpt_create_request(stack_data.endpt,
                                        &method,               // Method
                                        &stack_data.scscf_uri, // Target
                                        &served_user_uri,      // From
                                        &served_user_uri,      // To
                                        &served_user_uri,      // Contact
                                        NULL,                  // Auto-generate Call-ID
                                        1,                     // CSeq
                                        NULL,                  // No body
                                        &tdata);               // OUT

    if (status == PJ_SUCCESS)
    {
      // As per TS 24.229, section 5.4.1.7, note 1, we don't fill in any
      // P-Associated-URI details.
      ifcs.interpret(SessionCase::Originating, true, is_initial_registration, tdata->msg, as_list, trail);
      pjsip_tx_data_dec_ref(tdata);
    }
    else
    {
      TRC_DEBUG("Unable to create third party registration for %s", served_user.c_str());
      // NOTE(review): this is the registration path but the event logged is
      // DEREGISTER_AS_FAILED - confirm the event ID is intentional.
      SAS::Event event(trail, SASEvent::DEREGISTER_AS_FAILED, 0);
      event.add_var_param(served_user);
      SAS::report_event(event);
    }
  }
  else
  {
    // We have the real REGISTER, so evaluate the iFCs directly against it.
    ifcs.interpret(SessionCase::Originating, true, is_initial_registration, received_register->msg_info.msg, as_list, trail);
  }

  TRC_INFO("Found %d Application Servers", as_list.size());

  // Loop through the as_list
  for (std::vector<AsInvocation>::iterator as_iter = as_list.begin();
       as_iter != as_list.end();
       as_iter++)
  {
    // Count the attempt in the appropriate stats table before invoking the AS.
    if (expires == 0)
    {
      third_party_reg_stats_tables->de_reg_tbl->increment_attempts();
    }
    else if (is_initial_registration)
    {
      third_party_reg_stats_tables->init_reg_tbl->increment_attempts();
    }
    else
    {
      third_party_reg_stats_tables->re_reg_tbl->increment_attempts();
    }
    send_register_to_as(received_register, ok_response, *as_iter, expires, is_initial_registration, served_user, trail);
  }
}
/// Handles an in-dialog request received by the subscription sproutlet.
///
/// In-dialog requests are processed identically to initial requests, so this
/// simply logs and delegates to the common handler.
void SubscriptionSproutletTsx::on_rx_in_dialog_request(pjsip_msg* req)
{
  TRC_INFO("Subscription sproutlet received in dialog request");

  // Same processing path as an initial request.
  on_rx_request(req);
}
void ControllerTask::handle_delete() { // Check the request has a valid JSON body std::string body = _req.get_rx_body(); rapidjson::Document doc; doc.Parse<0>(body.c_str()); if (doc.HasParseError()) { TRC_INFO("Failed to parse document as JSON"); send_http_reply(HTTP_BAD_REQUEST); return; } // Now loop through the body, pulling out the IDs/replica numbers // The JSON body should have the format: // {"IDs": [{"ID": 123, "ReplicaIndex": 0}, // {"ID": 456, "ReplicaIndex": 2}, // ...] // The replica_index is zero-indexed (so the primary replica has an // index of 0). try { JSON_ASSERT_CONTAINS(doc, JSON_IDS); JSON_ASSERT_ARRAY(doc[JSON_IDS]); const rapidjson::Value& ids_arr = doc[JSON_IDS]; // The request is valid, so respond with a 202. Now loop through the // the body and update the replica trackers. send_http_reply(HTTP_ACCEPTED); for (rapidjson::Value::ConstValueIterator ids_it = ids_arr.Begin(); ids_it != ids_arr.End(); ++ids_it) { try { TimerID timer_id; int replica_index; JSON_GET_INT_64_MEMBER(*ids_it, JSON_ID, timer_id); JSON_GET_INT_MEMBER(*ids_it, JSON_REPLICA_INDEX, replica_index); // Update the timer's replica_tracker to show that the replicas // at level 'replica_index' and higher have been informed // about the timer. This will tombstone the timer if all // replicas have been informed. _cfg->_handler->update_replica_tracker_for_timer(timer_id, replica_index); } catch (JsonFormatError err) { TRC_INFO("JSON entry was invalid (hit error at %s:%d)", err._file, err._line); } } } catch (JsonFormatError err) { TRC_INFO("JSON body didn't contain the IDs array"); send_http_reply(HTTP_BAD_REQUEST); } }
/// Handles a GET request for the timers owned by a given node, used during
/// cluster resynchronization (scaling).
///
/// Replies 400 for missing/invalid parameters, otherwise returns the result
/// of the timer fetch (200 or 206 Partial Content with a Content-Range
/// header echoing the requested Range).
void ControllerTask::handle_get()
{
  // Check the request is valid. It must have the node-for-replicas,
  // sync-mode and cluster-view-id parameters set, the sync-mode parameter
  // must be SCALE (this will be extended later), the request-node
  // must correspond to a node in the Chronos cluster (it can be a
  // leaving node), and the cluster-view-id request must correspond to
  // the receiving nodes view of the cluster configuration
  std::string node_for_replicas = _req.param(PARAM_NODE_FOR_REPLICAS);
  std::string sync_mode = _req.param(PARAM_SYNC_MODE);
  std::string cluster_view_id = _req.param(PARAM_CLUSTER_VIEW_ID);

  if ((node_for_replicas == "") ||
      (sync_mode == "") ||
      (cluster_view_id == ""))
  {
    TRC_INFO("GET request doesn't have mandatory parameters");
    send_http_reply(HTTP_BAD_REQUEST);
    return;
  }

  std::string global_cluster_view_id;
  __globals->get_cluster_view_id(global_cluster_view_id);

  if (cluster_view_id != global_cluster_view_id)
  {
    // The requester has a stale view of the cluster topology.
    TRC_INFO("GET request is for an out of date cluster (%s and %s)",
             cluster_view_id.c_str(),
             global_cluster_view_id.c_str());
    send_http_reply(HTTP_BAD_REQUEST);
    return;
  }

  if (!node_is_in_cluster(node_for_replicas))
  {
    TRC_DEBUG("The request node isn't a Chronos node: %s",
              node_for_replicas.c_str());
    send_http_reply(HTTP_BAD_REQUEST);
    return;
  }

  if (sync_mode == PARAM_SYNC_MODE_VALUE_SCALE)
  {
    // The Range header caps how many timers are returned in one response.
    // atoi yields 0 for a missing or non-numeric header - presumably
    // get_timers_for_node treats 0 as "no limit"; TODO confirm.
    std::string max_timers_from_req = _req.header(HEADER_RANGE);
    int max_timers_to_get = atoi(max_timers_from_req.c_str());
    TRC_DEBUG("Range value is %d", max_timers_to_get);

    std::string get_response;
    HTTPCode rc = _cfg->_handler->get_timers_for_node(node_for_replicas,
                                                      max_timers_to_get,
                                                      cluster_view_id,
                                                      get_response);
    _req.add_content(get_response);

    if (rc == HTTP_PARTIAL_CONTENT)
    {
      // Echo the requested range back so the client knows the response was
      // truncated.
      _req.add_header(HEADER_CONTENT_RANGE, max_timers_from_req);
    }

    send_http_reply(rc);
  }
  else
  {
    TRC_DEBUG("Sync mode is unsupported: %s", sync_mode.c_str());
    send_http_reply(HTTP_BAD_REQUEST);
  }
}
/// Homestead entry point: parses options, brings up logging, SAS, statistics,
/// the Cassandra cache, the Diameter stack and the HTTP stack, then blocks on
/// a termination semaphore before tearing everything down in reverse order.
int main(int argc, char**argv)
{
  CommunicationMonitor* hss_comm_monitor = NULL;
  CommunicationMonitor* cassandra_comm_monitor = NULL;

  // Set up our exception signal handler for asserts and segfaults.
  signal(SIGABRT, signal_handler);
  signal(SIGSEGV, signal_handler);

  // SIGTERM posts term_sem (via terminate_handler) to unblock the wait below.
  sem_init(&term_sem, 0, 0);
  signal(SIGTERM, terminate_handler);

  // Default option values; overridden by init_logging_options/init_options.
  struct options options;
  options.local_host = "127.0.0.1";
  options.home_domain = "dest-realm.unknown";
  options.diameter_conf = "homestead.conf";
  options.dns_servers.push_back("127.0.0.1");
  options.http_address = "0.0.0.0";
  options.http_port = 8888;
  options.http_threads = 1;
  options.cache_threads = 10;
  options.cassandra = "localhost";
  options.dest_realm = "";
  options.dest_host = "dest-host.unknown";
  options.max_peers = 2;
  options.server_name = "sip:server-name.unknown";
  options.scheme_unknown = "Unknown";
  options.scheme_digest = "SIP Digest";
  options.scheme_aka = "Digest-AKAv1-MD5";
  options.access_log_enabled = false;
  options.impu_cache_ttl = 0;
  options.hss_reregistration_time = 1800;
  options.sprout_http_name = "sprout-http-name.unknown";
  options.log_to_file = false;
  options.log_level = 0;
  options.sas_server = "0.0.0.0";
  options.sas_system_name = "";
  options.diameter_timeout_ms = 200;
  options.alarms_enabled = false;
  options.target_latency_us = 100000;
  options.max_tokens = 20;
  options.init_token_rate = 100.0;
  options.min_token_rate = 10.0;
  options.exception_max_ttl = 600;
  options.http_blacklist_duration = HttpResolver::DEFAULT_BLACKLIST_DURATION;
  options.diameter_blacklist_duration = DiameterResolver::DEFAULT_BLACKLIST_DURATION;

  boost::filesystem::path p = argv[0];
  // Copy the filename to a string so that we can be sure of its lifespan -
  // the value passed to openlog must be valid for the duration of the program.
  std::string filename = p.filename().c_str();
  openlog(filename.c_str(), PDLOG_PID, PDLOG_LOCAL6);

  CL_HOMESTEAD_STARTED.log();

  if (init_logging_options(argc, argv, options) != 0)
  {
    closelog();
    return 1;
  }

  Log::setLoggingLevel(options.log_level);

  if ((options.log_to_file) && (options.log_directory != ""))
  {
    // Work out the program name from argv[0], stripping anything before the final slash.
    char* prog_name = argv[0];
    char* slash_ptr = rindex(argv[0], '/');
    if (slash_ptr != NULL)
    {
      prog_name = slash_ptr + 1;
    }
    Log::setLogger(new Logger(options.log_directory, prog_name));
  }

  TRC_STATUS("Log level set to %d", options.log_level);

  // Log the full command line for diagnostics.
  std::stringstream options_ss;
  for (int ii = 0; ii < argc; ii++)
  {
    options_ss << argv[ii];
    options_ss << " ";
  }
  std::string options_str = "Command-line options were: " + options_ss.str();
  TRC_INFO(options_str.c_str());

  if (init_options(argc, argv, options) != 0)
  {
    closelog();
    return 1;
  }

  AccessLogger* access_logger = NULL;
  if (options.access_log_enabled)
  {
    TRC_STATUS("Access logging enabled to %s", options.access_log_directory.c_str());
    access_logger = new AccessLogger(options.access_log_directory);
  }

  // Create a DNS resolver and a SIP specific resolver.
  // Detect whether the local host is IPv6 so resolvers use the right family.
  int af = AF_INET;
  struct in6_addr dummy_addr;
  if (inet_pton(AF_INET6, options.local_host.c_str(), &dummy_addr) == 1)
  {
    TRC_DEBUG("Local host is an IPv6 address");
    af = AF_INET6;
  }

  SAS::init(options.sas_system_name,
            "homestead",
            SASEvent::CURRENT_RESOURCE_BUNDLE,
            options.sas_server,
            sas_write);

  // Set up the statistics (Homestead specific and Diameter)
  const static std::string known_stats[] = {
    "H_latency_us",
    "H_hss_latency_us",
    "H_hss_digest_latency_us",
    "H_hss_subscription_latency_us",
    "H_cache_latency_us",
    "H_incoming_requests",
    "H_rejected_overload",
    "H_diameter_invalid_dest_host",
    "H_diameter_invalid_dest_realm",
  };
  const static int num_known_stats = sizeof(known_stats) / sizeof(std::string);
  LastValueCache* lvc = new LastValueCache(num_known_stats,
                                           known_stats,
                                           "homestead",
                                           1000);
  StatisticsManager* stats_manager = new StatisticsManager(lvc);
  StatisticCounter* realm_counter = new StatisticCounter("H_diameter_invalid_dest_realm",
                                                         lvc);
  StatisticCounter* host_counter = new StatisticCounter("H_diameter_invalid_dest_host",
                                                        lvc);

  if (options.alarms_enabled)
  {
    // Create Homesteads's alarm objects. Note that the alarm identifier strings must match those
    // in the alarm definition JSON file exactly.
    hss_comm_monitor = new CommunicationMonitor(new Alarm("homestead",
                                                          AlarmDef::HOMESTEAD_HSS_COMM_ERROR,
                                                          AlarmDef::CRITICAL));
    cassandra_comm_monitor = new CommunicationMonitor(new Alarm("homestead",
                                                                AlarmDef::HOMESTEAD_CASSANDRA_COMM_ERROR,
                                                                AlarmDef::CRITICAL));

    // Start the alarm request agent
    AlarmReqAgent::get_instance().start();
    AlarmState::clear_all("homestead");
  }

  // Create an exception handler. The exception handler doesn't need
  // to quiesce the process before killing it.
  HealthChecker* hc = new HealthChecker();
  pthread_t health_check_thread;
  pthread_create(&health_check_thread,
                 NULL,
                 &HealthChecker::static_main_thread_function,
                 (void*)hc);
  exception_handler = new ExceptionHandler(options.exception_max_ttl,
                                           false,
                                           hc);

  LoadMonitor* load_monitor = new LoadMonitor(options.target_latency_us,
                                              options.max_tokens,
                                              options.init_token_rate,
                                              options.min_token_rate);

  DnsCachedResolver* dns_resolver = new DnsCachedResolver(options.dns_servers);

  HttpResolver* http_resolver = new HttpResolver(dns_resolver,
                                                 af,
                                                 options.http_blacklist_duration);

  Cache* cache = Cache::get_instance();
  cache->configure_connection(options.cassandra, 9160, cassandra_comm_monitor);
  cache->configure_workers(exception_handler, options.cache_threads, 0);

  // Test the connection to Cassandra before starting the store.
  CassandraStore::ResultCode rc = cache->connection_test();

  if (rc == CassandraStore::OK)
  {
    // Cassandra connection is good, so start the store.
    rc = cache->start();
  }

  if (rc != CassandraStore::OK)
  {
    CL_HOMESTEAD_CASSANDRA_CACHE_INIT_FAIL.log(rc);
    closelog();
    TRC_ERROR("Failed to initialize the Cassandra cache with error code %d.", rc);
    TRC_STATUS("Homestead is shutting down");
    exit(2);
  }

  HttpConnection* http = new HttpConnection(options.sprout_http_name,
                                            false,
                                            http_resolver,
                                            SASEvent::HttpLogLevel::PROTOCOL,
                                            NULL);
  SproutConnection* sprout_conn = new SproutConnection(http);

  RegistrationTerminationTask::Config* rtr_config = NULL;
  PushProfileTask::Config* ppr_config = NULL;
  Diameter::SpawningHandler<RegistrationTerminationTask, RegistrationTerminationTask::Config>* rtr_task = NULL;
  Diameter::SpawningHandler<PushProfileTask, PushProfileTask::Config>* ppr_task = NULL;

  Cx::Dictionary* dict = NULL;
  Diameter::Stack* diameter_stack = Diameter::Stack::get_instance();
  try
  {
    diameter_stack->initialize();
    diameter_stack->configure(options.diameter_conf,
                              exception_handler,
                              hss_comm_monitor,
                              realm_counter,
                              host_counter);
    dict = new Cx::Dictionary();

    rtr_config = new RegistrationTerminationTask::Config(cache, dict, sprout_conn, options.hss_reregistration_time);
    ppr_config = new PushProfileTask::Config(cache, dict, options.impu_cache_ttl, options.hss_reregistration_time);
    rtr_task = new Diameter::SpawningHandler<RegistrationTerminationTask, RegistrationTerminationTask::Config>(dict, rtr_config);
    ppr_task = new Diameter::SpawningHandler<PushProfileTask, PushProfileTask::Config>(dict, ppr_config);

    diameter_stack->advertize_application(Diameter::Dictionary::Application::AUTH,
                                          dict->TGPP, dict->CX);
    diameter_stack->register_handler(dict->CX, dict->REGISTRATION_TERMINATION_REQUEST, rtr_task);
    diameter_stack->register_handler(dict->CX, dict->PUSH_PROFILE_REQUEST, ppr_task);
    diameter_stack->register_fallback_handler(dict->CX);
    diameter_stack->start();
  }
  catch (Diameter::Stack::Exception& e)
  {
    CL_HOMESTEAD_DIAMETER_INIT_FAIL.log(e._func, e._rc);
    closelog();
    TRC_ERROR("Failed to initialize Diameter stack - function %s, rc %d", e._func, e._rc);
    TRC_STATUS("Homestead is shutting down");
    exit(2);
  }

  HttpStack* http_stack = HttpStack::get_instance();

  HssCacheTask::configure_diameter(diameter_stack,
                                   options.dest_realm.empty() ? options.home_domain : options.dest_realm,
                                   options.dest_host == "0.0.0.0" ? "" : options.dest_host,
                                   options.server_name,
                                   dict);
  HssCacheTask::configure_cache(cache);
  HssCacheTask::configure_health_checker(hc);
  HssCacheTask::configure_stats(stats_manager);

  // We should only query the cache for AV information if there is no HSS. If there is an HSS, we
  // should always hit it. If there is not, the AV information must have been provisioned in the
  // "cache" (which becomes persistent).
  bool hss_configured = !(options.dest_realm.empty() && (options.dest_host.empty() || options.dest_host == "0.0.0.0"));

  // Handler configuration objects for each HTTP API endpoint.
  ImpiTask::Config impi_handler_config(hss_configured,
                                       options.impu_cache_ttl,
                                       options.scheme_unknown,
                                       options.scheme_digest,
                                       options.scheme_aka,
                                       options.diameter_timeout_ms);
  ImpiRegistrationStatusTask::Config registration_status_handler_config(hss_configured, options.diameter_timeout_ms);
  ImpuLocationInfoTask::Config location_info_handler_config(hss_configured, options.diameter_timeout_ms);
  ImpuRegDataTask::Config impu_handler_config(hss_configured, options.hss_reregistration_time, options.diameter_timeout_ms);
  ImpuIMSSubscriptionTask::Config impu_handler_config_old(hss_configured, options.hss_reregistration_time, options.diameter_timeout_ms);

  HttpStackUtils::PingHandler ping_handler;
  HttpStackUtils::SpawningHandler<ImpiDigestTask, ImpiTask::Config> impi_digest_handler(&impi_handler_config);
  HttpStackUtils::SpawningHandler<ImpiAvTask, ImpiTask::Config> impi_av_handler(&impi_handler_config);
  HttpStackUtils::SpawningHandler<ImpiRegistrationStatusTask, ImpiRegistrationStatusTask::Config> impi_reg_status_handler(&registration_status_handler_config);
  HttpStackUtils::SpawningHandler<ImpuLocationInfoTask, ImpuLocationInfoTask::Config> impu_loc_info_handler(&location_info_handler_config);
  HttpStackUtils::SpawningHandler<ImpuRegDataTask, ImpuRegDataTask::Config> impu_reg_data_handler(&impu_handler_config);
  HttpStackUtils::SpawningHandler<ImpuIMSSubscriptionTask, ImpuIMSSubscriptionTask::Config> impu_ims_sub_handler(&impu_handler_config_old);

  try
  {
    http_stack->initialize();
    http_stack->configure(options.http_address,
                          options.http_port,
                          options.http_threads,
                          exception_handler,
                          access_logger,
                          load_monitor,
                          stats_manager);
    http_stack->register_handler("^/ping$", &ping_handler);
    http_stack->register_handler("^/impi/[^/]*/digest$", &impi_digest_handler);
    http_stack->register_handler("^/impi/[^/]*/av", &impi_av_handler);
    http_stack->register_handler("^/impi/[^/]*/registration-status$", &impi_reg_status_handler);
    http_stack->register_handler("^/impu/[^/]*/location$", &impu_loc_info_handler);
    http_stack->register_handler("^/impu/[^/]*/reg-data$", &impu_reg_data_handler);
    http_stack->register_handler("^/impu/", &impu_ims_sub_handler);
    http_stack->start();
  }
  catch (HttpStack::Exception& e)
  {
    CL_HOMESTEAD_HTTP_INIT_FAIL.log(e._func, e._rc);
    closelog();
    TRC_ERROR("Failed to initialize HttpStack stack - function %s, rc %d", e._func, e._rc);
    TRC_STATUS("Homestead is shutting down");
    exit(2);
  }

  DiameterResolver* diameter_resolver = NULL;
  RealmManager* realm_manager = NULL;

  if (hss_configured)
  {
    diameter_resolver = new DiameterResolver(dns_resolver,
                                             af,
                                             options.diameter_blacklist_duration);
    realm_manager = new RealmManager(diameter_stack,
                                     options.dest_realm,
                                     options.dest_host,
                                     options.max_peers,
                                     diameter_resolver);
    realm_manager->start();
  }

  TRC_STATUS("Start-up complete - wait for termination signal");
  sem_wait(&term_sem);
  TRC_STATUS("Termination signal received - terminating");
  CL_HOMESTEAD_ENDED.log();

  // Shutdown: stop the stacks, then delete objects in reverse creation order.
  try
  {
    http_stack->stop();
    http_stack->wait_stopped();
  }
  catch (HttpStack::Exception& e)
  {
    CL_HOMESTEAD_HTTP_STOP_FAIL.log(e._func, e._rc);
    TRC_ERROR("Failed to stop HttpStack stack - function %s, rc %d", e._func, e._rc);
  }

  cache->stop();
  cache->wait_stopped();

  try
  {
    diameter_stack->stop();
    diameter_stack->wait_stopped();
  }
  catch (Diameter::Stack::Exception& e)
  {
    CL_HOMESTEAD_DIAMETER_STOP_FAIL.log(e._func, e._rc);
    TRC_ERROR("Failed to stop Diameter stack - function %s, rc %d", e._func, e._rc);
  }

  delete dict; dict = NULL;
  delete ppr_config; ppr_config = NULL;
  delete rtr_config; rtr_config = NULL;
  delete ppr_task; ppr_task = NULL;
  delete rtr_task; rtr_task = NULL;
  delete sprout_conn; sprout_conn = NULL;

  // NOTE(review): dns_resolver is only deleted when hss_configured is true -
  // confirm the leak on the no-HSS path is acceptable at process exit.
  if (hss_configured)
  {
    realm_manager->stop();
    delete realm_manager; realm_manager = NULL;
    delete diameter_resolver; diameter_resolver = NULL;
    delete dns_resolver; dns_resolver = NULL;
  }

  delete realm_counter; realm_counter = NULL;
  delete host_counter; host_counter = NULL;
  delete stats_manager; stats_manager = NULL;
  delete lvc; lvc = NULL;

  hc->terminate();
  pthread_join(health_check_thread, NULL);
  delete hc; hc = NULL;
  delete exception_handler; exception_handler = NULL;
  delete load_monitor; load_monitor = NULL;

  SAS::term();
  closelog();

  if (options.alarms_enabled)
  {
    // Stop the alarm request agent
    AlarmReqAgent::get_instance().stop();

    // Delete Homestead's alarm objects
    delete hss_comm_monitor;
    delete cassandra_comm_monitor;
  }

  signal(SIGTERM, SIG_DFL);
  sem_destroy(&term_sem);
}
/// Parses the response from the HSS.
///
/// Populates _hss_rsp (selected S-CSCF and mandatory/optional capability
/// sets) and _queried_caps from the JSON response body.
///
/// @param rsp          Parsed JSON response from the HSS (non-NULL).
/// @param queried_caps Whether the query explicitly requested capabilities;
///                     may be overridden if the HSS returned them anyway.
/// @return             A SIP status code: 200 on success, 404 for an
///                     HSS error, 480 for a malformed capability set or an
///                     unregistered subscriber with no services.
int ICSCFRouter::parse_hss_response(rapidjson::Document*& rsp, bool queried_caps)
{
  int status_code = PJSIP_SC_OK;

  // Clear out any older response.
  _queried_caps = false;
  _hss_rsp.mandatory_caps.clear();
  _hss_rsp.optional_caps.clear();
  _hss_rsp.scscf = "";

  if ((!rsp->HasMember("result-code")) ||
      (!(*rsp)["result-code"].IsInt()))
  {
    // Error from HSS, so respond with 404 Not Found. (This may be changed
    // to 403 Forbidden if request is a REGISTER.)
    status_code = PJSIP_SC_NOT_FOUND;
  }
  else
  {
    int rc = (*rsp)["result-code"].GetInt();

    // 2001/2002/2003 are the Diameter "success" experimental result codes.
    if ((rc == 2001) ||
        (rc == 2002) ||
        (rc == 2003))
    {
      // Successful response from HSS, so parse it.
      if ((rsp->HasMember("scscf")) &&
          ((*rsp)["scscf"].IsString()))
      {
        // Response specifies a S-CSCF, so select this as the target.
        TRC_DEBUG("HSS returned S-CSCF %s as target", (*rsp)["scscf"].GetString());
        _hss_rsp.scscf = (*rsp)["scscf"].GetString();
      }

      if ((rsp->HasMember("mandatory-capabilities")) &&
          ((*rsp)["mandatory-capabilities"].IsArray()) &&
          (rsp->HasMember("optional-capabilities")) &&
          ((*rsp)["optional-capabilities"].IsArray()))
      {
        // Response specifies capabilities - we might have explicitly
        // queried capabilities or implicitly because there was no
        // server assigned.
        TRC_DEBUG("HSS returned capabilities");
        queried_caps = true;

        if ((!parse_capabilities((*rsp)["mandatory-capabilities"], _hss_rsp.mandatory_caps)) ||
            (!parse_capabilities((*rsp)["optional-capabilities"], _hss_rsp.optional_caps)))
        {
          // Failed to parse capabilities, so reject with 480 response.
          TRC_INFO("Malformed required capabilities returned by HSS\n");
          status_code = PJSIP_SC_TEMPORARILY_UNAVAILABLE;
        }
      }
    }
    else if (rc == 5003)
    {
      // Failure response from HSS indicating that a subscriber exists but is unregistered and
      // has no unregistered services, so respond with 480 Temporarily Unavailable.
      status_code = PJSIP_SC_TEMPORARILY_UNAVAILABLE;
    }
    else
    {
      // Error from HSS, so respond with 404 Not Found. (This may be changed
      // to 403 Forbidden if request is a REGISTER.)
      status_code = PJSIP_SC_NOT_FOUND;
    }
  }

  // Record whether or not we got valid capabilities from the HSS. This can
  // either be because we forced capabilities in the query (in this case, empty
  // capabilities means the HSS doesn't care which S-CSCF we select) or because
  // the HSS decided to return capabilities anyway.
  _queried_caps = (status_code == PJSIP_SC_OK) ? queried_caps : false;

  if (_acr != NULL)
  {
    // Pass the server capabilities to the ACR for reporting.
    _acr->server_capabilities(_hss_rsp);
  }

  return status_code;
}
int init_options(int argc, char**argv, struct options& options) { int opt; int long_opt_ind; optind = 0; while ((opt = getopt_long(argc, argv, "", long_opt, &long_opt_ind)) != -1) { switch (opt) { case LOCAL_HOST: TRC_INFO("Local host: %s", optarg); options.local_host = std::string(optarg); break; case HTTP_ADDRESS: TRC_INFO("HTTP bind address: %s", optarg); options.http_address = std::string(optarg); break; case HTTP_THREADS: TRC_INFO("Number of HTTP threads: %s", optarg); options.http_threads = atoi(optarg); break; case HTTP_WORKER_THREADS: TRC_INFO("Number of HTTP worker threads: %s", optarg); options.http_worker_threads = atoi(optarg); break; case HOMESTEAD_HTTP_NAME: TRC_INFO("Homestead HTTP address: %s", optarg); options.homestead_http_name = std::string(optarg); break; case DIGEST_TIMEOUT: options.digest_timeout = atoi(optarg); if (options.digest_timeout == 0) { // If the supplied option is invalid then revert to the // default five minutes options.digest_timeout = 300; } TRC_INFO("Digest timeout: %s", optarg); break; case HOME_DOMAIN: options.home_domain = std::string(optarg); TRC_INFO("Home domain: %s", optarg); break; case SAS_CONFIG: { std::vector<std::string> sas_options; Utils::split_string(std::string(optarg), ',', sas_options, 0, false); if ((sas_options.size() == 2) && !sas_options[0].empty() && !sas_options[1].empty()) { options.sas_server = sas_options[0]; options.sas_system_name = sas_options[1]; TRC_INFO("SAS set to %s\n", options.sas_server.c_str()); TRC_INFO("System name is set to %s\n", options.sas_system_name.c_str()); } else { TRC_INFO("Invalid --sas option, SAS disabled\n"); } } break; case ACCESS_LOG: TRC_INFO("Access log: %s", optarg); options.access_log_enabled = true; options.access_log_directory = std::string(optarg); break; case MEMCACHED_WRITE_FORMAT: if (strcmp(optarg, "binary") == 0) { TRC_INFO("Memcached write format set to 'binary'"); options.memcached_write_format = MemcachedWriteFormat::BINARY; } else if (strcmp(optarg, 
"json") == 0) { TRC_INFO("Memcached write format set to 'json'"); options.memcached_write_format = MemcachedWriteFormat::JSON; } else { TRC_WARNING("Invalid value for memcached-write-format, using '%s'." "Got '%s', valid vales are 'json' and 'binary'", ((options.memcached_write_format == MemcachedWriteFormat::JSON) ? "json" : "binary"), optarg); } break; case TARGET_LATENCY_US: options.target_latency_us = atoi(optarg); if (options.target_latency_us <= 0) { TRC_ERROR("Invalid --target-latency-us option %s", optarg); return -1; } break; case MAX_TOKENS: options.max_tokens = atoi(optarg); if (options.max_tokens <= 0) { TRC_ERROR("Invalid --max-tokens option %s", optarg); return -1; } break; case INIT_TOKEN_RATE: options.init_token_rate = atoi(optarg); if (options.init_token_rate <= 0) { TRC_ERROR("Invalid --init-token-rate option %s", optarg); return -1; } break; case MIN_TOKEN_RATE: options.min_token_rate = atoi(optarg); if (options.min_token_rate <= 0) { TRC_ERROR("Invalid --min-token-rate option %s", optarg); return -1; } break; case EXCEPTION_MAX_TTL: options.exception_max_ttl = atoi(optarg); TRC_INFO("Max TTL after an exception set to %d", options.exception_max_ttl); break; case HTTP_BLACKLIST_DURATION: options.http_blacklist_duration = atoi(optarg); TRC_INFO("HTTP blacklist duration set to %d", options.http_blacklist_duration); break; case API_KEY: options.api_key = std::string(optarg); TRC_INFO("HTTP API key set to %s", options.api_key.c_str()); break; case PIDFILE: options.pidfile = std::string(optarg); break; case DAEMON: options.daemon = true; break; case LOG_FILE: case LOG_LEVEL: // Ignore these options - they're handled by init_logging_options break; case HELP: usage(); return -1; default: TRC_ERROR("Unknown option. Run with --help for options.\n"); return -1; } } return 0; }
int main(int argc, char**argv) { // Set up our exception signal handler for asserts and segfaults. signal(SIGABRT, signal_handler); signal(SIGSEGV, signal_handler); sem_init(&term_sem, 0, 0); signal(SIGTERM, terminate_handler); struct options options; options.local_host = "127.0.0.1"; options.http_address = "0.0.0.0"; options.http_port = 11888; options.http_threads = 1; options.http_worker_threads = 50; options.homestead_http_name = "homestead-http-name.unknown"; options.digest_timeout = 300; options.home_domain = "home.domain"; options.sas_server = "0.0.0.0"; options.sas_system_name = ""; options.access_log_enabled = false; options.log_to_file = false; options.log_level = 0; options.memcached_write_format = MemcachedWriteFormat::JSON; options.target_latency_us = 100000; options.max_tokens = 1000; options.init_token_rate = 100.0; options.min_token_rate = 10.0; options.exception_max_ttl = 600; options.http_blacklist_duration = HttpResolver::DEFAULT_BLACKLIST_DURATION; options.pidfile = ""; options.daemon = false; if (init_logging_options(argc, argv, options) != 0) { return 1; } Log::setLoggingLevel(options.log_level); if ((options.log_to_file) && (options.log_directory != "")) { // Work out the program name from argv[0], stripping anything before the final slash. char* prog_name = argv[0]; char* slash_ptr = rindex(argv[0], '/'); if (slash_ptr != NULL) { prog_name = slash_ptr + 1; } Log::setLogger(new Logger(options.log_directory, prog_name)); } TRC_STATUS("Log level set to %d", options.log_level); std::stringstream options_ss; for (int ii = 0; ii < argc; ii++) { options_ss << argv[ii]; options_ss << " "; } std::string options_str = "Command-line options were: " + options_ss.str(); TRC_INFO(options_str.c_str()); if (init_options(argc, argv, options) != 0) { return 1; } if (options.daemon) { // Options parsed and validated, time to demonize before writing out our // pidfile or spwaning threads. 
int errnum = Utils::daemonize(); if (errnum != 0) { TRC_ERROR("Failed to convert to daemon, %d (%s)", errnum, strerror(errnum)); exit(0); } } if (options.pidfile != "") { int rc = Utils::lock_and_write_pidfile(options.pidfile); if (rc == -1) { // Failure to acquire pidfile lock TRC_ERROR("Could not write pidfile - exiting"); return 2; } } start_signal_handlers(); AccessLogger* access_logger = NULL; if (options.access_log_enabled) { TRC_STATUS("Access logging enabled to %s", options.access_log_directory.c_str()); access_logger = new AccessLogger(options.access_log_directory); } HealthChecker* hc = new HealthChecker(); hc->start_thread(); // Create an exception handler. The exception handler doesn't need // to quiesce the process before killing it. exception_handler = new ExceptionHandler(options.exception_max_ttl, false, hc); SAS::init(options.sas_system_name, "memento", SASEvent::CURRENT_RESOURCE_BUNDLE, options.sas_server, sas_write, create_connection_in_management_namespace); // Ensure our random numbers are unpredictable. unsigned int seed; seed = time(NULL) ^ getpid(); srand(seed); // Create alarm and communication monitor objects for the conditions // reported by memento. 
CommunicationMonitor* mc_comm_monitor = new CommunicationMonitor(new Alarm("memento", AlarmDef::MEMENTO_MEMCACHED_COMM_ERROR, AlarmDef::CRITICAL), "Memento", "Memcached"); Alarm* mc_vbucket_alarm = new Alarm("memento", AlarmDef::MEMENTO_MEMCACHED_VBUCKET_ERROR, AlarmDef::MAJOR); CommunicationMonitor* hs_comm_monitor = new CommunicationMonitor(new Alarm("memento", AlarmDef::MEMENTO_HOMESTEAD_COMM_ERROR, AlarmDef::CRITICAL), "Memento", "Homestead"); CommunicationMonitor* cass_comm_monitor = new CommunicationMonitor(new Alarm("memento", AlarmDef::MEMENTO_CASSANDRA_COMM_ERROR, AlarmDef::CRITICAL), "Memento", "Cassandra"); TRC_DEBUG("Starting alarm request agent"); AlarmReqAgent::get_instance().start(); MemcachedStore* m_store = new MemcachedStore(true, "./cluster_settings", mc_comm_monitor, mc_vbucket_alarm); AuthStore::SerializerDeserializer* serializer; std::vector<AuthStore::SerializerDeserializer*> deserializers; if (options.memcached_write_format == MemcachedWriteFormat::JSON) { serializer = new AuthStore::JsonSerializerDeserializer(); } else { serializer = new AuthStore::BinarySerializerDeserializer(); } deserializers.push_back(new AuthStore::JsonSerializerDeserializer()); deserializers.push_back(new AuthStore::BinarySerializerDeserializer()); AuthStore* auth_store = new AuthStore(m_store, serializer, deserializers, options.digest_timeout); LoadMonitor* load_monitor = new LoadMonitor(options.target_latency_us, options.max_tokens, options.init_token_rate, options.min_token_rate); LastValueCache* stats_aggregator = new MementoLVC(); // Create a DNS resolver and an HTTP specific resolver. 
int af = AF_INET; struct in6_addr dummy_addr; if (inet_pton(AF_INET6, options.local_host.c_str(), &dummy_addr) == 1) { TRC_DEBUG("Local host is an IPv6 address"); af = AF_INET6; } DnsCachedResolver* dns_resolver = new DnsCachedResolver("127.0.0.1"); HttpResolver* http_resolver = new HttpResolver(dns_resolver, af, options.http_blacklist_duration); HomesteadConnection* homestead_conn = new HomesteadConnection(options.homestead_http_name, http_resolver, load_monitor, hs_comm_monitor); // Create and start the call list store. CallListStore::Store* call_list_store = new CallListStore::Store(); call_list_store->configure_connection("localhost", 9160, cass_comm_monitor); // Test Cassandra connectivity. CassandraStore::ResultCode store_rc = call_list_store->connection_test(); if (store_rc == CassandraStore::OK) { // Store can connect to Cassandra, so start it. store_rc = call_list_store->start(); } if (store_rc != CassandraStore::OK) { TRC_ERROR("Unable to create call list store (RC = %d)", store_rc); exit(3); } HttpStack* http_stack = HttpStack::get_instance(); HttpStackUtils::SimpleStatsManager stats_manager(stats_aggregator); CallListTask::Config call_list_config(auth_store, homestead_conn, call_list_store, options.home_domain, stats_aggregator, hc, options.api_key); MementoSasLogger sas_logger; HttpStackUtils::PingHandler ping_handler; HttpStackUtils::SpawningHandler<CallListTask, CallListTask::Config> call_list_handler(&call_list_config, &sas_logger); HttpStackUtils::HandlerThreadPool pool(options.http_worker_threads, exception_handler); try { http_stack->initialize(); http_stack->configure(options.http_address, options.http_port, options.http_threads, exception_handler, access_logger, load_monitor, &stats_manager); http_stack->register_handler("^/ping$", &ping_handler); http_stack->register_handler("^/org.projectclearwater.call-list/users/[^/]*/call-list.xml$", pool.wrap(&call_list_handler)); http_stack->start(); } catch (HttpStack::Exception& e) { TRC_ERROR("Failed 
to initialize HttpStack stack - function %s, rc %d", e._func, e._rc); exit(2); } TRC_STATUS("Start-up complete - wait for termination signal"); sem_wait(&term_sem); TRC_STATUS("Termination signal received - terminating"); try { http_stack->stop(); http_stack->wait_stopped(); } catch (HttpStack::Exception& e) { TRC_ERROR("Failed to stop HttpStack stack - function %s, rc %d", e._func, e._rc); } call_list_store->stop(); call_list_store->wait_stopped(); hc->stop_thread(); delete homestead_conn; homestead_conn = NULL; delete call_list_store; call_list_store = NULL; delete http_resolver; http_resolver = NULL; delete dns_resolver; dns_resolver = NULL; delete load_monitor; load_monitor = NULL; delete auth_store; auth_store = NULL; delete call_list_store; call_list_store = NULL; delete m_store; m_store = NULL; delete exception_handler; exception_handler = NULL; delete hc; hc = NULL; // Stop the alarm request agent AlarmReqAgent::get_instance().stop(); delete mc_comm_monitor; mc_comm_monitor = NULL; delete mc_vbucket_alarm; mc_vbucket_alarm = NULL; delete hs_comm_monitor; hs_comm_monitor = NULL; delete cass_comm_monitor; cass_comm_monitor = NULL; SAS::term(); signal(SIGTERM, SIG_DFL); sem_destroy(&term_sem); }
void LoadMonitor::request_complete(int latency) { pthread_mutex_lock(&_lock); pending_count -= 1; smoothed_latency = (7 * smoothed_latency + latency) / 8; adjust_count += 1; if (adjust_count >= REQUESTS_BEFORE_ADJUSTMENT) { // We've seen the right number of requests, but ensure // that an appropriate amount of time has passed, so the rate doesn't // fluctuate wildly if latency spikes for a few milliseconds timespec current_time; clock_gettime(CLOCK_MONOTONIC_COARSE, ¤t_time); unsigned long current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_nsec / 1000); if (current_time_ms >= (last_adjustment_time_ms + (SECONDS_BEFORE_ADJUSTMENT * 1000))) { // This algorithm is based on the Welsh and Culler "Adaptive Overload // Control for Busy Internet Servers" paper, although based on a smoothed // mean latency, rather than the 90th percentile as per the paper. // Also, the additive increase is scaled as a proportion of the maximum // bucket size, rather than an absolute number as per the paper. float err = ((float) (smoothed_latency - target_latency)) / target_latency; // Work out the percentage of accepted requests (for logs) float accepted_percent = (accepted + rejected == 0) ? 
100.0 : 100 * (((float) accepted) / (accepted + rejected)); TRC_INFO("Accepted %f%% of requests, latency error = %f, overload responses = %d", accepted_percent, err, penalties); // latency is above where we want it to be, or we are getting overload responses from // Homer/Homestead, so adjust the rate downwards by a multiplicative factor if (err > DECREASE_THRESHOLD || penalties > 0) { float new_rate = bucket.rate / DECREASE_FACTOR; if (new_rate < min_token_rate) { new_rate = min_token_rate; } bucket.update_rate(new_rate); TRC_STATUS("Maximum incoming request rate/second decreased to %f " "(based on a smoothed mean latency of %d and %d upstream overload responses)", bucket.rate, smoothed_latency, penalties); } else if (err < INCREASE_THRESHOLD) { // Our latency is below the threshold, so increasing our permitted request rate would be // sensible. Before doing that, we check that we're using a significant proportion of our // current rate - if we're allowing 100 requests/sec, and we get 1 request/sec because it's // a quiet period, then it's going to be handled quickly, but that's not sufficient evidence // to increase our rate. 
float maximum_permitted_requests = bucket.rate * (current_time_ms - last_adjustment_time_ms) / 1000; // Arbitrary threshold - require 50% of our current permitted rate to be used float minimum_threshold = maximum_permitted_requests * 0.5; if (accepted > minimum_threshold) { float new_rate = bucket.rate + (-1 * err * bucket.max_size * INCREASE_FACTOR); bucket.update_rate(new_rate); TRC_STATUS("Maximum incoming request rate/second increased to %f " "(based on a smoothed mean latency of %d and %d upstream overload responses)", bucket.rate, smoothed_latency, penalties); } else { TRC_STATUS("Maximum incoming request rate/second unchanged - only handled %d requests" " recently, minimum threshold for a change is %f", accepted, minimum_threshold); } } else { TRC_DEBUG("Maximum incoming request rate/second is unchanged at %f", bucket.rate); } update_statistics(); // Reset counts last_adjustment_time_ms = current_time_ms; adjust_count = 0; accepted = 0; rejected = 0; penalties = 0; } } pthread_mutex_unlock(&_lock); }
/// Translates a dialled user/number into a URI via ENUM (RFC 6116 style
/// NAPTR lookups), iterating non-terminal rules up to MAX_DNS_QUERIES times.
///
/// @param user   The dial string to translate; if empty, no lookup is done.
/// @param trail  SAS trail used for event reporting.
/// @return The translated URI on success, or the empty string on failure or
///         if the translation did not complete.
std::string DNSEnumService::lookup_uri_from_user(const std::string& user, SAS::TrailId trail) const
{
  if (user.empty())
  {
    TRC_INFO("No dial string supplied, so don't do ENUM lookup");
    return std::string();
  }

  // Log starting ENUM processing.
  SAS::Event event(trail, SASEvent::ENUM_START, 0);
  event.add_var_param(user);
  SAS::report_event(event);

  // Determine the Application Unique String (AUS) from the user.  This is
  // used to form the first key, and also as the input into the regular
  // expressions.
  std::string aus = user_to_aus(user);
  // "string" holds the current working value: initially the AUS, then the
  // output of each applied rule (which is either the next key or, for a
  // terminal rule, the final URI).
  std::string string = aus;

  // Get the resolver to use.  This comes from thread-local data.
  DNSResolver* resolver = get_resolver();

  // Spin round until we've finished (successfully or otherwise) or we've done
  // the maximum number of queries.
  bool complete = false;
  bool failed = false;
  bool server_failed = false;  // tracks genuine ENUM-server failures for the comm monitor
  int dns_queries = 0;
  while ((!complete) &&
         (!failed) &&
         (dns_queries < MAX_DNS_QUERIES))
  {
    // Translate the key into a domain and issue a query for it.
    std::string domain = key_to_domain(string);
    struct ares_naptr_reply* naptr_reply = NULL;
    int status = resolver->perform_naptr_query(domain, naptr_reply, trail);

    if (status == ARES_SUCCESS)
    {
      // Parse the reply into a sorted list of rules.
      std::vector<Rule> rules;
      parse_naptr_reply(naptr_reply, rules);

      // Now spin through the rules, looking for the first match.
      std::vector<DNSEnumService::Rule>::const_iterator rule;
      for (rule = rules.begin();
           rule != rules.end();
           ++rule)
      {
        if (rule->matches(string))
        {
          // We found a match, so apply the regular expression to the AUS (not
          // the previous string - this is what ENUM mandates).  If this was a
          // terminal rule, we now have a SIP URI and we're finished.
          // Otherwise, the output of the regular expression is used as the
          // next key.
          try
          {
            string = rule->replace(aus, trail);
            complete = rule->is_terminal();
          }
          catch(...)
          // LCOV_EXCL_START Only throws if expression too complex or similar hard-to-hit conditions
          {
            TRC_ERROR("Failed to translate number with regex");
            failed = true;
            // LCOV_EXCL_STOP
          }
          break;
        }
      }

      // If we didn't find a match (and so hit the end of the list), consider
      // this a failure.
      failed = failed || (rule == rules.end());
    }
    else if (status == ARES_ENOTFOUND)
    {
      // Our DNS query failed, so give up, but this is not an ENUM server issue -
      // we just tried to look up an unknown name.
      failed = true;
    }
    else
    {
      // Our DNS query failed.  Give up, and track an ENUM server failure.
      failed = true;
      server_failed = true;
    }

    // Free off the NAPTR reply if we have one.
    if (naptr_reply != NULL)
    {
      resolver->free_naptr_reply(naptr_reply);
      naptr_reply = NULL;
    }

    dns_queries++;
  }

  // Log that we've finished processing (and whether it was successful or not).
  if (complete)
  {
    TRC_DEBUG("Enum lookup completes: %s", string.c_str());
    SAS::Event event(trail, SASEvent::ENUM_COMPLETE, 0);
    event.add_var_param(user);
    event.add_var_param(string);
    SAS::report_event(event);
  }
  else
  {
    TRC_WARNING("Enum lookup did not complete for user %s", user.c_str());
    SAS::Event event(trail, SASEvent::ENUM_INCOMPLETE, 0);
    event.add_var_param(user);
    SAS::report_event(event);
    // On failure, we must return an empty (rather than incomplete) string.
    string = std::string("");
  }

  // Report state of last communication attempt (which may potentially set/clear
  // an associated alarm).
  if (_comm_monitor)
  {
    if (server_failed)
    {
      _comm_monitor->inform_failure();
    }
    else
    {
      _comm_monitor->inform_success();
    }
  }

  return string;
}
/// Verifies that the supplied authentication vector is valid. bool verify_auth_vector(rapidjson::Document* av, const std::string& impi, SAS::TrailId trail) { bool rc = true; rapidjson::StringBuffer buffer; rapidjson::Writer<rapidjson::StringBuffer> writer(buffer); av->Accept(writer); std::string av_str = buffer.GetString(); TRC_DEBUG("Verifying AV: %s", av_str.c_str()); // Check the AV is well formed. if (av->HasMember("aka")) { // AKA is specified, check all the expected parameters are present. TRC_DEBUG("AKA specified"); rapidjson::Value& aka = (*av)["aka"]; if (!(((aka.HasMember("challenge")) && (aka["challenge"].IsString())) && ((aka.HasMember("response")) && (aka["response"].IsString())) && ((aka.HasMember("cryptkey")) && (aka["cryptkey"].IsString())) && ((aka.HasMember("integritykey")) && (aka["integritykey"].IsString())))) { // Malformed AKA entry TRC_INFO("Badly formed AKA authentication vector for %s", impi.c_str()); rc = false; SAS::Event event(trail, SASEvent::AUTHENTICATION_FAILED_MALFORMED, 0); std::string error_msg = std::string("AKA authentication vector is malformed: ") + av_str.c_str(); event.add_var_param(error_msg); SAS::report_event(event); } } else if (av->HasMember("digest")) { // Digest is specified, check all the expected parameters are present. TRC_DEBUG("Digest specified"); rapidjson::Value& digest = (*av)["digest"]; if (!(((digest.HasMember("realm")) && (digest["realm"].IsString())) && ((digest.HasMember("qop")) && (digest["qop"].IsString())) && ((digest.HasMember("ha1")) && (digest["ha1"].IsString())))) { // Malformed digest entry TRC_INFO("Badly formed Digest authentication vector for %s", impi.c_str()); rc = false; SAS::Event event(trail, SASEvent::AUTHENTICATION_FAILED_MALFORMED, 0); std::string error_msg = std::string("Digest authentication vector is malformed: ") + av_str.c_str();; event.add_var_param(error_msg); SAS::report_event(event); } } else { // Neither AKA nor Digest information present. 
TRC_INFO("No AKA or Digest object in authentication vector for %s", impi.c_str()); rc = false; SAS::Event event(trail, SASEvent::AUTHENTICATION_FAILED_MALFORMED, 0); std::string error_msg = std::string("Authentication vector is malformed: ") + av_str.c_str(); event.add_var_param(error_msg); SAS::report_event(event); } return rc; }