HTTPCode HttpConnection::send_delete(const std::string& path,
                                     SAS::TrailId trail,
                                     const std::string& body,
                                     std::string& response)
{
  std::map<std::string, std::string> unused_headers;

  return send_delete(path, unused_headers, response, trail, body);
}
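
This overload simply fills in an empty header map and forwards to the fuller send_delete signature. A minimal, self-contained sketch of the same convenience-overload pattern (the DemoConnection type, parameter order and return codes below are illustrative, not the real cpp-common API):

#include <map>
#include <string>

// Illustrative sketch only: a cut-down client type showing the same
// "thin overload fills in defaults and delegates" shape used above.
class DemoConnection
{
public:
  // Full signature: the caller supplies any extra headers.
  int send_delete(const std::string& path,
                  std::map<std::string, std::string>& headers,
                  const std::string& body,
                  std::string& response)
  {
    (void)path; (void)headers; (void)body;
    response.clear();   // a real client would issue the HTTP DELETE here
    return 202;         // e.g. HTTP_ACCEPTED
  }

  // Convenience overload for callers that don't care about headers.
  int send_delete(const std::string& path,
                  const std::string& body,
                  std::string& response)
  {
    std::map<std::string, std::string> unused_headers;
    return send_delete(path, unused_headers, body, response);
  }
};
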
Example #2
/*
 * All done talking...hang up the phone and reset terminal thingies
 */
void
quit(void)
{

    if (curses_initialized) {
        wmove(his_win.x_win, his_win.x_nlines-1, 0);
        wclrtoeol(his_win.x_win);
        wrefresh(his_win.x_win);
        endwin();
    }
    if (invitation_waiting)
        send_delete();
    exit(0);
}
/*
 * Thread main loop.
 */
void *thread_main(void *arg)
{
	while (1) {
		int want_get = (random() % 100) < probability_get;
		int fd = unused_fd();
		int usec;

		if (want_get)
			send_get(fd);
		else
			send_delete(fd);

		release_fd(fd);

		pthread_mutex_lock(&stats_mutex);
		iteration_count++;
		pthread_mutex_unlock(&stats_mutex);
	}
}
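
The loop above relies on helpers defined elsewhere in that program (probability_get, unused_fd, release_fd, send_get, send_delete, stats_mutex, iteration_count). A self-contained sketch of the same worker pattern, with all of those pieces stubbed out as assumptions, might look like this:

#include <cstdlib>
#include <mutex>
#include <thread>
#include <vector>

// Every name below is a stand-in for the helpers used in the example above.
static int probability_get = 80;            // % of iterations that issue a GET
static std::mutex stats_mutex;
static long iteration_count = 0;

static int  unused_fd()             { return 3; }  // stub: hand out a free descriptor
static void release_fd(int)         {}             // stub: return it to the pool
static void send_get(int /*fd*/)    {}             // stub: issue a GET on the socket
static void send_delete(int /*fd*/) {}             // stub: issue a DELETE on the socket

static void thread_main(int iterations)
{
  for (int i = 0; i < iterations; ++i)
  {
    // Pick GET or DELETE at random according to the configured probability.
    bool want_get = (std::rand() % 100) < probability_get;
    int fd = unused_fd();

    if (want_get)
      send_get(fd);
    else
      send_delete(fd);

    release_fd(fd);

    // Count the iteration under the stats lock.
    std::lock_guard<std::mutex> lock(stats_mutex);
    iteration_count++;
  }
}

int main()
{
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i)
  {
    workers.emplace_back(thread_main, 1000);
  }
  for (std::thread& w : workers)
  {
    w.join();
  }
  return 0;
}
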
Example #4
/* delete a state object */
void
delete_state(struct state *st)
{
    struct connection *const c = st->st_connection;
    struct state *old_cur_state = cur_state == st? NULL : cur_state;

    openswan_log("deleting state #%lu (%s)",
                 st->st_serialno,
                 enum_show(&state_names, st->st_state));

    /*
     * For most IKEv2 exchanges we may have more work to do after marking the
     * state deleted, so we do not free it here; the state is actually freed
     * back in the main loop once all the work is done.
     */
    if(st->st_ikev2) {
        /* child SA */
        if(st->st_clonedfrom != 0) {
            DBG(DBG_CONTROL, DBG_log("received request to delete child state"));
            if(st->st_state == STATE_CHILDSA_DEL) {
		DBG(DBG_CONTROL, DBG_log("now deleting the child state"));

            } else {
                /* Only send the delete request if the child SA is
                 * established; otherwise continue with the deletion.
                 */
		if(IS_CHILD_SA_ESTABLISHED(st)) {
                    DBG(DBG_CONTROL, DBG_log("sending Child SA delete request"));
                    send_delete(st);
                    change_state(st, STATE_CHILDSA_DEL);

                    /* actual deletion happens when we receive the peer's response */
                    return;
		}
            }

        } else {
            DBG(DBG_CONTROL, DBG_log("considering request to delete IKE parent state"));
            /* parent sa */
            if(st->st_state == STATE_IKESA_DEL) {
                DBG(DBG_CONTROL, DBG_log("now deleting the IKE (or parent) state"));

            } else {
		/* Check whether a secured INFORMATIONAL
		 * exchange can be sent.
		 */
		if(st->st_skey_ei.ptr && st->st_skey_ai.ptr
                   && st->st_skey_er.ptr && st->st_skey_ar.ptr) {
                    DBG(DBG_CONTROL, DBG_log("sending IKE SA delete request"));
                    send_delete(st);
                    change_state(st, STATE_IKESA_DEL);

                    /* actual deletion happens when we receive the peer's response */
                    return;
		}
            }
        }
    }

    /* If DPD is enabled on this state object, clear any pending events */
    if(st->st_dpd_event != NULL)
            delete_dpd_event(st);

    /* if there is a suspended state transition, disconnect us */
    if (st->st_suspended_md != NULL)
    {
	passert(st->st_suspended_md->st == st);
	DBG(DBG_CONTROL, DBG_log("disconnecting state #%lu from md",
	    st->st_serialno));
	st->st_suspended_md->st = NULL;
    }

    /* tell the other side of any IPSEC SAs that are going down */
    if (IS_IPSEC_SA_ESTABLISHED(st->st_state)
    || IS_ISAKMP_SA_ESTABLISHED(st->st_state))
	send_delete(st);

    delete_event(st);	/* delete any pending timer event */

    /* Ditch anything pending on ISAKMP SA being established.
     * Note: this must be done before the unhash_state to prevent
     * flush_pending_by_state inadvertently and prematurely
     * deleting our connection.
     */
    flush_pending_by_state(st);

    /* if there is anything in the cryptographic queue, then remove this
     * state from it.
     */
    delete_cryptographic_continuation(st);

    /* effectively, this deletes any ISAKMP SA that this state represents */
    unhash_state(st);

    /* tell kernel to delete any IPSEC SA
     * ??? we ought to tell peer to delete IPSEC SAs
     */
    if (IS_IPSEC_SA_ESTABLISHED(st->st_state)
	|| IS_CHILD_SA_ESTABLISHED(st))
	delete_ipsec_sa(st, FALSE);
    else if (IS_ONLY_INBOUND_IPSEC_SA_ESTABLISHED(st->st_state))
	delete_ipsec_sa(st, TRUE);

    if (c->newest_ipsec_sa == st->st_serialno)
	c->newest_ipsec_sa = SOS_NOBODY;

    if (c->newest_isakmp_sa == st->st_serialno)
	c->newest_isakmp_sa = SOS_NOBODY;

    /*
     * fake a state change here while we are still associated with a
     * connection.  Without this the state logging (when enabled) cannot
     * work out what happened.
     */
    fake_state(st, STATE_UNDEFINED);

    st->st_connection = NULL;	/* we might be about to free it */
    cur_state = old_cur_state;	/* without st_connection, st isn't complete */
    connection_discard(c);

    change_state(st, STATE_UNDEFINED);

    release_whack(st);

    change_state(st, STATE_CHILDSA_DEL);
}
HTTPCode ChronosInternalConnection::resynchronise_with_single_node(
                             const std::string server_to_sync,
                             std::vector<std::string> cluster_nodes,
                             std::string localhost)
{
  TRC_DEBUG("Querying %s for timers", server_to_sync.c_str());

  // Get the cluster view ID from the global configuration
  std::string cluster_view_id;
  __globals->get_cluster_view_id(cluster_view_id);

  std::string response;
  HTTPCode rc;

  // Loop sending GETs to the server while the response is a 206
  do
  {
    std::map<TimerID, int> delete_map;

    rc = send_get(server_to_sync, 
                  localhost, 
                  PARAM_SYNC_MODE_VALUE_SCALE, 
                  cluster_view_id,
                  MAX_TIMERS_IN_RESPONSE, 
                  response);

    if ((rc == HTTP_PARTIAL_CONTENT) ||
        (rc == HTTP_OK))
    {
      // Parse the GET response
      rapidjson::Document doc;
      doc.Parse<0>(response.c_str());

      if (doc.HasParseError())
      {
        // We've failed to parse the document as JSON. This suggests that
        // there's something seriously wrong with the node we're trying
        // to query, so don't retry.
        TRC_WARNING("Failed to parse document as JSON");
        rc = HTTP_BAD_REQUEST;
        break;
      }

      try
      {
        JSON_ASSERT_CONTAINS(doc, JSON_TIMERS);
        JSON_ASSERT_ARRAY(doc[JSON_TIMERS]);
        const rapidjson::Value& ids_arr = doc[JSON_TIMERS];
        int total_timers = ids_arr.Size();
        int count_invalid_timers = 0;

        for (rapidjson::Value::ConstValueIterator ids_it = ids_arr.Begin();
             ids_it != ids_arr.End();
             ++ids_it)
        {
          try
          {
            const rapidjson::Value& id_arr = *ids_it;
            JSON_ASSERT_OBJECT(id_arr);

            // Get the timer ID
            TimerID timer_id;
            JSON_GET_INT_MEMBER(id_arr, JSON_TIMER_ID, timer_id);

            // Get the old replicas
            std::vector<std::string> old_replicas;
            JSON_ASSERT_CONTAINS(id_arr, JSON_OLD_REPLICAS);
            JSON_ASSERT_ARRAY(id_arr[JSON_OLD_REPLICAS]);
            const rapidjson::Value& old_repl_arr = id_arr[JSON_OLD_REPLICAS];
            for (rapidjson::Value::ConstValueIterator repl_it = old_repl_arr.Begin();
                                                      repl_it != old_repl_arr.End();
                                                      ++repl_it)
            {
              JSON_ASSERT_STRING(*repl_it);
              old_replicas.push_back(repl_it->GetString());
            }

            // Get the timer.
            JSON_ASSERT_CONTAINS(id_arr, JSON_TIMER);
            JSON_ASSERT_OBJECT(id_arr[JSON_TIMER]);
            const rapidjson::Value& timer_obj = id_arr[JSON_TIMER];

            bool store_timer = false;
            std::string error_str;
            bool replicated_timer; 
            Timer* timer = Timer::from_json_obj(timer_id,
                                                0,
                                                error_str,
                                                replicated_timer,
                                                (rapidjson::Value&)timer_obj);

            if (!timer)
            {
              count_invalid_timers++;
              TRC_INFO("Unable to create timer - error: %s", error_str.c_str());
              continue;
            }
            else if (!replicated_timer)
            {
              count_invalid_timers++;
              TRC_INFO("Unreplicated timer in response - ignoring");
              delete timer; timer = NULL;
              continue;
            }

            // Decide what we're going to do with this timer.
            int old_level = 0;
            bool in_old_replica_list = get_replica_level(old_level, 
                                                         localhost,
                                                         old_replicas);
            int new_level = 0;
            bool in_new_replica_list = get_replica_level(new_level,
                                                         localhost,
                                                         timer->replicas);

            // Add the timer to the delete map we're building up
            delete_map.insert(std::pair<TimerID, int>(timer_id, new_level));

            if (in_new_replica_list)
            {
              // Add the timer to my store if I can. 
              if (in_old_replica_list)
              {
                if (old_level >= new_level)
                {
                  // Add/update timer
                  // LCOV_EXCL_START - Adding timer paths are tested elsewhere
                  store_timer = true;
                  // LCOV_EXCL_STOP
                }
              }
              else
              {
                // Add/update timer
                store_timer = true;
              }

              // Now loop through the new replicas.
              int index = 0;
              for (std::vector<std::string>::iterator it = timer->replicas.begin();
                                                      it != timer->replicas.end();
                                                      ++it, ++index)
              {
                if (index <= new_level)
                {
                  // Do nothing. We've covered adding the timer to the store above
                }
                else
                {
                  // We can potentially replicate the timer to one of these nodes. 
                  // Check whether the new replica was involved previously
                  int old_rep_level = 0;
                  bool is_new_rep_in_old_rep = get_replica_level(old_rep_level,
                                                                 *it,
                                                                 old_replicas);
                  if (is_new_rep_in_old_rep)
                  {
                    if (old_rep_level >= new_level)
                    {
                      _replicator->replicate_timer_to_node(timer, *it);
                    }
                  }
                  else
                  {
                    _replicator->replicate_timer_to_node(timer, *it);
                  }
                }
              }

              // Now loop through the old replicas. We can send a tombstone
              // replication to any node that used to be a replica and sat no
              // higher in the old replica list than our own new position.
              index = 0;
              for (std::vector<std::string>::iterator it = old_replicas.begin();
                                                      it != old_replicas.end();
                                                      ++it, ++index)
              {
                if (index >= new_level)
                {
                  // We can potentially tombstone the timer to one of these nodes.
                  bool old_rep_in_new_rep = get_replica_presence(*it,
                                                                 timer->replicas);

                  if (!old_rep_in_new_rep)
                  {
                    Timer* timer_copy = new Timer(*timer);
                    timer_copy->become_tombstone();
                    _replicator->replicate_timer_to_node(timer_copy, *it);
                    delete timer_copy; timer_copy = NULL;
                  }
                }
              }
            }

            // Add the timer to the store if we can. This is done 
            // last so we don't invalidate the pointer to the timer.
            if (store_timer)
            {
              _handler->add_timer(timer);
            }
            else
            {
              delete timer; timer = NULL;
            }

            // Finally, note that we processed the timer
            _timers_processed_stat->increment();
          }
          catch (JsonFormatError err)
          {
            // A single entry is badly formatted. This is unexpected but we'll try 
            // to keep going and process the rest of the timers. 
            count_invalid_timers++;
            _invalid_timers_processed_stat->increment();
            TRC_INFO("JSON entry was invalid (hit error at %s:%d)",
                     err._file, err._line);
          }
        }

        // Check whether we were able to process any timers; if not, bail out,
        // as there's something wrong with the node we're querying.
        if ((total_timers != 0) && 
           (count_invalid_timers == total_timers))
        {
          TRC_WARNING("Unable to process any timer entries in GET response");
          rc = HTTP_BAD_REQUEST;
        }
      }
      catch (JsonFormatError err)
      {
        // We've failed to find the Timers array. This suggests that
        // there's something seriously wrong with the node we're trying
        // to query, so don't retry.
        TRC_WARNING("JSON body didn't contain the Timers array");
        rc = HTTP_BAD_REQUEST;
      }

      // Send a DELETE to all the nodes to update their timer references
      if (delete_map.size() > 0)
      {
        std::string delete_body = create_delete_body(delete_map);
        for (std::vector<std::string>::iterator it = cluster_nodes.begin();
                                                it != cluster_nodes.end();
                                                ++it)
        {
          HTTPCode delete_rc = send_delete(*it, delete_body);
          if (delete_rc != HTTP_ACCEPTED)
          {
            // We've received an error response to the DELETE request. There's
            // not much more we can do here (a timeout will already have been
            // retried). A failed DELETE won't prevent the scaling operation
            // from finishing; it just means that we'll tell other nodes
            // about timers inefficiently.
            TRC_INFO("Error response (%d) to DELETE request to %s", 
                     delete_rc,
                    (*it).c_str());
          }
        }
      }
    }
    else
    {
      // We've received an error response to the GET request. A timeout
      // will already have been retried by the underlying HTTPConnection, 
      // so don't retry again
      TRC_WARNING("Error response (%d) to GET request to %s", 
                  rc, 
                  server_to_sync.c_str());
    }
  }
  while (rc == HTTP_PARTIAL_CONTENT);

  return rc;
}
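
The heart of this function is the per-timer decision logic in the middle of the loop: whether the local node should store the timer, which of the new replicas it should push the timer to, and which old replicas should be sent a tombstone (all three apply only when the local node appears in the timer's new replica list). A distilled sketch of those rules, assuming, as the usage above suggests, that get_replica_level reports whether a node appears in a replica list and, if so, its index:

#include <algorithm>
#include <string>
#include <vector>

// Sketch only, not the Chronos API: a "level" is a node's index in a replica list.
static bool replica_level(int& level,
                          const std::string& node,
                          const std::vector<std::string>& replicas)
{
  std::vector<std::string>::const_iterator it =
    std::find(replicas.begin(), replicas.end(), node);
  if (it == replicas.end())
  {
    return false;
  }
  level = (int)(it - replicas.begin());
  return true;
}

// Rule 1: store the timer locally if we're a brand-new replica, or if our old
// index was no smaller than our new one.
static bool should_store(bool in_old_list, int old_level, int new_level)
{
  return !in_old_list || (old_level >= new_level);
}

// Rule 2: push the timer to a peer that sits below us in the new list, unless
// that peer already held the timer at an index smaller than our new level.
static bool should_replicate_to(const std::string& peer,
                                int our_new_level,
                                const std::vector<std::string>& old_replicas)
{
  int peer_old_level = 0;
  bool peer_was_replica = replica_level(peer_old_level, peer, old_replicas);
  return !peer_was_replica || (peer_old_level >= our_new_level);
}

// Rule 3: tombstone an old replica that sits at or below our new level and
// no longer appears in the new replica list.
static bool should_tombstone(int old_peer_index,
                             const std::string& old_peer,
                             int our_new_level,
                             const std::vector<std::string>& new_replicas)
{
  int ignored = 0;
  return (old_peer_index >= our_new_level) &&
         !replica_level(ignored, old_peer, new_replicas);
}
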