Code Example #1
/* Verify that the discovery service refreshes its last-update time at
 * least once per peer_update_interval, over five consecutive intervals. */
void update_time_checks(discovery_service *svc) {
    time_t last_update_time = get_last_update_time(svc);
    int i;
    for (i = 0; i < 5; i++) {
        /* Emit a progress marker while the check sleeps. */
        printf(".");
        fflush(stdout);
        sleep(peer_update_interval);
        /* The timestamp must have advanced during the interval we slept. */
        time_t temp = get_last_update_time(svc);
        JNXCHECK(temp > last_update_time);
        last_update_time = temp;
    }
    printf("\n");
}
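The check above assumes get_last_update_time can be called safely while the service's own threads keep advancing the timestamp. A minimal sketch of such an accessor pair, assuming a mutex-guarded timestamp; the struct layout, field names, and _sketch helpers below are illustrative, not whisper-core's actual code:

#include <pthread.h>
#include <time.h>

/* Hypothetical stand-in for the state the snippets on this page query;
 * the field names are assumptions, not the project's real layout. */
typedef struct {
    pthread_mutex_t update_lock;
    time_t last_update;
} discovery_state_sketch;

time_t get_last_update_time_sketch(discovery_state_sketch *s) {
    pthread_mutex_lock(&s->update_lock);
    time_t t = s->last_update;          /* read under the lock */
    pthread_mutex_unlock(&s->update_lock);
    return t;
}

void set_last_update_time_sketch(discovery_state_sketch *s, time_t t) {
    pthread_mutex_lock(&s->update_lock);
    s->last_update = t;                 /* written by packet handlers */
    pthread_mutex_unlock(&s->update_lock);
}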
Code Example #2
File: discovery.c Project: azalucel/whisper-core
// Polling update strategy
void *polling_update_loop(void *data) {
  // The slightly more complicated logic here ensures that updates do not
  // happen more frequently than peer_update_interval on average.
  //
  // This means that whichever node sends a LIST packet last, all of the
  // discovery services on the network will synchronise to that time. Since
  // the service sends a local peer packet every time a LIST packet is
  // received, we update the last_update time in the send_peer_packet
  // function. This way we ensure that regardless of which service sends
  // the LIST packets, all services update the last_update time, i.e.
  // synchronise on that packet.
  //
  // The only time we may see less than peer_update_interval between
  // updates is when a new discovery service joins the broadcast group.
  int old_cancel_type;
  // PTHREAD_CANCEL_ASYNCHRONOUS is a cancel *type*, so it must be set with
  // pthread_setcanceltype(); it lets pthread_cancel() interrupt the sleep.
  pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_cancel_type);
  discovery_service *svc = (discovery_service *) data;
  time_t next_update = get_last_update_time(svc) + peer_update_interval;
  while (1) {
    if (!svc->isrunning) {
      return NULL;
    }
    if (next_update <= time(0)) {
      send_discovery_request(svc);
    }
    next_update += peer_update_interval;
    // sleep() takes an unsigned count of seconds: if we have already
    // overrun next_update, skip the sleep rather than underflowing.
    time_t now = time(0);
    if (next_update > now)
      sleep((unsigned int)(next_update - now));
  }
  return NULL;
}
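A short usage sketch for the loop above: the isrunning flag provides a graceful stop once per interval, while pthread_cancel takes effect immediately because the loop switched itself to asynchronous cancellation. The wiring below is illustrative, not whisper-core's actual start/stop code:

#include <pthread.h>

/* Hypothetical service handle; only the flag the loop polls is shown. */
typedef struct {
    volatile int isrunning;
} discovery_service_sketch;

void stop_discovery_polling_sketch(discovery_service_sketch *svc,
                                   pthread_t poller) {
    svc->isrunning = 0;       /* loop exits at the top of its next pass */
    pthread_cancel(poller);   /* async cancellation interrupts the sleep */
    pthread_join(poller, NULL);
}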
Code Example #3
void ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db(
        const ns_synchronized_time & update_start_time,
        ns_local_buffer_connection & local_buffer_sql,
        ns_sql & central_db) {
    if (time_of_last_update_from_central_db.local_time == ns_default_update_time)
        get_last_update_time(local_buffer_sql);
    buffered_capture_schedule.load_if_needed(&local_buffer_sql);

    ns_sql_result updated_data;

    const std::string altered_data_condition(
        std::string("time_stamp > FROM_UNIXTIME(") + ns_to_string(time_of_last_update_from_central_db.local_time) +
        ") AND time_stamp <= FROM_UNIXTIME(" + ns_to_string(update_start_time.local_time) + ") ");

    const unsigned long new_timestamp(time_of_last_update_from_central_db.remote_time);

    ns_get_all_column_data_from_table("buffered_capture_schedule",buffered_capture_schedule.table_format.column_names,
                                      std::string("WHERE ") + altered_data_condition + " AND uploaded_to_central_db != 3",
                                      updated_data,&local_buffer_sql);

    std::vector<ns_db_key_mapping> mappings(updated_data.size());
    if (updated_data.size() > 8)
        image_server.register_server_event(ns_image_server_event("ns_buffered_capture_scheduler::Committing ") << updated_data.size() << " recorded capture events to the central database.",&central_db);
    std::vector<ns_ex *> errors;
    for (unsigned long i = 0; i < updated_data.size(); i++) {
        try {
            unsigned long captured_image_id = atol(updated_data[i][buffered_capture_schedule.image_id_column].c_str());
            unsigned long problem_id = atol(updated_data[i][buffered_capture_schedule.problem_column].c_str());

            unsigned long central_captured_image_id(0),
                     central_problem_id(0);
            if (captured_image_id != 0 || problem_id != 0) {
                central_db << "SELECT captured_image_id,problem FROM capture_schedule WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                ns_sql_result res;
                central_db.get_rows(res);
                if (res.size() == 0)
                    throw ns_ex("Could not find capture schedule entry in central db for sample id " ) << updated_data[i][buffered_capture_schedule.id_column] << " finishing at time " << updated_data[i][buffered_capture_schedule.time_at_finish_column];
                central_captured_image_id = atol(res[0][0].c_str());
                central_problem_id = atol(res[0][1].c_str());
            }

            const bool need_to_make_new_capture_image(captured_image_id != 0 && central_captured_image_id != captured_image_id);
            //we need to make new entries in the central database for any new images or events
            if (need_to_make_new_capture_image) {
                mappings[i].captured_image.load_from_db(captured_image_id,&local_buffer_sql);
                mappings[i].old_captured_image = mappings[i].captured_image;
                mappings[i].old_image = mappings[i].image;
                if (mappings[i].captured_image.capture_images_image_id != 0)
                    mappings[i].image.load_from_db(mappings[i].captured_image.capture_images_image_id,&local_buffer_sql);
            }
            else {
                mappings[i].old_image = mappings[i].image;
                mappings[i].old_captured_image = mappings[i].captured_image;
                mappings[i].captured_image.captured_images_id = central_captured_image_id;
            }

            bool need_to_make_new_problem(problem_id != 0 && central_problem_id != problem_id);
            if (need_to_make_new_problem) {
                local_buffer_sql << "SELECT id,event,time,minor FROM buffered_host_event_log WHERE id = " << updated_data[i][buffered_capture_schedule.problem_column];
                ns_sql_result res;
                local_buffer_sql.get_rows(res);
                mappings[i].old_problem_id = mappings[i].problem_id;
                if (res.size() == 0) {
                    mappings[i].problem_id = image_server.register_server_event(ns_ex("Could not find problem id ") << updated_data[i][buffered_capture_schedule.problem_column] << " in local database buffer!",&central_db);
                    need_to_make_new_problem=false;
                }
                else {
                    mappings[i].problem_text = res[0][1];
                    mappings[i].problem_time = atol(res[0][2].c_str());
                    mappings[i].problem_minor = res[0][3] != "0";
                }
            }
            else {
                mappings[i].old_problem_id = mappings[i].problem_id;
                mappings[i].problem_id = central_problem_id;
            }

            if (need_to_make_new_capture_image && mappings[i].image.id != 0) {
                mappings[i].image.id = 0;
                mappings[i].image.save_to_db(0,&central_db,false);
                mappings[i].captured_image.capture_images_image_id = mappings[i].image.id;
            }
            if (need_to_make_new_capture_image) {
                mappings[i].captured_image.captured_images_id = 0;
                mappings[i].captured_image.save(&central_db);
            }
            if (need_to_make_new_problem) {
                mappings[i].old_problem_id = mappings[i].problem_id;
                ns_image_server_event ev;
                ev << mappings[i].problem_text;
                if (mappings[i].problem_minor) ev << ns_ts_minor_event;
                ev.set_time(mappings[i].problem_time);
                mappings[i].problem_id = image_server.register_server_event(ev,&central_db);
            }
        }
        catch(ns_ex & ex) {
            mappings[i].error << "Error while making mapping: " << ex.text();
            errors.push_back(&mappings[i].error);
        }
    }

    for (unsigned long i = 0; i < updated_data.size(); i++) {
        if (mappings[i].error.text().size() > 0)
            continue;
        try {
            central_db << "Update capture_schedule SET ";
            for (unsigned int j = 0; j < buffered_capture_schedule.table_format.column_names.size(); ++j) {
                if (j == buffered_capture_schedule.id_column
                        || j == buffered_capture_schedule.image_id_column ||
                        j == buffered_capture_schedule.problem_column || j ==
                        buffered_capture_schedule.timestamp_column) continue;
                central_db  << "`" << buffered_capture_schedule.table_format.column_names[j] << "`='" << central_db.escape_string(updated_data[i][j]) << "',";
            }
            central_db << "captured_image_id = " << mappings[i].captured_image.captured_images_id
                       << ", problem = " << mappings[i].problem_id;
            //back-date the time_stamp to the central clock's last-sync time so the row does not trigger a re-download when the central server refreshes its cache
            central_db << ", time_stamp=FROM_UNIXTIME("<< new_timestamp <<") ";
            central_db << " WHERE id = " << updated_data[i][0];
            central_db.send_query();
            if (mappings[i].old_captured_image.captured_images_id != mappings[i].captured_image.captured_images_id ||
                    mappings[i].old_problem_id != mappings[i].problem_id) {
                local_buffer_sql << "UPDATE buffered_capture_schedule SET captured_image_id = " << mappings[i].captured_image.captured_images_id
                                 << ", problem = " << mappings[i].problem_id << ", time_stamp = FROM_UNIXTIME("<< new_timestamp
                                 <<") WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_captured_images WHERE id = " << mappings[i].old_captured_image.captured_images_id;
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_images WHERE id = " << mappings[i].old_image.id;
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_host_event_log WHERE id = " << mappings[i].old_problem_id;
                local_buffer_sql.send_query();
            }
            if (updated_data[i][buffered_capture_schedule.time_at_finish_column] != "0") {
                local_buffer_sql << "DELETE FROM buffered_capture_schedule WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                local_buffer_sql.send_query();
                // local_buffer_sql.clear_query();
            }
        }
        catch(ns_ex & ex) {
            mappings[i].error << "Error during central update: " << ex.text();
            errors.push_back(&mappings[i].error);
        }
    }
    for (unsigned int i = 0; i < mappings.size(); i++) {
        if (mappings[i].error.text().size() > 0) {
            local_buffer_sql << "UPDATE buffered_capture_schedule SET uploaded_to_central_db=3 WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
            local_buffer_sql.send_query();
            image_server.register_server_event(ns_image_server::ns_register_in_central_db_with_fallback,ns_ex("Could not update central db: ") << mappings[i].error.text());
        }
    }
    //update modified sample data.
    updated_data.resize(0);
    if (capture_samples.column_names.size() == 0) {
        capture_samples.load_column_names_from_db("capture_samples",&central_db);
        if (capture_samples.column_names.size() == 0)
            throw ns_ex("ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db()::Capture sample table appears to have no columns!");
    }
    if (capture_samples.column_names[0] != "id")
        throw ns_ex("ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db()::Capture sample table does not have its id in the first column!");

    ns_get_all_column_data_from_table("buffered_capture_samples",capture_samples.column_names,
                                      std::string("WHERE ") + altered_data_condition,
                                      updated_data,&local_buffer_sql);
    if (capture_samples.time_stamp_column_id == -1)
        throw ns_ex("Could not find capture sample time stamp column!");
    for (unsigned int i = 0; i < updated_data.size(); i++) {
        central_db << "UPDATE capture_samples SET ";
        //skip the id column; we don't want to cause any unnecessary db shuffling by changing ids (even if we would be setting them to the values they already have)
        central_db  << "`" << capture_samples.column_names[1] << "`='" << central_db.escape_string(updated_data[i][1]);
        for (unsigned int j = 2; j < capture_samples.column_names.size(); ++j) {
            if (j == capture_samples.time_stamp_column_id)
                continue;
            else central_db  << "',`" << capture_samples.column_names[j] << "`='" << central_db.escape_string(updated_data[i][j]);
        }
        central_db << "',time_stamp=FROM_UNIXTIME(" << new_timestamp << ") ";
        central_db << "WHERE id = " << updated_data[i][0];
        central_db.send_query();
    }

}
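The altered_data_condition above selects the half-open window (time_of_last_update_from_central_db, update_start_time] on the local clock, and committed rows are re-stamped with new_timestamp, the central clock's time of the last sync. A minimal sketch of that window predicate, with illustrative names rather than the project's actual helpers:

#include <stdio.h>
#include <time.h>

/* Build the WHERE fragment selecting rows changed after the previous
 * sync but no later than the start of the current one: (last, start]. */
int altered_window_sketch(char *buf, size_t len, time_t last, time_t start) {
    return snprintf(buf, len,
                    "time_stamp > FROM_UNIXTIME(%ld) AND "
                    "time_stamp <= FROM_UNIXTIME(%ld)",
                    (long) last, (long) start);
}

Because a committed row's time_stamp is rewritten to the central clock's last-sync time, which under this two-clock scheme (local_time for the buffer, remote_time for the central database) is at or before the lower bound of the next window, the committed row falls outside the predicate and is not pulled back down on the next pass.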