void ns_buffered_capture_scheduler::commit_local_changes_to_central_server(ns_local_buffer_connection & local_buffer, ns_sql & central_db) {
	ns_acquire_lock_for_scope lock(buffer_capture_scheduler_lock,__FILE__,__LINE__);
	local_buffer.clear_query();
	central_db.clear_query();
	std::string local_time = local_buffer.get_value("SELECT UNIX_TIMESTAMP(NOW())"),
		central_time = central_db.get_value("SELECT UNIX_TIMESTAMP(NOW())");
	const ns_synchronized_time update_start_time(atol(local_time.c_str()),atol(central_time.c_str()));
	//push the local buffer's changes up to the central node.
	commit_all_local_schedule_changes_to_central_db(update_start_time,local_buffer,central_db);
	commit_all_local_non_schedule_changes_to_central_db(update_start_time,local_buffer,central_db);
	lock.release();
}
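// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical stand-in types, not the project's
// definitions): the commit above samples the local and central clocks
// back-to-back and stores them as a pair, so the moment the sync began can
// later be expressed against either clock without assuming the two machines
// agree on the time. The translation helper below is an assumption about how
// such a pair can be used, not the project's ns_synchronized_time API.
#include <ctime>

struct example_clock_pair {
	std::time_t local_time;   //local buffer's SELECT UNIX_TIMESTAMP(NOW())
	std::time_t remote_time;  //central server's SELECT UNIX_TIMESTAMP(NOW())

	//translate a local timestamp onto the central server's clock by applying
	//the offset observed when the pair was sampled.
	std::time_t local_to_remote(std::time_t t_local) const {
		return t_local + (remote_time - local_time);
	}
};
// ---------------------------------------------------------------------------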
bool ns_processing_job_scheduler::run_a_job(ns_sql & sql,bool first_in_first_out_job_queue){
	//if we can't talk to the long term storage we're bound to fail, so don't try.
	image_server.image_storage.test_connection_to_long_term_storage(true);
	if (!image_server.image_storage.long_term_storage_was_recently_writeable())
		return false;
	ns_image_server_push_job_scheduler push_scheduler;

	ns_processing_job job = push_scheduler.request_job(sql,first_in_first_out_job_queue);
	if (job.id == 0)
		return false;

	//refresh flag labels from the db
	ns_death_time_annotation_flag::get_flags_from_db(sql);

	if (job.maintenance_task == ns_maintenance_update_processing_job_queue){
		image_server.register_server_event(ns_image_server_event("Updating job queue"),&sql);
		sql << "DELETE from processing_jobs WHERE maintenance_task =" << ns_maintenance_update_processing_job_queue;
		sql.send_query();
		push_scheduler.report_job_as_finished(job,sql);
		push_scheduler.discover_new_jobs(sql);
		return true;
	}

	ns_acquire_for_scope<ns_processing_job_processor> processor(
		ns_processing_job_processor_factory::generate(job,image_server,this->pipeline->pipeline));
	try{
		std::string rejection_reason;
		if (!processor().job_is_still_relevant(sql,rejection_reason)){
			image_server.register_server_event(
				ns_image_server_event("Encountered a processing job that had already been performed or invalidated: ")
					<< rejection_reason << "[" << job.description() << "]",&sql);
			push_scheduler.report_job_as_finished(job,sql);
			if (processor().delete_job_after_processing())
				processor().delete_job(sql);
			processor.release();
			sql.send_query("COMMIT");
			return true;
		}

		if (idle_timer_running)
			image_server.performance_statistics.register_job_duration(ns_performance_statistics_analyzer::ns_idle,idle_timer.stop());
		idle_timer_running = false;

		ns_high_precision_timer tp;
		tp.start();

		//mark the subject as busy to prevent multiple jobs running simultaneously on the same data
		processor().mark_subject_as_busy(true,sql);
		sql.send_query("COMMIT");

		//update the UI to show the job is being performed, if requested.
		if (processor().flag_job_as_being_processed_before_processing())
			processor().flag_job_as_being_processed(sql);
		sql.send_query("COMMIT");

		if (processor().run_job(sql))
			processor().mark_subject_as_busy(false,sql);
		push_scheduler.report_job_as_finished(job,sql);
		sql.send_query("COMMIT");

		processor().handle_concequences_of_job_completion(sql);
		if (processor().delete_job_after_processing())
			processor().delete_job(sql);
		sql.send_query("COMMIT");
		processor.release();

		image_server.performance_statistics.register_job_duration(ns_performance_statistics_analyzer::ns_running_a_job,tp.stop());

		idle_timer_running = true;
		idle_timer.start();
		return true;
	}
	catch(ns_ex & ex){
		//we have found an error; handle it by registering it in the
		//host_event log, and annotate the current job (and any associated images)
		//with a reference to the error that occurred.
		sql.clear_query();
		processor().mark_subject_as_busy(false,sql);
		ns_64_bit error_id(push_scheduler.report_job_as_problem(job,ex,sql));
		sql.send_query("COMMIT");
		//a variety of problems can cause an exception to be thrown.
		//only mark the image itself as problematic if the error doesn't come from
		//any of the environmental problems that can crop up.
		bool problem_with_long_term_storage(!image_server.image_storage.long_term_storage_was_recently_writeable());
		if (!problem_with_long_term_storage &&
			ex.type() != ns_network_io &&
			ex.type() != ns_sql_fatal &&
			ex.type() != ns_memory_allocation &&
			ex.type() != ns_cache)
			processor().mark_subject_as_problem(error_id,sql);

		image_server.performance_statistics.cancel_outstanding_jobs();

		if (ex.type() == ns_memory_allocation)
			throw;	//memory allocation errors can cause big, long-term problems, so we need to
				//pass them downward to be handled.
		else processor.release();
	}
	catch(std::exception & e){
		ns_ex ex(e);
		sql.clear_query();
		processor().mark_subject_as_busy(false,sql);
		//we have found an error; handle it by registering it in the
		//host_event log, and annotate the current job (and any associated images)
		//with a reference to the error that occurred.
		ns_64_bit error_id(push_scheduler.report_job_as_problem(job,ex,sql));
		sql.send_query("COMMIT");
		processor().mark_subject_as_problem(error_id,sql);
		image_server.performance_statistics.cancel_outstanding_jobs();
		processor.release();
		if (ex.type() == ns_memory_allocation)
			throw;	//memory allocation errors can cause big, long-term problems, so we need to
				//pass them downward to be handled.
	}
	return true;
}
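// ---------------------------------------------------------------------------
// Illustrative sketch of the error triage in the handlers above, using a
// hypothetical enum in place of ns_ex::type(): environmental failures
// (storage outage, network, fatal SQL, memory, cache) are transient and
// should not brand the processed image itself as bad; anything else is
// treated as a defect in the subject. Stand-in names throughout.
enum class example_error_type { network_io, sql_fatal, memory_allocation, cache, data };

inline bool example_should_mark_subject_as_problem(example_error_type t,
		bool long_term_storage_writeable) {
	if (!long_term_storage_writeable)
		return false;	//storage outage: the job failed, but the data is fine
	switch (t) {
		case example_error_type::network_io:
		case example_error_type::sql_fatal:
		case example_error_type::memory_allocation:
		case example_error_type::cache:
			return false;	//environmental; the job can be retried later
		default:
			return true;	//likely a genuine problem with the subject
	}
}
// ---------------------------------------------------------------------------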
void ns_buffered_capture_scheduler::update_local_buffer_from_central_server(ns_image_server_device_manager::ns_device_name_list & connected_devices,ns_local_buffer_connection & local_buffer, ns_sql & central_db) {
	if (connected_devices.size() == 0)
		return;

	ns_acquire_lock_for_scope lock(buffer_capture_scheduler_lock,__FILE__,__LINE__);
	local_buffer.clear_query();
	central_db.clear_query();
	std::string local_time = local_buffer.get_value("SELECT UNIX_TIMESTAMP(NOW())"),
		central_time = central_db.get_value("SELECT UNIX_TIMESTAMP(NOW())");
	//go ten seconds into the past to make sure all writes are committed
	const ns_synchronized_time update_start_time(atol(local_time.c_str())-10,atol(central_time.c_str())-10);

	//first, push any local buffer changes up to the central node.
	commit_all_local_schedule_changes_to_central_db(update_start_time,local_buffer,central_db);

	//now that all the local buffer data is reflected in the central database, check whether there is any
	//new data in the central database. if so, wipe the local buffer and update everything.
	capture_schedule.load_if_needed(&central_db);
	//get any new or updated capture schedule events
	central_db << "SELECT sched.id, samp.id, samp.experiment_id, UNIX_TIMESTAMP(sched.time_stamp),UNIX_TIMESTAMP(samp.time_stamp)";
	for (unsigned int i = 0; i < capture_schedule.table_format.column_names.size(); i++)
		central_db << ",`sched`.`" << capture_schedule.table_format.column_names[i] << "`";
	central_db << " FROM capture_schedule as sched, capture_samples as samp "
		<< "WHERE (samp.device_name='" << connected_devices[0].name << "'";
	for (unsigned int i = 1; i < connected_devices.size(); i++)
		central_db << " OR samp.device_name='" << connected_devices[i].name << "'";
	central_db << ")"
		<< " AND sched.time_at_start = 0 "
		<< " AND sched.sample_id = samp.id "
		<< " AND sched.time_at_finish = 0 "
		//here, we could bring the entire local database completely up to date,
		//but only scans in the future will make any difference, so we only download
		//those that are still scheduled for the future.
		//this old command would fully update the database, as time_of_last_update_from_central_db
		//would be set to 0:
		//<< " AND sched.scheduled_time > " << (time_of_last_update_from_central_db.remote_time-image_server.maximum_allowed_local_scan_delay())
		//now, however, we only grab the future, relevant scans.
		<< " AND sched.scheduled_time > " << (update_start_time.remote_time-image_server.maximum_allowed_local_scan_delay())	//only get events in the future
		<< " AND sched.time_stamp > FROM_UNIXTIME(" << time_of_last_update_from_central_db.remote_time << ") "
		<< " AND sched.time_stamp <= FROM_UNIXTIME(" << update_start_time.remote_time << ") "
		<< " ORDER BY sched.scheduled_time ASC";
	ns_sql_result new_schedule;
	central_db.get_rows(new_schedule);

	std::set<unsigned long> altered_experiment_ids;
	std::set<unsigned long> altered_sample_ids;
	for (unsigned int i = 0; i < new_schedule.size(); i++){
	//	if (atol(new_schedule[i][4].c_str()) > central_time_of_last_update_from_central_db){
			altered_sample_ids.insert(atol(new_schedule[i][1].c_str()));
			altered_experiment_ids.insert(atol(new_schedule[i][2].c_str()));
	//	}
	}
	const unsigned long new_timestamp(update_start_time.local_time);

	if (new_schedule.size() != 0){
		if (new_schedule.size() > 4)
			image_server.register_server_event(ns_image_server_event("ns_buffered_capture_scheduler::") << new_schedule.size() << " new capture schedule entries found. Updating local buffer.",&central_db);
		//if samples or experiments have been changed or added, update them.
		//we need to do this *before* updating the capture schedule,
		//as the addition of a capture schedule item might trigger a scan immediately,
		//and that scan will fail if the sample and experiment information isn't already in the local database.
		if (altered_sample_ids.size() > 0){
			capture_samples.load_if_needed("capture_samples",&central_db);
			experiments.load_if_needed("experiments",&central_db);
			std::string sample_where_clause(std::string(" WHERE ") + ns_compile_sql_where_clause(altered_sample_ids,"id")),
				experiment_where_clause(std::string(" WHERE ") + ns_compile_sql_where_clause(altered_experiment_ids,"id"));
			ns_sql_result capture_sample_data;
			ns_get_all_column_data_from_table("capture_samples",capture_samples.column_names,sample_where_clause,capture_sample_data,&central_db);
			ns_sql_result experiment_data;
			ns_get_all_column_data_from_table("experiments",experiments.column_names,experiment_where_clause,experiment_data,&central_db);
			std::cerr << "Updating local buffer with information about " << capture_sample_data.size() << " samples\n";
			//local_buffer_db.send_query("DELETE FROM buffered_capture_samples");
			if (capture_samples.time_stamp_column_id == -1)
				throw ns_ex("Could not find capture sample time stamp column!");
			long last_displayed_percent(-5);
			for (unsigned int i = 0; i < capture_sample_data.size(); i++){
				const long percent((100*i)/capture_sample_data.size());
				if (percent >= last_displayed_percent+5){
					std::cerr << percent << "%...";
					last_displayed_percent = percent;
				}
				std::string values;
				values += "`";
				values += capture_samples.column_names[0] + "`='" + local_buffer.escape_string(capture_sample_data[i][0]) + "'";
				for (unsigned int j = 1; j < capture_samples.column_names.size(); j++){
					//we set the local time stamp ourselves, as a clock asynchrony between the
					//central and local servers could allow remote timestamps to be in the future according to the local clock,
					//which would trigger the local server to update the central one in the next check, ad infinitum.
					if (j == capture_samples.time_stamp_column_id)
						continue;
					values += std::string(",`") + capture_samples.column_names[j] + "`='" + local_buffer.escape_string(capture_sample_data[i][j]) + "'";
				}
				values += std::string(",`time_stamp`=FROM_UNIXTIME(") + ns_to_string(new_timestamp) + ")";
				local_buffer << "INSERT INTO buffered_capture_samples SET " << values
					<< " ON DUPLICATE KEY UPDATE " << values;
				local_buffer.send_query();
			}
			std::cerr << "Done.\n";
			//local_buffer.send_query("DELETE FROM buffered_experiments");
			for (unsigned int i = 0; i < experiment_data.size(); i++){
				std::string values;
				values += "`";
				values += experiments.column_names[0] + "`='" + local_buffer.escape_string(experiment_data[i][0]) + "'";
				for (unsigned int j = 1; j < experiments.column_names.size(); j++){
					if (experiments.time_stamp_column_id == j)
						continue;
					values += std::string(",`") + experiments.column_names[j] + "`='" + local_buffer.escape_string(experiment_data[i][j]) + "'";
				}
				values += std::string(",time_stamp=FROM_UNIXTIME(") + ns_to_string(new_timestamp) + ")";
				local_buffer << "INSERT INTO buffered_experiments SET " << values;
				local_buffer << " ON DUPLICATE KEY UPDATE " << values;
				local_buffer.send_query();
			}
		}
		std::cerr << "Updating local buffer with information about " << new_schedule.size() << " schedule time points...\n";
		long last_displayed_percent = -5;
		for (unsigned int i = 0; i < new_schedule.size(); i++){
			const long percent((100*i)/new_schedule.size());
			if (percent >= last_displayed_percent+5){
				std::cerr << percent << "%...";
				last_displayed_percent = percent;
			}
			std::string all_values;
			all_values += "`";
			all_values += capture_schedule.table_format.column_names[0] + "`='" + local_buffer.escape_string(new_schedule[i][5]) + "'";
			for (unsigned int j = 1; j < capture_schedule.table_format.column_names.size(); j++){
				if (j == capture_schedule.time_stamp_column)
					continue;
				all_values += std::string(", `") + capture_schedule.table_format.column_names[j] + "`='" + local_buffer.escape_string(new_schedule[i][5+j]) + "'";
			}
			all_values += std::string(",time_stamp=FROM_UNIXTIME(") + ns_to_string(new_timestamp) + ")";
			std::string update_values;
			update_values += std::string("problem=") + new_schedule[i][5+capture_schedule.problem_column] + ","
				+ std::string("scheduled_time=") + new_schedule[i][5+capture_schedule.scheduled_time_column] + ","
				+ std::string("missed=") + new_schedule[i][5+capture_schedule.missed_column] + ","
				+ std::string("censored=") + new_schedule[i][5+capture_schedule.censored_column] + ","
				+ std::string("transferred_to_long_term_storage=") + new_schedule[i][5+capture_schedule.transferred_to_long_term_storage_column] + ","
				+ std::string("time_during_transfer_to_long_term_storage=") + new_schedule[i][5+capture_schedule.time_during_transfer_to_long_term_storage_column] + ","
				+ std::string("time_during_deletion_from_local_storage=") + new_schedule[i][5+capture_schedule.time_during_deletion_from_local_storage_column] + ","
				+ std::string("time_stamp=FROM_UNIXTIME(") + ns_to_string(update_start_time.local_time) + ")";
			local_buffer << "INSERT INTO buffered_capture_schedule SET " << all_values
				<< " ON DUPLICATE KEY UPDATE " << update_values;
			local_buffer.send_query();
		}
		std::cerr << "Done.\n";
	}
	//if no changes to the schedule were made, look for changes made to any capture samples
	else{
		ns_sql_result capture_sample_data;
		ns_get_all_column_data_from_table("capture_samples",capture_samples.column_names,
			std::string("WHERE time_stamp >= FROM_UNIXTIME(") + ns_to_string(time_of_last_update_from_central_db.remote_time) + ") "
			" AND time_stamp < FROM_UNIXTIME(" + ns_to_string(update_start_time.remote_time) + ") ",
			capture_sample_data,&central_db);
		if (capture_sample_data.size() > 0){
			std::cerr << "Copying over " << capture_sample_data.size() << " samples\n";
			//local_buffer_db.send_query("DELETE FROM buffered_capture_samples");
			for (unsigned int i = 0; i < capture_sample_data.size(); i++){
				std::string values;
				values += "`";
				values += capture_samples.column_names[0] + "`='" + local_buffer.escape_string(capture_sample_data[i][0]) + "'";
				for (unsigned int j = 1; j < capture_samples.column_names.size(); j++)
					values += std::string(",`") + capture_samples.column_names[j] + "`='" + local_buffer.escape_string(capture_sample_data[i][j]) + "'";
				local_buffer << "INSERT INTO buffered_capture_samples SET " << values
					<< " ON DUPLICATE KEY UPDATE " << values;
				local_buffer.send_query();
			}
		}
	}
	local_buffer.send_query("COMMIT");
	//lock.unlock();
	commit_all_local_non_schedule_changes_to_central_db(update_start_time,local_buffer,central_db);

	central_db << "SELECT k,v FROM constants WHERE time_stamp > FROM_UNIXTIME(" << time_of_last_update_from_central_db.remote_time << ")";
	ns_sql_result cres;
	central_db.get_rows(cres);
	if (cres.size() > 0){
		std::cerr << "Updating " << cres.size() << " constants in local buffer\n";
	}
	for (unsigned int i = 0; i < cres.size(); i++)
		image_server.set_cluster_constant_value(local_buffer.escape_string(cres[i][0]),local_buffer.escape_string(cres[i][1]),&local_buffer,update_start_time.local_time);
	time_of_last_update_from_central_db = update_start_time;
	store_last_update_time_in_db(time_of_last_update_from_central_db,local_buffer);
	lock.release();
}
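// ---------------------------------------------------------------------------
// Illustrative sketch of the "INSERT ... ON DUPLICATE KEY UPDATE" upsert
// pattern used throughout the buffer updates above: one `col`='value' list is
// assembled once and reused for both clauses, so a row is inserted if new and
// overwritten if it already exists. The helper below is a hypothetical
// stand-in (values are assumed to be pre-escaped), not the project's API.
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

inline std::string example_build_upsert(const std::string & table,
		const std::vector<std::pair<std::string,std::string> > & columns) {
	std::string values;
	for (std::size_t i = 0; i < columns.size(); ++i) {
		if (i != 0) values += ",";
		values += "`" + columns[i].first + "`='" + columns[i].second + "'";	//values assumed pre-escaped
	}
	return "INSERT INTO " + table + " SET " + values +
		" ON DUPLICATE KEY UPDATE " + values;
}
// ---------------------------------------------------------------------------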
std::string ns_experiment_capture_specification::submit_schedule_to_db(std::vector<std::string> & warnings,ns_sql & sql,bool actually_write,bool overwrite_previous){
	string debug;
	if (!device_schedule_produced)
		throw ns_ex("ns_experiment_capture_specification::submit_schedule_to_db()::The device schedule has not yet been compiled");
	if (name.length() > 40)
		throw ns_ex("To avoid lengthy filenames, experiment names must contain 40 characters or fewer.");
	ns_sql_result res;
	//check that all requested devices exist
	for (unsigned long i = 0; i < capture_schedules.size(); i++){
		ns_device_schedule_list & device_schedules(capture_schedules[i].device_schedules);
		for (ns_device_schedule_list::iterator p = device_schedules.begin(); p != device_schedules.end(); p++){
			sql << "SELECT name FROM devices WHERE name = '" << sql.escape_string(p->second.device_name) << "'";
			sql.get_rows(res);
			if (res.size() == 0)
				throw ns_ex("ns_experiment_capture_specification::submit_schedule_to_db()::Could not find device ") << p->second.device_name << " attached to cluster";
			for (ns_device_capture_schedule::ns_sample_group_list::iterator q = p->second.sample_groups.begin(); q != p->second.sample_groups.end(); ++q){
				if (q->second.samples.size() != 4 && q->second.samples.size() != 6){
					string warning;
					warning += "Device ";
					warning += p->second.device_name + " has " + ns_to_string(q->second.samples.size()) + " samples scheduled on a single device";
					warnings.push_back(warning);
					debug += "WARNING: ";
					debug += warning + ".\n\n";
				}
				for (unsigned int k = 0; k < q->second.samples.size(); k++){
					if (q->second.samples[k]->width < .75 || q->second.samples[k]->height < .75 ||
						q->second.samples[k]->width > 2.5 || q->second.samples[k]->height > 10){
						string warning;
						warning += "Sample ";
						warning += q->second.samples[k]->sample_name + " has unusual dimensions: " + ns_to_string(q->second.samples[k]->width) + "x" + ns_to_string(q->second.samples[k]->height);
						warnings.push_back(warning);
						debug += "WARNING: ";
						debug += warning + ".\n\n";
					}
				}
				if (q->second.schedule->device_capture_period < 10*60 || q->second.schedule->device_capture_period > 20*60){
					string warning;
					warning += "The schedule contains an unusual device capture period: ";
					warning += ns_to_string(q->second.schedule->device_capture_period/60);
					warnings.push_back(warning);
					debug += "WARNING: ";
					debug += warning + ".\n\n";
				}
			}
		}
	}
	std::map<std::string,std::string> incubator_assignments;
	std::map<std::string,std::string> incubator_location_assignments;
	sql << "SELECT device_name, incubator_name, incubator_location FROM device_inventory";
	sql.get_rows(res);
	for (unsigned int i = 0; i < res.size(); ++i){
		incubator_assignments[res[i][0]] = res[i][1];
		incubator_location_assignments[res[i][0]] = res[i][2];
	}
	res.resize(0);
	sql.clear_query();
	sql.send_query("BEGIN");
	sql << "SELECT id FROM experiments WHERE name='" << sql.escape_string(name) << "'";
	sql.get_rows(res);
	if (res.size() == 0){
		sql << "INSERT INTO experiments SET name='" << sql.escape_string(name) << "',description='',`partition`='', time_stamp=0";
		if (!actually_write){
			experiment_id = 0;
			debug += "Creating a new experiment named ";
			debug += name + "\n";
		}
		else experiment_id = sql.send_query_get_id();
	}
	else{
		if (!overwrite_previous)
			throw ns_ex("ns_experiment_capture_specification::submit_schedule_to_db::Experiment already exists and overwrite_previous set to false");
		if (!actually_write){
			debug += "Overwriting an existing experiment named ";
			debug += name + " with id = " + res[0][0] + "\n";
		}
		experiment_id = atol(res[0][0].c_str());
	}
	sql.clear_query();
	res.resize(0);
	try{
		for (unsigned int i = 0; i < samples.size(); i++){
			sql << "SELECT id, name, device_name,parameters FROM capture_samples WHERE experiment_id = " << experiment_id << " AND name='" << sql.escape_string(samples[i].sample_name) << "'";
			sql.get_rows(res);
			if (res.size() != 0){
				if (!overwrite_previous)
					throw ns_ex("ns_experiment_capture_specification::submit_schedule_to_db::Sample ") << samples[i].sample_name << " already exists and overwrite_previous set to false";
				samples[i].sample_id = atol(res[0][0].c_str());
				ns_processing_job job;
				job.sample_id = samples[i].sample_id;
				if (!actually_write)
					debug += "Deleting previous sample (id=" + ns_to_string(job.sample_id) + ").\n";
				else ns_handle_image_metadata_delete_action(job,sql);
			}
			sql << "INSERT INTO capture_samples SET experiment_id = " << ns_to_string(experiment_id)
				<< ",name='" << sql.escape_string(samples[i].sample_name) << "'"
				<< ",device_name='" << sql.escape_string(samples[i].device)
				<< "',parameters='" << sql.escape_string(samples[i].capture_parameters()) << "'"
				<< ",position_x=" << samples[i].x_position
				<< ",position_y=" << samples[i].y_position
				<< ",size_x=" << samples[i].width
				<< ",size_y=" << samples[i].height
				<< ",incubator_name='" << sql.escape_string(incubator_assignments[samples[i].device])
				<< "',incubator_location='" << sql.escape_string(incubator_location_assignments[samples[i].device])
				<< "',desired_capture_duration_in_seconds=" << samples[i].desired_minimum_capture_duration
				<< ",description='',model_filename='',reason_censored='',image_resolution_dpi='" << samples[i].resolution
				<< "',device_capture_period_in_seconds=" << capture_schedules[samples[i].internal_schedule_id].device_capture_period
				<< ",number_of_consecutive_captures_per_sample=" << capture_schedules[samples[i].internal_schedule_id].number_of_consecutive_captures_per_sample
				<< ", time_stamp=0";
			if (!actually_write){
				samples[i].sample_id = 0;
				debug += "Creating a new sample: name:";
				debug += samples[i].sample_name + ", device:" + samples[i].device + "\n\tcapture parameters: \"";
				debug += samples[i].capture_parameters() + "\"\n";
			}
			else samples[i].sample_id = sql.send_query_get_id();
			sql.clear_query();
			res.resize(0);
		}
		sql.clear_query();
		res.resize(0);

		//stagger each device's start time so that scanners sharing a schedule do not all fire at once
		for (unsigned long i = 0; i < capture_schedules.size(); i++){
			unsigned long device_start_offset = 2*60;
			unsigned long s_offset(0);
			ns_device_schedule_list & device_schedules(capture_schedules[i].device_schedules);
			ns_device_start_offset_list & device_start_offsets(capture_schedules[i].device_start_offsets);
			for (ns_device_schedule_list::iterator p = device_schedules.begin(); p != device_schedules.end(); p++){
				device_start_offsets[p->first] = s_offset;
				s_offset += device_start_offset;
				if (s_offset >= 20*60)
					s_offset = 0;
			}
		}
		for (unsigned int i = 0; i < capture_schedules.size(); i++){
			//compile the correct start and stop time for each device.
			if (capture_schedules[i].start_time == 0)
				capture_schedules[i].start_time = ns_current_time() + 2*60;
			capture_schedules[i].stop_time = 0;
			ns_device_schedule_list & device_schedules(capture_schedules[i].device_schedules);
			ns_device_start_offset_list & device_start_offsets(capture_schedules[i].device_start_offsets);
			for (ns_device_schedule_list::iterator p = device_schedules.begin(); p != device_schedules.end(); p++){
				const string & device_name = p->first;
				if (p->second.sample_groups.size() == 0)
					continue;
				p->second.effective_device_period = p->second.sample_groups.begin()->second.schedule->device_capture_period;
				p->second.number_of_consecutive_captures_per_sample = p->second.sample_groups.begin()->second.schedule->number_of_consecutive_captures_per_sample;
				if (p->second.effective_device_period == 0)
					throw ns_ex("Device period specified as zero!");
				if (p->second.number_of_consecutive_captures_per_sample == 0)
					throw ns_ex("Number of consecutive captures per sample specified as zero!");
				//find the earliest start time and latest stop time
				for (ns_device_capture_schedule::ns_sample_group_list::iterator q = p->second.sample_groups.begin(); q != p->second.sample_groups.end(); q++){
					if (q->second.schedule->start_time != 0 && q->second.schedule->start_time < ns_current_time())
						throw ns_ex("Start time specified is in the past") << q->second.schedule->start_time;
					if (q->first->start_time != 0)
						q->second.schedule->effective_start_time = q->first->start_time + device_start_offsets[device_name];
					else q->second.schedule->effective_start_time = capture_schedules[i].start_time + device_start_offsets[device_name];
					q->second.schedule->effective_stop_time = q->second.schedule->effective_start_time + q->second.schedule->duration + device_start_offsets[device_name];
					if (q->second.schedule->effective_start_time < capture_schedules[i].start_time)
						capture_schedules[i].start_time = q->second.schedule->effective_start_time;
					if (q->second.schedule->effective_stop_time > capture_schedules[i].stop_time)
						capture_schedules[i].stop_time = q->second.schedule->effective_stop_time;
					if (q->second.schedule->device_capture_period != p->second.effective_device_period)
						throw ns_ex("Invalid device capture period specified for device ") << p->second.device_name;
					if (q->second.schedule->number_of_consecutive_captures_per_sample != p->second.number_of_consecutive_captures_per_sample)
						throw ns_ex("Invalid number of consecutive captures per sample specified for device ") << p->second.device_name;
					if (q->second.samples.size() == 0)
						throw ns_ex("Empty device sample group found!");
				}
			}
			std::set<string> incubators;
			for (ns_device_schedule_list::iterator device = device_schedules.begin(); device != device_schedules.end(); device++)
				incubators.insert(incubator_assignments[device->second.device_name]);
			debug += string("Schedule involves ") + ns_to_string(device_schedules.size()) + " devices in " + ns_to_string(incubators.size()) + " location";
			if (incubators.size() != 1)
				debug += "s";
			debug += ":";
			for (std::set<string>::const_iterator p = incubators.begin(); p != incubators.end(); p++){
				debug += *p;
				debug += ",";
			}
			debug += "\n";
			for (ns_device_schedule_list::iterator device = device_schedules.begin(); device != device_schedules.end(); device++){
				if (!actually_write){
					debug += string("\tDevice ") + device->second.device_name + " runs between " +
						ns_format_time_string_for_human(capture_schedules[i].start_time + device_start_offsets[device->second.device_name]) +
						" and " + ns_format_time_string_for_human(capture_schedules[i].stop_time + device_start_offsets[device->second.device_name]);
					debug += " with a capture period of " + ns_capture_schedule::time_string(device->second.effective_device_period) + "\n";
				}
			}
		}
		ns_device_start_offset_list device_stop_times;
		ns_device_start_offset_list device_interval_at_stop;
		for (unsigned int i = 0; i < capture_schedules.size(); i++){
			ns_device_schedule_list & device_schedules(capture_schedules[i].device_schedules);
			ns_device_start_offset_list & device_start_offsets(capture_schedules[i].device_start_offsets);
			unsigned long number_of_captures(0);
			for (ns_device_schedule_list::iterator device = device_schedules.begin(); device != device_schedules.end(); device++){
				const string & device_name = device->first;
				ns_device_start_offset_list::iterator stop_time(device_stop_times.find(device_name));
				if (stop_time == device_stop_times.end()){
					device_stop_times[device_name] = 0;
					stop_time = device_stop_times.find(device_name);
				}
				if (!actually_write)
					debug += string("Schedule for device ") + device->second.device_name + ":\n";
				char have_started(false);
				ns_device_capture_schedule::ns_sample_group_list::iterator current_sample_group(device->second.sample_groups.begin());
				unsigned long current_sample_id = 0;
				for (unsigned long t = capture_schedules[i].start_time+device_start_offsets[device_name]; t < capture_schedules[i].stop_time+device_start_offsets[device_name];){
					ns_device_capture_schedule::ns_sample_group_list::iterator loop_start_group = current_sample_group;
					unsigned long loop_start_sample_id = current_sample_id;
					//find the next active sample at this time
					while(true){
						if (have_started){
							current_sample_id++;
							if (current_sample_id >= current_sample_group->second.samples.size()){
								current_sample_group++;
								current_sample_id = 0;
								if (current_sample_group == device->second.sample_groups.end())
									current_sample_group = device->second.sample_groups.begin();
							}
						}
						else have_started = true;
						if (current_sample_group->second.schedule->effective_start_time <= t+device->second.effective_device_period &&
							current_sample_group->second.schedule->effective_stop_time >= t+device->second.effective_device_period)
							break;
						if (current_sample_group == loop_start_group && current_sample_id == loop_start_sample_id)
							break;
					}
					//schedule the scans
					unsigned long dt_total = device->second.effective_device_period*device->second.number_of_consecutive_captures_per_sample;
					for (unsigned int dt = 0; dt < dt_total; dt += device->second.effective_device_period){
						sql << "INSERT INTO capture_schedule SET experiment_id = " << experiment_id
							<< ", scheduled_time = " << t+dt << ","
							<< "sample_id = " << current_sample_group->second.samples[current_sample_id]->sample_id
							<< ", time_stamp = 0";
						if (!actually_write){
							sql.clear_query();
							debug += "\t";
							debug += current_sample_group->second.samples[current_sample_id]->sample_name + ": " + ns_format_time_string_for_human(t+dt) + "\n";
						}
						else{
							sql.send_query();
							number_of_captures++;
						}
					}
					if (t+dt_total > stop_time->second){
						stop_time->second = t+dt_total;
						device_interval_at_stop[device_name] = device->second.effective_device_period;
					}
					t += dt_total;
				}
				sql << "UPDATE experiments SET num_time_points = " << number_of_captures
					<< ", first_time_point=" << capture_schedules[i].start_time
					<< ", last_time_point= " << capture_schedules[i].stop_time
					<< " WHERE id=" << experiment_id;
				if (actually_write)
					sql.send_query();
				sql.clear_query();
			}
		}
		//start autoscans to keep scanners running after the end of the experiment
		for (ns_device_start_offset_list::iterator p = device_stop_times.begin(); p != device_stop_times.end(); p++){
			sql << "INSERT INTO autoscan_schedule SET device_name='" << p->first
				<< "', autoscan_start_time=" << (p->second + device_interval_at_stop[p->first])
				<< ", scan_interval = " << device_interval_at_stop[p->first];
			if (actually_write)
				sql.send_query();
			else{
				debug += "Scheduling an ";
				debug += ns_to_string(device_interval_at_stop[p->first]) + " second autoscan sequence on device " + p->first + " at " +
					ns_format_time_string_for_human(p->second + device_interval_at_stop[p->first]) + "\n";
			}
		}
		sql.send_query("COMMIT");
		sql.send_query("UPDATE experiments SET time_stamp = NOW() WHERE time_stamp = 0");
		sql.send_query("UPDATE capture_samples SET time_stamp = NOW() WHERE time_stamp = 0");
		sql.send_query("UPDATE capture_schedule SET time_stamp = NOW() WHERE time_stamp = 0");
	}
	catch(...){
		sql.send_query("ROLLBACK");
		throw;
	}
	return debug;
}