void ns_image_server_captured_image_region::create_storage_for_worm_results(ns_image_server_image & im, const bool interpolated,ns_sql & sql){
	//stamp the image record with this host and the current time
	im.host_id = image_server.host_id();
	im.capture_time = ns_current_time();
	//lazily load region metadata from the db if it is not yet populated
	const bool metadata_missing(experiment_name.size() == 0 || experiment_id == 0 || sample_name.size() == 0);
	if (metadata_missing)
		load_from_db(region_images_id,&sql);
	//build the on-disk location for the worm detection results file
	const std::string exp_dir(ns_image_server_captured_image::experiment_directory(experiment_name,experiment_id));
	const std::string sample_dir(ns_sample_directory(sample_name,sample_id,exp_dir));
	im.path = region_base_directory(region_name,sample_dir,exp_dir) + DIR_CHAR + "detected_data";
	//interpolated results get an "_i" suffix before the .wrm extension
	std::string result_filename(filename(&sql));
	if (interpolated)
		result_filename += "_i";
	result_filename += ".wrm";
	im.filename = result_filename;
	im.partition = image_server.image_storage.get_partition_for_experiment(experiment_id,&sql);

	sql.send_query("BEGIN");

	const bool record_already_exists(im.id != 0);
	if (record_already_exists){
		//reuse the existing images row
		sql << "UPDATE images SET host_id = " << im.host_id  << ", creation_time=" << ns_current_time() << ", currently_under_processing=1, "
			<< "path = '" << sql.escape_string(im.path) << "', filename='" << sql.escape_string(im.filename) << "', partition='" << im.partition << "' "
			<< "WHERE id = " << im.id;
		sql.send_query();
	}
	else{
		//create a new image if it doesn't exist.
		sql << "INSERT INTO images SET host_id = " << im.host_id << ", creation_time=" << ns_current_time() << ", currently_under_processing=1, "
			<< "path = '" << sql.escape_string(im.path) << "', filename='" << sql.escape_string(im.filename) << "', partition='" << im.partition << "' ";
		im.id = sql.send_query_get_id();
	}
	sql.send_query("COMMIT");
}
Example #2
0
//Pushes rows of the local buffer database that changed since the last sync
//(capture schedule entries and capture sample records) up to the central database.
//update_start_time bounds the modification window; rows stamped after it are left for the next sync.
//Rows whose upload previously failed (uploaded_to_central_db=3) are excluded and reported.
void ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db(const ns_synchronized_time & update_start_time,ns_local_buffer_connection & local_buffer_sql, ns_sql & central_db) {
    if (time_of_last_update_from_central_db.local_time == ns_default_update_time)
        get_last_update_time(local_buffer_sql);
    buffered_capture_schedule.load_if_needed(&local_buffer_sql);

    ns_sql_result updated_data;

    //select only rows modified after the last central-db sync and at or before update_start_time
    const std::string altered_data_condition(
        std::string("time_stamp > FROM_UNIXTIME(") + ns_to_string(time_of_last_update_from_central_db.local_time) +
        ") AND time_stamp <= FROM_UNIXTIME(" + ns_to_string(update_start_time.local_time) + ") ");

    //timestamp written to the rows we touch, chosen so they look "old" to the central db
    //and do not trigger a re-download (see comment further below)
    const unsigned long new_timestamp(time_of_last_update_from_central_db.remote_time);

    //uploaded_to_central_db=3 marks rows whose upload already failed; skip them
    ns_get_all_column_data_from_table("buffered_capture_schedule",buffered_capture_schedule.table_format.column_names,
                                      std::string("WHERE ") + altered_data_condition + " AND uploaded_to_central_db != 3",
                                      updated_data,&local_buffer_sql);

    //Phase 1: for each changed schedule row, work out the central-db ids of its
    //captured image and problem-event, creating new central records where the
    //local buffer holds entries the central db has not seen yet.
    std::vector<ns_db_key_mapping> mappings(updated_data.size());
    if (updated_data.size() > 8)
        image_server.register_server_event(ns_image_server_event("ns_buffered_capture_scheduler::Committing ") << updated_data.size() << " recorded capture events to the central database.",&central_db);
    std::vector<ns_ex *> errors;
    for (unsigned long i = 0; i < updated_data.size(); i++) {
        try {
            unsigned long captured_image_id = atol(updated_data[i][buffered_capture_schedule.image_id_column].c_str());
            unsigned long problem_id = atol(updated_data[i][buffered_capture_schedule.problem_column].c_str());

            //ids the central db currently holds for this schedule entry (0 = none)
            unsigned long central_captured_image_id(0),
                     central_problem_id(0);
            if (captured_image_id != 0 || problem_id != 0) {
                central_db << "SELECT captured_image_id,problem FROM capture_schedule WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                ns_sql_result res;
                central_db.get_rows(res);
                if (res.size() == 0)
                    throw ns_ex("Could not find capture schedule entry in central db for sample id " ) << updated_data[i][buffered_capture_schedule.id_column] << " finishing at time " << updated_data[i][buffered_capture_schedule.time_at_finish_column];
                central_captured_image_id = atol(res[0][0].c_str());
                central_problem_id = atol(res[0][1].c_str());
            }

            //a local captured image whose id differs from the central one must be re-created centrally
            const bool need_to_make_new_capture_image(captured_image_id != 0 && central_captured_image_id != captured_image_id);
            //we need to make new entries in the central database for any new images or events
            if (need_to_make_new_capture_image) {
                mappings[i].captured_image.load_from_db(captured_image_id,&local_buffer_sql);
                mappings[i].old_captured_image = mappings[i].captured_image;
                mappings[i].old_image = mappings[i].image;
                if (mappings[i].captured_image.capture_images_image_id != 0)
                    mappings[i].image.load_from_db(mappings[i].captured_image.capture_images_image_id,&local_buffer_sql);


            }
            else {
                //central db already knows this image; just adopt its id
                mappings[i].old_image = mappings[i].image;
                mappings[i].old_captured_image = mappings[i].captured_image;
                mappings[i].captured_image.captured_images_id = central_captured_image_id;
            }

            //same logic for problem (error-event) records
            bool need_to_make_new_problem(problem_id != 0 && central_problem_id != problem_id);
            if (need_to_make_new_problem) {
                local_buffer_sql << "SELECT id,event,time,minor FROM buffered_host_event_log WHERE id = " << updated_data[i][buffered_capture_schedule.problem_column];
                ns_sql_result res;
                local_buffer_sql.get_rows(res);
                mappings[i].old_problem_id = mappings[i].problem_id;
                if (res.size() == 0) {
                    //local buffer no longer has the event text; record that fact centrally instead
                    mappings[i].problem_id = image_server.register_server_event(ns_ex("Could not find problem id ") << updated_data[i][buffered_capture_schedule.problem_column] << " in local database buffer!",&central_db);
                    need_to_make_new_problem=false;
                }
                else {
                    //columns: 0=id, 1=event text, 2=time, 3=minor flag
                    mappings[i].problem_text = res[0][1];
                    mappings[i].problem_time = atol(res[0][2].c_str());
                    mappings[i].problem_minor = res[0][3] != "0";
                }
            }
            else {
                mappings[i].old_problem_id = mappings[i].problem_id;
                mappings[i].problem_id = central_problem_id;
            }

            //create the central image record first so the captured_image can reference its new id
            if (need_to_make_new_capture_image && mappings[i].image.id != 0) {
                mappings[i].image.id = 0;
                mappings[i].image.save_to_db(0,&central_db,false);
                mappings[i].captured_image.capture_images_image_id = mappings[i].image.id;
            }
            if (need_to_make_new_capture_image) {
                mappings[i].captured_image.captured_images_id = 0;
                mappings[i].captured_image.save(&central_db);
            }
            if (need_to_make_new_problem) {
                //NOTE(review): old_problem_id was already recorded above; this looks redundant but is harmless
                mappings[i].old_problem_id = mappings[i].problem_id;
                ns_image_server_event ev;
                ev << mappings[i].problem_text;
                if (mappings[i].problem_minor) ev << ns_ts_minor_event;
                ev.set_time(mappings[i].problem_time);
                mappings[i].problem_id = image_server.register_server_event(ev,&central_db);
            }
        }
        catch(ns_ex & ex) {
            //mapping failures are collected; the row is skipped in phase 2 and flagged in phase 3
            mappings[i].error << "Error while making mapping: " << ex.text();
            errors.push_back(&mappings[i].error);
        }
    }

    //Phase 2: write the updated schedule rows to the central db and prune
    //local buffer rows that are now fully represented centrally.
    for (unsigned long i = 0; i < updated_data.size(); i++) {
        if (mappings[i].error.text().size() > 0)
            continue;
        try {
            central_db << "Update capture_schedule SET ";
            //copy every column except the ids handled explicitly below and the timestamp
            for (unsigned int j = 0; j < buffered_capture_schedule.table_format.column_names.size(); ++j) {
                if (j == buffered_capture_schedule.id_column
                        || j == buffered_capture_schedule.image_id_column ||
                        j == buffered_capture_schedule.problem_column || j ==
                        buffered_capture_schedule.timestamp_column) continue;
                central_db  << "`" << buffered_capture_schedule.table_format.column_names[j] << "`='" << central_db.escape_string(updated_data[i][j]) << "',";
            }
            central_db << "captured_image_id = " << mappings[i].captured_image.captured_images_id
                       << ", problem = " << mappings[i].problem_id;
            //we set the timestamp as old so that it does not trigger a re-download when the server tries to update its cache
            central_db << ", time_stamp=FROM_UNIXTIME("<< new_timestamp <<") ";
            central_db << " WHERE id = " << updated_data[i][0];
            central_db.send_query();
            //if new central ids were minted, point the buffered row at them and
            //drop the now-superseded buffered image/event records
            if (mappings[i].old_captured_image.captured_images_id != mappings[i].captured_image.captured_images_id ||
                    mappings[i].old_problem_id != mappings[i].problem_id) {
                local_buffer_sql << "UPDATE buffered_capture_schedule SET captured_image_id = " << mappings[i].captured_image.captured_images_id
                                 << ", problem = " << mappings[i].problem_id << ", time_stamp = FROM_UNIXTIME("<< new_timestamp
                                 <<") WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_captured_images WHERE id = " << mappings[i].old_captured_image.captured_images_id;
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_images WHERE id = " << mappings[i].old_image.id;
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_host_event_log WHERE id = " << mappings[i].old_problem_id;
                local_buffer_sql.send_query();
            }
            //a non-zero finish time means the capture is complete; its buffered schedule row can go
            if (updated_data[i][buffered_capture_schedule.time_at_finish_column] != "0") {
                local_buffer_sql << "DELETE FROM buffered_capture_schedule WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                local_buffer_sql.send_query();
                //	local_buffer_sql.clear_query();
            }
        }
        catch(ns_ex & ex) {
            mappings[i].error << "Error during central update: " << ex.text();
            errors.push_back(&mappings[i].error);
        }
    }
    //Phase 3: mark failed rows so they are not retried (uploaded_to_central_db=3
    //excludes them from the initial SELECT) and log the failure.
    for (unsigned int i = 0; i < mappings.size(); i++) {
        if (mappings[i].error.text().size() > 0) {

            local_buffer_sql << "UPDATE buffered_capture_schedule SET uploaded_to_central_db=3 WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
            local_buffer_sql.send_query();
            image_server.register_server_event(ns_image_server::ns_register_in_central_db_with_fallback,ns_ex("Could not update central db: ") << mappings[i].error.text());
        }

    }
    //update modified sample data.
    updated_data.resize(0);
    if (capture_samples.column_names.size() == 0) {
        capture_samples.load_column_names_from_db("capture_samples",&central_db);
        if (capture_samples.column_names.size() == 0)
            throw ns_ex("ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db()::Capture sample table appears to have no columns!");
    }
    //the column-copy loop below assumes column 0 is the id; verify that
    if (capture_samples.column_names[0] != "id")
        throw ns_ex("ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db()::Capture sample table does not have its id in the first column!");

    ns_get_all_column_data_from_table("buffered_capture_samples",capture_samples.column_names,
                                      std::string("WHERE ") + altered_data_condition,
                                      updated_data,&local_buffer_sql);
    if (capture_samples.time_stamp_column_id == -1)
        throw ns_ex("Could not find capture sample time stamp column!");
    for (unsigned int i = 0; i < updated_data.size(); i++) {
        central_db << "UPDATE capture_samples SET ";
        //skip id column; we don't want to cause any unneccisary db shuffling by changing ids (even if we are changing the ids to the value they already are)
        central_db  << capture_samples.column_names[1] << "='" << central_db.escape_string(updated_data[i][1]);
        for (unsigned int j = 2; j < capture_samples.column_names.size(); ++j) {
            if (j == capture_samples.time_stamp_column_id)
                continue;
            else central_db  << "',`" << capture_samples.column_names[j] << "`='" << central_db.escape_string(updated_data[i][j]);
        }
        //again, backdate the timestamp so the central db does not see this as a fresh change
        central_db << "',time_stamp=FROM_UNIXTIME(" << new_timestamp << ") ";
        central_db << "WHERE id = " << updated_data[i][0];
        central_db.send_query();
    }

}
Example #3
0
std::string ns_experiment_capture_specification::submit_schedule_to_db(std::vector<std::string> & warnings,ns_sql & sql,bool actually_write,bool overwrite_previous){
	string debug;
	if (!device_schedule_produced) 
		throw ns_ex("ns_experiment_capture_specification::submit_schedule_to_db()::The device schedule has not yet been compiled");
	if (name.length() > 40)
			throw ns_ex("To avoid lengthy filenames, experiment names must contain 40 characters or less.");
	ns_sql_result res;
	//check that all devices requested exist
	for (unsigned long i = 0; i < capture_schedules.size(); i++){
		ns_device_schedule_list & device_schedules(capture_schedules[i].device_schedules);
		for (ns_device_schedule_list::iterator p = device_schedules.begin(); p != device_schedules.end(); p++){
			sql << "SELECT name FROM devices WHERE name = '" << sql.escape_string(p->second.device_name) << "'";
			sql.get_rows(res);
			if (res.size() == 0)
				throw ns_ex("ns_experiment_capture_specification::submit_schedule_to_db()::Could not find device ") << p->second.device_name << " attached to cluster";
			for (ns_device_capture_schedule::ns_sample_group_list::iterator q = p->second.sample_groups.begin(); q!= p->second.sample_groups.end(); ++q){
				if (q->second.samples.size() != 4 && q->second.samples.size() != 6){
					string warning;
					warning+="Device ";
					warning+=p->second.device_name + " has " + ns_to_string(q->second.samples.size()) + " samples scheduled on a single device";
					warnings.push_back(warning);
					debug += "WARNING: ";
					debug += warning + ".\n\n";
				}
				for (unsigned int k = 0; k < q->second.samples.size(); k++){
					if (q->second.samples[k]->width < .75 || q->second.samples[k]->height < .75 || 
						q->second.samples[k]->width > 2.5 || q->second.samples[k]->height > 10){
						string warning;
						warning+="Sample ";
						warning+=q->second.samples[i]->sample_name + " has unusual dimensions: " + ns_to_string(q->second.samples[i]->width) + "x" + ns_to_string(q->second.samples[i]->height);
						warnings.push_back(warning);
						debug += "WARNING: ";
						debug += warning + ".\n\n";
					}
				}
				if (q->second.schedule->device_capture_period < 10*60 || q->second.schedule->device_capture_period > 20*60){
						string warning;
						warning+="The schedule contains an unusual device capture period: ";
						warning+=ns_to_string(q->second.schedule->device_capture_period/60);
						warnings.push_back(warning);
						debug += "WARNING: ";
						debug += warning + ".\n\n";
				}
			}
		}
	}

	std::map<std::string,std::string> incubator_assignments;
	std::map<std::string,std::string> incubator_location_assignments;
	sql << "SELECT device_name, incubator_name, incubator_location FROM device_inventory";
	//ns_sql_result res;
	sql.get_rows(res);
	for (unsigned int i = 0; i < res.size(); ++i){
		incubator_assignments[res[i][0]] = res[i][1];
		incubator_location_assignments[res[i][0]] = res[i][2];
	}
	
	res.resize(0);
	sql.clear_query();
	sql.send_query("BEGIN");
	sql << "SELECT id FROM experiments WHERE name='" << sql.escape_string(name) << "'";
	sql.get_rows(res);
	if(res.size() == 0){
		sql << "INSERT INTO experiments SET name='" << sql.escape_string(name) << "',description='',`partition`='', time_stamp=0";
		if (!actually_write){
			experiment_id = 0;
			debug+="Creating a new experiment named "; 
			debug+= name + "\n";
		}
		else experiment_id = sql.send_query_get_id();
	}
	else{
		if (!overwrite_previous)
			throw ns_ex("ns_experiment_capture_specification::submit_schedule_to_db::Experiment already exists and overwrite_previous set to false");
		if (!actually_write){
			debug+="Overwriting an existing experiment named "; 
			debug+= name + " with id = " + res[0][0] + "\n";
		}
		experiment_id = atol(res[0][0].c_str());
	}
	
	
	
	sql.clear_query();
	res.resize(0);
	try{
		for (unsigned int i = 0; i < samples.size(); i++){
			sql << "SELECT id, name, device_name,parameters FROM capture_samples WHERE experiment_id = " << experiment_id << " AND name='" << sql.escape_string(samples[i].sample_name) << "'";
			sql.get_rows(res);
			if(res.size() != 0){
				if (!overwrite_previous)
					throw ns_ex("ns_experiment_capture_specification::submit_schedule_to_db::Sample ") << samples[i].sample_name << " already exists and overwrite_previous set to false";
				

				samples[i].sample_id = atol(res[0][0].c_str());
				ns_processing_job job;
				job.sample_id = samples[i].sample_id;
				if (!actually_write)
							debug+="Deleting previous sample (id=" + ns_to_string(job.sample_id) + ").\n";
				else ns_handle_image_metadata_delete_action(job,sql);
			}
			
			sql << "INSERT INTO capture_samples SET experiment_id = " << ns_to_string(experiment_id) << ",name='" << sql.escape_string(samples[i].sample_name) << "'"
				<< ",device_name='" << sql.escape_string(samples[i].device) << "',parameters='" << sql.escape_string(samples[i].capture_parameters()) << "'"
				<< ",position_x=" << samples[i].x_position << ",position_y=" << samples[i].y_position
				<< ",size_x=" << samples[i].width << ",size_y="<<samples[i].height 
				<< ",incubator_name='" << sql.escape_string(incubator_assignments[samples[i].device]) 
				<< "',incubator_location='" << sql.escape_string(incubator_location_assignments[samples[i].device])
				<< "',desired_capture_duration_in_seconds=" <<samples[i].desired_minimum_capture_duration
				<< ",description='',model_filename='',reason_censored='',image_resolution_dpi='" << samples[i].resolution
				<< "',device_capture_period_in_seconds=" << capture_schedules[samples[i].internal_schedule_id].device_capture_period 
				<< ",number_of_consecutive_captures_per_sample=" << capture_schedules[samples[i].internal_schedule_id].number_of_consecutive_captures_per_sample
				<< ", time_stamp=0";
			if (!actually_write){
				samples[i].sample_id = 0;
				debug+="Creating a new sample: name:"; 
				debug += samples[i].sample_name + ", device:" + samples[i].device + "\n\tcapture parameters: \"";
				debug += samples[i].capture_parameters() + "\"\n";
			}
			else{
				samples[i].sample_id = sql.send_query_get_id();
			}
			
			sql.clear_query();
			res.resize(0);
		}
		
		sql.clear_query();
		res.resize(0);


		
		for (unsigned long i = 0; i < capture_schedules.size(); i++){
			unsigned long device_start_offset = 2*60;
			unsigned long s_offset(0);
			ns_device_schedule_list & device_schedules(capture_schedules[i].device_schedules);
			ns_device_start_offset_list & device_start_offsets(capture_schedules[i].device_start_offsets);
			for (ns_device_schedule_list::iterator p = device_schedules.begin(); p != device_schedules.end(); p++){
				device_start_offsets[p->first] = s_offset;
				s_offset+=device_start_offset;
				if (s_offset >= 20*60)
					s_offset = 0;
			}
		}

		for (unsigned int i = 0; i < capture_schedules.size(); i++){
			//compile correct start and stop time for each device.
			if (capture_schedules[i].start_time == 0) capture_schedules[i].start_time =  ns_current_time() + 2*60;
			capture_schedules[i].stop_time = 0;

			ns_device_schedule_list & device_schedules(capture_schedules[i].device_schedules);
			ns_device_start_offset_list & device_start_offsets(capture_schedules[i].device_start_offsets);
			for (ns_device_schedule_list::iterator p = device_schedules.begin(); p != device_schedules.end(); p++){
				const string & device_name = p->first;
				if (p->second.sample_groups.size() == 0) continue;
				p->second.effective_device_period = p->second.sample_groups.begin()->second.schedule->device_capture_period;
				p->second.number_of_consecutive_captures_per_sample = p->second.sample_groups.begin()->second.schedule->number_of_consecutive_captures_per_sample;
				if (p->second.effective_device_period == 0) throw ns_ex("Device period specified as zero!");
				if (p->second.number_of_consecutive_captures_per_sample == 0) throw ns_ex("Number of consecutive_captures_per_sample specified as zero!");
				
				//find earliest start time, stop time
				for (ns_device_capture_schedule::ns_sample_group_list::iterator q = p->second.sample_groups.begin(); q != p->second.sample_groups.end(); q++){
					if (q->second.schedule->start_time != 0 && q->second.schedule->start_time < ns_current_time())
						throw ns_ex("Start time specified is in the past") << q->second.schedule->start_time;
					if (q->first->start_time != 0)
						q->second.schedule->effective_start_time = q->first->start_time + device_start_offsets[device_name];
					else 
						q->second.schedule->effective_start_time = capture_schedules[i].start_time + device_start_offsets[device_name];
					
					q->second.schedule->effective_stop_time = q->second.schedule->effective_start_time + q->second.schedule->duration  + device_start_offsets[device_name]; 

					if (q->second.schedule->effective_start_time < capture_schedules[i].start_time)
						capture_schedules[i].start_time = q->second.schedule->effective_start_time;
					if (q->second.schedule->effective_stop_time > capture_schedules[i].stop_time)
						capture_schedules[i].stop_time = q->second.schedule->effective_stop_time;

					if (q->second.schedule->device_capture_period != p->second.effective_device_period)
						throw ns_ex("Invalid device capture period specified for device") << p->second.device_name;
					if (q->second.schedule->number_of_consecutive_captures_per_sample != p->second.number_of_consecutive_captures_per_sample)
						throw ns_ex("Invalid device consecutive samples per sample specified for device") << p->second.device_name;
					if (q->second.samples.size() == 0)
						throw ns_ex("Empty device sample group found!");
				}
			}
			std::set<string> incubators;
			
			for (ns_device_schedule_list::iterator device = device_schedules.begin(); device != device_schedules.end(); device++){
				incubators.insert(incubator_assignments[device->second.device_name]);
			}
			
			debug +=  string("Schedule Involves ") + ns_to_string(device_schedules.size()) + " devices in " + ns_to_string( incubators.size()) + " location";
			if (incubators.size() != 1)
				debug+="s";
			debug +=":";
			for(std::set<string>::const_iterator p = incubators.begin(); p != incubators.end(); p++){
				debug+=*p;
				debug+=",";
			}
			debug += "\n";
			for (ns_device_schedule_list::iterator device = device_schedules.begin(); device != device_schedules.end(); device++){
				if (!actually_write){
					debug+=string("\tDevice ") + device->second.device_name + " runs between " + 
						ns_format_time_string_for_human(capture_schedules[i].start_time + device_start_offsets[device->second.device_name]) + 
							" and " + 
							ns_format_time_string_for_human(capture_schedules[i].stop_time +  device_start_offsets[device->second.device_name]);
					debug+=" with a capture period of " + ns_capture_schedule::time_string(device->second.effective_device_period) + "\n";
				}
			}
		}
		ns_device_start_offset_list device_stop_times;
		ns_device_start_offset_list device_interval_at_stop;

		for (unsigned int i = 0; i < capture_schedules.size(); i++){
			ns_device_schedule_list & device_schedules(capture_schedules[i].device_schedules);
			ns_device_start_offset_list & device_start_offsets(capture_schedules[i].device_start_offsets);
			unsigned long number_of_captures(0);
			for (ns_device_schedule_list::iterator device = device_schedules.begin(); device != device_schedules.end(); device++){
				const string & device_name = device->first;
				
				ns_device_start_offset_list::iterator stop_time(device_stop_times.find(device_name));
				if(stop_time == device_stop_times.end()){
					device_stop_times[device_name] = 0;
					stop_time = device_stop_times.find(device_name);
				}
				

				if (!actually_write){
					debug+=string("Schedule for device ") + device->second.device_name + ":\n";
				}
				char have_started(false);
				ns_device_capture_schedule::ns_sample_group_list::iterator current_sample_group(device->second.sample_groups.begin());
				unsigned long current_sample_id = 0;


				for (unsigned long t = capture_schedules[i].start_time+device_start_offsets[device_name];  t < capture_schedules[i].stop_time+device_start_offsets[device_name];){

					ns_device_capture_schedule::ns_sample_group_list::iterator loop_start_group = current_sample_group;
					unsigned long loop_start_sample_id = current_sample_id;
					//find the next active sample at this time
					while(true){
						if (have_started){
							current_sample_id++;
							if (current_sample_id >= current_sample_group->second.samples.size()){
									current_sample_group++;
								current_sample_id = 0;
								if (current_sample_group == device->second.sample_groups.end())
									current_sample_group = device->second.sample_groups.begin();
							}
						}
						else have_started = true;
						if (current_sample_group->second.schedule->effective_start_time <= t+device->second.effective_device_period && 
							current_sample_group->second.schedule->effective_stop_time >= t+device->second.effective_device_period)
							break;
						if (current_sample_group == loop_start_group && current_sample_id == loop_start_sample_id )
							break;
					}

					//schedule the scans
					unsigned long dt_total = device->second.effective_device_period*device->second.number_of_consecutive_captures_per_sample;
					for (unsigned int dt = 0; dt < dt_total; dt+=device->second.effective_device_period){
						sql << "INSERT INTO capture_schedule SET experiment_id = " << experiment_id << ", scheduled_time = " << t+dt << ","
							<< "sample_id = " << current_sample_group->second.samples[current_sample_id]->sample_id << ", time_stamp = 0";
						if (!actually_write){
							sql.clear_query();
							debug +="\t";
							debug+= current_sample_group->second.samples[current_sample_id]->sample_name + ": " + ns_format_time_string_for_human(t+dt) + "\n";
						}
						else{
							sql.send_query();
							number_of_captures++;
						}
					}
					if (t+dt_total> stop_time->second){
						stop_time->second = t+dt_total;
						device_interval_at_stop[device_name] = device->second.effective_device_period;
					}

					t+=dt_total;
				}
				sql << "UPDATE experiments SET num_time_points = " << number_of_captures << ", first_time_point=" << capture_schedules[i].start_time
					<< ", last_time_point= " << capture_schedules[i].stop_time << " WHERE id=" << experiment_id;
				if (actually_write)
					sql.send_query();
				sql.clear_query();
			}
		}

		//start autoscans to keep scanners running after the end of the experiment
		for (ns_device_start_offset_list::iterator p = device_stop_times.begin(); p != device_stop_times.end(); p++){
			sql << "INSERT INTO autoscan_schedule SET device_name='" << p->first
					<< "', autoscan_start_time=" << (p->second + device_interval_at_stop[p->first])
					<< ", scan_interval = " << device_interval_at_stop[p->first];
				if (actually_write)
					sql.send_query();
				else{
					debug+="Scheduling an ";
					debug+=ns_to_string(device_interval_at_stop[p->first]) + " second autoscan sequence on device "
						+ p->first + " at " + ns_format_time_string_for_human(p->second + device_interval_at_stop[p->first]) + "\n";
				}
		}
		sql.send_query("COMMIT");

		sql.send_query("UPDATE experiments SET time_stamp = NOW() WHERE time_stamp = 0");
		sql.send_query("UPDATE capture_samples SET time_stamp = NOW() WHERE time_stamp = 0");
		sql.send_query("UPDATE capture_schedule SET time_stamp = NOW() WHERE time_stamp = 0");
		
	}
	catch(...){
		sql.send_query("ROLLBACK");
		throw;
	}
	return debug;
}