//Polls the database until no other process is marked as working on this
//region image, then atomically claims it by setting
//currently_under_processing=1.  The test-and-set is made race-free by
//SELECT ... FOR UPDATE inside an explicit transaction.
//Throws ns_ex if the record does not exist, or if the image is still busy
//after 20 attempts (~200 seconds).
void ns_image_server_captured_image_region::wait_for_finished_processing_and_take_ownership(ns_sql & sql){
	sql.set_autocommit(false);
	for (unsigned int i = 0; i < 20; i++){
		sql.send_query("BEGIN");
		sql << "SELECT currently_under_processing FROM sample_region_images WHERE id = " << region_images_id << " FOR UPDATE";
		ns_sql_result res;
		sql.get_rows(res);
		if (res.size() == 0){
			sql.send_query("COMMIT");
			sql.set_autocommit(true);
			throw ns_ex("ns_image_server_captured_image_region::Could not wait for finished processing on non-existant image");
		}
		if (res[0][0] == "0"){
			//nobody is processing the image; claim it and release the row lock.
			sql << "UPDATE sample_region_images SET currently_under_processing = 1 WHERE id=" << region_images_id;
			sql.send_query();
			sql.send_query("COMMIT");
			sql.set_autocommit(true);
			return;
		}
		//BUGFIX: commit (releasing the FOR UPDATE row lock) *before* sleeping.
		//Previously the lock was held throughout the 10 second sleep, which
		//blocked the very process we were waiting on from clearing the flag.
		sql.send_query("COMMIT");
		ns_thread::sleep(10);
	}
	sql.send_query("COMMIT");
	sql.set_autocommit(true);
	throw ns_ex("ns_image_server_captured_image_region::Timed out on waiting for image to be finished processing.");
}
//Creates (or refreshes) the database storage record for an aligned-path image
//of the specified frame.  Any previously stored file for the frame is deleted
//from disk, and the sample_region_image_aligned_path_images row is inserted
//or updated to point at the new image record.
const ns_image_server_image ns_image_server_captured_image_region::create_storage_for_aligned_path_image(const unsigned long frame_index,const unsigned long alignment_type,const ns_image_type & image_type, ns_sql & sql, const std::string & filename_suffix){
	ns_image_server_image im;
	//look for an existing record for this frame
	sql << "SELECT id,image_id FROM sample_region_image_aligned_path_images WHERE region_info_id=" << region_info_id << " AND frame_index = " << frame_index;
	ns_sql_result res;
	sql.get_rows(res);
	unsigned long db_id(0);
	if(res.size() == 0)
		im.id = 0;
	else{
		db_id = atol(res[0][0].c_str());
		im.load_from_db(atol(res[0][1].c_str()),&sql);
	}
	
	//delete the old file if it exists
	if (im.id != 0)
		image_server.image_storage.delete_from_storage(im,ns_delete_both_volatile_and_long_term,&sql);

	im.host_id = image_server.host_id();
	im.capture_time = ns_current_time();
	im.path = directory(&sql,ns_process_movement_posture_aligned_visualization);
	//make sure we have the metadata needed to build the filename.
	//BUGFIX(cleanup): the previous version nested a second, redundant
	//"if (region_info_id == 0)" test inside the first, which made its
	//else-branch unreachable dead code; the check has been flattened.
	//Behavior is unchanged: no region id -> throw, otherwise look up the
	//missing metadata.
	if (experiment_id == 0 || experiment_name.size() == 0 || sample_id == 0 || sample_name.size() == 0 || region_name.size() == 0){
		if (region_info_id == 0)
			throw ns_ex("ns_image_server_captured_image_region::create_storage_for_aligned_path_image()::No image information provided");
		ns_64_bit d;
		ns_region_info_lookup::get_region_info(region_info_id,&sql,region_name,sample_name,d,experiment_name,d);
	}
			
	im.filename = ns_format_base_image_filename(experiment_id,experiment_name,sample_id,sample_name,0,0,0)
					+ "=" + region_name + "=" + ns_to_string(region_info_id)
					+ "=" + ns_to_string(alignment_type) + "=" + filename_suffix + "=" + ns_to_string(frame_index);

	im.partition = image_server.image_storage.get_partition_for_experiment(experiment_id,&sql);
	ns_add_image_suffix(im.filename,image_type);

	im.save_to_db(im.id,&sql,true);
	//insert a new linking row, or update the existing one (MySQL INSERT ... SET syntax)
	if (db_id==0)
		sql << "INSERT INTO ";
	else sql << "UPDATE ";
	sql << "sample_region_image_aligned_path_images SET image_id=" << im.id << ", frame_index=" << frame_index << ", region_info_id=" << region_info_id;

	if (db_id!=0)
		sql << " WHERE id = " << db_id;
	sql.send_query();
	return im;
}
//Resolves which experiment (and optionally sample/region) should be loaded,
//walking up the region -> sample -> experiment hierarchy as needed, and
//initializes the samples list accordingly.
//Throws if no identifier at all is supplied, or if any lookup fails.
//note: load_excluded_regions is currently unused in this routine.
void ns_machine_analysis_data_loader::set_up_spec_to_load(const unsigned long & region_id, unsigned long & sample_id, unsigned long & experiment_id_a, ns_sql & sql, const bool load_excluded_regions){
	const bool region_specified(region_id != 0);
	const bool sample_specified(sample_id != 0);
	if (region_id == 0 && sample_id == 0 && experiment_id_a==0)
		throw ns_ex("No data requested!");
		
	//a region implies its sample...
	if (region_id != 0){
		sql << "SELECT sample_id FROM sample_region_image_info WHERE id = " << region_id;
		ns_sql_result res;
		sql.get_rows(res);
		if (res.size() == 0)
			throw ns_ex("ns_experiment_movement_results::load()::Could not load region information ") << region_id;
		sample_id = atol(res[0][0].c_str());
	}
	//...and a sample implies its experiment.
	if (sample_id != 0){
		sql << "SELECT experiment_id FROM capture_samples WHERE id = " << sample_id;
		ns_sql_result res;
		sql.get_rows(res);
		if (res.size() == 0)
			throw ns_ex("ns_experiment_movement_results::load()::Could not load sample information ") << sample_id;
		experiment_id_a = atol(res[0][0].c_str());
	}

	sql << "SELECT name FROM experiments WHERE id=" << experiment_id_a;
	ns_sql_result res;
	sql.get_rows(res);

	if (res.size() == 0)
		throw ns_ex("ns_experiment_movement_results::load()::Could not load experiment id=") << experiment_id_a;

	experiment_name_ = res[0][0];
	experiment_id_ = experiment_id_a;

	if (!region_specified && !sample_specified){
		//only an experiment was specified: load all of its non-censored samples.
		//BUGFIX(cleanup): a dead "AND id = sample_id" clause was removed here;
		//on this path neither a region nor a sample was specified, so
		//sample_id is provably 0 and the clause could never take effect.
		//An unused local std::vector<unsigned long> sample_ids was also removed.
		sql << "SELECT id FROM capture_samples WHERE censored=0 AND experiment_id = " << experiment_id_a;
		ns_sql_result samp;
		sql.get_rows(samp);
		samples.resize(samp.size());
		for (unsigned int i = 0; i < samp.size(); i++)
			samples[i].set_id(atol(samp[i][0].c_str()));
	}
	else{
		//add just the one sample (specified directly, or derived from the region)
		samples.resize(1,sample_id);
	}
	
}
//Prepares the database record describing where worm detection results for
//this region will be stored: fills in the image's host, time, on-disk path,
//filename (suffixed "_i" for interpolated results) and storage partition,
//then inserts a new images row or updates the existing one.
void ns_image_server_captured_image_region::create_storage_for_worm_results(ns_image_server_image & im, const bool interpolated,ns_sql & sql){
	im.host_id = image_server.host_id();
	im.capture_time = ns_current_time();
	//make sure the metadata needed to build directory names is loaded
	if (experiment_name.size() == 0 || experiment_id == 0 || sample_name.size() == 0)
		load_from_db(region_images_id,&sql);
	const std::string exp_dir(ns_image_server_captured_image::experiment_directory(experiment_name,experiment_id));
	const std::string reg_dir(region_base_directory(region_name,ns_sample_directory(sample_name,sample_id,exp_dir),exp_dir));
	im.path = reg_dir + DIR_CHAR + "detected_data";
	std::string fn(filename(&sql));
	if (interpolated)
		fn += "_i";
	fn += ".wrm";
	im.filename = fn;
	im.partition = image_server.image_storage.get_partition_for_experiment(experiment_id,&sql);

	sql.send_query("BEGIN");
	const bool record_exists(im.id != 0);
	if (record_exists){
		sql << "UPDATE images SET host_id = " << im.host_id  << ", creation_time=" << ns_current_time() << ", currently_under_processing=1, "
			<< "path = '" << sql.escape_string(im.path) << "', filename='" << sql.escape_string(im.filename) << "', partition='" << im.partition << "' "
			<< "WHERE id = " << im.id;
		sql.send_query();
	}
	else{
		//no record yet; insert one and remember its id.
		sql << "INSERT INTO images SET host_id = " << im.host_id << ", creation_time=" << ns_current_time() << ", currently_under_processing=1, "
			<< "path = '" << sql.escape_string(im.path) << "', filename='" << sql.escape_string(im.filename) << "', partition='" << im.partition << "' ";
		im.id = sql.send_query_get_id();
	}
	sql.send_query("COMMIT");
}
Exemple #5
0
//Pushes all locally buffered capture-schedule and non-schedule changes up to
//the central database, under the scheduler lock.
void ns_buffered_capture_scheduler::commit_local_changes_to_central_server(ns_local_buffer_connection & local_buffer, ns_sql & central_db) {
    //serialize access to the capture-schedule buffers
    ns_acquire_lock_for_scope lock(buffer_capture_scheduler_lock,__FILE__,__LINE__);

    local_buffer.clear_query();
    central_db.clear_query();

    //sample both hosts' clocks so changes can be timestamped consistently
    const std::string t_local(local_buffer.get_value("SELECT UNIX_TIMESTAMP(NOW())"));
    const std::string t_central(central_db.get_value("SELECT UNIX_TIMESTAMP(NOW())"));
    const ns_synchronized_time update_start_time(atol(t_local.c_str()),atol(t_central.c_str()));

    //now we update the local buffer to the central node.
    commit_all_local_schedule_changes_to_central_db(update_start_time,local_buffer,central_db);
    commit_all_local_non_schedule_changes_to_central_db(update_start_time,local_buffer,central_db);
    lock.release();
}
//Looks up the database record of the image produced by the specified
//processing step.  Throws if the region's row cannot be found, or if a
//mandatory step has not yet produced an image.  Static masks and heat maps
//are allowed to be absent (an empty record is returned).
const ns_image_server_image ns_image_server_captured_image_region::request_processed_image(const ns_processing_task & task, ns_sql & sql){
	//no database information is stored for training set images.
//	if (task == ns_process_add_to_training_set)
	//	throw ns_ex("ns_image_server_captured_image_region::Data on training set images is not stored.");

	//some steps store their output on the region-info record rather than the
	//per-timepoint record; pick the correct row id for the step's table.
	const std::string db_table(ns_processing_step_db_table_name(task));
	unsigned int db_row_id = region_images_id;
	if (db_table == "sample_region_image_info")
		db_row_id = region_info_id;

	sql << "SELECT " << ns_processing_step_db_column_name(task) << " FROM " << db_table << " WHERE id = " << db_row_id;
	ns_sql_result res;
	sql.get_rows(res);
	if (res.size() == 0)
		throw ns_ex("Sample region image ") << region_info_id << " could not be found in the database.";

	ns_image_server_image im;
	im.id = atol(res[0][0].c_str());
	if (im.id == 0 && task != ns_process_static_mask && task != ns_process_heat_map)
		throw ns_ex("ns_image_server_captured_image_region::Required image processing step, ") << ns_processing_task_to_string(task) << " has not yet been completed.";

	if (im.id != 0)
		im.load_from_db(im.id,&sql);
	return im;
}
//Loads analysis data for every region of a sample (or for one specific
//region).  Regions that fail to load are reported to stderr and skipped
//rather than aborting the whole sample.
//Throws if a specifically requested region cannot be found.
void ns_machine_analysis_sample_data::load(const ns_death_time_annotation_set::ns_annotation_type_to_load & annotation_type_to_load,const unsigned long sample_id, const ns_region_metadata & sample_metadata,ns_sql & sql, 
	const unsigned long specific_region_id, const bool include_excluded_regions, const  ns_machine_analysis_region_data::ns_loading_details & loading_details){
	//BUGFIX(cleanup): removed an unused local "calculate_missing_data".
	device_name_ = sample_metadata.device;
	//find all non-censored regions belonging to the sample
	ns_sql_result reg;
	sql << "SELECT r.id FROM sample_region_image_info as r WHERE r.sample_id = " << sample_id << " AND r.censored=0 ";
	if (!include_excluded_regions)
			sql << " AND r.excluded_from_analysis=0";
	if (specific_region_id!=0)
		sql << " AND r.id = " << specific_region_id;
	sql << " ORDER BY r.name";

	sql.get_rows(reg);
	if (reg.empty() && specific_region_id!=0)
		throw ns_ex("Could not identify region ") << specific_region_id << ".  Was it excluded?";
	regions.reserve(reg.size());
	for (unsigned int i = 0; i < reg.size(); i++){
		try{
			const unsigned int s = regions.size();
			regions.resize(s+1);
			const unsigned long region_id = atol(reg[i][0].c_str());
			regions[s].metadata = sample_metadata;
			regions[s].metadata.load_only_region_info_from_db(region_id,"",sql);
			regions[s].metadata.technique = "Lifespan Machine";
			regions[s].load_from_db(annotation_type_to_load,loading_details,region_id,sql);
		}
		catch(ns_ex & ex){
			//a bad region shouldn't abort the whole sample; report it and
			//drop the partially loaded entry.
			//BUGFIX: guard against dereferencing rbegin() / popping an empty
			//vector if the exception fired before the new entry was added.
			if (!regions.empty()){
				std::cerr << regions.rbegin()->metadata.sample_name << "::" << regions.rbegin()->metadata.region_name << ": " << ex.text() << "\n";
				regions.pop_back();
			}
		}
	}
	sample_name_ = sample_metadata.sample_name;
	sample_id_ = sample_id;
}
//For every usable frame of the specified region, loads the capture time and
//the id of the thresholded image, populating time_points_storage and the
//time_points pointer list.  Frames flagged as problems or censored, or
//lacking a threshold image, are excluded; when only_use_processed_frames is
//set, frames without detection results or a region-vis image are excluded
//as well.
void ns_worm_multi_frame_interpolation::load_all_region_worms(const unsigned int region_info_id, ns_sql & sql, bool only_use_processed_frames){

	cerr << "Downloading Dataset...\n";
	//note: several selected columns (record id, detection/interpolation/
	//movement result ids) are not currently consumed by this routine.
	sql << "SELECT capture_time, id, worm_detection_results_id, worm_interpolation_results_id, worm_movement_id, "
		<< "op" << (unsigned int)ns_process_threshold << "_image_id FROM sample_region_images "
		<< "WHERE region_info_id = " << region_info_id;
	if (only_use_processed_frames)
		sql << " AND worm_detection_results_id != 0 AND op" << (unsigned int)ns_process_region_vis << "_image_id!=0";
	sql	<< " AND op" << (unsigned int)ns_process_threshold << "_image_id!=0"
		<< " AND problem = 0 AND censored = 0 ";
	sql << " ORDER BY capture_time ASC";
	ns_sql_result res;
	sql.get_rows(res);
	time_points_storage.resize(res.size());
	if (time_points_storage.empty())
		return;
	ns_progress_reporter pr(time_points_storage.size(),10);
	for (unsigned int i = 0; i < time_points_storage.size(); i++){
		pr(i);
		time_points_storage[i].time = atol(res[i][0].c_str());
		time_points_storage[i].threshold_image.id = atol(res[i][5].c_str());
		//pointers stay valid: storage was sized once above and never grows.
		time_points.push_back(&time_points_storage[i]);
	}
	pr(time_points_storage.size());
}
//Reads the time-series denoising flag for a region from the database.
//A flag value of "1" requests subtraction of the median movement score.
//Throws ns_ex if the region does not exist.
ns_time_series_denoising_parameters ns_time_series_denoising_parameters::load_from_db(const unsigned long region_id, ns_sql & sql){
	sql << "SELECT time_series_denoising_flag FROM sample_region_image_info WHERE id = " << region_id;
	ns_sql_result res;
	sql.get_rows(res);
	if (res.empty())
		throw ns_ex("ns_time_series_denoising_parameters::load_from_db()::Could not find region ") << region_id << " in db";
	ns_time_series_denoising_parameters params;
	params.subtract_out_median_movement_score_from_time_series = (res[0][0] == "1");
	return params;
}
//Reads the time-series denoising flag for a region and interprets it as a
//movement-score normalization type.  Throws ns_ex if the region does not
//exist.  (Snippet variant: duplicates the name of the preceding definition;
//they come from different versions of the codebase.)
ns_time_series_denoising_parameters ns_time_series_denoising_parameters::load_from_db(const unsigned long region_id, ns_sql & sql){
	sql << "SELECT time_series_denoising_flag FROM sample_region_image_info WHERE id = " << region_id;
	ns_sql_result res;
	sql.get_rows(res);
	if (res.empty())
		throw ns_ex("ns_time_series_denoising_parameters::load_from_db()::Could not find region ") << region_id << " in db";
	ns_time_series_denoising_parameters params;
	params.movement_score_normalization = (ns_time_series_denoising_parameters::ns_movement_score_normalization_type)atol(res[0][0].c_str());
	return params;
}
//Returns the region image recorded as the "long" time point of this image's
//movement characterization record.  Throws if the record is missing or has
//no long time point set.
ns_image_server_captured_image_region ns_image_server_captured_image_region::get_next_long_time_point(ns_sql & sql) const{
	sql << "SELECT region_id_long FROM worm_movement WHERE id=" << movement_characterization_id;
	ns_sql_result res;
	sql.get_rows(res);
	if (res.empty())
		throw ns_ex("ns_image_server_captured_image_region::Could not locate image's movement characterization record (id=") << movement_characterization_id;
	if (res[0][0] == "0")
		throw ns_ex("ns_image_server_captured_image_region::image's movement characterization record does not have a long time point specified!");
	ns_image_server_captured_image_region next_region;
	next_region.load_from_db(atol(res[0][0].c_str()),&sql);
	return next_region;
}
Exemple #12
0
//Loads statistics for every non-censored, non-excluded sample in an
//experiment, rebuilds the by-id lookup table, and recomputes scanner
//behavior from the loaded data.
void ns_capture_sample_statistics_set::load_whole_experiment(const unsigned long experiment_id, ns_sql & sql){
	sql << "SELECT id FROM capture_samples WHERE censored=0 AND excluded_from_analysis=0 AND experiment_id=" << experiment_id;
	ns_sql_result res;
	sql.get_rows(res);
	samples.resize(res.size());
	samples_sorted_by_id.clear();
	for (unsigned int j = 0; j < res.size(); j++){
		samples[j].load_from_db(atol(res[j][0].c_str()),sql);
		//pointers stay valid: samples was sized once above and never grows.
		samples_sorted_by_id[samples[j].sample_id] = &samples[j];
	}
	calculate_scanner_behavior();
}
//Loads the per-timepoint image statistics for a region, together with its
//censoring/problem flags and capture times, ordered chronologically.
void ns_capture_sample_region_data::load_from_db(const unsigned long region_id_, 
					const ns_region_metadata & metadata_,
					const bool region_is_censored,
					const bool region_is_excluded,
					ns_sql & sql){
	metadata = metadata_;
	metadata.region_id = region_id_;
	censored = region_is_censored;
	excluded = region_is_excluded;

	sql << "SELECT " << ns_image_statistics::produce_sql_query_stub() 
						<< ", sample_region_images.censored, sample_region_images.problem, sample_region_images.capture_time FROM image_statistics,sample_region_images WHERE sample_region_images.region_info_id=" << metadata.region_id
						<< " AND sample_region_images.image_statistics_id!=0 AND sample_region_images.image_statistics_id = image_statistics.id ORDER BY sample_region_images.capture_time ASC";
	ns_sql_result res;
	sql.get_rows(res);
	timepoints.resize(res.size());
	//the statistics stub occupies the leading columns of each row; our three
	//extra columns (censored, problem, capture_time) follow it.
	const unsigned int stub_cols(ns_image_statistics::sql_query_stub_field_count());
	for (unsigned int i = 0; i < res.size(); i++){
		timepoints[i].statistics.from_sql_result(res[i]);
		timepoints[i].timepoint_is_censored = atol(res[i][stub_cols].c_str())!=0;
		timepoints[i].timepoint_has_a_problem = atol(res[i][stub_cols+1].c_str())!=0;
		timepoints[i].time = atol(res[i][stub_cols+2].c_str());
	}
}
Exemple #14
0
/*
ns_64_bit ns_atoi64(const char * s){
  #ifdef _WIN32
  return _atoi64(s);
#else
  return atoll(s);
#endif
}*/
//Loads identifying information and the full scan history for a capture
//sample: per-scan timing fields (stored in microseconds) are converted to
//minutes/seconds, image statistics are read when available, and all dates
//are normalized relative to the sample's first scheduled scan.
void ns_capture_sample_image_statistics::load_from_db(unsigned long id,ns_sql & sql){
		
	sample_id = id;
	sql << "SELECT name,device_name, position_x,position_y,size_x,size_y, experiment_id FROM capture_samples WHERE id="<<id;
	ns_sql_result res;
	sql.get_rows(res);
	if(res.size()==0)
		throw ns_ex("Could not find sample id ") << sample_id << " in the db.";
	sample_name = res[0][0];
	device_name = res[0][1];
	position.x = atof(res[0][2].c_str());
	position.y = atof(res[0][3].c_str());
	size.x = atof(res[0][4].c_str());
	size.y = atof(res[0][5].c_str());
	sql << "SELECT name FROM experiments WHERE id = " << res[0][6];
	experiment_name = sql.get_value();
	//pull the full scan schedule, joined against any captured image and its
	//image statistics (both may be absent, hence the LEFT OUTER JOINs, whose
	//columns come back as the string "NULL" when no row matches).
	sql << "SELECT s.scheduled_time, s.time_at_start, s.time_at_finish, s.missed, s.problem,s.time_at_imaging_start,"
		 "s.time_spent_reading_from_device,s.time_spent_writing_to_disk,"
		 "s.total_time_during_read,s.time_during_transfer_to_long_term_storage,"
		 "s.time_during_deletion_from_local_storage, "
		 "s.total_time_spent_during_programmed_delay,"
		 "t.intensity_average,t.intensity_std,t.intensity_entropy, t.intensity_top_percentile,t.intensity_bottom_percentile, "
		 "i.registration_vertical_offset,i.registration_horizontal_offset "
		 "FROM (capture_schedule as s LEFT OUTER JOIN captured_images as i ON i.id = s.captured_image_id) "
		 "LEFT OUTER JOIN image_statistics as t ON  i.image_statistics_id = t.id "
		 "WHERE s.sample_id=" << sample_id << " AND s.scheduled_time < UNIX_TIMESTAMP(NOW()) ORDER BY s.scheduled_time ASC";
	sql.get_rows(res);
	scans.resize(res.size());
	for (unsigned int i = 0; i < scans.size(); i++){
		scans[i].scheduled_time_date = atol(res[i][0].c_str());
		scans[i].start_time_date = atol(res[i][1].c_str());
		scans[i].stop_time_date = atol(res[i][2].c_str());
		scans[i].data_start_time_date = atol(res[i][5].c_str());
		scans[i].scan_position = position;
		scans[i].scan_size = size;

		scans[i].missed = (res[i][3]!="0");
		scans[i].problem = (res[i][4]!="0");
	
		//timing columns are stored in microseconds; convert to minutes or seconds.
		scans[i].time_spent_reading_from_device = ns_atoi64(res[i][6].c_str())/1000.0/1000.0/60;
		scans[i].time_spent_writing_to_disk = ns_atoi64(res[i][7].c_str())/1000.0/1000.0;
		scans[i].total_time_during_read = ns_atoi64(res[i][8].c_str())/1000.0/1000.0/60.0;
		scans[i].total_time_spent_during_programmed_delay = ns_atoi64(res[i][11].c_str())/1000.0/60.0;

		scans[i].time_during_transfer_to_long_term_storage = ns_atoi64(res[i][9].c_str())/1000.0/1000.0;
		scans[i].time_during_deletion_from_local_storage = ns_atoi64(res[i][10].c_str())/1000.0/1000.0;
		//registration offsets may be "NULL" (no captured image); atol then yields 0.
		scans[i].registration_offset.y = atol(res[i][17].c_str());
		scans[i].registration_offset.x = atol(res[i][18].c_str());
		//BUGFIX: test column 12 (t.intensity_average, from the LEFT OUTER
		//JOINed image_statistics table) for NULL rather than column 10
		//(s.time_during_deletion_from_local_storage, a capture_schedule
		//field that is never NULL).  Previously, rows lacking image
		//statistics parsed the string "NULL" for all intensity fields.
		if (res[i][12] != "NULL"){
			scans[i].image_stats.image_statistics.mean = atof(res[i][12].c_str());
			scans[i].image_stats.image_statistics.variance = atof(res[i][13].c_str());
			scans[i].image_stats.image_statistics.entropy = atof(res[i][14].c_str());
			scans[i].image_stats.image_statistics.top_percentile_average= atof(res[i][15].c_str());
			scans[i].image_stats.image_statistics.bottom_percentile_average= atof(res[i][16].c_str());
		}
		else{		
			//no image statistics recorded for this scan
			scans[i].image_stats.image_statistics.mean = 0;
			scans[i].image_stats.image_statistics.variance = 0;
			scans[i].image_stats.image_statistics.entropy = 0;
			scans[i].image_stats.image_statistics.top_percentile_average = 0;
			scans[i].image_stats.image_statistics.bottom_percentile_average = 0;
		}
	}

	if (scans.size() == 0)
		return;
	//normalize times to start of experiment
	date_of_first_sample_scan = scans[0].scheduled_time_date;

	for (unsigned int i = 0; i < scans.size(); i++){
		scans[i].date_of_first_sample_scan = date_of_first_sample_scan;
		if (scans[i].scheduled_time_date != 0)
			scans[i].scheduled_time=scans[i].scheduled_time_date-date_of_first_sample_scan;
		else scans[i].scheduled_time = 0;
		if (scans[i].start_time_date != 0)
			scans[i].start_time=scans[i].start_time_date-date_of_first_sample_scan;
		else scans[i].start_time = 0;
		if (scans[i].stop_time_date != 0)
			scans[i].stop_time=scans[i].stop_time_date-date_of_first_sample_scan;
		else scans[i].stop_time = 0;
		if (scans[i].data_start_time_date != 0)
			scans[i].data_start_time=scans[i].data_start_time_date - date_of_first_sample_scan;
		else scans[i].data_start_time = 0;
	}
		
	calculate_running_statistics();
}
//Registers worm detection results for this region image: optionally computes
//and stores summary statistics over detected worms and rejected (non-worm)
//objects, writes the detailed per-worm results to disk, and updates the
//region's database record to point at the new results.
//Note: despite the "_variance" field names, the finalized values are
//standard deviations (sqrt is taken after the E[x^2]-E[x]^2 computation).
void ns_image_server_captured_image_region::register_worm_detection(ns_image_worm_detection_results * wi, const bool interpolated,ns_sql & sql,const bool calculate_stats){
		
	ns_image_statistics stats;
	ns_detected_worm_stats w_stats;
	if (calculate_stats){
		//save a summary of the current image to the image_statistics table.
		//means and variances are first accumulated as sums and sums of
		//squares, then normalized below.
		const std::vector<const ns_detected_worm_info *> & actual_worms(wi->actual_worm_list());

		stats.worm_statistics.count = actual_worms.size();
		for (unsigned int i = 0; i < actual_worms.size(); i++){
			w_stats = actual_worms[i]->generate_stats();
			stats.worm_statistics.area_mean			+=w_stats[ns_stat_pixel_area];
			stats.worm_statistics.length_mean			+=w_stats[ns_stat_spine_length];
			stats.worm_statistics.width_mean			+=w_stats[ns_stat_average_width];

			stats.worm_statistics.absolute_intensity.mean		+=w_stats[ns_stat_absolute_intensity_average];	
			stats.worm_statistics.absolute_intensity.variance	+=w_stats[ns_stat_absolute_intensity_variance];
			stats.worm_statistics.absolute_intensity.bottom_percentile_average	+=w_stats[ns_stat_absolute_intensity_dark_pixel_average];
			stats.worm_statistics.absolute_intensity.entropy	+=w_stats[ns_stat_absolute_intensity_roughness_1];
			stats.worm_statistics.absolute_intensity.top_percentile_average =0;

			stats.worm_statistics.relative_intensity.mean		+=w_stats[ns_stat_relative_intensity_average];	
			stats.worm_statistics.relative_intensity.variance	+=w_stats[ns_stat_relative_intensity_variance];
			stats.worm_statistics.relative_intensity.bottom_percentile_average	+=w_stats[ns_stat_relative_intensity_dark_pixel_average];
			stats.worm_statistics.relative_intensity.entropy	+=w_stats[ns_stat_relative_intensity_roughness_1];
			stats.worm_statistics.relative_intensity.top_percentile_average =0;


			stats.worm_statistics.area_variance			+=w_stats[ns_stat_pixel_area]*w_stats[ns_stat_pixel_area];
			stats.worm_statistics.length_variance		+=w_stats[ns_stat_spine_length]*w_stats[ns_stat_spine_length];
			stats.worm_statistics.width_variance		+=w_stats[ns_stat_average_width]*w_stats[ns_stat_average_width];
		}	

		const std::vector<const ns_detected_worm_info *> & not_worms(wi->non_worm_list());
		stats.non_worm_statistics.count = not_worms.size();

		for (unsigned int i = 0 ; i < not_worms.size(); i++){
			w_stats = not_worms[i]->generate_stats();
			stats.non_worm_statistics.area_mean			+=w_stats[ns_stat_pixel_area];
			stats.non_worm_statistics.length_mean			+=w_stats[ns_stat_spine_length];
			stats.non_worm_statistics.width_mean			+=w_stats[ns_stat_average_width];

			stats.non_worm_statistics.absolute_intensity.mean		+=w_stats[ns_stat_absolute_intensity_average];	
			stats.non_worm_statistics.absolute_intensity.variance	+=w_stats[ns_stat_absolute_intensity_variance];
			stats.non_worm_statistics.absolute_intensity.bottom_percentile_average	+=w_stats[ns_stat_absolute_intensity_dark_pixel_average];
			stats.non_worm_statistics.absolute_intensity.entropy	+=w_stats[ns_stat_absolute_intensity_roughness_1];
			stats.non_worm_statistics.absolute_intensity.top_percentile_average =0;

			stats.non_worm_statistics.relative_intensity.mean		+=w_stats[ns_stat_relative_intensity_average];	
			stats.non_worm_statistics.relative_intensity.variance	+=w_stats[ns_stat_relative_intensity_variance];
			stats.non_worm_statistics.relative_intensity.bottom_percentile_average	+=w_stats[ns_stat_relative_intensity_dark_pixel_average];
			stats.non_worm_statistics.relative_intensity.entropy	+=w_stats[ns_stat_relative_intensity_roughness_1];
			stats.non_worm_statistics.relative_intensity.top_percentile_average =0;


			stats.non_worm_statistics.area_variance			+=w_stats[ns_stat_pixel_area]*w_stats[ns_stat_pixel_area];
			stats.non_worm_statistics.length_variance		+=w_stats[ns_stat_spine_length]*w_stats[ns_stat_spine_length];
			stats.non_worm_statistics.width_variance		+=w_stats[ns_stat_average_width]*w_stats[ns_stat_average_width];
		}

		if (stats.worm_statistics.count > 0){
			stats.worm_statistics.area_variance		/=stats.worm_statistics.count;
			stats.worm_statistics.length_variance		/=stats.worm_statistics.count;
			stats.worm_statistics.width_variance		/=stats.worm_statistics.count;
			stats.worm_statistics.absolute_intensity.variance /=stats.worm_statistics.count;
			stats.worm_statistics.relative_intensity.variance /=stats.worm_statistics.count;
		
			stats.worm_statistics.area_mean			/=stats.worm_statistics.count;
			stats.worm_statistics.length_mean			/=stats.worm_statistics.count;
			stats.worm_statistics.width_mean			/=stats.worm_statistics.count;
			stats.worm_statistics.absolute_intensity.mean		/=stats.worm_statistics.count;
			stats.worm_statistics.relative_intensity.mean /=stats.worm_statistics.count;

			// E[x^2]-E[x]^2
			stats.worm_statistics.area_variance		-= stats.worm_statistics.area_mean*stats.worm_statistics.area_mean;
			stats.worm_statistics.length_variance		-= stats.worm_statistics.length_mean*stats.worm_statistics.length_mean;
			stats.worm_statistics.width_variance		-= stats.worm_statistics.width_mean*stats.worm_statistics.width_mean;
			stats.worm_statistics.absolute_intensity.variance	-= stats.worm_statistics.absolute_intensity.mean*stats.worm_statistics.absolute_intensity.mean;
			stats.worm_statistics.relative_intensity.variance	-= stats.worm_statistics.relative_intensity.mean*stats.worm_statistics.relative_intensity.mean;

			stats.worm_statistics.area_variance		= sqrt(stats.worm_statistics.area_variance);
			stats.worm_statistics.length_variance		= sqrt(stats.worm_statistics.length_variance);
			stats.worm_statistics.width_variance		= sqrt(stats.worm_statistics.width_variance);
			stats.worm_statistics.absolute_intensity.variance = sqrt(stats.worm_statistics.absolute_intensity.variance);
			stats.worm_statistics.relative_intensity.variance = sqrt(stats.worm_statistics.relative_intensity.variance);
		}

		if (stats.non_worm_statistics.count > 0){
			stats.non_worm_statistics.area_variance		/=stats.non_worm_statistics.count;
			stats.non_worm_statistics.length_variance		/=stats.non_worm_statistics.count;
			stats.non_worm_statistics.width_variance		/=stats.non_worm_statistics.count;
			stats.non_worm_statistics.absolute_intensity.variance /=stats.non_worm_statistics.count;
			stats.non_worm_statistics.relative_intensity.variance /=stats.non_worm_statistics.count;
		
			stats.non_worm_statistics.area_mean			/=stats.non_worm_statistics.count;
			stats.non_worm_statistics.length_mean			/=stats.non_worm_statistics.count;
			stats.non_worm_statistics.width_mean			/=stats.non_worm_statistics.count;
			stats.non_worm_statistics.absolute_intensity.mean		/=stats.non_worm_statistics.count;
			stats.non_worm_statistics.relative_intensity.mean		/=stats.non_worm_statistics.count;

			// E[x^2]-E[x]^2
			stats.non_worm_statistics.area_variance		-= stats.non_worm_statistics.area_mean*stats.non_worm_statistics.area_mean;
			stats.non_worm_statistics.length_variance		-= stats.non_worm_statistics.length_mean*stats.non_worm_statistics.length_mean;
			stats.non_worm_statistics.width_variance		-= stats.non_worm_statistics.width_mean*stats.non_worm_statistics.width_mean;
			stats.non_worm_statistics.absolute_intensity.variance	-= stats.non_worm_statistics.absolute_intensity.mean*stats.non_worm_statistics.absolute_intensity.mean;
			stats.non_worm_statistics.relative_intensity.variance	-= stats.non_worm_statistics.relative_intensity.mean*stats.non_worm_statistics.relative_intensity.mean;

			//BUGFIX: take the square root here (as is done for
			//worm_statistics above) to convert variance into a standard
			//deviation; previously these fields were erroneously divided by
			//the count a second time, producing meaningless values.
			stats.non_worm_statistics.area_variance		= sqrt(stats.non_worm_statistics.area_variance);
			stats.non_worm_statistics.length_variance		= sqrt(stats.non_worm_statistics.length_variance);
			stats.non_worm_statistics.width_variance		= sqrt(stats.non_worm_statistics.width_variance);
			stats.non_worm_statistics.absolute_intensity.variance = sqrt(stats.non_worm_statistics.absolute_intensity.variance);
			stats.non_worm_statistics.relative_intensity.variance = sqrt(stats.non_worm_statistics.relative_intensity.variance);
		}
	
		//reuse the region's existing statistics record if there is one.
		sql << "SELECT image_statistics_id FROM sample_region_images WHERE id= " << region_images_id;
		ns_sql_result res;
		sql.get_rows(res);
		if (res.size() == 0)
			stats.db_id = 0;
		else stats.db_id = atol(res[0][0].c_str());
		stats.submit_to_db(stats.db_id,sql,false,true);
	}
	
	ns_sql_result res;
	//save a detailed summary of each worm to disk.
	//if a results object has previously been made, overwrite it.  Otherwise, create a new record.
	sql << "SELECT worm_detection_results_id, worm_interpolation_results_id FROM sample_region_images WHERE id = " << region_images_id;

	sql.get_rows(res);
	if (res.size() != 0){
		if (!interpolated)	wi->id = atol(res[0][0].c_str());
		else				wi->id = atol(res[0][1].c_str());
	}
	else wi->id = 0;
	wi->save_to_disk(*this,interpolated,sql);
	//update the region record to reflect the new results
	sql << "UPDATE sample_region_images SET ";
	if (calculate_stats)
		sql << "image_statistics_id=" << stats.db_id <<",";
	if (!interpolated) sql << "worm_detection_results_id=";
	else			   sql << "worm_interpolation_results_id=";
	sql << wi->id << " WHERE id = " << region_images_id;
	sql.send_query();

	//DEPRECIATED: invalidation of worm_movement records is no longer performed.
	if (0){
		//Update worm_movement results list the results have been made invalid by new changes
		sql << "SELECT id FROM worm_movement WHERE region_id_short_1 = " << region_images_id;
		ns_sql_result a;
		sql.get_rows(a);
		for (unsigned int i = 0; i < (unsigned int)a.size(); i++){
			sql << "UPDATE worm_movement SET calculated = 0 WHERE id=" << a[i][0];
			sql.send_query();
		}
		sql << "SELECT id FROM worm_movement WHERE region_id_short_2 = " << region_images_id;
		ns_sql_result b;
		sql.get_rows(b);		
		for (unsigned int i = 0; i < (unsigned int)b.size(); i++){
			sql << "UPDATE worm_movement SET calculated = 0 WHERE id=" << b[i][0];
			sql.send_query();
		}
		sql << "SELECT id FROM worm_movement WHERE region_id_long = " << region_images_id;
		ns_sql_result c;
		sql.get_rows(c);
		for (unsigned int i = 0; i < (unsigned int)c.size(); i++){
			sql << "UPDATE worm_movement SET calculated = 0 WHERE id=" << c[i][0];
			sql.send_query();
		}
	}
	return;
}
Exemple #16
0
//Pushes every buffered_capture_schedule row that changed in the local buffer
//during the window (time_of_last_update_from_central_db, update_start_time]
//up to the central database.  Runs in three passes:
//  1) for each changed row, map locally-created captured-image and problem-log
//     ids onto central ids, creating new central records where needed;
//  2) write the updated schedule rows to the central db and prune the
//     now-redundant buffer rows;
//  3) flag rows whose upload failed (uploaded_to_central_db=3) so they are not
//     retried forever, and log the failure.
//Finally mirrors any capture_samples rows modified in the same window.
void ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db(const ns_synchronized_time & update_start_time,ns_local_buffer_connection & local_buffer_sql, ns_sql & central_db) {
    //lazily initialize the last-sync time and the buffered table schema
    if (time_of_last_update_from_central_db.local_time == ns_default_update_time)
        get_last_update_time(local_buffer_sql);
    buffered_capture_schedule.load_if_needed(&local_buffer_sql);

    ns_sql_result updated_data;

    //select rows altered after the previous sync but no later than the start of
    //this update; writes committed mid-update are picked up on the next pass
    const std::string altered_data_condition(
        std::string("time_stamp > FROM_UNIXTIME(") + ns_to_string(time_of_last_update_from_central_db.local_time) +
        ") AND time_stamp <= FROM_UNIXTIME(" + ns_to_string(update_start_time.local_time) + ") ");

    //timestamp written back to the rows we touch; deliberately the OLD remote
    //time so these writes are not mistaken for fresh central-side changes
    const unsigned long new_timestamp(time_of_last_update_from_central_db.remote_time);

    //uploaded_to_central_db == 3 marks rows that already failed to upload; skip them
    ns_get_all_column_data_from_table("buffered_capture_schedule",buffered_capture_schedule.table_format.column_names,
                                      std::string("WHERE ") + altered_data_condition + " AND uploaded_to_central_db != 3",
                                      updated_data,&local_buffer_sql);

    //one mapping per changed row: holds local vs central ids for the row's
    //captured image, image record, and problem-log entry, plus any error
    std::vector<ns_db_key_mapping> mappings(updated_data.size());
    if (updated_data.size() > 8)
        image_server.register_server_event(ns_image_server_event("ns_buffered_capture_scheduler::Committing ") << updated_data.size() << " recorded capture events to the central database.",&central_db);
    std::vector<ns_ex *> errors;
    //pass 1: build the id mappings and create central records as needed
    for (unsigned long i = 0; i < updated_data.size(); i++) {
        try {
            unsigned long captured_image_id = atol(updated_data[i][buffered_capture_schedule.image_id_column].c_str());
            unsigned long problem_id = atol(updated_data[i][buffered_capture_schedule.problem_column].c_str());

            //ids the central db currently holds for this schedule entry (0 if none)
            unsigned long central_captured_image_id(0),
                     central_problem_id(0);
            if (captured_image_id != 0 || problem_id != 0) {
                central_db << "SELECT captured_image_id,problem FROM capture_schedule WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                ns_sql_result res;
                central_db.get_rows(res);
                if (res.size() == 0)
                    throw ns_ex("Could not find capture schedule entry in central db for sample id " ) << updated_data[i][buffered_capture_schedule.id_column] << " finishing at time " << updated_data[i][buffered_capture_schedule.time_at_finish_column];
                central_captured_image_id = atol(res[0][0].c_str());
                central_problem_id = atol(res[0][1].c_str());
            }

            const bool need_to_make_new_capture_image(captured_image_id != 0 && central_captured_image_id != captured_image_id);
            //we need to make new entries in the central database for any new images or events
            if (need_to_make_new_capture_image) {
                //load the locally-buffered captured image (and its image record)
                //so copies can be created centrally below
                mappings[i].captured_image.load_from_db(captured_image_id,&local_buffer_sql);
                mappings[i].old_captured_image = mappings[i].captured_image;
                mappings[i].old_image = mappings[i].image;
                if (mappings[i].captured_image.capture_images_image_id != 0)
                    mappings[i].image.load_from_db(mappings[i].captured_image.capture_images_image_id,&local_buffer_sql);


            }
            else {
                //central record already matches; just adopt the central id
                mappings[i].old_image = mappings[i].image;
                mappings[i].old_captured_image = mappings[i].captured_image;
                mappings[i].captured_image.captured_images_id = central_captured_image_id;
            }

            bool need_to_make_new_problem(problem_id != 0 && central_problem_id != problem_id);
            if (need_to_make_new_problem) {
                //pull the problem text out of the local event-log buffer so it
                //can be re-registered in the central event log
                local_buffer_sql << "SELECT id,event,time,minor FROM buffered_host_event_log WHERE id = " << updated_data[i][buffered_capture_schedule.problem_column];
                ns_sql_result res;
                local_buffer_sql.get_rows(res);
                mappings[i].old_problem_id = mappings[i].problem_id;
                if (res.size() == 0) {
                    //the buffered event vanished; record that fact centrally and
                    //use the resulting event id as the problem id instead
                    mappings[i].problem_id = image_server.register_server_event(ns_ex("Could not find problem id ") << updated_data[i][buffered_capture_schedule.problem_column] << " in local database buffer!",&central_db);
                    need_to_make_new_problem=false;
                }
                else {
                    mappings[i].problem_text = res[0][1];
                    mappings[i].problem_time = atol(res[0][2].c_str());
                    mappings[i].problem_minor = res[0][3] != "0";
                }
            }
            else {
                mappings[i].old_problem_id = mappings[i].problem_id;
                mappings[i].problem_id = central_problem_id;
            }

            //create the central copies; save with id 0 so the central db
            //assigns fresh ids, which are captured back into the mapping
            if (need_to_make_new_capture_image && mappings[i].image.id != 0) {
                mappings[i].image.id = 0;
                mappings[i].image.save_to_db(0,&central_db,false);
                mappings[i].captured_image.capture_images_image_id = mappings[i].image.id;
            }
            if (need_to_make_new_capture_image) {
                mappings[i].captured_image.captured_images_id = 0;
                mappings[i].captured_image.save(&central_db);
            }
            if (need_to_make_new_problem) {
                mappings[i].old_problem_id = mappings[i].problem_id;
                ns_image_server_event ev;
                ev << mappings[i].problem_text;
                if (mappings[i].problem_minor) ev << ns_ts_minor_event;
                ev.set_time(mappings[i].problem_time);
                mappings[i].problem_id = image_server.register_server_event(ev,&central_db);
            }
        }
        catch(ns_ex & ex) {
            //record the failure in the mapping; pass 2 skips rows with errors
            //and pass 3 marks them so they are not retried
            mappings[i].error << "Error while making mapping: " << ex.text();
            errors.push_back(&mappings[i].error);
        }
    }

    //pass 2: write the remapped rows to the central db and clean the buffer
    for (unsigned long i = 0; i < updated_data.size(); i++) {
        if (mappings[i].error.text().size() > 0)
            continue;
        try {
            central_db << "Update capture_schedule SET ";
            //copy every buffered column except the ids we remapped above and
            //the timestamp, which are written explicitly below
            for (unsigned int j = 0; j < buffered_capture_schedule.table_format.column_names.size(); ++j) {
                if (j == buffered_capture_schedule.id_column
                        || j == buffered_capture_schedule.image_id_column ||
                        j == buffered_capture_schedule.problem_column || j ==
                        buffered_capture_schedule.timestamp_column) continue;
                central_db  << "`" << buffered_capture_schedule.table_format.column_names[j] << "`='" << central_db.escape_string(updated_data[i][j]) << "',";
            }
            central_db << "captured_image_id = " << mappings[i].captured_image.captured_images_id
                       << ", problem = " << mappings[i].problem_id;
            //we set the timestamp as old so that it does not trigger a re-download when the server tries to update its cache
            central_db << ", time_stamp=FROM_UNIXTIME("<< new_timestamp <<") ";
            central_db << " WHERE id = " << updated_data[i][0];
            central_db.send_query();
            //if ids were remapped, point the buffered row at the central ids and
            //delete the now-superseded local image/problem records
            if (mappings[i].old_captured_image.captured_images_id != mappings[i].captured_image.captured_images_id ||
                    mappings[i].old_problem_id != mappings[i].problem_id) {
                local_buffer_sql << "UPDATE buffered_capture_schedule SET captured_image_id = " << mappings[i].captured_image.captured_images_id
                                 << ", problem = " << mappings[i].problem_id << ", time_stamp = FROM_UNIXTIME("<< new_timestamp
                                 <<") WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_captured_images WHERE id = " << mappings[i].old_captured_image.captured_images_id;
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_images WHERE id = " << mappings[i].old_image.id;
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_host_event_log WHERE id = " << mappings[i].old_problem_id;
                local_buffer_sql.send_query();
            }
            //finished captures (non-zero finish time) are fully synced; drop them
            //from the local schedule buffer entirely
            if (updated_data[i][buffered_capture_schedule.time_at_finish_column] != "0") {
                local_buffer_sql << "DELETE FROM buffered_capture_schedule WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                local_buffer_sql.send_query();
                //	local_buffer_sql.clear_query();
            }
        }
        catch(ns_ex & ex) {
            mappings[i].error << "Error during central update: " << ex.text();
            errors.push_back(&mappings[i].error);
        }
    }
    //pass 3: mark failed rows (uploaded_to_central_db=3) so they are excluded
    //from future sync attempts, and report the error
    for (unsigned int i = 0; i < mappings.size(); i++) {
        if (mappings[i].error.text().size() > 0) {

            local_buffer_sql << "UPDATE buffered_capture_schedule SET uploaded_to_central_db=3 WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
            local_buffer_sql.send_query();
            image_server.register_server_event(ns_image_server::ns_register_in_central_db_with_fallback,ns_ex("Could not update central db: ") << mappings[i].error.text());
        }

    }
    //update modified sample data.
    updated_data.resize(0);
    if (capture_samples.column_names.size() == 0) {
        capture_samples.load_column_names_from_db("capture_samples",&central_db);
        if (capture_samples.column_names.size() == 0)
            throw ns_ex("ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db()::Capture sample table appears to have no columns!");
    }
    //the code below assumes column 0 is the id; fail loudly if the schema differs
    if (capture_samples.column_names[0] != "id")
        throw ns_ex("ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db()::Capture sample table does not have its id in the first column!");

    ns_get_all_column_data_from_table("buffered_capture_samples",capture_samples.column_names,
                                      std::string("WHERE ") + altered_data_condition,
                                      updated_data,&local_buffer_sql);
    if (capture_samples.time_stamp_column_id == -1)
        throw ns_ex("Could not find capture sample time stamp column!");
    for (unsigned int i = 0; i < updated_data.size(); i++) {
        central_db << "UPDATE capture_samples SET ";
        //skip id column; we don't want to cause any unneccisary db shuffling by changing ids (even if we are changing the ids to the value they already are)
        central_db  << capture_samples.column_names[1] << "='" << central_db.escape_string(updated_data[i][1]);
        for (unsigned int j = 2; j < capture_samples.column_names.size(); ++j) {
            if (j == capture_samples.time_stamp_column_id)
                continue;
            else central_db  << "',`" << capture_samples.column_names[j] << "`='" << central_db.escape_string(updated_data[i][j]);
        }
        central_db << "',time_stamp=FROM_UNIXTIME(" << new_timestamp << ") ";
        central_db << "WHERE id = " << updated_data[i][0];
        central_db.send_query();
    }

}
Exemple #17
0
//Two-way synchronization between the local capture buffer and the central
//server, run under buffer_capture_scheduler_lock.  First commits all local
//changes upward, then pulls down any new/updated capture-schedule entries
//(restricted to the devices attached to this host and to future scans),
//along with the sample and experiment rows they reference, updated cluster
//constants, and finally records the new sync time.
//Does nothing if no devices are connected.
void ns_buffered_capture_scheduler::update_local_buffer_from_central_server(ns_image_server_device_manager::ns_device_name_list & connected_devices,ns_local_buffer_connection & local_buffer, ns_sql & central_db) {

    if (connected_devices.size() == 0)
        return;

    ns_acquire_lock_for_scope lock(buffer_capture_scheduler_lock,__FILE__,__LINE__);

    local_buffer.clear_query();
    central_db.clear_query();

    //sample both clocks so local and remote windows can be computed consistently
    std::string local_time = local_buffer.get_value("SELECT UNIX_TIMESTAMP(NOW())"),
                central_time = central_db.get_value("SELECT UNIX_TIMESTAMP(NOW())");

    const ns_synchronized_time update_start_time(atol(local_time.c_str())-10,atol(central_time.c_str())-10);//go ten seconds into the past
    //to make sure all writes
    //are committed

    //now we update the local buffer to the central node.
    commit_all_local_schedule_changes_to_central_db(update_start_time,local_buffer,central_db);
    //now that all the local buffer data is reflected in the central database, we check to see if there is any new data in the central database.
    //if so, we wipe the local buffer and update everything.

    capture_schedule.load_if_needed(&central_db);
    //get any new or updated capture schedule events

    //the first five result columns are metadata (sched id, sample id,
    //experiment id, and the two timestamps); schedule table columns follow,
    //hence the 5+j offsets used when reading new_schedule below
    central_db << "SELECT sched.id, samp.id, samp.experiment_id, UNIX_TIMESTAMP(sched.time_stamp),UNIX_TIMESTAMP(samp.time_stamp)";

    for (unsigned int i = 0; i < capture_schedule.table_format.column_names.size(); i++)
        central_db << ",`sched`.`" << capture_schedule.table_format.column_names[i] << "`";

    //only entries for devices physically attached to this host
    central_db << " FROM capture_schedule as sched, capture_samples as samp "
               << "WHERE (samp.device_name='" << connected_devices[0].name << "'";

    for (unsigned int i = 1; i < connected_devices.size(); i++)
        central_db << " OR samp.device_name='" << connected_devices[i].name << "'";

    central_db << ")"
               << " AND sched.time_at_start = 0 "
               << " AND sched.sample_id = samp.id "
               << " AND sched.time_at_finish = 0 "
               //here, we could bring the entire local database completely up to date
               //but only scans in the future will make any difference, so we only download
               //those who are still scheduled for the future
               //this old command would fully update the database, as time_of_last_update_from_central_db
               //would be set to 0
               //<< " AND sched.scheduled_time > " << (time_of_last_update_from_central_db.remote_time-image_server.maximum_allowed_local_scan_delay())  //only get events in the future
               //however, now we only grab the future, relevant scans.
               << " AND sched.scheduled_time > " << (update_start_time.remote_time-image_server.maximum_allowed_local_scan_delay())  //only get events in the future

               << " AND sched.time_stamp > FROM_UNIXTIME(" << time_of_last_update_from_central_db.remote_time <<") "
               << " AND sched.time_stamp <= FROM_UNIXTIME(" << update_start_time.remote_time << ") "
               << " ORDER BY sched.scheduled_time ASC";


    ns_sql_result new_schedule;
    central_db.get_rows(new_schedule);
    //collect the samples/experiments referenced by new schedule entries so
    //their metadata can be buffered before the schedule rows are inserted
    std::set<unsigned long> altered_experiment_ids;
    std::set<unsigned long> altered_sample_ids;
    for (unsigned int i = 0; i < new_schedule.size(); i++) {
        //	if (atol(new_schedule[i][4].c_str()) > central_time_of_last_update_from_central_db){
        altered_sample_ids.insert(atol(new_schedule[i][1].c_str()));
        altered_experiment_ids.insert(atol(new_schedule[i][2].c_str()));
        //	}
    }
    //local timestamp stamped onto buffered rows, kept at the update start so a
    //clock offset between hosts cannot make buffered rows look "in the future"
    const unsigned long new_timestamp(update_start_time.local_time);

    if (new_schedule.size() != 0) {
        if (new_schedule.size() > 4)
            image_server.register_server_event(ns_image_server_event("ns_buffered_capture_scheduler::")
                                               << new_schedule.size() << " new capture schedule entries found.  Updating local buffer.",&central_db);

        //if samples or experiments have changed or added, update them.
        //we need to do this *before* updating the capture schedule,
        //as the addition of a capture schedule item might trigger a scan immediately
        //and that scan will fail if the sample and experiemnts information isn't already in the local database.
        if (altered_sample_ids.size() > 0) {
            capture_samples.load_if_needed("capture_samples",&central_db);
            experiments.load_if_needed("experiments",&central_db);
            std::string sample_where_clause(std::string(" WHERE ") + ns_compile_sql_where_clause(altered_sample_ids,"id")),
                experiment_where_clause(std::string(" WHERE ") + ns_compile_sql_where_clause(altered_experiment_ids,"id"));

            ns_sql_result capture_sample_data;
            ns_get_all_column_data_from_table("capture_samples",capture_samples.column_names,sample_where_clause,capture_sample_data,&central_db);
            ns_sql_result experiment_data;
            ns_get_all_column_data_from_table("experiments",experiments.column_names,experiment_where_clause,experiment_data,&central_db);

            std::cerr << "Updating local buffer with information about " << capture_sample_data.size() << " samples\n";
            //local_buffer_db.send_query("DELETE FROM buffered_capture_samples");
            if (capture_samples.time_stamp_column_id == -1)
                throw ns_ex("Could not find capture sample time stamp column!");
            long last_displayed_percent(-5);
            //upsert each sample row into the buffer, reporting progress to stderr
            for(unsigned int i = 0; i < capture_sample_data.size(); i++) {
                const long percent((100*i)/capture_sample_data.size());
                if (percent >= last_displayed_percent+5) {
                    std::cerr << percent << "%...";
                    last_displayed_percent = percent;
                }
                std::string values;

                values += "`";
                values += capture_samples.column_names[0] + "`='" + local_buffer.escape_string(capture_sample_data[i][0]) + "'";
                for (unsigned int j = 1; j < capture_samples.column_names.size(); j++) {
                    if (j == capture_samples.time_stamp_column_id)	//we need to update the local time stamp here, so that if there might be a clock asynchrony between the
                        continue;									//central server and local server that would allow remote timestamps to be in the future according to local
                    //which would trigger the local server to update the central in the next check, ad infinitum.
                    values += std::string(",`") +  capture_samples.column_names[j] + "`='" + local_buffer.escape_string(capture_sample_data[i][j]) + "'";
                }
                values += std::string(",`time_stamp`=FROM_UNIXTIME(") + ns_to_string(new_timestamp) + ")";
                local_buffer << "INSERT INTO buffered_capture_samples SET " << values
                             << " ON DUPLICATE KEY UPDATE " << values;
                local_buffer.send_query();
            }
            std::cerr << "Done.\n";
            //local_buffer.send_query("DELETE FROM buffered_experiments");
            //same upsert pattern for experiment metadata
            for(unsigned int i = 0; i < experiment_data.size(); i++) {
                std::string values;
                values += "`";
                values += experiments.column_names[0] + "`='" + local_buffer.escape_string(experiment_data[i][0]) + "'";
                for (unsigned int j = 1; j < experiments.column_names.size(); j++) {
                    if (experiments.time_stamp_column_id == j)
                        continue;
                    values += std::string(",`") + experiments.column_names[j] + "`='" + local_buffer.escape_string(experiment_data[i][j]) + "'";
                }
                values += std::string(",time_stamp=FROM_UNIXTIME(") + ns_to_string(new_timestamp) + ")";

                local_buffer << "INSERT INTO buffered_experiments SET " << values;
                local_buffer << " ON DUPLICATE KEY UPDATE " << values;
                local_buffer.send_query();
            }
        }
        std::cerr << "Updating local buffer with information about " << new_schedule.size() << " schedule time points...\n";
        long last_displayed_percent = -5;
        for (unsigned int i = 0; i < new_schedule.size(); i++) {
            const long percent((100*i)/new_schedule.size());
            if (percent >= last_displayed_percent+5) {
                std::cerr << percent << "%...";
                last_displayed_percent = percent;
            }
            //schedule columns start at result column 5 (see SELECT above)
            std::string all_values;
            all_values += "`";
            all_values += capture_schedule.table_format.column_names[0] + "`='" + local_buffer.escape_string(new_schedule[i][5]) + "'";
            for (unsigned int j = 1; j < capture_schedule.table_format.column_names.size(); j++) {
                if (j == capture_schedule.time_stamp_column)
                    continue;
                all_values += std::string( ", `") + capture_schedule.table_format.column_names[j] + "`='" + local_buffer.escape_string(new_schedule[i][5+j]) + "'";
            }
            all_values+=std::string(",time_stamp=FROM_UNIXTIME(") + ns_to_string(new_timestamp) + ")";


            //on conflict, only refresh status fields so local in-progress state
            //is not clobbered by a full-row overwrite
            std::string update_values;
            update_values += std::string("problem=") + new_schedule[i][5+capture_schedule.problem_column] + ","
                             + std::string("scheduled_time=") + new_schedule[i][5+capture_schedule.scheduled_time_column] + ","
                             + std::string("missed=") + new_schedule[i][5+capture_schedule.missed_column] + ","
                             + std::string("censored=") + new_schedule[i][5+capture_schedule.censored_column] +","
                             + std::string("transferred_to_long_term_storage=") + new_schedule[i][5+capture_schedule.transferred_to_long_term_storage_column] +","
                             + std::string("time_during_transfer_to_long_term_storage=") + new_schedule[i][5+capture_schedule.time_during_transfer_to_long_term_storage_column] +","
                             + std::string("time_during_deletion_from_local_storage=") + new_schedule[i][5+capture_schedule.time_during_deletion_from_local_storage_column] + ","
                             + std::string("time_stamp=FROM_UNIXTIME(") + ns_to_string(update_start_time.local_time) + ")";


            local_buffer << "INSERT INTO buffered_capture_schedule SET " << all_values
                         << " ON DUPLICATE KEY UPDATE " << update_values;
            local_buffer.send_query();
        }
        std::cerr << "Done.\n";
    }
    //if no changes to the schedule were made, look to see find changes made to any capture samples
    else {

        ns_sql_result capture_sample_data;
        ns_get_all_column_data_from_table("capture_samples",capture_samples.column_names,
                                          std::string("WHERE time_stamp >= FROM_UNIXTIME(") + ns_to_string(time_of_last_update_from_central_db.remote_time) +") "
                                          " AND time_stamp < FROM_UNIXTIME(" + ns_to_string(update_start_time.remote_time) +") "
                                          ,capture_sample_data,&central_db);
        if (capture_sample_data.size() > 0) {
            std::cerr << "Copying over " << capture_sample_data.size() << " samples\n";
            //local_buffer_db.send_query("DELETE FROM buffered_capture_samples");
            //NOTE(review): unlike the branch above, this copy does not skip the
            //time_stamp column or restamp rows with the local time — confirm the
            //clock-asynchrony concern documented above does not apply here.
            for(unsigned int i = 0; i < capture_sample_data.size(); i++) {
                std::string values;
                values += "`";
                values += capture_samples.column_names[0] + "`='" + local_buffer.escape_string(capture_sample_data[i][0]) + "'";
                for (unsigned int j = 1; j < capture_samples.column_names.size(); j++)
                    values += std::string(",`") +  capture_samples.column_names[j] + "`='" + local_buffer.escape_string(capture_sample_data[i][j]) + "'";

                local_buffer << "INSERT INTO buffered_capture_samples SET " << values
                             << " ON DUPLICATE KEY UPDATE " << values;
                local_buffer.send_query();
            }
        }
    }

    local_buffer.send_query("COMMIT");
    //lock.unlock();

    commit_all_local_non_schedule_changes_to_central_db(update_start_time,local_buffer,central_db);

    //mirror any cluster constants that changed since the last sync
    central_db << "SELECT k,v FROM constants WHERE time_stamp > FROM_UNIXTIME(" << time_of_last_update_from_central_db.remote_time << ")";
    ns_sql_result cres;
    central_db.get_rows(cres);
    if (cres.size() > 0) {
        std::cerr << "Updating " << cres.size() << " constants in local buffer\n";
    }
    for (unsigned int i = 0; i < cres.size(); i++)
        image_server.set_cluster_constant_value(local_buffer.escape_string(cres[i][0]),local_buffer.escape_string(cres[i][1]),&local_buffer,update_start_time.local_time);
    //record the sync point so the next update only considers newer changes
    time_of_last_update_from_central_db = update_start_time;
    store_last_update_time_in_db(time_of_last_update_from_central_db,local_buffer);

    lock.release();
}
//Scans the cluster's capture schedule for image captures whose scheduled time
//has passed without ever being started, records them in the host event log,
//marks them as missed in the database (so each miss is reported only once),
//and — unless every miss is suppressed — submits a rate-limited alert.
//Per-device and per-experiment suppression lists let users silence the alert
//for hardware or experiments they already know about.
void ns_image_server_dispatcher::scan_for_problems(ns_sql & sql){
	std::vector<std::string> devices_to_suppress,
							 experiments_to_suppress;

	image_server.get_alert_suppression_lists(devices_to_suppress,experiments_to_suppress,&sql);
		
	image_server.device_manager.scan_and_report_problems(devices_to_suppress,experiments_to_suppress,sql);
	//automatically search for new scanners
	//if(!image_server.server_is_paused() && image_server.query_cluster_for_device_names())
	//	run_hotplug(false,false);
	
	//look for missed scans anywhere on the cluster (these will start to accumulate if, for example, a node with attach devices crashes.)
	unsigned long current_time = ns_current_time();
	if (image_server.maximum_allowed_remote_scan_delay() == 0)
		image_server.update_scan_delays_from_db(&sql);


	if (image_server.maximum_allowed_remote_scan_delay() != 0){

		//a capture is "missed" when it was never started or finished, is not
		//censored or already flagged, and its scheduled time lies more than
		//the allowed delay (stored in minutes) in the past
		string conditions_for_missed;
		conditions_for_missed = 
				"c.censored = 0 AND c.time_at_finish = 0 AND c.time_at_start = 0 "
				"AND c.problem = 0 AND c.missed = 0 AND "
				"c.scheduled_time < ";
		conditions_for_missed += ns_to_string(current_time - 60*image_server.maximum_allowed_remote_scan_delay());

		sql << "SELECT c.id, c.scheduled_time, s.device_name, e.name FROM capture_schedule as c, capture_samples as s, experiments as e WHERE " 
			<< conditions_for_missed << " AND c.sample_id = s.id AND s.experiment_id = e.id ORDER BY c.scheduled_time";
		ns_sql_result missed_schedule_events;
		sql.get_rows(missed_schedule_events);

		if (missed_schedule_events.size() > 0){
		
			//if there are missed scans, build two reports: a complete one for
			//the event log (with suppressed events annotated) and a filtered
			//one for the alert (suppressed events omitted)
			string summary_text("Scheduled image captures are being missed.");
			string detailed_text("Scheduled image captures are being missed:\n");
			bool found_reportable_miss(false);
			string unsuppressed_text;
			for (unsigned int i = 0; i < missed_schedule_events.size(); i++){
				const string & device_name(missed_schedule_events[i][2]);
				const string & experiment_name(missed_schedule_events[i][3]);
				string tmp(ns_format_time_string_for_human(
					atol(missed_schedule_events[i][1].c_str())) 
					+ " " + device_name + " " + experiment_name);
				unsuppressed_text += tmp;

				bool suppressed_by_device(false);
				bool suppressed_by_experiment(false);
				for (unsigned int j = 0; j < devices_to_suppress.size(); j++){
					if (device_name == devices_to_suppress[j]){
						suppressed_by_device = true;
						break;
					}
				}		
				
				//BUG FIX: this loop previously tested missed_schedule_events[0][3]
				//(the FIRST event's experiment) on every iteration, so experiment
				//suppression for all events was decided by event 0 alone.
				for (unsigned int j = 0; j < experiments_to_suppress.size(); j++){
					if (experiment_name == experiments_to_suppress[j]){
						suppressed_by_experiment = true;
						break;
					}
				}
				
				if (suppressed_by_device)
					unsuppressed_text += "(Suppressed by device request)";
				if (suppressed_by_experiment)
					unsuppressed_text += "(Suppressed by experiment request)";
				unsuppressed_text +="\n";

				if (suppressed_by_device || suppressed_by_experiment)
					continue;
				found_reportable_miss = true;

				detailed_text += tmp + "\n";
			}

			//always record the full list (including suppressed events) in the host event log
			ns_image_server_event ev;
			if (missed_schedule_events.size() == 1) ev << "The image cluster has missed a scheduled image capture:";
			else ev << "The image cluster has missed " << (unsigned long)missed_schedule_events.size() << " scheduled image captures";
			ev << unsuppressed_text; 
			image_server.register_server_event(ns_image_server::ns_register_in_central_db,ev);

			//flag the events as missed so they are not reported again next scan
			sql << "UPDATE capture_schedule as c SET missed = 1 WHERE " << conditions_for_missed;
			sql.send_query();
			sql.send_query("COMMIT");
			if (found_reportable_miss){
				try{
					ns_alert alert(summary_text,
						detailed_text,
						ns_alert::ns_missed_capture,
						ns_alert::get_notification_type(ns_alert::ns_missed_capture,image_server.act_as_an_image_capture_server()),
						ns_alert::ns_rate_limited);
					image_server.alert_handler.submit_alert(alert,sql);
				
				}
				catch(ns_ex & ex){
					//alert submission is best-effort; failure is logged to stderr only
					cerr << "Could not submit alert: " << summary_text << " : " << ex.text();
				}
			}
		}
	}
}
//Requests one job from the push scheduler and runs it.  Returns false when no
//work was available (or long-term storage is unreachable), true otherwise —
//including when the job was stale or failed, since in those cases work was
//still consumed from the queue.  On error the job is flagged as a problem and
//any associated images may be annotated; memory-allocation errors are
//re-thrown so callers can handle them globally.
bool ns_processing_job_scheduler::run_a_job(ns_sql & sql,bool first_in_first_out_job_queue){
	//if we can't talk to the long term storage we're bound to fail, so don't try.
	image_server.image_storage.test_connection_to_long_term_storage(true);
	if (!image_server.image_storage.long_term_storage_was_recently_writeable())
		return false;
	ns_image_server_push_job_scheduler push_scheduler;

	ns_processing_job job = push_scheduler.request_job(sql,first_in_first_out_job_queue);
	
	//job.id == 0 means the queue was empty
	if (job.id == 0)
		return false;
	
	//refresh flag labels from db
	ns_death_time_annotation_flag::get_flags_from_db(sql);

	//queue-maintenance jobs are handled inline rather than via a processor
	if (job.maintenance_task == ns_maintenance_update_processing_job_queue){
		image_server.register_server_event(ns_image_server_event("Updating job queue"),&sql);

		sql << "DELETE from processing_jobs WHERE maintenance_task =" << ns_maintenance_update_processing_job_queue;
		sql.send_query();
		push_scheduler.report_job_as_finished(job,sql);
		push_scheduler.discover_new_jobs(sql);
		return true;		
	}

	//RAII wrapper: the processor is released on every exit path below
	ns_acquire_for_scope<ns_processing_job_processor> processor(
		ns_processing_job_processor_factory::generate(job,image_server,this->pipeline->pipeline));
		
	try{
		//the job may have been completed or invalidated since it was queued
		std::string rejection_reason;
		if (!processor().job_is_still_relevant(sql,rejection_reason)){
			image_server.register_server_event(ns_image_server_event("Encountered a processing job queue that had already been performed or invalidated: ") << rejection_reason << "[" << job.description() << "]",&sql);
			push_scheduler.report_job_as_finished(job,sql);
			if (processor().delete_job_after_processing())
				processor().delete_job(sql);
			processor.release();
			sql.send_query("COMMIT");
			return true;
		}
		//account the time spent idle between jobs, then time this job
		if(idle_timer_running)
			image_server.performance_statistics.register_job_duration(ns_performance_statistics_analyzer::ns_idle,idle_timer.stop());
		idle_timer_running = false;
		ns_high_precision_timer tp;
		tp.start();

		//mark the subject as busy to prevent multiple jobs running simultaneously on the same data
		processor().mark_subject_as_busy(true,sql);
		sql.send_query("COMMIT");

		//update UI to show job is being performed, if requested.
		if (processor().flag_job_as_being_processed_before_processing())
			processor().flag_job_as_being_processed(sql);
		sql.send_query("COMMIT");

		//NOTE(review): the busy flag is cleared only when run_job() returns
		//true — confirm that jobs returning false are meant to stay flagged
		//busy (the indentation suggests this conditional may be unintended).
		if (processor().run_job(sql))
		processor().mark_subject_as_busy(false,sql);

		push_scheduler.report_job_as_finished(job,sql);
		sql.send_query("COMMIT");

		processor().handle_concequences_of_job_completion(sql);

		if (processor().delete_job_after_processing())
			processor().delete_job(sql);
		sql.send_query("COMMIT");

		processor.release();
		image_server.performance_statistics.register_job_duration(ns_performance_statistics_analyzer::ns_running_a_job,tp.stop());
		
		//resume measuring idle time until the next job arrives
		idle_timer_running = true;
		idle_timer.start();

		return true;
	}
	catch(ns_ex & ex){
		//we have found an error, handle it by registering it in the
		//host_event log, and annotate the current job (and any associated images)
		//with a reference to the error that occurred.
		sql.clear_query();
		
		processor().mark_subject_as_busy(false,sql);


		ns_64_bit error_id(push_scheduler.report_job_as_problem(job,ex,sql));
		sql.send_query("COMMIT");

		//there are a variety of problems that could cause an exception to be thrown.
		//only mark the image itself as problematic if the error doesn't come from 
		//any of the environmental problems that can crop up.
		bool problem_with_long_term_storage(!image_server.image_storage.long_term_storage_was_recently_writeable());
		if (!problem_with_long_term_storage &&
			ex.type() != ns_network_io && 
			ex.type() != ns_sql_fatal &&  
			ex.type() != ns_memory_allocation && 
			ex.type() != ns_cache)
			processor().mark_subject_as_problem(error_id,sql);

		image_server.performance_statistics.cancel_outstanding_jobs();
		if (ex.type() == ns_memory_allocation)
			throw;	//memory allocation errors can cause big, long-term problems, thus we need to pass
					//them downwards to be handled.
		else 
		processor.release();
	}
	catch(std::exception & e){
		//wrap standard exceptions so they flow through the same reporting path
		ns_ex ex(e);
		
		sql.clear_query();
		
		processor().mark_subject_as_busy(false,sql);
		//we have found an error, handle it by registering it in the
		//host_event log, and annotate the current job (and any associated images)
		//with a reference to the error that occurred.
		ns_64_bit error_id(push_scheduler.report_job_as_problem(job,ex,sql));
		sql.send_query("COMMIT");
		
		
		processor().mark_subject_as_problem(error_id,sql);

		image_server.performance_statistics.cancel_outstanding_jobs();
		
		processor.release();
		if (ex.type() == ns_memory_allocation)
			throw; //memory allocation errors can cause big, long-term problems, thus we need to pass
					//them downwards to be handled.
	}
	return true;
}
Exemple #20
0
/*
void ns_capture_sample_region_statistics_set::output_plate_statistics_with_mortality_data(const ns_survival_data_summary_aggregator & survival_data, std::ostream & o){
		
	ns_capture_sample_region_data_timepoint mean,first,last;
	for (unsigned int i = 0; i < regions.size(); i++){
			
		regions[i].metadata.out_JMP_plate_identity_data(o);
		o << ",";
		o << (regions[i].censored?"1":"0");
		o << ",";
		o << (regions[i].excluded?"1":"0");
		o << ",";

		ns_survival_data_summary_aggregator::ns_plate_list::const_iterator region_mortality_data(survival_data.plate_list.find(regions[i].metadata.plate_name()));
		if (region_mortality_data == survival_data.plate_list.end()){
			survival_data.out_JMP_empty_summary_data(o);
		}
		else{
			survival_data.out_JMP_summary_data(region_mortality_data,o);
		}
		o << ",";

		regions[i].generate_summary_info(mean,first,last);
		mean.output_jmp_data(o,mean.time,false,false,",");
		first.output_jmp_data(o,mean.time,false,false,",");
		last.output_jmp_data(o,mean.time,false,false,"\n");
	}

}
	*/
void ns_capture_sample_region_statistics_set::load_whole_experiment(const unsigned long experiment_id,ns_sql & sql,bool process_raw_image_stats){
		
	std::string experiment_name;
	sql << "SELECT name FROM experiments WHERE id = " << experiment_id;
	ns_sql_result res1;
	sql.get_rows(res1);
	if (res1.size() == 0)
		throw ns_ex("Could not find experiment id ") << experiment_id;
	experiment_name = res1[0][0];

	ns_sql_result res;
	sql << "SELECT id,name,device_name,censored,description,excluded_from_analysis,incubator_name, incubator_location "
			    "FROM capture_samples WHERE experiment_id=" << experiment_id;
	sql.get_rows(res);
		
	ns_genotype_fetcher genotypes;
	genotypes.load_from_db(&sql);

	for (unsigned int j = 0; j < res.size(); j++){
		
		ns_region_metadata sample_metadata;
		sample_metadata.sample_id = atol(res[j][0].c_str());
		sample_metadata.experiment_name = experiment_name;
		sample_metadata.sample_name = res[j][1];
		sample_metadata.device = res[j][2];
		sample_metadata.incubator_name = res[j][6];
		sample_metadata.incubator_location = res[j][7];

		bool sample_censored=(res[j][3]!="0"),
				sample_excluded=(res[j][5]!="0");
		std::string sample_details = res[j][4];
			


			
		sql << "SELECT id,censored,excluded_from_analysis FROM sample_region_image_info WHERE sample_id=" << sample_metadata.sample_id;
		ns_sql_result res2;
		sql.get_rows(res2);

		unsigned long plate_index(regions.size());
		regions.resize(plate_index+res2.size());

		for (unsigned long k = 0; k < res2.size(); ++k){
			ns_region_metadata metadata(sample_metadata);
			unsigned long region_id(atol(res2[k][0].c_str()));
			metadata.load_from_db(region_id,"",sql);
			
			bool region_censored(res2[k][1]!="0"),
					region_excluded(res2[k][2]!="0");
//				if (region_censored ||  region_excluded){
//					char a;
//					a++;
//				}
				//std::cerr << "EX";
				
			regions[plate_index+k].load_from_db(region_id,metadata,sample_censored || region_censored,sample_excluded || region_excluded,sql,process_raw_image_stats);
			if (sample_details.size() > 0) 
				regions[plate_index+k].metadata.details += sample_details;
		}
	}
	build_id_mapping();
}
Exemple #21
0
void ns_capture_sample_region_data::load_from_db(const unsigned long region_id_, 
					const ns_region_metadata & metadata_,
					const bool region_is_censored,
					const bool region_is_excluded,
					ns_sql & sql,bool recalculate_raw_image_statistics){
	//Load the per-timepoint image statistics for a single region (plate).
	//Cached statistics are read straight from the image_statistics table; if
	//none exist, they are regenerated from the stored worm detection results
	//(and optionally recalculated from the raw images) and written back to
	//the database so subsequent loads hit the cache.
	metadata = metadata_;
	metadata.region_id = region_id_;
	censored = region_is_censored;
	excluded = region_is_excluded;

	sql << "SELECT " << ns_image_statistics::produce_sql_query_stub() 
						<< ", sample_region_images.censored, sample_region_images.problem, sample_region_images.capture_time FROM image_statistics,sample_region_images WHERE sample_region_images.region_info_id=" << metadata.region_id
						<< " AND sample_region_images.image_statistics_id!=0 AND sample_region_images.image_statistics_id = image_statistics.id ORDER BY sample_region_images.capture_time ASC";
	ns_sql_result res;
	sql.get_rows(res);
	//try to load cached data
	if (res.size() > 0){
		timepoints.resize(res.size());
		for (unsigned int i = 0; i < res.size(); i++){
			timepoints[i].statistics.from_sql_result(res[i]);
			//the censored/problem/capture_time columns trail the statistics stub fields
			timepoints[i].timepoint_is_censored = atol(res[i][ns_image_statistics::sql_query_stub_field_count()].c_str())!=0;
			timepoints[i].timepoint_has_a_problem = atol(res[i][ns_image_statistics::sql_query_stub_field_count()+1].c_str())!=0;
			timepoints[i].time = atol(res[i][ns_image_statistics::sql_query_stub_field_count()+2].c_str());
		}
	}
	//regenerate some data if all is missing
	else{
		std::cout << "No cached whole image statistics could be found.  Reloading just the cached worm statistics...\n";
		sql << "SELECT id, censored, problem, capture_time, worm_detection_results_id,"
			<< ns_processing_step_db_column_name(ns_unprocessed) << "," 
			<< ns_processing_step_db_column_name(ns_process_spatial) << ",image_statistics_id FROM sample_region_images WHERE region_info_id=" << metadata.region_id
			<< " AND worm_detection_results_id != 0 ORDER BY capture_time ASC";
		ns_sql_result res;
		sql.get_rows(res);
		if (res.size() == 0){
			std::cout << "No worm detection appears to have been performed in this region.\n";
			return;
		}
		timepoints.resize(res.size());
		unsigned int pos=0;
		ns_image_standard image_buffer;
		for (unsigned int i = 0; i < res.size(); i++){
			std::cout << (100*i/res.size()) << "%...";
			ns_image_server_captured_image_region im;
			im.region_info_id = metadata.region_id;
			im.region_images_id = ns_atoi64(res[i][0].c_str());
			ns_image_worm_detection_results results;
			results.id =  ns_atoi64(res[i][4].c_str());
			try{
				//load,calculate.and save worm stats
				results.load_from_db(true,false,sql,false);
				results.load_images_from_db(im,sql);
				im.summarize_stats(&results,&timepoints[pos].statistics,false);
				if (recalculate_raw_image_statistics){
					ns_image_server_image unpr_im;
					unpr_im.id = ns_atoi64(res[i][5].c_str());
					timepoints[pos].statistics.calculate_statistics_from_image(unpr_im,sql);
				}
				ns_64_bit stats_db_id = ns_atoi64(res[i][7].c_str());
				bool made_new_database_entry( timepoints[pos].statistics.submit_to_db(stats_db_id,sql,false,true) );
				if (made_new_database_entry){
					//link the newly cached statistics back to the source image
					sql << "UPDATE sample_region_images SET image_statistics_id = " << stats_db_id << " WHERE id = " << im.region_images_id;
					sql.send_query();
				}

				timepoints[pos].timepoint_is_censored = atol(res[i][1].c_str())!=0;
				timepoints[pos].timepoint_has_a_problem = atol(res[i][2].c_str())!=0;
				timepoints[pos].time = atol(res[i][3].c_str());
				
				pos++;

			}
			catch(ns_ex & ex){
				//Don't let one bad timepoint abort the whole region, but do
				//report the failure (the previous code's bare "ex;" statement
				//silently discarded it).  Failed timepoints are dropped by
				//the final resize(pos) below.
				std::cout << "Error: " << ex.text() << "\n";
			}
			std::cout << "\n";
		}
		//trim the entries for timepoints that failed to load
		timepoints.resize(pos);
	}
	
}
Exemple #22
0
std::string ns_experiment_capture_specification::submit_schedule_to_db(std::vector<std::string> & warnings,ns_sql & sql,bool actually_write,bool overwrite_previous){
	string debug;
	if (!device_schedule_produced) 
		throw ns_ex("ns_experiment_capture_specification::submit_schedule_to_db()::The device schedule has not yet been compiled");
	if (name.length() > 40)
			throw ns_ex("To avoid lengthy filenames, experiment names must contain 40 characters or less.");
	ns_sql_result res;
	//check that all devices requested exist
	for (unsigned long i = 0; i < capture_schedules.size(); i++){
		ns_device_schedule_list & device_schedules(capture_schedules[i].device_schedules);
		for (ns_device_schedule_list::iterator p = device_schedules.begin(); p != device_schedules.end(); p++){
			sql << "SELECT name FROM devices WHERE name = '" << sql.escape_string(p->second.device_name) << "'";
			sql.get_rows(res);
			if (res.size() == 0)
				throw ns_ex("ns_experiment_capture_specification::submit_schedule_to_db()::Could not find device ") << p->second.device_name << " attached to cluster";
			for (ns_device_capture_schedule::ns_sample_group_list::iterator q = p->second.sample_groups.begin(); q!= p->second.sample_groups.end(); ++q){
				if (q->second.samples.size() != 4 && q->second.samples.size() != 6){
					string warning;
					warning+="Device ";
					warning+=p->second.device_name + " has " + ns_to_string(q->second.samples.size()) + " samples scheduled on a single device";
					warnings.push_back(warning);
					debug += "WARNING: ";
					debug += warning + ".\n\n";
				}
				for (unsigned int k = 0; k < q->second.samples.size(); k++){
					if (q->second.samples[k]->width < .75 || q->second.samples[k]->height < .75 || 
						q->second.samples[k]->width > 2.5 || q->second.samples[k]->height > 10){
						string warning;
						warning+="Sample ";
						warning+=q->second.samples[i]->sample_name + " has unusual dimensions: " + ns_to_string(q->second.samples[i]->width) + "x" + ns_to_string(q->second.samples[i]->height);
						warnings.push_back(warning);
						debug += "WARNING: ";
						debug += warning + ".\n\n";
					}
				}
				if (q->second.schedule->device_capture_period < 10*60 || q->second.schedule->device_capture_period > 20*60){
						string warning;
						warning+="The schedule contains an unusual device capture period: ";
						warning+=ns_to_string(q->second.schedule->device_capture_period/60);
						warnings.push_back(warning);
						debug += "WARNING: ";
						debug += warning + ".\n\n";
				}
			}
		}
	}

	std::map<std::string,std::string> incubator_assignments;
	std::map<std::string,std::string> incubator_location_assignments;
	sql << "SELECT device_name, incubator_name, incubator_location FROM device_inventory";
	//ns_sql_result res;
	sql.get_rows(res);
	for (unsigned int i = 0; i < res.size(); ++i){
		incubator_assignments[res[i][0]] = res[i][1];
		incubator_location_assignments[res[i][0]] = res[i][2];
	}
	
	res.resize(0);
	sql.clear_query();
	sql.send_query("BEGIN");
	sql << "SELECT id FROM experiments WHERE name='" << sql.escape_string(name) << "'";
	sql.get_rows(res);
	if(res.size() == 0){
		sql << "INSERT INTO experiments SET name='" << sql.escape_string(name) << "',description='',`partition`='', time_stamp=0";
		if (!actually_write){
			experiment_id = 0;
			debug+="Creating a new experiment named "; 
			debug+= name + "\n";
		}
		else experiment_id = sql.send_query_get_id();
	}
	else{
		if (!overwrite_previous)
			throw ns_ex("ns_experiment_capture_specification::submit_schedule_to_db::Experiment already exists and overwrite_previous set to false");
		if (!actually_write){
			debug+="Overwriting an existing experiment named "; 
			debug+= name + " with id = " + res[0][0] + "\n";
		}
		experiment_id = atol(res[0][0].c_str());
	}
	
	
	
	sql.clear_query();
	res.resize(0);
	try{
		for (unsigned int i = 0; i < samples.size(); i++){
			sql << "SELECT id, name, device_name,parameters FROM capture_samples WHERE experiment_id = " << experiment_id << " AND name='" << sql.escape_string(samples[i].sample_name) << "'";
			sql.get_rows(res);
			if(res.size() != 0){
				if (!overwrite_previous)
					throw ns_ex("ns_experiment_capture_specification::submit_schedule_to_db::Sample ") << samples[i].sample_name << " already exists and overwrite_previous set to false";
				

				samples[i].sample_id = atol(res[0][0].c_str());
				ns_processing_job job;
				job.sample_id = samples[i].sample_id;
				if (!actually_write)
							debug+="Deleting previous sample (id=" + ns_to_string(job.sample_id) + ").\n";
				else ns_handle_image_metadata_delete_action(job,sql);
			}
			
			sql << "INSERT INTO capture_samples SET experiment_id = " << ns_to_string(experiment_id) << ",name='" << sql.escape_string(samples[i].sample_name) << "'"
				<< ",device_name='" << sql.escape_string(samples[i].device) << "',parameters='" << sql.escape_string(samples[i].capture_parameters()) << "'"
				<< ",position_x=" << samples[i].x_position << ",position_y=" << samples[i].y_position
				<< ",size_x=" << samples[i].width << ",size_y="<<samples[i].height 
				<< ",incubator_name='" << sql.escape_string(incubator_assignments[samples[i].device]) 
				<< "',incubator_location='" << sql.escape_string(incubator_location_assignments[samples[i].device])
				<< "',desired_capture_duration_in_seconds=" <<samples[i].desired_minimum_capture_duration
				<< ",description='',model_filename='',reason_censored='',image_resolution_dpi='" << samples[i].resolution
				<< "',device_capture_period_in_seconds=" << capture_schedules[samples[i].internal_schedule_id].device_capture_period 
				<< ",number_of_consecutive_captures_per_sample=" << capture_schedules[samples[i].internal_schedule_id].number_of_consecutive_captures_per_sample
				<< ", time_stamp=0";
			if (!actually_write){
				samples[i].sample_id = 0;
				debug+="Creating a new sample: name:"; 
				debug += samples[i].sample_name + ", device:" + samples[i].device + "\n\tcapture parameters: \"";
				debug += samples[i].capture_parameters() + "\"\n";
			}
			else{
				samples[i].sample_id = sql.send_query_get_id();
			}
			
			sql.clear_query();
			res.resize(0);
		}
		
		sql.clear_query();
		res.resize(0);


		
		for (unsigned long i = 0; i < capture_schedules.size(); i++){
			unsigned long device_start_offset = 2*60;
			unsigned long s_offset(0);
			ns_device_schedule_list & device_schedules(capture_schedules[i].device_schedules);
			ns_device_start_offset_list & device_start_offsets(capture_schedules[i].device_start_offsets);
			for (ns_device_schedule_list::iterator p = device_schedules.begin(); p != device_schedules.end(); p++){
				device_start_offsets[p->first] = s_offset;
				s_offset+=device_start_offset;
				if (s_offset >= 20*60)
					s_offset = 0;
			}
		}

		for (unsigned int i = 0; i < capture_schedules.size(); i++){
			//compile correct start and stop time for each device.
			if (capture_schedules[i].start_time == 0) capture_schedules[i].start_time =  ns_current_time() + 2*60;
			capture_schedules[i].stop_time = 0;

			ns_device_schedule_list & device_schedules(capture_schedules[i].device_schedules);
			ns_device_start_offset_list & device_start_offsets(capture_schedules[i].device_start_offsets);
			for (ns_device_schedule_list::iterator p = device_schedules.begin(); p != device_schedules.end(); p++){
				const string & device_name = p->first;
				if (p->second.sample_groups.size() == 0) continue;
				p->second.effective_device_period = p->second.sample_groups.begin()->second.schedule->device_capture_period;
				p->second.number_of_consecutive_captures_per_sample = p->second.sample_groups.begin()->second.schedule->number_of_consecutive_captures_per_sample;
				if (p->second.effective_device_period == 0) throw ns_ex("Device period specified as zero!");
				if (p->second.number_of_consecutive_captures_per_sample == 0) throw ns_ex("Number of consecutive_captures_per_sample specified as zero!");
				
				//find earliest start time, stop time
				for (ns_device_capture_schedule::ns_sample_group_list::iterator q = p->second.sample_groups.begin(); q != p->second.sample_groups.end(); q++){
					if (q->second.schedule->start_time != 0 && q->second.schedule->start_time < ns_current_time())
						throw ns_ex("Start time specified is in the past") << q->second.schedule->start_time;
					if (q->first->start_time != 0)
						q->second.schedule->effective_start_time = q->first->start_time + device_start_offsets[device_name];
					else 
						q->second.schedule->effective_start_time = capture_schedules[i].start_time + device_start_offsets[device_name];
					
					q->second.schedule->effective_stop_time = q->second.schedule->effective_start_time + q->second.schedule->duration  + device_start_offsets[device_name]; 

					if (q->second.schedule->effective_start_time < capture_schedules[i].start_time)
						capture_schedules[i].start_time = q->second.schedule->effective_start_time;
					if (q->second.schedule->effective_stop_time > capture_schedules[i].stop_time)
						capture_schedules[i].stop_time = q->second.schedule->effective_stop_time;

					if (q->second.schedule->device_capture_period != p->second.effective_device_period)
						throw ns_ex("Invalid device capture period specified for device") << p->second.device_name;
					if (q->second.schedule->number_of_consecutive_captures_per_sample != p->second.number_of_consecutive_captures_per_sample)
						throw ns_ex("Invalid device consecutive samples per sample specified for device") << p->second.device_name;
					if (q->second.samples.size() == 0)
						throw ns_ex("Empty device sample group found!");
				}
			}
			std::set<string> incubators;
			
			for (ns_device_schedule_list::iterator device = device_schedules.begin(); device != device_schedules.end(); device++){
				incubators.insert(incubator_assignments[device->second.device_name]);
			}
			
			debug +=  string("Schedule Involves ") + ns_to_string(device_schedules.size()) + " devices in " + ns_to_string( incubators.size()) + " location";
			if (incubators.size() != 1)
				debug+="s";
			debug +=":";
			for(std::set<string>::const_iterator p = incubators.begin(); p != incubators.end(); p++){
				debug+=*p;
				debug+=",";
			}
			debug += "\n";
			for (ns_device_schedule_list::iterator device = device_schedules.begin(); device != device_schedules.end(); device++){
				if (!actually_write){
					debug+=string("\tDevice ") + device->second.device_name + " runs between " + 
						ns_format_time_string_for_human(capture_schedules[i].start_time + device_start_offsets[device->second.device_name]) + 
							" and " + 
							ns_format_time_string_for_human(capture_schedules[i].stop_time +  device_start_offsets[device->second.device_name]);
					debug+=" with a capture period of " + ns_capture_schedule::time_string(device->second.effective_device_period) + "\n";
				}
			}
		}
		ns_device_start_offset_list device_stop_times;
		ns_device_start_offset_list device_interval_at_stop;

		for (unsigned int i = 0; i < capture_schedules.size(); i++){
			ns_device_schedule_list & device_schedules(capture_schedules[i].device_schedules);
			ns_device_start_offset_list & device_start_offsets(capture_schedules[i].device_start_offsets);
			unsigned long number_of_captures(0);
			for (ns_device_schedule_list::iterator device = device_schedules.begin(); device != device_schedules.end(); device++){
				const string & device_name = device->first;
				
				ns_device_start_offset_list::iterator stop_time(device_stop_times.find(device_name));
				if(stop_time == device_stop_times.end()){
					device_stop_times[device_name] = 0;
					stop_time = device_stop_times.find(device_name);
				}
				

				if (!actually_write){
					debug+=string("Schedule for device ") + device->second.device_name + ":\n";
				}
				char have_started(false);
				ns_device_capture_schedule::ns_sample_group_list::iterator current_sample_group(device->second.sample_groups.begin());
				unsigned long current_sample_id = 0;


				for (unsigned long t = capture_schedules[i].start_time+device_start_offsets[device_name];  t < capture_schedules[i].stop_time+device_start_offsets[device_name];){

					ns_device_capture_schedule::ns_sample_group_list::iterator loop_start_group = current_sample_group;
					unsigned long loop_start_sample_id = current_sample_id;
					//find the next active sample at this time
					while(true){
						if (have_started){
							current_sample_id++;
							if (current_sample_id >= current_sample_group->second.samples.size()){
									current_sample_group++;
								current_sample_id = 0;
								if (current_sample_group == device->second.sample_groups.end())
									current_sample_group = device->second.sample_groups.begin();
							}
						}
						else have_started = true;
						if (current_sample_group->second.schedule->effective_start_time <= t+device->second.effective_device_period && 
							current_sample_group->second.schedule->effective_stop_time >= t+device->second.effective_device_period)
							break;
						if (current_sample_group == loop_start_group && current_sample_id == loop_start_sample_id )
							break;
					}

					//schedule the scans
					unsigned long dt_total = device->second.effective_device_period*device->second.number_of_consecutive_captures_per_sample;
					for (unsigned int dt = 0; dt < dt_total; dt+=device->second.effective_device_period){
						sql << "INSERT INTO capture_schedule SET experiment_id = " << experiment_id << ", scheduled_time = " << t+dt << ","
							<< "sample_id = " << current_sample_group->second.samples[current_sample_id]->sample_id << ", time_stamp = 0";
						if (!actually_write){
							sql.clear_query();
							debug +="\t";
							debug+= current_sample_group->second.samples[current_sample_id]->sample_name + ": " + ns_format_time_string_for_human(t+dt) + "\n";
						}
						else{
							sql.send_query();
							number_of_captures++;
						}
					}
					if (t+dt_total> stop_time->second){
						stop_time->second = t+dt_total;
						device_interval_at_stop[device_name] = device->second.effective_device_period;
					}

					t+=dt_total;
				}
				sql << "UPDATE experiments SET num_time_points = " << number_of_captures << ", first_time_point=" << capture_schedules[i].start_time
					<< ", last_time_point= " << capture_schedules[i].stop_time << " WHERE id=" << experiment_id;
				if (actually_write)
					sql.send_query();
				sql.clear_query();
			}
		}

		//start autoscans to keep scanners running after the end of the experiment
		for (ns_device_start_offset_list::iterator p = device_stop_times.begin(); p != device_stop_times.end(); p++){
			sql << "INSERT INTO autoscan_schedule SET device_name='" << p->first
					<< "', autoscan_start_time=" << (p->second + device_interval_at_stop[p->first])
					<< ", scan_interval = " << device_interval_at_stop[p->first];
				if (actually_write)
					sql.send_query();
				else{
					debug+="Scheduling an ";
					debug+=ns_to_string(device_interval_at_stop[p->first]) + " second autoscan sequence on device "
						+ p->first + " at " + ns_format_time_string_for_human(p->second + device_interval_at_stop[p->first]) + "\n";
				}
		}
		sql.send_query("COMMIT");

		sql.send_query("UPDATE experiments SET time_stamp = NOW() WHERE time_stamp = 0");
		sql.send_query("UPDATE capture_samples SET time_stamp = NOW() WHERE time_stamp = 0");
		sql.send_query("UPDATE capture_schedule SET time_stamp = NOW() WHERE time_stamp = 0");
		
	}
	catch(...){
		sql.send_query("ROLLBACK");
		throw;
	}
	return debug;
}