void ns_machine_analysis_data_loader::set_up_spec_to_load(const unsigned long & region_id, unsigned long & sample_id, unsigned long & experiment_id_a, ns_sql & sql, const bool load_excluded_regions){
	const bool region_specified(region_id != 0);
	const bool sample_specified(sample_id != 0);
	if (region_id == 0 && sample_id == 0 && experiment_id_a==0)
		throw ns_ex("No data requested!");
		
	if (region_id != 0){
		sql << "SELECT sample_id FROM sample_region_image_info WHERE id = " << region_id;
		ns_sql_result res;
		sql.get_rows(res);
		if (res.size() == 0)
			throw ns_ex("ns_experiment_movement_results::load()::Could not load region information ") << region_id;
		sample_id = atol(res[0][0].c_str());
	}
	if (sample_id != 0){
		sql << "SELECT experiment_id FROM capture_samples WHERE id = " << sample_id;
		ns_sql_result res;
		sql.get_rows(res);
		if (res.size() == 0)
			throw ns_ex("ns_experiment_movement_results::load()::Could not load sample information ") << sample_id;
		experiment_id_a = atol(res[0][0].c_str());
	}

	sql << "SELECT name FROM experiments WHERE id=" << experiment_id_a;
	ns_sql_result res;
	sql.get_rows(res);

	if (res.size() == 0)
		throw ns_ex("ns_experiment_movement_results::load()::Could not load experiment id=") << experiment_id_a;

	experiment_name_ = res[0][0];
	experiment_id_ = experiment_id_a;
	std::vector<unsigned long> sample_ids;

	if (!region_specified && !sample_specified){
		sql << "SELECT id FROM capture_samples WHERE censored=0 AND experiment_id = " << experiment_id_a;
		if (sample_id != 0)
			sql << " AND id = " << sample_id;
		ns_sql_result samp;
		sql.get_rows(samp);
		samples.resize(samp.size());
		for (unsigned int i = 0; i < samp.size(); i++)
			samples[i].set_id(atol(samp[i][0].c_str()));
	}
	else{
		//add just the sample
		samples.resize(1,sample_id);
	}
	
}
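//A minimal usage sketch, not part of the original source: resolving the owning sample and
//experiment ids from a region id before any data is loaded.  The wrapper function below and the
//literal region id are assumptions; only set_up_spec_to_load()'s signature is taken from the code above.
static void ns_example_resolve_region_spec(ns_machine_analysis_data_loader & loader, ns_sql & sql){
	unsigned long sample_id(0), experiment_id(0);
	const unsigned long region_id(12345);	//hypothetical region id
	//fills sample_id and experiment_id from the region's database records and loads the experiment name
	loader.set_up_spec_to_load(region_id, sample_id, experiment_id, sql, false);
}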
// Example 2
void ns_capture_sample_region_data::generate_summary_info(ns_capture_sample_region_data_timepoint & mean_timepoint,ns_capture_sample_region_data_timepoint & first_timepoint,ns_capture_sample_region_data_timepoint & last_timepoint){
	mean_timepoint.set_as_zero();
	first_timepoint.set_as_zero();
	last_timepoint.set_as_zero();

	if (timepoints.size() == 0)
		throw ns_ex("ns_capture_sample_region_data::generate_summary_info()::No timepoints present!");

	unsigned long decile_size = timepoints.size()/10;
	if (decile_size == 0)
		decile_size = 1;

	//average the first decile into first_timepoint and the last decile into last_timepoint;
	//mean_timepoint accumulates every timepoint.
	for (unsigned int i = 0; i < decile_size; i++){
		first_timepoint+=timepoints[i];
		mean_timepoint+=timepoints[i];
	}
	for (unsigned int i = decile_size; i < timepoints.size()-decile_size; i++)
		mean_timepoint+=timepoints[i];
	for (unsigned int i = timepoints.size()-decile_size; i < timepoints.size(); i++){
		last_timepoint+=timepoints[i];
		mean_timepoint+=timepoints[i];
	}
	first_timepoint/=(double)decile_size;
	last_timepoint/=(double)decile_size;
	mean_timepoint/=timepoints.size();
}
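//A standalone sketch, not part of the original source, of the decile summary computed above:
//the "first" and "last" values average the earliest and latest ~10% of samples (at least one each),
//while the mean covers every sample.  Plain doubles stand in for the timepoint type.
#include <vector>
#include <cstddef>
static void ns_example_decile_summary(const std::vector<double> & v, double & first, double & last, double & mean){
	first = last = mean = 0;
	if (v.empty())
		return;
	std::size_t decile_size = v.size()/10;
	if (decile_size == 0)
		decile_size = 1;	//always average at least one sample, as in the code above
	for (std::size_t i = 0; i < decile_size; i++)
		first += v[i];
	for (std::size_t i = v.size()-decile_size; i < v.size(); i++)
		last += v[i];
	for (std::size_t i = 0; i < v.size(); i++)
		mean += v[i];
	first /= decile_size;
	last /= decile_size;
	mean /= v.size();
}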
void ns_machine_analysis_sample_data::load(const ns_death_time_annotation_set::ns_annotation_type_to_load & annotation_type_to_load,const unsigned long sample_id, const ns_region_metadata & sample_metadata,ns_sql & sql, 
	const unsigned long specific_region_id, const bool include_excluded_regions, const  ns_machine_analysis_region_data::ns_loading_details & loading_details){
	bool calculate_missing_data = false;
	device_name_ = sample_metadata.device;
	ns_sql_result reg;
	sql << "SELECT r.id FROM sample_region_image_info as r WHERE r.sample_id = " << sample_id << " AND r.censored=0 ";
	if (!include_excluded_regions)
			sql << " AND r.excluded_from_analysis=0";
	if (specific_region_id!=0)
		sql << " AND r.id = " << specific_region_id;
	sql << " ORDER BY r.name";

	sql.get_rows(reg);
	if (reg.empty() && specific_region_id!=0)
		throw ns_ex("Could not identify region ") << specific_region_id << ".  Was it excluded?";
	regions.reserve(reg.size());
	for (unsigned int i = 0; i < reg.size(); i++){
		try{
			unsigned int s = regions.size();
			regions.resize(s+1);
			unsigned long region_id = atol(reg[i][0].c_str());
			regions[s].metadata = sample_metadata;
			regions[s].metadata.load_only_region_info_from_db(region_id,"",sql);
			regions[s].metadata.technique = "Lifespan Machine";
			regions[s].load_from_db(annotation_type_to_load,loading_details,region_id,sql);
			//break;
		}
		catch(ns_ex & ex){
			std::cerr << regions.rbegin()->metadata.sample_name << "::" << regions.rbegin()->metadata.region_name << ": " << ex.text() << "\n";
			regions.pop_back();
		}
	}
	sample_name_ = sample_metadata.sample_name;
	sample_id_ = sample_id;
}
// Example 4
void ns_capture_sample_region_statistics_set::set_sample_data(const ns_capture_sample_statistics_set & samples){
	for (unsigned int i = 0; i < regions.size(); i++){
		std::map<unsigned long,ns_capture_sample_image_statistics *>::const_iterator p = samples.samples_sorted_by_id.find(regions[i].metadata.sample_id);
		if (p==samples.samples_sorted_by_id.end())
			throw ns_ex("Could not find sample statistics for sample id ") << regions[i].metadata.sample_id << "(" << regions[i].metadata.sample_name << ")" << "\n";
		regions[i].set_sample_info(*p->second);
	}
}
ns_time_series_denoising_parameters ns_time_series_denoising_parameters::load_from_db(const unsigned long region_id, ns_sql & sql){
	sql << "SELECT time_series_denoising_flag FROM sample_region_image_info WHERE id = " << region_id;
	ns_sql_result res;
	sql.get_rows(res);
	if (res.size() == 0)
		throw ns_ex("ns_time_series_denoising_parameters::load_from_db()::Could not find region ") << region_id << " in db";
	ns_time_series_denoising_parameters p;
	p.subtract_out_median_movement_score_from_time_series = res[0][0] == "1";
	return p;
}
// Example 6
void ns_table_format_processor::sql_get_column_names(const std::string & table_name, std::vector<std::string> & column_names, ns_image_server_sql * sql) {
    *sql << "DESCRIBE " << table_name;
    ns_sql_result res;
    sql->get_rows(res);
    column_names.resize(res.size());
    for (unsigned int i = 0; i < res.size(); i++)
        column_names[i] = res[i][0];

    if (column_names.size() == 0)
        throw ns_ex("No column names received for table ") << table_name << "!";
}
ns_time_series_denoising_parameters ns_time_series_denoising_parameters::load_from_db(const unsigned long region_id, ns_sql & sql){
	sql << "SELECT time_series_denoising_flag FROM sample_region_image_info WHERE id = " << region_id;
	ns_sql_result res;
	sql.get_rows(res);
	if (res.size() == 0)
		throw ns_ex("ns_time_series_denoising_parameters::load_from_db()::Could not find region ") << region_id << " in db";
	ns_time_series_denoising_parameters p;
	p.movement_score_normalization = (ns_time_series_denoising_parameters::ns_movement_score_normalization_type)atol(res[0][0].c_str());

	return p;
}
// Example 8
void ns_capture_sample_region_data::set_sample_info(const ns_capture_sample_image_statistics & sample_stats){

	unsigned long s(0);
	unsigned long last_rt(0),last_st(0);
	for (unsigned int i = 0; i < timepoints.size(); i++){
		//advance through the sample scans until one matching this region timepoint is found
		while(true){
			if (s >= sample_stats.scans.size())
				throw ns_ex("Could not find time ") << timepoints[i].time;
			if (timepoints[i].time == sample_stats.scans[s].scheduled_time_date)
				break;
			s++;
		}
		if (timepoints[i].time < last_rt)
			throw ns_ex("Region timepoints are not sorted!");
		if ((unsigned long)sample_stats.scans[s].start_time < last_st)
			throw ns_ex("Sample timepoints are not sorted!");
		last_rt = timepoints[i].time;
		last_st = sample_stats.scans[s].start_time;

		timepoints[i].sample_statistics = sample_stats.scans[s];

	}
}
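//A standalone sketch, not part of the original source, of the matching strategy used above:
//both series are sorted by time, so a single cursor into the sample scans is advanced
//monotonically while walking the region timepoints, giving a linear-time alignment.
#include <vector>
#include <cstddef>
#include <stdexcept>
static void ns_example_align_by_time(const std::vector<unsigned long> & region_times,
				     const std::vector<unsigned long> & scan_times,
				     std::vector<std::size_t> & scan_index_for_timepoint){
	scan_index_for_timepoint.resize(region_times.size());
	std::size_t s(0);
	for (std::size_t i = 0; i < region_times.size(); i++){
		//advance the scan cursor until it reaches the current region timepoint
		while (s < scan_times.size() && scan_times[s] != region_times[i])
			s++;
		if (s == scan_times.size())
			throw std::runtime_error("Could not find a scan matching the region timepoint");
		scan_index_for_timepoint[i] = s;
	}
}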
// Example 9
void ns_table_format_processor::get_column_name_indicies(const std::vector<ns_table_column_spec> & spec) {
    unsigned long num_found(0);

    for (long i = 0; i < column_names.size() && num_found < spec.size(); i++) {
        for (long j = 0; j < spec.size(); j++) {
            if (column_names[i] == spec[j].name) {
                *spec[j].index = i;
                num_found++;
            }
        }
    }
    if (num_found != spec.size())
        throw ns_ex("ns_column_name_info::get_column_name_indicies::Could not find all columns!");
}
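//A hypothetical usage sketch, not part of the original source, of binding column indices by name
//once column_names has been filled (e.g. by sql_get_column_names() above).  Per the loop above,
//ns_table_column_spec is assumed to pair a column name with a pointer to the long that receives
//its index; the constructor form shown here is an assumption.
//
//	long id_column(-1), time_stamp_column(-1);
//	std::vector<ns_table_column_spec> spec;
//	spec.push_back(ns_table_column_spec("id", &id_column));
//	spec.push_back(ns_table_column_spec("time_stamp", &time_stamp_column));
//	processor.get_column_name_indicies(spec);
//	//id_column and time_stamp_column now hold those columns' positions in the DESCRIBE output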
// Example 10
void * ns_usb_context::get_context(){
#ifndef WIN32
#ifdef USE_NEW_USB
	if (context == 0){
		libusb_context * c;
		int r = libusb_init(&c);
		if (r < 0)
			throw ns_ex("ns_usb_control::Could not initialize libusb");
		context = c;
	}
#else
	if (context == 0){
		usb_init();
		context = (void *)this;
	}
#endif
#endif
	return context;
}
// Example 11
bool ns_usb_control::reset_device(int bus,int device_id){
	#ifdef _WIN32 
	return false;
	#else
	
	#ifdef USE_NEW_USB
	libusb_device **devices;
	int r;
	ssize_t cnt;
	ns_acquire_lock_for_scope lock(usb_context.libusb_lock,__FILE__,__LINE__);

	libusb_context * context = (libusb_context *)usb_context.get_context();
	try{
	  cnt = libusb_get_device_list(context, &devices);
	  if (cnt < 0)
	    throw ns_ex("ns_usb_control::Could not enumerate devices!");
	  try{
	    libusb_device *d;
	    libusb_device *requested_device(0);
	    int i = 0;
	    
	    while ((d= devices[i++]) != NULL) {
	      struct libusb_device_descriptor desc;
	      unsigned long bus_number(libusb_get_bus_number(d));
	      unsigned long device_address(libusb_get_device_address(d));
	      if (bus_number == bus && device_address == device_id){
		requested_device = d;
	      }
	      else
		libusb_unref_device(d);//don't hold onto devices that aren't used
	    }
	    
	    
	    if(requested_device==0){
	      libusb_free_device_list(devices,0);
	      lock.release();
	      return false;
	    }
	    struct libusb_device_handle *device_handle;
	    int err = libusb_open(requested_device,&device_handle);
	    if (err != 0)
	      throw ns_ex("ns_usb_control::Could not open device: ") << err;
	    
	    err = libusb_reset_device(device_handle);
	    if (err){
	      libusb_close(device_handle);
	      libusb_unref_device(requested_device);
	      throw ns_ex("ns_usb::reset_device::Could not reset device: ") << err;
	    }
	    libusb_close(device_handle);
	    libusb_unref_device(requested_device);
	  }
	  catch(...){
	    //don't leak device name lists
	    libusb_free_device_list(devices, 0);
	    throw;
	  }
	}
	catch(...){
	  //release the context so that it can be reset
	  usb_context.release_context();
	  throw;
	}
	
	libusb_free_device_list(devices, 0);
	lock.release();
	return true;
#else
	
	ns_acquire_lock_for_scope lock(usb_context.libusb_lock,__FILE__,__LINE__);
	usb_find_busses();
	usb_find_devices();

	struct usb_device *device(0);
	ssize_t i = 0;
	int err = 0;

	for (struct usb_bus * cur_bus = usb_busses; cur_bus; cur_bus= cur_bus->next){
		int cur_bus_number(atoi(cur_bus->dirname));
		for (struct usb_device *dev = cur_bus->devices; dev; dev = dev->next){
			int cur_device_id(dev->devnum);
			//char * filename(dev->filename);

			if (cur_bus_number == bus && device_id == cur_device_id){
				device = dev;
				break;
			}
		}
	}
     

	if(!device){
		lock.release();
		return false;
	}
	struct usb_dev_handle *handle = usb_open(device);
	//usb_open() returns NULL on failure
	if (handle == NULL)
		throw ns_ex("ns_usb::reset_device::Could not acquire handle for device");
	try{
		err = usb_reset(handle);
		//err2 = usb_reset(handle);
		if (err)
			throw ns_ex("ns_usb::reset_device::Could not reset device");
		usb_close(handle);
	}
	catch(...){
		usb_close(handle);
		throw;
	}
	lock.release();
	return true;
#endif
#endif
}
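//A standalone libusb-1.0 sketch, not taken from the original source, showing the same
//enumerate / match-by-bus-and-address / open / reset sequence in its simplest form.
#include <libusb-1.0/libusb.h>
#include <stdexcept>
static bool ns_example_reset_usb_device(libusb_context * ctx, int bus, int address){
	libusb_device ** devices;
	ssize_t n = libusb_get_device_list(ctx, &devices);
	if (n < 0)
		throw std::runtime_error("Could not enumerate USB devices");
	bool reset_ok = false;
	for (ssize_t i = 0; i < n; i++){
		if (libusb_get_bus_number(devices[i]) == bus &&
		    libusb_get_device_address(devices[i]) == address){
			libusb_device_handle * handle;
			if (libusb_open(devices[i], &handle) == 0){
				reset_ok = (libusb_reset_device(handle) == 0);
				libusb_close(handle);
			}
			break;
		}
	}
	//the second argument unrefs every device in the list, so no manual libusb_unref_device() is needed
	libusb_free_device_list(devices, 1);
	return reset_ok;
}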
// Example 12
/*
void ns_capture_sample_region_statistics_set::output_plate_statistics_with_mortality_data(const ns_survival_data_summary_aggregator & survival_data, std::ostream & o){
		
	ns_capture_sample_region_data_timepoint mean,first,last;
	for (unsigned int i = 0; i < regions.size(); i++){
			
		regions[i].metadata.out_JMP_plate_identity_data(o);
		o << ",";
		o << regions[i].censored?"1":"0";
		o << ",";
		o << regions[i].excluded?"1":"0";
		o << ",";

		ns_survival_data_summary_aggregator::ns_plate_list::const_iterator region_mortality_data(survival_data.plate_list.find(regions[i].metadata.plate_name()));
		if (region_mortality_data == survival_data.plate_list.end()){
			survival_data.out_JMP_empty_summary_data(o);
		}
		else{
			survival_data.out_JMP_summary_data(region_mortality_data,o);
		}
		o << ",";

		regions[i].generate_summary_info(mean,first,last);
		mean.output_jmp_data(o,mean.time,false,false,",");
		first.output_jmp_data(o,mean.time,false,false,",");
		last.output_jmp_data(o,mean.time,false,false,"\n");
	}

}
	*/
void ns_capture_sample_region_statistics_set::load_whole_experiment(const unsigned long experiment_id,ns_sql & sql,bool process_raw_image_stats){
		
	std::string experiment_name;
	sql << "SELECT name FROM experiments WHERE id = " << experiment_id;
	ns_sql_result res1;
	sql.get_rows(res1);
	if (res1.size() == 0)
		throw ns_ex("Could not find experiment id ") << experiment_id;
	experiment_name = res1[0][0];

	ns_sql_result res;
	sql << "SELECT id,name,device_name,censored,description,excluded_from_analysis,incubator_name, incubator_location "
			    "FROM capture_samples WHERE experiment_id=" << experiment_id;
	sql.get_rows(res);
		
	ns_genotype_fetcher genotypes;
	genotypes.load_from_db(&sql);

	for (unsigned int j = 0; j < res.size(); j++){
		
		ns_region_metadata sample_metadata;
		sample_metadata.sample_id = atol(res[j][0].c_str());
		sample_metadata.experiment_name = experiment_name;
		sample_metadata.sample_name = res[j][1];
		sample_metadata.device = res[j][2];
		sample_metadata.incubator_name = res[j][6];
		sample_metadata.incubator_location = res[j][7];

		bool sample_censored=(res[j][3]!="0"),
				sample_excluded=(res[j][5]!="0");
		std::string sample_details = res[j][4];
			


			
		sql << "SELECT id,censored,excluded_from_analysis FROM sample_region_image_info WHERE sample_id=" << sample_metadata.sample_id;
		ns_sql_result res2;
		sql.get_rows(res2);

		unsigned long plate_index(regions.size());
		regions.resize(plate_index+res2.size());

		for (unsigned long k = 0; k < res2.size(); ++k){
			ns_region_metadata metadata(sample_metadata);
			unsigned long region_id(atol(res2[k][0].c_str()));
			metadata.load_from_db(region_id,"",sql);
			
			bool region_censored(res2[k][1]!="0"),
					region_excluded(res2[k][2]!="0");
//				if (region_censored ||  region_excluded){
//					char a;
//					a++;
//				}
				//std::cerr << "EX";
				
			regions[plate_index+k].load_from_db(region_id,metadata,sample_censored || region_censored,sample_excluded || region_excluded,sql,process_raw_image_stats);
			if (sample_details.size() > 0) 
				regions[plate_index+k].metadata.details += sample_details;
		}
	}
	build_id_mapping();
}
// Example 13
/*
ns_64_bit ns_atoi64(const char * s){
  #ifdef _WIN32
  return _atoi64(s);
#else
  return atoll(s);
#endif
}*/
void ns_capture_sample_image_statistics::load_from_db(unsigned long id,ns_sql & sql){
		
	sample_id = id;
	sql << "SELECT name,device_name, position_x,position_y,size_x,size_y, experiment_id FROM capture_samples WHERE id="<<id;
	ns_sql_result res;
	sql.get_rows(res);
	if(res.size()==0)
		throw ns_ex("Could not find sample id ") << sample_id << " in the db.";
	sample_name = res[0][0];
	device_name = res[0][1];
	position.x = atof(res[0][2].c_str());
	position.y = atof(res[0][3].c_str());
	size.x = atof(res[0][4].c_str());
	size.y = atof(res[0][5].c_str());
	sql << "SELECT name FROM experiments WHERE id = " << res[0][6];
	experiment_name = sql.get_value();
	sql << "SELECT s.scheduled_time, s.time_at_start, s.time_at_finish, s.missed, s.problem,s.time_at_imaging_start,"
		 "s.time_spent_reading_from_device,s.time_spent_writing_to_disk,"
		 "s.total_time_during_read,s.time_during_transfer_to_long_term_storage,"
		 "s.time_during_deletion_from_local_storage, "
		 "s.total_time_spent_during_programmed_delay,"
		 "t.intensity_average,t.intensity_std,t.intensity_entropy, t.intensity_top_percentile,t.intensity_bottom_percentile, "
		 "i.registration_vertical_offset,i.registration_horizontal_offset "
		 "FROM (capture_schedule as s LEFT OUTER JOIN captured_images as i ON i.id = s.captured_image_id) "
		 "LEFT OUTER JOIN image_statistics as t ON  i.image_statistics_id = t.id "
		 "WHERE s.sample_id=" << sample_id << " AND s.scheduled_time < UNIX_TIMESTAMP(NOW()) ORDER BY s.scheduled_time ASC";
	sql.get_rows(res);
	scans.resize(res.size());
	for (unsigned int i = 0; i < scans.size(); i++){
		scans[i].scheduled_time_date = atol(res[i][0].c_str());
		scans[i].start_time_date = atol(res[i][1].c_str());
		scans[i].stop_time_date = atol(res[i][2].c_str());
		scans[i].data_start_time_date = atol(res[i][5].c_str());
		scans[i].scan_position = position;
		scans[i].scan_size = size;

		scans[i].missed = (res[i][3]!="0");
		scans[i].problem = (res[i][4]!="0");
	
		scans[i].time_spent_reading_from_device = ns_atoi64(res[i][6].c_str())/1000.0/1000.0/60;
		scans[i].time_spent_writing_to_disk = ns_atoi64(res[i][7].c_str())/1000.0/1000.0;
		scans[i].total_time_during_read = ns_atoi64(res[i][8].c_str())/1000.0/1000.0/60.0;
		scans[i].total_time_spent_during_programmed_delay = ns_atoi64(res[i][11].c_str())/1000.0/60.0;

		scans[i].time_during_transfer_to_long_term_storage = ns_atoi64(res[i][9].c_str())/1000.0/1000.0;
		scans[i].time_during_deletion_from_local_storage = ns_atoi64(res[i][10].c_str())/1000.0/1000.0;
		scans[i].registration_offset.y = atol(res[i][17].c_str());
		scans[i].registration_offset.x = atol(res[i][18].c_str());
		if (res[i][10] != "NULL"){
			scans[i].image_stats.image_statistics.mean = atof(res[i][12].c_str());
			scans[i].image_stats.image_statistics.variance = atof(res[i][13].c_str());
			scans[i].image_stats.image_statistics.entropy = atof(res[i][14].c_str());
			scans[i].image_stats.image_statistics.top_percentile_average= atof(res[i][15].c_str());
			scans[i].image_stats.image_statistics.bottom_percentile_average= atof(res[i][16].c_str());
		}
		else{		
			scans[i].image_stats.image_statistics.mean = 0;
			scans[i].image_stats.image_statistics.variance = 0;
			scans[i].image_stats.image_statistics.entropy = 0;
			scans[i].image_stats.image_statistics.top_percentile_average = 0;
			scans[i].image_stats.image_statistics.bottom_percentile_average = 0;
		}
	//	if (scans[i].error != 0) 
	//		cerr << "ERROR FOUND";

	}

	if (scans.size() == 0)
		return;
	//normalize times to start of experiment
	date_of_first_sample_scan = scans[0].scheduled_time_date;

	for (unsigned int i = 0; i < scans.size(); i++){
		scans[i].date_of_first_sample_scan = date_of_first_sample_scan;
		if (scans[i].scheduled_time_date != 0)
			scans[i].scheduled_time=scans[i].scheduled_time_date-date_of_first_sample_scan;
		else scans[i].scheduled_time = 0;
		if (scans[i].start_time_date != 0)
			scans[i].start_time=scans[i].start_time_date-date_of_first_sample_scan;
		else scans[i].start_time = 0;
		if (scans[i].stop_time_date != 0)
			scans[i].stop_time=scans[i].stop_time_date-date_of_first_sample_scan;
		else scans[i].stop_time = 0;
		if (scans[i].data_start_time_date != 0)
			scans[i].data_start_time=scans[i].data_start_time_date - date_of_first_sample_scan;
		else scans[i].data_start_time = 0;
	}
		
	calculate_running_statistics();
}
bool ns_machine_analysis_region_data::load_from_db(const ns_death_time_annotation_set::ns_annotation_type_to_load & annotation_type_to_load,const ns_loading_details & details,const unsigned long region_id,ns_sql & sql){
	death_time_annotation_set.clear();
	metadata.region_id = region_id;
	if (annotation_type_to_load == ns_death_time_annotation_set::ns_recalculate_from_movement_quantification_data)
		return recalculate_from_saved_movement_quantification(region_id,sql);
	
	ns_image_server_results_subject results_subject;
	results_subject.region_id = region_id;
	std::vector<ns_image_server_results_storage::ns_death_time_annotation_file_type> files_to_open;
	switch(annotation_type_to_load){
		case ns_death_time_annotation_set::ns_all_annotations:
		case ns_death_time_annotation_set::ns_censoring_and_movement_states:
			files_to_open.push_back(ns_image_server_results_storage::ns_worm_position_annotations);
			files_to_open.push_back(ns_image_server_results_storage::ns_censoring_and_movement_transitions);
			break;
		case ns_death_time_annotation_set::ns_censoring_data:
		case ns_death_time_annotation_set::ns_movement_transitions:
		case ns_death_time_annotation_set::ns_censoring_and_movement_transitions:
			files_to_open.push_back(ns_image_server_results_storage::ns_censoring_and_movement_transitions);
			break;
		case ns_death_time_annotation_set::ns_movement_states:
			files_to_open.push_back(ns_image_server_results_storage::ns_worm_position_annotations);
			//fall through
		case ns_death_time_annotation_set::ns_no_annotations:
			break;
		default: throw ns_ex("ns_machine_analysis_data_loader::Unknown annotation type request");
	}
	bool could_load_all_files(true);
	for (unsigned int i = 0; i < files_to_open.size(); i++){
		ns_image_server_results_file results(image_server.results_storage.machine_death_times(results_subject,files_to_open[i],"time_path_image_analysis",sql));
		ns_acquire_for_scope<std::istream> tp_i(results.input());
		if (tp_i.is_null()){
			could_load_all_files = false;
			continue;
		}
		death_time_annotation_set.read(annotation_type_to_load,tp_i(),details==ns_machine_analysis_region_data::ns_exclude_fast_moving_animals);
		tp_i.release();
	}
	if (!could_load_all_files)
		return false;
	//remove out of bounds data
/*	for (unsigned int i = 0 ; i < death_time_annotation_set.size(); i++)
		if (death_time_annotation_set.events[i].type == ns_moving_worm_disappearance){
			ns_death_time_annotation a(death_time_annotation_set.events[i]);
			std::cerr << a.description() << "\n";

		}*/
	if (metadata.time_of_last_valid_sample != 0){
		for (ns_death_time_annotation_set::iterator p = death_time_annotation_set.begin(); p != death_time_annotation_set.end();){
//			if(p->stationary_path_id.group_id == 28)
//				std::cerr << "MA";
			if (p->time.fully_unbounded())
				throw ns_ex("Fully unbounded interval encountered!");
			if ((!p->time.period_start_was_not_observed && p->time.period_start > metadata.time_of_last_valid_sample))
				p->time.period_start_was_not_observed = true;
			if ((!p->time.period_end_was_not_observed && p->time.period_end > metadata.time_of_last_valid_sample))
				p->time.period_end_was_not_observed = true;
			if (p->time.fully_unbounded())
				p = death_time_annotation_set.erase(p);
			else{
				//events that straddle the externally specified last observation become unbounded
				if (p->time.period_end > metadata.time_of_last_valid_sample)
					p->time.period_end_was_not_observed = true;
				p++;
			}
		}
	}
	
	return true;
}
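//A simplified standalone sketch, not part of the original source, of the truncation rule applied
//above: any observation boundary that falls after the externally specified last valid sample time
//is marked unobserved, and annotations left with neither boundary observed are dropped.  The struct
//below is a stand-in for the real annotation type.
#include <vector>
struct ns_example_interval {
	unsigned long period_start, period_end;
	bool period_start_was_not_observed, period_end_was_not_observed;
	bool fully_unbounded() const { return period_start_was_not_observed && period_end_was_not_observed; }
};
static void ns_example_truncate(std::vector<ns_example_interval> & events, unsigned long last_valid_time){
	for (std::vector<ns_example_interval>::iterator p = events.begin(); p != events.end();){
		if (!p->period_start_was_not_observed && p->period_start > last_valid_time)
			p->period_start_was_not_observed = true;
		if (!p->period_end_was_not_observed && p->period_end > last_valid_time)
			p->period_end_was_not_observed = true;
		if (p->fully_unbounded())
			p = events.erase(p);	//nothing observable remains, so drop the annotation
		else ++p;
	}
}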
// Example 15
void ns_buffered_capture_scheduler::update_local_buffer_from_central_server(ns_image_server_device_manager::ns_device_name_list & connected_devices,ns_local_buffer_connection & local_buffer, ns_sql & central_db) {

    if (connected_devices.size() == 0)
        return;

    ns_acquire_lock_for_scope lock(buffer_capture_scheduler_lock,__FILE__,__LINE__);

    local_buffer.clear_query();
    central_db.clear_query();

    std::string local_time = local_buffer.get_value("SELECT UNIX_TIMESTAMP(NOW())"),
                central_time = central_db.get_value("SELECT UNIX_TIMESTAMP(NOW())");

    const ns_synchronized_time update_start_time(atol(local_time.c_str())-10,atol(central_time.c_str())-10);//go ten seconds into the past
    //to make sure all writes
    //are committed

    //now we update the local buffer to the central node.
    commit_all_local_schedule_changes_to_central_db(update_start_time,local_buffer,central_db);
    //now that all the local buffer data is reflected in the central database, we check to see if there is any new data in the central database.
    //if so, we wipe the local buffer and update everything.

    capture_schedule.load_if_needed(&central_db);
    //get any new or updated capture schedule events

    central_db << "SELECT sched.id, samp.id, samp.experiment_id, UNIX_TIMESTAMP(sched.time_stamp),UNIX_TIMESTAMP(samp.time_stamp)";

    for (unsigned int i = 0; i < capture_schedule.table_format.column_names.size(); i++)
        central_db << ",`sched`.`" << capture_schedule.table_format.column_names[i] << "`";

    central_db << " FROM capture_schedule as sched, capture_samples as samp "
               << "WHERE (samp.device_name='" << connected_devices[0].name << "'";

    for (unsigned int i = 1; i < connected_devices.size(); i++)
        central_db << " OR samp.device_name='" << connected_devices[i].name << "'";

    central_db << ")"
               << " AND sched.time_at_start = 0 "
               << " AND sched.sample_id = samp.id "
               << " AND sched.time_at_finish = 0 "
               //here, we could bring the entire local database completely up to date
               //but only scans in the future will make any difference, so we only download
               //those who are still scheduled for the future
               //this old command would fully update the database, as time_of_last_update_from_central_db
               //would be set to 0
               //<< " AND sched.scheduled_time > " << (time_of_last_update_from_central_db.remote_time-image_server.maximum_allowed_local_scan_delay())  //only get events in the future
               //however, now we only grab the future, relevant scans.
               << " AND sched.scheduled_time > " << (update_start_time.remote_time-image_server.maximum_allowed_local_scan_delay())  //only get events in the future

               << " AND sched.time_stamp > FROM_UNIXTIME(" << time_of_last_update_from_central_db.remote_time <<") "
               << " AND sched.time_stamp <= FROM_UNIXTIME(" << update_start_time.remote_time << ") "
               << " ORDER BY sched.scheduled_time ASC";


    ns_sql_result new_schedule;
    central_db.get_rows(new_schedule);
    std::set<unsigned long> altered_experiment_ids;
    std::set<unsigned long> altered_sample_ids;
    for (unsigned int i = 0; i < new_schedule.size(); i++) {
        //	if (atol(new_schedule[i][4].c_str()) > central_time_of_last_update_from_central_db){
        altered_sample_ids.insert(atol(new_schedule[i][1].c_str()));
        altered_experiment_ids.insert(atol(new_schedule[i][2].c_str()));
        //	}
    }
    const unsigned long new_timestamp(update_start_time.local_time);

    if (new_schedule.size() != 0) {
        if (new_schedule.size() > 4)
            image_server.register_server_event(ns_image_server_event("ns_buffered_capture_scheduler::")
                                               << new_schedule.size() << " new capture schedule entries found.  Updating local buffer.",&central_db);

        //if samples or experiments have been changed or added, update them.
        //we need to do this *before* updating the capture schedule,
        //as the addition of a capture schedule item might trigger a scan immediately
        //and that scan will fail if the sample and experiment information isn't already in the local database.
        if (altered_sample_ids.size() > 0) {
            capture_samples.load_if_needed("capture_samples",&central_db);
            experiments.load_if_needed("experiments",&central_db);
            std::string sample_where_clause(std::string(" WHERE ") + ns_compile_sql_where_clause(altered_sample_ids,"id")),
                experiment_where_clause(std::string(" WHERE ") + ns_compile_sql_where_clause(altered_experiment_ids,"id"));

            ns_sql_result capture_sample_data;
            ns_get_all_column_data_from_table("capture_samples",capture_samples.column_names,sample_where_clause,capture_sample_data,&central_db);
            ns_sql_result experiment_data;
            ns_get_all_column_data_from_table("experiments",experiments.column_names,experiment_where_clause,experiment_data,&central_db);

            std::cerr << "Updating local buffer with information about " << capture_sample_data.size() << " samples\n";
            //local_buffer_db.send_query("DELETE FROM buffered_capture_samples");
            if (capture_samples.time_stamp_column_id == -1)
                throw ns_ex("Could not find capture sample time stamp column!");
            long last_displayed_percent(-5);
            for(unsigned int i = 0; i < capture_sample_data.size(); i++) {
                const long percent((100*i)/capture_sample_data.size());
                if (percent >= last_displayed_percent+5) {
                    std::cerr << percent << "%...";
                    last_displayed_percent = percent;
                }
                std::string values;

                values += "`";
                values += capture_samples.column_names[0] + "`='" + local_buffer.escape_string(capture_sample_data[i][0]) + "'";
                for (unsigned int j = 1; j < capture_samples.column_names.size(); j++) {
                    //skip the remote time stamp and set a local one below: a clock asynchrony between the
                    //central and local servers could otherwise leave remote timestamps in the future according
                    //to the local clock, which would trigger the local server to update the central one on the
                    //next check, ad infinitum.
                    if (j == capture_samples.time_stamp_column_id)
                        continue;
                    values += std::string(",`") +  capture_samples.column_names[j] + "`='" + local_buffer.escape_string(capture_sample_data[i][j]) + "'";
                }
                values += std::string(",`time_stamp`=FROM_UNIXTIME(") + ns_to_string(new_timestamp) + ")";
                local_buffer << "INSERT INTO buffered_capture_samples SET " << values
                             << " ON DUPLICATE KEY UPDATE " << values;
                local_buffer.send_query();
            }
            std::cerr << "Done.\n";
            //local_buffer.send_query("DELETE FROM buffered_experiments");
            for(unsigned int i = 0; i < experiment_data.size(); i++) {
                std::string values;
                values += "`";
                values += experiments.column_names[0] + "`='" + local_buffer.escape_string(experiment_data[i][0]) + "'";
                for (unsigned int j = 1; j < experiments.column_names.size(); j++) {
                    if (experiments.time_stamp_column_id == j)
                        continue;
                    values += std::string(",`") + experiments.column_names[j] + "`='" + local_buffer.escape_string(experiment_data[i][j]) + "'";
                }
                values += std::string(",time_stamp=FROM_UNIXTIME(") + ns_to_string(new_timestamp) + ")";

                local_buffer << "INSERT INTO buffered_experiments SET " << values;
                local_buffer << " ON DUPLICATE KEY UPDATE " << values;
                local_buffer.send_query();
            }
        }
        std::cerr << "Updating local buffer with information about " << new_schedule.size() << " schedule time points...\n";
        long last_displayed_percent = -5;
        for (unsigned int i = 0; i < new_schedule.size(); i++) {
            const long percent((100*i)/new_schedule.size());
            if (percent >= last_displayed_percent+5) {
                std::cerr << percent << "%...";
                last_displayed_percent = percent;
            }
            std::string all_values;
            all_values += "`";
            all_values += capture_schedule.table_format.column_names[0] + "`='" + local_buffer.escape_string(new_schedule[i][5]) + "'";
            for (unsigned int j = 1; j < capture_schedule.table_format.column_names.size(); j++) {
                if (j == capture_schedule.time_stamp_column)
                    continue;
                all_values += std::string( ", `") + capture_schedule.table_format.column_names[j] + "`='" + local_buffer.escape_string(new_schedule[i][5+j]) + "'";
            }
            all_values+=std::string(",time_stamp=FROM_UNIXTIME(") + ns_to_string(new_timestamp) + ")";


            std::string update_values;
            update_values += std::string("problem=") + new_schedule[i][5+capture_schedule.problem_column] + ","
                             + std::string("scheduled_time=") + new_schedule[i][5+capture_schedule.scheduled_time_column] + ","
                             + std::string("missed=") + new_schedule[i][5+capture_schedule.missed_column] + ","
                             + std::string("censored=") + new_schedule[i][5+capture_schedule.censored_column] +","
                             + std::string("transferred_to_long_term_storage=") + new_schedule[i][5+capture_schedule.transferred_to_long_term_storage_column] +","
                             + std::string("time_during_transfer_to_long_term_storage=") + new_schedule[i][5+capture_schedule.time_during_transfer_to_long_term_storage_column] +","
                             + std::string("time_during_deletion_from_local_storage=") + new_schedule[i][5+capture_schedule.time_during_deletion_from_local_storage_column] + ","
                             + std::string("time_stamp=FROM_UNIXTIME(") + ns_to_string(update_start_time.local_time) + ")";


            local_buffer << "INSERT INTO buffered_capture_schedule SET " << all_values
                         << " ON DUPLICATE KEY UPDATE " << update_values;
            local_buffer.send_query();
        }
        std::cerr << "Done.\n";
    }
    //if no changes to the schedule were made, look for changes made to any capture samples
    else {

        ns_sql_result capture_sample_data;
        ns_get_all_column_data_from_table("capture_samples",capture_samples.column_names,
                                          std::string("WHERE time_stamp >= FROM_UNIXTIME(") + ns_to_string(time_of_last_update_from_central_db.remote_time) +") "
                                          " AND time_stamp < FROM_UNIXTIME(" + ns_to_string(update_start_time.remote_time) +") "
                                          ,capture_sample_data,&central_db);
        if (capture_sample_data.size() > 0) {
            std::cerr << "Copying over " << capture_sample_data.size() << " samples\n";
            //local_buffer_db.send_query("DELETE FROM buffered_capture_samples");
            for(unsigned int i = 0; i < capture_sample_data.size(); i++) {
                std::string values;
                values += "`";
                values += capture_samples.column_names[0] + "`='" + local_buffer.escape_string(capture_sample_data[i][0]) + "'";
                for (unsigned int j = 1; j < capture_samples.column_names.size(); j++)
                    values += std::string(",`") +  capture_samples.column_names[j] + "`='" + local_buffer.escape_string(capture_sample_data[i][j]) + "'";

                local_buffer << "INSERT INTO buffered_capture_samples SET " << values
                             << " ON DUPLICATE KEY UPDATE " << values;
                local_buffer.send_query();
            }
        }
    }

    local_buffer.send_query("COMMIT");
    //lock.unlock();

    commit_all_local_non_schedule_changes_to_central_db(update_start_time,local_buffer,central_db);

    central_db << "SELECT k,v FROM constants WHERE time_stamp > FROM_UNIXTIME(" << time_of_last_update_from_central_db.remote_time << ")";
    ns_sql_result cres;
    central_db.get_rows(cres);
    if (cres.size() > 0) {
        std::cerr << "Updating " << cres.size() << " constants in local buffer\n";
    }
    for (unsigned int i = 0; i < cres.size(); i++)
        image_server.set_cluster_constant_value(local_buffer.escape_string(cres[i][0]),local_buffer.escape_string(cres[i][1]),&local_buffer,update_start_time.local_time);
    time_of_last_update_from_central_db = update_start_time;
    store_last_update_time_in_db(time_of_last_update_from_central_db,local_buffer);

    lock.release();
}
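//A standalone sketch, not part of the original source, of the upsert pattern used throughout the
//function above: the same backtick-quoted column assignments serve both the INSERT and the
//ON DUPLICATE KEY UPDATE clause, so new rows are inserted and existing rows refreshed by one query.
#include <string>
#include <vector>
#include <cstddef>
static std::string ns_example_build_upsert(const std::string & table,
                                           const std::vector<std::string> & columns,
                                           const std::vector<std::string> & escaped_values){
    std::string assignments;
    for (std::size_t i = 0; i < columns.size(); i++){
        if (i > 0)
            assignments += ",";
        assignments += "`" + columns[i] + "`='" + escaped_values[i] + "'";
    }
    return "INSERT INTO " + table + " SET " + assignments +
           " ON DUPLICATE KEY UPDATE " + assignments;
}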
// Example 16
void ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db(const ns_synchronized_time & update_start_time,ns_local_buffer_connection & local_buffer_sql, ns_sql & central_db) {
    if (time_of_last_update_from_central_db.local_time == ns_default_update_time)
        get_last_update_time(local_buffer_sql);
    buffered_capture_schedule.load_if_needed(&local_buffer_sql);

    ns_sql_result updated_data;

    const std::string altered_data_condition(
        std::string("time_stamp > FROM_UNIXTIME(") + ns_to_string(time_of_last_update_from_central_db.local_time) +
        ") AND time_stamp <= FROM_UNIXTIME(" + ns_to_string(update_start_time.local_time) + ") ");

    const unsigned long new_timestamp(time_of_last_update_from_central_db.remote_time);

    ns_get_all_column_data_from_table("buffered_capture_schedule",buffered_capture_schedule.table_format.column_names,
                                      std::string("WHERE ") + altered_data_condition + " AND uploaded_to_central_db != 3",
                                      updated_data,&local_buffer_sql);

    std::vector<ns_db_key_mapping> mappings(updated_data.size());
    if (updated_data.size() > 8)
        image_server.register_server_event(ns_image_server_event("ns_buffered_capture_scheduler::Committing ") << updated_data.size() << " recorded capture events to the central database.",&central_db);
    std::vector<ns_ex *> errors;
    for (unsigned long i = 0; i < updated_data.size(); i++) {
        try {
            unsigned long captured_image_id = atol(updated_data[i][buffered_capture_schedule.image_id_column].c_str());
            unsigned long problem_id = atol(updated_data[i][buffered_capture_schedule.problem_column].c_str());

            unsigned long central_captured_image_id(0),
                     central_problem_id(0);
            if (captured_image_id != 0 || problem_id != 0) {
                central_db << "SELECT captured_image_id,problem FROM capture_schedule WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                ns_sql_result res;
                central_db.get_rows(res);
                if (res.size() == 0)
                    throw ns_ex("Could not find capture schedule entry in central db for sample id " ) << updated_data[i][buffered_capture_schedule.id_column] << " finishing at time " << updated_data[i][buffered_capture_schedule.time_at_finish_column];
                central_captured_image_id = atol(res[0][0].c_str());
                central_problem_id = atol(res[0][1].c_str());
            }

            const bool need_to_make_new_capture_image(captured_image_id != 0 && central_captured_image_id != captured_image_id);
            //we need to make new entries in the central database for any new images or events
            if (need_to_make_new_capture_image) {
                mappings[i].captured_image.load_from_db(captured_image_id,&local_buffer_sql);
                mappings[i].old_captured_image = mappings[i].captured_image;
                mappings[i].old_image = mappings[i].image;
                if (mappings[i].captured_image.capture_images_image_id != 0)
                    mappings[i].image.load_from_db(mappings[i].captured_image.capture_images_image_id,&local_buffer_sql);


            }
            else {
                mappings[i].old_image = mappings[i].image;
                mappings[i].old_captured_image = mappings[i].captured_image;
                mappings[i].captured_image.captured_images_id = central_captured_image_id;
            }

            bool need_to_make_new_problem(problem_id != 0 && central_problem_id != problem_id);
            if (need_to_make_new_problem) {
                local_buffer_sql << "SELECT id,event,time,minor FROM buffered_host_event_log WHERE id = " << updated_data[i][buffered_capture_schedule.problem_column];
                ns_sql_result res;
                local_buffer_sql.get_rows(res);
                mappings[i].old_problem_id = mappings[i].problem_id;
                if (res.size() == 0) {
                    mappings[i].problem_id = image_server.register_server_event(ns_ex("Could not find problem id ") << updated_data[i][buffered_capture_schedule.problem_column] << " in local database buffer!",&central_db);
                    need_to_make_new_problem=false;
                }
                else {
                    mappings[i].problem_text = res[0][1];
                    mappings[i].problem_time = atol(res[0][2].c_str());
                    mappings[i].problem_minor = res[0][3] != "0";
                }
            }
            else {
                mappings[i].old_problem_id = mappings[i].problem_id;
                mappings[i].problem_id = central_problem_id;
            }

            if (need_to_make_new_capture_image && mappings[i].image.id != 0) {
                mappings[i].image.id = 0;
                mappings[i].image.save_to_db(0,&central_db,false);
                mappings[i].captured_image.capture_images_image_id = mappings[i].image.id;
            }
            if (need_to_make_new_capture_image) {
                mappings[i].captured_image.captured_images_id = 0;
                mappings[i].captured_image.save(&central_db);
            }
            if (need_to_make_new_problem) {
                mappings[i].old_problem_id = mappings[i].problem_id;
                ns_image_server_event ev;
                ev << mappings[i].problem_text;
                if (mappings[i].problem_minor) ev << ns_ts_minor_event;
                ev.set_time(mappings[i].problem_time);
                mappings[i].problem_id = image_server.register_server_event(ev,&central_db);
            }
        }
        catch(ns_ex & ex) {
            mappings[i].error << "Error while making mapping: " << ex.text();
            errors.push_back(&mappings[i].error);
        }
    }

    for (unsigned long i = 0; i < updated_data.size(); i++) {
        if (mappings[i].error.text().size() > 0)
            continue;
        try {
            central_db << "Update capture_schedule SET ";
            for (unsigned int j = 0; j < buffered_capture_schedule.table_format.column_names.size(); ++j) {
                if (j == buffered_capture_schedule.id_column
                        || j == buffered_capture_schedule.image_id_column ||
                        j == buffered_capture_schedule.problem_column || j ==
                        buffered_capture_schedule.timestamp_column) continue;
                central_db  << "`" << buffered_capture_schedule.table_format.column_names[j] << "`='" << central_db.escape_string(updated_data[i][j]) << "',";
            }
            central_db << "captured_image_id = " << mappings[i].captured_image.captured_images_id
                       << ", problem = " << mappings[i].problem_id;
            //we set the timestamp as old so that it does not trigger a re-download when the server tries to update its cache
            central_db << ", time_stamp=FROM_UNIXTIME("<< new_timestamp <<") ";
            central_db << " WHERE id = " << updated_data[i][0];
            central_db.send_query();
            if (mappings[i].old_captured_image.captured_images_id != mappings[i].captured_image.captured_images_id ||
                    mappings[i].old_problem_id != mappings[i].problem_id) {
                local_buffer_sql << "UPDATE buffered_capture_schedule SET captured_image_id = " << mappings[i].captured_image.captured_images_id
                                 << ", problem = " << mappings[i].problem_id << ", time_stamp = FROM_UNIXTIME("<< new_timestamp
                                 <<") WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_captured_images WHERE id = " << mappings[i].old_captured_image.captured_images_id;
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_images WHERE id = " << mappings[i].old_image.id;
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_host_event_log WHERE id = " << mappings[i].old_problem_id;
                local_buffer_sql.send_query();
            }
            if (updated_data[i][buffered_capture_schedule.time_at_finish_column] != "0") {
                local_buffer_sql << "DELETE FROM buffered_capture_schedule WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                local_buffer_sql.send_query();
                //	local_buffer_sql.clear_query();
            }
        }
        catch(ns_ex & ex) {
            mappings[i].error << "Error during central update: " << ex.text();
            errors.push_back(&mappings[i].error);
        }
    }
    for (unsigned int i = 0; i < mappings.size(); i++) {
        if (mappings[i].error.text().size() > 0) {

            local_buffer_sql << "UPDATE buffered_capture_schedule SET uploaded_to_central_db=3 WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
            local_buffer_sql.send_query();
            image_server.register_server_event(ns_image_server::ns_register_in_central_db_with_fallback,ns_ex("Could not update central db: ") << mappings[i].error.text());
        }

    }
    //update modified sample data.
    updated_data.resize(0);
    if (capture_samples.column_names.size() == 0) {
        capture_samples.load_column_names_from_db("capture_samples",&central_db);
        if (capture_samples.column_names.size() == 0)
            throw ns_ex("ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db()::Capture sample table appears to have no columns!");
    }
    if (capture_samples.column_names[0] != "id")
        throw ns_ex("ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db()::Capture sample table does not have its id in the first column!");

    ns_get_all_column_data_from_table("buffered_capture_samples",capture_samples.column_names,
                                      std::string("WHERE ") + altered_data_condition,
                                      updated_data,&local_buffer_sql);
    if (capture_samples.time_stamp_column_id == -1)
        throw ns_ex("Could not find capture sample time stamp column!");
    for (unsigned int i = 0; i < updated_data.size(); i++) {
        central_db << "UPDATE capture_samples SET ";
        //skip the id column; we don't want to cause any unnecessary db shuffling by changing ids (even when we would only be setting them to the values they already have)
        central_db  << capture_samples.column_names[1] << "='" << central_db.escape_string(updated_data[i][1]);
        for (unsigned int j = 2; j < capture_samples.column_names.size(); ++j) {
            if (j == capture_samples.time_stamp_column_id)
                continue;
            else central_db  << "',`" << capture_samples.column_names[j] << "`='" << central_db.escape_string(updated_data[i][j]);
        }
        central_db << "',time_stamp=FROM_UNIXTIME(" << new_timestamp << ") ";
        central_db << "WHERE id = " << updated_data[i][0];
        central_db.send_query();
    }

}