Example #1
	std::pair<testing_result_smart_ptr, training_stat_smart_ptr> network_updater::update(
		supervised_data_reader& reader,
		const std::vector<std::vector<float> >& learning_rates,
		network_data_smart_ptr data,
		unsigned int batch_size,
		float weight_decay,
		float momentum,
		const std::map<unsigned int, float>& layer_to_dropout_rate_map)
	{
		// Check data-schema consistency
		data->check_network_data_consistency(*schema);

		set_input_configuration_specific(reader.get_input_configuration());

		// Check schema-reader consistency
		layer_config_list[layer_config_list.size() - 1].check_equality(reader.get_output_configuration());

		// Refill the list of uniform random values from the random generator
		nnforge_uniform_real_distribution<float> dist(0.0F, 1.0F);
		for(std::vector<float>::iterator it = random_uniform_list.begin(); it != random_uniform_list.end(); ++it)
			*it = dist(gen);

		std::pair<testing_result_smart_ptr, training_stat_smart_ptr> res = actual_update(reader, learning_rates, data, batch_size, weight_decay, momentum, layer_to_dropout_rate_map);

		return res;
	}
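
A hedged usage sketch, not taken from the nnForge sources: it relies only on the update() signature shown above and assumes the updater, reader, learning rates and network data have been constructed elsewhere. The function name and the constant values below are illustrative only.

	// Caller-side sketch: drive a single update pass and keep the returned
	// testing result / training statistics pair. All setup is assumed done.
	std::pair<testing_result_smart_ptr, training_stat_smart_ptr> run_single_update(
		network_updater& updater,
		supervised_data_reader& reader,
		const std::vector<std::vector<float> >& learning_rates,
		network_data_smart_ptr data)
	{
		std::map<unsigned int, float> layer_to_dropout_rate_map; // empty map: no dropout
		return updater.update(
			reader,
			learning_rates,
			data,
			32,      // batch_size (illustrative)
			0.0005F, // weight_decay (illustrative)
			0.9F,    // momentum (illustrative)
			layer_to_dropout_rate_map);
	}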
Example #2
	network_data_smart_ptr hessian_calculator::get_hessian(
		unsupervised_data_reader& reader,
		network_data_smart_ptr data,
		unsigned int hessian_entry_to_process_count)
	{
		set_input_configuration_specific(reader.get_input_configuration());

		// Check data-schema consistency
		data->check_network_data_consistency(*schema);

		return actual_get_hessian(
			reader,
			data,
			hessian_entry_to_process_count);
	}
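
A hedged usage sketch along the same lines, based only on the get_hessian() signature above; the calculator, reader and data objects are assumed to already exist, and the entry count is an illustrative value.

	// Caller-side sketch: request a Hessian estimate over a fixed number of entries.
	network_data_smart_ptr estimate_hessian(
		hessian_calculator& calculator,
		unsupervised_data_reader& reader,
		network_data_smart_ptr data)
	{
		const unsigned int hessian_entry_to_process_count = 1000; // illustrative
		return calculator.get_hessian(reader, data, hessian_entry_to_process_count);
	}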
Example #3
	backward_propagation::stat backward_propagation::run(
		structured_data_bunch_reader& reader,
		structured_data_bunch_writer& writer,
		network_data& data,
		network_data::ptr momentum_data,
		network_data::ptr momentum_data2,
		const std::map<std::string, std::vector<float> >& learning_rates,
		unsigned int batch_size,
		float weight_decay,
		training_momentum momentum,
		unsigned int epoch_id)
	{
		backward_propagation::stat res;

		// Check data-schema consistency
		data.check_network_data_consistency(schema->get_layers());

		std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
		set_input_configuration_specific(reader.get_config_map());
		// Narrow the reader to just the data layers the network consumes;
		// fall back to the full reader below if narrowing is not available
		structured_data_bunch_reader::ptr narrow_reader = reader.get_narrow_reader(data_layer_names);
		res.flops_per_entry = flops;
		std::vector<std::string> data_layer_name_list(data_layer_names.begin(), data_layer_names.end());
		// Tell the writer which configuration each output layer will produce
		std::map<std::string, layer_configuration_specific> output_config_map;
		for(std::vector<std::string>::const_iterator it = output_layer_names.begin(); it != output_layer_names.end(); ++it)
			output_config_map[*it] = layer_config_map[*it];
		writer.set_config_map(output_config_map);
		std::map<layer_name_with_action, float> action_seconds;
		// Delegate the pass to actual_run, which fills in the update and timing statistics
		actual_run(
			narrow_reader ? *narrow_reader : reader,
			writer,
			data,
			momentum_data,
			momentum_data2,
			learning_rates,
			batch_size,
			weight_decay,
			momentum,
			epoch_id,
			res.average_absolute_updates,
			res.entry_processed_count,
			action_seconds);
		std::chrono::duration<float> sec = std::chrono::high_resolution_clock::now() - start;
		res.total_seconds = sec.count();

		// With profiling enabled, dump per-layer, per-action timing information
		if (profile->is_profile() && !action_seconds.empty())
		{
			std::map<std::string, std::string> layer_name_to_layer_type_map;
			std::vector<layer::const_ptr> layer_list = schema->get_layers();
			for(std::vector<layer::const_ptr>::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it)
				layer_name_to_layer_type_map.insert(std::make_pair((*it)->instance_name, (*it)->get_type_name()));
			profile_util::dump_layer_action_performance(
				profile,
				get_max_flops(),
				"backward_prop",
				res.entry_processed_count,
				action_flops_per_entry,
				action_seconds,
				layer_name_to_layer_type_map,
				res.total_seconds);
		}

		return res;
	}
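
A hedged usage sketch, again not from the nnForge sources: it uses only the run() signature and the stat fields visible above, and assumes all reader, writer, data and momentum objects are set up by the caller. The name run_one_epoch and the constants are illustrative.

	// Caller-side sketch: run one training epoch and hand the statistics back.
	backward_propagation::stat run_one_epoch(
		backward_propagation& bp,
		structured_data_bunch_reader& reader,
		structured_data_bunch_writer& writer,
		network_data& data,
		network_data::ptr momentum_data,
		network_data::ptr momentum_data2,
		const std::map<std::string, std::vector<float> >& learning_rates,
		training_momentum momentum,
		unsigned int epoch_id)
	{
		backward_propagation::stat st = bp.run(
			reader,
			writer,
			data,
			momentum_data,
			momentum_data2,
			learning_rates,
			128,  // batch_size (illustrative)
			0.0F, // weight_decay (illustrative)
			momentum,
			epoch_id);
		// st.total_seconds, st.flops_per_entry and st.entry_processed_count
		// can then be logged or aggregated by the caller.
		return st;
	}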