Example #1
float mlp::addWeights(){
  float tempVal = 0;
  tempVal += hiddenLayer->addWeights();
  tempVal += outputLayer->addWeights();

  return tempVal;
}
Example #2
void grid_renderer<T>::start_layer_processing(layer const& lay, box2d<double> const& query_extent)
{
    MAPNIK_LOG_DEBUG(grid_renderer) << "grid_renderer: Start processing layer=" << lay.name();
    MAPNIK_LOG_DEBUG(grid_renderer) << "grid_renderer: datasource=" << lay.datasource().get();
    MAPNIK_LOG_DEBUG(grid_renderer) << "grid_renderer: query_extent = " << query_extent;

    if (lay.clear_label_cache())
    {
        detector_->clear();
    }
    query_extent_ = query_extent;
    int buffer_size = lay.buffer_size();
    if (buffer_size != 0 )
    {
        double padding = buffer_size * (double)(query_extent.width()/pixmap_.width());
        double x0 = query_extent_.minx();
        double y0 = query_extent_.miny();
        double x1 = query_extent_.maxx();
        double y1 = query_extent_.maxy();
        query_extent_.init(x0 - padding, y0 - padding, x1 + padding , y1 + padding);
    }

    boost::optional<box2d<double> > const& maximum_extent = lay.maximum_extent();
    if (maximum_extent)
    {
        query_extent_.clip(*maximum_extent);
    }
}
Example #3
std::size_t layer_hasher::hash(const layer& v) {
    std::size_t seed(0);

    combine(seed, v.name());
    combine(seed, v.visible());
    combine(seed, v.active());
    combine(seed, hash_std_vector_dogen_dia_object(v.objects()));

    return seed;
}
Example #4
void grid_renderer<T>::start_layer_processing(layer const& lay)
{
#ifdef MAPNIK_DEBUG
    std::clog << "start layer processing : " << lay.name()  << "\n";
    std::clog << "datasource = " << lay.datasource().get() << "\n";
#endif
    if (lay.clear_label_cache())
    {
        detector_.clear();
    }
}
Example #5
 static  boost::python::tuple
 getstate(const layer& l)
 {
     boost::python::list s;
     std::vector<std::string> const& style_names = l.styles();
     for (unsigned i = 0; i < style_names.size(); ++i)
     {
         s.append(style_names[i]);
     }
     return boost::python::make_tuple(l.clear_label_cache(), l.min_zoom(), l.max_zoom(),
                                      l.queryable(), l.datasource()->params(),
                                      l.cache_features(), s);
 }
Example #6
void cairo_renderer<T>::end_layer_processing(layer const& lay)
{
    MAPNIK_LOG_DEBUG(cairo_renderer) << "cairo_renderer: End layer processing";

    if (lay.comp_op() || lay.get_opacity() < 1.0)
    {
        context_.pop_group();
        composite_mode_e comp_op = lay.comp_op() ? *lay.comp_op() : src_over;
        context_.set_operator(comp_op);
        context_.paint(lay.get_opacity());
    }
}
Example #7
/**
 * \brief Fill the controls with the properties of the layer.
 * \param lay The layer from which we take the info.
 *
 * The new width is max(lay.get_width(), s_min_width), and the new height
 * max(lay.get_height(), s_min_height).
 */
void bf::layer_properties_frame::fill_from( const layer& lay )
{
  m_fit_level->SetValue( lay.fits_level() );
  m_width->SetValue
    ( wxString::Format(wxT("%d"), std::max(lay.get_width(), s_min_width)) );
  m_height->SetValue
    ( wxString::Format(wxT("%d"), std::max(lay.get_height(), s_min_height)) );

  m_height->Enable( !lay.fits_level() );
  m_width->Enable( !lay.fits_level() );
  m_name->SetValue( std_to_wx_string(lay.get_name()) );
  m_tag->SetValue( std_to_wx_string(lay.get_tag()) );

  unsigned int i=0;
  bool found = false;
  const wxString ref( std_to_wx_string(lay.get_class_name()) );

  while ( !found && (i!=m_class_name->GetCount()) )
    if ( m_class_name->GetString(i) == ref )
      found = true;
    else
      ++i;

  if ( found )
    m_class_name->SetSelection(i);
  else
    m_class_name->SetSelection(0);
} // layer_properties_frame::fill_from()
Example #8
	void create(int input_cnt,int output_cnt,int hidden_cnt,int neurons_cnt) {
		hidden_count=hidden_cnt;
		hidden_layers.resize(hidden_count);

		if(hidden_count) {
			hidden_layers[0].create(input_cnt,neurons_cnt);
			for(int i=1;i<hidden_count;i++) {
				hidden_layers[i].create(neurons_cnt,neurons_cnt);
			}
			output_layer.create(neurons_cnt,output_cnt);		
		}
		else {
			output_layer.create(input_cnt,output_cnt);
		}
	}
Example #9
void feature_style_processor<Processor>::apply_to_layer(layer const& lay,
                                                        Processor & p,
                                                        projection const& proj0,
                                                        double scale,
                                                        double scale_denom,
                                                        unsigned width,
                                                        unsigned height,
                                                        box2d<double> const& extent,
                                                        int buffer_size,
                                                        std::set<std::string>& names)
{
    feature_style_context_map ctx_map;
    layer_rendering_material  mat(lay, proj0);

    prepare_layer(mat,
                  ctx_map,
                  p,
                  scale,
                  scale_denom,
                  width,
                  height,
                  extent,
                  buffer_size,
                  names);

    prepare_layers(mat, lay.layers(), ctx_map, p, scale_denom);

    if (!mat.active_styles_.empty())
    {
        render_material(mat,p);
        render_submaterials(mat, p);
    }
}
Example #10
void cairo_renderer<T>::start_layer_processing(layer const& lay, box2d<double> const& query_extent)
{
    MAPNIK_LOG_DEBUG(cairo_renderer) << "cairo_renderer: Start processing layer=" << lay.name() ;
    MAPNIK_LOG_DEBUG(cairo_renderer) << "cairo_renderer: -- datasource=" << lay.datasource().get();
    MAPNIK_LOG_DEBUG(cairo_renderer) << "cairo_renderer: -- query_extent=" << query_extent;

    if (lay.clear_label_cache())
    {
        common_.detector_->clear();
    }
    common_.query_extent_ = query_extent;

    if (lay.comp_op() || lay.get_opacity() < 1.0)
    {
        context_.push_group();
    }
}
Example #11
void grid_renderer<T>::start_layer_processing(layer const& lay, box2d<double> const& query_extent)
{
    MAPNIK_LOG_DEBUG(grid_renderer) << "grid_renderer: Start processing layer=" << lay.name();
    MAPNIK_LOG_DEBUG(grid_renderer) << "grid_renderer: datasource=" << lay.datasource().get();
    MAPNIK_LOG_DEBUG(grid_renderer) << "grid_renderer: query_extent = " << query_extent;

    if (lay.clear_label_cache())
    {
        detector_->clear();
    }
    query_extent_ = query_extent;
    boost::optional<box2d<double> > const& maximum_extent = lay.maximum_extent();
    if (maximum_extent)
    {
        query_extent_.clip(*maximum_extent);
    }
}
Example #12
    static void
    setstate (layer& l, boost::python::tuple state)
    {
        using namespace boost::python;
        if (len(state) != 9)
        {
            PyErr_SetObject(PyExc_ValueError,
                            ("expected 9-item tuple in call to __setstate__; got %s"
                             % state).ptr()
                );
            throw_error_already_set();
        }

        l.set_clear_label_cache(extract<bool>(state[0]));

        l.set_min_zoom(extract<double>(state[1]));

        l.set_max_zoom(extract<double>(state[2]));

        l.set_queryable(extract<bool>(state[3]));

        mapnik::parameters params = extract<parameters>(state[4]);
        l.set_datasource(datasource_cache::instance().create(params));

        boost::python::list s = extract<boost::python::list>(state[5]);
        for (int i=0;i<len(s);++i)
        {
            l.add_style(extract<std::string>(s[i]));
        }

        l.set_cache_features(extract<bool>(state[6]));
    }
Example #13
void update_layer(layer l, network net)
{
    int update_batch = net.batch*net.subdivisions;
    float rate = get_current_rate(net);
    l.t = get_current_batch(net);
    if(l.update_gpu){
        l.update_gpu(l, update_batch, rate*l.learning_rate_scale, net.momentum, net.decay);
    }
}
Example #14
	void propagate (vector<double> input) {
		if(hidden_count==0) {
			output_layer.layer_input=input;
			// cout<<"Output Layer :\n";
			output_layer.calculate();
			return ;
		}
		hidden_layers[0].layer_input=input;
		// cout<<"Hidden Layer 0 : \n";
		hidden_layers[0].calculate();

		// Propagating the output values to the input of the next layer
		update(0);

		for(int i=1;i<hidden_count;i++) {
			hidden_layers[i].calculate();
			update(i);
		}
		output_layer.calculate();
	}
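Examples #8 and #14 belong to the same feed-forward network class: create() sizes the hidden layers and the output layer, and propagate() pushes an input vector through them. The following self-contained toy sketch shows how the two fit together; the toy_layer/toy_network names and the averaging placeholder inside calculate() are assumptions made for illustration only, not the original classes.

// Toy sketch only: mirrors the create()/propagate() structure of Examples #8
// and #14 with a deliberately trivial per-layer computation.
#include <cstddef>
#include <iostream>
#include <vector>

struct toy_layer {
    std::vector<double> layer_input;   // values fed into this layer
    std::vector<double> output;        // values produced by calculate()
    std::size_t out_count = 0;

    void create(std::size_t in_cnt, std::size_t out_cnt) {
        (void)in_cnt;                  // weights are omitted in this sketch
        out_count = out_cnt;
        output.assign(out_cnt, 0.0);
    }

    // Placeholder computation: the mean of the inputs, copied to every neuron.
    void calculate() {
        double sum = 0.0;
        for (double v : layer_input) sum += v;
        double avg = layer_input.empty() ? 0.0 : sum / layer_input.size();
        output.assign(out_count, avg);
    }
};

struct toy_network {
    int hidden_count = 0;
    std::vector<toy_layer> hidden_layers;
    toy_layer output_layer;

    // Same wiring as Example #8: input -> first hidden layer, then
    // neurons_cnt -> neurons_cnt for the rest, output layer last.
    void create(int input_cnt, int output_cnt, int hidden_cnt, int neurons_cnt) {
        hidden_count = hidden_cnt;
        hidden_layers.resize(hidden_count);
        if (hidden_count) {
            hidden_layers[0].create(input_cnt, neurons_cnt);
            for (int i = 1; i < hidden_count; ++i)
                hidden_layers[i].create(neurons_cnt, neurons_cnt);
            output_layer.create(neurons_cnt, output_cnt);
        } else {
            output_layer.create(input_cnt, output_cnt);
        }
    }

    // Same flow as Example #14: feed the input forward, layer by layer.
    void propagate(std::vector<double> input) {
        if (hidden_count == 0) {
            output_layer.layer_input = input;
            output_layer.calculate();
            return;
        }
        hidden_layers[0].layer_input = input;
        hidden_layers[0].calculate();
        for (int i = 1; i < hidden_count; ++i) {
            hidden_layers[i].layer_input = hidden_layers[i - 1].output;
            hidden_layers[i].calculate();
        }
        output_layer.layer_input = hidden_layers[hidden_count - 1].output;
        output_layer.calculate();
    }
};

int main() {
    toy_network net;
    net.create(2, 1, 1, 4);            // 2 inputs, 1 output, 1 hidden layer of 4 neurons
    net.propagate({0.5, 0.25});
    std::cout << "output: " << net.output_layer.output[0] << "\n";
    return 0;
}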
Example #15
void mlp::layersCalculate(float * inputArray, float * setOutputArray){
  hiddenLayer->neuronCalculate(inputArray);
  outputLayer->neuronCalculate(hiddenLayer, setOutputArray);
}
Example #16
void mlp::layersUpdate(){
  outputLayer->neuronUpdate();
  hiddenLayer->neuronUpdate();
}
Example #17
	void feed(const layer<lt>& p){
		neurons[0]=p;
		if(neurons[1].len()!=p.len()) neurons[1]=layer<lt>(p.len());
		hopstep=0;
		generateOrder();
	}
Example #18
void mlp::layersBackprop(float * trainArray){
  outputLayer->neuronBackProp(trainArray);
  hiddenLayer->neuronBackProp(outputLayer);
}
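Examples #15, #16 and #18 are the per-step pieces of this mlp's training loop: a forward pass, backpropagation against the target values, then a weight update. A minimal call-order sketch, assuming an mlp instance named net, buffer sizes of 2 inputs and 1 output (both placeholders), and assuming setOutputArray receives the computed outputs:

// Call-order sketch only; `net` is an mlp instance (Examples #15, #16, #18)
// and the array sizes are illustrative placeholders.
float input[2]  = {0.5f, 0.25f};  // network inputs
float output[1] = {0.0f};         // written by layersCalculate() (assumption)
float target[1] = {1.0f};         // desired output used by layersBackprop()

net.layersCalculate(input, output);  // forward pass through hidden and output layers
net.layersBackprop(target);          // propagate the error backwards through the layers
net.layersUpdate();                  // apply the weight updates, output layer first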
Example #19
 static boost::python::tuple
 getinitargs(const layer& l)
 {
     return boost::python::make_tuple(l.name(),l.srs());
 }
Example #20
void mlp::printWeights(){
  cout << "Hidden Layer: " << endl;
  hiddenLayer->printWeights();
  cout << "Output Layer: " << endl;
  outputLayer->printWeights();
}
Example #21
 layer_rendering_material(layer const& lay, projection const& dest)
     :
     lay_(lay),
     proj0_(dest),
     proj1_(lay.srs(),true) {}