bool dllp::conv_layer::parse(const layers_t& layers, const std::vector<std::string>& lines, size_t& i) { std::string value; while (i < lines.size()) { if (dllp::extract_value(lines[i], "channels: ", value)) { c = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "filters: ", value)) { k = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "v1: ", value)) { v1 = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "v2: ", value)) { v2 = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "w1: ", value)) { w1 = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "w2: ", value)) { w2 = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "activation: ", activation)) { ++i; if (!dllp::valid_activation(activation)) { std::cout << "dllp: error: invalid activation function, must be [sigmoid,tanh,relu,softmax]" << std::endl; return false; } } else { break; } } if (layers.empty() && (!c || !v1 || !v2 || !k || !w1 || !w2)) { std::cout << "dllp: error: The first layer needs input and output sizes" << std::endl; return false; } else if (!layers.empty() && !k) { std::cout << "dllp: error: The number of filters is mandatory" << std::endl; return false; } else if (!layers.empty() && (!w1 || !w2)) { std::cout << "dllp: error: The size of the filters is mandatory" << std::endl; return false; } if (!layers.empty()) { size_t i = layers.size() - 1; while(layers[i]->is_transform() && i > 0){ --i; } c = layers[i]->hidden_get_1(); v1 = layers[i]->hidden_get_2(); v2 = layers[i]->hidden_get_3(); } return true; }
bool dllp::pooling_layer::parse(const layers_t& layers, const std::vector<std::string>& lines, size_t& i) { std::string value; while (i < lines.size()) { if (dllp::extract_value(lines[i], "channels: ", value)) { c = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "v1: ", value)) { v1 = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "v2: ", value)) { v2 = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "c1: ", value)) { c1 = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "c2: ", value)) { c2 = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "c3: ", value)) { c3 = std::stol(value); ++i; } else { break; } } if (layers.empty() && (!c || !v1 || !v2 || !c1 || !c2 || !c3)) { std::cout << "dllp: error: The first layer needs input and output sizes" << std::endl; return false; } else if (!layers.empty() && (!c1 || !c2 || !c3)) { std::cout << "dllp: error: The factors of the pooling is mandatory" << std::endl; return false; } if (!layers.empty()) { size_t i = layers.size() - 1; while(layers[i]->is_transform() && i > 0){ --i; } c = layers[i]->hidden_get_1(); v1 = layers[i]->hidden_get_2(); v2 = layers[i]->hidden_get_3(); } return true; }
void operator()(view_t& view) {
    using namespace boost::gil;

    // Partition `view` into a rows x cols grid of sub-views (one tile per
    // layer), leaving `margin` pixels between neighbouring tiles, then draw
    // every layer into its tile in parallel.
    std::vector<view_t> tiles;
    int rows = (int)ceil(layers.size() / (double)cols);

    int top = 0;
    sections row_sections(view.height() + margin, rows);
    for (; row_sections; row_sections++) {
        int tile_height = *row_sections - margin;

        int left = 0;
        sections col_sections(view.width() + margin, cols);
        for (; col_sections; col_sections++) {
            int tile_width = *col_sections - margin;
            tiles.push_back(subimage_view(view, left, top, tile_width, tile_height));
            left += tile_width + margin;
        }

        top += tile_height + margin;
    }

    tbb::parallel_for(
        tbb::blocked_range<std::size_t>(0, tiles.size()),
        parallel_draw<view_t>(layers, tiles),
        tbb::auto_partitioner());
}
// Build a grid renderer over `total` layers stored in the raw array `p`,
// laid out in `cols` columns with `margin` pixels between tiles.
parallel_grid(layer_t* p, int total, int cols, int margin = 5)
        : cols(cols)
        , margin(margin) {
    // Copy each layer out of the caller-owned array into our own storage.
    for (layer_t* it = p; it != p + total; ++it) {
        layers.push_back(*it);
    }
}
bool dllp::rbm_layer::parse(const layers_t& layers, const std::vector<std::string>& lines, size_t& i) { std::string value; while (i < lines.size()) { auto result = base_parse(lines, i); if (result == dllp::parse_result::PARSED) { ++i; continue; } else if (result == dllp::parse_result::ERROR) { return false; } if (dllp::extract_value(lines[i], "visible: ", value)) { visible = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "hidden: ", value)) { hidden = std::stol(value); ++i; } else { break; } } if (layers.empty() && !visible) { std::cout << "dllp: error: The first layer needs number of visible units" << std::endl; return false; } if (!hidden) { std::cout << "dllp: error: The number of hidden units is mandatory" << std::endl; return false; } if (!layers.empty()) { size_t i = layers.size() - 1; while(layers[i]->is_transform() && i > 0){ --i; } visible = layers[i]->hidden_get(); } return true; }
bool dllp::dense_layer::parse(const layers_t& layers, const std::vector<std::string>& lines, size_t& i) { std::string value; while (i < lines.size()) { if (dllp::extract_value(lines[i], "visible: ", value)) { visible = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "hidden: ", value)) { hidden = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "activation: ", activation)) { ++i; if (!dllp::valid_activation(activation)) { std::cout << "dllp: error: invalid activation function, must be [sigmoid,tanh,relu,softmax]" << std::endl; return false; } } else { break; } } if (layers.empty() && (visible == 0 || hidden == 0)) { std::cout << "dllp: error: The first layer needs visible and hidden sizes" << std::endl; return false; } else if (!layers.empty() && hidden == 0) { std::cout << "dllp: error: The number of hidden units is mandatory" << std::endl; return false; } if (!layers.empty()) { size_t i = layers.size() - 1; while(layers[i]->is_transform() && i > 0){ --i; } visible = layers[i]->hidden_get(); } return true; }
bool dllp::conv_rbm_mp_layer::parse(const layers_t& layers, const std::vector<std::string>& lines, size_t& i) { std::string value; while (i < lines.size()) { auto result = base_parse(lines, i); if (result == dllp::parse_result::PARSED) { ++i; continue; } else if (result == dllp::parse_result::ERROR) { return false; } if (dllp::extract_value(lines[i], "channels: ", value)) { c = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "filters: ", value)) { k = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "pool: ", value)) { p = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "v1: ", value)) { v1 = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "v2: ", value)) { v2 = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "w1: ", value)) { w1 = std::stol(value); ++i; } else if (dllp::extract_value(lines[i], "w2: ", value)) { w2 = std::stol(value); ++i; } else { break; } } if (layers.empty() && (!c || !v1 || !v2 || !k || !w1 || !w2)) { std::cout << "dllp: error: The first layer needs input and output sizes" << std::endl; return false; } else if (!p) { std::cout << "dllp: error: The pool parameter is mandatory" << std::endl; return false; } else if (!layers.empty() && !k) { std::cout << "dllp: error: The number of filters is mandatory" << std::endl; return false; } else if (!layers.empty() && (!w1 || !w2)) { std::cout << "dllp: error: The size of the filters is mandatory" << std::endl; return false; } if (!layers.empty()) { size_t i = layers.size() - 1; while(layers[i]->is_transform() && i > 0){ --i; } c = layers[i]->hidden_get_1(); v1 = layers[i]->hidden_get_2(); v2 = layers[i]->hidden_get_3(); } return true; }