void UpgradeV0PaddingLayers(const NetParameter& param,
                            NetParameter* param_upgraded_pad) {
  // Copy everything other than the layers from the original param.
  param_upgraded_pad->Clear();
  param_upgraded_pad->CopyFrom(param);
  param_upgraded_pad->clear_layers();
  // Figure out which layer each bottom blob comes from.
  map<string, int> blob_name_to_last_top_idx;
  for (int i = 0; i < param.input_size(); ++i) {
    const string& blob_name = param.input(i);
    blob_name_to_last_top_idx[blob_name] = -1;
  }
  for (int i = 0; i < param.layers_size(); ++i) {
    const V1LayerParameter& layer_connection = param.layers(i);
    const V0LayerParameter& layer_param = layer_connection.layer();
    // Add the layer to the new net, unless it's a padding layer.
    if (layer_param.type() != "padding") {
      param_upgraded_pad->add_layers()->CopyFrom(layer_connection);
    }
    for (int j = 0; j < layer_connection.bottom_size(); ++j) {
      const string& blob_name = layer_connection.bottom(j);
      if (blob_name_to_last_top_idx.find(blob_name) ==
          blob_name_to_last_top_idx.end()) {
        LOG(FATAL) << "Unknown blob input " << blob_name << " to layer " << j;
      }
      const int top_idx = blob_name_to_last_top_idx[blob_name];
      if (top_idx == -1) { continue; }
      const V1LayerParameter& source_layer = param.layers(top_idx);
      if (source_layer.layer().type() == "padding") {
        // This layer has a padding layer as input -- check that it is a conv
        // layer or a pooling layer and takes only one input. Also check that
        // the padding layer input has only one input and one output. Other
        // cases have undefined behavior in Caffe.
        CHECK((layer_param.type() == "conv") || (layer_param.type() == "pool"))
            << "Padding layer input to "
               "non-convolutional / non-pooling layer type "
            << layer_param.type();
        CHECK_EQ(layer_connection.bottom_size(), 1)
            << "Conv Layer takes a single blob as input.";
        CHECK_EQ(source_layer.bottom_size(), 1)
            << "Padding Layer takes a single blob as input.";
        CHECK_EQ(source_layer.top_size(), 1)
            << "Padding Layer produces a single blob as output.";
        int layer_index = param_upgraded_pad->layers_size() - 1;
        param_upgraded_pad->mutable_layers(layer_index)->mutable_layer()
            ->set_pad(source_layer.layer().pad());
        param_upgraded_pad->mutable_layers(layer_index)
            ->set_bottom(j, source_layer.bottom(0));
      }
    }
    for (int j = 0; j < layer_connection.top_size(); ++j) {
      const string& blob_name = layer_connection.top(j);
      blob_name_to_last_top_idx[blob_name] = i;
    }
  }
}
void NetParameterToPrettyPrint(const NetParameter& param,
                               NetParameterPrettyPrint* pretty_param) {
  pretty_param->Clear();
  if (param.has_name()) {
    pretty_param->set_name(param.name());
  }
  if (param.has_force_backward()) {
    pretty_param->set_force_backward(param.force_backward());
  }
  for (int i = 0; i < param.input_size(); ++i) {
    pretty_param->add_input(param.input(i));
  }
  for (int i = 0; i < param.input_dim_size(); ++i) {
    pretty_param->add_input_dim(param.input_dim(i));
  }
  for (int i = 0; i < param.layers_size(); ++i) {
    pretty_param->add_layers()->CopyFrom(param.layers(i));
  }
}
void InsertSplits(const NetParameter& param, NetParameter* param_split) {
  // Initialize by copying from the input NetParameter.
  param_split->CopyFrom(param);
  param_split->clear_layer();
  map<string, pair<int, int> > blob_name_to_last_top_idx;
  map<pair<int, int>, pair<int, int> > bottom_idx_to_source_top_idx;
  map<pair<int, int>, int> top_idx_to_bottom_count;
  map<pair<int, int>, float> top_idx_to_loss_weight;
  map<pair<int, int>, int> top_idx_to_bottom_split_idx;
  map<int, string> layer_idx_to_layer_name;
  layer_idx_to_layer_name[-1] = "input";
  // Determine the number of times each blob is used as an input (bottom) blob.
  for (int i = 0; i < param.input_size(); ++i) {
    const string& blob_name = param.input(i);
    blob_name_to_last_top_idx[blob_name] = make_pair(-1, i);
  }
  for (int i = 0; i < param.layer_size(); ++i) {
    const LayerParameter& layer_param = param.layer(i);
    layer_idx_to_layer_name[i] = layer_param.name();
    for (int j = 0; j < layer_param.bottom_size(); ++j) {
      const string& blob_name = layer_param.bottom(j);
      if (blob_name_to_last_top_idx.find(blob_name) ==
          blob_name_to_last_top_idx.end()) {
        LOG(FATAL) << "Unknown blob input " << blob_name << " to layer " << j;
      }
      const pair<int, int>& bottom_idx = make_pair(i, j);
      const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
      bottom_idx_to_source_top_idx[bottom_idx] = top_idx;
      ++top_idx_to_bottom_count[top_idx];
    }
    for (int j = 0; j < layer_param.top_size(); ++j) {
      const string& blob_name = layer_param.top(j);
      blob_name_to_last_top_idx[blob_name] = make_pair(i, j);
    }
    // A use of a top blob as a loss should be handled similarly to the use of
    // a top blob as an input (bottom) blob to another layer.
    const int last_loss =
        std::min(layer_param.loss_weight_size(), layer_param.top_size());
    for (int j = 0; j < last_loss; ++j) {
      const string& blob_name = layer_param.top(j);
      const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
      top_idx_to_loss_weight[top_idx] = layer_param.loss_weight(j);
      if (top_idx_to_loss_weight[top_idx]) {
        ++top_idx_to_bottom_count[top_idx];
      }
    }
  }
  // Create a split layer for any input blob used by other layers as a bottom
  // blob more than once.
  for (int i = 0; i < param.input_size(); ++i) {
    const int split_count = top_idx_to_bottom_count[make_pair(-1, i)];
    if (split_count > 1) {
      const string& layer_name = layer_idx_to_layer_name[-1];
      const string& blob_name = param.input(i);
      LayerParameter* split_layer_param = param_split->add_layer();
      const float kZeroLossWeight = 0;
      ConfigureSplitLayer(layer_name, blob_name, i, split_count,
          kZeroLossWeight, split_layer_param);
    }
  }
  for (int i = 0; i < param.layer_size(); ++i) {
    LayerParameter* layer_param = param_split->add_layer();
    layer_param->CopyFrom(param.layer(i));
    // Replace any shared bottom blobs with split layer outputs.
    for (int j = 0; j < layer_param->bottom_size(); ++j) {
      const pair<int, int>& top_idx =
          bottom_idx_to_source_top_idx[make_pair(i, j)];
      const int split_count = top_idx_to_bottom_count[top_idx];
      if (split_count > 1) {
        const string& layer_name = layer_idx_to_layer_name[top_idx.first];
        const string& blob_name = layer_param->bottom(j);
        layer_param->set_bottom(j, SplitBlobName(layer_name, blob_name,
            top_idx.second, top_idx_to_bottom_split_idx[top_idx]++));
      }
    }
    // Create a split layer for any top blob used by other layers as a bottom
    // blob more than once.
    for (int j = 0; j < layer_param->top_size(); ++j) {
      const pair<int, int>& top_idx = make_pair(i, j);
      const int split_count = top_idx_to_bottom_count[top_idx];
      if (split_count > 1) {
        const string& layer_name = layer_idx_to_layer_name[i];
        const string& blob_name = layer_param->top(j);
        LayerParameter* split_layer_param = param_split->add_layer();
        const float loss_weight = top_idx_to_loss_weight[top_idx];
        ConfigureSplitLayer(layer_name, blob_name, j, split_count,
            loss_weight, split_layer_param);
        if (loss_weight) {
          layer_param->clear_loss_weight();
          top_idx_to_bottom_split_idx[top_idx]++;
        }
      }
    }
  }
}
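InsertSplits leans on two helpers, SplitBlobName and ConfigureSplitLayer, to name the split outputs and to build the Split layer itself; their bodies are not shown in this section. The sketch below is a plausible reconstruction inferred only from how they are called above and from the conv1_conv1_0_split_0 naming mentioned later, so treat the details (the SplitLayerName helper, the exact name format) as assumptions rather than the definitive implementation. It assumes <sstream> and std::ostringstream are available.

// Sketch only: reconstructed from the call sites above, not verified source.
string SplitLayerName(const string& layer_name, const string& blob_name,
    const int blob_idx) {
  ostringstream split_layer_name;
  split_layer_name << blob_name << "_" << layer_name << "_" << blob_idx
      << "_split";
  return split_layer_name.str();
}

string SplitBlobName(const string& layer_name, const string& blob_name,
    const int blob_idx, const int split_idx) {
  // e.g. blob "conv1" produced by layer "conv1" as top 0, split 0
  //   -> "conv1_conv1_0_split_0"
  ostringstream split_blob_name;
  split_blob_name << blob_name << "_" << layer_name << "_" << blob_idx
      << "_split_" << split_idx;
  return split_blob_name.str();
}

void ConfigureSplitLayer(const string& layer_name, const string& blob_name,
    const int blob_idx, const int split_count, const float loss_weight,
    LayerParameter* split_layer_param) {
  // The split layer consumes the shared blob once and produces one renamed
  // top per consumer.
  split_layer_param->Clear();
  split_layer_param->add_bottom(blob_name);
  split_layer_param->set_name(SplitLayerName(layer_name, blob_name, blob_idx));
  split_layer_param->set_type("Split");
  for (int k = 0; k < split_count; ++k) {
    split_layer_param->add_top(
        SplitBlobName(layer_name, blob_name, blob_idx, k));
    if (loss_weight) {
      // Only the first split top keeps the loss weight; the rest get 0,
      // which is why the caller starts bottom splitting at index 1.
      split_layer_param->add_loss_weight(k == 0 ? loss_weight : 0);
    }
  }
}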
void Net<Dtype>::appendTop(const NetParameter& param, const int layer_id,
    const int top_id, set<string>* available_blobs,
    map<string, int>* blob_name_to_idx) {
  boost::shared_ptr<LayerParameter> layer_param(
      layer_id >= 0 ? new LayerParameter(param.layer(layer_id)) : NULL);
  // use (layer_id, top_id) or (-1, top_id) to get a blob name
  const string& blob_name = layer_param ?
      (top_id < layer_param->top_size() ? layer_param->top(top_id)
                                        : "(automatic)")
      : param.input(top_id);
  // in-place case, e.g.:
  //   I0721 10:38:16.722070  4692 net.cpp:84] relu1 <- conv1
  //   I0721 10:38:16.722082  4692 net.cpp:98] relu1 -> conv1 (in-place)
  // check whether the blob sits at the same position in both bottom and top
  if (blob_name_to_idx && layer_param && top_id < layer_param->bottom_size() &&
      blob_name == layer_param->bottom(top_id)) {
    LOG_IF(INFO, Dragon::get_root_solver()) << layer_param->name()
        << "[Layer-Produce]->" << blob_name << " [Blob-Name] (in-place)";
    // add into this layer's top blobs using blob_name
    top_vecs[layer_id].push_back(blobs[(*blob_name_to_idx)[blob_name]].get());
    // log the id
    top_id_vecs[layer_id].push_back((*blob_name_to_idx)[blob_name]);
  } else if (blob_name_to_idx && (*blob_name_to_idx).count(blob_name)) {
    LOG(FATAL) << "Top blob: " << blob_name
               << " propagates from multiple sources.";
  } else {
    // normal top blob stuffing
    // debug info
    if (Dragon::get_root_solver()) {
      if (layer_param) {
        LOG(INFO) << layer_param->name() << "[Layer-Produce] -> "
                  << blob_name << " [Blob-Name]";
      } else {
        // special case, only used when viewing a Net's structure,
        // because such blobs need not specify a data source and
        // cannot be used to train or test;
        // virtual data input blobs do not belong to any layer
        // (see insert_splits.cpp / void InsertSplits())
        LOG(INFO) << "Input " << top_id << "[Blob-Code] -> "
                  << blob_name << "[Blob-Name]";
      }
    }
    // allocate an empty blob first
    boost::shared_ptr<Blob<Dtype> > ptr_blob(new Blob<Dtype>());
    // store global blob info
    const int blob_id = blobs.size();
    blobs.push_back(ptr_blob);
    blobs_name.push_back(blob_name);
    blobs_need_backward.push_back(false);
    // encode an index number for the name, which also records that this
    // top blob has been produced; checking it beforehand tells us whether
    // a top blob has multiple sources (forbidden)
    if (blob_name_to_idx) (*blob_name_to_idx)[blob_name] = blob_id;
    // reshape only the virtual input blobs, because they do not live
    // inside a DataLayer (which would provide the reshape/transform service)
    if (layer_id == -1) {
      ptr_blob->reshape(param.input_shape(top_id));
      // stored separately for virtual input blobs
      net_input_blobs.push_back(ptr_blob.get());
      net_input_blob_indices.push_back(blob_id);
    } else {
      top_vecs[layer_id].push_back(ptr_blob.get());
      top_id_vecs[layer_id].push_back(blob_id);
    }
  }
  // a set listing all existing top blobs
  if (available_blobs) available_blobs->insert(blob_name);
}
void Net<Dtype>::Init(const NetParameter& in_param) {
  // Create a copy of in_param with splits added where necessary.
  NetParameter param;
  InsertSplits(in_param, &param);
  // Basically, build all the layers and set up their connections.
  name_ = param.name();
  map<string, int> blob_name_to_idx;
  set<string> available_blobs;
  int num_layers = param.layers_size();
  CHECK_EQ(param.input_size() * 4, param.input_dim_size())
      << "Incorrect bottom blob dimension specifications.";
  size_t memory_used = 0;
  // set the input blobs
  for (int i = 0; i < param.input_size(); ++i) {
    const string& blob_name = param.input(i);
    shared_ptr<Blob<Dtype> > blob_pointer(
        new Blob<Dtype>(param.input_dim(i * 4),
                        param.input_dim(i * 4 + 1),
                        param.input_dim(i * 4 + 2),
                        param.input_dim(i * 4 + 3)));
    blobs_.push_back(blob_pointer);
    blob_names_.push_back(blob_name);
    blob_need_backward_.push_back(param.force_backward());
    net_input_blob_indices_.push_back(i);
    net_input_blobs_.push_back(blob_pointer.get());
    blob_name_to_idx[blob_name] = i;
    available_blobs.insert(blob_name);
    memory_used += blob_pointer->count();
  }
  DLOG(INFO) << "Memory required for Data " << memory_used * sizeof(Dtype);
  // For each layer, set up its input and output
  bottom_vecs_.resize(param.layers_size());
  top_vecs_.resize(param.layers_size());
  bottom_id_vecs_.resize(param.layers_size());
  top_id_vecs_.resize(param.layers_size());
  for (int i = 0; i < param.layers_size(); ++i) {
    bool in_place = false;
    const LayerParameter& layer_param = param.layers(i);
    layers_.push_back(shared_ptr<Layer<Dtype> >(GetLayer<Dtype>(layer_param)));
    layer_names_.push_back(layer_param.name());
    LOG(INFO) << "Creating Layer " << layer_param.name();
    bool need_backward = param.force_backward();
    // Figure out this layer's input
    for (int j = 0; j < layer_param.bottom_size(); ++j) {
      const string& blob_name = layer_param.bottom(j);
      const int blob_id = blob_name_to_idx[blob_name];
      if (available_blobs.find(blob_name) == available_blobs.end()) {
        LOG(FATAL) << "Unknown blob input " << blob_name << " to layer " << j;
      }
      LOG(INFO) << layer_param.name() << " <- " << blob_name;
      bottom_vecs_[i].push_back(blobs_[blob_id].get());
      bottom_id_vecs_[i].push_back(blob_id);
      // If a blob needs backward, this layer should provide it.
      need_backward |= blob_need_backward_[blob_id];
      available_blobs.erase(blob_name);
    }
    // Figure out this layer's output
    for (int j = 0; j < layer_param.top_size(); ++j) {
      const string& blob_name = layer_param.top(j);
      // Check if we are doing in-place computation
      if (layer_param.bottom_size() > j &&
          blob_name == layer_param.bottom(j)) {
        // In-place computation
        LOG(INFO) << layer_param.name() << " -> " << blob_name
                  << " (in-place)";
        in_place = true;
        available_blobs.insert(blob_name);
        top_vecs_[i].push_back(blobs_[blob_name_to_idx[blob_name]].get());
        top_id_vecs_[i].push_back(blob_name_to_idx[blob_name]);
      } else if (blob_name_to_idx.find(blob_name) != blob_name_to_idx.end()) {
        // If we are not doing in-place computation but have duplicate blobs,
        // raise an error.
        LOG(FATAL) << "Duplicate blobs produced by multiple sources.";
      } else {
        // Normal output.
        LOG(INFO) << layer_param.name() << " -> " << blob_name;
        shared_ptr<Blob<Dtype> > blob_pointer(new Blob<Dtype>());
        blobs_.push_back(blob_pointer);
        blob_names_.push_back(blob_name);
        blob_need_backward_.push_back(param.force_backward());
        blob_name_to_idx[blob_name] = blob_names_.size() - 1;
        available_blobs.insert(blob_name);
        top_vecs_[i].push_back(blobs_[blob_names_.size() - 1].get());
        top_id_vecs_[i].push_back(blob_names_.size() - 1);
      }
    }
    // After this layer is connected, set it up.
    // LOG(INFO) << "Setting up " << layer_names_[i];
    layers_[i]->SetUp(bottom_vecs_[i], &(top_vecs_[i]));
    for (int topid = 0; topid < top_vecs_[i].size(); ++topid) {
      LOG(INFO) << "Top shape: " << top_vecs_[i][topid]->num() << " "
          << top_vecs_[i][topid]->channels() << " "
          << top_vecs_[i][topid]->height() << " "
          << top_vecs_[i][topid]->width() << " ("
          << top_vecs_[i][topid]->count() << ")";
      if (!in_place) memory_used += top_vecs_[i][topid]->count();
    }
    DLOG(INFO) << "Memory required for Data " << memory_used * sizeof(Dtype);
    // blobs: #0 weights, #1 bias term;
    // blobs_lr: #0 learning rate for weights, #1 learning rate for bias
    int blobs_lr_size = layers_[i]->layer_param().blobs_lr_size();
    CHECK(blobs_lr_size == layers_[i]->blobs().size() || blobs_lr_size == 0)
        << "Incorrect blobs lr size: should be either 0 or the same as "
           "the number of the layer's parameter blobs.";
    if (blobs_lr_size) {
      // Check if this layer needs backward operation itself
      for (int j = 0; j < blobs_lr_size; ++j) {
        need_backward |= (layers_[i]->layer_param().blobs_lr(j) > 0);
      }
    } else if (layers_[i]->blobs().size()) {
      // catch: if a layer param does not specify blobs_lr, we should assume
      // the learning rate to be 1. Thus we will need to perform backward.
      need_backward = true;
    }
    // Finally, set the backward flag
    layer_need_backward_.push_back(need_backward);
    if (need_backward) {
      LOG(INFO) << layer_names_[i] << " needs backward computation.";
      for (int j = 0; j < top_id_vecs_[i].size(); ++j) {
        blob_need_backward_[top_id_vecs_[i][j]] = true;
      }
    } else {
      LOG(INFO) << layer_names_[i] << " does not need backward computation.";
    }
  }
  // In the end, all remaining blobs are considered output blobs.
  for (set<string>::iterator it = available_blobs.begin();
       it != available_blobs.end(); ++it) {
    LOG(INFO) << "This network produces output " << *it;
    net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get());
    net_output_blob_indices_.push_back(blob_name_to_idx[*it]);
  }
  for (size_t i = 0; i < blob_names_.size(); ++i) {
    blob_names_index_[blob_names_[i]] = i;
  }
  for (size_t i = 0; i < layer_names_.size(); ++i) {
    layer_names_index_[layer_names_[i]] = i;
  }
  GetLearningRateAndWeightDecay();
  LOG(INFO) << "Network initialization done.";
  LOG(INFO) << "Memory required for Data " << memory_used * sizeof(Dtype);
}
void insertSplits(const NetParameter& param, NetParameter* splitted_param) {
  splitted_param->CopyFrom(param);
  splitted_param->clear_layer();
  // pair<layer_idx, blob_idx>
  map<string, pair<int, int> > blob_name_to_last_top_idx;
  map<pair<int, int>, pair<int, int> > bottom_idx_to_source_top_idx;
  map<pair<int, int>, int> top_idx_to_bottom_count;
  map<pair<int, int>, float> top_idx_to_loss_weight;
  map<pair<int, int>, int> top_idx_to_bottom_split_idx;
  map<int, string> layer_idx_to_layer_name;
  layer_idx_to_layer_name[-1] = "input";
  // scan and stuff all input blobs into a virtual layer named "input" at
  // index -1; input blobs do not belong to any layer, so we stuff them
  // into this virtual layer.
  // usually used for viewing a Net (e.g. examples\cifar10\cifar10_full.prototxt):
  //   input: "data"      <- declare a temporary data blob
  //   input_shape {      <- declare its shape
  //     dim: 1
  //     dim: 3
  //     dim: 32
  //     dim: 32
  //   }
  // pay attention: input blobs should not be used in train/test prototxts,
  // because they do not specify valid data sources;
  // you can regard them as viewing toys
  for (int i = 0; i < param.input_size(); i++) {
    const string& blob_name = param.input(i);
    blob_name_to_last_top_idx[blob_name] = make_pair(-1, i);
  }
  for (int i = 0; i < param.layer_size(); i++) {
    const LayerParameter& layer_param = param.layer(i);
    // bind layer idx to layer name
    layer_idx_to_layer_name[i] = layer_param.name();
    // a layer may have several bottom blobs
    for (int j = 0; j < layer_param.bottom_size(); j++) {
      const string& blob_name = layer_param.bottom(j);
      // ensure that every bottom blob has the same name as some top blob
      if (!blob_name_to_last_top_idx.count(blob_name)) {
        LOG(FATAL) << "Unknown bottom blob: " << blob_name
                   << " at layer: " << layer_param.name() << ".";
      }
      const pair<int, int>& bottom_idx = make_pair(i, j);
      const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
      // a bottom's name must match some top's name:
      // find the bottom's parent top (<- backward direction).
      // note that the top name must be declared before the bottom name,
      // or the bottom will bind to layer_{-1}
      bottom_idx_to_source_top_idx[bottom_idx] = top_idx;
      top_idx_to_bottom_count[top_idx]++;
    }
    // update the top name's position for the following bottom names
    for (int j = 0; j < layer_param.top_size(); j++) {
      const string& blob_name = layer_param.top(j);
      blob_name_to_last_top_idx[blob_name] = make_pair(i, j);
    }
    const int last_loss =
        min(layer_param.loss_weight_size(), layer_param.top_size());
    // only relevant for LossLayers
    for (int j = 0; j < last_loss; j++) {
      const string& blob_name = layer_param.top(j);
      // updated above
      const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
      top_idx_to_loss_weight[top_idx] = layer_param.loss_weight(j);
      // from loss (top) backward to bottom
      if (top_idx_to_loss_weight[top_idx]) top_idx_to_bottom_count[top_idx]++;
    }
  }
  // special case: a data blob in the virtual layer that is shared by other
  // blobs must also be split
  for (int i = 0; i < param.input_size(); i++) {
    const int split_count = top_idx_to_bottom_count[make_pair(-1, i)];
    if (split_count > 1) {
      // "input"
      const string& layer_name = layer_idx_to_layer_name[-1];
      const string& blob_name = param.input(i);
      // push_back a new param
      LayerParameter* split_layer_param = splitted_param->add_layer();
      const float kZeroLossWeight = 0;
      configureSplitLayer(layer_name, blob_name, i, split_count,
          kZeroLossWeight, split_layer_param);
    }
  }
  for (int i = 0; i < param.layer_size(); i++) {
    // push_back a new param
    LayerParameter* layer_param = splitted_param->add_layer();
    layer_param->CopyFrom(param.layer(i));
    for (int j = 0; j < layer_param->bottom_size(); j++) {
      // look up the top that was declared before this bottom
      const pair<int, int>& top_idx =
          bottom_idx_to_source_top_idx[make_pair(i, j)];
      // check the top's count
      const int split_count = top_idx_to_bottom_count[top_idx];
      if (split_count > 1) {
        // previous layer_name
        const string& layer_name = layer_idx_to_layer_name[top_idx.first];
        const string& blob_name = layer_param->bottom(j);
        // e.g. conv1 => conv1_conv1_0_split_0
        // once used, increment for the next consumer
        layer_param->set_bottom(j, splitBlobName(layer_name, blob_name,
            top_idx.second, top_idx_to_bottom_split_idx[top_idx]++));
      }
    }
    for (int j = 0; j < layer_param->top_size(); j++) {
      const pair<int, int>& top_idx = make_pair(i, j);
      const int split_count = top_idx_to_bottom_count[top_idx];
      if (split_count > 1) {
        // current layer_name
        const string& layer_name = layer_idx_to_layer_name[top_idx.first];
        const string& blob_name = layer_param->top(j);
        // add a split layer
        LayerParameter* split_layer_param = splitted_param->add_layer();
        const float loss_weight = top_idx_to_loss_weight[top_idx];
        configureSplitLayer(layer_name, blob_name, j, split_count,
            loss_weight, split_layer_param);
        if (loss_weight) {
          layer_param->clear_loss_weight();
          // split top 0 carries the loss weight itself,
          // so bottoms start splitting from index 1
          top_idx_to_bottom_split_idx[top_idx]++;
        }
      }
    }
  }
}
void Net<Dtype>::Init(const NetParameter& param) {
  // Basically, build all the layers and set up their connections.
  name_ = param.name();
  map<string, int> blob_name_to_idx;
  set<string> available_blobs;
  int num_layers = param.layers_size();
  CHECK_EQ(param.input_size() * 4, param.input_dim_size())
      << "Incorrect bottom blob dimension specifications.";
  // set the input blobs
  for (int i = 0; i < param.input_size(); ++i) {
    const string& blob_name = param.input(i);
    shared_ptr<Blob<Dtype> > blob_pointer(
        new Blob<Dtype>(param.input_dim(i * 4),
                        param.input_dim(i * 4 + 1),
                        param.input_dim(i * 4 + 2),
                        param.input_dim(i * 4 + 3)));
    blobs_.push_back(blob_pointer);
    blob_names_.push_back(blob_name);
    blob_need_backward_.push_back(param.force_backward());
    net_input_blob_indices_.push_back(i);
    net_input_blobs_.push_back(blob_pointer.get());
    blob_name_to_idx[blob_name] = i;
    available_blobs.insert(blob_name);
  }
  // For each layer, set up its input and output
  bottom_vecs_.resize(param.layers_size());
  top_vecs_.resize(param.layers_size());
  bottom_id_vecs_.resize(param.layers_size());
  top_id_vecs_.resize(param.layers_size());
  for (int i = 0; i < param.layers_size(); ++i) {
    const LayerConnection& layer_connection = param.layers(i);
    const LayerParameter& layer_param = layer_connection.layer();
    layers_.push_back(shared_ptr<Layer<Dtype> >(GetLayer<Dtype>(layer_param)));
    layer_names_.push_back(layer_param.name());
    LOG(INFO) << "Creating Layer " << layer_param.name();
    bool need_backward = param.force_backward();
    // Figure out this layer's input and output
    for (int j = 0; j < layer_connection.bottom_size(); ++j) {
      const string& blob_name = layer_connection.bottom(j);
      const int blob_id = blob_name_to_idx[blob_name];
      if (available_blobs.find(blob_name) == available_blobs.end()) {
        LOG(FATAL) << "Unknown blob input " << blob_name << " to layer " << j;
      }
      LOG(INFO) << layer_param.name() << " <- " << blob_name;
      bottom_vecs_[i].push_back(blobs_[blob_id].get());
      bottom_id_vecs_[i].push_back(blob_id);
      // If a blob needs backward, this layer should provide it.
      need_backward |= blob_need_backward_[blob_id];
      available_blobs.erase(blob_name);
    }
    for (int j = 0; j < layer_connection.top_size(); ++j) {
      const string& blob_name = layer_connection.top(j);
      // Check if we are doing in-place computation
      if (layer_connection.bottom_size() > j &&
          blob_name == layer_connection.bottom(j)) {
        // In-place computation
        LOG(INFO) << layer_param.name() << " -> " << blob_name
                  << " (in-place)";
        available_blobs.insert(blob_name);
        top_vecs_[i].push_back(blobs_[blob_name_to_idx[blob_name]].get());
        top_id_vecs_[i].push_back(blob_name_to_idx[blob_name]);
      } else if (blob_name_to_idx.find(blob_name) != blob_name_to_idx.end()) {
        // If we are not doing in-place computation but have duplicate blobs,
        // raise an error.
        LOG(FATAL) << "Duplicate blobs produced by multiple sources.";
      } else {
        // Normal output.
        LOG(INFO) << layer_param.name() << " -> " << blob_name;
        shared_ptr<Blob<Dtype> > blob_pointer(new Blob<Dtype>());
        blobs_.push_back(blob_pointer);
        blob_names_.push_back(blob_name);
        blob_need_backward_.push_back(param.force_backward());
        blob_name_to_idx[blob_name] = blob_names_.size() - 1;
        available_blobs.insert(blob_name);
        top_vecs_[i].push_back(blobs_[blob_names_.size() - 1].get());
        top_id_vecs_[i].push_back(blob_names_.size() - 1);
      }
    }
    // After this layer is connected, set it up.
    // LOG(INFO) << "Setting up " << layer_names_[i];
    layers_[i]->SetUp(bottom_vecs_[i], &top_vecs_[i]);
    for (int topid = 0; topid < top_vecs_[i].size(); ++topid) {
      LOG(INFO) << "Top shape: " << top_vecs_[i][topid]->channels() << " "
          << top_vecs_[i][topid]->height() << " "
          << top_vecs_[i][topid]->width();
    }
    // Check if this layer needs backward operation itself
    for (int j = 0; j < layers_[i]->layer_param().blobs_lr_size(); ++j) {
      need_backward |= (layers_[i]->layer_param().blobs_lr(j) > 0);
    }
    // Finally, set the backward flag
    layer_need_backward_.push_back(need_backward);
    if (need_backward) {
      LOG(INFO) << layer_names_[i] << " needs backward computation.";
      for (int j = 0; j < top_id_vecs_[i].size(); ++j) {
        blob_need_backward_[top_id_vecs_[i][j]] = true;
      }
    } else {
      LOG(INFO) << layer_names_[i] << " does not need backward computation.";
    }
  }
  // In the end, all remaining blobs are considered output blobs.
  for (set<string>::iterator it = available_blobs.begin();
       it != available_blobs.end(); ++it) {
    LOG(INFO) << "This network produces output " << *it;
    net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get());
  }
  GetLearningRateAndWeightDecay();
  LOG(INFO) << "Network initialization done.";
}
void insert_splits(const NetParameter& param, NetParameter* param_split) {
  // Initialize by copying from the input NetParameter.
  param_split->CopyFrom(param);
  param_split->clear_layers();
  map<string, pair<int, int> > blob_name_to_last_top_idx;
  map<pair<int, int>, pair<int, int> > bottom_idx_to_source_top_idx;
  map<pair<int, int>, int> top_idx_to_bottom_count;
  map<pair<int, int>, int> top_idx_to_bottom_split_idx;
  map<int, string> layer_idx_to_layer_name;
  layer_idx_to_layer_name[-1] = "input";
  // Determine the number of times each blob is used as an input (bottom) blob.
  for (int i = 0; i < param.input_size(); ++i) {
    const string& blob_name = param.input(i);
    blob_name_to_last_top_idx[blob_name] = make_pair(-1, i);
  }
  for (int i = 0; i < param.layers_size(); ++i) {
    const LayerConnection& layer_connection = param.layers(i);
    layer_idx_to_layer_name[i] = layer_connection.layer().name();
    for (int j = 0; j < layer_connection.bottom_size(); ++j) {
      const string& blob_name = layer_connection.bottom(j);
      if (blob_name_to_last_top_idx.find(blob_name) ==
          blob_name_to_last_top_idx.end()) {
        LOG(FATAL) << "Unknown blob input " << blob_name << " to layer " << j;
      }
      const pair<int, int>& bottom_idx = make_pair(i, j);
      const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
      bottom_idx_to_source_top_idx[bottom_idx] = top_idx;
      ++top_idx_to_bottom_count[top_idx];
    }
    for (int j = 0; j < layer_connection.top_size(); ++j) {
      const string& blob_name = layer_connection.top(j);
      blob_name_to_last_top_idx[blob_name] = make_pair(i, j);
    }
  }
  // Create split layer for any input blobs used by other layers as bottom
  // blobs more than once.
  for (int i = 0; i < param.input_size(); ++i) {
    const int split_count = top_idx_to_bottom_count[make_pair(-1, i)];
    if (split_count > 1) {
      const string& layer_name = layer_idx_to_layer_name[-1];
      const string& blob_name = param.input(i);
      LayerConnection* split_layer_connection = param_split->add_layers();
      configure_split_layer(layer_name, blob_name, i, split_count,
          split_layer_connection);
    }
  }
  for (int i = 0; i < param.layers_size(); ++i) {
    LayerConnection* layer_connection = param_split->add_layers();
    layer_connection->CopyFrom(param.layers(i));
    // Replace any shared bottom blobs with split layer outputs.
    for (int j = 0; j < layer_connection->bottom_size(); ++j) {
      const pair<int, int>& top_idx =
          bottom_idx_to_source_top_idx[make_pair(i, j)];
      const int split_count = top_idx_to_bottom_count[top_idx];
      if (split_count > 1) {
        const string& layer_name = layer_idx_to_layer_name[top_idx.first];
        const string& blob_name = layer_connection->bottom(j);
        layer_connection->set_bottom(j, get_split_blob_name(layer_name,
            blob_name, top_idx.second,
            top_idx_to_bottom_split_idx[top_idx]++));
      }
    }
    // Create split layer for any top blobs used by other layers as bottom
    // blobs more than once.
    for (int j = 0; j < layer_connection->top_size(); ++j) {
      const int split_count = top_idx_to_bottom_count[make_pair(i, j)];
      if (split_count > 1) {
        const string& layer_name = layer_idx_to_layer_name[i];
        const string& blob_name = layer_connection->top(j);
        LayerConnection* split_layer_connection = param_split->add_layers();
        configure_split_layer(layer_name, blob_name, j, split_count,
            split_layer_connection);
      }
    }
  }
}