Example #1
shared_ptr<Blob<float> > device::Buffer(int id) {
  // Lazily grow the buffer list until the requested index exists.
  while (buff_f_.size() <= static_cast<size_t>(id)) {
    shared_ptr<Blob<float> > blob_pointer(new Blob<float>(this));
    buff_f_.push_back(blob_pointer);
  }
  return buff_f_[id];
}
Example #2
shared_ptr<Blob<double> > DeviceContext::Buffer(int id) {
  // Lazily grow the buffer list until the requested index exists.
  while (buff_d_.size() <= static_cast<size_t>(id)) {
    shared_ptr<Blob<double> > blob_pointer(new Blob<double>(this));
    buff_d_.push_back(blob_pointer);
  }
  return buff_d_[id];
}
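Examples #1 and #2 are the float and double overloads of the same lazy, per-index buffer cache: the blob is allocated on the first request for a given id and reused afterwards. A minimal call-site sketch; the dev variable and the reshape dimensions are assumptions, not part of the examples above:

// Hypothetical usage of the Buffer() accessor shown above.
// "dev" is assumed to point at a constructed device instance (Example #1's class).
shared_ptr<Blob<float> > scratch = dev->Buffer(0);  // allocated on first use, cached afterwards
scratch->Reshape(1, 1, 64, 64);                     // later calls with id 0 return the same blob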
Example #3
char *buffer_read_string(uint32_t len)
{
	char *s;
	blob_free(blob_pool.size);
	s = blob_pointer(blob_alloc(len + 1));
	s[fread(s, 1, len, infile)] = '\0';	/* NUL-terminate after however many bytes were actually read */
	return ferror(infile) ? NULL : s;	/* NULL signals a read error */
}
Example #4
void AffinityLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
                                    const vector<Blob<Dtype>*>& top) {
  min_index_.clear();
  for (int bidx = 0; bidx < bottom.size(); ++bidx) {
    // 1, #edges, height, width
    top[bidx]->Reshape(1, 2, bottom[bidx]->height(), bottom[bidx]->width());

    shared_ptr<Blob<Dtype> > blob_pointer(
        new Blob<Dtype>(this->device_context()));
    min_index_.push_back(blob_pointer);

    // 1, #edges, height, width
    min_index_[bidx]->Reshape(1, 2, bottom[bidx]->height(),
                              bottom[bidx]->width());
  }
}
Example #5
Dtype ApolloNet<Dtype>::ForwardLayer(const string& layer_param_string, const string& runtime_param_string) {
    /* This function will
     * 1) Check if the layer name is in the cache
     * 2) Create the layer if it is new
     * 3) Set up the top blobs
     * 4) Set up the bottom blobs
     * 5) Set up the parameters
     * 6) Call the Forward function */ 
     
    LayerParameter active_layer_param;
    RuntimeParameter runtime_param;
    ASSERT(runtime_param.ParseFromString(runtime_param_string), "");
    ASSERT(active_layer_param.ParseFromString(layer_param_string), "");
    ASSERT(active_layer_param.has_name(), "");
    const string& layer_name = active_layer_param.name();
    shared_ptr<Layer<Dtype> > layer;
    const bool new_layer = layers_map_.find(layer_name) == layers_map_.end();
    if (new_layer) {
      layer = LayerRegistry<Dtype>::CreateLayer(active_layer_param);
      LOG(INFO) << "Creating Layer " << layer_name;
      LOG(INFO) << active_layer_param.DebugString();
      layers_map_[layer_name] = layer;
      active_layers_set_.insert(layer_name);
    } else {
      layer = layers_map_[layer_name];
      std::pair<set<string>::iterator,bool> ret = active_layers_set_.insert(layer_name);
      ASSERT(ret.second, "Layer with name '" << layer_name << "' is already used");
      ASSERT(layer->layer_param().type() == active_layer_param.type(),
          "Layer with name '" << active_layer_param.name() << "' already exists with a different type.");
    }
    layer->set_runtime_param(runtime_param);

    active_layers_vec_.push_back(layer_name);
    vector<Blob<Dtype>*> bottom_vec;
    vector<Blob<Dtype>*> top_vec;

    const vector<string>& bottom_names = bottom_blob_names_[layer_name];
    bool reset_bottoms = active_layer_param.bottom_size() != bottom_names.size();
    for (int bottom_id = 0; bottom_id < active_layer_param.bottom_size(); ++bottom_id) {
      const string& blob_name = active_layer_param.bottom(bottom_id);
      ASSERT(tops_.find(blob_name) != tops_.end(), 
          "Could not find bottom: '" << blob_name << "' for layer: " << layer_name);
      if (bottom_names.size() > bottom_id && bottom_names[bottom_id] != blob_name) { reset_bottoms = true; }
    }

    if (new_layer || reset_bottoms) {
      // The layer is new, or its list of bottoms has changed. Reset the bottom blobs.
      bottom_blobs_[layer_name].clear();
      bottom_blob_names_[layer_name].clear();
      for (int bottom_id = 0; bottom_id < active_layer_param.bottom_size(); ++bottom_id) {
        const string& blob_name = active_layer_param.bottom(bottom_id);
        bottom_blob_names_[layer_name].push_back(blob_name);
        shared_ptr<Blob<Dtype> > top_blob = tops_[blob_name];
        shared_ptr<Blob<Dtype> > bottom_blob(new Blob<Dtype>(top_blob->shape()));
        bottom_blob->ShareData(*top_blob);
        if (!layer->overwrites_bottom_diffs()) {
          // if layer accumulates delta rather than overwriting, we can save memory
          bottom_blob->ShareDiff(*top_blob);
        }
        bottom_blobs_[layer_name].push_back(bottom_blob);
      }
      layer->reset_bottoms(bottom_blob_names_[layer_name]);
    } else {
      // Reshape bottom_blobs to match their respective top blobs 
      for (int bottom_id = 0; bottom_id < active_layer_param.bottom_size(); ++bottom_id) {
        const string& blob_name = active_layer_param.bottom(bottom_id);
        shared_ptr<Blob<Dtype> > top_blob = tops_[blob_name];
        shared_ptr<Blob<Dtype> > bottom_blob = bottom_blobs_[layer_name][bottom_id];
        bottom_blob->ReshapeLike(*top_blob);
      }
    }

    for (int bottom_id = 0; bottom_id < active_layer_param.bottom_size(); ++bottom_id) {
      bottom_vec.push_back(bottom_blobs_[layer_name][bottom_id].get());
    }

    for (int top_id = 0; top_id < active_layer_param.top_size(); ++top_id) {
      const string& blob_name = active_layer_param.top(top_id);
      if (tops_.find(blob_name) == tops_.end()) {
        shared_ptr<Blob<Dtype> > blob_pointer(new Blob<Dtype>());
        tops_[blob_name] = blob_pointer;
      }
      Blob<Dtype>* top_blob = tops_[blob_name].get();
      top_vec.push_back(top_blob);
      if (top_blob->DiffInitialized() && !layer->is_loss()) {
        // Zero out top_diffs, except for loss blobs, which never change
        top_blob->SetDiffValues(0.);
      }
    }

    if (new_layer) {
      layer->SetUp(bottom_vec, top_vec);
      AddLayerParams(layer);
    }

    for (int param_id = 0; param_id < layer->param_names().size(); ++param_id) {
      const string& param_name = layer->param_names()[param_id];
      active_params_set_.insert(param_name);
    }

    Dtype loss = 0;
    layer->set_phase(phase_);
    loss = layer->Forward(bottom_vec, top_vec);
    return loss;
}
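ForwardLayer() consumes serialized protobuf messages rather than in-memory objects, so a caller builds a LayerParameter and a RuntimeParameter and passes their wire forms. A hedged call-site sketch; the net variable (assumed to be a constructed ApolloNet<float>), the "fc1"/"data" names, and leaving runtime_param at its defaults are illustrative assumptions:

// Hypothetical call site for ApolloNet<float>::ForwardLayer.
LayerParameter layer_param;
layer_param.set_name("fc1");        // required: the ASSERT above checks has_name()
layer_param.add_bottom("data");     // every bottom must already exist in tops_
layer_param.add_top("fc1");
// ... set the layer type and any layer-specific fields here ...
RuntimeParameter runtime_param;     // left at protobuf defaults in this sketch
float loss = net.ForwardLayer(layer_param.SerializeAsString(),
                              runtime_param.SerializeAsString());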
Example #6
	void Net<Dtype>::Init(const NetParameter& in_param) {
		// Create a copy of in_param with splits added where necessary.
		NetParameter param;
		InsertSplits(in_param, &param);
		
		// Basically, build all the layers and set up their connections.
		name_ = param.name();
		map<string, int> blob_name_to_idx;
		set<string> available_blobs;
		int num_layers = param.layers_size();
		
		CHECK_EQ(param.input_size() * 4, param.input_dim_size())
			<< "Incorrect bottom blob dimension specifications.";
		size_t memory_used = 0;
		
		// set the input blobs
		for (int i = 0; i < param.input_size(); ++i) {
			const string& blob_name = param.input(i);
			shared_ptr<Blob<Dtype> > blob_pointer(
				new Blob<Dtype>(param.input_dim(i * 4),
				param.input_dim(i * 4 + 1),
				param.input_dim(i * 4 + 2),
				param.input_dim(i * 4 + 3)));
			blobs_.push_back(blob_pointer);
			blob_names_.push_back(blob_name);
			blob_need_backward_.push_back(param.force_backward());
			net_input_blob_indices_.push_back(i);
			net_input_blobs_.push_back(blob_pointer.get());
			blob_name_to_idx[blob_name] = i;
			available_blobs.insert(blob_name);
			memory_used += blob_pointer->count();
		}
		DLOG(INFO) << "Memory required for Data" << memory_used*sizeof(Dtype);
		
		// For each layer, set up their input and output
		bottom_vecs_.resize(param.layers_size());
		top_vecs_.resize(param.layers_size());
		bottom_id_vecs_.resize(param.layers_size());
		top_id_vecs_.resize(param.layers_size());
		
		for (int i = 0; i < param.layers_size(); ++i) {
			bool in_place = false;
			const LayerParameter& layer_param = param.layers(i);
			layers_.push_back(shared_ptr<Layer<Dtype> >(GetLayer<Dtype>(layer_param)));
			layer_names_.push_back(layer_param.name());
			
			LOG(INFO) << "Creating Layer " << layer_param.name();
			bool need_backward = param.force_backward();
			
			// Figure out this layer's input
			for (int j = 0; j < layer_param.bottom_size(); ++j) {
				const string& blob_name = layer_param.bottom(j);
				const int blob_id = blob_name_to_idx[blob_name];
				if (available_blobs.find(blob_name) == available_blobs.end()) {
					LOG(FATAL) << "Unknown blob input " << blob_name <<
						" to layer" << j;
				}
				LOG(INFO) << layer_param.name() << " <- " << blob_name;
				bottom_vecs_[i].push_back(blobs_[blob_id].get());
				bottom_id_vecs_[i].push_back(blob_id);
				// If a blob needs backward, this layer should provide it.
				need_backward |= blob_need_backward_[blob_id];
				available_blobs.erase(blob_name);
			}

			// Figure out this layer's output
			for (int j = 0; j < layer_param.top_size(); ++j) {
				const string& blob_name = layer_param.top(j);
				
				// Check if we are doing in-place computation
				if (layer_param.bottom_size() > j &&
					blob_name == layer_param.bottom(j)) {
						// In-place computation
						LOG(INFO) << layer_param.name() << " -> " << blob_name << " (in-place)";
						in_place = true;
						available_blobs.insert(blob_name);
						top_vecs_[i].push_back(
							blobs_[blob_name_to_idx[blob_name]].get());
						top_id_vecs_[i].push_back(blob_name_to_idx[blob_name]);
				} else if (blob_name_to_idx.find(blob_name) != blob_name_to_idx.end()) {
					// If we are not doing in-place computation but the blob name is
					// duplicated, raise an error.
					LOG(FATAL) << "Duplicate blobs produced by multiple sources.";
				} else {
					// Normal output.
					LOG(INFO) << layer_param.name() << " -> " << blob_name;
					shared_ptr<Blob<Dtype> > blob_pointer(new Blob<Dtype>());
					blobs_.push_back(blob_pointer);
					blob_names_.push_back(blob_name);
					blob_need_backward_.push_back(param.force_backward());
					blob_name_to_idx[blob_name] = blob_names_.size() - 1;
					available_blobs.insert(blob_name);
					top_vecs_[i].push_back(blobs_[blob_names_.size() - 1].get());
					top_id_vecs_[i].push_back(blob_names_.size() - 1);
				}
			}
			
			// After this layer is connected, set it up.
			//LOG(INFO) << "Setting up " << layer_names_[i];
			layers_[i]->SetUp(bottom_vecs_[i], &(top_vecs_[i]));
			for (int topid = 0; topid < top_vecs_[i].size(); ++topid) {
				LOG(INFO) << "Top shape: " << top_vecs_[i][topid]->num() << " "
					<< top_vecs_[i][topid]->channels() << " "
					<< top_vecs_[i][topid]->height() << " "
					<< top_vecs_[i][topid]->width() << " ("
					<< top_vecs_[i][topid]->count() << ")";
				if (!in_place)
					memory_used += top_vecs_[i][topid]->count();
			}
			DLOG(INFO) << "Memory  required for Data " << memory_used*sizeof(Dtype);

			// blobs: [0] weights, [1] bias term; blobs_lr: [0] learning rate for weights, [1] learning rate for bias
			int blobs_lr_size = layers_[i]->layer_param().blobs_lr_size();
			CHECK(blobs_lr_size == layers_[i]->blobs().size() || blobs_lr_size == 0) // 0, 1, 2
				<< "Incorrect blobs lr size: should be either 0 or the same as "
				"the number of the layer's parameter blobs.";
			
			if (blobs_lr_size) {
				// Check if this layer needs backward operation itself
				for (int j = 0; j < blobs_lr_size; ++j) {
					need_backward |= (layers_[i]->layer_param().blobs_lr(j) > 0);
				}
			} else if (layers_[i]->blobs().size()) {
				// catch: if a layer param does not specify blobs_lr, we should assume the
				// learning rate to be 1. Thus we will need to perform backward.
				need_backward = true;
			}
			
			// Finally, set the backward flag
			layer_need_backward_.push_back(need_backward);
			
			if (need_backward) {
				LOG(INFO) << layer_names_[i] << " needs backward computation.";
				for (int j = 0; j < top_id_vecs_[i].size(); ++j) {
					blob_need_backward_[top_id_vecs_[i][j]] = true;
				}
			} else {
				LOG(INFO) << layer_names_[i] << " does not need backward computation.";
			}
		}
		
		// In the end, all remaining blobs are considered output blobs.
		for (set<string>::iterator it = available_blobs.begin();
			it != available_blobs.end(); ++it) {
				LOG(INFO) << "This network produces output " << *it;
				net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get());
				net_output_blob_indices_.push_back(blob_name_to_idx[*it]);
		}
			
		for (size_t i = 0; i < blob_names_.size(); ++i) {
			blob_names_index_[blob_names_[i]] = i;
		}
		
		for (size_t i = 0; i < layer_names_.size(); ++i) {
			layer_names_index_[layer_names_[i]] = i;
		}
		
		GetLearningRateAndWeightDecay();
		
		LOG(INFO) << "Network initialization done.";
		LOG(INFO) << "Memory required for Data " << memory_used*sizeof(Dtype);
	}
Example #7
void Net<Dtype>::Init(const NetParameter& param) {
  // Basically, build all the layers and set up their connections.
  name_ = param.name();
  map<string, int> blob_name_to_idx;
  set<string> available_blobs;
  int num_layers = param.layers_size();
  CHECK_EQ(param.input_size() * 4, param.input_dim_size())
      << "Incorrect bottom blob dimension specifications.";
  // set the input blobs
  for (int i = 0; i < param.input_size(); ++i) {
    const string& blob_name = param.input(i);
    shared_ptr<Blob<Dtype> > blob_pointer(
        new Blob<Dtype>(param.input_dim(i * 4),
                        param.input_dim(i * 4 + 1),
                        param.input_dim(i * 4 + 2),
                        param.input_dim(i * 4 + 3)));
    blobs_.push_back(blob_pointer);
    blob_names_.push_back(blob_name);
    blob_need_backward_.push_back(param.force_backward());
    net_input_blob_indices_.push_back(i);
    net_input_blobs_.push_back(blob_pointer.get());
    blob_name_to_idx[blob_name] = i;
    available_blobs.insert(blob_name);
  }
  // For each layer, set up their input and output
  bottom_vecs_.resize(param.layers_size());
  top_vecs_.resize(param.layers_size());
  bottom_id_vecs_.resize(param.layers_size());
  top_id_vecs_.resize(param.layers_size());
  for (int i = 0; i < param.layers_size(); ++i) {
    const LayerConnection& layer_connection = param.layers(i);
    const LayerParameter& layer_param = layer_connection.layer();
    layers_.push_back(shared_ptr<Layer<Dtype> >(GetLayer<Dtype>(layer_param)));
    layer_names_.push_back(layer_param.name());
    LOG(INFO) << "Creating Layer " << layer_param.name();
    bool need_backward = param.force_backward();
    // Figure out this layer's input and output
    for (int j = 0; j < layer_connection.bottom_size(); ++j) {
      const string& blob_name = layer_connection.bottom(j);
      const int blob_id = blob_name_to_idx[blob_name];
      if (available_blobs.find(blob_name) == available_blobs.end()) {
        LOG(FATAL) << "Unknown blob input " << blob_name <<
            " to layer" << j;
      }
      LOG(INFO) << layer_param.name() << " <- " << blob_name;
      bottom_vecs_[i].push_back(
          blobs_[blob_id].get());
      bottom_id_vecs_[i].push_back(blob_id);
      // If a blob needs backward, this layer should provide it.
      need_backward |= blob_need_backward_[blob_id];
      available_blobs.erase(blob_name);
    }
    for (int j = 0; j < layer_connection.top_size(); ++j) {
      const string& blob_name = layer_connection.top(j);
      // Check if we are doing in-place computation
      if (layer_connection.bottom_size() > j &&
          blob_name == layer_connection.bottom(j)) {
        // In-place computation
        LOG(INFO) << layer_param.name() << " -> " << blob_name << " (in-place)";
        available_blobs.insert(blob_name);
        top_vecs_[i].push_back(
            blobs_[blob_name_to_idx[blob_name]].get());
        top_id_vecs_[i].push_back(blob_name_to_idx[blob_name]);
      } else if (blob_name_to_idx.find(blob_name) != blob_name_to_idx.end()) {
        // If we are not doing in-place computation but the blob name is
        // duplicated, raise an error.
        LOG(FATAL) << "Duplicate blobs produced by multiple sources.";
      } else {
        // Normal output.
        LOG(INFO) << layer_param.name() << " -> " << blob_name;
        shared_ptr<Blob<Dtype> > blob_pointer(new Blob<Dtype>());
        blobs_.push_back(blob_pointer);
        blob_names_.push_back(blob_name);
        blob_need_backward_.push_back(param.force_backward());
        blob_name_to_idx[blob_name] = blob_names_.size() - 1;
        available_blobs.insert(blob_name);
        top_vecs_[i].push_back(blobs_[blob_names_.size() - 1].get());
        top_id_vecs_[i].push_back(blob_names_.size() - 1);
      }
    }
    // After this layer is connected, set it up.
    // LOG(INFO) << "Setting up " << layer_names_[i];
    layers_[i]->SetUp(bottom_vecs_[i], &top_vecs_[i]);
    for (int topid = 0; topid < top_vecs_[i].size(); ++topid) {
      LOG(INFO) << "Top shape: " << top_vecs_[i][topid]->channels() << " "
          << top_vecs_[i][topid]->height() << " "
          << top_vecs_[i][topid]->width();
    }
    // Check if this layer needs backward operation itself
    for (int j = 0; j < layers_[i]->layer_param().blobs_lr_size(); ++j) {
      need_backward |= (layers_[i]->layer_param().blobs_lr(j) > 0);
    }
    // Finally, set the backward flag
    layer_need_backward_.push_back(need_backward);
    if (need_backward) {
      LOG(INFO) << layer_names_[i] << " needs backward computation.";
      for (int j = 0; j < top_id_vecs_[i].size(); ++j) {
        blob_need_backward_[top_id_vecs_[i][j]] = true;
      }
    } else {
      LOG(INFO) << layer_names_[i] << " does not need backward computation.";
    }
  }
  // In the end, all remaining blobs are considered output blobs.
  for (set<string>::iterator it = available_blobs.begin();
      it != available_blobs.end(); ++it) {
    LOG(INFO) << "This network produces output " << *it;
    net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get());
  }
  GetLearningRateAndWeightDecay();
  LOG(INFO) << "Network initialization done.";
}
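Both Init() variants enforce the same input convention: each entry in the repeated input field must be paired with exactly four input_dim entries (num, channels, height, width), which is what the CHECK_EQ at the top of each function verifies. A minimal sketch of a NetParameter that satisfies that guard; the header path, the helper name, and the literal dimensions are assumptions:

// Hedged sketch: four input_dim entries per declared input blob.
#include "caffe/proto/caffe.pb.h"  // assumed location of the generated NetParameter

caffe::NetParameter MakeToyNetParam() {
  caffe::NetParameter param;
  param.set_name("toy_net");
  param.add_input("data");      // one declared input blob ...
  param.add_input_dim(1);       // ... described by exactly four dims: num,
  param.add_input_dim(3);       // channels,
  param.add_input_dim(224);     // height,
  param.add_input_dim(224);     // width
  return param;                 // ready to hand to Net<Dtype>::Init()
}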
Example #8
void TIConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype> *> &bottom,
                                      vector<Blob<Dtype> *> *top) {
  CHECK_EQ(bottom.size(), 1) << "TIConv Layer takes a single blob as input.";
  CHECK_EQ(top->size(), 1) << "TIConv Layer takes a single blob as output.";

  // NUM_T_ includes the identity transform
  this->NUM_T_ = this->layer_param_.transformations_size();

  LOG(INFO) << "TIConvolution layer using " << NUM_T_ << " transformations "
            << this->layer_param_.name() << " using interpolation: "
            << this->layer_param_.transformations(0).interp();
  TransParameter tparam;
  for (int t = 0; t < this->NUM_T_; ++t) {
    tparam = this->layer_param_.transformations(t);
    LOG(INFO) << " T" << t << " : "
              << " sc: " << tparam.scale() << ", rot: " << tparam.rotation();
  }
  if (Caffe::phase() == Caffe::TRAIN)
    LOG(INFO) << "  Creating Upsampling Layer in " << this->layer_param_.name();
  this->up_layer_ = new UpsamplingLayer<Dtype>(this->layer_param_);
  // The bottom that net provides is the bottom of UP layer
  // for T transformations, need to make blob for each
  for (int t = 0; t < this->NUM_T_; ++t) {
    shared_ptr<Blob<Dtype> > blob_pointer(new Blob<Dtype>());
    activations_.push_back(blob_pointer);
    // and add them to up_top_vec_
    up_top_vec_.push_back(blob_pointer.get());
  }
  // Setup UP layer, print shape
  this->up_layer_->SetUp(bottom, &up_top_vec_);
  if (Caffe::phase() == Caffe::TRAIN) {
    for (int i = 0; i < up_top_vec_.size(); ++i) {
      LOG(INFO) << "  Top shape: " << up_top_vec_[i]->channels() << " "
                << up_top_vec_[i]->height() << " " << up_top_vec_[i]->width();
    }
  }

  // Tied Conv
  if (Caffe::phase() == Caffe::TRAIN)
    LOG(INFO) << "  Creating TiedConv Layer in " << this->layer_param_.name();
  this->tiedconv_layer_ = new TiedConvolutionLayer<Dtype>(this->layer_param_);
  // make new top blobs and add them to tiedconv_top_vec_
  for (int t = 0; t < this->NUM_T_; ++t) {
    shared_ptr<Blob<Dtype> > blob_pointer(new Blob<Dtype>());
    activations_.push_back(blob_pointer);
    tiedconv_top_vec_.push_back(blob_pointer.get());
  }
  // Setup
  this->tiedconv_layer_->SetUp(up_top_vec_, &tiedconv_top_vec_);
  if (Caffe::phase() == Caffe::TRAIN) {
    for (int i = 0; i < up_top_vec_.size(); ++i) {
      LOG(INFO) << "  Top shape: " << tiedconv_top_vec_[i]->channels() << " "
                << tiedconv_top_vec_[i]->height() << " "
                << tiedconv_top_vec_[i]->width();
    }
  }

  // Connect the W and b of tiedconv_layer to TI's blobs_ to make everything
  // seem normal:
  this->blobs_ = tiedconv_layer_->blobs_;

  // DownPool
  if (Caffe::phase() == Caffe::TRAIN)
    LOG(INFO) << "  Creating DownPooling Layer in "
              << this->layer_param_.name();
  this->downpool_layer_ = new DownPoolingLayer<Dtype>(this->layer_param_);
  // no need to make new blobs here because downpool_bottom_vec_ == tiedconv_top_vec_
  // and the top that net provides is the top for Downpool
  this->downpool_layer_->SetUp(tiedconv_top_vec_, top);
}
Example #9
/// @brief Append a new top blob to the net.
void faceRecognition::AppendTop(int layer_id)
{
	std::shared_ptr<caffe::Blob<float> > blob_pointer(new caffe::Blob<float>());
	blobs_.push_back(blob_pointer);
	top_vecs_[layer_id].push_back(blob_pointer.get());
}
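Across all of these examples blob_pointer plays the same role: a shared_ptr that owns the freshly allocated blob is stored in a member container so the blob outlives the current scope, while the raw .get() pointer is handed to the non-owning top/bottom vectors that the layer or net machinery consumes. A self-contained sketch of that ownership pattern using only the standard library (Blob here is a stand-in struct, not caffe::Blob):

#include <memory>
#include <vector>

struct Blob { };  // placeholder for caffe::Blob<Dtype>

int main() {
  std::vector<std::shared_ptr<Blob> > owned_blobs;  // keeps the blobs alive
  std::vector<Blob*> top_vec;                       // non-owning view handed to a sub-layer
  for (int t = 0; t < 4; ++t) {
    std::shared_ptr<Blob> blob_pointer(new Blob());
    owned_blobs.push_back(blob_pointer);    // ownership stays with the container
    top_vec.push_back(blob_pointer.get());  // raw pointer stays valid as long as owned_blobs does
  }
  return 0;
}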