void MaskingLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
		const vector<Blob<Dtype>*>& top) {
		bias_term_ = this->layer_param_.masking_param().bias_term();

		if (this->blobs_.size() > 0) {
			LOG(INFO) << "Skipping parameter initialization";
		}
		else {
			// initialize weights (and bias), adjust parameter blob shape(s) and fill the values

			if (bias_term_) {
				this->blobs_.resize(2);
				this->blobs_[1].reset(new Blob<Dtype>(bottom[0]->shape()));
			}
			else {
				this->blobs_.resize(1);
			}
			this->blobs_[0].reset(new Blob<Dtype>(bottom[0]->shape()));

			if (bias_term_) {
				shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(this->layer_param_.masking_param().bias_filler()));
				bias_filler->Fill(this->blobs_[1].get());
			}
			shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(this->layer_param_.masking_param().weight_filler()));
			weight_filler->Fill(this->blobs_[0].get());
		}

		stable_prod_grad_ = this->layer_param_.masking_param().stable_prod_grad();
	}
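
For context, here is a minimal sketch (not taken from the original source) of how the parameters set up above could be consumed in a forward pass, assuming the layer computes an element-wise product of the input with the learned mask plus the optional bias:

void MaskingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int count = bottom[0]->count();
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* mask = this->blobs_[0]->cpu_data();  // allocated with bottom[0]'s shape above
  Dtype* top_data = top[0]->mutable_cpu_data();
  caffe_mul(count, bottom_data, mask, top_data);     // y = x .* mask
  if (bias_term_) {
    // the bias blob was also allocated with bottom[0]'s shape above
    caffe_add(count, top_data, this->blobs_[1]->cpu_data(), top_data);
  }
}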
Example #2
void ClusteringLossLayer<Dtype>::LayerSetUp(
      const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
  LossLayer<Dtype>::LayerSetUp(bottom, top);
  CHECK_EQ(bottom[0]->num() % TILE_DIM, 0) << "Only support "
    "batch sizes that are multiples of " << TILE_DIM << ".";
  N_ = this->layer_param_.clustering_loss_param().num_center();
  lambda_ = this->layer_param_.clustering_loss_param().lambda();
  CHECK_EQ(N_ % TILE_DIM, 0) << "Only support "
    "center numbers that are multiples of " << TILE_DIM << ".";
  K_ = bottom[0]->count() / bottom[0]->num();
  CHECK_EQ(K_ % TILE_DIM, 0) << "Only support "
    "input dimensions that are multiples of " << TILE_DIM << ".";
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    this->blobs_.resize(2);
    this->blobs_[0].reset(new Blob<Dtype>(1, 1, K_, N_));
    this->blobs_[1].reset(new Blob<Dtype>(1, 1, 1, N_));

    coef_margin_.Reshape(1, 1, 1, N_);

    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.clustering_loss_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());

    FillerParameter filler_param;
    filler_param.set_value(this->layer_param_.clustering_loss_param().margin());
    ConstantFiller<Dtype> margin_filler(filler_param);
    margin_filler.Fill(this->blobs_[1].get());
  }  // parameter initialization

  this->param_propagate_down_.resize(this->blobs_.size(), true);
}
Example #3
void InnerProductLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const int num_output = this->layer_param_.inner_product_param().num_output();
  bias_term_ = this->layer_param_.inner_product_param().bias_term();
  N_ = num_output;
  K_ = bottom[0]->count() / bottom[0]->num();
  // Check if we need to set up the weights
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize the weight
    this->blobs_[0].reset(new Blob<Dtype>(1, 1, N_, K_));
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.inner_product_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the bias term
    if (bias_term_) {
      this->blobs_[1].reset(new Blob<Dtype>(1, 1, 1, N_));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.inner_product_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }  // parameter initialization
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}
void InnerProductLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const int num_output = this->layer_param_.inner_product_param().num_output();
  bias_term_ = this->layer_param_.inner_product_param().bias_term();
  transpose_ = this->layer_param_.inner_product_param().transpose();
  N_ = num_output;
  const int axis = bottom[0]->CanonicalAxisIndex(
      this->layer_param_.inner_product_param().axis());
  // Dimensions starting from "axis" are "flattened" into a single
  // length K_ vector. For example, if bottom[0]'s shape is (N, C, H, W),
  // and axis == 1, N inner products with dimension CHW are performed.
  K_ = bottom[0]->count(axis);
  // Check if we need to set up the weights
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize the weights
    vector<int> weight_shape(2);
    if (transpose_) {
      weight_shape[0] = K_;
      weight_shape[1] = N_;
    } else {
      weight_shape[0] = N_;
      weight_shape[1] = K_;
    }
    this->blobs_[0].reset(new Blob<Dtype>(weight_shape));
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.inner_product_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the bias term
    if (bias_term_) {
      vector<int> bias_shape(1, N_);
      this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.inner_product_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }  // parameter initialization
  this->param_propagate_down_.resize(this->blobs_.size(), true);

  // added by zhaoyang 4.18
  this->has_backward_filter = this->layer_param_.inner_product_param().has_backward_filter();
  if (this->has_backward_filter) {
    string backward_filter_path = this->layer_param_.inner_product_param().backward_filter_path();
    FILE *f = fopen(backward_filter_path.c_str(), "r");
    CHECK(f != NULL) << "Failed to open backward filter file: " << backward_filter_path;
    int temp;
    while (fscanf(f, "%d", &temp) != EOF) {
      this->filter.push_back((bool)temp);
    }
    fclose(f);
  }
  //----
}
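
For reference, transpose_ is consumed downstream by flipping the GEMM transpose flag for the weight matrix; a sketch of the corresponding Forward_cpu as in stock Caffe (M_ and bias_multiplier_ are set up in Reshape, which these snippets omit):

void InnerProductLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const Dtype* weight = this->blobs_[0]->cpu_data();
  // top (M_ x N_) = bottom (M_ x K_) * weight (K_ x N_ if transposed, else N_ x K_)
  caffe_cpu_gemm<Dtype>(CblasNoTrans, transpose_ ? CblasNoTrans : CblasTrans,
      M_, N_, K_, (Dtype)1., bottom_data, weight, (Dtype)0., top_data);
  if (bias_term_) {
    // broadcast the bias across the batch via an outer product with a vector of ones
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,
        bias_multiplier_.cpu_data(), this->blobs_[1]->cpu_data(),
        (Dtype)1., top_data);
  }
}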
Example #5
void LocalLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(bottom.size(), 1) << "Conv Layer takes a single blob as input.";
  CHECK_EQ(top.size(), 1) << "Conv Layer takes a single blob as output.";

  kernel_size_ = this->layer_param_.local_param().kernel_size();
  stride_ = this->layer_param_.local_param().stride();
  pad_ = this->layer_param_.local_param().pad();
  num_ = bottom[0]->num();
  channels_ = bottom[0]->channels();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  num_output_ = this->layer_param_.local_param().num_output();

  height_out_ = (height_ + 2 * pad_ - kernel_size_) / stride_ + 1;
  width_out_ = (width_ + 2 * pad_ - kernel_size_) / stride_ + 1;

  M_ = num_output_;
  K_ = channels_ * kernel_size_ * kernel_size_;
  N_ = height_out_ * width_out_;

  CHECK_GT(kernel_size_, 0); 
  CHECK_GT(num_output_, 0); 
  CHECK_GE(height_, kernel_size_) << "height smaller than kernel size";
  CHECK_GE(width_, kernel_size_) << "width smaller than kernel size";
  // Set the parameters
  bias_term_ = this->layer_param_.local_param().bias_term();

  // Check if we need to set up the weights
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize the weight
    this->blobs_[0].reset(new Blob<Dtype>(
        num_output_, 1, K_, N_));
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.local_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the bias term
    if (bias_term_) {
      this->blobs_[1].reset(new Blob<Dtype>(1, 1, M_, N_));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.local_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());  
    }
  }
}
Example #6
void CCCPPoolingLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      //vector<Blob<Dtype>*>* top) {
      const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(bottom.size(), 1) << "CCCP Pooling Layer takes a single blob as input.";
  CHECK_EQ(top.size(), 1) << "CCCP Pooling Layer takes a single blob as output.";

  NUM_OUTPUT_ = this->layer_param_.cccp_param().num_output();
  GROUP_      = this->layer_param_.cccp_param().group();
  biasterm_   = this->layer_param_.cccp_param().biasterm();

  // Figure out the dimensions
  CHANNEL_ = bottom[0]->channels();
  REST_ = bottom[0]->height() * bottom[0]->width();
  NUM_ = bottom[0]->num();

  CHECK_EQ(CHANNEL_%GROUP_, 0) << "CCCP Pooling input channel number is not divisible by group number.";

  //top[0]->Reshape(bottom[0]->num(), GROUP_*NUM_OUTPUT_, bottom[0]->height(), bottom[0]->width());
  
  // Check if we need to set up the weights
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (biasterm_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize the weight
    this->blobs_[0].reset(new Blob<Dtype>(1, 1, GROUP_*NUM_OUTPUT_, CHANNEL_/GROUP_));
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(
        GetFiller<Dtype>(this->layer_param_.cccp_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the bias term
    if (biasterm_) {
      this->blobs_[1].reset(new Blob<Dtype>(1, 1, 1, GROUP_*NUM_OUTPUT_));
      shared_ptr<Filler<Dtype> > bias_filler(
          GetFiller<Dtype>(this->layer_param_.cccp_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }  // parameter initialization
  // Setting up the bias multiplier
  if (biasterm_) {
    bias_multiplier_.reset(new SyncedMemory(REST_ * sizeof(Dtype)));
    Dtype* bias_multiplier_data =
        reinterpret_cast<Dtype*>(bias_multiplier_->mutable_cpu_data());
    for (int i = 0; i < REST_; ++i) {
        bias_multiplier_data[i] = 1.;
    }
  }
}
void TopologyLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
                                          const vector<Blob<Dtype>*>& top) {

    const int num_output = this->layer_param_.topology_param().num_output();
    bias_term_ = this->layer_param_.topology_param().bias_term();
    N_ = num_output;
    weighted_bottom_.Reshape(bottom[0]->shape());

    const int axis = bottom[0]->CanonicalAxisIndex(this->layer_param_.topology_param().axis());

    // Dimensions starting from "axis" are "flattened" into a single
    // length K_ vector. For example, if bottom[0]'s shape is (N, C, H, W),
    // and axis == 1, N inner products with dimension CHW are performed.
    K_ = bottom[0]->count(axis);

    // Check if we need to set up the weights
    if (this->blobs_.size() > 0) {
        LOG(INFO) << "Skipping parameter initialization";

    } else {
        if (bias_term_) {
            this->blobs_.resize(2);
        } else {
            this->blobs_.resize(1);
        }

        // Initialize the weight
        vector<int> weight_shape(2);
        weight_shape[0] = N_;
        weight_shape[1] = K_;
        this->blobs_[0].reset(new Blob<Dtype>(weight_shape));

        // Fill the weights
        shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(this->layer_param_.topology_param().weight_filler()));
        weight_filler->Fill(this->blobs_[0].get());

        // If necessary, initialize and fill the bias term
        if (bias_term_) {
            vector<int> bias_shape(1, N_);
            this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
            shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(this->layer_param_.topology_param().bias_filler()));
            bias_filler->Fill(this->blobs_[1].get());
        }
    }

    this->param_propagate_down_.resize(this->blobs_.size(), true);

    ConstructWeightMask();
}
void InnerProductDataLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const int num_output = this->layer_param_.inner_product_param().num_output();
  bias_term_ = this->layer_param_.inner_product_param().bias_term();
  N_ = num_output;
  const int axis = bottom[0]->CanonicalAxisIndex(
      this->layer_param_.inner_product_param().axis());
  // Dimensions starting from "axis" are "flattened" into a single
  // length K_ vector. For example, if bottom[0]'s shape is (N, C, H, W),
  // and axis == 1, N inner products with dimension CHW are performed.
  K_ = bottom[0]->count(axis);
  // Check if we need to set up the weights
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize the weight
    vector<int> weight_shape(2);
    weight_shape[0] = N_;
    weight_shape[1] = K_;
    this->blobs_[0].reset(new Blob<Dtype>(weight_shape));
    // fill the weights
    if (Caffe::getThreadId() == 0) {
      shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
          this->layer_param_.inner_product_param().weight_filler()));
      weight_filler->Fill(this->blobs_[0].get());
    }
    Dtype* weight = this->blobs_[0]->mutable_cpu_data();
    // Note: MPI_FLOAT assumes Dtype is float; a double-precision net would need MPI_DOUBLE.
    MPI_Bcast(weight, this->blobs_[0]->count(), MPI_FLOAT, 0, MPI_COMM_WORLD);
    // If necessary, initialize and fill the bias term
    if (bias_term_) {
      vector<int> bias_shape(1, N_);
      this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
      if (Caffe::getThreadId() == 0) {
        shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
            this->layer_param_.inner_product_param().bias_filler()));
        bias_filler->Fill(this->blobs_[1].get());
      }
      Dtype* bias = this->blobs_[1]->mutable_cpu_data(); 
      MPI_Bcast(bias, this->blobs_[1]->count(), MPI_FLOAT, 0, MPI_COMM_WORLD);
    }
  }  // parameter initialization
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}
void InnerProductLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const int num_output = this->layer_param_.inner_product_param().num_output();
  bias_term_ = this->layer_param_.inner_product_param().bias_term();  // bool: whether to use a bias term
  transpose_ = this->layer_param_.inner_product_param().transpose();  // bool: if true, weights are stored transposed
  N_ = num_output;  // number of output neurons of the fully connected layer
  const int axis = bottom[0]->CanonicalAxisIndex(
      this->layer_param_.inner_product_param().axis());
  // Dimensions starting from "axis" are "flattened" into a single
  // length K_ vector. For example, if bottom[0]'s shape is (N, C, H, W),
  // and axis == 1, N inner products with dimension CHW are performed.
  K_ = bottom[0]->count(axis);  // length of a single sample's feature vector
  // Check if we need to set up the weights
  if (this->blobs_.size() > 0) {  // parameters already exist (e.g. loaded from a snapshot)
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {  // both a weight blob and a bias blob
      this->blobs_.resize(2);
    } else {  // otherwise only the weight blob
      this->blobs_.resize(1);
    }
    // Initialize the weights
    vector<int> weight_shape(2);
    if (transpose_) {  // weights are stored transposed
      weight_shape[0] = K_;
      weight_shape[1] = N_;
    } else {
      weight_shape[0] = N_;
      weight_shape[1] = K_;
    }
    this->blobs_[0].reset(new Blob<Dtype>(weight_shape));  // weight dimensions depend on the transpose flag
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.inner_product_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());  // fill W with the weight filler
    // If necessary, initialize and fill the bias term
    if (bias_term_) {  // bias term enabled
      vector<int> bias_shape(1, N_);
      this->blobs_[1].reset(new Blob<Dtype>(bias_shape));  // shape the bias blob
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.inner_product_param().bias_filler()));  // get the bias filler
      bias_filler->Fill(this->blobs_[1].get());  // fill the bias with the bias filler
    }
  }  // parameter initialization
  this->param_propagate_down_.resize(this->blobs_.size(), true);  // propagate gradients to all parameter blobs
}
Example #10
void InnerProductLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  CHECK_EQ(bottom.size(), 1) << "IP Layer takes a single blob as input.";
  CHECK_EQ(top->size(), 1) << "IP Layer takes a single blob as output.";
  const int num_output = this->layer_param_.inner_product_param().num_output();
  bias_term_ = this->layer_param_.inner_product_param().bias_term();
  // Figure out the dimensions
  M_ = bottom[0]->num();
  K_ = bottom[0]->count() / bottom[0]->num();
  N_ = num_output;
  (*top)[0]->Reshape(bottom[0]->num(), num_output, 1, 1, 1);
  // Check if we need to set up the weights
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize the weight
    this->blobs_[0].reset(new Blob<Dtype>(1, 1, 1, N_, K_));
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.inner_product_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the bias term
    if (bias_term_) {
      this->blobs_[1].reset(new Blob<Dtype>(1, 1, 1, 1, N_));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.inner_product_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }  // parameter initialization
  // Setting up the bias multiplier
  if (bias_term_) {
    bias_multiplier_.reset(new SyncedMemory(M_ * sizeof(Dtype)));
    Dtype* bias_multiplier_data =
        reinterpret_cast<Dtype*>(bias_multiplier_->mutable_cpu_data());
    for (int i = 0; i < M_; ++i) {
        bias_multiplier_data[i] = 1.;
    }
  }
}
Example #11
void WeightPlusLayer<Dtype>::LayerSetUp(
	const vector<Blob<Dtype>*>& bottom,
	const vector<Blob<Dtype>*>& top){
	batch_ = bottom[0]->num();
	dim_ = this->layer_param_.weight_plus_param().dim();
	CHECK_EQ(bottom[0]->channels(), dim_)
		<< "Weight Plus Layer: the code length should match.";
	this->blobs_.resize(1); // for the scale hashing
	vector<int> weight_shape(1);
	weight_shape[0] = dim_; 
	this->blobs_[0].reset(new Blob<Dtype>(weight_shape));
	shared_ptr<Filler<Dtype>> weight_filler(GetFiller<Dtype>(
		this->layer_param_.weight_plus_param().weight_filler()));
	weight_filler->Fill(this->blobs_[0].get()); // the weight is 1 first
	this->param_propagate_down_.resize(this->blobs_.size(), true);
	weight_pow_.Reshape(dim_, 1, 1, 1);
	weight_two_.Reshape(dim_, 1, 1, 1);
	data_meta_.Reshape(batch_, dim_, 1, 1);
}
void SparseInnerProductLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // three bottom blobs are: value, indices and ptr
  M_ = bottom[2]->count() - 1;
  K_ = this->layer_param_.sparse_inner_product_param().input_dim();
  N_ = this->layer_param_.sparse_inner_product_param().num_output();
  bias_term_ = this->layer_param_.sparse_inner_product_param().bias_term();
  transpose_ = this->layer_param_.sparse_inner_product_param().transpose();
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize the weights
    vector<int> weight_shape(2);
    if (transpose_) {
      weight_shape[0] = K_;
      weight_shape[1] = N_;
    } else {
      weight_shape[0] = N_;
      weight_shape[1] = K_;
    }
    this->blobs_[0].reset(new Blob<Dtype>(weight_shape));
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.sparse_inner_product_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the bias term
    if (bias_term_) {
      vector<int> bias_shape(1, N_);
      this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.sparse_inner_product_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }  // parameter initialization
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}
Example #13
void InnerProductLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  Layer<Dtype>::SetUp(bottom, top);
  const int num_output = this->layer_param_.inner_product_param().num_output();
  bias_term_ = this->layer_param_.inner_product_param().bias_term();
  // Figure out the dimensions
  M_ = bottom[0]->num();
  K_ = bottom[0]->count() / bottom[0]->num();
  N_ = num_output;
  (*top)[0]->Reshape(bottom[0]->num(), num_output, 1, 1);
  // Check if we need to set up the weights
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize the weight
    this->blobs_[0].reset(new Blob<Dtype>(1, 1, N_, K_));
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.inner_product_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the bias term
    if (bias_term_) {
      this->blobs_[1].reset(new Blob<Dtype>(1, 1, 1, N_));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.inner_product_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }  // parameter initialization
  // Setting up the bias multiplier
  if (bias_term_) {
    bias_multiplier_.Reshape(1, 1, 1, M_);
    caffe_set(M_, Dtype(1), bias_multiplier_.mutable_cpu_data());
  }
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}
Example #14
void EmbedLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  N_ = this->layer_param_.embed_param().num_output();
  CHECK_GT(N_, 0) << "EmbedLayer num_output must be positive.";
  K_ = this->layer_param_.embed_param().input_dim();
  CHECK_GT(K_, 0) << "EmbedLayer input_dim must be positive.";
  bias_term_ = this->layer_param_.embed_param().bias_term();
  // Check if we need to set up the weights
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize the weights --
    // transposed from InnerProductLayer for spatial locality.
    vector<int> weight_shape(2);
    weight_shape[0] = K_;
    weight_shape[1] = N_;
    this->blobs_[0].reset(new Blob<Dtype>(weight_shape));
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.embed_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the bias term
    if (bias_term_) {
      vector<int> bias_shape(1, N_);
      this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.embed_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }  // parameter initialization
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}
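
The (K_, N_) weight layout chosen above pays off at lookup time: each input id addresses one contiguous row of length N_. A sketch of the lookup loop (bias handling and the M_/top setup from Reshape are omitted here):

void EmbedLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* weight = this->blobs_[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  for (int n = 0; n < M_; ++n) {
    const int index = static_cast<int>(bottom_data[n]);
    DCHECK_GE(index, 0);
    DCHECK_LT(index, K_);
    // copy row `index` (a contiguous block of N_ values) into the n-th output row
    caffe_copy(N_, weight + index * N_, top_data + n * N_);
  }
}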
void IdToWeightMappingLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  const int num_output = this->layer_param_.id_to_weight_mapping_param().num_output();
  
  K_ = this->layer_param_.id_to_weight_mapping_param().max_ids();
  N_ = num_output;
  CHECK_EQ(bottom[0]->count(), bottom[0]->num());

  // Check if we need to set up the weights
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {

    this->blobs_.resize(1);
    // Initialize the weight
    this->blobs_[0].reset(new Blob<Dtype>(K_, N_, 1, 1));
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.id_to_weight_mapping_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
  }  // parameter initialization
  this->param_propagate_down_.resize(this->blobs_.size(), true);

  // Debug check: report the average squared value and the max of the first 10
  // weight rows (hard-codes a row length of 64, i.e. assumes N_ == 64).
  float norm = 0, maxval = -1;
  const Dtype* blob_data = this->blobs_[0]->cpu_data();
  for (int i = 0; i < 10; ++i) {
    for (int j = 0; j < 64; ++j) {
      norm += static_cast<float>(blob_data[i*64 + j]) * static_cast<float>(blob_data[i*64 + j]);
      if (static_cast<float>(blob_data[i*64 + j]) > maxval)
        maxval = static_cast<float>(blob_data[i*64 + j]);
    }
  }
  LOG(INFO) << "Norm of init 10 vectors: " << (norm/640) << " , maxval: " << maxval;


}
Example #16
void BaseConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Configure the kernel size, padding, stride, and inputs.
  ConvolutionParameter conv_param = this->layer_param_.convolution_param();
  force_nd_im2col_ = conv_param.force_nd_im2col();
  channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis());
  const int num_axes = bottom[0]->num_axes();
  if (num_axes == 5 && channel_axis_ == 1 && bottom[0]->shape(2) == 1) {
    forced_3d_ = true;
  } else {
    forced_3d_ = false;
  }
  const int first_spatial_axis = channel_axis_ + 1 + forced_3d_;
  num_spatial_axes_ = num_axes - first_spatial_axis;
  CHECK_GE(num_spatial_axes_, 0);
  vector<int> spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1));
  // Setup filter kernel dimensions (kernel_shape_).
  kernel_shape_.Reshape(spatial_dim_blob_shape);
  int* kernel_shape_data = kernel_shape_.mutable_cpu_data();
  if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "kernel_h & kernel_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.kernel_size_size())
        << "Either kernel_size or kernel_h/w should be specified; not both.";
    kernel_shape_data[0] = conv_param.kernel_h();
    kernel_shape_data[1] = conv_param.kernel_w();
  } else {
    const int num_kernel_dims = conv_param.kernel_size_size();
    CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_)
        << "kernel_size must be specified once, or once per spatial dimension "
        << "(kernel_size specified " << num_kernel_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
      for (int i = 0; i < num_spatial_axes_; ++i) {
        kernel_shape_data[i] =
            conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i);
      }
  }
  for (int i = 0; i < num_spatial_axes_; ++i) {
    CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero.";
  }
  // Setup stride dimensions (stride_).
  stride_.Reshape(spatial_dim_blob_shape);
  int* stride_data = stride_.mutable_cpu_data();
  if (conv_param.has_stride_h() || conv_param.has_stride_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "stride_h & stride_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.stride_size())
        << "Either stride or stride_h/w should be specified; not both.";
    stride_data[0] = conv_param.stride_h();
    stride_data[1] = conv_param.stride_w();
  } else {
    const int num_stride_dims = conv_param.stride_size();
    CHECK(num_stride_dims == 0 || num_stride_dims == 1 ||
          num_stride_dims == num_spatial_axes_)
        << "stride must be specified once, or once per spatial dimension "
        << "(stride specified " << num_stride_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
    const int kDefaultStride = 1;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      stride_data[i] = (num_stride_dims == 0) ? kDefaultStride :
          conv_param.stride((num_stride_dims == 1) ? 0 : i);
      CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero.";
    }
  }
  // Setup pad dimensions (pad_).
  pad_.Reshape(spatial_dim_blob_shape);
  int* pad_data = pad_.mutable_cpu_data();
  if (conv_param.has_pad_h() || conv_param.has_pad_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "pad_h & pad_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.pad_size())
        << "Either pad or pad_h/w should be specified; not both.";
    pad_data[0] = conv_param.pad_h();
    pad_data[1] = conv_param.pad_w();
  } else {
    const int num_pad_dims = conv_param.pad_size();
    CHECK(num_pad_dims == 0 || num_pad_dims == 1 ||
          num_pad_dims == num_spatial_axes_)
        << "pad must be specified once, or once per spatial dimension "
        << "(pad specified " << num_pad_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
    const int kDefaultPad = 0;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      pad_data[i] = (num_pad_dims == 0) ? kDefaultPad :
          conv_param.pad((num_pad_dims == 1) ? 0 : i);
    }
  }
  // Setup dilation dimensions (dilation_).
  dilation_.Reshape(spatial_dim_blob_shape);
  int* dilation_data = dilation_.mutable_cpu_data();
  const int num_dilation_dims = conv_param.dilation_size();
  CHECK(num_dilation_dims == 0 || num_dilation_dims == 1 ||
        num_dilation_dims == num_spatial_axes_)
      << "dilation must be specified once, or once per spatial dimension "
      << "(dilation specified " << num_dilation_dims << " times; "
      << num_spatial_axes_ << " spatial dims).";
  const int kDefaultDilation = 1;
  for (int i = 0; i < num_spatial_axes_; ++i) {
    dilation_data[i] = (num_dilation_dims == 0) ? kDefaultDilation :
                       conv_param.dilation((num_dilation_dims == 1) ? 0 : i);
  }
  // Special case: im2col is the identity for 1x1 convolution with stride 1
  // and no padding, so flag for skipping the buffer and transformation.
  is_1x1_ = true;
  for (int i = 0; i < num_spatial_axes_; ++i) {
    is_1x1_ &=
        kernel_shape_data[i] == 1 && stride_data[i] == 1 && pad_data[i] == 0;
    if (!is_1x1_) { break; }
  }
  // Configure output channels and groups.
  channels_ = bottom[0]->shape(channel_axis_);
  num_output_ = this->layer_param_.convolution_param().num_output();
  CHECK_GT(num_output_, 0);
  group_ = this->layer_param_.convolution_param().group();
  CHECK_EQ(channels_ % group_, 0);
  CHECK_EQ(num_output_ % group_, 0)
      << "Number of output should be multiples of group.";
  if (reverse_dimensions()) {
    conv_out_channels_ = channels_;
    conv_in_channels_ = num_output_;
  } else {
    conv_out_channels_ = num_output_;
    conv_in_channels_ = channels_;
  }
  // Handle the parameters: weights and biases.
  // - blobs_[0] holds the filter weights
  // - blobs_[1] holds the biases (optional)
  vector<int> weight_shape(2);
  weight_shape[0] = conv_out_channels_;
  weight_shape[1] = conv_in_channels_ / group_;
  for (int i = 0; i < num_spatial_axes_; ++i) {
    weight_shape.push_back(kernel_shape_data[i]);
  }
  bias_term_ = this->layer_param_.convolution_param().bias_term();
  vector<int> bias_shape(bias_term_, num_output_);
  if (this->blobs_.size() > 0) {
    CHECK_EQ(1 + bias_term_, this->blobs_.size())
        << "Incorrect number of weight blobs.";
    // true_blob_shape is original blob_shape (n,c,h,w) in case of forced_3d_
    // where blob_shape is expanded to (n,c,1,h,w)
    vector<int> true_blob_shape = this->blobs_[0]->shape();
    if (forced_3d_) true_blob_shape.erase(true_blob_shape.begin()+2);
    if (weight_shape != true_blob_shape) {
      Blob<Dtype> weight_shaped_blob(weight_shape);
      LOG(FATAL) << "Incorrect weight shape: expected shape "
          << weight_shaped_blob.shape_string() << "; instead, shape was "
          << this->blobs_[0]->shape_string();
    }
    if (bias_term_ && bias_shape != this->blobs_[1]->shape()) {
      Blob<Dtype> bias_shaped_blob(bias_shape);
      LOG(FATAL) << "Incorrect bias shape: expected shape "
          << bias_shaped_blob.shape_string() << "; instead, shape was "
          << this->blobs_[1]->shape_string();
    }
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize and fill the weights:
    // output channels x input channels per-group x kernel height x kernel width
    this->blobs_[0].reset(new Blob<Dtype>(weight_shape));
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.convolution_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the biases.
    if (bias_term_) {
      this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.convolution_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }
  kernel_dim_ = this->blobs_[0]->count(1);
  weight_offset_ = conv_out_channels_ * kernel_dim_ / group_;
  // Propagate gradients to the parameters (as directed by backward pass).
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}
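
The kernel, pad, stride, and dilation values parsed above determine each output spatial dimension in the usual way (cf. ConvolutionLayer::compute_output_shape in upstream Caffe); a small self-contained sketch of that arithmetic:

// Effective kernel extent grows with dilation; e.g. a 224-wide input with
// kernel 3, pad 1, stride 1, dilation 1 stays 224 wide.
inline int conv_output_dim(int input_dim, int kernel, int pad, int stride,
                           int dilation) {
  const int kernel_extent = dilation * (kernel - 1) + 1;
  return (input_dim + 2 * pad - kernel_extent) / stride + 1;
}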
void ConvolutionPerforatedLayer<Dtype>::LayerSetUp(
      const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // Configure the kernel size, padding, stride, and inputs.
  ConvolutionParameter conv_param = this->layer_param_.convolution_param();
  if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) {
    CHECK_EQ(0, conv_param.kernel_size_size())
        << "Either kernel_size or kernel_h/w should be specified; not both.";
    kernel_h_ = conv_param.kernel_h();
    kernel_w_ = conv_param.kernel_w();
  } else {
    const int num_kernel_dims = conv_param.kernel_size_size();
    CHECK(num_kernel_dims == 1 || num_kernel_dims == 2)
        << "kernel_size must be specified once, or once per spatial dimension ";
    kernel_h_ = conv_param.kernel_size(0);
    kernel_w_ = conv_param.kernel_size((num_kernel_dims == 1) ? 0 : 1);
  }
  CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero.";
  CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero.";
  // Setup stride dimensions (stride_).
  if (conv_param.has_stride_h() || conv_param.has_stride_w()) {
    CHECK_EQ(0, conv_param.stride_size())
        << "Either stride or stride_h/w should be specified; not both.";
    stride_h_ = conv_param.stride_h();
    stride_w_ = conv_param.stride_w();
  } else {
    const int num_stride_dims = conv_param.stride_size();
    CHECK(num_stride_dims == 0 || num_stride_dims == 1 ||
          num_stride_dims == 2)
        << "stride must be specified once, or once per spatial dimension ";
    const int kDefaultStride = 1;
    stride_h_ = (num_stride_dims == 0) ? kDefaultStride :
        conv_param.stride(0);
    stride_w_ = (num_stride_dims == 0) ? kDefaultStride :
        conv_param.stride((num_stride_dims == 1) ? 0 : 1);
  }
  CHECK_GT(stride_h_, 0) << "Stride dimensions must be nonzero.";
  CHECK_GT(stride_w_, 0) << "Stride dimensions must be nonzero.";
  // Setup pad dimensions (pad_).
  if (conv_param.has_pad_h() || conv_param.has_pad_w()) {
    CHECK_EQ(0, conv_param.pad_size())
        << "Either pad or pad_h/w should be specified; not both.";
    pad_h_ = conv_param.pad_h();
    pad_w_ = conv_param.pad_w();
  } else {
    const int num_pad_dims = conv_param.pad_size();
    CHECK(num_pad_dims == 0 || num_pad_dims == 1 ||
          num_pad_dims == 2)
        << "pad must be specified once, or once per spatial dimension ";
    const int kDefaultPad = 0;
    pad_h_ = (num_pad_dims == 0) ? kDefaultPad :
        conv_param.pad(0);
    pad_w_ = (num_pad_dims == 0) ? kDefaultPad :
        conv_param.pad((num_pad_dims == 1) ? 0 : 1);
  }
  // Configure output channels and groups.
  channels_ = bottom[0]->channels();
  num_output_ = this->layer_param_.convolution_param().num_output();
  CHECK_GT(num_output_, 0);
  group_ = this->layer_param_.convolution_param().group();
  CHECK_EQ(channels_ % group_, 0);
  CHECK_EQ(num_output_ % group_, 0)
      << "Number of output should be multiples of group.";
  // Handle the parameters: weights and biases.
  // - blobs_[0] holds the filter weights
  // - blobs_[1] holds the biases (optional)
  vector<int> weight_shape(4);
  weight_shape[0] = num_output_;
  weight_shape[1] = channels_ / group_;
  weight_shape[2] = kernel_h_;
  weight_shape[3] = kernel_w_;
  bias_term_ = this->layer_param_.convolution_param().bias_term();
  vector<int> bias_shape(bias_term_, num_output_);
  if (this->blobs_.size() > 0) {
    CHECK_EQ(1 + bias_term_, this->blobs_.size())
        << "Incorrect number of weight blobs.";
    if (weight_shape != this->blobs_[0]->shape()) {
      Blob<Dtype> weight_shaped_blob(weight_shape);
      LOG(FATAL) << "Incorrect weight shape: expected shape "
          << weight_shaped_blob.shape_string() << "; instead, shape was "
          << this->blobs_[0]->shape_string();
    }
    if (bias_term_ && bias_shape != this->blobs_[1]->shape()) {
      Blob<Dtype> bias_shaped_blob(bias_shape);
      LOG(FATAL) << "Incorrect bias shape: expected shape "
          << bias_shaped_blob.shape_string() << "; instead, shape was "
          << this->blobs_[1]->shape_string();
    }
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize and fill the weights:
    // output channels x input channels per-group x kernel height x kernel width
    this->blobs_[0].reset(new Blob<Dtype>(weight_shape));
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.convolution_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the biases.
    if (bias_term_) {
      this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.convolution_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }
  // Propagate gradients to the parameters (as directed by backward pass).
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}
Example #18
void ConvolutionLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  Layer<Dtype>::SetUp(bottom, top);
  LOG(INFO)<<"start setup conv layer";
  //kernel_size_ = this->layer_param_.convolution_param().kernel_size();
  if(this->layer_param_.convolution_param().has_kernel_size()){
	kernel_h_=kernel_w_=kernel_d_=this->layer_param_.convolution_param().kernel_size();
  }else{
	kernel_h_=this->layer_param_.convolution_param().kernel_h();
	kernel_w_=this->layer_param_.convolution_param().kernel_w();
	kernel_d_=this->layer_param_.convolution_param().kernel_d();
  }
  if(this->layer_param_.convolution_param().has_stride()){
	stride_h_=stride_w_=stride_d_=this->layer_param_.convolution_param().stride();
  }else{
	stride_h_=this->layer_param_.convolution_param().stride_h();
	stride_w_=this->layer_param_.convolution_param().stride_w();
	stride_d_=this->layer_param_.convolution_param().stride_d();
  }
  
  if(this->layer_param_.convolution_param().has_pad()){
	pad_h_=pad_w_=pad_d_=this->layer_param_.convolution_param().pad();
  }else{
	pad_h_=this->layer_param_.convolution_param().pad_h();
	pad_w_=this->layer_param_.convolution_param().pad_w();
	pad_d_=this->layer_param_.convolution_param().pad_d();
  }
  
  //stride_ = this->layer_param_.convolution_param().stride();
  group_ = this->layer_param_.convolution_param().group();
 // pad_ = this->layer_param_.convolution_param().pad();
  num_ = bottom[0]->num();
  channels_ = bottom[0]->channels();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  depth_ = bottom[0]->depth();
  // TODO: generalize to handle inputs of different shapes.
  for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) {
    CHECK_EQ(num_, bottom[bottom_id]->num()) << "Inputs must have same num.";
    CHECK_EQ(channels_, bottom[bottom_id]->channels())
        << "Inputs must have same channels.";
    CHECK_EQ(height_, bottom[bottom_id]->height())
        << "Inputs must have same height.";
    CHECK_EQ(width_, bottom[bottom_id]->width())
        << "Inputs must have same width.";
	CHECK_EQ(depth_, bottom[bottom_id]->depth())
        << "Inputs must have same depth.";
  }
  num_output_ = this->layer_param_.convolution_param().num_output();
  LOG(INFO)<< "num of output from param = " <<num_output_;
  CHECK_GT(num_output_, 0);
  CHECK_EQ(channels_ % group_, 0);
  // The im2col result buffer would only hold one image at a time to avoid
  // overly large memory usage.
  int height_out = (height_ + 2 * pad_h_ - kernel_h_) / stride_h_ + 1;
  int width_out = (width_ + 2 * pad_w_ - kernel_w_) / stride_w_ + 1;
  int depth_out = (depth_ + 2 * pad_d_ - kernel_d_) / stride_d_ + 1;
  col_buffer_.Reshape(
      1, channels_ * kernel_h_ * kernel_w_* kernel_d_, height_out, width_out, depth_out);
	  
   LOG(INFO)<<"Done col_buffer_.Reshape";
  // Set the parameters
  CHECK_EQ(num_output_ % group_, 0)
      << "Number of output should be multiples of group.";
  bias_term_ = this->layer_param_.convolution_param().bias_term();
  // Figure out the dimensions for individual gemms.
  M_ = num_output_ / group_;
  //K_ = channels_ * kernel_size_ * kernel_size_ / group_;
  //N_ = height_out * width_out;
  K_ = channels_ * kernel_h_ * kernel_w_ * kernel_d_ / group_;
  N_ = height_out * width_out * depth_out;
  for (int top_id = 0; top_id < top->size(); ++top_id) {
    (*top)[top_id]->Reshape(num_, num_output_, height_out, width_out, depth_out);
  }
  LOG(INFO)<<"Done (*top)[top_id]->Reshape";
  // Check if we need to set up the weights
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
	
	LOG(INFO)<<"start seting up weight";
    // Intialize the weight
    this->blobs_[0].reset(new Blob<Dtype>(
        num_output_, channels_ / group_, kernel_h_, kernel_w_, kernel_d_));
    // fill the weights
	
	LOG(INFO)<<"num_output = "<<num_output_<<" chanels /group = " << channels_ <<"/" <<group_<<" =" <<channels_ / group_;
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.convolution_param().weight_filler()));
    LOG(INFO)<<"weight filler start  fill weight";
    weight_filler->Fill(this->blobs_[0].get());
	
	 LOG(INFO)<<"done setup weight";
    // If necessary, initialize and fill the bias term
    if (bias_term_) {
	   LOG(INFO)<<"convolution layer bias setting: there is/are " << num_output_ << " of bias";
      this->blobs_[1].reset(new Blob<Dtype>(1, 1, 1, 1, num_output_));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.convolution_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }
  // Set up the bias filler
  if (bias_term_) {
    bias_multiplier_.reset(new SyncedMemory(N_ * sizeof(Dtype)));
    Dtype* bias_multiplier_data =
        reinterpret_cast<Dtype*>(bias_multiplier_->mutable_cpu_data());
    for (int i = 0; i < N_; ++i) {
        bias_multiplier_data[i] = 1.;
    }
  }
  
  LOG(INFO)<<"Done setup conv layer";
}
Example #19
void ConvolutionLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  Layer<Dtype>::SetUp(bottom, top);
  kernel_size_ = this->layer_param_.convolution_param().kernel_size();
  stride_ = this->layer_param_.convolution_param().stride();
  group_ = this->layer_param_.convolution_param().group();
  pad_ = this->layer_param_.convolution_param().pad();
  num_ = bottom[0]->num();
  channels_ = bottom[0]->channels();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  num_output_ = this->layer_param_.convolution_param().num_output();
  CHECK_GT(num_output_, 0);
  CHECK_EQ(channels_ % group_, 0);
  // The im2col result buffer would only hold one image at a time to avoid
  // overly large memory usage.
  int height_out = (height_ + 2 * pad_ - kernel_size_) / stride_ + 1;
  int width_out = (width_ + 2 * pad_ - kernel_size_) / stride_ + 1;
  col_buffer_.Reshape(
      1, channels_ * kernel_size_ * kernel_size_, height_out, width_out);
  // Set the parameters
  CHECK_EQ(num_output_ % group_, 0)
      << "Number of output should be multiples of group.";
  bias_term_ = this->layer_param_.convolution_param().bias_term();
  // Figure out the dimensions for individual gemms.
  M_ = num_output_ / group_;
  K_ = channels_ * kernel_size_ * kernel_size_ / group_;
  N_ = height_out * width_out;
  (*top)[0]->Reshape(bottom[0]->num(), num_output_, height_out, width_out);
  // Check if we need to set up the weights
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize the weight
    this->blobs_[0].reset(new Blob<Dtype>(
        num_output_, channels_ / group_, kernel_size_, kernel_size_));
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.convolution_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the bias term
    if (bias_term_) {
      this->blobs_[1].reset(new Blob<Dtype>(1, 1, 1, num_output_));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.convolution_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }
  // Set up the bias filler
  if (bias_term_) {
    bias_multiplier_.reset(new SyncedMemory(N_ * sizeof(Dtype)));
    Dtype* bias_multiplier_data =
        reinterpret_cast<Dtype*>(bias_multiplier_->mutable_cpu_data());
    for (int i = 0; i < N_; ++i) {
        bias_multiplier_data[i] = 1.;
    }
  }
}
void ConvolutionSKLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
		const vector<Blob<Dtype>*>& top) {
	LOG(INFO)<< "Setting up CONV_SK_LAYER";

	CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, "
	<< "corresponding to (num, channels, height, width)";
	// Configure the kernel size, padding, stride, and inputs.
	ConvolutionParameter conv_param = this->layer_param_.convolution_param();
	CHECK(!conv_param.has_kernel_size() !=
			!(conv_param.has_kernel_h() && conv_param.has_kernel_w()))
	<< "Filter size is kernel_size OR kernel_h and kernel_w; not both";
	CHECK(conv_param.has_kernel_size() ||
			(conv_param.has_kernel_h() && conv_param.has_kernel_w()))
	<< "For non-square filters both kernel_h and kernel_w are required.";
	CHECK((!conv_param.has_pad() && conv_param.has_pad_h()
					&& conv_param.has_pad_w())
			|| (!conv_param.has_pad_h() && !conv_param.has_pad_w()))
	<< "pad is pad OR pad_h and pad_w are required.";
	CHECK((!conv_param.has_stride() && conv_param.has_stride_h()
					&& conv_param.has_stride_w())
			|| (!conv_param.has_stride_h() && !conv_param.has_stride_w()))
	<< "Stride is stride OR stride_h and stride_w are required.";
	if (conv_param.has_kernel_size()) {
		kernel_h_ = kernel_w_ = conv_param.kernel_size();
	} else {
		kernel_h_ = conv_param.kernel_h();
		kernel_w_ = conv_param.kernel_w();
	}
	CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero.";
	CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero.";
	if (!conv_param.has_pad_h()) {
		pad_h_ = pad_w_ = conv_param.pad();
	} else {
		pad_h_ = conv_param.pad_h();
		pad_w_ = conv_param.pad_w();
	}

	CHECK_EQ(this->pad_h_, 0) << "pad_h_ must be 0";
	CHECK_EQ(this->pad_w_, 0) << "pad_w_ must be 0";
	if (!conv_param.has_stride_h()) {
		stride_h_ = stride_w_ = conv_param.stride();
	} else {
		stride_h_ = conv_param.stride_h();
		stride_w_ = conv_param.stride_w();
	}
	if (!conv_param.has_kstride_h()) {
		kstride_h_ = kstride_w_ = conv_param.kstride();
	} else {
		kstride_h_ = conv_param.kstride_h();
		kstride_w_ = conv_param.kstride_w();
	}
	// Special case: im2col is the identity for 1x1 convolution with stride 1
	// and no padding, so flag for skipping the buffer and transformation.
	is_1x1_ = kernel_w_ == 1 && kernel_h_ == 1
	&& stride_h_ == 1 && stride_w_ == 1 && pad_h_ == 0 && pad_w_ == 0;
	// Configure output channels and groups.
	channels_ = bottom[0]->channels();
	num_output_ = this->layer_param_.convolution_param().num_output();
	CHECK_GT(num_output_, 0);
	group_ = this->layer_param_.convolution_param().group();
	CHECK_EQ(channels_ % group_, 0);
	CHECK_EQ(num_output_ % group_, 0)
	<< "Number of output should be multiples of group.";
	if (reverse_dimensions()) {
		conv_out_channels_ = channels_;
		conv_in_channels_ = num_output_;
	} else {
		conv_out_channels_ = num_output_;
		conv_in_channels_ = channels_;
	}
	// Handle the parameters: weights and biases.
	// - blobs_[0] holds the filter weights
	// - blobs_[1] holds the biases (optional)
	bias_term_ = this->layer_param_.convolution_param().bias_term();
	if (this->blobs_.size() > 0) {
		LOG(INFO) << "Skipping parameter initialization";
	} else {
		if (bias_term_) {
			this->blobs_.resize(2);
		} else {
			this->blobs_.resize(1);
		}
		// Initialize and fill the weights:
		// output channels x input channels per-group x kernel height x kernel width
		this->blobs_[0].reset(new Blob<Dtype>(
						conv_out_channels_, conv_in_channels_ / group_, kernel_h_, kernel_w_));
		shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
						this->layer_param_.convolution_param().weight_filler()));
		weight_filler->Fill(this->blobs_[0].get());
		// If necessary, initialize and fill the biases.
		if (bias_term_) {
			vector<int> bias_shape(1, num_output_);
			this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
			shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
							this->layer_param_.convolution_param().bias_filler()));
			bias_filler->Fill(this->blobs_[1].get());
		}
	}
	// Propagate gradients to the parameters (as directed by backward pass).
	this->param_propagate_down_.resize(this->blobs_.size(), true);

}
Example #21
void BaseConvolutionNDLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Configure the kernel size, padding, stride, and inputs.
  ConvolutionParameter conv_param = this->layer_param_.convolution_param();
  channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis());
  const int first_spatial_axis = channel_axis_ + 1;
  const int num_axes = bottom[0]->num_axes();
  num_spatial_axes_ = num_axes - first_spatial_axis;
  CHECK_GE(num_spatial_axes_, 1);
  // Setup input dimensions (input_shape_).
  vector<int> bottom_dim_blob_shape(1, num_spatial_axes_ + 1);
  input_shape_.Reshape(bottom_dim_blob_shape);
  int* input_shape_data = input_shape_.mutable_cpu_data();
  for (int i = 0; i < num_spatial_axes_ + 1; ++i) {
    input_shape_data[i] = bottom[0]->shape(channel_axis_ + i);
  }
  vector<int> spatial_dim_blob_shape(1, num_spatial_axes_);
  // Setup filter kernel dimensions (kernel_shape_).
  kernel_shape_.Reshape(spatial_dim_blob_shape);
  int* kernel_shape_data = kernel_shape_.mutable_cpu_data();
  if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "kernel_h & kernel_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.kernel_size_size())
        << "Either kernel_size or kernel_h/w should be specified; not both.";
    kernel_shape_data[0] = conv_param.kernel_h();
    kernel_shape_data[1] = conv_param.kernel_w();
  } else {
    const int num_kernel_dims = conv_param.kernel_size_size();
    CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_)
        << "kernel_size must be specified once, or once per spatial dimension "
        << "(kernel_size specified " << num_kernel_dims << " times; "
        << num_spatial_axes_ << " spatial dims);";
      for (int i = 0; i < num_spatial_axes_; ++i) {
        kernel_shape_data[i] =
            conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i);
      }
  }
  for (int i = 0; i < num_spatial_axes_; ++i) {
    CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero.";
  }
  // Setup stride dimensions (stride_).
  stride_.Reshape(spatial_dim_blob_shape);
  int* stride_data = stride_.mutable_cpu_data();
  if (conv_param.has_stride_h() || conv_param.has_stride_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "stride_h & stride_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.stride_size())
        << "Either stride or stride_h/w should be specified; not both.";
    stride_data[0] = conv_param.stride_h();
    stride_data[1] = conv_param.stride_w();
  } else {
    const int num_stride_dims = conv_param.stride_size();
    CHECK(num_stride_dims == 0 || num_stride_dims == 1 ||
          num_stride_dims == num_spatial_axes_)
        << "stride must be specified once, or once per spatial dimension "
        << "(stride specified " << num_stride_dims << " times; "
        << num_spatial_axes_ << " spatial dims);";
    const int kDefaultStride = 1;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      stride_data[i] = (num_stride_dims == 0) ? kDefaultStride :
          conv_param.stride((num_stride_dims == 1) ? 0 : i);
      CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero.";
    }
  }
  // Setup pad dimensions (pad_).
  pad_.Reshape(spatial_dim_blob_shape);
  int* pad_data = pad_.mutable_cpu_data();
  if (conv_param.has_pad_h() || conv_param.has_pad_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "pad_h & pad_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.pad_size())
        << "Either pad or pad_h/w should be specified; not both.";
    pad_data[0] = conv_param.pad_h();
    pad_data[1] = conv_param.pad_w();
  } else {
    const int num_pad_dims = conv_param.pad_size();
    CHECK(num_pad_dims == 0 || num_pad_dims == 1 ||
          num_pad_dims == num_spatial_axes_)
        << "pad must be specified once, or once per spatial dimension "
        << "(pad specified " << num_pad_dims << " times; "
        << num_spatial_axes_ << " spatial dims);";
    const int kDefaultPad = 0;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      pad_data[i] = (num_pad_dims == 0) ? kDefaultPad :
          conv_param.pad((num_pad_dims == 1) ? 0 : i);
    }
  }
  // Special case: im2col is the identity for 1x1 convolution with stride 1
  // and no padding, so flag for skipping the buffer and transformation.
  is_1x1_ = true;
  for (int i = 0; i < num_spatial_axes_; ++i) {
    is_1x1_ &=
        kernel_shape_data[i] == 1 && stride_data[i] == 1 && pad_data[i] == 0;
    if (!is_1x1_) { break; }
  }
  // Configure output channels and groups.
  channels_ = bottom[0]->shape(channel_axis_);
  num_output_ = this->layer_param_.convolution_param().num_output();
  CHECK_GT(num_output_, 0);
  group_ = this->layer_param_.convolution_param().group();
  CHECK_EQ(channels_ % group_, 0);
  CHECK_EQ(num_output_ % group_, 0)
      << "Number of output should be multiples of group.";
  if (reverse_dimensions()) {
    conv_out_channels_ = channels_;
    conv_in_channels_ = num_output_;
  } else {
    conv_out_channels_ = num_output_;
    conv_in_channels_ = channels_;
  }
  // Handle the parameters: weights and biases.
  // - blobs_[0] holds the filter weights
  // - blobs_[1] holds the biases (optional)
  bias_term_ = this->layer_param_.convolution_param().bias_term();
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize and fill the weights:
    // output channels x input channels per-group x kernel height x kernel width
    vector<int> weight_shape(2);
    weight_shape[0] = conv_out_channels_;
    weight_shape[1] = conv_in_channels_ / group_;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      weight_shape.push_back(kernel_shape_data[i]);
    }
    this->blobs_[0].reset(new Blob<Dtype>(weight_shape));
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.convolution_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the biases.
    if (bias_term_) {
      vector<int> bias_shape(1, num_output_);
      this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.convolution_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }
  // Propagate gradients to the parameters (as directed by backward pass).
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}
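The kernel/stride/pad parsing above accepts each parameter either once (broadcast to every spatial axis) or once per axis, and the weight blob ends up shaped conv_out_channels x (conv_in_channels / group) x kernel dims. A small Caffe-free sketch of that shape arithmetic, with made-up parameter values (not taken from any of the examples here):

#include <cstdio>
#include <vector>

int main() {
  // Hypothetical configuration: 2 spatial axes, kernel given once, stride/pad per axis.
  const int num_spatial_axes = 2;
  const std::vector<int> kernel = {3};      // single value -> broadcast to 3x3
  const std::vector<int> stride = {2, 2};
  const std::vector<int> pad    = {1, 1};
  const int num_output = 64, channels = 16, group = 2;

  // Same broadcast rule as the loops above: one value -> reuse index 0.
  std::vector<int> kernel_shape(num_spatial_axes);
  std::vector<int> stride_shape(num_spatial_axes);
  std::vector<int> pad_shape(num_spatial_axes);
  for (int i = 0; i < num_spatial_axes; ++i) {
    kernel_shape[i] = kernel[(kernel.size() == 1) ? 0 : i];
    stride_shape[i] = stride.empty() ? 1 : stride[(stride.size() == 1) ? 0 : i];
    pad_shape[i]    = pad.empty()    ? 0 : pad[(pad.size() == 1) ? 0 : i];
  }

  // Weight blob shape: conv_out_channels x (conv_in_channels / group) x kernel dims.
  std::vector<int> weight_shape = {num_output, channels / group};
  for (int i = 0; i < num_spatial_axes; ++i) weight_shape.push_back(kernel_shape[i]);

  printf("weight shape:");
  for (size_t i = 0; i < weight_shape.size(); ++i) printf(" %d", weight_shape[i]);
  printf("\n");  // -> weight shape: 64 8 3 3
  return 0;
}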
void CmpInnerProductLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const int num_output = this->layer_param_.inner_product_param().num_output();
  bias_term_ = this->layer_param_.inner_product_param().bias_term();
  transpose_ = this->layer_param_.inner_product_param().transpose();
  N_ = num_output;
  const int axis = bottom[0]->CanonicalAxisIndex(
      this->layer_param_.inner_product_param().axis());
  // Dimensions starting from "axis" are "flattened" into a single
  // length K_ vector. For example, if bottom[0]'s shape is (N, C, H, W),
  // and axis == 1, N inner products with dimension CHW are performed.
  K_ = bottom[0]->count(axis);
  // Check if we need to set up the weights
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize the weights
    vector<int> weight_shape(2);
    if (transpose_) {
      weight_shape[0] = K_;
      weight_shape[1] = N_;
    } else {
      weight_shape[0] = N_;
      weight_shape[1] = K_;
    }
    this->blobs_[0].reset(new Blob<Dtype>(weight_shape));
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.inner_product_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the bias term
    if (bias_term_) {
      vector<int> bias_shape(1, N_);
      this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.inner_product_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }  // parameter initialization
  this->param_propagate_down_.resize(this->blobs_.size(), true);
  
  this->sparse_ratio_ = this->layer_param_.inner_product_param().sparse_ratio();
  this->class_num_ = this->layer_param_.inner_product_param().class_num();
  this->quantize_term_ = this->layer_param_.inner_product_param().quantize_term();
  const int count = this->blobs_[0]->count();
  vector<int> mask_shape(1, count);
  this->masks_.Reshape(mask_shape);
  int* mask_data = this->masks_.mutable_cpu_data();
  caffe_set(count, 1, mask_data);


  if (quantize_term_) {
    this->indices_.Reshape(mask_shape);
    vector<int> cen_shape(1, class_num_);
    this->centroids_.Reshape(cen_shape);
    this->tmpDiff_.Reshape(cen_shape);
    this->freq_.Reshape(cen_shape);
  }

}
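The extra buffers set up at the end of this CmpInnerProductLayer hint at pruning and weight sharing: masks_ keeps one flag per weight, while indices_, centroids_, tmpDiff_ and freq_ hold class_num_ cluster centers and their statistics when quantize_term_ is on. The layer's actual forward/backward path is not shown in this excerpt; purely as an illustration of what a per-weight mask typically does, a sketch (apply_mask is a hypothetical helper, not part of the layer):

#include <cstdio>
#include <vector>

// Hypothetical helper: zero out pruned weights, one mask entry per weight,
// mirroring masks_.Reshape(mask_shape) with count == blobs_[0]->count().
static void apply_mask(std::vector<float>& weights, const std::vector<int>& mask) {
  for (size_t i = 0; i < weights.size(); ++i) {
    if (mask[i] == 0) weights[i] = 0.f;  // a pruned connection contributes nothing
  }
}

int main() {
  std::vector<float> w = {0.3f, -1.2f, 0.05f, 0.7f};
  std::vector<int> mask = {1, 1, 0, 1};  // e.g. derived from sparse_ratio_
  apply_mask(w, mask);
  for (size_t i = 0; i < w.size(); ++i) printf("%g ", w[i]);
  printf("\n");  // -> 0.3 -1.2 0 0.7
  return 0;
}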
void InnerProductLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) 
{
    const int num_output = this->layer_param_.inner_product_param().num_output();
    bias_term_ = this->layer_param_.inner_product_param().bias_term();
    transpose_ = this->layer_param_.inner_product_param().transpose();

    // Number of output neurons of the fully connected layer
    N_ = num_output;
    const int axis = bottom[0]->CanonicalAxisIndex(
    this->layer_param_.inner_product_param().axis());

    // K_: length of a single sample's feature vector
    // Dimensions starting from "axis" are "flattened" into a single
    // length K_ vector. For example, if bottom[0]'s shape is (N, C, H, W),
    // and axis == 1, N inner products with dimension CHW are performed.
    K_ = bottom[0]->count(axis);

    // In caffe.proto, LayerParameter has a repeated blobs field, but most net
    // definition (prototxt) files do not set it, so that case is handled here:
    // if this->blobs_.size() > 0 the parameter blobs already exist and
    // initialization is skipped; otherwise they are initialized below.
    // Check if we need to set up the weights
    if (this->blobs_.size() > 0) 
    {
        LOG(INFO) << "Skipping parameter initialization";
    } 
    else 
    {
        if (bias_term_) 
        {
            this->blobs_.resize(2);
        } 
        else 
        {
            this->blobs_.resize(1);
        }
    
        // Initialize the weights
        vector<int> weight_shape(2);

        if (transpose_) 
        {
            weight_shape[0] = K_;
            weight_shape[1] = N_;
        } 
        else 
        {
            weight_shape[0] = N_;
            weight_shape[1] = K_;
        }
    
        // blobs_[0] can be viewed as an N_ x K_ weight matrix. In practice the
        // data is just a contiguous block of memory; the matrix layout is only
        // a convention on top of it.
        this->blobs_[0].reset(new Blob<Dtype>(weight_shape));

        // fill the weights: a smart pointer to the configured filler is created here
        shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.inner_product_param().weight_filler()));

        weight_filler->Fill(this->blobs_[0].get());

        // If necessary, initialize and fill the bias term
        if (bias_term_) 
        {
            vector<int> bias_shape(1, N_);
            this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
            
            shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
              this->layer_param_.inner_product_param().bias_filler()));

            bias_filler->Fill(this->blobs_[1].get());
        }
    }  // parameter initialization

    // param_propagate_down_ is a data member inherited from Layer<Dtype>
    this->param_propagate_down_.resize(this->blobs_.size(), true);
}
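transpose_ only changes how the N_ x K_ weight matrix is laid out in blobs_[0] (K_ x N_ when transposed); the inner product itself is the same. A minimal sketch with made-up sizes, assuming row-major storage as in a Caffe blob:

#include <cstdio>
#include <vector>

// y[n] = sum_k W(n,k) * x[k], with W stored row-major either as N x K
// (transpose_ == false) or as K x N (transpose_ == true).
std::vector<float> inner_product(const std::vector<float>& W,
                                 const std::vector<float>& x,
                                 int N, int K, bool transposed) {
  std::vector<float> y(N, 0.f);
  for (int n = 0; n < N; ++n)
    for (int k = 0; k < K; ++k)
      y[n] += (transposed ? W[k * N + n] : W[n * K + k]) * x[k];
  return y;
}

int main() {
  const int N = 2, K = 3;
  std::vector<float> Wnk = {1, 2, 3, 4, 5, 6};  // N x K layout
  std::vector<float> Wkn = {1, 4, 2, 5, 3, 6};  // the same matrix, K x N layout
  std::vector<float> x = {1, 1, 1};
  std::vector<float> a = inner_product(Wnk, x, N, K, false);
  std::vector<float> b = inner_product(Wkn, x, N, K, true);
  printf("%g %g | %g %g\n", a[0], a[1], b[0], b[1]);  // -> 6 15 | 6 15
  return 0;
}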
Example #24
0
void
TiedConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype> *> &bottom,
                                        vector<Blob<Dtype> *> *top) {
  ConvolutionParameter conv_param = this->layer_param_.convolution_param();
  CHECK(!conv_param.has_kernel_size() !=
        !(conv_param.has_kernel_h() && conv_param.has_kernel_w()))
      << "Filter size is kernel_size OR kernel_h and kernel_w; not both";
  CHECK(conv_param.has_kernel_size() ||
        (conv_param.has_kernel_h() && conv_param.has_kernel_w()))
      << "For non-square filters both kernel_h and kernel_w are required.";
  CHECK((!conv_param.has_pad() && conv_param.has_pad_h() &&
         conv_param.has_pad_w()) ||
        (!conv_param.has_pad_h() && !conv_param.has_pad_w()))
      << "pad is pad OR pad_h and pad_w are required.";
  CHECK((!conv_param.has_stride() && conv_param.has_stride_h() &&
         conv_param.has_stride_w()) ||
        (!conv_param.has_stride_h() && !conv_param.has_stride_w()))
      << "Stride is stride OR stride_h and stride_w are required.";
  if (conv_param.has_kernel_size()) {
    kernel_h_ = kernel_w_ = conv_param.kernel_size();
  } else {
    kernel_h_ = conv_param.kernel_h();
    kernel_w_ = conv_param.kernel_w();
  }
  CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero.";
  CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero.";
  if (!conv_param.has_pad_h()) {
    pad_h_ = pad_w_ = conv_param.pad();
  } else {
    pad_h_ = conv_param.pad_h();
    pad_w_ = conv_param.pad_w();
  }
  if (!conv_param.has_stride_h()) {
    stride_h_ = stride_w_ = conv_param.stride();
  } else {
    stride_h_ = conv_param.stride_h();
    stride_w_ = conv_param.stride_w();
  }
  // Configure output channels and groups.
  channels_ = bottom[0]->channels();
  num_output_ = conv_param.num_output();
  CHECK_GT(num_output_, 0);
  group_ = conv_param.group();
  CHECK_EQ(channels_ % group_, 0);
  CHECK_EQ(num_output_ % group_, 0)
      << "Number of output should be multiples of group.";

  // Handle the parameters: weights and biases.
  // - blobs_[0] holds the filter weights
  // - blobs_[1] holds the biases (optional)
  bias_term_ = conv_param.bias_term();
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize the weight
    // output channels x input channels per-group x kernel height x kernel width
    this->blobs_[0].reset(
        new Blob<Dtype>(num_output_, channels_ / group_, kernel_h_, kernel_w_));
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(
        GetFiller<Dtype>(conv_param.weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the biases:
    // 1 x 1 x 1 x output channels.
    if (bias_term_) {
      this->blobs_[1].reset(new Blob<Dtype>(1, 1, 1, num_output_));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          conv_param.bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }
  // Propagate gradients to the parameters (as directed by backward pass).
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}
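The first CHECK in this TiedConvolutionLayer uses the `!a != !b` idiom as a logical XOR: exactly one of kernel_size or the full kernel_h/kernel_w pair may be present (the second CHECK then rejects the case where neither is given). A tiny sketch of the idiom's truth table:

#include <cstdio>

int main() {
  // CHECK(!a != !b) passes exactly when a XOR b: here a stands for
  // has_kernel_size() and b for has_kernel_h() && has_kernel_w().
  for (int a = 0; a <= 1; ++a) {
    for (int b = 0; b <= 1; ++b) {
      printf("a=%d b=%d  passes=%d\n", a, b, (!a != !b));
    }
  }
  return 0;
}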
void Convolution3DLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  CHECK_EQ(bottom.size(), 1) << "Conv Layer takes a single blob as input.";
  CHECK_EQ(top->size(), 1) << "Conv Layer takes a single blob as output.";

  kernel_size_ = this->layer_param_.convolution_param().kernel_size();
  kernel_depth_ = this->layer_param_.convolution_param().kernel_depth();
  stride_ = this->layer_param_.convolution_param().stride();
  temporal_stride_ = this->layer_param_.convolution_param().temporal_stride();
  pad_ = this->layer_param_.convolution_param().pad();
  temporal_pad_ = this->layer_param_.convolution_param().temporal_pad();
  num_ = bottom[0]->num();
  channels_ = bottom[0]->channels();
  length_ = bottom[0]->length();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  num_output_ = this->layer_param_.convolution_param().num_output();
  filter_group_ = this->layer_param_.convolution_param().filter_group();
  CHECK_GT(num_output_, 0);

  // number of output filters must be divisible by filter_group
  CHECK_EQ(num_output_ % filter_group_, 0);

  // The vol2col result buffer would only hold one image at a time to avoid
  // overly large memory usage.

  int height_out = (height_ + 2 * pad_ - kernel_size_) / stride_ + 1;
  int width_out = (width_ + 2 * pad_ - kernel_size_) / stride_ + 1;
  int length_out = (length_ + 2 * temporal_pad_ - kernel_depth_) / temporal_stride_ + 1;

  // buffer for one image
  col_buffer_.Reshape(
      1, channels_ * kernel_depth_ * kernel_size_ * kernel_size_, length_out, height_out, width_out);


  bias_term_ = this->layer_param_.convolution_param().bias_term();

  // Figure out the dimensions for individual gemms.
  M_ = num_output_ / filter_group_; // doing convolution filter_group_ times per volume
  K_ = channels_ * kernel_depth_ * kernel_size_ * kernel_size_;
  N_ = length_out * height_out * width_out;

  // output size
  (*top)[0]->Reshape(bottom[0]->num(), num_output_, length_out, height_out, width_out);

  // Check if we need to set up the weights
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize the weights
    this->blobs_[0].reset(new Blob<Dtype>(
        num_output_, channels_, kernel_depth_, kernel_size_, kernel_size_));
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.convolution_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the bias term
    if (bias_term_) {
      this->blobs_[1].reset(new Blob<Dtype>(1, 1, 1, 1, num_output_));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.convolution_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }


  }

  // Set up the all-ones bias multiplier (used to add the bias via gemm)
  if (bias_term_) {
    bias_multiplier_.reset(new SyncedMemory(N_ * sizeof(Dtype)));
    Dtype* bias_multiplier_data =
        reinterpret_cast<Dtype*>(bias_multiplier_->mutable_cpu_data());
    for (int i = 0; i < N_; ++i) {
        bias_multiplier_data[i] = 1.;
    }
  }
}
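The output sizes above follow the usual convolution formula out = (in + 2 * pad - kernel) / stride + 1, applied separately to the temporal axis with kernel_depth, temporal_stride and temporal_pad. A quick numeric check with made-up input sizes (not taken from the source):

#include <cstdio>

// out = (in + 2*pad - kernel) / stride + 1, integer division as in the layer.
int conv_out(int in, int pad, int kernel, int stride) {
  return (in + 2 * pad - kernel) / stride + 1;
}

int main() {
  // Hypothetical 3D conv: 16-frame 112x112 input, 3x3x3 kernel, stride 1,
  // pad 1, temporal stride 1, temporal pad 1 (a C3D-style setting).
  int length_out = conv_out(16, 1, 3, 1);   // 16
  int height_out = conv_out(112, 1, 3, 1);  // 112
  int width_out  = conv_out(112, 1, 3, 1);  // 112
  // col_buffer_ holds one sample: channels*kd*k*k rows, length*height*width cols.
  int channels = 3, kernel_depth = 3, kernel_size = 3;
  long rows = (long)channels * kernel_depth * kernel_size * kernel_size;  // 81
  long cols = (long)length_out * height_out * width_out;                  // 200704
  printf("out: %d x %d x %d, col buffer: %ld x %ld\n",
         length_out, height_out, width_out, rows, cols);
  return 0;
}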
  void ZJQContextLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
                                     vector<Blob<Dtype>*>* top)
  {
    CHECK_EQ(bottom.size(), 2) << "Context Layer takes two blobs as input.";
    CHECK_EQ(top->size(), 1) << "Context Layer takes a single blob as output.";
    // Figure out the dimensions
    num_feat_map_ = bottom[0]->channels();
    height_ = bottom[0]->height();
    width_ = bottom[0]->width();
    context_dim_ = bottom[1]->count() / bottom[1]->num();

    (*top)[0]->Reshape(bottom[0]->num(), num_feat_map_, height_, width_);
    // Check if we need to set up the weights
    if (this->blobs_.size() > 0)
    {
      LOG(INFO) << "Skipping parameter initialization";
    }
    else
    {
      this->blobs_.resize(1);

      // Initialize the weight
      this->blobs_[0].reset(new Blob<Dtype>(1, num_feat_map_, context_dim_, 1));
      // fill the weights
      shared_ptr<Filler<Dtype> > weight_filler(
          GetFiller<Dtype>(this->layer_param_.weight_filler()));
      weight_filler->Fill(this->blobs_[0].get());
    }

    w_multi_context_.Reshape(1, num_feat_map_, 1, 1);

    {
      all_ones_.Reshape(1, 1, height_, width_);
      Dtype* all_ones = all_ones_.mutable_cpu_data();
      for (int i = 0; i < all_ones_.count(); ++i)
      {
        all_ones[i] = 1.0;
      }
    }

    {
      all_ones_sample_.Reshape(bottom[0]->num(), 1, 1, 1);
      Dtype* all_one_sample = all_ones_sample_.mutable_cpu_data();
      for (int i = 0; i < all_ones_sample_.count(); ++i)
      {
        all_one_sample[i] = 1.0;
      }
    }

    tmp_.Reshape(1, num_feat_map_, height_, width_);

    bias_.Reshape(1, 1, 1, num_feat_map_);

    bias_multiplier_.reset(new SyncedMemory(bottom[0]->num() * sizeof(Dtype)));
    Dtype* bias_multiplier_data =
    reinterpret_cast<Dtype*>(bias_multiplier_->mutable_cpu_data());
    for (int i = 0; i < bottom[0]->num(); ++i)
    {
      bias_multiplier_data[i] = 1.;
    }
  }
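Both this layer and the 3D convolution above build an all-ones bias_multiplier_: multiplying a bias vector by a ones vector via gemm is the standard Caffe trick for broadcasting the bias across spatial positions (or samples) in a single BLAS call. A Caffe-free sketch of the idea, with made-up sizes:

#include <cstdio>
#include <vector>

// output(MxN) += bias(Mx1) * ones(1xN): every column receives the same bias,
// which is what caffe_cpu_gemm with bias_multiplier_ achieves in one call.
int main() {
  const int M = 2, N = 4;                  // e.g. output channels x spatial positions
  std::vector<float> bias = {0.5f, -1.0f};
  std::vector<float> ones(N, 1.0f);        // the bias_multiplier_
  std::vector<float> out(M * N, 0.0f);     // pretend this already holds W * x
  for (int m = 0; m < M; ++m)
    for (int n = 0; n < N; ++n)
      out[m * N + n] += bias[m] * ones[n];  // rank-1 update == bias broadcast
  for (int m = 0; m < M; ++m) {
    for (int n = 0; n < N; ++n) printf("%6.1f", out[m * N + n]);
    printf("\n");
  }
  return 0;
}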
Example #27
0
void DeConvolutionLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  Layer<Dtype>::SetUp(bottom, top);
  // we need the output of the corresponding deconvolution in order to determine the size
  kernel_size_ = this->layer_param_.deconvolution_param().kernel_size();
  stride_ = this->layer_param_.deconvolution_param().stride();
  group_ = this->layer_param_.deconvolution_param().group();
  pad_ = this->layer_param_.deconvolution_param().pad();
  height_out_ = this->layer_param_.deconvolution_param().output_height();
  width_out_ = this->layer_param_.deconvolution_param().output_width();
  // JTS TODO check num_ <-> num_output
  num_ = bottom[0]->num();
  // TODO read channels_ and num_output_ from optional second input
  channels_ = this->layer_param_.deconvolution_param().output_channels();
  //channels_ = bottom[0]->channels();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  //num_output_ = this->layer_param_.deconvolution_param().num_output();
  num_output_ = this->layer_param_.deconvolution_param().output_channels();
  int inverse_num_out = bottom[0]->channels();
  CHECK_GT(inverse_num_out, 0);
  CHECK_EQ(channels_ % group_, 0);
  // init im2col result buffer
  //std::cout << height_ << " " << width_ << std::endl;
  //std::cout << "nout "<< num_output_ << " " << height_out_ << " " << width_out_ << std::endl;
  col_buffer_.Reshape(
      1, channels_ * kernel_size_ * kernel_size_, height_, width_);
  // Set the parameters
  CHECK_EQ(inverse_num_out % group_, 0)
      << "Number of output should be multiples of group.";
  bias_term_ = this->layer_param_.convolution_param().bias_term();
  // Figure out the dimensions for individual gemms.
  // JTS check N_ + K_
  M_ = inverse_num_out / group_;
  K_ = channels_ * kernel_size_ * kernel_size_ / group_;
  N_ = height_ * width_;
  (*top)[0]->Reshape(bottom[0]->num(), num_output_, height_out_, width_out_);
  // Check if we need to set up the weights
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize the weight
    this->blobs_[0].reset(new Blob<Dtype>(
        inverse_num_out, channels_ / group_, kernel_size_, kernel_size_));
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.deconvolution_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    //std::cout << "wcount " << this->blobs_[0]->count() << std::endl;
    // If necessary, initialize and fill the bias term
    if (bias_term_) {
      this->blobs_[1].reset(new Blob<Dtype>(1, 1, 1, num_output_));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.deconvolution_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }
  // Set up the all-ones bias multiplier (used to add the bias via gemm)
  if (bias_term_) {
    bias_multiplier_.reset(new SyncedMemory(N_ * sizeof(Dtype)));
    Dtype* bias_multiplier_data =
        reinterpret_cast<Dtype*>(bias_multiplier_->mutable_cpu_data());
    for (int i = 0; i < N_; ++i) {
        bias_multiplier_data[i] = 1.;
    }
  }
}
void CudnnNdConvolutionLayer<Dtype>::LayerSetUp(
  const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  ConvolutionParameter conv_param =
    this->layer_param_.convolution_param();
  // Configure the kernel size, padding, stride, and inputs.
  CHECK(conv_param.has_kernel_shape())
      << "Kernel shape is required.";
  if (conv_param.has_pad_shape()) {
    CHECK_EQ(conv_param.kernel_shape().dim_size(),
             conv_param.pad_shape().dim_size())
        << "Kernel and Pad shape don't match !";
  }
  if (conv_param.has_stride_shape()) {
    CHECK_EQ(conv_param.kernel_shape().dim_size(),
             conv_param.stride_shape().dim_size())
        << "Kernel and Stride shape don't match !";
  }
  for (int i = 0; i < conv_param.kernel_shape().dim_size(); ++i) {
    kernel_shape_.push_back(conv_param.kernel_shape().dim(i));
    CHECK_GT(kernel_shape_[i], 0) << "Filter dimensions cannot be zero.";
  }
  if (conv_param.has_pad_shape()) {
    for (int i = 0; i < conv_param.kernel_shape().dim_size(); ++i) {
      pad_shape_.push_back(conv_param.pad_shape().dim(i));
    }
  } else {
    pad_shape_ = std::vector<int>(kernel_shape_.size(), 0);
  }
  if (conv_param.has_stride_shape()) {
    for (int i = 0; i < conv_param.kernel_shape().dim_size(); ++i) {
      stride_shape_.push_back(conv_param.stride_shape().dim(i));
    }
  } else {
    stride_shape_ = std::vector<int>(kernel_shape_.size(), 1);
  }
  // Configure output channels and groups.
  channels_ = bottom[0]->shape(1);
  num_output_ = this->layer_param_.convolution_param().num_output();
  CHECK_GT(num_output_, 0);
  group_ = this->layer_param_.convolution_param().group();
  CHECK_EQ(channels_ % group_, 0);
  CHECK_EQ(num_output_ % group_, 0)
      << "Number of output should be multiples of group.";

  // Handle the parameters: weights and biases.
  // - blobs_[0] holds the filter weights
  // - blobs_[1] holds the biases (optional)
  bias_term_ = this->layer_param_.convolution_param().bias_term();

  vector<int> weight_shape(kernel_shape_);
  weight_shape.insert(weight_shape.begin(), channels_ / group_);
  weight_shape.insert(weight_shape.begin(), num_output_);

  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize and fill the weights:
    // output channels x input channels per-group x kernel height x kernel width
    this->blobs_[0].reset(new Blob<Dtype>(weight_shape));
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
          this->layer_param_.convolution_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the biases.
    if (bias_term_) {
      vector<int> bias_shape(1, num_output_);
      this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
            this->layer_param_.convolution_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }

  // Propagate gradients to the parameters (as directed by backward pass).
  this->param_propagate_down_.resize(this->blobs_.size(), true);

  // Initialize CUDA streams and cuDNN.
  stream_ = new cudaStream_t[this->group_ * CUDNN_STREAMS_PER_GROUP];
  handle_ = new cudnnHandle_t[this->group_ * CUDNN_STREAMS_PER_GROUP];
  workspaceSizeInBytes = 0;
  workspace_data_ = NULL;

  for (int g = 0; g < this->group_ * CUDNN_STREAMS_PER_GROUP; g++) {
    CUDA_CHECK(cudaStreamCreate(&stream_[g]));
    CUDNN_CHECK(cudnnCreate(&handle_[g]));
    CUDNN_CHECK(cudnnSetStream(handle_[g], stream_[g]));
  }

  // Set the indexing parameters.
  weight_shape[0] /= group_;
  weight_offset_ = 1;
  for (int i = 0; i < weight_shape.size(); ++i) {
    weight_offset_ *= weight_shape[i];
  }
  bias_offset_ = weight_shape[0];

  // Create filter descriptor.
  cudnn::createNdFilterDesc<Dtype>(&filter_desc_, weight_shape);

  bwd_filter_algo_= new cudnnConvolutionBwdFilterAlgo_t[bottom.size()];
  bwd_data_algo_  = new cudnnConvolutionBwdDataAlgo_t[bottom.size()];
  workspace_bwd_filter_sizes_ = new size_t[bottom.size()];
  workspace_bwd_data_sizes_ = new size_t[bottom.size()];
  workspace_ = new void*[this->group_ * CUDNN_STREAMS_PER_GROUP];
  // Create tensor descriptor(s) for data and corresponding convolution(s).
  for (int i = 0; i < bottom.size(); i++) {
    cudnnTensorDescriptor_t bottom_desc;
    cudnn::createTensorDesc<Dtype>(&bottom_desc);
    bottom_descs_.push_back(bottom_desc);
    cudnnTensorDescriptor_t top_desc;
    cudnn::createTensorDesc<Dtype>(&top_desc);
    top_descs_.push_back(top_desc);
    cudnnConvolutionDescriptor_t conv_desc;
    cudnn::createConvolutionDesc<Dtype>(&conv_desc);
    conv_descs_.push_back(conv_desc);
    workspace_bwd_data_sizes_[i] = 0;
    workspace_bwd_filter_sizes_[i] = 0;
  }

  // Tensor descriptor for bias.
  if (this->bias_term_) {
    cudnn::createTensorDesc<Dtype>(&bias_desc_);
  }

  handles_setup_ = true;
}
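weight_offset_ above is simply the number of weights handled per group: the product of the weight shape after dividing the output-channel dimension by group_, with bias_offset_ the per-group output channel count. A quick check with a hypothetical Nd kernel shape:

#include <cstdio>
#include <vector>

int main() {
  // Hypothetical Nd conv: 32 outputs, 8 input channels, group 2, 3x3x3 kernel.
  int num_output = 32, channels = 8, group = 2;
  std::vector<int> weight_shape = {num_output, channels / group, 3, 3, 3};

  weight_shape[0] /= group;            // per-group output channels
  long weight_offset = 1;
  for (size_t i = 0; i < weight_shape.size(); ++i) weight_offset *= weight_shape[i];
  int bias_offset = weight_shape[0];

  printf("weight_offset=%ld bias_offset=%d\n", weight_offset, bias_offset);
  // -> weight_offset = 16*4*3*3*3 = 1728, bias_offset = 16
  return 0;
}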
Example #29
0
void CRFWithLossLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {

// Get state number and state feature number from parameter
  state_num_ = this->layer_param_.crf_loss_param().state_num();
  feature_num_ = this->layer_param_.crf_loss_param().feature_num();
  nbest_ = this->layer_param_.crf_loss_param().nbest();
  max_seq_length_ = this->layer_param_.crf_loss_param().max_seq_length();
  for_training_ = this->layer_param_.crf_loss_param().for_training();

  num_ = bottom[0]->count(0, 1);

// Check whether the start, transition and state weights still need to be set up
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {

    // Assume that this CRF layer always has these three kinds of parameters
    this->blobs_.resize(3);

    // Start weights have one dimension, with one parameter per state
    vector<int> start_weight_shape(1);
    start_weight_shape[0] = state_num_;
    this->blobs_[0].reset(new Blob<Dtype>(start_weight_shape));
    // Transition weights form a fully connected state-to-state matrix
    // (maybe partially connected in the future), 2 dims
    vector<int> trans_weight_shape(2);
    trans_weight_shape[0] = state_num_;
    trans_weight_shape[1] = state_num_;
    this->blobs_[1].reset(new Blob<Dtype>(trans_weight_shape));
    // State weights associate each state with the local context features, 2 dims
    vector<int> state_weight_shape(2);
    state_weight_shape[0] = state_num_;
    state_weight_shape[1] = feature_num_;
    this->blobs_[2].reset(new Blob<Dtype>(state_weight_shape));
    // Reshape the alpha matrix to the proper size
    vector<int> alpha_shape(3);
    alpha_shape[0] = num_;
    alpha_shape[1] = max_seq_length_;
    alpha_shape[2] = state_num_;
    alpha_.Reshape(alpha_shape);
    // Reshape the beta matrix to the proper size
    vector<int> beta_shape(4);
    beta_shape[0] = num_;
    beta_shape[1] = max_seq_length_;
    beta_shape[2] = state_num_;
    beta_shape[3] = 1;
    beta_.Reshape(beta_shape);
    // Reshape the gamma matrix to the proper size
    vector<int> gamma_shape(4);
    gamma_shape[0] = num_;
    gamma_shape[1] = max_seq_length_;
    gamma_shape[2] = state_num_;
    gamma_shape[3] = 1;
    gamma_.Reshape(gamma_shape);
    // Reshape the epsilon matrix to the proper size
    vector<int> ep_shape(4);
    ep_shape[0] = num_;
    ep_shape[1] = max_seq_length_;
    ep_shape[2] = state_num_;
    ep_shape[3] = state_num_;
    epsilon_.Reshape(ep_shape);
    // Reshape the buffer for the state energy table
    vector<int> se_shape(4);
    se_shape[0] = 1;
    se_shape[1] = max_seq_length_;
    se_shape[2] = state_num_;
    se_shape[3] = 1;
    buf_state_energy_.Reshape(se_shape);
    // Reshape the buffer for the transposed bottom
    vector<int> tr_shape = bottom[0]->shape();
    tr_shape[1] = max_seq_length_;
    tr_shape[2] = feature_num_;
    tr_shape[3] = 1;
    buf_bottom_transposed_.Reshape(tr_shape);
    // Reshape the buffer vector to a length of feature number
    vector<int> buf_feat_shape(1);
    buf_feat_shape[0] = feature_num_;
    buf_feature_.Reshape(buf_feat_shape);
    // Reshape the buffer vector to a length of state number
    vector<int> buf_2_shape(1);
    buf_2_shape[0] = state_num_;
    buf_state_.Reshape(buf_2_shape);
    // Reshape the multiplier
    vector<int> multi_shape(1);
    multi_shape[0] = max_seq_length_;
    multiplier_seq_len_.Reshape(multi_shape);

    // For simplicity, all the weight fillers are the same (probably not so good)
    for (int i = 0; i < 3; ++i) {
      shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
          this->layer_param_.crf_loss_param().weight_filler()));
      weight_filler->Fill(this->blobs_[i].get());
    }
  }
}
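The alpha_/beta_/gamma_/epsilon_ buffers sized [num, max_seq_length, state_num(, state_num)] are the classic forward-backward tables of a linear-chain CRF: one entry per (sequence, time step, state) or (sequence, time step, state pair). The layer's own recursions are not shown in this excerpt; purely as a hedged illustration of why alpha needs one value per time step and state, a generic log-space forward pass over start, transition and per-step state scores might look like:

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Generic linear-chain forward pass in log space (illustrative only; not this
// layer's actual code). alpha[t][j] = state[t][j] + logsumexp_i(alpha[t-1][i] + trans[i][j]).
double log_sum_exp(const std::vector<double>& v) {
  double m = v[0];
  for (double x : v) m = std::max(m, x);
  double s = 0.0;
  for (double x : v) s += std::exp(x - m);
  return m + std::log(s);
}

int main() {
  const int T = 3, S = 2;  // sequence length, state number (made-up sizes)
  double start[S] = {0.1, -0.2};
  double trans[S][S] = {{0.5, -0.3}, {0.2, 0.4}};
  double state[T][S] = {{1.0, 0.0}, {0.2, 0.8}, {0.5, 0.5}};  // per-step state scores

  std::vector<std::vector<double> > alpha(T, std::vector<double>(S));
  for (int j = 0; j < S; ++j) alpha[0][j] = start[j] + state[0][j];
  for (int t = 1; t < T; ++t) {
    for (int j = 0; j < S; ++j) {
      std::vector<double> terms(S);
      for (int i = 0; i < S; ++i) terms[i] = alpha[t - 1][i] + trans[i][j];
      alpha[t][j] = state[t][j] + log_sum_exp(terms);
    }
  }
  // Log partition function: logsumexp over the final alphas.
  printf("log Z = %f\n", log_sum_exp(alpha[T - 1]));
  return 0;
}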