Example #1
// Backward pass for average pooling: the gradient at each output cell is spread
// uniformly over its sx_ x sy_ input window. `hessian` is a compile-time flag that
// selects the second-order terms (d2e_dx2_) instead of the first-order ones (de_dx_).
template <template <class> class Tensor, class T>
template <bool hessian>
void PoolingLayer<Tensor,T>::BackPropagateTemplateAverage(const Tensor<T>& input, Tensor<T>& dedx_prev)
{
	Tensor<T>& de_dx_t = hessian ? this->d2e_dx2_ : this->de_dx_;

	assert(dedx_prev.HaveSameSize(input));

	for(unsigned n = 0; n < this->out_.d(); n++){
		for(unsigned y = 0; y < this->out_.h(); y++){
			for(unsigned x = 0; x < this->out_.w(); x++) {
				for(UINT syi = 0; syi < this->sy_; ++syi){
					for(UINT sxi = 0; sxi < this->sx_; ++sxi){
						dedx_prev(x*this->sx_+sxi, y*this->sy_+syi, n) = de_dx_t(x,y,n)/(this->sy_*this->sx_);
					}
				}
			}
		}
	}
}
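
The example above depends on the library's Tensor class. As a point of reference, here is a minimal standalone sketch of the same average-pooling backward pass over flat, row-major std::vector buffers; every name in it (AvgPoolBackward and its parameters) is hypothetical and not part of the library.

// Minimal standalone sketch of average-pooling backpropagation, assuming
// row-major W x H x D buffers stored in std::vector<float>.
#include <vector>
#include <cstddef>

void AvgPoolBackward(const std::vector<float>& de_dx,   // gradient w.r.t. pooled output
                     std::vector<float>& dedx_prev,     // gradient w.r.t. input (filled here)
                     std::size_t out_w, std::size_t out_h, std::size_t depth,
                     std::size_t sx, std::size_t sy)
{
    const std::size_t in_w = out_w * sx, in_h = out_h * sy;
    for (std::size_t n = 0; n < depth; ++n)
        for (std::size_t y = 0; y < out_h; ++y)
            for (std::size_t x = 0; x < out_w; ++x) {
                // Each input cell of the sx*sy window receives an equal share of the gradient.
                const float g = de_dx[(n*out_h + y)*out_w + x] / float(sx * sy);
                for (std::size_t syi = 0; syi < sy; ++syi)
                    for (std::size_t sxi = 0; sxi < sx; ++sxi)
                        dedx_prev[(n*in_h + (y*sy + syi))*in_w + (x*sx + sxi)] = g;
            }
}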
Example #2
// Backward pass for max pooling: the gradient at each output cell is routed only to
// the input position whose value matches the pooled maximum. `hessian` is a compile-time
// flag that selects the second-order terms (d2e_dx2_) instead of the first-order ones (de_dx_).
template <template <class> class Tensor, class T>
template <bool hessian>
void PoolingLayer<Tensor,T>::BackPropagateTemplateMax(const Tensor<T>& input, Tensor<T>& dedx_prev)
{
    Tensor<T>& de_dx_t = hessian ? this->d2e_dx2_ : this->de_dx_;

    assert(dedx_prev.HaveSameSize(input));

    for(unsigned n = 0; n < this->out_.d(); n++){
        for(unsigned y = 0; y < this->out_.h(); y++){
            for(unsigned x = 0; x < this->out_.w(); x++) {
                for(UINT syi = 0; syi < this->sy_; ++syi){
                    for(UINT sxi = 0; sxi < this->sx_; ++sxi){
                        // Route the gradient to the input position that produced the max output.
                        dedx_prev(x*this->sx_+sxi, y*this->sy_+syi, n) =
                            input(x*this->sx_+sxi, y*this->sy_+syi, n) == this->out_(x,y,n) ? de_dx_t(x,y,n) : 0;
                        // TODO: the floating-point equality test here may be unreliable.
                    }
                }
            }
        }
    }
}
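
The TODO in the example above points at a real weakness: comparing input against out_ with exact floating-point equality duplicates the gradient when a window contains ties, and breaks entirely if the forward pass ever transforms the pooled value. A common alternative is to record the argmax index during the forward pass and route the gradient by index in the backward pass. The sketch below shows that approach over flat row-major buffers; all names (MaxPoolForward, MaxPoolBackward, argmax, etc.) are hypothetical and not part of the library.

// Sketch: max pooling that records the winning input index per output cell in the
// forward pass, so the backward pass routes gradients by index rather than by
// comparing floats for equality.
#include <vector>
#include <cstddef>
#include <limits>

void MaxPoolForward(const std::vector<float>& in, std::vector<float>& out,
                    std::vector<std::size_t>& argmax,   // winning input index per output cell
                    std::size_t out_w, std::size_t out_h, std::size_t depth,
                    std::size_t sx, std::size_t sy)
{
    // `out` and `argmax` must be pre-sized to out_w*out_h*depth.
    const std::size_t in_w = out_w * sx, in_h = out_h * sy;
    for (std::size_t n = 0; n < depth; ++n)
        for (std::size_t y = 0; y < out_h; ++y)
            for (std::size_t x = 0; x < out_w; ++x) {
                float best = -std::numeric_limits<float>::infinity();
                std::size_t best_idx = 0;
                for (std::size_t syi = 0; syi < sy; ++syi)
                    for (std::size_t sxi = 0; sxi < sx; ++sxi) {
                        const std::size_t idx = (n*in_h + (y*sy + syi))*in_w + (x*sx + sxi);
                        if (in[idx] > best) { best = in[idx]; best_idx = idx; }
                    }
                const std::size_t o = (n*out_h + y)*out_w + x;
                out[o] = best;
                argmax[o] = best_idx;
            }
}

void MaxPoolBackward(const std::vector<float>& de_dx, const std::vector<std::size_t>& argmax,
                     std::vector<float>& dedx_prev)     // must be zero-initialized
{
    // Only the input that won the max receives the gradient; all other cells stay zero.
    for (std::size_t o = 0; o < de_dx.size(); ++o)
        dedx_prev[argmax[o]] += de_dx[o];
}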