/* Recursively sum the data field of every node in the tree. */
int BST::sum_all(Node *root) {
    int total;
    if (root == NULL) {
        return 0;
    }
    total = sum_all(root->LC);
    total += sum_all(root->RC);  /* accumulate; '=' here would discard the left-subtree sum */
    total += root->data;
    return total;
}
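For context, a minimal sketch of the declarations this method assumes. The member names LC, RC, and data come from the snippet itself; the rest of the class shape is an assumption, not the original source:

/* Hypothetical declarations matching the usage above. */
struct Node {
    int data;    /* payload summed by sum_all */
    Node *LC;    /* left child */
    Node *RC;    /* right child */
};

class BST {
public:
    int sum_all(Node *root);  /* defined above */
};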
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

void calculate(char *value);    /* defined elsewhere */
int sum_all(const char *value); /* defined elsewhere */

int main(void) {
    char value[1000];
    puts("Hello World"); /* prints Hello World */
    memset(value, 0, sizeof value);
    calculate(value);
    printf("result is: %s\n", value);
    printf("%d\n", sum_all(value));
    return EXIT_SUCCESS;
}
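Neither calculate nor this sum_all is shown in the source. Since sum_all here takes the character buffer and returns an int, one plausible reading is a routine that totals the digit characters in the string; the sketch below is a hypothetical stand-in under that assumption, not the original implementation:

/* Hypothetical: sums the numeric value of every digit character in the
 * buffer. The real sum_all() is not shown in the source. */
int sum_all(const char *value) {
    int total = 0;
    for (; *value != '\0'; ++value) {
        if (*value >= '0' && *value <= '9')
            total += *value - '0';
    }
    return total;
}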
#include <stdio.h>
#include <stdlib.h>

#define ROWS 3
#define COLS 4

void sum_rows(int arr[ROWS][COLS]); /* defined elsewhere */
void sum_cols(int arr[ROWS][COLS]); /* defined elsewhere */
int sum_all(int arr[ROWS][COLS]);   /* defined elsewhere */

int main(void) {
    int tot;
    int mdarr[ROWS][COLS] = {
        {1, 3, 5, 7},
        {2, 4, 6, 8},
        {0, 1, 2, 3}
    };
    puts("sum each row element");
    sum_rows(mdarr);
    puts("sum each col element");
    sum_cols(mdarr);
    tot = sum_all(mdarr);
    printf("Total rows and cols: %d\n", tot);
    return EXIT_SUCCESS;
}
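The three helpers are not shown. Given the puts banners, a plausible reading is that sum_rows and sum_cols print per-row and per-column sums while sum_all totals every element; these are hypothetical implementations consistent with how main() calls them:

/* Hypothetical implementations, assumed from the calls in main(). */
void sum_rows(int arr[ROWS][COLS]) {
    for (int r = 0; r < ROWS; r++) {
        int sum = 0;
        for (int c = 0; c < COLS; c++)
            sum += arr[r][c];
        printf("row %d: %d\n", r, sum);
    }
}

void sum_cols(int arr[ROWS][COLS]) {
    for (int c = 0; c < COLS; c++) {
        int sum = 0;
        for (int r = 0; r < ROWS; r++)
            sum += arr[r][c];
        printf("col %d: %d\n", c, sum);
    }
}

int sum_all(int arr[ROWS][COLS]) {
    int total = 0;
    for (int r = 0; r < ROWS; r++)
        for (int c = 0; c < COLS; c++)
            total += arr[r][c];
    return total;
}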
void ConvolutionalLayer::backPropagate(vector<mat>& errors, const vector<mat>& fins,
    const vector<mat>& fouts, float learning_rate) {

  size_t nInputs  = getNumInputMaps(),
         nOutputs = getNumOutputMaps();

  size_t batch_size = fins[0].getCols();

  // Throughout this function the loop indices mean:
  //   i : input feature map,  i = 0 ~ nInputs  - 1
  //   j : output feature map, j = 0 ~ nOutputs - 1

  // delta_j = fout_j .* (1 - fout_j) .* error_j  (elementwise; sigmoid derivative)
  vector<mat> deltas(nOutputs);
  for (size_t j = 0; j < nOutputs; ++j)
    deltas[j] = fouts[j] & (1.0f - fouts[j]) & errors[j];

  this->feedBackward(errors, deltas);

  assert(learning_rate > 0);
  float lr = learning_rate / batch_size;

  // iImgs holds the input images;
  // oImgs holds the delta images for each output map.
  vector<vector<mat> > iImgs(nInputs), oImgs(nOutputs);

  for (size_t i = 0; i < nInputs; ++i)
    iImgs[i] = reshapeVectors2Images(fins[i], _input_img_size);

  for (size_t j = 0; j < nOutputs; ++j)
    oImgs[j] = reshapeVectors2Images(deltas[j], this->get_output_img_size());

  // Update kernels and biases, averaging the gradient over the batch
  // (lr already carries the 1/batch_size factor).
  for (size_t k = 0; k < batch_size; ++k) {
    for (size_t j = 0; j < nOutputs; ++j) {
      for (size_t i = 0; i < nInputs; ++i)
        _kernels[i][j] -= convn(rot180(iImgs[i][k]), oImgs[j][k], "valid_shm") * lr;

      _bias[j] -= sum_all(oImgs[j][k]) * lr;
    }
  }
}
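In math form, and assuming sigmoid activations (which is what the fouts & (1 - fouts) factor implements), the updates above are, with learning rate \eta, batch size B, input image x_i^{(k)}, kernel K_{ij}, and bias b_j:

\delta_j^{(k)} = f^{\mathrm{out},(k)}_j \odot \bigl(1 - f^{\mathrm{out},(k)}_j\bigr) \odot e_j^{(k)}

K_{ij} \leftarrow K_{ij} - \frac{\eta}{B} \sum_{k=1}^{B} \mathrm{rot180}\bigl(x_i^{(k)}\bigr) \ast_{\mathrm{valid}} \delta_j^{(k)}

b_j \leftarrow b_j - \frac{\eta}{B} \sum_{k=1}^{B} \sum_{u,v} \delta_j^{(k)}(u,v)

where \odot is elementwise multiplication and \ast_{\mathrm{valid}} is a valid-mode convolution, matching the convn(..., "valid_shm") call.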