// Recursive backprop over the stacked model: the convolutional stack
// (_cnn_layers) is indexed first, then the dense stack (_nn_layers).
// iLayer is a single index spanning both stacks. The recursion performs
// the forward pass (infer_hiddens) on the way down and the backward pass
// (delta propagation + per-layer gradient accumulation) on the way up.
// NOTE(review): iLayer (int) is compared against size() (unsigned); assumes
// iLayer >= 0 — callers appear to start at 0 (see the 1-arg overload).
void update_gradient(int iLayer, matrix_t& target) {
  if (iLayer < _cnn_layers.size()) {
    // Infer convolutional layer
    const int i = iLayer;
    _cnn_layers[i]->infer_hiddens();
    if (iLayer + 1 < _cnn_layers.size()) {
      // Forward: feed this layer's hiddens into the next conv layer's visibles.
      _cnn_layers[i + 1]->visibles() = _cnn_layers[i]->hiddens();
      update_gradient(iLayer + 1, target);
      // Backward: backprop_visibles() presumably leaves the next layer's
      // visible deltas in visibles() — TODO confirm against the cnn layer API
      // (the dense branch below uses distinct visible_deltas() accessors).
      _cnn_layers[i + 1]->backprop_visibles();
      _cnn_layers[i]->backprop_hidden_deltas(_cnn_layers[i + 1]->visibles());
      _cnn_layers[i]->update_gradient();
    } else {
      // Transition from convolutional model to dense model
      // Flatten the conv hiddens into a 1 x count row for the first dense layer.
      _nn_layers[0]->visibles() =
          reshape(_cnn_layers[i]->hiddens(), 1, _cnn_layers[i]->hiddens().count());
      update_gradient(iLayer + 1, target);
      // Transition back
      // Un-flatten the dense visible deltas to the conv hidden shape.
      _nn_layers[0]->backprop_visible_deltas();
      _cnn_layers[i]->backprop_hidden_deltas(
          reshape(_nn_layers[0]->visible_deltas(), _model.cnn_layers()[i]->hiddens_size()));
      _cnn_layers[i]->update_gradient();
    }
  } else if (iLayer - _cnn_layers.size() < _nn_layers.size()) {
    // Infer dense layer
    // i is the index local to the dense stack.
    const int i = iLayer - _cnn_layers.size();
    _nn_layers[i]->infer_hiddens();
    if (i + 1 < _nn_layers.size()) {
      // Interior dense layer: forward into the next layer, recurse, then
      // pull its visible deltas back as this layer's hidden deltas.
      _nn_layers[i + 1]->visibles() = _nn_layers[i]->hiddens();
      update_gradient(iLayer + 1, target);
      _nn_layers[i + 1]->backprop_visible_deltas();
      _nn_layers[i]->backprop_hidden_deltas(_nn_layers[i + 1]->visible_deltas());
      _nn_layers[i]->update_gradient();
    } else {
      // Output layer: deltas come straight from the training target.
      _nn_layers[i]->calculate_deltas(target);
      _nn_layers[i]->update_gradient();
    }
  } else {
    assert(0); // should never happen
  }
}
/*
 * Walk the bnode-linked chain of source nodes and shape a pulse for each
 * node that is active (ptime > 0, nonzero power) and visible. A pattern
 * consisting of the single character '?' gets the generic pulse(); any
 * other pattern dispatches on device class (>= GRADDEV -> gradient shaper,
 * otherwise RF shaper). Nodes flagged with updateLater <= 0 are refreshed
 * via update_gradient() before the activity test.
 */
static void shapepulse(SRC_NODE *node)
{
    for (; node != NULL; node = node->bnode) {
        if (node->updateLater <= 0)
            update_gradient(node);

        /* Skip inactive or hidden nodes. */
        if (node->ptime <= 0.0 || node->power == 0.0 || !node->visible)
            continue;

        COMMON_NODE *cmnode = (COMMON_NODE *) &node->node.common_node;

        /* Exactly "?" (one char then NUL) means the generic pulse shape. */
        if (cmnode->pattern[0] == '?' && cmnode->pattern[1] == '\0') {
            pulse(node);
        } else if (node->device >= GRADDEV) {
            grad_shapepulse(node);
        } else {
            rf_shapepulse(node);
        }
    }
}
// Does not require the hidden units to be inferred virtual void update_gradient(matrix_t& target) { update_gradient(0, target); }