Layer::BlockSparseMatrix Layer::runReverse(const BlockSparseMatrix& m) const
{
    if(util::isLogEnabled("Layer"))
    {
        util::log("Layer") << " Running reverse propagation on matrix ("
            << m.rows() << " rows, " << m.columns()
            << " columns) through layer with dimensions (" << blocks()
            << " blocks, " << getInputCount() << " inputs, "
            << getOutputCount() << " outputs, " << blockStep()
            << " block step).\n";
        util::log("Layer") << "  layer: " << m_sparseMatrix.shapeString() << "\n";
    }

    auto result = m.reverseConvolutionalMultiply(m_sparseMatrix.transpose());

    if(util::isLogEnabled("Layer"))
    {
        util::log("Layer") << "  output: " << result.shapeString() << "\n";
    }

    if(util::isLogEnabled("Layer::Detail"))
    {
        util::log("Layer::Detail") << "  output: " << result.debugString() << "\n";
    }

    return result;
}
/*!
 * Very important function: it allows all modules to sleep/start in response
 * to send commands. If the module has been designated as an interrupt
 * module, it also sends back false on failure, optionally with additional
 * data on how well the module has done.
 */
void SamgarModule::SucceedFail(bool Awns, double additionaldata)
{
    if(modulemode == ModeInterupt)
    {
        // only send it when you have worthwhile data
        Bottle& MyBottle = prepare();
        MyBottle.clear();
        MyBottle.addInt(ActivationCode); // so the receiver knows it is a log report
        MyBottle.addString(MyName.c_str());
        MyBottle.addDouble(additionaldata);

        if(getOutputCount() > 0)
        {
            write();
        }

        currentmode = StateStoped;
    }

    long i = 0; // was uninitialized; the modulo test below relies on counting from zero

    while(currentmode != StateRunning)
    {
        yarp::os::Time::delay(0.01);
        i++;

        if(i % 100 == 0) // roughly once a second
        {
            checkConnection();
        }
    }

    checkConnection();
}
void NeuralNetwork::setLabelForOutputNeuron(unsigned int idx, const std::string& label)
{
    assert(idx < getOutputCount());

    util::log("NeuralNetwork") << "Setting label for output neuron "
        << idx << " to " << label << "\n";

    _labels[idx] = label;
}
bool NeuralNetwork::areConnectionsValid() const
{
    // TODO: actually validate the connections; this stub always succeeds
    util::log("NeuralNetwork") << "Verified network with " << getInputCount()
        << " inputs and " << getOutputCount() << " outputs\n";

    return true;
}
void NeuralNetwork::mirror(const Layer& layer)
{
    size_t blocks = getGreatestCommonDivisor(layer.blocks(),
        getGreatestCommonDivisor(getOutputCount(), layer.getInputCount()));

    assertM(getOutputCount() % blocks == 0, "Input count "
        << getOutputCount() << " not divisible by " << blocks << ".");
    assertM(layer.getInputCount() % blocks == 0, "Output count "
        << layer.getInputCount() << " not divisible by " << blocks << ".");

    size_t newInputs  = getOutputCount()      / blocks;
    size_t newOutputs = layer.getInputCount() / blocks;

    assert(newInputs  > 0);
    assert(newOutputs > 0);

    util::log("NeuralNetwork") << "Mirroring neural network output layer ("
        << back().blocks() << " blocks, " << back().getInputBlockingFactor()
        << " inputs, " << back().getOutputBlockingFactor() << " outputs) to ("
        << blocks << " blocks, " << newInputs << " inputs, "
        << newOutputs << " outputs)\n";

    addLayer(Layer(blocks, newInputs, newOutputs));

    std::default_random_engine engine;

    // TODO: should wire this out of the neural network
    bool shouldSeedWithTime = util::KnobDatabase::getKnobValue(
        "NeuralNetwork::SeedWithTime", false);

    if(shouldSeedWithTime)
    {
        engine.seed(std::time(0));
    }
    else
    {
        engine.seed(0);
    }

    // should be the pseudo-inverse of the mirrored weights
    back().initializeRandomly(engine);
}
static int
checkCache (BrailleContractionData *bcd) {
  if (!bcd->table->cache.input.characters) return 0;
  if (!bcd->table->cache.output.cells) return 0;
  if (bcd->input.offsets && !bcd->table->cache.offsets.count) return 0;
  if (bcd->table->cache.output.maximum != getOutputCount(bcd)) return 0;
  if (bcd->table->cache.cursorOffset != makeCachedCursorOffset(bcd)) return 0;
  if (bcd->table->cache.expandCurrentWord != prefs.expandCurrentWord) return 0;
  if (bcd->table->cache.capitalizationMode != prefs.capitalizationMode) return 0;

  {
    unsigned int count = getInputCount(bcd);

    if (bcd->table->cache.input.count != count) return 0;
    if (wmemcmp(bcd->input.begin, bcd->table->cache.input.characters, count) != 0) return 0;
  }

  return 1;
}
float NeuralNetwork::computeAccuracy(const BlockSparseMatrix& input,
    const BlockSparseMatrix& reference) const
{
    assert(input.rows()        == reference.rows());
    assert(reference.columns() == getOutputCount());

    auto result = runInputs(input);

    float threshold = 0.5f;

    auto resultActivations    = result.greaterThanOrEqual(threshold);
    auto referenceActivations = reference.greaterThanOrEqual(threshold);

    util::log("NeuralNetwork") << "Result activations "
        << resultActivations.toString();
    util::log("NeuralNetwork") << "Reference activations "
        << referenceActivations.toString();

    auto matchingActivations = resultActivations.equals(referenceActivations);

    float matches = matchingActivations.reduceSum();

    return matches / result.size();
}
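// A point of comparison for computeAccuracy above: the same metric on plain
// dense data. The thresholdedAccuracy helper below is a hypothetical sketch,
// not part of the library API -- binarize both predictions and references at
// a threshold, then report the fraction of positions where they agree.
#include <cassert>
#include <cstddef>
#include <vector>

float thresholdedAccuracy(const std::vector<float>& predictions,
                          const std::vector<float>& references,
                          float threshold = 0.5f)
{
    assert(predictions.size() == references.size());
    assert(!predictions.empty());

    std::size_t matches = 0;

    for (std::size_t i = 0; i < predictions.size(); ++i) {
        bool predicted = predictions[i] >= threshold;
        bool expected  = references[i]  >= threshold;
        if (predicted == expected) ++matches;
    }

    return static_cast<float>(matches) / predictions.size();
}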
Layer::BlockSparseMatrix Layer::runInputs(const BlockSparseMatrix& m) const
{
    if(util::isLogEnabled("Layer"))
    {
        util::log("Layer") << " Running forward propagation on matrix ("
            << m.rows() << " rows, " << m.columns()
            << " columns) through layer with dimensions (" << blocks()
            << " blocks, " << getInputCount() << " inputs, "
            << getOutputCount() << " outputs, " << blockStep()
            << " block step).\n";
        util::log("Layer") << "  layer: " << m_sparseMatrix.shapeString() << "\n";
    }

    if(util::isLogEnabled("Layer::Detail"))
    {
        util::log("Layer::Detail") << "  input: " << m.debugString() << "\n";
        util::log("Layer::Detail") << "  layer: " << m_sparseMatrix.debugString() << "\n";
        util::log("Layer::Detail") << "  bias:  " << m_bias.debugString() << "\n";
    }

    auto unbiasedOutput = m.convolutionalMultiply(m_sparseMatrix, blockStep());
    auto output = unbiasedOutput.convolutionalAddBroadcastRow(m_bias);

    output.sigmoidSelf();

    if(util::isLogEnabled("Layer"))
    {
        util::log("Layer") << "  output: " << output.shapeString() << "\n";
    }

    if(util::isLogEnabled("Layer::Detail"))
    {
        util::log("Layer::Detail") << "  output: " << output.debugString() << "\n";
    }

    return output;
}
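// For orientation: runInputs above is the block-sparse form of the standard
// affine-plus-sigmoid forward pass, output = sigmoid(W * x + b). A minimal
// dense sketch, assuming a hypothetical denseForward helper that ignores the
// blocking (bias.size() must equal weights.size(), the output neuron count):
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> denseForward(const std::vector<std::vector<float>>& weights,
                                const std::vector<float>& bias,
                                const std::vector<float>& input)
{
    std::vector<float> output(weights.size(), 0.0f);

    for (std::size_t row = 0; row < weights.size(); ++row) {
        float sum = bias[row]; // convolutionalAddBroadcastRow equivalent
        for (std::size_t col = 0; col < input.size(); ++col) {
            sum += weights[row][col] * input[col]; // convolutionalMultiply equivalent
        }
        output[row] = 1.0f / (1.0f + std::exp(-sum)); // sigmoidSelf equivalent
    }

    return output;
}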
static void
updateCache (BrailleContractionData *bcd) {
  {
    unsigned int count = getInputCount(bcd);

    if (count > bcd->table->cache.input.size) {
      /* set the low seven bits so the buffer grows in chunks */
      unsigned int newSize = count | 0X7F;
      wchar_t *newCharacters = malloc(ARRAY_SIZE(newCharacters, newSize));

      if (!newCharacters) {
        logMallocError();
        bcd->table->cache.input.count = 0;
        goto inputDone;
      }

      if (bcd->table->cache.input.characters) free(bcd->table->cache.input.characters);
      bcd->table->cache.input.characters = newCharacters;
      bcd->table->cache.input.size = newSize;
    }

    wmemcpy(bcd->table->cache.input.characters, bcd->input.begin, count);
    bcd->table->cache.input.count = count;
    bcd->table->cache.input.consumed = getInputConsumed(bcd);
  }

inputDone:
  {
    unsigned int count = getOutputConsumed(bcd);

    if (count > bcd->table->cache.output.size) {
      unsigned int newSize = count | 0X7F;
      unsigned char *newCells = malloc(ARRAY_SIZE(newCells, newSize));

      if (!newCells) {
        logMallocError();
        bcd->table->cache.output.count = 0;
        goto outputDone;
      }

      if (bcd->table->cache.output.cells) free(bcd->table->cache.output.cells);
      bcd->table->cache.output.cells = newCells;
      bcd->table->cache.output.size = newSize;
    }

    memcpy(bcd->table->cache.output.cells, bcd->output.begin, count);
    bcd->table->cache.output.count = count;
    bcd->table->cache.output.maximum = getOutputCount(bcd);
  }

outputDone:
  if (bcd->input.offsets) {
    unsigned int count = getInputCount(bcd);

    if (count > bcd->table->cache.offsets.size) {
      unsigned int newSize = count | 0X7F;
      int *newArray = malloc(ARRAY_SIZE(newArray, newSize));

      if (!newArray) {
        logMallocError();
        bcd->table->cache.offsets.count = 0;
        goto offsetsDone;
      }

      if (bcd->table->cache.offsets.array) free(bcd->table->cache.offsets.array);
      bcd->table->cache.offsets.array = newArray;
      bcd->table->cache.offsets.size = newSize;
    }

    memcpy(bcd->table->cache.offsets.array, bcd->input.offsets, ARRAY_SIZE(bcd->input.offsets, count));
    bcd->table->cache.offsets.count = count;
  } else {
    bcd->table->cache.offsets.count = 0;
  }

offsetsDone:
  bcd->table->cache.cursorOffset = makeCachedCursorOffset(bcd);
  bcd->table->cache.expandCurrentWord = prefs.expandCurrentWord;
  bcd->table->cache.capitalizationMode = prefs.capitalizationMode;
}
size_t SoftmaxLayer::getActivationMemory() const
{
    return precision().size() * getOutputCount();
}
size_t Layer::totalNeurons() const
{
    return getOutputCount();
}
emb_size_type getOptModelOutputCount(emb_optimizer optim)
{
    return getOutputCount(myModel);
}
void RemotePluginServer::dispatchControlEvents()
{
    RemotePluginOpcode opcode = RemotePluginNoOpcode;
    static float *parameterBuffer = 0;

    tryRead(m_controlRequestFd, &opcode, sizeof(RemotePluginOpcode));

    switch (opcode) {

    case RemotePluginGetVersion:
        writeFloat(m_controlResponseFd, getVersion());
        break;

    case RemotePluginGetName:
        writeString(m_controlResponseFd, getName());
        break;

    case RemotePluginGetMaker:
        writeString(m_controlResponseFd, getMaker());
        break;

    case RemotePluginTerminate:
        terminate();
        break;

    case RemotePluginGetInputCount:
        m_numInputs = getInputCount();
        writeInt(m_controlResponseFd, m_numInputs);
        break;

    case RemotePluginGetOutputCount:
        m_numOutputs = getOutputCount();
        writeInt(m_controlResponseFd, m_numOutputs);
        break;

    case RemotePluginGetParameterCount:
        writeInt(m_controlResponseFd, getParameterCount());
        break;

    case RemotePluginGetParameterName:
        writeString(m_controlResponseFd, getParameterName(readInt(m_controlRequestFd)));
        break;

    case RemotePluginGetParameter:
        writeFloat(m_controlResponseFd, getParameter(readInt(m_controlRequestFd)));
        break;

    case RemotePluginGetParameterDefault:
        writeFloat(m_controlResponseFd, getParameterDefault(readInt(m_controlRequestFd)));
        break;

    case RemotePluginGetParameters:
    {
        if (!parameterBuffer) {
            parameterBuffer = new float[getParameterCount()];
        }
        int p0 = readInt(m_controlRequestFd);
        int pn = readInt(m_controlRequestFd);
        getParameters(p0, pn, parameterBuffer);
        tryWrite(m_controlResponseFd, parameterBuffer, (pn - p0 + 1) * sizeof(float));
        break;
    }

    case RemotePluginHasMIDIInput:
    {
        bool m = hasMIDIInput();
        tryWrite(m_controlResponseFd, &m, sizeof(bool));
        break;
    }

    case RemotePluginGetProgramCount:
        writeInt(m_controlResponseFd, getProgramCount());
        break;

    case RemotePluginGetProgramName:
        writeString(m_controlResponseFd, getProgramName(readInt(m_controlRequestFd)));
        break;

    case RemotePluginIsReady:
    {
        if (!m_shm) sizeShm();
        bool b(isReady());
        std::cerr << "isReady: returning " << b << std::endl;
        tryWrite(m_controlResponseFd, &b, sizeof(bool));
        break; // was missing, causing a fall-through into the debug-level handler
    }

    case RemotePluginSetDebugLevel:
    {
        RemotePluginDebugLevel newLevel = m_debugLevel;
        tryRead(m_controlRequestFd, &newLevel, sizeof(RemotePluginDebugLevel));
        setDebugLevel(newLevel);
        m_debugLevel = newLevel;
        break;
    }

    case RemotePluginWarn:
    {
        bool b = warn(readString(m_controlRequestFd));
        tryWrite(m_controlResponseFd, &b, sizeof(bool));
        break;
    }

    case RemotePluginShowGUI:
    {
        showGUI(readString(m_controlRequestFd));
        break;
    }

    case RemotePluginHideGUI:
    {
        hideGUI();
        break;
    }

    // Deryabin Andrew: vst chunks support
    case RemotePluginGetVSTChunk:
    {
        std::vector<char> chunk = getVSTChunk();
        writeRaw(m_controlResponseFd, chunk);
        break;
    }

    case RemotePluginSetVSTChunk:
    {
        std::vector<char> chunk = readRaw(m_controlRequestFd);
        setVSTChunk(chunk);
        break;
    }
    // Deryabin Andrew: vst chunks support: end code

    case RemotePluginNoOpcode:
        break;

    case RemotePluginReset:
        reset();
        break;

    default:
        std::cerr << "WARNING: RemotePluginServer::dispatchControlEvents: unexpected opcode "
                  << opcode << std::endl;
    }
}
size_t CTCDecoderLayer::getActivationMemory() const
{
    return precision().size() * getOutputCount();
}
int main(int nargs, char** args) {
  (void)nargs;
  (void)args;

  int evalPoints = getEvalPointsCount();
  int inputs = getInputCount();
  int outputs = getOutputCount();
  NUMBER* x = new NUMBER[inputs];
  NUMBER* y = new NUMBER[outputs];

  NUMBER::TapeType& tape = NUMBER::getGlobalTape();
  codi::TapeVectorHelper<NUMBER, Gradient> vh;
  tape.setActive();

  for(int curPoint = 0; curPoint < evalPoints; ++curPoint) {
    std::cout << "Point " << curPoint << " : {";

    for(int i = 0; i < inputs; ++i) {
      if(i != 0) {
        std::cout << ", ";
      }
      double val = getEvalPoint(curPoint, i);
      std::cout << val;

      x[i] = (NUMBER)(val);
    }
    std::cout << "}\n";

    for(int i = 0; i < outputs; ++i) {
      y[i] = 0.0;
    }

    // The tape is evaluated in vector mode, DIM reverse directions at a time,
    // so ceil(outputs / DIM) passes cover the full Jacobian.
    int runs = outputs / DIM;
    if(outputs % DIM != 0) {
      runs += 1;
    }

    std::vector<std::vector<double> > jac(outputs);

    for(int curOut = 0; curOut < runs; ++curOut) {
      size_t curSize = DIM;
      if((curOut + 1) * DIM > (size_t)outputs) {
        curSize = outputs % DIM;
      }

      for(int i = 0; i < inputs; ++i) {
        tape.registerInput(x[i]);
      }

      func(x, y);

      for(int i = 0; i < outputs; ++i) {
        tape.registerOutput(y[i]);
      }

      Gradient grad;
      for(size_t curDim = 0; curDim < curSize; ++curDim) {
        grad[curDim] = 1.0;
        vh.setGradient(y[curOut * DIM + curDim].getGradientData(), grad);
        grad[curDim] = 0.0;
      }

      vh.evaluate();

      for(size_t curDim = 0; curDim < curSize; ++curDim) {
        for(int curIn = 0; curIn < inputs; ++curIn) {
          jac[curOut * DIM + curDim].push_back(vh.getGradient(x[curIn].getGradientData())[curDim]);
        }
      }

      vh.clearAdjoints();
      tape.reset();
    }

    for(int curIn = 0; curIn < inputs; ++curIn) {
      for(int curOut = 0; curOut < outputs; ++curOut) {
        std::cout << curIn << " " << curOut << " " << jac[curOut][curIn] << std::endl;
      }
    }
  }

  delete [] y;
  delete [] x;

  return 0;
}