size_t Layer::getOutputCount() const
{
    // The output dimension is derived from the configured input dimension.
    size_t count = getOutputCountForInputCount(getInputCount());

    util::log("Layer") << m_sparseMatrix.shapeString()
        << ": Output count for input count " << getInputCount()
        << " is " << count << "\n";

    return count;
}
Layer::BlockSparseMatrix Layer::runReverse(const BlockSparseMatrix& m) const
{
    if(util::isLogEnabled("Layer"))
    {
        util::log("Layer") << " Running reverse propagation on matrix ("
            << m.rows() << " rows, " << m.columns()
            << " columns) through layer with dimensions (" << blocks()
            << " blocks, " << getInputCount() << " inputs, "
            << getOutputCount() << " outputs, " << blockStep()
            << " block step).\n";
        util::log("Layer") << " layer: " << m_sparseMatrix.shapeString() << "\n";
    }

    // Propagate backwards through the layer's sparse matrix by multiplying
    // against its transpose.
    auto transposedMatrix = m_sparseMatrix.transpose();
    auto result = m.reverseConvolutionalMultiply(transposedMatrix);

    if(util::isLogEnabled("Layer"))
    {
        util::log("Layer") << " output: " << result.shapeString() << "\n";
    }

    if(util::isLogEnabled("Layer::Detail"))
    {
        util::log("Layer::Detail") << " output: " << result.debugString() << "\n";
    }

    return result;
}
virtual eBool checkValidity() const
{
    // Collect every input operator that belongs to the bitmap category.
    eArray<eIBitmapOp *> bitmapOps;

    for (eU32 i=0; i<getInputCount(); i++)
    {
        const eIOperator *input = getInputOperator(i);
        eASSERT(input != eNULL);

        if (input->getCategory() == "Bitmap")
        {
            bitmapOps.append((eIBitmapOp *)input);
        }
    }

    // The operator is only valid when all bitmap inputs have pair-wise
    // identical bitmap dimensions.
    for (eU32 i=1; i<bitmapOps.size(); i++)
    {
        const eBool widthDiffers =
            (bitmapOps[i-1]->getResult().width != bitmapOps[i]->getResult().width);
        const eBool heightDiffers =
            (bitmapOps[i-1]->getResult().height != bitmapOps[i]->getResult().height);

        if (widthDiffers || heightDiffers)
        {
            return eFALSE;
        }
    }

    return eIOperator::checkValidity();
}
OP_EXEC(eGraphicsApiDx9 *gfx, const eVector3 &pos, eF32 range, const eFloatColor &diffuse, const eFloatColor &ambient, const eFloatColor &specular, eBool castsShadows, eU8 shadowedFaces, eF32 penumbraSize, eF32 shadowBias)
{
    // Merge the scene data of the first model input, when one is connected.
    if (getInputCount() > 0)
    {
        eSceneData &inputSceneData = ((eIModelOp *)getInputOperator(0))->getResult().sceneData;
        m_sceneData.merge(inputSceneData);
    }

    // Push all operator parameters into the light object.
    m_light.setDiffuse(diffuse);
    m_light.setAmbient(ambient);
    m_light.setSpecular(specular);
    m_light.setPosition(pos);
    m_light.setRange(range);
    m_light.setPenumbraSize(penumbraSize);
    // *0.1, because available precision in editor is too small.
    m_light.setShadowBias(shadowBias*0.1f);

    // Shadows are cast per cube-map face: a face casts a shadow only when
    // its bit is set AND shadow casting is globally enabled.
    for (eInt face=0; face<eCMFACE_COUNT; face++)
    {
        const eBool shadowOn = (eGetBit(shadowedFaces, face) && castsShadows);
        m_light.setCastsShadows((eCubeMapFace)face, shadowOn);
    }

    m_sceneData.addLight(&m_light);
}
void _copyFirstInputBitmap() { if (getInputCount() != 0) { const Result &res = ((eIBitmapOp *)getInputOperator(0))->getResult(); eMemCopy(m_bitmap, res.bitmap, res.size*sizeof(eColor)); } }
OP_EXEC(eGraphicsApiDx9 *gfx)
{
    // Accumulate the scene data of every connected model input into ours.
    for (eU32 index=0; index<getInputCount(); index++)
    {
        eIModelOp *modelOp = (eIModelOp *)getInputOperator(index);
        m_sceneData.merge(modelOp->getResult().sceneData);
    }
}
bool NeuralNetwork::areConnectionsValid() const { // TODO util::log("NeuralNetwork") << "Verified network with " << getInputCount() << " inputs and " << getOutputCount() << " outputs\n"; return true; }
INT32 JoystickInputProducer::evaluateForAnyInput()
{
    // Scan every input of this producer and return the identifier of the
    // first one whose current value is non-zero (i.e. the first active
    // joystick input). Returns -1 when no input is active.
    //
    // Fix: the count was previously held in a plain `int` while the rest of
    // the function used INT32; use INT32 consistently.
    const INT32 count = getInputCount();

    for (INT32 i = 0; i < count; i++)
    {
        const INT32 input = getInput(i);

        if (getValue(input) != 0.0f)
            return input;
    }

    return -1;
}
static int checkCache (BrailleContractionData *bcd) { if (!bcd->table->cache.input.characters) return 0; if (!bcd->table->cache.output.cells) return 0; if (bcd->input.offsets && !bcd->table->cache.offsets.count) return 0; if (bcd->table->cache.output.maximum != getOutputCount(bcd)) return 0; if (bcd->table->cache.cursorOffset != makeCachedCursorOffset(bcd)) return 0; if (bcd->table->cache.expandCurrentWord != prefs.expandCurrentWord) return 0; if (bcd->table->cache.capitalizationMode != prefs.capitalizationMode) return 0; { unsigned int count = getInputCount(bcd); if (bcd->table->cache.input.count != count) return 0; if (wmemcmp(bcd->input.begin, bcd->table->cache.input.characters, count) != 0) return 0; } return 1; }
virtual void _preExecute(eGraphicsApiDx9 *gfx)
{
    // Bitmap operators only accept bitmap operators as input, and all
    // inputs are guaranteed to share the same bitmap size. So, when at
    // least one input exists, resize ourselves to match the first one
    // (any other input would yield the same dimensions).
    if (getInputCount() > 0)
    {
        eIBitmapOp *input = (eIBitmapOp *)getInputOperator(0);
        eASSERT(input != eNULL);
        eASSERT(TEST_CATEGORY(input, "Bitmap", Bitmap_CID));

        _reallocate(input->getResult().width, input->getResult().height);
    }
}
std::string NeuralNetwork::shapeString() const
{
    // Render a human-readable summary of the network's dimensions,
    // followed by one line per layer.
    std::stringstream stream;

    stream << "Neural Network [" << size() << " layers, " << getInputCount()
        << " inputs (" << getInputBlockingFactor() << " way blocked), "
        << getOutputNeurons() << " outputs (" << getOutputBlockingFactor()
        << " way blocked)]\n";

    // Fix: the layer index was previously computed as `&layer - &*begin()`,
    // which is only valid when the layers are stored contiguously. An
    // explicit counter is correct for any underlying container.
    size_t index = 0;

    for(auto& layer : *this)
    {
        stream << " Layer " << index << ": [" << layer.blocks() << " blocks, "
            << layer.getInputCount() << " inputs ("
            << layer.getInputBlockingFactor() << " way blocked), "
            << layer.getOutputCount() << " outputs ("
            << layer.getOutputBlockingFactor() << " way blocked), "
            << layer.blockStep() << " block step]\n";

        ++index;
    }

    return stream.str();
}
Layer::BlockSparseMatrix Layer::runInputs(const BlockSparseMatrix& m) const
{
    if(util::isLogEnabled("Layer"))
    {
        util::log("Layer") << " Running forward propagation on matrix ("
            << m.rows() << " rows, " << m.columns()
            << " columns) through layer with dimensions (" << blocks()
            << " blocks, " << getInputCount() << " inputs, "
            << getOutputCount() << " outputs, " << blockStep()
            << " block step).\n";
        util::log("Layer") << " layer: " << m_sparseMatrix.shapeString() << "\n";
    }

    if(util::isLogEnabled("Layer::Detail"))
    {
        util::log("Layer::Detail") << " input: " << m.debugString() << "\n";
        util::log("Layer::Detail") << " layer: " << m_sparseMatrix.debugString() << "\n";
        util::log("Layer::Detail") << " bias: " << m_bias.debugString() << "\n";
    }

    // Forward pass: block-sparse multiply, broadcast bias add, then a
    // sigmoid applied elementwise in place.
    auto activations = m.convolutionalMultiply(m_sparseMatrix,
        blockStep()).convolutionalAddBroadcastRow(m_bias);
    activations.sigmoidSelf();

    if(util::isLogEnabled("Layer"))
    {
        util::log("Layer") << " output: " << activations.shapeString() << "\n";
    }

    if(util::isLogEnabled("Layer::Detail"))
    {
        util::log("Layer::Detail") << " output: " << activations.debugString() << "\n";
    }

    return activations;
}
// Adapter for the optimizer interface: reports the number of model inputs.
// NOTE(review): the optim handle is unused here — the count is always taken
// from the global myModel instance; confirm that is intended.
emb_size_type getOptModelInputCount(emb_optimizer optim)
{
    return getInputCount(myModel);
}
void RemotePluginServer::dispatchControlEvents() { RemotePluginOpcode opcode = RemotePluginNoOpcode; static float *parameterBuffer = 0; tryRead(m_controlRequestFd, &opcode, sizeof(RemotePluginOpcode)); switch (opcode) { case RemotePluginGetVersion: writeFloat(m_controlResponseFd, getVersion()); break; case RemotePluginGetName: writeString(m_controlResponseFd, getName()); break; case RemotePluginGetMaker: writeString(m_controlResponseFd, getMaker()); break; case RemotePluginTerminate: terminate(); break; case RemotePluginGetInputCount: m_numInputs = getInputCount(); writeInt(m_controlResponseFd, m_numInputs); break; case RemotePluginGetOutputCount: m_numOutputs = getOutputCount(); writeInt(m_controlResponseFd, m_numOutputs); break; case RemotePluginGetParameterCount: writeInt(m_controlResponseFd, getParameterCount()); break; case RemotePluginGetParameterName: writeString(m_controlResponseFd, getParameterName(readInt(m_controlRequestFd))); break; case RemotePluginGetParameter: writeFloat(m_controlResponseFd, getParameter(readInt(m_controlRequestFd))); break; case RemotePluginGetParameterDefault: writeFloat(m_controlResponseFd, getParameterDefault(readInt(m_controlRequestFd))); break; case RemotePluginGetParameters: { if (!parameterBuffer) { parameterBuffer = new float[getParameterCount()]; } int p0 = readInt(m_controlRequestFd); int pn = readInt(m_controlRequestFd); getParameters(p0, pn, parameterBuffer); tryWrite(m_controlResponseFd, parameterBuffer, (pn - p0 + 1) * sizeof(float)); break; } case RemotePluginHasMIDIInput: { bool m = hasMIDIInput(); tryWrite(m_controlResponseFd, &m, sizeof(bool)); break; } case RemotePluginGetProgramCount: writeInt(m_controlResponseFd, getProgramCount()); break; case RemotePluginGetProgramName: writeString(m_controlResponseFd, getProgramName(readInt(m_controlRequestFd))); break; case RemotePluginIsReady: { if (!m_shm) sizeShm(); bool b(isReady()); std::cerr << "isReady: returning " << b << std::endl; tryWrite(m_controlResponseFd, &b, 
sizeof(bool)); } case RemotePluginSetDebugLevel: { RemotePluginDebugLevel newLevel = m_debugLevel; tryRead(m_controlRequestFd, &newLevel, sizeof(RemotePluginDebugLevel)); setDebugLevel(newLevel); m_debugLevel = newLevel; break; } case RemotePluginWarn: { bool b = warn(readString(m_controlRequestFd)); tryWrite(m_controlResponseFd, &b, sizeof(bool)); break; } case RemotePluginShowGUI: { showGUI(readString(m_controlRequestFd)); break; } case RemotePluginHideGUI: { hideGUI(); break; } //Deryabin Andrew: vst chunks support case RemotePluginGetVSTChunk: { std::vector<char> chunk = getVSTChunk(); writeRaw(m_controlResponseFd, chunk); break; } case RemotePluginSetVSTChunk: { std::vector<char> chunk = readRaw(m_controlRequestFd); setVSTChunk(chunk); break; } //Deryabin Andrew: vst chunks support: end code case RemotePluginNoOpcode: break; case RemotePluginReset: reset(); break; default: std::cerr << "WARNING: RemotePluginServer::dispatchControlEvents: unexpected opcode " << opcode << std::endl; } }
size_t CTCDecoderLayer::getFloatingPointOperationCount() const
{
    // The decoder's cost is accounted as quadratic in the input size.
    size_t inputCount = getInputCount();

    return inputCount * inputCount;
}
size_t SoftmaxLayer::getFloatingPointOperationCount() const
{
    // Two floating point operations are accounted per input element.
    return getInputCount() * 2;
}
size_t NeuralNetwork::getOutputCount() const
{
    // The network's output size is a function of its input size.
    const size_t inputCount = getInputCount();

    return getOutputCountForInputCount(inputCount);
}
/* Store the parameters and results of the just-finished contraction in the
 * table's cache so that checkCache() can recognize and reuse them later.
 * Each of the three cached arrays (input characters, output cells, input
 * offsets) is grown on demand; a failed allocation records a count of zero
 * for that section and skips ahead to the next one.
 */
static void updateCache (BrailleContractionData *bcd) {
  /* Cache the input characters. */
  {
    unsigned int count = getInputCount(bcd);

    if (count > bcd->table->cache.input.size) {
      /* Round the capacity up (count | 0X7F) so the buffer grows in
       * 128-entry steps rather than on every size change.
       */
      unsigned int newSize = count | 0X7F;
      wchar_t *newCharacters = malloc(ARRAY_SIZE(newCharacters, newSize));

      if (!newCharacters) {
        logMallocError();
        bcd->table->cache.input.count = 0;
        goto inputDone;
      }

      if (bcd->table->cache.input.characters) free(bcd->table->cache.input.characters);
      bcd->table->cache.input.characters = newCharacters;
      bcd->table->cache.input.size = newSize;
    }

    wmemcpy(bcd->table->cache.input.characters, bcd->input.begin, count);
    bcd->table->cache.input.count = count;
    bcd->table->cache.input.consumed = getInputConsumed(bcd);
  }
inputDone:

  /* Cache the output cells. */
  {
    unsigned int count = getOutputConsumed(bcd);

    if (count > bcd->table->cache.output.size) {
      unsigned int newSize = count | 0X7F;
      unsigned char *newCells = malloc(ARRAY_SIZE(newCells, newSize));

      if (!newCells) {
        logMallocError();
        bcd->table->cache.output.count = 0;
        goto outputDone;
      }

      if (bcd->table->cache.output.cells) free(bcd->table->cache.output.cells);
      bcd->table->cache.output.cells = newCells;
      bcd->table->cache.output.size = newSize;
    }

    memcpy(bcd->table->cache.output.cells, bcd->output.begin, count);
    bcd->table->cache.output.count = count;
    bcd->table->cache.output.maximum = getOutputCount(bcd);
  }
outputDone:

  /* Cache the input offsets, but only when the caller supplied them. */
  if (bcd->input.offsets) {
    unsigned int count = getInputCount(bcd);

    if (count > bcd->table->cache.offsets.size) {
      unsigned int newSize = count | 0X7F;
      int *newArray = malloc(ARRAY_SIZE(newArray, newSize));

      if (!newArray) {
        logMallocError();
        bcd->table->cache.offsets.count = 0;
        goto offsetsDone;
      }

      if (bcd->table->cache.offsets.array) free(bcd->table->cache.offsets.array);
      bcd->table->cache.offsets.array = newArray;
      bcd->table->cache.offsets.size = newSize;
    }

    memcpy(bcd->table->cache.offsets.array, bcd->input.offsets, ARRAY_SIZE(bcd->input.offsets, count));
    bcd->table->cache.offsets.count = count;
  } else {
    bcd->table->cache.offsets.count = 0;
  }
offsetsDone:

  /* Remember the remaining translation parameters that checkCache() tests. */
  bcd->table->cache.cursorOffset = makeCachedCursorOffset(bcd);
  bcd->table->cache.expandCurrentWord = prefs.expandCurrentWord;
  bcd->table->cache.capitalizationMode = prefs.capitalizationMode;
}
int main(int nargs, char** args) { (void)nargs; (void)args; int evalPoints = getEvalPointsCount(); int inputs = getInputCount(); int outputs = getOutputCount(); NUMBER* x = new NUMBER[inputs]; NUMBER* y = new NUMBER[outputs]; NUMBER::TapeType& tape = NUMBER::getGlobalTape(); codi::TapeVectorHelper<NUMBER, Gradient> vh; tape.setActive(); for(int curPoint = 0; curPoint < evalPoints; ++curPoint) { std::cout << "Point " << curPoint << " : {"; for(int i = 0; i < inputs; ++i) { if(i != 0) { std::cout << ", "; } double val = getEvalPoint(curPoint, i); std::cout << val; x[i] = (NUMBER)(val); } std::cout << "}\n"; for(int i = 0; i < outputs; ++i) { y[i] = 0.0; } int runs = outputs / DIM; if(outputs % DIM != 0) { runs += 1; } std::vector<std::vector<double> > jac(outputs); for(int curOut = 0; curOut < runs; ++curOut) { size_t curSize = DIM; if((curOut + 1) * DIM > (size_t)outputs) { curSize = outputs % DIM; } for(int i = 0; i < inputs; ++i) { tape.registerInput(x[i]); } func(x, y); for(int i = 0; i < outputs; ++i) { tape.registerOutput(y[i]); } Gradient grad; for(size_t curDim = 0; curDim < curSize; ++curDim) { grad[curDim] = 1.0; vh.setGradient(y[curOut * DIM + curDim].getGradientData(), grad); grad[curDim] = 0.0; } vh.evaluate(); for(size_t curDim = 0; curDim < curSize; ++curDim) { for(int curIn = 0; curIn < inputs; ++curIn) { jac[curOut * DIM + curDim].push_back(vh.getGradient(x[curIn].getGradientData())[curDim]); } } vh.clearAdjoints(); tape.reset(); } for(int curIn = 0; curIn < inputs; ++curIn) { for(int curOut = 0; curOut < outputs; ++curOut) { std::cout << curIn << " " << curOut << " " << jac[curOut][curIn] << std::endl; } } } }