void UseNodiscardCheck::registerMatchers(MatchFinder *Finder) {
  // This check only makes sense for C++ code at all.
  if (!getLangOpts().CPlusPlus)
    return;
  // The literal ``[[nodiscard]]`` spelling needs C++17; earlier dialects must
  // supply a macro or ``__attribute__`` spelling via the ReplacementString
  // option instead.
  if (NoDiscardMacro == "[[nodiscard]]" && !getLangOpts().CPlusPlus17)
    return;

  // Callable-object class types whose presence in a parameter list suggests
  // the method is used for its side effects rather than its return value.
  auto CallableType =
      cxxRecordDecl(hasAnyName("::std::function", "::boost::function"));

  // Methods excluded from the diagnostic: anything that returns nothing,
  // never returns, is already annotated, is template/operator machinery, or
  // takes parameters that imply an out-parameter or callback style.
  auto ExcludedMethods = anyOf(
      returns(voidType()), isNoReturn(), isOverloadedOperator(), isVariadic(),
      hasTemplateReturnType(), hasClassMutableFields(), isConversionOperator(),
      hasAttr(clang::attr::WarnUnusedResult),
      hasType(isInstantiationDependentType()),
      hasAnyParameter(anyOf(parmVarDecl(anyOf(hasType(CallableType),
                                              hasType(references(CallableType)))),
                            hasType(isNonConstReferenceOrPointer()),
                            hasParameterPack())));

  // Match every non-void const method definition not already marked to warn
  // on an unused result.
  Finder->addMatcher(
      cxxMethodDecl(allOf(isConst(), isDefinitionOrInline(),
                          unless(ExcludedMethods)))
          .bind("no_discard"),
      this);
}
// Compile a trained NNet into a runnable form: for each non-input layer,
// JIT-compile (via LLVM) a function that computes one neuron's output as
// dot(input, weights) passed through the layer's activation, then build a
// RunLayer/RunNeuron work queue that applies that function per neuron.
// The result is registered in networkMap_ under `name`. Always returns true.
//
// NOTE(review): all RunNetwork/RunLayer/RunNeuron/Queue objects are raw
// `new` with no delete in sight here — presumably owned by networkMap_ /
// RunNetwork teardown elsewhere; confirm there is no leak on re-compile of
// the same name (insert would also silently fail on a duplicate key).
bool compile(const nstr& name, NNet& network, size_t threads){
  RunNetwork* runNetwork = new RunNetwork;
  // Layer 0 is the input layer; `inputLayer` tracks the previous layer as we
  // walk forward through the network.
  NNet::Layer* inputLayer = network.layer(0);
  size_t numLayers = network.numLayers();
  RunLayer* lastRunLayer = 0;
  // Compile every layer after the input layer.
  for(size_t l = 1; l < numLayers; ++l){
    RunLayer* runLayer = new RunLayer;
    // Per-layer work queue; each RunNeuron added below becomes one task.
    runLayer->queue = new Queue(threads);
    size_t inputLayerSize = inputLayer->size();
    NNet::Layer* layer = network.layer(l);
    size_t layerSize = layer->size();
    // Chain layers: this layer reads the previous layer's output buffer.
    // NOTE(review): for l == 1 inputVec/inputVecStart are left unset —
    // presumably the caller supplies the network input buffer; confirm.
    if(l > 1){
      runLayer->inputVecStart = lastRunLayer->outputVecStart;
      runLayer->inputVec = lastRunLayer->outputVec;
    }
    // Allocate an output buffer for interior layers only.
    // NOTE(review): the last layer gets no output buffer here — presumably
    // the caller provides the final output storage; confirm against run().
    if(l < numLayers - 1){
      double* outputVecPtrStart;
      double* outputVecPtr;
      // allocVector appears to hand back both the raw allocation start and
      // a (possibly aligned) usable pointer — TODO confirm its contract.
      allocVector(layerSize, &outputVecPtrStart, &outputVecPtr);
      runLayer->outputVecStart = outputVecPtrStart;
      runLayer->outputVec = outputVecPtr;
    }
    // Build the JIT'd function signature:
    //   void f(<N x double>* input, <N x double>* weights,
    //          double* output, i32 outputIndex)
    // where N == inputLayerSize.
    TypeVec args;
    args.push_back(getPointer(doubleVecType(inputLayerSize)));
    args.push_back(getPointer(doubleVecType(inputLayerSize)));
    args.push_back(getPointer(doubleType()));
    args.push_back(int32Type());
    FunctionType* ft = FunctionType::get(voidType(), args, false);
    // NOTE(review): every layer's function is created with the same name
    // (`name.c_str()`); LLVM will uniquify clashing symbols — confirm this
    // is intended rather than per-layer naming.
    Function* f = Function::Create(ft, Function::ExternalLinkage, name.c_str(), &module_);
    BasicBlock* entry = BasicBlock::Create(context_, "entry", f);
    builder_.SetInsertPoint(entry);
    // Name the four incoming arguments for IR readability.
    auto aitr = f->arg_begin();
    Value* inputVecPtr = aitr;
    inputVecPtr->setName("input_vec_ptr");
    ++aitr;
    Value* weightVecPtr = aitr;
    weightVecPtr->setName("weight_vec_ptr");
    ++aitr;
    Value* outputVecPtr = aitr;
    outputVecPtr->setName("output_vec_ptr");
    ++aitr;
    Value* outputIndex = aitr;
    outputIndex->setName("output_index");
    // Emit: elementwise multiply of the input and weight vectors, then a
    // scalar horizontal sum of the products (the neuron's pre-activation).
    Value* inputVec = builder_.CreateLoad(inputVecPtr, "input_vec");
    Value* weightVec = builder_.CreateLoad(weightVecPtr, "weight_vec");
    Value* mulVec = builder_.CreateFMul(inputVec, weightVec, "mul_vec");
    Value* sumActivation = builder_.CreateExtractElement(mulVec, getInt32(0), "sum_elem");
    // This loop runs at compile time: it unrolls the reduction into
    // inputLayerSize-1 straight-line FAdd instructions in the emitted IR.
    for(size_t i = 1; i < inputLayerSize; ++i){
      Value* elem = builder_.CreateExtractElement(mulVec, getInt32(i), "sum_elem");
      sumActivation = builder_.CreateFAdd(sumActivation, elem, "sum_activation");
    }
    // NOTE(review): activation is taken from neuron(0) only — this assumes
    // every neuron in the layer shares one activation function; confirm.
    Value* output = getActivationOutput(layer->neuron(0), sumActivation);
    // Store the activated value at output[outputIndex] and return.
    Value* outputElement = builder_.CreateGEP(outputVecPtr, outputIndex, "out_elem");
    builder_.CreateStore(output, outputElement);
    builder_.CreateRetVoid();
    runLayer->f = f;
    // JIT-compile now and cache the native entry point for the run loop.
    runLayer->fp = (void (*)(void*, void*, void*, int)) engine_->getPointerToFunction(f);
    // One RunNeuron per neuron in this layer: each carries its own weight
    // vector (copied out of the NNet) and its slot in the output buffer.
    for(size_t j = 0; j < layerSize; ++j){
      NNet::Neuron* nj = layer->neuron(j);
      RunNeuron* runNeuron = new RunNeuron;
      runNeuron->layer = runLayer;
      runNeuron->outputIndex = j;
      double* weightVecPtrStart;
      // NOTE(review): this `weightVecPtr` (double*) shadows the LLVM Value*
      // of the same name above — legal but easy to misread.
      double* weightVecPtr;
      allocVector(inputLayerSize, &weightVecPtrStart, &weightVecPtr);
      runNeuron->weightVecStart = weightVecPtrStart;
      runNeuron->weightVec = weightVecPtr;
      // Copy the incoming-edge weights: weight from input neuron i to nj.
      for(size_t i = 0; i < inputLayerSize; ++i){
        NNet::Neuron* ni = inputLayer->neuron(i);
        weightVecPtr[i] = nj->weight(ni);
      }
      runLayer->queue->add(runNeuron);
    }
    runNetwork->layerVec.push_back(runLayer);
    // Advance the sliding window: this layer feeds the next one.
    inputLayer = layer;
    lastRunLayer = runLayer;
  }
  networkMap_.insert(make_pair(name, runNetwork));
  return true;
}