void Programme::addVariable(Variable& var) {
    // Register 'var' in this programme; refuse duplicates among both the
    // already-declared variables and the programme's arguments.
    const auto name = var.getNom();
    if (hasVar(variables, name)) {
        throw "La variable \""+name+"\" a déjà été instanciée dans le programme \""+nom+"\".";
    }
    if (hasVar(arguments, name)) {
        throw "La variable \""+name+"\" a déjà été instanciée dans le programme \""+nom+"\" en tant qu'argument.";
    }
    variables.push_back(var);
}
ZCsl::Variable* ZCsl::Block::findVar(const ZString& aVarName, ZBoolean aFail)
{
   ZFUNCTRACE_DEVELOP("ZCsl::Block::findVar(const ZString& aVarName, ZBoolean aFail)");
   // Walk the intrusive variable list (newest first) looking for a match.
   for (Variable* cur = iVars; cur; cur = cur->iPrev)
      if (cur->match(aVarName))
         return cur;
   // Not found: optionally raise through the parent, otherwise hand back null.
   if (aFail)
      iParent->throwExcept(msgVarNotFound, aVarName);
   return 0;
} // findVar
void Variables::removeGlobal(const Symbol& _key, int _iLevel) { Variable* pVar = getOrCreate(_key); if (pVar->isGlobal()) { pVar->setGlobal(false); pVar->setGlobalValue(NULL); } remove(pVar, _iLevel); }
void MmaSink::put(const Variable& x) { #ifdef DEBUG_MMASINK GBStream << "sink:variable " << this << x.cstring() << '\n'; #endif MLPutFunction(d_mlink,"ToExpression",1L); MLPutString(d_mlink,x.cstring()); #ifdef DEBUG_MMASINK checkforerror(); #endif ++d_count; };
std::string process_impl(Variable const & e, bool /*use_parenthesis*/, rt_latex_translator<InterfaceType> const & /*translator*/) const
{
    // Prefer a user-registered LaTeX string for this variable id …
    std::map<id_type, std::string>::const_iterator entry = variable_strings_.find(e.id());
    if (entry != variable_strings_.end())
        return entry->second;
    // … otherwise fall back to a generic subscripted symbol.
    std::stringstream fallback;
    fallback << " x_{" << e.id() << "} ";
    return fallback.str();
}
void setNeighbours(NodeSet *left, NodeSet *right) {
    // Record both neighbour sets, then register this node back with every
    // member of each set as its opposite-side neighbour.
    leftNeighbours = left;
    rightNeighbours = right;
    for (NodeSet::iterator it = left->begin(); it != left->end(); ++it) {
        (*it)->addRightNeighbour(this);
    }
    for (NodeSet::iterator it = right->begin(); it != right->end(); ++it) {
        (*it)->addLeftNeighbour(this);
    }
}
// Look up a member of this script array.
// - Numeric id: treated as an index into the backing std::list, reached by
//   walking with std::advance (O(n)).
//   NOTE(review): there is no bounds check — an index >= size(), or a
//   negative number wrapped by the size_t cast, advances past end() and
//   dereferences an invalid iterator; confirm callers validate indices.
// - The string "length": returns the element count.
// - Anything else: deferred to the generic Object member lookup.
Variable Array::getMember(const Variable &id) {
    if (id.isNumber()) {
        std::list<Variable>::iterator iter = _values.begin();
        std::advance(iter, static_cast<size_t>(id.asNumber()));
        return *iter;
    }
    if (id.isString() && id.asString() == "length")
        return Variable((unsigned long)_values.size());
    return Object::getMember(id);
}
static bool isRecordTypeWithoutSideEffects(const Variable& var) { // a type that has no side effects (no constructors and no members with constructors) /** @todo false negative: check base class for side effects */ /** @todo false negative: check constructors for side effects */ if (var.type() && var.type()->numConstructors == 0 && (var.type()->varlist.empty() || var.type()->needInitialization == Scope::True) && var.type()->derivedFrom.empty()) return true; return false; }
/// <summary>
/// The example shows
/// - how to load a pretrained model and evaluate several nodes by combining their outputs
/// Note: The example uses the model trained by <CNTK>/Examples/Image/Classification/ResNet/Python/TrainResNet_CIFAR10.py
/// Please see README.md in <CNTK>/Examples/Image/Classification/ResNet about how to train the model.
/// The parameter 'modelFilePath' specifies the path to the model.
/// </summary>
void EvaluateCombinedOutputs(const wchar_t* modelFilePath, const DeviceDescriptor& device)
{
    printf("\n===== Evaluate combined outputs =====\n");

    // Load the model.
    FunctionPtr modelFunc = Function::Load(modelFilePath, device);

    // Get node of interest (an intermediate pooling layer inside the model graph).
    std::wstring intermediateLayerName = L"final_avg_pooling";
    FunctionPtr interLayerPrimitiveFunc = modelFunc->FindByName(intermediateLayerName);
    Variable poolingOutput = interLayerPrimitiveFunc->Output();

    // Create a function which combines outputs from the node "final_avg_pooling" and the final layer of the model,
    // so that a single Evaluate() call produces both.
    FunctionPtr evalFunc = Combine( { modelFunc->Output(), poolingOutput });
    Variable inputVar = evalFunc->Arguments()[0];

    // Prepare input data.
    // For evaluating an image, you first need to perform some image preprocessing to make sure that the input image has the correct size and layout
    // that match the model inputs.
    // Please note that the model used by this example expects the CHW image layout.
    // inputVar.Shape[0] is image width, inputVar.Shape[1] is image height, and inputVar.Shape[2] is channels.
    // For simplicity and avoiding external dependencies, we skip the preprocessing step here, and just use some artificially created data as input.
    std::vector<float> inputData(inputVar.Shape().TotalSize());
    for (size_t i = 0; i < inputData.size(); ++i)
    {
        inputData[i] = static_cast<float>(i % 255);
    }

    // Create input value and input data map.
    ValuePtr inputVal = Value::CreateBatch(inputVar.Shape(), inputData, device);
    std::unordered_map<Variable, ValuePtr> inputDataMap = { { inputVar, inputVal } };

    // Create output data map. Using null as Value to indicate using system allocated memory.
    // Alternatively, create a Value object and add it to the data map.
    Variable modelOutput = evalFunc->Outputs()[0];
    Variable interLayerOutput = evalFunc->Outputs()[1];
    std::unordered_map<Variable, ValuePtr> outputDataMap = { { modelOutput, nullptr }, { interLayerOutput, nullptr } };

    // Start evaluation on the device.
    evalFunc->Evaluate(inputDataMap, outputDataMap, device);

    // Get evaluate result as dense outputs; each entry is copied out of the
    // framework-owned Value into plain nested vectors before printing.
    for(auto & outputVariableValuePair : outputDataMap)
    {
        auto variable = outputVariableValuePair.first;
        auto value = outputVariableValuePair.second;
        std::vector<std::vector<float>> outputData;
        value->CopyVariableValueTo(variable, outputData);
        PrintOutput<float>(variable.Shape().TotalSize(), outputData);
    }
}
// Translate a parse-tree "value" node into a freshly built Variable.
// Each grammar production maps to one concrete Variable subtype; the caller
// receives ownership of (a reference to) the returned object.  An unknown
// production id leaves ret at 0.
Variable * VariableScript::_build_value(LR_Node *node) {
    Variable *ret = 0;
    switch (node->productionId()) {
        case internal::PROD_value_0:
            // "null" literal: the shared singleton is reused, so take a ref
            // explicitly (vnnew presumably returns an already-grabbed object — confirm).
            ret = Variable_null::instance();
            ret->grab();
            break;
        case internal::PROD_value_1: ret = vnnew Variable_bool(true); break;
        case internal::PROD_value_2: ret = vnnew Variable_bool(false); break;
        // Numeric literals: the token carries the already-parsed payload.
        case internal::PROD_value_3: ret = vnnew Variable_int32(node->child(0)->token()->int32); break;
        case internal::PROD_value_4: ret = vnnew Variable_int64(node->child(0)->token()->int64); break;
        case internal::PROD_value_5: ret = vnnew Variable_float32(node->child(0)->token()->float32); break;
        case internal::PROD_value_6: ret = vnnew Variable_float64(node->child(0)->token()->float64); break;
        case internal::PROD_value_7: ret = vnnew Variable_string(node->child(0)->token()->text); break;
        // Composite values are delegated to their dedicated builders.
        case internal::PROD_value_8: ret = _build_reference(node->child(0)); break;
        case internal::PROD_value_9: ret = _build_array(node->child(0)); break;
        case internal::PROD_value_10: {
            Variable_object *object = vnnew Variable_object();
            _build_object(node->child(0), object);
            ret = object;
            break;
        }
    }
    return ret;
}
bool VariablesInfo::SetVarValue(string varName, float varValue)
{
    // Linear scan for the first variable with a matching name; update it
    // and report success, or report failure when nothing matches.
    for (unsigned idx = 0; idx < mVariables.size(); idx++)
    {
        if (mVariables[idx]->GetVarName() == varName)
        {
            mVariables[idx]->SetVarValue(varValue);
            return true;
        }
    }
    return false;
}
// Compare this variable with 'aVariable', dispatching on the other
// variable's runtime type; UNDEFCMP is returned for any unhandled type.
// NOTE(review): the original conditions were tautological
// ("type == FLOAT || type == FLOAT", "type == INT && type == INT",
// "type == STRING || type == STRING") and reduced to the single tests
// below, which preserve the original behavior exactly.  The author may
// have intended to test *this* variable's type as well — confirm against
// the call sites before widening the conditions.
Variable::commandPrompResults Variable::CompareWith(const Variable &aVariable) const
{
    const Variable::Type type = aVariable.getType();
    if (type == FLOAT) {
        return CompareWithFloat(aVariable.getFloat());
    }
    if (type == INT) {
        return CompareWithInt(aVariable.getInteger());
    }
    if (type == STRING) {
        return CompareWithString(aVariable.getString());
    }
    return UNDEFCMP;
}
//----------------------------------------------------------------------------- // Returns colour data from the named variable. //----------------------------------------------------------------------------- D3DCOLORVALUE *Script::GetColourData( char *variable ) { m_variables->Iterate( true ); while( m_variables->Iterate() != NULL ) { Variable* pVar = m_variables->GetCurrent(); if( strcmp( pVar->GetName(), variable ) == 0 ) return (D3DCOLORVALUE*)m_variables->GetCurrent()->GetData(); } return NULL; }
bool Registry::AddVariableToCurrentImportList(Variable* import_var)
{
    // Pair the next export slot of the module currently being imported with
    // 'import_var'.  Returns true on error (no slot left), false on success.
    Module* submod = CurrentModule()->GetVariable(m_currentImportedModule)->GetModule();
    Variable* var = submod->GetNextExportVariable();
    if (var == NULL) {
        string error = "Unable to add variable '";
        error += import_var->GetNameDelimitedBy(GetCC());
        error += "' when creating an instance of the module '";
        error += submod->GetModuleName();
        error += "' because this module is defined to have only ";
        error += SizeTToString(submod->GetNumExportVariables());
        error += " variable(s) definable by default in its construction.";
        SetError(error);
        return true;
    }
    var->Synchronize(import_var);
    return false;
}
// Read the next item from the input into 'x'.
// A symbol token is read via the ISource base overload and its character
// text assigned to x; a function token is slurped wholesale into a string
// accumulator.  Any other type is treated as a programming error (DBG()).
void IISource::get(Variable& x) {
    int type = getType();
    if(type==GBInputNumbers::s_IOSYMBOL) {
        symbolGB y;
        // Cast to the base so the symbolGB overload of get() is chosen
        // (avoids recursing into this Variable overload).
        ((ISource *)this)->get(y);
        x.assign(y.value().chars());
    }
    else if(type==GBInputNumbers::s_IOFUNCTION) {
        StringAccumulator acc;
        getAnything(acc);
        x.assign(acc.chars());
    }
    else
        DBG();
};
// Set the formula of every component variable in this reactant list to
// 'form'.  Returns true as soon as any SetFormula call reports an error,
// false when all components were updated (or there were none).
bool ReactantList::SetComponentFormulasTo(Formula form)
{
    // Preserve the original behavior for an empty list: no registry lookup,
    // no assert, just success.
    if (m_components.empty()) return false;

    // The owning module is loop-invariant; the original re-queried the
    // registry on every iteration.
    Module* module = g_registry.GetModule(m_module);
    assert(module != NULL);

    for (size_t component = 0; component < m_components.size(); component++) {
        Variable* var = module->GetVariable(m_components[component].second);
        if (var != NULL) {
            if (var->SetFormula(&form)) return true; // propagate the error
        }
    }
    return false;
}
void Variable::operator=(const Variable &aVariable)
{
    // Copy the payload that matches the source's runtime type; any
    // unrecognised type leaves this variable cleared.
    switch (aVariable.getType())
    {
    case INT:
        set(aVariable.getInteger());
        break;
    case FLOAT:
        set(aVariable.getFloat());
        break;
    case STRING:
        set(aVariable.getString());
        break;
    default:
        clear();
        break;
    }
}
// Evaluate a UnaryExpression subtree.
// Shape: either  UnaryOperator UnaryExpression  (operator child has a
// sibling) or a bare PostfixExpression.  With an operator present, the
// operand subtree is evaluated first and the operator applied through
// Expression::opUnary; the intermediate operand value is released after use.
Variable* Execution::exUnaryExpression(TreeNode* t){
    this->testNode(t, UnaryExpression, true); // sanity-check the node kind
    TreeNode* c = t->child;
    if (c->sibling != NULL){
        string op = exUnaryOperator(c);
        // NOTE(review): 'UnaryExpression' is used both as the node-kind
        // constant above and as a callable here — presumably a member with
        // the same name shadowed at different scopes; confirm.
        Variable* tmp = UnaryExpression(c->sibling);
        Variable* res = Expression::opUnary(op, tmp);
        tmp->release(); // drop the intermediate operand value
        return res;
    }
    else{
        return PostfixExpression(c);
    }
}
void BuildItem::addToVariable( const std::string &name, const Variable &val )
{
    auto existing = myVariables.find( name );
    if ( existing == myVariables.end() )
    {
        // First assignment: copy the variable in wholesale and, when it
        // uses a tool-flag transform, carry its tool tag across as well.
        auto inserted = myVariables.emplace( std::make_pair( name, Variable( val ) ) );
        if ( val.useToolFlagTransform() )
            inserted.first->second.setToolTag( val.getToolTag() );
    }
    else
    {
        // Already present: append the incoming values at the end.
        existing->second.moveToEnd( val.values() );
    }
}
FunctionPtr FullyConnectedDNNLayer(Variable input, size_t outputDim, const DeviceDescriptor& device, const std::function<FunctionPtr(const FunctionPtr&)>& nonLinearity)
{
    // Dense layer: nonLinearity(W * input + b) for a rank-1 input.
    assert(input.Shape().NumAxes() == 1);
    size_t inputDim = input.Shape()[0];

    // Weight matrix, initialised uniformly in [-0.5, 0.5].
    auto weights = Parameter(NDArrayView::RandomUniform<float>({ outputDim, inputDim }, -0.5, 0.5, 1, device));
    auto affine = Times(weights, input);

    // Bias vector, initialised to zero.
    auto bias = Parameter({ outputDim }, 0.0f, device);
    auto preActivation = Plus(bias, affine);

    return nonLinearity(preActivation);
}
void AntimonyEvent::Convert(Variable* converted, Variable* cf)
{
    // Rewrite the trigger, delay and priority formulas for the conversion.
    m_trigger.Convert(converted, cf);
    m_delay.Convert(converted, cf);
    m_priority.Convert(converted, cf);

    // Rewrite each event assignment; when an assignment's target resolves
    // to the converted variable itself, its formula also gains the
    // conversion factor.
    for (size_t fr = 0; fr < m_formresults.size(); fr++)
    {
        Variable* target = g_registry.GetModule(m_module)->GetVariable(m_varresults[fr]);
        if (converted->GetSameVariable() == target->GetSameVariable())
        {
            m_formresults[fr].AddConversionFactor(cf);
        }
        m_formresults[fr].Convert(converted, cf);
    }
}
Variable* Watches::add(const QString& expression)
{
    // Watches can only be created while a debug session is running.
    if (!hasStartedSession())
        return 0;

    Variable* watch = currentSession()->variableController()->createVariable(
        model(), this, expression);
    appendChild(watch);
    watch->attachMaybe();

    // Expand the watches container the first time something is added to it.
    if (childCount() == 1 && !isExpanded())
        setExpanded(true);

    return watch;
}
void Tabla::AsignarValor (const char *Id, Token * UnToken, unsigned *Indices, int Dim) { Variable *Aux = Buscar (Id); if (!Aux) Crear (Id, UnToken, 0 /*VengoDe */ , Indices, Dim); else Aux->AsignarValor (UnToken, Indices, Dim); return; }
// Destructor: tears down the whole singly linked variable list.
// NOTE(review): the extra 'delete Aux->GetCampo()' runs only when GetFU()
// is set and GetFP() is not — presumably "owns its field" vs "field is a
// formal parameter"; confirm the flags' meaning before touching this.
Tabla::~Tabla ()
{
  ContadorTabla--; // one live table instance fewer
  Variable *Aux;
  while (Inicio)
    {
      Aux = Inicio;
      Inicio = Inicio->GetSig ();
      if ((Aux->GetFU ()) && (!Aux->GetFP ()))
        delete Aux->GetCampo ();
      delete Aux;
    }
}
Token * Tabla::Leer (const char *Id, unsigned *Indices, int Dim)
{
  // Fetch the value of variable 'Id'; on an unknown identifier, report
  // through the message box and return 0.
  Variable *Hallada = Buscar (Id);
  if (Hallada)
    return Hallada->Leer (Indices, Dim);
  Buzon.SetIdentificadorAsociado (Id);
  Buzon.Error (VARIABLE_NO_EXISTE);
  return 0;
}
Variable * Tabla::Buscar (const char *Id)
{
  /* TODO: speed this up with hashing */
  // Case-insensitive linear scan of the singly linked variable list.
  for (Variable *Actual = Inicio; Actual; Actual = Actual->GetSig ())
    if (!strcasecmp (Id, Actual->GetIdentificador ()))
      return Actual;
  return 0;
}
std::set<std::string> Expression::getNameSet() const { std::set<std::string> nameSet; for (auto const& term : termList_) { // I dont like this cast, I prefer the option to have // a method to check if variable or constant, or a hasName(), // but it is not in the design Variable* var; if (var = dynamic_cast<Variable*>(term.get())) { nameSet.insert(var->getName()); } } return nameSet; }
void BDHServerController::addConfigurationVariable(QXmlAttributes a)
{
    // Build a Variable from the XML attributes of a configuration element
    // and register it with the server configuration.
    Variable parsed;
    parsed.setId(a.value("id").toUInt());
    parsed.setDescription(a.value("name").toStdString());
    parsed.setDataType((quint8)a.value("dataType").toUShort());
    parsed.setSampleTime(a.value("requestTime").toUInt());
    m_configuration.addVariable(parsed);
}
// Trains a tiny recurrent model (built by 'model_maker') to sum a sequence
// of random 0/1 inputs.  Returns true once the running MSE drops below
// 1e-2, false if that has not happened after max_epoch iterations.
bool test_RNN_xor(Func&& model_maker, bool cuda = false) {
  auto nhid = 32; // hidden width shared by all three layers
  auto model = std::make_shared<SimpleContainer>();
  auto l1 = model->add(Linear(1, nhid), "l1");
  auto rnn = model->add(model_maker(nhid), "rnn");
  auto lo = model->add(Linear(nhid, 1), "lo");
  auto optim = Adam(model, 1e-2).make();
  // Forward pass: embed each scalar timestep, run the RNN, read the
  // prediction off the last timestep's output.
  auto forward_op = [&](Variable x) {
    auto T = x.size(0);
    auto B = x.size(1);
    x = x.view({T * B, 1});
    x = l1->forward({x})[0].view({T, B, nhid}).tanh_();
    x = rnn->forward({x})[0][T - 1]; // last timestep only
    x = lo->forward({x})[0];
    return x;
  };
  if (cuda) {
    model->cuda();
  }
  float running_loss = 1;
  int epoch = 0;
  auto max_epoch = 1500;
  while (running_loss > 1e-2) {
    auto bs = 16U;  // batch size
    auto nlen = 5U; // sequence length
    const auto backend = cuda ? at::kCUDA : at::kCPU;
    // Random 0/1 sequences; the label is the per-sequence sum over time.
    auto inp = at::rand({nlen, bs, 1}, backend).round().toType(torch::kFloat32);
    auto lab = inp.sum(0);
    auto x = autograd::make_variable(inp, /*requires_grad=*/true);
    auto y = autograd::make_variable(lab);
    x = forward_op(x);
    Variable loss = at::mse_loss(x, y);
    optim->zero_grad();
    loss.backward();
    optim->step();
    // Exponential moving average of the loss decides convergence.
    running_loss = running_loss * 0.99 + loss.toCFloat() * 0.01;
    if (epoch > max_epoch) {
      return false;
    }
    epoch++;
  }
  return true;
};
// Transfer a nodal solution between (possibly different) meshes using
// mesh-free inverse-distance interpolation: sample 'from_var' at every
// local node of the source mesh, gather globally, then project the
// interpolant into 'to_var''s system.
void MeshfreeSolutionTransfer::transfer(const Variable & from_var, const Variable & to_var)
{
  libmesh_experimental();
  System * from_sys = from_var.system();
  System * to_sys = to_var.system();
  EquationSystems & from_es = from_sys->get_equation_systems();
  MeshBase & from_mesh = from_es.get_mesh();
  // NOTE(review): the constants 4 and 2 are presumably the number of
  // interpolation points and the distance power — confirm against the
  // InverseDistanceInterpolation constructor docs.
  InverseDistanceInterpolation<LIBMESH_DIM> idi (from_mesh.comm(), 4, 2);
  std::vector<Point> & src_pts (idi.get_source_points());
  std::vector<Number> & src_vals (idi.get_source_vals());
  // The interpolation is driven by a single named field variable.
  std::vector<std::string> field_vars;
  field_vars.push_back(from_var.name());
  idi.set_field_variables(field_vars);
  // We now will loop over every node in the source mesh
  // and add it to a source point list, along with the solution
  {
    MeshBase::const_node_iterator nd = from_mesh.local_nodes_begin();
    MeshBase::const_node_iterator end = from_mesh.local_nodes_end();
    for (; nd!=end; ++nd)
      {
        const Node * node = *nd;
        src_pts.push_back(*node);
        src_vals.push_back((*from_sys->solution)(node->dof_number(from_sys->number(),from_var.number(),0)));
      }
  }
  // We have only set local values - prepare for use by gathering remote data
  idi.prepare_for_use();
  // Create a MeshlessInterpolationFunction that uses our
  // InverseDistanceInterpolation object. Since each
  // MeshlessInterpolationFunction shares the same
  // InverseDistanceInterpolation object in a threaded environment we
  // must also provide a locking mechanism.
  Threads::spin_mutex mutex;
  MeshlessInterpolationFunction mif(idi, mutex);
  // project the solution
  to_sys->project_solution(&mif);
}