// Open an OSC bundle. The optional timetag is given as up to two float
// atoms whose values are summed (split across two floats for precision).
void m_bundleopen(int argc,const t_atom *argv)
{
    double t = 0;
    // FIX: guard with argc > 0. The original post-decrement test
    // `if(argc--)` left argc at -1 when argc started at 0, which made the
    // second test true and read one atom past the end of argv.
    if(argc > 0) { t += GetAFloat(*argv++); --argc; }
    if(argc > 0) { t += GetAFloat(*argv++); --argc; }

    osc::uint64 timetag = GetTimetag(t);

    FLEXT_ASSERT(packet);
    ++bundle;

    if(timetag <= 1)
        // here immediate timetag can also be 0... but do OSC a favor
        *packet << osc::BeginBundleImmediate;
    else
        *packet << osc::BeginBundle(timetag);
}
void regression::map(int argc, const t_atom *argv) { GRT::UINT numSamples = regression_data.getNumSamples(); GRT::Regressifier ®ressifier = get_Regressifier_instance(); if (numSamples == 0) { error("no observations added, use 'add' to add training data"); return; } if (regressifier.getTrained() == false) { error("data_typel has not been trained, use 'train' to train the data_typel"); return; } GRT::UINT numInputNeurons = regressifier.getNumInputFeatures(); GRT::VectorDouble query(numInputNeurons); if (argc < 0 || (unsigned)argc != numInputNeurons) { error("invalid input length, expected " + std::to_string(numInputNeurons) + " got " + std::to_string(argc)); } for (uint32_t index = 0; index < (uint32_t)argc; ++index) { double value = GetAFloat(argv[index]); query[index] = value; } bool success = regressifier.predict(query); if (success == false) { error("unable to map input"); return; } GRT::VectorDouble regression_data = regressifier.getRegressionData(); GRT::VectorDouble::size_type numOutputDimensions = regression_data.size(); if (numOutputDimensions != regressifier.getNumOutputDimensions()) { error("invalid output dimensions: " + std::to_string(numOutputDimensions)); return; } AtomList result; for (uint32_t index = 0; index < numOutputDimensions; ++index) { t_atom value_a; double value = regression_data[index]; SetFloat(value_a, value); result.Append(value_a); } ToOutList(0, result); }
void feature_extraction::map(int argc, const t_atom *argv) { GRT::VectorDouble input(argc); GRT::FeatureExtraction &feature_extractor = get_FeatureExtraction_instance(); if (argc <= 0 || (GRT::UINT)argc != feature_extractor.getNumInputDimensions()) { std::stringstream ss; ss << "invalid input length: " << argc << ", expected: " << feature_extractor.getNumInputDimensions(); error(ss.str()); return; } for (uint32_t index = 0; index < (uint32_t)argc; ++index) { double value = GetAFloat(argv[index]); input[index] = value; } bool success = feature_extractor.computeFeatures(input); if (success == false) { error("unable to map input"); return; } GRT::VectorDouble features = feature_extractor.getFeatureVector(); if (features.size() == 0 || features.size() != feature_extractor.getNumOutputDimensions()) { std::stringstream ss; ss << "unexpected output length: " << features.size() << ", expected: " << feature_extractor.getNumOutputDimensions(); error(ss.str()); return; } AtomList features_l; GRT::VectorDouble::iterator iterator; for (iterator = features.begin(); iterator != features.end(); iterator++) { t_atom feature_a; SetDouble(&feature_a, *iterator); features_l.Append(feature_a); } ToOutList(0, features_l); }
void ann::map(int argc, const t_atom *argv) { const data_type data_type = get_data_type(); GRT::UINT numSamples = data_type == LABELLED_CLASSIFICATION ? classification_data.getNumSamples() : regression_data.getNumSamples(); if (numSamples == 0) { flext::error("no observations added, use 'add' to add training data"); return; } if (grt_ann.getTrained() == false) { flext::error("model has not been trained, use 'train' to train the model"); return; } GRT::UINT numInputNeurons = grt_ann.getNumInputNeurons(); GRT::VectorDouble query(numInputNeurons); if (argc < 0 || (unsigned)argc != numInputNeurons) { flext::error("invalid input length, expected %d, got %d", numInputNeurons, argc); } for (uint32_t index = 0; index < (uint32_t)argc; ++index) { double value = GetAFloat(argv[index]); query[index] = value; } bool success = grt_ann.predict(query); if (success == false) { flext::error("unable to map input"); return; } if (grt_ann.getClassificationModeActive()) { const GRT::VectorDouble likelihoods = grt_ann.getClassLikelihoods(); const GRT::Vector<GRT::UINT> labels = classification_data.getClassLabels(); const GRT::UINT predicted = grt_ann.getPredictedClassLabel(); const GRT::UINT classification = predicted == 0 ? 
0 : get_class_id_for_index(predicted); if (likelihoods.size() != labels.size()) { flext::error("labels / likelihoods size mismatch"); } else if (probs) { AtomList probs_list; for (unsigned count = 0; count < labels.size(); ++count) { t_atom label_a; t_atom likelihood_a; SetFloat(likelihood_a, static_cast<float>(likelihoods[count])); SetInt(label_a, get_class_id_for_index(labels[count])); probs_list.Append(label_a); probs_list.Append(likelihood_a); } ToOutAnything(1, get_s_probs(), probs_list); } ToOutInt(0, classification); } else if (grt_ann.getRegressionModeActive()) { GRT::VectorDouble regression_data = grt_ann.getRegressionData(); GRT::VectorDouble::size_type numOutputDimensions = regression_data.size(); if (numOutputDimensions != grt_ann.getNumOutputNeurons()) { flext::error("invalid output dimensions: %d", numOutputDimensions); return; } AtomList result; for (uint32_t index = 0; index < numOutputDimensions; ++index) { t_atom value_a; double value = regression_data[index]; SetFloat(value_a, value); result.Append(value_a); } ToOutList(0, result); } }
void ann::add(int argc, const t_atom *argv) { if (get_data_type() != data_type::LABELLED_CLASSIFICATION) { ml::add(argc, argv); return; } // work around a bug in GRT where class labels must be contigious if (argc < 2) { flext::error("invalid input length, must contain at least 2 values"); return; } GRT::UINT numInputDimensions = classification_data.getNumDimensions(); GRT::UINT numOutputDimensions = 1; GRT::UINT combinedVectorSize = numInputDimensions + numOutputDimensions; if ((unsigned)argc != combinedVectorSize) { numInputDimensions = argc - numOutputDimensions; if (numInputDimensions < 1) { flext::error(std::string("invalid input length, expected at least " + std::to_string(numOutputDimensions + 1)).c_str()); return; } post("new input vector size, adjusting num_inputs to " + std::to_string(numInputDimensions)); set_num_inputs(numInputDimensions); } GRT::VectorDouble inputVector(numInputDimensions); GRT::VectorDouble targetVector(numOutputDimensions); for (uint32_t index = 0; index < (unsigned)argc; ++index) { float value = GetAFloat(argv[index]); if (index < numOutputDimensions) { targetVector[index] = value; } else { inputVector[index - numOutputDimensions] = value; } } GRT::UINT label = get_index_for_class((GRT::UINT)targetVector[0]); assert(label > 0); // if ((double)label != targetVector[0]) // { // flext::error("class label must be a positive integer"); // return; // } // // if (label == 0) // { // flext::error("class label must be non-zero"); // return; // } classification_data.addSample(label, inputVector); }
virtual bool CbMethodResort(int inlet,const t_symbol *sym,int argc,const t_atom *argv) { const char *dst = GetString(sym); if(*dst != '/') return false; FLEXT_ASSERT(packet); { // treat destination string const char *var = strchr(dst,'%'); *packet << osc::BeginMessage(var?Convert(dst,var,argc,argv).c_str():dst); } while(argc) { if(IsSymbol(*argv)) { const char *hdr = GetString(*argv++); --argc; const char *var = strchr(hdr,'%'); if(var) { // variable found in string char typetag = var[1]; if(!typetag) // % is only character *packet << "%"; else if(hdr != var || var[2]) // variable not in front, or string more than 2 chars long -> substitute variables *packet << Convert(hdr,var,argc,argv).c_str(); else { // standalone switch(typetag) { case osc::TRUE_TYPE_TAG: *packet << true; break; case osc::FALSE_TYPE_TAG: *packet << false; break; case osc::NIL_TYPE_TAG: *packet << osc::Nil; break; case osc::INFINITUM_TYPE_TAG: *packet << osc::Infinitum; break; case osc::INT32_TYPE_TAG: { osc::int32 z = (argc--)?GetAInt(*argv++):0; *packet << z; break; } case osc::FLOAT_TYPE_TAG: { float z = (argc--)?GetAFloat(*argv++):0; *packet << z; break; } case osc::CHAR_TYPE_TAG: { Symbol s = (argc--)?GetASymbol(*argv++):NULL; *packet << (s?*GetString(s):'\0'); break; } case osc::RGBA_COLOR_TYPE_TAG: { osc::uint32 r = (argc--)?(GetAInt(*argv++)&0xff):0; osc::uint32 g = (argc--)?(GetAInt(*argv++)&0xff):0; osc::uint32 b = (argc--)?(GetAInt(*argv++)&0xff):0; osc::uint32 a = (argc--)?(GetAInt(*argv++)&0xff):0; *packet << osc::RgbaColor((r<<24)+(g<<16)+(b<<8)+a); break; } case osc::MIDI_MESSAGE_TYPE_TAG: { osc::uint32 channel = (argc--)?(GetAInt(*argv++)&0xff):0; osc::uint32 status = (argc--)?(GetAInt(*argv++)&0xff):0; osc::uint32 data1 = (argc--)?(GetAInt(*argv++)&0xff):0; osc::uint32 data2 = (argc--)?(GetAInt(*argv++)&0xff):0; *packet << osc::MidiMessage((channel<<24)+(status<<16)+(data1<<8)+data2); break; } case osc::INT64_TYPE_TAG: { osc::int64 z = 0; if(argc--) z += GetAInt(*argv++); 
if(argc--) z += GetAInt(*argv++); *packet << z; break; } case osc::TIME_TAG_TYPE_TAG: { double z = 0; if(argc--) z += GetAFloat(*argv++); if(argc--) z += GetAFloat(*argv++); *packet << osc::TimeTag(GetTimetag(z)); break; } case osc::DOUBLE_TYPE_TAG: { double z = 0; if(argc--) z += GetAFloat(*argv++); if(argc--) z += GetAFloat(*argv++); *packet << z; break; } case osc::STRING_TYPE_TAG: { Symbol s = (argc--)?GetASymbol(*argv++):NULL; *packet << (s?GetString(s):""); break; } case osc::SYMBOL_TYPE_TAG: { Symbol s = (argc--)?GetASymbol(*argv++):NULL; *packet << osc::Symbol(s?GetString(s):""); break; } case osc::BLOB_TYPE_TAG: post("%s %s - Blob type not supported",thisName(),GetString(thisTag())); break; default: post("%s %s - Unknown type tag %s",thisName(),GetString(thisTag()),typetag); } } } else *packet << osc::Symbol(hdr); } else if(CanbeFloat(*argv)) *packet << GetAFloat(*argv++),--argc; else { post("%s %s - Invalid atom type",thisName(),GetString(thisTag())); ++argv,--argc; } } *packet << osc::EndMessage; if(!bundle && autosend) Send(true); return true; }