Example 1
 void ann::error()
 {
     // Handler for the "error" message: report the model's training error.
     if (!grt_ann.getTrained())
     {
         flext::error("model not yet trained, send the \"train\" message to train");
         return;
     }

     float error_f = grt_ann.getTrainingError();
     t_atom error_a;

     SetFloat(error_a, error_f);

     // Send the error value to the first outlet as an "error" message.
     ToOutAnything(0, get_s_error(), 1, &error_a);
 }
Example 2
    void ann::map(int argc, const t_atom *argv)
    {
        // Handler for the "map" message: run an input list through the trained model.
        const data_type data_type = get_data_type();

        // Read the sample count from whichever dataset matches the current mode.
        GRT::UINT numSamples = data_type == LABELLED_CLASSIFICATION ? classification_data.getNumSamples() : regression_data.getNumSamples();

        if (numSamples == 0)
        {
            flext::error("no observations added, use 'add' to add training data");
            return;
        }

        if (grt_ann.getTrained() == false)
        {
            flext::error("model has not been trained, use 'train' to train the model");
            return;
        }
        
        GRT::UINT numInputNeurons = grt_ann.getNumInputNeurons();
        GRT::VectorDouble query(numInputNeurons);

        // The input list must supply exactly one value per input neuron.
        if (argc < 0 || (unsigned)argc != numInputNeurons)
        {
            flext::error("invalid input length, expected %d, got %d", (int)numInputNeurons, argc);
            return;
        }

        for (uint32_t index = 0; index < (uint32_t)argc; ++index)
        {
            double value = GetAFloat(argv[index]);
            query[index] = value;
        }
        
        // Run the query through the trained network.
        bool success = grt_ann.predict(query);
        
        if (success == false)
        {
            flext::error("unable to map input");
            return;
        }
        
        if (grt_ann.getClassificationModeActive())
        {
            // Classification: map the predicted label index back to its user-facing
            // class id; optionally list the per-class likelihoods as well.
            const GRT::VectorDouble likelihoods = grt_ann.getClassLikelihoods();
            const GRT::Vector<GRT::UINT> labels = classification_data.getClassLabels();
            const GRT::UINT predicted = grt_ann.getPredictedClassLabel();
            const GRT::UINT classification = predicted == 0 ? 0 : get_class_id_for_index(predicted);
            
            if (likelihoods.size() != labels.size())
            {
                flext::error("labels / likelihoods size mismatch");
            }
            else if (probs)
            {
                AtomList probs_list;

                for (unsigned count = 0; count < labels.size(); ++count)
                {
                    t_atom label_a;
                    t_atom likelihood_a;
                    
                    SetFloat(likelihood_a, static_cast<float>(likelihoods[count]));
                    SetInt(label_a, get_class_id_for_index(labels[count]));
                    
                    probs_list.Append(label_a);
                    probs_list.Append(likelihood_a);
                }
                ToOutAnything(1, get_s_probs(), probs_list);
            }
                 
            ToOutInt(0, classification);
        }
        else if (grt_ann.getRegressionModeActive())
        {
            // Regression: output one value per output neuron as a flat list.
            GRT::VectorDouble regression_data = grt_ann.getRegressionData();
            GRT::VectorDouble::size_type numOutputDimensions = regression_data.size();

            if (numOutputDimensions != grt_ann.getNumOutputNeurons())
            {
                flext::error("invalid output dimensions: %d", (int)numOutputDimensions);
                return;
            }
            
            AtomList result;
            
            for (uint32_t index = 0; index < numOutputDimensions; ++index)
            {
                t_atom value_a;
                double value = regression_data[index];
                SetFloat(value_a, value);
                result.Append(value_a);
            }
            
            ToOutList(0, result);
        }
    }
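
Both handlers wrap the same underlying GRT::MLP instance (grt_ann). For context, the sketch below shows the corresponding GRT calls used standalone, roughly what a "train" message followed by "error" and "map" amounts to, without the flext plumbing. This is a minimal sketch, not part of ml.ann: it assumes a GRT install whose MLP and RegressionData classes expose init, setInputAndTargetDimensions, addSample, train, and enableScaling as in the GRT headers, and the network size and sample data are illustrative only.

    // Standalone sketch: train a small regression MLP with GRT, then query it.
    #include <GRT/GRT.h>
    #include <iostream>

    int main()
    {
        // Two inputs, one target: learn y = x0 + x1 from a small grid of samples.
        GRT::RegressionData data;
        data.setInputAndTargetDimensions(2, 1);

        for (int i = 0; i <= 10; ++i)
        {
            for (int j = 0; j <= 10; ++j)
            {
                GRT::VectorDouble input(2);
                GRT::VectorDouble target(1);
                input[0] = i / 10.0;
                input[1] = j / 10.0;
                target[0] = input[0] + input[1];
                data.addSample(input, target);
            }
        }

        // 2 input neurons, 4 hidden neurons, 1 output neuron.
        GRT::MLP mlp;
        mlp.init(2, 4, 1);
        mlp.enableScaling(true);

        // What the "train" message triggers inside the wrapper.
        if (!mlp.train(data))
        {
            std::cerr << "training failed" << std::endl;
            return 1;
        }

        // Equivalent of the "error" message above.
        std::cout << "training error: " << mlp.getTrainingError() << std::endl;

        // Equivalent of the "map" message above.
        GRT::VectorDouble query(2);
        query[0] = 0.25;
        query[1] = 0.5;

        if (mlp.predict(query))
        {
            const GRT::VectorDouble output = mlp.getRegressionData();
            std::cout << "prediction: " << output[0] << std::endl;
        }

        return 0;
    }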