Example No. 1
void MLSymbolTable::audit(void)
{
	int i=0;
	bool OK = true;
	int size = mMap.size();

	for(i=0; i<size; ++i)
	{
		SymbolIDT id = getID(i);
		if (i != id)
		{
			OK = false;
			break;
		}
	}
	if (OK)
	{
		debug() << "Symbol table OK. " << size << " symbols.\n";
	}
	else
	{
		int idx = mIndexesByID[i];		
		SymbolIDT id = getID(i);
		const std::string& s = getStringByID(i);
		MLError() << "MLSymbolTable: error in symbol table, line " << i << ":\n";
		MLError() << "    ID " << i << ": index "  << idx << " = " << s << ", ID = " << id << "\n";
	}
}
Example No. 2
void MLSymbolTable::dump(void)
{
	bool found;
	int i, idx;
	int size = mMap.size();
	debug() << "---------------------------------------------------------\n";
	debug() << size << " symbols:\n";

	// print in sorted order.
	for(idx=0; idx<size; ++idx)
	{
		found = false;
		for(i=0; i<size; ++i)
		{
			int idxAtI = mIndexesByID[i];
			if (idx == idxAtI)
			{
				found = true;
				break;
			}
		}
		
		if (found)
		{
			const std::string& s = getStringByID(i);
			SymbolIDT id = getID(i);
			debug() << "    ID " << i << ": index "  << idx << " = " << s << ", ID = " << id << "\n";
		}
		else
		{
			MLError() << "error: symbol index " << idx << " not found!\n";
		}
	}
}
Example No. 3
std::tuple<cv::Mat1d, double, vl_size, double, double> SGDStep::loadWithPlatt(const std::string &fileName) const
{
    cv::FileStorage fs(fileName, cv::FileStorage::READ);

    if ((fs["model"].isNone() || fs["model"].empty()) ||
        (fs["bias"].isNone() || fs["bias"].empty()) ||
        (fs["plattA"].isNone() || fs["plattA"].empty()) ||
        (fs["plattB"].isNone() || fs["plattB"].empty())) {
        std::stringstream s;
        s << "Error. Unable to load classifier data from file: " << fileName << ". Aborting." << std::endl;
        throw MLError(s.str(), currentMethod, currentLine);
    }
    cv::Mat1d model;
    double bias;
    double iterations;
    double plattA;
    double plattB;

    fs["model"] >> model;
    fs["bias"] >> bias;
    fs["plattA"] >> plattA;
    fs["plattB"] >> plattB;

    if(fs["iterations"].isNone() || fs["iterations"].empty()) {
        inform("No iteration info found, skipping. Maybe an old classifier format?");
        iterations = 0;
    } else {
        fs["iterations"] >> iterations;
    }

    fs.release();

    return std::make_tuple(model, bias, static_cast<vl_size>(iterations), plattA, plattB);
}
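A minimal caller sketch for the loader above; the SGDStep instance (step) and the file name are hypothetical, and std::tie needs <tuple>, which the class already pulls in for std::make_tuple.

cv::Mat1d model;
double bias, plattA, plattB;
vl_size iterations;
// Unpack the five-element tuple returned by loadWithPlatt(); the call throws
// MLError if any required field is missing from the file.
std::tie(model, bias, iterations, plattA, plattB) = step.loadWithPlatt("classifier.yml");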
Example No. 4
void SGDStep::save(const cv::Mat1d &model,
                   const double bias,
                   const double iterations,
                   const double plattA,
                   const double plattB) const
{
    cv::Ptr<SGDConfig> config;
    try {
        config = config_cast<SGDConfig>(this->mConfig);
    } catch(const std::bad_cast &) {
        std::stringstream s;
        s << "Wrong config type: " << this->mConfig->identifier();
        throw MLError(s.str(), currentMethod, currentLine);
    }

    cv::FileStorage fs(config->classifierFiles()[0], cv::FileStorage::WRITE);

    fs << "model" << model;
    fs << "bias" << bias;
    fs << "iterations" << iterations;
    fs << "plattA" << plattA;
    fs << "plattB" << plattB;

    fs.release();
}
Example No. 5
void MLPluginProcessor::loadPluginDescription(const char* desc)
{
	mpPluginDoc = new XmlDocument(String(desc));
	
	if (mpPluginDoc.get())
	{
		ScopedPointer<XmlElement> doc (mpPluginDoc->getDocumentElement(true));	// true = quick scan header
		if (doc)
		{
			mEngine.scanDoc(&*mpPluginDoc, &mNumParameters);
			debug() << "loaded " << JucePlugin_Name << " plugin description, " << mNumParameters << " parameters.\n";
		}
		else
		{
			MLError() << "MLPluginProcessor: error loading plugin description!\n";
		}   
	}
	else
	{
		MLError() << "MLPluginProcessor: couldn't load plugin description!\n";
		return;
	}
	
	// get plugin parameters and initial values and create corresponding model properties.
	int params = getNumParameters();
	for(int i=0; i<params; ++i)
	{
		MLPublishedParamPtr p = getParameterPtr(i);
		if(p)	// check the smart pointer before dereferencing it
		{
			MLPublishedParam* param = &(*p);
			MLSymbol type = param->getType();
			if((type == "float") || (type == MLSymbol()))
			{
				debug() << param->getAlias() << " is a float type \n";
				setProperty(param->getAlias(), param->getDefault());
			}
			else
			{
				debug() << param->getAlias() << " is a non-float type \n";
			}
		}
	}
}
Example No. 6
void MLFile::insert(const std::string& path, MLFilePtr f)
{
    // debug() << "INSERTING: " << path << "\n";
    size_t len = path.length();
    if(len)
    {
        size_t b = path.find_first_of("/");
        if(b == std::string::npos)
        {
            // leaf, insert file here creating dir if needed
            // debug() << "        LEAF: " << path << "\n\n" ;
            
            // add leaf file to map
            mFiles[path] = f;
        }
        else
        {
            std::string firstDir = path.substr(0, b);
            std::string restOfDirs = path.substr(b + 1, len - b);
            
            // debug() << "    FIRST: " << firstDir << ", REST " << restOfDirs << "\n";
            
            // find or add first dir
            if(firstDir == "")
            {
                MLError() << "MLFile::insert: empty directory name!\n";
            }
            else
            {
                if(mFiles.find(firstDir) == mFiles.end())
                {
                    mFiles[firstDir] = MLFilePtr(new MLFile(firstDir));
                }
                mFiles[firstDir]->insert(restOfDirs, f);
            }
        }
    }
    else
    {
        MLError() << "MLFile::insert: empty file name!\n";
    }
}
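A hypothetical usage sketch for the recursive insert above; the root node, the file object, and the path are assumptions for illustration only.

MLFile root("root");
MLFilePtr f(new MLFile("kick.wav"));
// Each call splits off the first path segment: "samples" and "kits" directory
// nodes are created on demand, and the leaf is stored under its short name.
root.insert("samples/kits/kick.wav", f);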
Example No. 7
void MLPluginProcessor::loadPluginDescription(const char* desc)
{
	mpPluginDoc = new XmlDocument(String(desc));
	
	if (mpPluginDoc.get())
	{
		ScopedPointer<XmlElement> doc (mpPluginDoc->getDocumentElement(true));	// true = quick scan header
		if (doc)
		{
			mEngine.scanDoc(&*mpPluginDoc, &mNumParameters);
			//debug() << "loaded " << JucePlugin_Name << " plugin description, " << mNumParameters << " parameters.\n";	
		}
		else
		{
			MLError() << "MLPluginProcessor: error loading plugin description!\n";
		}   
	}
	else
	{
		MLError() << "MLPluginProcessor: couldn't load plugin description!\n";
	}
}
Example No. 8
std::tuple<cv::Mat1d, double, vl_size, double, double> SGDStep::loadWithPlatt() const
{
    cv::Ptr<SGDConfig> config;
    try {
        config = config_cast<SGDConfig>(this->mConfig);
    } catch(const std::bad_cast &) {
        std::stringstream s;
        s << "Wrong config type: " << this->mConfig->identifier();
        throw MLError(s.str(), currentMethod, currentLine);
    }

    return this->loadWithPlatt(config->classifierFiles()[0]);
}
Example No. 9
// MLProc::prepareToProcess() is called after all connections in DSP graph are made. 
// This is where sample rates and block sizes propagate through the graph, and 
// are checked for consistency.
// Setup internal buffers and data to prepare for processing any attached input signals.
// MLProcs have no concept of enabled -- it's up to the enclosing container to disable
// itself if things go wrong here.
//
MLProc::err MLProc::prepareToProcess()
{
	MLProc::err e = OK;
	
    // debug() << "preparing " << getClassName() << " \"" << getName() << "\" : " ;

	int ins = getNumInputs();
	int outs = getNumOutputs();
	MLSampleRate rate = getContextSampleRate();
	int blockSize = getContextVectorSize();
	
    // debug() << ins << " ins, " << outs << " outs, rate " << rate << ", blockSize " << blockSize << "\n";
	
	// All inputs must have a signal connected.	
	// So connect unconnected inputs to null input signal.
	for (int i=0; i<ins; i++)
	{
		if (!mInputs[i])
		{
			mInputs[i] = &getContext()->getNullInput();
		}
	}
	
	// set size and rate of output signals
	for (int i=1; i<=outs; ++i)
	{
		MLSignal& out = getOutput(i);	
		if (&out)
		{
			out.setRate(rate);		
			MLSample* outData = out.setDims(blockSize);
			if (!outData) 
			{
				e = memErr;
				goto bail;
			}
		}
		else
		{
			MLError() << "MLProc::prepareToProcess: null output " << i << " for " << getName() << "! \n";
		}
//debug() << "    out " << i << ": " << (void *)(MLSignal*)(&out) << ", " << out.getSize() << " samples.\n";
	}
	e = resize();
	
	// recalc params for new sample rate
	mParamsChanged = true;	
bail:	
	return e;
}
Example No. 10
void MLSymbolKey::makeString()
{
	if (mLength && mpData)
	{
		if (mpString) delete(mpString);
		mpString = new std::string(mpData, mLength);
		if (!mpString)
		{
			MLError() << "MLSymbolKey::makeString: could not allocate!\n";
		}
		else
		{
			mpData = mpString->data();
//			debug() << "made string " << *mpString << "\n";
		}
	}
}
Example No. 11
void SGDStep::save(const cv::Mat1d &model,
                   const double bias,
                   const double iterations) const
{
    cv::Ptr<SGDConfig> config;
    try {
        config = config_cast<SGDConfig>(this->mConfig);
    } catch(const std::bad_cast &) {
        std::stringstream s;
        s << "Wrong config type: " << this->mConfig->identifier();
        throw MLError(s.str(), currentMethod, currentLine);
    }

    this->save(config->classifierFiles()[0],
               model,
               bias,
               iterations);
}
Example No. 12
MLFilePtr MLFile::find(const std::string& path)
{
    // debug() << "FINDING: " << path << "\n";
    size_t len = path.length();
    if(len)
    {
        size_t b = path.find_first_of("/");
        if(b == std::string::npos)
        {
            // end of path, find short name here or return fail.
            //debug() << "        path end: " << path << "\n\n" ;
            
            nameToFileMap::const_iterator it;
            it = mFiles.find(path);
            if(it != mFiles.end())
            {
                // return the found file.
                return it->second;
            }
            else
            {
                //debug() << "MLFile::find: did not find " << path << " in :\n";
                
                nameToFileMap::const_iterator it2;
                for(it2 = mFiles.begin(); it2 != mFiles.end(); ++it2)
                {
                        //debug() << it2->first << ", ";
                }
                //debug() << "\n";
                
                return MLFilePtr();
            }
        }
        else
        {
            std::string firstDir = path.substr(0, b);
            std::string restOfDirs = path.substr(b + 1, len - b);
            
            //debug() << "    FIRST: " << firstDir << ", REST " << restOfDirs << "\n";
            
            // find file matching first dir
            if(firstDir == "")
            {
                MLError() << "MLFile::find: empty directory name!\n";
            }
            else if(mFiles.find(firstDir) != mFiles.end())
            {
                // look for rest of dirs in found non-leaf file
                return mFiles[firstDir]->find(restOfDirs);
            }
            else
            {
                return MLFilePtr();
            }
        }
    }
    else
    {
        MLError() << "MLFile::find: empty file name!\n";
    }
    return MLFilePtr();
}
Example No. 13
void MLPluginProcessor::setStateFromXML(const XmlElement& xmlState, bool setViewAttributes)
{
	if (!(xmlState.hasTagName (JucePlugin_Name))) return;
	if (!(mEngine.getCompileStatus() == MLProc::OK)) return; // TODO revisit need to compile first

	// getCallbackLock() is in juce_AudioProcessor
	// process lock is a quick fix.  it is here to prevent doParams() from getting called in 
	// process() methods and thereby setting mParamsChanged to false before the real changes take place.
	// A better alternative would be a lock-free queue of parameter changes.
	const ScopedLock sl (getCallbackLock()); 
		
	// only the differences between default parameters and the program state are saved in a program,
	// so the first step is to set the default parameters.
	setDefaultParameters();
	
	// get program version of saved state
	unsigned blobVersion = xmlState.getIntAttribute ("pluginVersion");
	unsigned pluginVersion = JucePlugin_VersionCode;
	
	if (blobVersion > pluginVersion)
	{
		// TODO show error to user
		MLError() << "MLPluginProcessor::setStateFromXML: saved program version is newer than plugin version!\n";
		return;
	}
    
	// try to load scale if a scale attribute exists
    // TODO auto save all state including this
 	const String scaleDir = xmlState.getStringAttribute ("scaleDir"); // look for old-style dir attribute   
	const String scaleName = xmlState.getStringAttribute ("scaleName");
    String fullName;
    if(scaleName != String::empty)
    {
        fullName = scaleName;
        if(scaleDir != String::empty)
        {
            fullName = scaleDir + String("/") + fullName + ".scl";
        }
    }
    else
    {
        fullName = "12-equal";
    }
    std::string fullScaleName(fullName.toUTF8());
	setProperty("key_scale", fullScaleName);
    bool loaded = false;
    // look for scale under full name with path
    if(fullScaleName != std::string())
    {
        const MLFilePtr f = mScaleFiles->getFileByName(fullScaleName);
        if(f != MLFilePtr())
        {
            loadScale(f->mFile);
            loaded = true;
        }
    }
    if(!loaded)
    {
        loadDefaultScale();
    }
    
	// get preset name saved in blob.  when saving from AU host, name will also be set from RestoreState().
	const String presetName = xmlState.getStringAttribute ("presetName");
	setProperty("preset", std::string(presetName.toUTF8()));
    
	/*
     debug() << "MLPluginProcessor: setStateFromXML: loading program " << presetName << ", version " << std::hex << blobVersion << std::dec << "\n";
     MemoryOutputStream myStream;
     xmlState->writeToStream (myStream, "");
     debug() << myStream.toString();
     */
	
    /*
     setCurrentPresetName(presetName.toUTF8());
     setCurrentPresetDir(presetDir.toUTF8());
     */
    
	// get plugin-specific translation table for updating older versions of data
	std::map<MLSymbol, MLSymbol> translationTable;

	// TODO move this into Aalto! 
	// make translation tables based on program version. 
	//
	if (blobVersion <= 0x00010120)
	{
		// translate seq parameters
		for(unsigned n=0; n<16; ++n)
		{
			std::stringstream pName;
			std::stringstream pName2;
			pName << "seq_value" << n;
			pName2 << "seq_pulse" << n;
			MLSymbol oldSym(pName.str());
			MLSymbol newSym = MLSymbol("seq_value#").withFinalNumber(n);
			MLSymbol oldSym2(pName2.str());
			MLSymbol newSym2 = MLSymbol("seq_pulse#").withFinalNumber(n);
			translationTable[oldSym] = newSym;
			translationTable[oldSym2] = newSym2;	
		}
	}

	if (blobVersion <= 0x00010200)
	{
		MLSymbol oldSym = MLSymbol("seq_value");
		MLSymbol newSym = MLSymbol("seq_value").withFinalNumber(0);
		MLSymbol oldSym2 = MLSymbol("seq_pulse");
		MLSymbol newSym2 = MLSymbol("seq_pulse").withFinalNumber(0);
		translationTable[oldSym] = newSym;
		translationTable[oldSym2] = newSym2;
		
		// translate seq parameters
		for(unsigned n=1; n<16; ++n)
		{
			oldSym = MLSymbol("seq_value#").withFinalNumber(n);
			newSym = MLSymbol("seq_value").withFinalNumber(n);
			oldSym2 = MLSymbol("seq_pulse#").withFinalNumber(n);
			newSym2 = MLSymbol("seq_pulse").withFinalNumber(n);
			translationTable[oldSym] = newSym;
			translationTable[oldSym2] = newSym2;	
		}		
	}
	
	// get params from xml
	const unsigned numAttrs = xmlState.getNumAttributes();
	String patcherInputStr ("patcher_input_");

	for(unsigned i=0; i<numAttrs; ++i)
	{
		// get name / value pair.
		const String& attrName = xmlState.getAttributeName(i);
		const MLParamValue paramVal = xmlState.getDoubleAttribute(attrName);
		
		// if not a patcher input setting,
		if (!attrName.contains(patcherInputStr))
		{					
			// see if we have this named parameter in our engine. 
			MLSymbol paramSym = XMLAttrToSymbol(attrName);
			const int pIdx = getParameterIndex(paramSym);
			
			if (pIdx >= 0)
			{
				// debug() << "setStateFromXML: <" << paramSym << " = " << paramVal << ">\n";
				setPropertyImmediate(paramSym, paramVal);
			}
			else // try finding a match through translation table. 
			{
				//debug() << "Looking for parameter " << paramSym << " in table...\n";
				std::map<MLSymbol, MLSymbol>::iterator it;
				it = translationTable.find(paramSym);
				if (it != translationTable.end())
				{
					const MLSymbol newSym = translationTable[paramSym];
					const int pNewIdx = getParameterIndex(newSym);
					if (pNewIdx >= 0)
					{
						//debug() << "translated parameter to " << newSym << " .\n";
						setPropertyImmediate(newSym, paramVal);
					}
					else
					{
						MLError() << "MLPluginProcessor::setStateFromXML: no such parameter! \n";
					}
				}
				else
				{
					// fail silently on unfound params, because we have deprecated some but they may still 
					// be around in old presets. 
					//debug() << "MLPluginProcessor::setStateFromXML: parameter " << paramSym << " not found!\n";
				}
			}
		}
	}
	
	// get editor state from XML
    if(setViewAttributes)
	{
		int x = xmlState.getIntAttribute("editor_x");
		int y = xmlState.getIntAttribute("editor_y");
		int width = xmlState.getIntAttribute("editor_width");
		int height = xmlState.getIntAttribute("editor_height");
		mEditorRect = MLRect(x, y, width, height);
		mEditorNumbersOn = xmlState.getIntAttribute("editor_num", 1);
		mEditorAnimationsOn = xmlState.getIntAttribute("editor_anim", 1);
	}
}
Example No. 14
void MmaSink::checkforerror() const {
  if(MLError(d_mlink)!=MLEOK) errorc(__LINE__);
}
Example No. 15
cv::Mat SGDStep::optimizeImpl(const bool debugMode,
                              const std::pair<std::vector<std::vector<uint32_t>>, std::vector<std::vector<uint32_t>>> &indices,
                              const std::vector<std::pair<cv::Mat, int32_t>> &data) const
{
    cv::Ptr<SGDConfig> config;
    try {
        config = config_cast<SGDConfig>(this->mConfig);
    } catch(const std::bad_cast &) {
        std::stringstream s;
        s << "Wrong config type: " << this->mConfig->identifier();
        throw MLError(s.str(), currentMethod, currentLine);
    }

    std::vector<double> lambdas = config->lambdas();
    std::vector<double> learningRates = config->learningRates();
    std::vector<double> multipliers = config->multipliers();

    if(!checkRangeProperties<double>(lambdas) || !checkRangeProperties<double>(learningRates) || !checkRangeProperties<double>(multipliers)) {
        throw MLError("Config parameters do not span a valid range.", currentMethod, currentLine);
    }

    double lambdaStart, lambdaEnd, lambdaInc;
    lambdaStart = lambdas[0];
    lambdaEnd = lambdas[1];
    lambdaInc = lambdas[2];

    double lrStart, lrEnd, lrInc;
    lrStart = learningRates[0];
    lrEnd = learningRates[1];
    lrInc = learningRates[2];

    double mulStart, mulEnd, mulInc;
    mulStart = multipliers[0];
    mulEnd = multipliers[1];
    mulInc = multipliers[2];

    double bestLambda = 0;
    double bestLearningRate = 0;
    double bestMultiplier = 0;
    double bestF = 0;

    std::vector<cv::Mat1d> trainingsDescriptorCache(config->folds());
    std::vector<cv::Mat1d> trainingsLabelCache(config->folds());
    std::vector<cv::Mat1d> testDescriptorCache(config->folds());
    std::vector<cv::Mat1d> testLabelCache(config->folds());

    for(double lambda = lambdaStart; lambda < lambdaEnd; lambda += lambdaInc) {
        for(double lr = lrStart; lr < lrEnd; lr += lrInc) {
            for(double mul = mulStart; mul < mulEnd; mul += mulInc) {
                if(debugMode) {
                    debug("Lambda:", lambda);
                    debug("Learning rate:", lr);
                    debug("Multiplier:", mul);
                }

                double avgF = 0;

                // Iterate over folds
                for(size_t fold = 0; fold < config->folds(); ++fold) {
                    if(trainingsDescriptorCache[fold].empty() ||
                       trainingsLabelCache[fold].empty()) {
                        if(debugMode) { debug("Rebuilding training cache."); }
                        cv::Mat tmpDesc;
                        cv::Mat tmpIdx;
                        for(auto idx : indices.first[fold]) {
                            tmpDesc.push_back(data[idx].first);
                            tmpIdx.push_back(data[idx].second);
                        }
                        if(tmpDesc.type() != CV_64F) {
                            tmpDesc.convertTo(trainingsDescriptorCache[fold], CV_64F);
                        } else {
                            trainingsDescriptorCache[fold] = tmpDesc;
                        }
                        if(tmpIdx.type() != CV_64F) {
                            tmpIdx.convertTo(trainingsLabelCache[fold], CV_64F);
                        } else {
                            trainingsLabelCache[fold] = tmpIdx;
                        }
                    }
                    if(testDescriptorCache[fold].empty() ||
                       testLabelCache[fold].empty()) {
                        if(debugMode) { debug("Rebuilding test cache."); }
                        cv::Mat tmpDesc;
                        cv::Mat tmpIdx;
                        for(auto idx : indices.second[fold]) {
                            tmpDesc.push_back(data[idx].first);
                            tmpIdx.push_back(data[idx].second);
                        }
                        if(tmpDesc.type() != CV_64F) {
                            tmpDesc.convertTo(testDescriptorCache[fold], CV_64F);
                        } else {
                            testDescriptorCache[fold] = tmpDesc;
                        }
                        if(tmpIdx.type() != CV_64F) {
                            tmpIdx.convertTo(testLabelCache[fold], CV_64F);
                        } else {
                            testLabelCache[fold] = tmpIdx;
                        }
                    }

                    if(debugMode) { debug("Setting up SVM."); }
                    cv::Ptr<VlFeatWrapper::SGDSolver> solver = new VlFeatWrapper::SGDSolver(trainingsDescriptorCache[fold],
                                                                                            trainingsLabelCache[fold],
                                                                                            lambda);

                    if(debugMode) { debug("Setting parameters."); }
                    // Bias learning rate and multiplier
                    solver->setBiasLearningRate(lr);
                    solver->setBiasMultiplier(mul);
                    // Sample weights
                    cv::Mat1d weights = calculateWeights(trainingsLabelCache[fold]);
                    solver->setWeights(weights);

                    if(debugMode) { debug("Training..."); }
                    solver->train();

                    cv::Mat1d predictions = solver->predict(testDescriptorCache[fold]);
                    double negativeLabel, positiveLabel;
                    cv::minMaxIdx(testLabelCache[fold], &negativeLabel, &positiveLabel, NULL, NULL);
                    predictions.setTo(negativeLabel, predictions < 0);
                    predictions.setTo(positiveLabel, predictions >= 0);
                    predictions = predictions.t();

                    double f = Metrics::f1(predictions, testLabelCache[fold]);
                    if(debugMode) { debug("F1 score:", f); }
                    avgF += f;
                }
                avgF /= config->folds();

                if(avgF > bestF) {
                    bestLambda = lambda;
                    bestLearningRate = lr;
                    bestMultiplier = mul;
                    bestF = avgF;
                }
            }
        }
    }

    debug("Best F1 score:", bestF);
    debug("Best lambda:", bestLambda);
    debug("Best learning rate:", bestLearningRate);
    debug("Best multiplier:", bestMultiplier);

    config->setLambdas(std::vector<double>({bestLambda}));
    config->setLearningRates(std::vector<double>({bestLearningRate}));
    config->setMultipliers(std::vector<double>({bestMultiplier}));

    return cv::Mat();
}
Example No. 16
bool MmaSink::verror() const {
  return MLError(d_mlink)!=MLEOK;
}
Example No. 17
cv::Mat SGDStep::trainImpl(const bool debugMode,
                           const cv::Mat &input,
                           const cv::Mat &labels) const
{
    cv::Ptr<SGDConfig> config;
    try {
        config = config_cast<SGDConfig>(this->mConfig);
    } catch(const std::bad_cast &) {
        std::stringstream s;
        s << "Wrong config type: " << this->mConfig->identifier();
        throw MLError(s.str(), currentMethod, currentLine);
    }

    if(input.empty()) {
        throw MLError("Missing parameters, input empty.", currentMethod, currentLine);
    } else if(labels.empty()) {
        throw MLError("Missing parameters, labels empty.", currentMethod, currentLine);
    }

    cv::Mat1d dInput;
    cv::Mat1d dParam;
    if(!(input.type() == CV_64FC1)) {
        if(debugMode) { debug("Incompatible type of input data, converting."); }
        input.convertTo(dInput, CV_64FC1);
    } else {
        dInput = input;
    }

    if(!(labels.type() == CV_64FC1)) {
        if(debugMode) { debug("Incompatible type of parameter data, converting."); }
        labels.convertTo(dParam, CV_64FC1);
    } else {
        dParam = labels;
    }

    cv::Mat1d weights = calculateWeights(labels);
    double lambda = config->lambdas()[0];
    if(debugMode) { debug("Lambda:", lambda); }
    double learningRate = config->learningRates()[0];
    if(debugMode) { debug("Learning rate:", learningRate); }
    double epsilon = config->epsilon();
    if(debugMode) { debug("Epsilon:", epsilon); }
    double multiplier = config->multipliers()[0];
    if(debugMode) { debug("Multiplier:", multiplier); }
    vl_size iterations = config->iterations();
    if(debugMode) { debug("Iterations:", iterations); }
    vl_size maxIterations = config->maxIterations();
    if(debugMode) { debug("Max. iterations:", maxIterations); }

    cv::Ptr<VlFeatWrapper::SGDSolver> solver = new VlFeatWrapper::SGDSolver(dInput,
                                                                            dParam,
                                                                            lambda);

    // Bias learning rate and multiplier
    if(learningRate > 0) { solver->setBiasLearningRate(learningRate); }
    if(multiplier > 0) { solver->setBiasMultiplier(multiplier); }
    // Sample weights
    if(!weights.empty()) { solver->setWeights(weights); }

    try {
        std::tuple<cv::Mat1d, double, vl_size> warmStartData = this->load(config->classifierFiles()[0]);
        if(!std::get<0>(warmStartData).empty()) {
            solver->setModel(std::get<0>(warmStartData));
        }
        solver->setBias(std::get<1>(warmStartData));
        solver->setStartIterationCount(std::get<2>(warmStartData));
        if(debugMode) { debug("Warm starting training."); }
    } catch(MLError & e) {
        if(debugMode) { debug("Starting training."); }
    }

    solver->train();

    cv::Mat1d model;
    double bias = 0;

    //Updated classifier data
    model = solver->getModelMat();
    if(debugMode) { debug("Model size:", model.size()); }
    bias = solver->getBias();
    if(debugMode) { debug("Bias:", bias); }
    iterations = solver->getIterationCount();
    if(debugMode) { debug("Iterations:", iterations); }

    cv::Mat1d predictions = solver->predict(dInput);
    double negativeLabel, positiveLabel;
    cv::minMaxIdx(dParam, &negativeLabel, &positiveLabel, NULL, NULL);
    predictions.setTo(negativeLabel, predictions < 0);
    predictions.setTo(positiveLabel, predictions >= 0);
    predictions = predictions.t();

    if(config->plattScale()) {
        std::pair<double, double> plattParams = Platt::platt_calibrate(predictions, dParam);
        if(debugMode) { debug("Platt parameter A:", plattParams.first); }
        if(debugMode) { debug("Platt parameter B:", plattParams.second); }
        if(!model.empty()) {
            this->save(model,
                       bias,
                       iterations,
                       plattParams.first,
                       plattParams.second);
        }
    } else if(!model.empty()) {
        // Without Platt scaling, persist only model, bias and iteration count;
        // an unconditional save here would overwrite the Platt parameters written above.
        this->save(model,
                   bias,
                   iterations);
    }

    return cv::Mat();
}
Example No. 18
cv::Mat SGDStep::predictImpl(const bool debugMode,
                             const cv::Mat &input) const
{
    cv::Ptr<SGDConfig> config;
    try {
        config = config_cast<SGDConfig>(this->mConfig);
    } catch(std::bad_cast) {
        std::stringstream s;
        s << "Wrong config type: " << this->mConfig->identifier();
        throw MLError(s.str(), currentMethod, currentLine);
    }

    std::vector<std::string> classifiers = config->classifierFiles();
    if(debugMode) { debug(classifiers.size(), "classifier(s)"); }
    cv::Mat1d results(1, classifiers.size());

    for(size_t idx = 0; idx < classifiers.size(); ++idx) {
        std::string classifierFile = classifiers[idx];
        if(debugMode) { debug("Loading classifier", classifierFile); }
        std::tuple<cv::Mat1d, double, vl_size> classifierData;
        std::tuple<cv::Mat1d, double, vl_size, double, double> plattClassifierData;
        try {
            // Use the model and bias from whichever format was actually loaded.
            cv::Mat1d model;
            double bias;
            if(config->plattScale()) {
                plattClassifierData = this->loadWithPlatt(classifierFile);
                model = std::get<0>(plattClassifierData);
                bias = std::get<1>(plattClassifierData);
            } else {
                classifierData = this->load(classifierFile);
                model = std::get<0>(classifierData);
                bias = std::get<1>(classifierData);
            }
            if(input.cols != model.cols) {
                std::stringstream s;
                s << "Data doesn't fit trained model." << std::endl;
                throw MLError(s.str(), currentMethod, currentLine);
            } else {
                // Convert the input to double precision if needed, then score it.
                cv::Mat converted;
                if(input.type() != CV_64F) {
                    input.convertTo(converted, CV_64F);
                } else {
                    converted = input;
                }
                double score = converted.dot(model) + bias;
                if(config->plattScale()) {
                    results.at<double>(idx) = Platt::sigmoid_predict(score,
                                                                     std::get<3>(plattClassifierData),
                                                                     std::get<4>(plattClassifierData));
                } else {
                    results.at<double>(idx) = score;
                }
            }
        } catch(const MLError &) {
            throw;
        }
    }

    if(config->binary()) {
        if(config->plattScale()) {
            double min, max;
            cv::Point minIdx, maxIdx;
            cv::minMaxLoc(results, &min, &max, &minIdx, &maxIdx);
            int32_t best = maxIdx.x;
            results.setTo(0);
            results.at<double>(best) = 1;
        } else {
            results.setTo(1, results > 0);
            results.setTo(-1, results < 0);
        }
    }

    return results;
}
Example No. 19
void MLProc::printErr(MLProc::err e)
{
	MLError() << "*** proc " << getName() << " error: ";
	switch(e)
	{
		case memErr:
			MLError() << "memErr\n";
			break;		
		case inputBoundsErr:
			MLError() << "inputBoundsErr\n";
			break;		
		case inputOccupiedErr:
			MLError() << "inputOccupiedErr\n";
			break;		
		case inputRateErr:
			MLError() << "inputRateErr\n";
			break;		
		case noInputErr:
			MLError() << "noInputErr\n";
			break;		
		case inputMismatchErr:
			MLError() << "inputMismatchErr\n";
			break;		
		case fractionalBlockSizeErr:
			MLError() << "fractionalBlockSizeErr\n";
			break;		
		case connectScopeErr:
			MLError() << "connectScopeErr\n";
			break;		
		case nameInUseErr:
			MLError() << "nameInUseErr\n";
			break;
		case headNotContainerErr:
			MLError() << "headNotContainerErr\n";
			break;
		case nameNotFoundErr:
			MLError() << "nameNotFoundErr\n";
			break;
		case fileOpenErr:
			MLError() << "fileOpenErr\n";
			break;
		case docSyntaxErr:
			MLError() << "docSyntaxErr\n";
			break;
		case newProcErr:
			MLError() << "newProcErr\n";
			break;
		case SSE2RequiredErr:
			MLError() << "SSE2RequiredErr\n";
			break;
		case OK:
			MLError() << "OK\n";
			break;
		default:
			MLError() << "unknown error " << e << "\n";
			break;
	}
}
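A hypothetical call site combining printErr() with prepareToProcess() from Example No. 9; the proc pointer name is an assumption for illustration.

MLProc::err e = pProc->prepareToProcess();
if (e != MLProc::OK)
{
	// report memErr, inputRateErr, etc. through the error stream
	pProc->printErr(e);
}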
Example No. 20
// look up a symbol by name and return its ID.
// if the symbol already exists, this routine must not allocate any memory.
SymbolIDT MLSymbolTable::getSymbolID(const char * sym, const int len)
{
	SymbolIDT r = 0;
	bool found = false;
	int size = mMap.size();
	
	//debug() << size << " entries, making symbol " << sym << "\n";
	
	if (len == 0)
	{
		return 0;
	}	

	// symbol data stays external for now, no memory is allocated.
	MLSymbolKey symKey(sym, len);	
	MLSymbolMapT::iterator mapIter(mMap.find(symKey));
	if (mapIter != mMap.end())
	{
		found = true;
		r = mapIter->second;
	}

	if (!found)
	{
		// make a new entry in the symbol table.
		if(size < kTableSize)
		{		
			MLSymbolMapT::iterator beginIter;
			MLSymbolMapT::iterator newEntryIter;
			std::pair<MLSymbolMapT::iterator, bool> newEntryRet;
			
			// insert key/ID pair into map, with ID=0 for now
			std::pair<MLSymbolKey, SymbolIDT> newEntry(symKey, 0);
			newEntryRet = mMap.insert(newEntry);
			
			// check insertion
			if (!newEntryRet.second)
			{
				MLError() << "MLSymbolTable::getSymbolID: error, key " << sym << " already in map!\n";
			}
			newEntryIter = newEntryRet.first;
			beginIter = mMap.begin();

			// get index of new entry
			int newIndex = distance(beginIter, newEntryIter);
						
			//debug() << "adding symbol " << sym << ", length " << len << "\n";		
			//debug() << "new map entry index: " << newIndex << " ID = " << size << "\n";
		
			// make key data local in map
			MLSymbolKey& newKey = const_cast<MLSymbolKey&>(newEntryIter->first);			
			newKey.makeString();
			
			// set ID of new entry
			newEntryIter->second = size;
		
			// adjust indexes to reflect insertion
			for(int id=0; id<size; ++id)
			{
				if (mIndexesByID[id] >= newIndex)
				{
					mIndexesByID[id]++;
				}
			}
			
			// make new index list entry
			mIndexesByID[size] = newIndex;	
			
			// make new string pointer. 
			mStringsByID[size] = newKey.mpString;
			
			r = size;	
		}
		else
		{
			debug() << "symbol table size exceeded!\n";
		}
	}
	
	return r;
}
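A small, hypothetical check tying getSymbolID() back to audit() from Example No. 1; the default construction of MLSymbolTable and the symbol text are assumptions, and assert() comes from <cassert>.

MLSymbolTable table;
// Interning the same characters twice must return the same ID and, per the
// comment above, must not allocate on the second lookup.
SymbolIDT a = table.getSymbolID("attack", 6);
SymbolIDT b = table.getSymbolID("attack", 6);
assert(a == b);
table.audit();   // verifies that getID(i) == i for every entry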