Example #1
void MarkovExpectation::compute(FitContext *fc, const char *what, const char *how)
{
	if (fc) {
		for (auto c1 : components) {
			c1->compute(fc, what, how);
		}
	}

	omxRecompute(initial, fc);
	if (initialV != omxGetMatrixVersion(initial)) {
		omxCopyMatrix(scaledInitial, initial);
		EigenVectorAdaptor Ei(scaledInitial);
		if (scale == SCALE_SOFTMAX) Ei.derived() = Ei.array().exp();
		if (scale != SCALE_NONE) {
			Ei /= Ei.sum();
		}
		if (verbose >= 2) mxPrintMat("initial", Ei);
		initialV = omxGetMatrixVersion(initial);
	}

	if (transition) {
		omxRecompute(transition, fc);
		if (transitionV != omxGetMatrixVersion(transition)) {
			omxCopyMatrix(scaledTransition, transition);
			EigenArrayAdaptor Et(scaledTransition);
			if (scale == SCALE_SOFTMAX) Et.derived() = Et.array().exp();
			if (scale != SCALE_NONE) {
				Eigen::ArrayXd v = Et.colwise().sum();
				Et.rowwise() /= v.transpose();
			}
			if (verbose >= 2) mxPrintMat("transition", Et);
			transitionV = omxGetMatrixVersion(transition);
		}
	}
}
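The omxGetMatrixVersion check makes the renormalization lazy: it is redone only when the initial or transition matrix actually changed. With SCALE_SOFTMAX, the free parameters are unconstrained reals that are exponentiated and then normalized, so each column of the transition matrix sums to 1. A minimal sketch of that normalization on plain Eigen types (scaleTransition is a hypothetical helper, not part of OpenMx; the omxMatrix adaptors are OpenMx-specific):

#include <Eigen/Dense>
#include <iostream>

// Hypothetical helper: map unconstrained reals to a column-stochastic
// matrix, mirroring the SCALE_SOFTMAX branch above.
static void scaleTransition(Eigen::ArrayXXd &Et, bool softmax)
{
	if (softmax) Et = Et.exp();                       // coefficient-wise exp
	Eigen::Array<double, 1, Eigen::Dynamic> colSums = Et.colwise().sum();
	Et.rowwise() /= colSums;                          // each column now sums to 1
}

int main()
{
	Eigen::ArrayXXd T(2, 2);
	T << 0.0, 1.0,
	     2.0, 3.0;
	scaleTransition(T, true);
	std::cout << T << "\n";  // every column sums to 1
}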
Example #2
static void nloptInequalityFunction(unsigned m, double *result, unsigned n, const double* x, double* grad, void* f_data)
{
	GradientOptimizerContext *goc = (GradientOptimizerContext *) f_data;
	assert(n == goc->fc->numParam);
	Eigen::Map< Eigen::VectorXd > Epoint((double*)x, n);
	Eigen::Map< Eigen::VectorXd > Eresult(result, m);
	Eigen::Map< Eigen::MatrixXd > jacobian(grad, n, m);
	inequality_functional ff(*goc);
	ff(Epoint, Eresult);
	if (grad && goc->verbose >= 2) {
		if (m == 1) {
			mxLog("major iteration ineq=%.12f", Eresult[0]);
		} else {
			mxPrintMat("major iteration ineq", Eresult);
		}
	}
	if (grad) {
		fd_jacobian(goc->gradientAlgo, goc->gradientIterations, goc->gradientStepSize,
			    ff, Eresult, Epoint, jacobian);
		if (!std::isfinite(Eresult.sum())) {
			// infeasible at start of major iteration
			nlopt_opt opt = (nlopt_opt) goc->extraData;
			nlopt_force_stop(opt);
		}
	}
	if (goc->verbose >= 3 && grad) {
		mxPrintMat("inequality jacobian", jacobian);
	}
}
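A callback with this signature matches NLopt's nlopt_mfunc type and is registered with nlopt_add_inequality_mconstraint. NLopt stores the Jacobian as grad[i*n + j] = dc_i/dx_j, which is why the code above can view grad as a column-major n-by-m matrix with one column per constraint. A hedged registration sketch (MyData and the stub bodies are placeholders, not OpenMx code):

#include <nlopt.h>
#include <algorithm>
#include <vector>

struct MyData { /* whatever the callback needs */ };

static void myInequality(unsigned m, double *result, unsigned n,
                         const double *x, double *grad, void *f_data)
{
	// Fill result[i] with c_i(x); a point is feasible when c_i(x) <= 0.
	std::fill(result, result + m, 0.0);                    // stub
	if (grad) std::fill(grad, grad + (size_t)m * n, 0.0);  // stub
}

static void setupInequalities(nlopt_opt opt, unsigned m, MyData *data)
{
	std::vector<double> tol(m, 0.0);  // per-constraint tolerances
	nlopt_add_inequality_mconstraint(opt, m, myInequality, data, tol.data());
}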
Example #3
static void nloptEqualityFunction(unsigned m, double* result, unsigned n, const double* x, double* grad, void* f_data)
{
	context &ctx = *(context *)f_data;
	GradientOptimizerContext &goc = ctx.goc;
	assert(n == goc.fc->numParam);
	Eigen::Map< Eigen::VectorXd > Epoint((double*)x, n);
	Eigen::VectorXd Eresult(ctx.origeq);
	Eigen::MatrixXd jacobian(n, ctx.origeq);
	equality_functional ff(goc);
	ff(Epoint, Eresult);
	if (grad) {
		fd_jacobian(goc.gradientAlgo, goc.gradientIterations, goc.gradientStepSize,
			    ff, Eresult, Epoint, jacobian);
		if (ctx.eqmask.size() == 0) {
			ctx.eqmask.assign(m, false);
			for (int c1=0; c1 < int(m-1); ++c1) {
				for (int c2=c1+1; c2 < int(m); ++c2) {
					bool match = (Eresult[c1] == Eresult[c2] &&
						      (jacobian.col(c1) == jacobian.col(c2)));
					if (match && !ctx.eqmask[c2]) {
						ctx.eqmask[c2] = match;
						++ctx.eqredundent;
						if (goc.verbose >= 2) {
							mxLog("nlopt: eq constraint %d is redundent with %d",
							      c1, c2);
						}
					}
				}
			}
			if (ctx.eqredundent) {
				if (goc.verbose >= 1) {
					mxLog("nlopt: detected %d redundent equality constraints; retrying",
					      ctx.eqredundent);
				}
				nlopt_opt opt = (nlopt_opt) goc.extraData;
				nlopt_force_stop(opt);
			}
		}
	}
	Eigen::Map< Eigen::VectorXd > Uresult(result, m);
	Eigen::Map< Eigen::MatrixXd > Ujacobian(grad, n, m);
	int dx=0;
	for (int cx=0; cx < int(m); ++cx) {
		if (ctx.eqmask[cx]) continue;
		Uresult[dx] = Eresult[cx];
		if (grad) {
			Ujacobian.col(dx) = jacobian.col(cx);
		}
		++dx;
	}
	if (goc.verbose >= 4 && grad) {
		mxPrintMat("eq result", Uresult);
		mxPrintMat("eq jacobian", Ujacobian);
	}
}
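The notable logic here is the redundancy scan: two equality constraints are treated as duplicates when they evaluate to the same value and have identical Jacobian columns; the duplicate is masked, and because NLopt fixes the constraint count m per run, the optimizer is force-stopped so the caller can re-register the constraint set with the reduced count (via nlopt_add_equality_mconstraint) and retry. A stripped-down version of the scan on plain Eigen types (maskRedundant is a hypothetical name):

#include <Eigen/Dense>
#include <vector>

// Mark every equality constraint that duplicates an earlier one: same
// current value and an identical Jacobian column.
static std::vector<bool> maskRedundant(const Eigen::VectorXd &val,
                                       const Eigen::MatrixXd &jac)  // n x m
{
	const int m = (int) val.size();
	std::vector<bool> mask(m, false);
	for (int c1 = 0; c1 < m - 1; ++c1) {
		for (int c2 = c1 + 1; c2 < m; ++c2) {
			if (!mask[c2] && val[c1] == val[c2] &&
			    jac.col(c1) == jac.col(c2)) {
				mask[c2] = true;  // keep c1, drop c2
			}
		}
	}
	return mask;
}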
Example #4
void omxPrintMatrix(omxMatrix *source, const char* header) // make static TODO
{
	EigenMatrixAdaptor Esrc(source);
	if (!header) header = source->name();
	if (!header) header = "?";
	mxPrintMat(header, Esrc);
}
Example #5
static double nloptObjectiveFunction(unsigned n, const double *x, double *grad, void *f_data)
{
	GradientOptimizerContext *goc = (GradientOptimizerContext *) f_data;
	nlopt_opt opt = (nlopt_opt) goc->extraData;
	FitContext *fc = goc->fc;
	assert(n == fc->numParam);
	int mode = 0;
	double fit = goc->solFun((double*) x, &mode);
	if (grad) {
		fc->iterations += 1;
		if (goc->maxMajorIterations != -1 && fc->iterations >= goc->maxMajorIterations) {
			nlopt_force_stop(opt);
		}
	}
	if (grad && goc->verbose >= 2) {
		mxLog("major iteration fit=%.12f", fit);
	}
	if (mode == -1) {
		if (!goc->feasible) {
			nlopt_force_stop(opt);
		}
		return nan("infeasible");
	}
	if (!grad) return fit;

	Eigen::Map< Eigen::VectorXd > Epoint((double*) x, n);
	Eigen::Map< Eigen::VectorXd > Egrad(grad, n);
	if (fc->wanted & FF_COMPUTE_GRADIENT) {
		Egrad = fc->grad;
	} else if (fc->CI && fc->CI->varIndex >= 0) {
		Egrad.setZero();
		Egrad[fc->CI->varIndex] = fc->lowerBound? 1 : -1;
		fc->grad = Egrad;
	} else {
		if (goc->verbose >= 3) mxLog("fd_gradient start");
		fit_functional ff(*goc);
		gradient_with_ref(goc->gradientAlgo, goc->gradientIterations, goc->gradientStepSize,
				  ff, fit, Epoint, Egrad);
		fc->grad = Egrad;
	}
	if (goc->verbose >= 3) {
		mxPrintMat("gradient", Egrad);
	}
	return fit;
}
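When neither the analytic gradient (FF_COMPUTE_GRADIENT) nor the confidence-interval shortcut applies, the gradient comes from finite differences via gradient_with_ref, reusing the fit value already computed at the current point. gradient_with_ref is OpenMx-internal; a bare-bones forward-difference analogue might look like this (fdGradient is a hypothetical name):

#include <Eigen/Dense>
#include <functional>

// Forward-difference gradient that reuses refFit = f(x), so it costs
// one extra evaluation per parameter.
static void fdGradient(const std::function<double(const Eigen::VectorXd &)> &f,
                       double refFit,
                       Eigen::VectorXd x,       // local copy, perturbed in place
                       Eigen::VectorXd &grad,
                       double h = 1e-7)
{
	for (int i = 0; i < x.size(); ++i) {
		const double saved = x[i];
		x[i] = saved + h;
		grad[i] = (f(x) - refFit) / h;  // forward difference
		x[i] = saved;
	}
}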
Example #6
void omxExpectation::loadFromR()
{
	if (!rObj || !data) return;

	auto ox = this;

	int numCols=0;
	bool isRaw = strEQ(omxDataType(data), "raw");
	if (isRaw || data->hasSummaryStats()) {
		ProtectedSEXP Rdcn(R_do_slot(rObj, Rf_install("dataColumnNames")));
		loadCharVecFromR(name, Rdcn, dataColumnNames);

		ProtectedSEXP Rdc(R_do_slot(rObj, Rf_install("dataColumns")));
		numCols = Rf_length(Rdc);
		ox->saveDataColumnsInfo(Rdc);
		if(OMX_DEBUG) mxPrintMat("Variable mapping", base::getDataColumns());
		if (isRaw) {
			auto dc = base::getDataColumns();
			for (int cx=0; cx < numCols; ++cx) {
				int var = dc[cx];
				data->assertColumnIsData(var);
			}
		}
	}

	if (R_has_slot(rObj, Rf_install("thresholds"))) {
		if(OMX_DEBUG) {
			mxLog("Accessing Threshold matrix.");
		}
		ProtectedSEXP threshMatrix(R_do_slot(rObj, Rf_install("thresholds")));

		if(INTEGER(threshMatrix)[0] != NA_INTEGER) {
			ox->thresholdsMat = omxMatrixLookupFromState1(threshMatrix, ox->currentState);
			ox->loadThresholds();
		} else {
			if (OMX_DEBUG) {
				mxLog("No thresholds matrix; not processing thresholds.");
			}
			ox->numOrdinal = 0;
		}
	}
}
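ProtectedSEXP is an OpenMx helper; judging by its use above, it is an RAII guard around R's PROTECT/UNPROTECT that keeps a SEXP alive across allocating calls without manual cleanup. A minimal sketch of that pattern (an assumption, not the actual OpenMx definition):

#include <Rinternals.h>

// RAII guard for R's garbage-collector protection stack (sketch).
class ProtectGuard {
	SEXP obj;
public:
	explicit ProtectGuard(SEXP in) : obj(in) { PROTECT(obj); }
	~ProtectGuard() { UNPROTECT(1); }
	operator SEXP() const { return obj; }
};

Since UNPROTECT pops the most recent entry of the protection stack, such guards are only correct when destroyed in reverse order of construction, which C++ block scoping guarantees for locals.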
Example #7
void omxSD(GradientOptimizerContext &rf)
{
	int maxIter = rf.maxMajorIterations;
	if (maxIter == -1) maxIter = 50000;

	Eigen::VectorXd currEst(rf.numFree);
	rf.copyToOptimizer(currEst.data());

	int iter = 0;
	double priorSpeed = 1.0, shrinkage = 0.7;
	rf.setupSimpleBounds();
	rf.informOut = INFORM_UNINITIALIZED;

	{
		int mode = 0;
		rf.solFun(currEst.data(), &mode);
		if (mode == -1) {
			rf.informOut = INFORM_STARTING_VALUES_INFEASIBLE;
			return;
		}
	}
	double refFit = rf.getFit();

	rf.grad.resize(rf.numFree);

	fit_functional ff(rf);
	Eigen::VectorXd majorEst = currEst;

	while (++iter < maxIter && !isErrorRaised()) {
		gradient_with_ref(rf.gradientAlgo, 1, rf.gradientIterations, rf.gradientStepSize,
				  ff, refFit, majorEst, rf.grad);

		if (rf.verbose >= 3) mxPrintMat("grad", rf.grad);

		if (rf.grad.norm() == 0) {
			rf.informOut = INFORM_CONVERGED_OPTIMUM;
			if (rf.verbose >= 2) mxLog("After %i iterations, gradient is exactly zero", iter);
			break;
		}

		int retries = 300;
		double speed = std::min(priorSpeed, 1.0);
		double bestSpeed = speed;
		bool foundBetter = false;
		Eigen::VectorXd bestEst(majorEst.size());
		Eigen::VectorXd prevEst(majorEst.size());
		Eigen::VectorXd searchDir = rf.grad;
		searchDir /= searchDir.norm();
		prevEst.setConstant(nan("uninit"));
		while (--retries > 0 && !isErrorRaised()) {
			Eigen::VectorXd nextEst = majorEst - speed * searchDir;
			nextEst = nextEst.cwiseMax(rf.solLB).cwiseMin(rf.solUB);

			if (nextEst == prevEst) break;
			prevEst = nextEst;

			rf.checkActiveBoxConstraints(nextEst);

			int mode = 0;
			double fit = rf.solFun(nextEst.data(), &mode);
			if (fit < refFit) {
				foundBetter = true;
				refFit = rf.getFit();
				bestSpeed = speed;
				bestEst = nextEst;
				break;
			}
			speed *= shrinkage;
		}

		if (false && foundBetter) {
			// In some tests, this did not help so it is not enabled.
			// It might be worth testing more.
			mxLog("trying larger step size");
			retries = 3;
			while (--retries > 0 && !isErrorRaised()) {
				speed *= 1.01;
				Eigen::VectorXd nextEst = majorEst - speed * searchDir;
				nextEst = nextEst.cwiseMax(rf.solLB).cwiseMin(rf.solUB);
				rf.checkActiveBoxConstraints(nextEst);
				int mode = 0;
				double fit = rf.solFun(nextEst.data(), &mode);
				if (fit < refFit) {
					foundBetter = true;
					refFit = rf.getFit();
					bestSpeed = speed;
					bestEst = nextEst;
				}
			}
		}

		if (!foundBetter) {
			rf.informOut = INFORM_CONVERGED_OPTIMUM;
			if (rf.verbose >= 2) mxLog("After %i iterations, cannot find a better estimate along the gradient direction", iter);
			break;
		}

		if (rf.verbose >= 2) mxLog("major fit %f bestSpeed %g", refFit, bestSpeed);
		majorEst = bestEst;
		priorSpeed = bestSpeed * 1.1;
	}
	rf.est = majorEst;
	if ((rf.grad.array().abs() > 0.1).any()) {
		rf.informOut = INFORM_NOT_AT_OPTIMUM;
	}
	if (iter >= maxIter - 1) {
		rf.informOut = INFORM_ITERATION_LIMIT;
		if (rf.verbose >= 2) mxLog("Maximum number of iterations reached");
	}
	if (rf.verbose >= 1) mxLog("Status code : %i", rf.informOut);
}
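Structurally, omxSD is steepest descent with a backtracking line search: step along the normalized negative gradient, shrink the step by 0.7 until the fit improves (up to 300 retries), then start the next major iteration slightly bolder (times 1.1, capped at 1.0). The same scheme on a toy quadratic, independent of OpenMx:

#include <Eigen/Dense>
#include <algorithm>
#include <cstdio>

int main()
{
	// f(x) = |x|^2, gradient 2x; minimum at the origin.
	auto f = [](const Eigen::Vector2d &x) { return x.squaredNorm(); };
	auto g = [](const Eigen::Vector2d &x) { return Eigen::Vector2d(2.0 * x); };

	Eigen::Vector2d x(3.0, -4.0);
	double speed = 1.0;
	double refFit = f(x);
	for (int iter = 0; iter < 1000; ++iter) {
		Eigen::Vector2d grad = g(x);
		if (grad.norm() == 0) break;           // converged exactly
		Eigen::Vector2d dir = grad / grad.norm();
		bool improved = false;
		for (int retry = 0; retry < 300; ++retry) {
			Eigen::Vector2d next = x - speed * dir;
			double fit = f(next);
			if (fit < refFit) {            // accept and stop backtracking
				x = next;
				refFit = fit;
				improved = true;
				break;
			}
			speed *= 0.7;                  // shrink the step
		}
		if (!improved) break;                  // no descent step found
		speed = std::min(speed * 1.1, 1.0);    // be slightly bolder next time
	}
	std::printf("minimum near (%g, %g), f=%g\n", x[0], x[1], refFit);
}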
Example #8
	void state::compute(int want, FitContext *fc)
	{
		state *st = this;
		auto *oo = this;

		for (auto c1 : components) {
			if (c1->fitFunction) {
				omxFitFunctionCompute(c1->fitFunction, want, fc);
			} else {
				omxRecompute(c1, fc);
			}
		}
		if (!(want & FF_COMPUTE_FIT)) return;

		int nrow = components[0]->rows;
		for (auto c1 : components) {
			if (c1->rows != nrow) {
				mxThrow("%s: component '%s' has %d rows but component '%s' has %d rows",
					 oo->name(), components[0]->name(), nrow, c1->name(), c1->rows);
			}
		}

		Eigen::VectorXd expect;
		Eigen::VectorXd rowResult;
		int numC = components.size();
		Eigen::VectorXd tp(numC);
		double lp=0;
		for (int rx=0; rx < nrow; ++rx) {
			if (expectation->loadDefVars(rx) || rx == 0) {
				omxExpectationCompute(fc, expectation, NULL);
				if (!st->transition || rx == 0) {
					EigenVectorAdaptor Einitial(st->initial);
					expect = Einitial;
					if (expect.rows() != numC || expect.cols() != 1) {
						omxRaiseErrorf("%s: initial prob matrix must be %dx%d not %dx%d",
							       name(), numC, 1, expect.rows(), expect.cols());
						return;
					}
				}
				if (st->transition && (st->transition->rows != numC || st->transition->cols != numC)) {
					omxRaiseErrorf("%s: transition prob matrix must be %dx%d not %dx%d",
						       name(), numC, numC, st->transition->rows, st->transition->cols);
					return;
				}
			}
			for (int cx=0; cx < int(components.size()); ++cx) {
				EigenVectorAdaptor Ecomp(components[cx]);
				tp[cx] = Ecomp[rx];
			}
			if (st->verbose >= 4) {
				mxPrintMat("tp", tp);
			}
			if (st->transition) {
				EigenMatrixAdaptor Etransition(st->transition);
				expect = (Etransition * expect).eval();
			}
			rowResult = tp.array() * expect.array();
			double rowp = rowResult.sum();
			rowResult /= rowp;
			lp += log(rowp);
			if (st->transition) expect = rowResult;
		}
		oo->matrix->data[0] = Global->llScale * lp;
		if (st->verbose >= 2) mxLog("%s: fit=%f", oo->name(), lp);
	}
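This row loop is the scaled forward algorithm for a hidden Markov model: propagate the state distribution through the transition matrix, weight it by each component's per-row likelihood, renormalize to avoid underflow, and accumulate the log of the normalizer. A stripped-down version on plain Eigen types (forwardLogLik is a hypothetical name; lik(k, t) is the likelihood of row t under component k; unlike the code above, which applies the transition on every row including the first, this sketch uses the initial vector as-is at t = 0):

#include <Eigen/Dense>
#include <cmath>

static double forwardLogLik(const Eigen::VectorXd &initial,    // k, sums to 1
                            const Eigen::MatrixXd &transition, // k x k, columns sum to 1
                            const Eigen::MatrixXd &lik)        // k x T
{
	Eigen::VectorXd alpha = initial;
	double logLik = 0.0;
	for (int t = 0; t < lik.cols(); ++t) {
		if (t > 0) alpha = transition * alpha;     // propagate one step
		alpha = alpha.cwiseProduct(lik.col(t));    // weight by observation
		double scale = alpha.sum();                // P(obs_t | obs_1..t-1)
		alpha /= scale;                            // renormalize
		logLik += std::log(scale);
	}
	return logLik;
}

The per-row rescaling (rowp above, scale here) is what keeps the recursion numerically stable; summing the logs of the normalizers recovers the total log likelihood exactly.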