Example #1
void MarkovExpectation::compute(FitContext *fc, const char *what, const char *how)
{
	if (fc) {
		for (auto c1 : components) {
			c1->compute(fc, what, how);
		}
	}

	omxRecompute(initial, fc);
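	// Rescale the initial probabilities only when the matrix has changed;
	// the omxGetMatrixVersion check against the cached initialV avoids redundant work.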
	if (initialV != omxGetMatrixVersion(initial)) {
		omxCopyMatrix(scaledInitial, initial);
		EigenVectorAdaptor Ei(scaledInitial);
		if (scale == SCALE_SOFTMAX) Ei.derived() = Ei.array().exp();
		if (scale != SCALE_NONE) {
			Ei /= Ei.sum();
		}
		if (verbose >= 2) mxPrintMat("initial", Ei);
		initialV = omxGetMatrixVersion(initial);
	}

	if (transition) {
		omxRecompute(transition, fc);
		if (transitionV != omxGetMatrixVersion(transition)) {
			omxCopyMatrix(scaledTransition, transition);
			EigenArrayAdaptor Et(scaledTransition);
			if (scale == SCALE_SOFTMAX) Et.derived() = Et.array().exp();
			if (scale != SCALE_NONE) {
				Eigen::ArrayXd v = Et.colwise().sum();
				Et.rowwise() /= v.transpose();
			}
			if (verbose >= 2) mxPrintMat("transition", Et);
			transitionV = omxGetMatrixVersion(transition);
		}
	}
}
void omxComputeNormalExpectation(omxExpectation* ox, const char *, const char *) {
	omxNormalExpectation* one = (omxNormalExpectation*) (ox->argStruct);

	omxRecompute(one->cov, NULL);
	if(one->means != NULL)
	    omxRecompute(one->means, NULL);
	if (one->thresholds) omxRecompute(one->thresholds, NULL);
}
Example #3
void omxExpectationCompute(FitContext *fc, omxExpectation *ox, const char *what, const char *how)
{
	if (!ox) return;

	if (ox->data) ox->data->recompute(); // for dynamic data
	if (ox->thresholdsMat) omxRecompute(ox->thresholdsMat, fc);
	ox->compute(fc, what, how);
}
Example #4
void omxExpectationRecompute(omxExpectation *ox) {
	for(int i = 0; i < int(ox->thresholds.size()); i++) {
		if (!ox->thresholds[i].matrix) continue;
		omxRecompute(ox->thresholds[i].matrix, NULL);
	}

	omxExpectationCompute(ox, NULL);
}
Example #5
void loglikelihoodCIFun(omxFitFunction *ff, int want, FitContext *fc)
{
	const omxConfidenceInterval *CI = fc->CI;

	if (want & FF_COMPUTE_PREOPTIMIZE) {
		fc->targetFit = (fc->lowerBound? CI->lbound : CI->ubound) + fc->fit;
		//mxLog("Set target fit to %f (MLE %f)", fc->targetFit, fc->fit);
		return;
	}

	if (!(want & FF_COMPUTE_FIT)) {
		Rf_error("Not implemented yet");
	}

	omxMatrix *fitMat = ff->matrix;

	// We need to compute the fit here because that's the only way to
	// check our soft feasibility constraints. If parameters don't
	// change between here and the constraint evaluation then we
	// should avoid recomputing the fit again in the constraint. TODO

	omxFitFunctionCompute(fitMat->fitFunction, FF_COMPUTE_FIT, fc);
	const double fit = totalLogLikelihood(fitMat);
	omxRecompute(CI->matrix, fc);
	double CIElement = omxMatrixElement(CI->matrix, CI->row, CI->col);
	omxResizeMatrix(fitMat, 1, 1);

	if (!std::isfinite(fit) || !std::isfinite(CIElement)) {
		fc->recordIterationError("Confidence interval is in a range that is currently incalculable. Add constraints to keep the value in the region where it can be calculated.");
		fitMat->data[0] = nan("infeasible");
		return;
	}

	if (want & FF_COMPUTE_FIT) {
		double param = (fc->lowerBound? CIElement : -CIElement);
		if (fc->compositeCIFunction) {
			double diff = fc->targetFit - fit;
			diff *= diff;
			if (diff > 1e2) {
				// Ensure there aren't any creative solutions
				fitMat->data[0] = nan("infeasible");
				return;
			}
			fitMat->data[0] = diff + param;
		} else {
			fitMat->data[0] = param;
		}
		//mxLog("param at %f", fitMat->data[0]);
	}
	if (want & (FF_COMPUTE_GRADIENT | FF_COMPUTE_HESSIAN | FF_COMPUTE_IHESSIAN)) {
		// add deriv adjustments here TODO
	}
}
void omxPopulateNormalAttributes(omxExpectation *ox, SEXP algebra) {
    if(OMX_DEBUG) { mxLog("Populating Normal Attributes."); }

	omxNormalExpectation* one = (omxNormalExpectation*) (ox->argStruct);
    
	omxMatrix *cov = one->cov;
	omxMatrix *means = one->means;

	omxRecompute(cov, NULL);
	if(means != NULL) omxRecompute(means, NULL);

	{
		SEXP expCovExt;
		ScopedProtect p1(expCovExt, Rf_allocMatrix(REALSXP, cov->rows, cov->cols));
		for(int row = 0; row < cov->rows; row++)
			for(int col = 0; col < cov->cols; col++)
				REAL(expCovExt)[col * cov->rows + row] =
					omxMatrixElement(cov, row, col);
		Rf_setAttrib(algebra, Rf_install("ExpCov"), expCovExt);
	}

	
	if (means != NULL) {
		SEXP expMeanExt;
		ScopedProtect p1(expMeanExt, Rf_allocMatrix(REALSXP, means->rows, means->cols));
		for(int row = 0; row < means->rows; row++)
			for(int col = 0; col < means->cols; col++)
				REAL(expMeanExt)[col * means->rows + row] =
					omxMatrixElement(means, row, col);
		Rf_setAttrib(algebra, Rf_install("ExpMean"), expMeanExt);
	} else {
		SEXP expMeanExt;
		ScopedProtect p1(expMeanExt, Rf_allocMatrix(REALSXP, 0, 0));
		Rf_setAttrib(algebra, Rf_install("ExpMean"), expMeanExt);
	}

	Rf_setAttrib(algebra, Rf_install("numStats"), Rf_ScalarReal(omxDataDF(ox->data)));
}
omxMatrix* omxGetNormalExpectationComponent(omxExpectation* ox, omxFitFunction* off, const char* component){
/* Return appropriate parts of Expectation to the Fit Function */
	if(OMX_DEBUG) { mxLog("Normal expectation: %s requested--", component); }

	omxNormalExpectation* one = (omxNormalExpectation*)(ox->argStruct);
	omxMatrix* retval = NULL;

	if(strEQ("cov", component)) {
		retval = one->cov;
	} else if(strEQ("means", component)) {
		retval = one->means;
	} else if(strEQ("pvec", component)) {
		// Once implemented, change compute function and return pvec
	}
	if (retval) omxRecompute(retval, NULL);
	
	return retval;
}
Example #8
void FitMultigroup::compute(int want, FitContext *fc)
{
	omxMatrix *fitMatrix = matrix;
	double fit = 0;
	double mac = 0;

	FitMultigroup *mg = (FitMultigroup*) this;

	for (size_t ex=0; ex < mg->fits.size(); ex++) {
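		// Evaluate each component fit; matrices without a fit function are
		// recomputed directly with omxRecompute.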
		omxMatrix* f1 = mg->fits[ex];
		if (f1->fitFunction) {
			omxFitFunctionCompute(f1->fitFunction, want, fc);
			if (want & FF_COMPUTE_MAXABSCHANGE) {
				mac = std::max(fc->mac, mac);
			}
			if (want & FF_COMPUTE_PREOPTIMIZE) {
				if (units == FIT_UNITS_UNINITIALIZED) {
					units = f1->fitFunction->units;
				} else if (units != f1->fitFunction->units) {
					mxThrow("%s: cannot combine units %s and %s (from %s)",
						matrix->name(), fitUnitsToName(units),
						fitUnitsToName(f1->fitFunction->units), f1->name());
				}
			}
		} else {
			omxRecompute(f1, fc);
		}
		if (want & FF_COMPUTE_FIT) {
			if(f1->rows != 1 || f1->cols != 1) {
				omxRaiseErrorf("%s[%d]: %s of type %s does not evaluate to a 1x1 matrix",
					       fitMatrix->name(), (int)ex, f1->name(), f1->fitFunction->fitType);
			}
			fit += f1->data[0];
			if (mg->verbose >= 1) { mxLog("%s: %s fit=%f", fitMatrix->name(), f1->name(), f1->data[0]); }
		}
	}

	if (fc) fc->mac = mac;

	if (want & FF_COMPUTE_FIT) {
		fitMatrix->data[0] = fit;
		if (mg->verbose >= 1) { mxLog("%s: fit=%f", fitMatrix->name(), fit); }
	}
}
Example #9
void ComputeFit(const char *callerName, omxMatrix *fitMat, int want, FitContext *fc)
{
	bool doFit = want & FF_COMPUTE_FIT;
	fc->incrComputeCount();
	omxFitFunction *ff = fitMat->fitFunction;
	if (ff) {
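		// A fit matrix backed by a fit function gets the full compute dispatch;
		// a bare algebra (the else branch) can only return a plain fit value.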
		omxFitFunctionComputeAuto(ff, want, fc);
	} else {
		if (want != FF_COMPUTE_FIT) Rf_error("Only fit is available");
		if (fc->ciobj) Rf_error("CIs cannot be computed for unitless algebra");
		omxRecompute(fitMat, fc);
	}
	if (doFit) {
		fc->fit = totalLogLikelihood(fitMat);
		if (std::isfinite(fc->fit)) {
			fc->resetIterationError();
		}
		Global->checkpointPostfit(callerName, fc, fc->est, false);
		if (OMX_DEBUG) {
			mxLog("%s: completed evaluation, fit=%.12g", fitMat->name(), fc->fit);
		}
	}
}
Example #10
void ComputeFit(const char *callerName, omxMatrix *fitMat, int want, FitContext *fc)
{
	bool doFit = want & FF_COMPUTE_FIT;
	R_CheckUserInterrupt();

#pragma omp atomic
	++Global->computeCount; // could avoid lock by keeping in FitContext

	// old version of openmp can't do this as part of the atomic instruction
	int evaluation = Global->computeCount;

	if (doFit) {
		if (OMX_DEBUG) {
			mxLog("%s: starting evaluation %d, want %d", fitMat->name(), evaluation, want);
		}
		Global->checkpointPrefit(callerName, fc, fc->est, false);
	}
	omxFitFunction *ff = fitMat->fitFunction;
	if (ff) {
		omxFitFunctionComputeAuto(ff, want, fc);
	} else {
		if (want != FF_COMPUTE_FIT) Rf_error("Only fit is available");
		if (fc->CI) Rf_error("CIs cannot be computed for unitless algebra");
		omxRecompute(fitMat, fc);
	}
	if (doFit) {
		fc->fit = totalLogLikelihood(fitMat);
		if (std::isfinite(fc->fit)) {
			fc->resetIterationError();
		}
		Global->checkpointPostfit(fc);
		if (OMX_DEBUG) {
			mxLog("%s: completed evaluation %d, fit=%f", fitMat->name(), evaluation, fc->fit);
		}
	}
}
Example #11
static void omxRowFitFunctionSingleIteration(omxFitFunction *localobj, omxFitFunction *sharedobj, int rowbegin, int rowcount,
					     FitContext *fc) {

    omxRowFitFunction* oro = ((omxRowFitFunction*) localobj->argStruct);
    omxRowFitFunction* shared_oro = ((omxRowFitFunction*) sharedobj->argStruct);

    omxMatrix *rowAlgebra, *rowResults;
    omxMatrix *filteredDataRow, *dataRow, *existenceVector;
    omxMatrix *dataColumns;
	omxData *data;
	int isContiguous, contiguousStart, contiguousLength;

	rowAlgebra	    = oro->rowAlgebra;
	rowResults	    = shared_oro->rowResults;
	data		    = oro->data;
    dataColumns     = oro->dataColumns;
    dataRow         = oro->dataRow;
    filteredDataRow = oro->filteredDataRow;
    existenceVector = oro->existenceVector;
    
    isContiguous    = oro->contiguous.isContiguous;
	contiguousStart = oro->contiguous.start;
	contiguousLength = oro->contiguous.length;

	int *toRemove = (int*) malloc(sizeof(int) * dataColumns->cols);
	int *zeros = (int*) calloc(dataColumns->cols, sizeof(int));

	for(int row = rowbegin; row < data->rows && (row - rowbegin) < rowcount; row++) {
		mxLogSetCurrentRow(row);

		data->loadDefVars(localobj->matrix->currentState, row);

        // Populate data row
		if (isContiguous) {
			omxContiguousDataRow(data, row, contiguousStart, contiguousLength, dataRow);
		} else {
			omxDataRow(data, row, dataColumns, dataRow);	// Populate data row
		}

		markDataRowDependencies(localobj->matrix->currentState, oro);
		
		for(int j = 0; j < dataColumns->cols; j++) {
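			// Flag missing entries for removal and record presence in existenceVector.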
			if(omxDataElementMissing(data, row, j)) {
				toRemove[j] = 1;
				omxSetVectorElement(existenceVector, j, 0);
			} else {
			    toRemove[j] = 0;
			    omxSetVectorElement(existenceVector, j, 1);
			}
		}		
		
		omxCopyMatrix(filteredDataRow, dataRow);
		omxRemoveRowsAndColumns(filteredDataRow, zeros, toRemove);

		omxRecompute(rowAlgebra, fc);

		omxCopyMatrixToRow(rowAlgebra, row, rowResults);
	}
	free(toRemove);
	free(zeros);
}
Example #12
static void omxCallRowFitFunction(omxFitFunction *oo, int want, FitContext *fc)
{
	if (want & (FF_COMPUTE_INITIAL_FIT | FF_COMPUTE_PREOPTIMIZE)) return;

    if(OMX_DEBUG) { mxLog("Beginning Row Evaluation.");}
	// Requires: Data, means, covariances.

	omxMatrix* objMatrix  = oo->matrix;
	int numChildren = fc? fc->childList.size() : 0;

    omxMatrix *reduceAlgebra;
	omxData *data;

    omxRowFitFunction* oro = ((omxRowFitFunction*) oo->argStruct);

	reduceAlgebra   = oro->reduceAlgebra;
	data		    = oro->data;

	/* Michael Spiegel, 7/31/12
	* The demo "RowFitFunctionSimpleExamples" will fail in the parallel 
	* Hessian calculation if the resizing operation is performed.
	*
	omxMatrix *rowAlgebra, *rowResults
	rowAlgebra	    = oro->rowAlgebra;
	rowResults	    = oro->rowResults;

	if(rowResults->cols != rowAlgebra->cols || rowResults->rows != data->rows) {
		if(OMX_DEBUG_ROWS(1)) { 
			mxLog("Resizing rowResults from %dx%d to %dx%d.", 
				rowResults->rows, rowResults->cols, 
				data->rows, rowAlgebra->cols); 
		}
		omxResizeMatrix(rowResults, data->rows, rowAlgebra->cols);
	}
	*/
		
    int parallelism = (numChildren == 0) ? 1 : numChildren;

	if (parallelism > data->rows) {
		parallelism = data->rows;
	}

	if (parallelism > 1) {
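		// Split the rows into contiguous strides, one per child fit context;
		// the last child absorbs any remainder rows.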
		int stride = (data->rows / parallelism);

#pragma omp parallel for num_threads(parallelism) 
		for(int i = 0; i < parallelism; i++) {
			FitContext *kid = fc->childList[i];
			omxMatrix *childMatrix = kid->lookupDuplicate(objMatrix);
			omxFitFunction *childFit = childMatrix->fitFunction;
			if (i == parallelism - 1) {
				omxRowFitFunctionSingleIteration(childFit, oo, stride * i, data->rows - stride * i, fc);
			} else {
				omxRowFitFunctionSingleIteration(childFit, oo, stride * i, stride, fc);
			}
		}
	} else {
		omxRowFitFunctionSingleIteration(oo, oo, 0, data->rows, fc);
	}

	omxRecompute(reduceAlgebra, fc);

	omxCopyMatrix(oo->matrix, reduceAlgebra);

}
Example #13
void omxComputeNumericDeriv::omxEstimateHessianOffDiagonal(int i, int l, struct hess_struct* hess_work)
{
	int ix = paramMap[i];
	int lx = paramMap[l];
    static const double v = 2.0; //Note: NumDeriv comments that this could be a parameter, but is hard-coded in the algorithm

	double *Haprox             = hess_work->Haprox;
	omxMatrix* fitMatrix = hess_work->fitMatrix; 
	FitContext* fc = hess_work->fc; 
	double *freeParams         = fc->est;

	double iOffset = std::max(fabs(stepSize*optima[i]), stepSize);
	double lOffset = std::max(fabs(stepSize*optima[l]), stepSize);

	for(int k = 0; k < numIter; k++) {
		freeParams[ix] = optima[i] + iOffset;
		freeParams[lx] = optima[l] + lOffset;

		fc->copyParamToModel();

		++hess_work->probeCount;
		omxRecompute(fitMatrix, fc);
		double f1 = omxMatrixElement(fitMatrix, 0, 0);

		freeParams[ix] = optima[i] - iOffset;
		freeParams[lx] = optima[l] - lOffset;

		fc->copyParamToModel();

		++hess_work->probeCount;
		omxRecompute(fitMatrix, fc);
		double f2 = omxMatrixElement(fitMatrix, 0, 0);

		Haprox[k] = (f1 - 2.0 * minimum + f2 - hessian[i*numParams+i]*iOffset*iOffset -
						hessian[l*numParams+l]*lOffset*lOffset)/(2.0*iOffset*lOffset);
		if(verbose >= 2) {
			mxLog("Hessian first off-diagonal calculation: Haprox = %f, iOffset = %f, lOffset=%f from params %f, %f and %f, %f and %d (also: %f, %f and %f)",
			      Haprox[k], iOffset, lOffset, f1, hessian[i*numParams+i], hessian[l*numParams+l],
			      v, k, pow(v, k), stepSize*optima[i], stepSize*optima[l]);
		}

		freeParams[ix] = optima[i];				// Reset parameter values
		freeParams[lx] = optima[l];

		iOffset = iOffset / v;					//  And shrink step
		lOffset = lOffset / v;
	}

	for(int m = 1; m < numIter; m++) {						// Richardson Step
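		// Combine estimates from successive step sizes to cancel the leading
		// error term of the finite-difference approximation.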
		for(int k = 0; k < (numIter - m); k++) {
			//if(OMX_DEBUG) {mxLog("Hessian off-diagonal calculation: Haprox = %f, iOffset = %f, lOffset=%f from params %f, %f and %f, %f and %d (also: %f, %f and %f, and %f).", Haprox[k], iOffset, lOffset, stepSize, optima[i], optima[l], v, m, pow(4.0, m), stepSize*optima[i], stepSize*optima[l], k);}
			Haprox[k] = (Haprox[k+1] * pow(4.0, m) - Haprox[k]) / (pow(4.0, m)-1);
		}
	}

	if(verbose >= 2) {
		mxLog("Hessian estimation: Populating Hessian"
		      " ([%d, %d] = %d and %d) with value %f...",
		      i, l, i*numParams+l, l*numParams+i, Haprox[0]);
	}
	hessian[i*numParams+l] = Haprox[0];
	hessian[l*numParams+i] = Haprox[0];
}
Example #14
/**
  @param i              parameter number
  @param hess_work      local copy
  @param optima         shared read-only variable
  @param gradient       shared write-only variable
  @param hessian        shared write-only variable
 */
void omxComputeNumericDeriv::omxEstimateHessianOnDiagonal(int i, struct hess_struct* hess_work)
{
	int ix = paramMap[i];
	static const double v = 2.0; //Note: NumDeriv comments that this could be a parameter, but is hard-coded in the algorithm

	double *Haprox             = hess_work->Haprox;
	double *Gcentral             = hess_work->Gcentral;
	double *Gforward             = hess_work->Gforward;
	double *Gbackward            = hess_work->Gbackward;
	omxMatrix* fitMatrix = hess_work->fitMatrix; 
	FitContext* fc = hess_work->fc; 
	double *freeParams         = fc->est;

	/* Part the first: Gradient and diagonal */
	double iOffset = std::max(fabs(stepSize * optima[i]), stepSize);
	for(int k = 0; k < numIter; k++) {			// Decreasing step size, starting at k == 0
		freeParams[ix] = optima[i] + iOffset;
		
		fc->copyParamToModel();

		++hess_work->probeCount;
		omxRecompute(fitMatrix, fc);
		double f1 = omxMatrixElement(fitMatrix, 0, 0);

		freeParams[ix] = optima[i] - iOffset;

		fc->copyParamToModel();

		++hess_work->probeCount;
		omxRecompute(fitMatrix, fc);
		double f2 = omxMatrixElement(fitMatrix, 0, 0);

		Gcentral[k] = (f1 - f2) / (2.0*iOffset); 						// This is for the gradient
		Gforward[k] = (minimum - f2) / iOffset;
		Gbackward[k] = (f1 - minimum) / iOffset;
		Haprox[k] = (f1 - 2.0 * minimum + f2) / (iOffset * iOffset);		// This is second derivative
		freeParams[ix] = optima[i];									// Reset parameter value
		iOffset /= v;
		if(verbose >= 2) {
			mxLog("Hessian: diag[%s] Δ%g (#%d) F1 %f F2 %f grad %f hess %f",
			      fc->varGroup->vars[i]->name, iOffset, k, f1, f2, Gcentral[k], Haprox[k]);
		}
	}

	for(int m = 1; m < numIter; m++) {						// Richardson Step
		for(int k = 0; k < (numIter - m); k++) {
			// NumDeriv Hard-wires 4s for r here. Why?
			Gcentral[k] = (Gcentral[k+1] * pow(4.0, m) - Gcentral[k])/(pow(4.0, m)-1);
			Gforward[k] = (Gforward[k+1] * pow(4.0, m) - Gforward[k])/(pow(4.0, m)-1);
			Gbackward[k] = (Gbackward[k+1] * pow(4.0, m) - Gbackward[k])/(pow(4.0, m)-1);
			Haprox[k] = (Haprox[k+1] * pow(4.0, m) - Haprox[k])/(pow(4.0, m)-1);
		}
	}

	if(verbose >= 2) {
		mxLog("Hessian: diag[%s] final grad %f hess %f", fc->varGroup->vars[i]->name, Gcentral[0], Haprox[0]);
	}
	gcentral[i]  = Gcentral[0];
	gforward[i]  = Gforward[0];
	gbackward[i] = Gbackward[0];
	if (hessian) hessian[i*numParams + i] = Haprox[0];
}
Example #15
void AlgebraFitFunction::compute(int want, FitContext *fc)
{
	if (fc && varGroup != fc->varGroup) {
		setVarGroup(fc->varGroup);
	}

	if (want & (FF_COMPUTE_FIT | FF_COMPUTE_INITIAL_FIT | FF_COMPUTE_PREOPTIMIZE)) {
		if (algebra) {
			omxRecompute(algebra, fc);
			ff->matrix->data[0] = algebra->data[0];
		} else {
			ff->matrix->data[0] = 0;
		}
	}

	if (gradMap.size() == 0) return;
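	// gradMap maps rows of the derivative algebras onto free-parameter indices;
	// negative entries have no matching free parameter and are skipped below.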
	if (gradient) {
		omxRecompute(gradient, fc);
		if (want & FF_COMPUTE_GRADIENT) {
			for (size_t v1=0; v1 < gradMap.size(); ++v1) {
				int to = gradMap[v1];
				if (to < 0) continue;
				fc->grad(to) += omxVectorElement(gradient, v1);
			}
		}
		if (want & FF_COMPUTE_INFO && fc->infoMethod == INFO_METHOD_MEAT) {
			std::vector<double> grad(varGroup->vars.size());
			for (size_t v1=0; v1 < gradMap.size(); ++v1) {
				int to = gradMap[v1];
				if (to < 0) continue;
				grad[to] += omxVectorElement(gradient, v1);
			}
			addSymOuterProd(1, grad.data(), varGroup->vars.size(), fc->infoB);
		}
	}
	if (hessian && ((want & (FF_COMPUTE_HESSIAN | FF_COMPUTE_IHESSIAN)) ||
			(want & FF_COMPUTE_INFO && fc->infoMethod == INFO_METHOD_HESSIAN))) {
		omxRecompute(hessian, fc);

		if (!vec2diag) {
			HessianBlock *hb = new HessianBlock;
			hb->vars.resize(numDeriv);
			int vx=0;
			for (size_t h1=0; h1 < gradMap.size(); ++h1) {
				if (gradMap[h1] < 0) continue;
				hb->vars[vx] = gradMap[h1];
				++vx;
			}
			hb->mat.resize(numDeriv, numDeriv);
			for (size_t d1=0, h1=0; h1 < gradMap.size(); ++h1) {
				if (gradMap[h1] < 0) continue;
				for (size_t d2=0, h2=0; h2 <= h1; ++h2) {
					if (gradMap[h2] < 0) continue;
					if (h1 == h2) {
						hb->mat(d2,d1) = omxMatrixElement(hessian, h2, h1);
					} else {
						double coef1 = omxMatrixElement(hessian, h2, h1);
						double coef2 = omxMatrixElement(hessian, h1, h2);
						if (coef1 != coef2) {
							Rf_warning("%s: Hessian algebra '%s' is not symmetric at [%d,%d]",
								   ff->matrix->name(), hessian->name(), 1+h2, 1+h1);
						}
						hb->mat(d2,d1) = coef1;
					}
					++d2;
				}
				++d1;
			}
			fc->queue(hb);
		} else {
			for (size_t h1=0; h1 < gradMap.size(); ++h1) {
				int to = gradMap[h1];
				if (to < 0) continue;
				HessianBlock *hb = new HessianBlock;
				hb->vars.assign(1, to);
				hb->mat.resize(1,1);
				hb->mat(0,0) = omxMatrixElement(hessian, h1, h1);
				fc->queue(hb);
			}
		}
	}
	// complain if unimplemented FF_COMPUTE_INFO requested? TODO
}
Example #16
void omxLISRELExpectation::studyExoPred() // compare with similar function for RAM
{
	if (data->defVars.size() == 0 || !TY || !TY->isSimple() || !PS->isSimple()) return;

	Eigen::VectorXd estSave;
	copyParamToModelFake1(currentState, estSave);
	omxRecompute(PS, 0);
	omxRecompute(LY, 0);
	omxRecompute(BE, 0);

	EigenMatrixAdaptor ePS(PS);  // latent covariance
	EigenMatrixAdaptor eLY(LY);  // to manifest loading
	EigenMatrixAdaptor eBE(BE);  // to latent loading
	Eigen::VectorXd hasVariance = ePS.diagonal().array().abs().matrix();

	int found = 0;
	std::vector<int> exoDataCol(PS->rows, -1);
	int alNum = ~AL->matrixNumber;
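	// Definition variables that feed a latent mean (AL) whose variance in PS is zero
	// are switched to exogenous-predictor mode and dropped from per-row handling.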
	for (int k=0; k < int(data->defVars.size()); ++k) {
		omxDefinitionVar &dv = data->defVars[k];
		if (dv.matrix == alNum && hasVariance[ dv.row ] == 0.0) {
			for (int cx=0; cx < eBE.rows(); ++cx) {
				if (eBE(cx, dv.row) == 0.0) continue;
				mxThrow("%s: latent exogenous variables are not supported (%s -> %s)", name,
					 PS->rownames[dv.row], BE->rownames[cx]);
			}
			if (eLY.col(dv.row).array().abs().sum() == 0.) continue;
			exoDataCol[dv.row] = dv.column;
			found += 1;
			dv.loadData(currentState, 0.);
			if (verbose >= 1) {
				mxLog("%s: set defvar '%s' for latent '%s' to exogenous mode",
				      name, data->columnName(dv.column), PS->rownames[dv.row]);
			}
			data->defVars.erase(data->defVars.begin() + k--);
		}
	}

	copyParamToModelRestore(currentState, estSave);

	if (!found) return;

	slope = omxInitMatrix(LY->rows, found, currentState);
	EigenMatrixAdaptor eSl(slope);
	eSl.setZero();

	for (int cx=0, ex=0; cx < PS->rows; ++cx) {
		if (exoDataCol[cx] == -1) continue;
		exoDataColumns.push_back(exoDataCol[cx]);
		for (int rx=0; rx < LY->rows; ++rx) {
			slope->addPopulate(LY, rx, cx, rx, ex);
		}
		ex += 1;
	}

	exoPredMean.resize(exoDataColumns.size());
	for (int cx=0; cx < int(exoDataColumns.size()); ++cx) {
		auto &e1 = data->rawCols[ exoDataColumns[cx] ];
		Eigen::Map< Eigen::VectorXd > vec(e1.ptr.realData, data->numRawRows());
		exoPredMean[cx] = vec.mean();
	}
}
Example #17
void UserConstraint::refresh(FitContext *fc)
{
	omxRecompute(pad, fc);
	//omxRecompute(jacobian, fc); //<--Not sure if Jacobian needs to be recomputed every time constraint function does.
}
Example #18
static void omxRowFitFunctionSingleIteration(omxFitFunction *localobj, omxFitFunction *sharedobj, int rowbegin, int rowcount,
					     FitContext *fc) {

    omxRowFitFunction* oro = ((omxRowFitFunction*) localobj->argStruct);
    omxRowFitFunction* shared_oro = ((omxRowFitFunction*) sharedobj->argStruct);

    omxMatrix *rowAlgebra, *rowResults;
    omxMatrix *filteredDataRow, *dataRow, *existenceVector;
    omxMatrix *dataColumns;
	omxData *data;
	int isContiguous, contiguousStart, contiguousLength;
    int numCols, numRemoves;

	rowAlgebra	    = oro->rowAlgebra;
	rowResults	    = shared_oro->rowResults;
	data		    = oro->data;
    dataColumns     = oro->dataColumns;
    dataRow         = oro->dataRow;
    filteredDataRow = oro->filteredDataRow;
    existenceVector = oro->existenceVector;
    
    isContiguous    = oro->contiguous.isContiguous;
	contiguousStart = oro->contiguous.start;
	contiguousLength = oro->contiguous.length;

	Eigen::VectorXd oldDefs;
	oldDefs.resize(data->defVars.size());
	oldDefs.setConstant(NA_REAL);

	numCols = dataColumns->cols;
	int *toRemove = (int*) malloc(sizeof(int) * dataColumns->cols);
	int *zeros = (int*) calloc(dataColumns->cols, sizeof(int));

	for(int row = rowbegin; row < data->rows && (row - rowbegin) < rowcount; row++) {

		data->handleDefinitionVarList(localobj->matrix->currentState, row, oldDefs.data());

		omxStateNextRow(localobj->matrix->currentState);						// Advance row
		
        // Populate data row
		numRemoves = 0;
	
		if (isContiguous) {
			omxContiguousDataRow(data, row, contiguousStart, contiguousLength, dataRow);
		} else {
			omxDataRow(data, row, dataColumns, dataRow);	// Populate data row
		}

		markDataRowDependencies(localobj->matrix->currentState, oro);
		
		for(int j = 0; j < dataColumns->cols; j++) {
			double dataValue = omxVectorElement(dataRow, j);
			if(std::isnan(dataValue)) {
				numRemoves++;
				toRemove[j] = 1;
                omxSetVectorElement(existenceVector, j, 0);
			} else {
			    toRemove[j] = 0;
                omxSetVectorElement(existenceVector, j, 1);
			}
		}		
		// TODO: Determine if this is the correct response.
		
		if(numRemoves == numCols) {
			char *errstr = (char*) calloc(250, sizeof(char));
			sprintf(errstr, "Row %d completely missing.  omxRowFitFunction cannot have completely missing rows.", omxDataIndex(data, row));
			omxRaiseError(errstr);
			free(errstr);
			continue;
		}

		omxCopyMatrix(filteredDataRow, dataRow);
		omxRemoveRowsAndColumns(filteredDataRow, 0, numRemoves, zeros, toRemove);

		omxRecompute(rowAlgebra, fc);

		omxCopyMatrixToRow(rowAlgebra, omxDataIndex(data, row), rowResults);
	}
	free(toRemove);
	free(zeros);
}
Example #19
void omxCallLISRELExpectation(omxExpectation* oo, FitContext *fc, const char *, const char *) {
    if(OMX_DEBUG) {
        mxLog("LISREL Expectation Called.");
    }
    omxLISRELExpectation* oro = (omxLISRELExpectation*)(oo->argStruct);

    omxRecompute(oro->LX, fc);
    omxRecompute(oro->LY, fc);
    omxRecompute(oro->BE, fc);
    omxRecompute(oro->GA, fc);
    omxRecompute(oro->PH, fc);
    omxRecompute(oro->PS, fc);
    omxRecompute(oro->TD, fc);
    omxRecompute(oro->TE, fc);
    omxRecompute(oro->TH, fc);
    if(oro->TX != NULL) {     // Update means?
        omxRecompute(oro->TX, fc);
        omxRecompute(oro->KA, fc);
    }
    if(oro->TY != NULL) {
        omxRecompute(oro->TY, fc);
        omxRecompute(oro->AL, fc);
    }

    omxCalculateLISRELCovarianceAndMeans(oro);
}
Example #20
	void state::compute(int want, FitContext *fc)
	{
		state *st = (state*) this;
		auto *oo = this;

		for (auto c1 : components) {
			if (c1->fitFunction) {
				omxFitFunctionCompute(c1->fitFunction, want, fc);
			} else {
				omxRecompute(c1, fc);
			}
		}
		if (!(want & FF_COMPUTE_FIT)) return;

		int nrow = components[0]->rows;
		for (auto c1 : components) {
			if (c1->rows != nrow) {
				mxThrow("%s: component '%s' has %d rows but component '%s' has %d rows",
					 oo->name(), components[0]->name(), nrow, c1->name(), c1->rows);
			}
		}

		Eigen::VectorXd expect;
		Eigen::VectorXd rowResult;
		int numC = components.size();
		Eigen::VectorXd tp(numC);
		double lp=0;
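		// Forward pass over rows: weight each component's row likelihood by the current
		// state distribution, renormalize, and propagate through the transition matrix when present.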
		for (int rx=0; rx < nrow; ++rx) {
			if (expectation->loadDefVars(rx) || rx == 0) {
				omxExpectationCompute(fc, expectation, NULL);
				if (!st->transition || rx == 0) {
					EigenVectorAdaptor Einitial(st->initial);
					expect = Einitial;
					if (expect.rows() != numC || expect.cols() != 1) {
						omxRaiseErrorf("%s: initial prob matrix must be %dx%d not %dx%d",
							       name(), numC, 1, expect.rows(), expect.cols());
						return;
					}
				}
				if (st->transition && (st->transition->rows != numC || st->transition->cols != numC)) {
					omxRaiseErrorf("%s: transition prob matrix must be %dx%d not %dx%d",
						       name(), numC, numC, st->transition->rows, st->transition->cols);
					return;
				}
			}
			for (int cx=0; cx < int(components.size()); ++cx) {
				EigenVectorAdaptor Ecomp(components[cx]);
				tp[cx] = Ecomp[rx];
			}
			if (st->verbose >= 4) {
				mxPrintMat("tp", tp);
			}
			if (st->transition) {
				EigenMatrixAdaptor Etransition(st->transition);
				expect = (Etransition * expect).eval();
			}
			rowResult = tp.array() * expect.array();
			double rowp = rowResult.sum();
			rowResult /= rowp;
			lp += log(rowp);
			if (st->transition) expect = rowResult;
		}
		oo->matrix->data[0] = Global->llScale * lp;
		if (st->verbose >= 2) mxLog("%s: fit=%f", oo->name(), lp);
	}
Example #21
static void CallFIMLFitFunction(omxFitFunction *off, int want, FitContext *fc)
{
	// TODO: Figure out how to give access to other per-iteration structures.
	// TODO: Current implementation is slow: update by filtering correlations and thresholds.
	// TODO: Current implementation does not implement speedups for sorting.
	// TODO: Current implementation may fail on all-continuous-missing or all-ordinal-missing rows.
	
	if (want & (FF_COMPUTE_PREOPTIMIZE)) return;

    if(OMX_DEBUG) { 
	    mxLog("Beginning Joint FIML Evaluation.");
    }
	int returnRowLikelihoods = 0;

	omxFIMLFitFunction* ofiml = ((omxFIMLFitFunction*)off->argStruct);
	omxMatrix* fitMatrix  = off->matrix;
	int numChildren = (int) fc->childList.size();

	omxMatrix *cov 		= ofiml->cov;
	omxMatrix *means	= ofiml->means;
	if (!means) {
		omxRaiseErrorf("%s: raw data observed but no expected means "
			       "vector was provided. Add something like mxPath(from = 'one',"
			       " to = manifests) to your model.", off->name());
		return;
	}
	omxData* data           = ofiml->data;                            //  read-only
	omxMatrix *dataColumns	= ofiml->dataColumns;

	returnRowLikelihoods = ofiml->returnRowLikelihoods;   //  read-only
	omxExpectation* expectation = off->expectation;
	std::vector< omxThresholdColumn > &thresholdCols = expectation->thresholds;

	if (data->defVars.size() == 0 && !strEQ(expectation->expType, "MxExpectationStateSpace")) {
		if(OMX_DEBUG) {mxLog("Precalculating cov and means for all rows.");}
		omxExpectationRecompute(fc, expectation);
		// MCN Also do the threshold formulae!
		
		for(int j=0; j < dataColumns->cols; j++) {
			int var = omxVectorElement(dataColumns, j);
			if (!omxDataColumnIsFactor(data, var)) continue;
			if (j < int(thresholdCols.size()) && thresholdCols[j].numThresholds > 0) { // j is an ordinal column
				omxMatrix* nextMatrix = thresholdCols[j].matrix;
				omxRecompute(nextMatrix, fc);
				checkIncreasing(nextMatrix, thresholdCols[j].column, thresholdCols[j].numThresholds, fc);
				for(int index = 0; index < numChildren; index++) {
					FitContext *kid = fc->childList[index];
					omxMatrix *target = kid->lookupDuplicate(nextMatrix);
					omxCopyMatrix(target, nextMatrix);
				}
			} else {
				Rf_error("No threshold given for ordinal column '%s'",
					 omxDataColumnName(data, j));
			}
		}

		double *corList 	= ofiml->corList;
		double *weights		= ofiml->weights;

		if (corList) {
			omxStandardizeCovMatrix(cov, corList, weights, fc);	// Calculate correlation and covariance
		}
		for(int index = 0; index < numChildren; index++) {
			FitContext *kid = fc->childList[index];
			omxMatrix *childFit = kid->lookupDuplicate(fitMatrix);
			omxFIMLFitFunction* childOfiml = ((omxFIMLFitFunction*) childFit->fitFunction->argStruct);
			omxCopyMatrix(childOfiml->cov, cov);
			omxCopyMatrix(childOfiml->means, means);
			if (corList) {
				memcpy(childOfiml->weights, weights, sizeof(double) * cov->rows);
				memcpy(childOfiml->corList, corList, sizeof(double) * (cov->rows * (cov->rows - 1)) / 2);
			}
		}
		if(OMX_DEBUG) { omxPrintMatrix(cov, "Cov"); }
		if(OMX_DEBUG) { omxPrintMatrix(means, "Means"); }
    }

	memset(ofiml->rowLogLikelihoods->data, 0, sizeof(double) * data->rows);
    
	int parallelism = (numChildren == 0) ? 1 : numChildren;

	if (parallelism > data->rows) {
		parallelism = data->rows;
	}

	FIMLSingleIterationType singleIter = ofiml->SingleIterFn;

	bool failed = false;
	if (parallelism > 1) {
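		// Each child FitContext evaluates a contiguous block of rows; the last block
		// absorbs any remainder, and failures are OR-reduced across threads.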
		int stride = (data->rows / parallelism);

#pragma omp parallel for num_threads(parallelism) reduction(||:failed)
		for(int i = 0; i < parallelism; i++) {
			FitContext *kid = fc->childList[i];
			omxMatrix *childMatrix = kid->lookupDuplicate(fitMatrix);
			omxFitFunction *childFit = childMatrix->fitFunction;
			if (i == parallelism - 1) {
				failed |= singleIter(kid, childFit, off, stride * i, data->rows - stride * i);
			} else {
				failed |= singleIter(kid, childFit, off, stride * i, stride);
			}
		}
	} else {
		failed |= singleIter(fc, off, off, 0, data->rows);
	}
	if (failed) {
		omxSetMatrixElement(off->matrix, 0, 0, NA_REAL);
		return;
	}

	if(!returnRowLikelihoods) {
		double val, sum = 0.0;
		// floating-point addition is not associative,
		// so we serialized the following reduction operation.
		for(int i = 0; i < data->rows; i++) {
			val = omxVectorElement(ofiml->rowLogLikelihoods, i);
//			mxLog("%d , %f, %llx\n", i, val, *((unsigned long long*) &val));
			sum += val;
		}	
		if(OMX_DEBUG) {mxLog("Total Likelihood is %3.3f", sum);}
		omxSetMatrixElement(off->matrix, 0, 0, sum);
	}
}