// Recompute an expectation: refresh any per-column threshold matrices first,
// then recompute the expectation itself (no FitContext variant).
void omxExpectationRecompute(omxExpectation *ox)
{
	for (auto &tc : ox->thresholds) {
		// Columns without a threshold matrix (continuous variables) are skipped.
		if (tc.matrix) omxRecompute(tc.matrix, NULL);
	}
	omxExpectationCompute(ox, NULL);
}
// Serialize this expectation's implied moments (cov, means, slopes, thresholds)
// for a single data row into 'out' as a standardized vector.
// Raises an R error if the expectation type does not expose a "cov" component.
void omxExpectation::asVector1(FitContext *fc, int row, Eigen::Ref<Eigen::VectorXd> out)
{
	// Load this row's definition variables, then refresh the expectation.
	loadDefVars(row);
	omxExpectationCompute(fc, this, 0);

	omxMatrix *covMat = getComponent("cov");
	if (covMat == NULL) {
		Rf_error("%s::asVector is not implemented (for object '%s')", expType, name);
	}

	omxMatrix *meanMat  = getComponent("means");
	omxMatrix *slopeMat = getComponent("slope");
	normalToStdVector(covMat, meanMat, slopeMat, thresholdsMat,
			  numOrdinal, getThresholdInfo(), out);
}
void omxCallGREMLFitFunction(omxFitFunction *oo, int want, FitContext *fc){ if (want & (FF_COMPUTE_PREOPTIMIZE)) return; //Recompute Expectation: omxExpectation* expectation = oo->expectation; omxExpectationCompute(expectation, NULL); omxGREMLFitState *gff = (omxGREMLFitState*)oo->argStruct; //<--Cast generic omxFitFunction to omxGREMLFitState //Ensure that the pointer in the GREML fitfunction is directed at the right FreeVarGroup //(not necessary for most compute plans): if(fc && gff->varGroup != fc->varGroup){ gff->buildParamMap(fc->varGroup); } //Declare local variables used in more than one scope in this function: const double Scale = fabs(Global->llScale); //<--absolute value of loglikelihood scale const double NATLOG_2PI = 1.837877066409345483560659472811; //<--log(2*pi) int i; Eigen::Map< Eigen::MatrixXd > Eigy(omxMatrixDataColumnMajor(gff->y), gff->y->cols, 1); Eigen::Map< Eigen::MatrixXd > Vinv(omxMatrixDataColumnMajor(gff->invcov), gff->invcov->rows, gff->invcov->cols); EigenMatrixAdaptor EigX(gff->X); Eigen::MatrixXd P, Py; P.setZero(gff->invcov->rows, gff->invcov->cols); double logdetV=0, logdetquadX=0; if(want & (FF_COMPUTE_FIT | FF_COMPUTE_GRADIENT | FF_COMPUTE_HESSIAN | FF_COMPUTE_IHESSIAN)){ if(gff->usingGREMLExpectation){ omxGREMLExpectation* oge = (omxGREMLExpectation*)(expectation->argStruct); //Check that factorizations of V and the quadratic form in X succeeded: if(oge->cholV_fail_om->data[0]){ oo->matrix->data[0] = NA_REAL; if (fc) fc->recordIterationError("expected covariance matrix is non-positive-definite"); return; } if(oge->cholquadX_fail){ oo->matrix->data[0] = NA_REAL; if (fc) fc->recordIterationError("Cholesky factorization failed; possibly, the matrix of covariates is rank-deficient"); return; } //Log determinant of V: logdetV = oge->logdetV_om->data[0]; //Log determinant of quadX: for(i=0; i < gff->X->cols; i++){ logdetquadX += log(oge->cholquadX_vectorD[i]); } logdetquadX *= 2; gff->REMLcorrection = Scale*0.5*logdetquadX; //Finish 
computing fit (negative loglikelihood): P.triangularView<Eigen::Lower>() = Vinv.selfadjointView<Eigen::Lower>() * (Eigen::MatrixXd::Identity(Vinv.rows(), Vinv.cols()) - (EigX * oge->quadXinv.selfadjointView<Eigen::Lower>() * oge->XtVinv)); //Vinv * (I-Hatmat) Py = P.selfadjointView<Eigen::Lower>() * Eigy; oo->matrix->data[0] = gff->REMLcorrection + Scale*0.5*( (((double)gff->y->cols) * NATLOG_2PI) + logdetV + ( Eigy.transpose() * Py )(0,0)); gff->nll = oo->matrix->data[0]; } else{ //If not using GREML expectation, deal with means and cov in a general way to compute fit... //Declare locals: EigenMatrixAdaptor yhat(gff->means); EigenMatrixAdaptor EigV(gff->cov); double logdetV=0, logdetquadX=0; Eigen::MatrixXd Vinv, quadX; Eigen::LLT< Eigen::MatrixXd > cholV(gff->cov->rows); Eigen::LLT< Eigen::MatrixXd > cholquadX(gff->X->cols); Eigen::VectorXd cholV_vectorD, cholquadX_vectorD; //Cholesky factorization of V: cholV.compute(EigV); if(cholV.info() != Eigen::Success){ omxRaiseErrorf("expected covariance matrix is non-positive-definite"); oo->matrix->data[0] = NA_REAL; return; } //Log determinant of V: cholV_vectorD = (( Eigen::MatrixXd )(cholV.matrixL())).diagonal(); for(i=0; i < gff->X->rows; i++){ logdetV += log(cholV_vectorD[i]); } logdetV *= 2; Vinv = cholV.solve(Eigen::MatrixXd::Identity( EigV.rows(), EigV.cols() )); //<-- V inverse quadX = EigX.transpose() * Vinv * EigX; //<--Quadratic form in X cholquadX.compute(quadX); //<--Cholesky factorization of quadX if(cholquadX.info() != Eigen::Success){ omxRaiseErrorf("Cholesky factorization failed; possibly, the matrix of covariates is rank-deficient"); oo->matrix->data[0] = NA_REAL; return; } cholquadX_vectorD = (( Eigen::MatrixXd )(cholquadX.matrixL())).diagonal(); for(i=0; i < gff->X->cols; i++){ logdetquadX += log(cholquadX_vectorD[i]); } logdetquadX *= 2; gff->REMLcorrection = Scale*0.5*logdetquadX; //Finish computing fit: oo->matrix->data[0] = gff->REMLcorrection + Scale*0.5*( ((double)gff->y->rows * NATLOG_2PI) + 
logdetV + ( Eigy.transpose() * Vinv * (Eigy - yhat) )(0,0)); gff->nll = oo->matrix->data[0]; return; } } if(want & (FF_COMPUTE_GRADIENT | FF_COMPUTE_HESSIAN | FF_COMPUTE_IHESSIAN)){ //This part requires GREML expectation: omxGREMLExpectation* oge = (omxGREMLExpectation*)(expectation->argStruct); //Declare local variables for this scope: int numChildren = fc->childList.size(); int __attribute__((unused)) parallelism = (numChildren == 0) ? 1 : numChildren; fc->grad.resize(gff->dVlength); //<--Resize gradient in FitContext //Set up new HessianBlock: HessianBlock *hb = new HessianBlock; if(want & (FF_COMPUTE_HESSIAN | FF_COMPUTE_IHESSIAN)){ hb->vars.resize(gff->dVlength); hb->mat.resize(gff->dVlength, gff->dVlength); } //Begin looping thru free parameters: #pragma omp parallel for num_threads(parallelism) for(i=0; i < gff->dVlength; i++){ //Declare locals within parallelized region: int j=0, t1=0, t2=0; Eigen::MatrixXd PdV_dtheta1; Eigen::MatrixXd dV_dtheta1(Eigy.rows(), Eigy.rows()); //<--Derivative of V w/r/t parameter i. Eigen::MatrixXd dV_dtheta2(Eigy.rows(), Eigy.rows()); //<--Derivative of V w/r/t parameter j. t1 = gff->gradMap[i]; //<--Parameter number for parameter i. 
if(t1 < 0){continue;} if(want & (FF_COMPUTE_HESSIAN | FF_COMPUTE_IHESSIAN)){hb->vars[i] = t1;} if( oge->numcases2drop ){ dropCasesAndEigenize(gff->dV[i], dV_dtheta1, oge->numcases2drop, oge->dropcase, 1); } else{dV_dtheta1 = Eigen::Map< Eigen::MatrixXd >(omxMatrixDataColumnMajor(gff->dV[i]), gff->dV[i]->rows, gff->dV[i]->cols);} //PdV_dtheta1 = P.selfadjointView<Eigen::Lower>() * dV_dtheta1.selfadjointView<Eigen::Lower>(); PdV_dtheta1 = P.selfadjointView<Eigen::Lower>(); PdV_dtheta1 = PdV_dtheta1 * dV_dtheta1.selfadjointView<Eigen::Lower>(); for(j=i; j < gff->dVlength; j++){ if(j==i){ gff->gradient(t1) = Scale*0.5*(PdV_dtheta1.trace() - (Eigy.transpose() * PdV_dtheta1 * Py)(0,0)); fc->grad(t1) += gff->gradient(t1); if(want & (FF_COMPUTE_HESSIAN | FF_COMPUTE_IHESSIAN)){ gff->avgInfo(t1,t1) = Scale*0.5*(Eigy.transpose() * PdV_dtheta1 * PdV_dtheta1 * Py)(0,0); } } else{if(want & (FF_COMPUTE_HESSIAN | FF_COMPUTE_IHESSIAN)){ t2 = gff->gradMap[j]; //<--Parameter number for parameter j. if(t2 < 0){continue;} if( oge->numcases2drop ){ dropCasesAndEigenize(gff->dV[j], dV_dtheta2, oge->numcases2drop, oge->dropcase, 1); } else{dV_dtheta2 = Eigen::Map< Eigen::MatrixXd >(omxMatrixDataColumnMajor(gff->dV[j]), gff->dV[j]->rows, gff->dV[j]->cols);} gff->avgInfo(t1,t2) = Scale*0.5*(Eigy.transpose() * PdV_dtheta1 * P.selfadjointView<Eigen::Lower>() * dV_dtheta2.selfadjointView<Eigen::Lower>() * Py)(0,0); gff->avgInfo(t2,t1) = gff->avgInfo(t1,t2); }}}} //Assign upper triangle elements of avgInfo to the HessianBlock: if(want & (FF_COMPUTE_HESSIAN | FF_COMPUTE_IHESSIAN)){ for (size_t d1=0, h1=0; h1 < gff->dV.size(); ++h1) { for (size_t d2=0, h2=0; h2 <= h1; ++h2) { hb->mat(d2,d1) = gff->avgInfo(h2,h1); ++d2; } ++d1; } fc->queue(hb); }}
static void omxCallWLSFitFunction(omxFitFunction *oo, int want, FitContext *fc) { if (want & (FF_COMPUTE_INITIAL_FIT | FF_COMPUTE_PREOPTIMIZE)) return; if(OMX_DEBUG) { mxLog("Beginning WLS Evaluation.");} // Requires: Data, means, covariances. double sum = 0.0; omxMatrix *eCov, *eMeans, *P, *B, *weights, *oFlat, *eFlat; omxMatrix *seCov, *seMeans, *seThresholdsMat, *seFlat; omxWLSFitFunction *owo = ((omxWLSFitFunction*)oo->argStruct); /* Locals for readability. Compiler should cut through this. */ eCov = owo->expectedCov; eMeans = owo->expectedMeans; std::vector< omxThresholdColumn > &eThresh = oo->expectation->thresholds; oFlat = owo->observedFlattened; eFlat = owo->expectedFlattened; weights = owo->weights; B = owo->B; P = owo->P; seCov = owo->standardExpectedCov; seMeans = owo->standardExpectedMeans; seThresholdsMat = owo->standardExpectedThresholds; seFlat = owo->standardExpectedFlattened; int onei = 1; omxExpectation* expectation = oo->expectation; /* Recompute and recopy */ if(OMX_DEBUG) { mxLog("WLSFitFunction Computing expectation"); } omxExpectationCompute(fc, expectation, NULL); omxMatrix *expThresholdsMat = expectation->thresholdsMat; standardizeCovMeansThresholds(eCov, eMeans, expThresholdsMat, eThresh, seCov, seMeans, seThresholdsMat); if(expThresholdsMat != NULL){ flattenDataToVector(seCov, seMeans, seThresholdsMat, eThresh, eFlat); } else { flattenDataToVector(eCov, eMeans, expThresholdsMat, eThresh, eFlat); } omxCopyMatrix(B, oFlat); //if(OMX_DEBUG) {omxPrintMatrix(B, "....WLS Observed Vector: "); } if(OMX_DEBUG) {omxPrintMatrix(eFlat, "....WLS Expected Vector: "); } omxDAXPY(-1.0, eFlat, B); //if(OMX_DEBUG) {omxPrintMatrix(B, "....WLS Observed - Expected Vector: "); } if(weights != NULL) { //if(OMX_DEBUG_ALGEBRA) {omxPrintMatrix(weights, "....WLS Weight Matrix: "); } omxDGEMV(TRUE, 1.0, weights, B, 0.0, P); } else { // ULS Case: Memcpy faster than dgemv. 
omxCopyMatrix(P, B); } sum = F77_CALL(ddot)(&(P->cols), P->data, &onei, B->data, &onei); oo->matrix->data[0] = sum; if(OMX_DEBUG) { mxLog("WLSFitFunction value comes to: %f.", oo->matrix->data[0]); } }
// Dispatch entry point for the BA81 (IRT) fit function. Behavior depends on
// the 'want' bitmask and on whether the expectation is in EM-augmented or
// observed-data mode. May write the fit (or per-row likelihoods) into
// oo->matrix, queue derivative information into fc, or raise an R error.
static void ba81ComputeFit(omxFitFunction* oo, int want, FitContext *fc)
{
	BA81FitState *state = (BA81FitState*) oo->argStruct;
	BA81Expect *estate = (BA81Expect*) oo->expectation->argStruct;
	// Cache the free-parameter count whenever a FitContext is available.
	if (fc) state->numFreeParam = fc->varGroup->vars.size();

	if (want & FF_COMPUTE_INITIAL_FIT) return;

	if (estate->type == EXPECTATION_AUGMENTED) {
		// EM (augmented-data) mode.
		buildItemParamMap(oo, fc);

		if (want & FF_COMPUTE_PARAMFLAVOR) {
			// Report per-parameter "flavor" tags; NULL entries are left untouched.
			for (size_t px=0; px < state->numFreeParam; ++px) {
				if (state->paramFlavor[px] == NULL) continue;
				fc->flavor[px] = state->paramFlavor[px];
			}
			return;
		}

		if (want & FF_COMPUTE_PREOPTIMIZE) {
			omxExpectationCompute(fc, oo->expectation, NULL);
			return;
		}

		if (want & FF_COMPUTE_INFO) {
			buildLatentParamMap(oo, fc);
			if (!state->freeItemParams) {
				omxRaiseErrorf("%s: no free parameters", oo->name());
				return;
			}
			ba81SetupQuadrature(oo->expectation);

			// Only the Hessian-based information approximation is supported here.
			if (fc->infoMethod == INFO_METHOD_HESSIAN) {
				ba81ComputeEMFit(oo, FF_COMPUTE_HESSIAN, fc);
			} else {
				omxRaiseErrorf("Information matrix approximation method %d is not available", fc->infoMethod);
				return;
			}
			return;
		}

		// Default EM path: compute the fit itself.
		double got = ba81ComputeEMFit(oo, want, fc);
		oo->matrix->data[0] = got;
		return;
	} else if (estate->type == EXPECTATION_OBSERVED) {
		// Observed-data mode.

		// NOTE: exact equality (==), not a bitmask test, is used for STARTING.
		if (want == FF_COMPUTE_STARTING) {
			buildLatentParamMap(oo, fc);
			if (state->freeLatents) setLatentStartingValues(oo, fc);
			return;
		}

		if (want & (FF_COMPUTE_INFO | FF_COMPUTE_GRADIENT)) {
			buildLatentParamMap(oo, fc); // only to check state->freeLatents
			buildItemParamMap(oo, fc);
			if (!state->freeItemParams && !state->freeLatents) {
				omxRaiseErrorf("%s: no free parameters", oo->name());
				return;
			}
			ba81SetupQuadrature(oo->expectation);

			// Gradient requests and "meat" information both go through gradCov;
			// the sandwich estimator requires free item parameters and no free latents.
			if (want & FF_COMPUTE_GRADIENT ||
			    (want & FF_COMPUTE_INFO && fc->infoMethod == INFO_METHOD_MEAT)) {
				gradCov(oo, fc);
			} else {
				if (state->freeLatents) {
					omxRaiseErrorf("Information matrix approximation method %d is not available",
						       fc->infoMethod);
					return;
				}
				if (!state->freeItemParams) {
					omxRaiseErrorf("%s: no free parameters", oo->name());
					return;
				}
				sandwich(oo, fc);
			}
		}
		if (want & (FF_COMPUTE_HESSIAN | FF_COMPUTE_IHESSIAN)) {
			omxRaiseErrorf("%s: Hessian is not available for observed data", oo->name());
		}

		if (want & FF_COMPUTE_MAXABSCHANGE) {
			// Maximum absolute change across item parameters, latent means, and
			// latent covariances; used as an EM convergence criterion.
			double mac = std::max(omxMaxAbsDiff(state->itemParam, estate->itemParam),
					      omxMaxAbsDiff(state->latentMean, estate->_latentMeanOut));
			fc->mac = std::max(mac, omxMaxAbsDiff(state->latentCov, estate->_latentCovOut));
			state->copyEstimates(estate);
		}

		if (want & FF_COMPUTE_FIT) {
			omxExpectationCompute(fc, oo->expectation, NULL);

			Eigen::ArrayXd &patternLik = estate->grp.patternLik;
			const int numUnique = estate->getNumUnique();
			if (state->returnRowLikelihoods) {
				// Write one (rescaled) likelihood per data row, expanding
				// each unique pattern to its duplicate rows.
				const double OneOverLargest = estate->grp.quad.getReciprocalOfOne();
				omxData *data = estate->data;
				for (int rx=0; rx < numUnique; rx++) {
					int dups = omxDataNumIdenticalRows(data, estate->grp.rowMap[rx]);
					for (int dup=0; dup < dups; dup++) {
						int dest = omxDataIndex(data, estate->grp.rowMap[rx]+dup);
						oo->matrix->data[dest] = patternLik[rx] * OneOverLargest;
					}
				}
			} else {
				// Weighted log-likelihood over unique patterns, parallel-reduced.
				double *rowWeight = estate->grp.rowWeight;
				const double LogLargest = estate->LogLargestDouble;
				double got = 0;
#pragma omp parallel for num_threads(Global->numThreads) reduction(+:got)
				for (int ux=0; ux < numUnique; ux++) {
					// Zero pattern likelihood marks an excluded (infeasible) pattern.
					if (patternLik[ux] == 0) continue;
					got += rowWeight[ux] * (log(patternLik[ux]) - LogLargest);
				}
				// NaN fit if every pattern was excluded.
				double fit = nan("infeasible");
				if (estate->grp.excludedPatterns < numUnique) {
					fit = Global->llScale * got;
					// add in some badness for excluded patterns
					fit += fit * estate->grp.excludedPatterns;
				}
				if (estate->verbose >= 1) mxLog("%s: observed fit %.4f (%d/%d excluded)",
								oo->name(), fit, estate->grp.excludedPatterns, numUnique);
				oo->matrix->data[0] = fit;
			}
		}
	} else {
		Rf_error("%s: Predict nothing or scores before computing %d", oo->name(), want);
	}
}
void state::compute(int want, FitContext *fc) { state *st = (state*) this; auto *oo = this; for (auto c1 : components) { if (c1->fitFunction) { omxFitFunctionCompute(c1->fitFunction, want, fc); } else { omxRecompute(c1, fc); } } if (!(want & FF_COMPUTE_FIT)) return; int nrow = components[0]->rows; for (auto c1 : components) { if (c1->rows != nrow) { mxThrow("%s: component '%s' has %d rows but component '%s' has %d rows", oo->name(), components[0]->name(), nrow, c1->name(), c1->rows); } } Eigen::VectorXd expect; Eigen::VectorXd rowResult; int numC = components.size(); Eigen::VectorXd tp(numC); double lp=0; for (int rx=0; rx < nrow; ++rx) { if (expectation->loadDefVars(rx) || rx == 0) { omxExpectationCompute(fc, expectation, NULL); if (!st->transition || rx == 0) { EigenVectorAdaptor Einitial(st->initial); expect = Einitial; if (expect.rows() != numC || expect.cols() != 1) { omxRaiseErrorf("%s: initial prob matrix must be %dx%d not %dx%d", name(), numC, 1, expect.rows(), expect.cols()); return; } } if (st->transition && (st->transition->rows != numC || st->transition->cols != numC)) { omxRaiseErrorf("%s: transition prob matrix must be %dx%d not %dx%d", name(), numC, numC, st->transition->rows, st->transition->cols); return; } } for (int cx=0; cx < int(components.size()); ++cx) { EigenVectorAdaptor Ecomp(components[cx]); tp[cx] = Ecomp[rx]; } if (st->verbose >= 4) { mxPrintMat("tp", tp); } if (st->transition) { EigenMatrixAdaptor Etransition(st->transition); expect = (Etransition * expect).eval(); } rowResult = tp.array() * expect.array(); double rowp = rowResult.sum(); rowResult /= rowp; lp += log(rowp); if (st->transition) expect = rowResult; } oo->matrix->data[0] = Global->llScale * lp; if (st->verbose >= 2) mxLog("%s: fit=%f", oo->name(), lp); }
// Convenience wrapper: recompute the expectation with the given FitContext
// and no extra compute options.
void omxExpectationRecompute(FitContext *fc, omxExpectation *ox) { omxExpectationCompute(fc, ox, NULL); }