/**
 * MAP is not affected by the number of items. EAP is. Likelihood can
 * get concentrated in a single quadrature ordinate. For 3PL, response
 * patterns can have a bimodal likelihood. This will confuse MAP and
 * is a key advantage of EAP (Thissen & Orlando, 2001, p. 136).
 *
 * Thissen, D. & Orlando, M. (2001). IRT for items scored in two
 * categories. In D. Thissen & H. Wainer (Eds.), \emph{Test scoring}
 * (pp. 73-140). Lawrence Erlbaum Associates, Inc.
 */
static void ba81PopulateAttributes(omxExpectation *oo, SEXP robj)
{
	BA81Expect *state = (BA81Expect *) oo->argStruct;
	if (!state->debugInternal) return;

	ba81NormalQuad &quad = state->getQuad();
	int maxAbilities = quad.abilities();
	const int numUnique = state->getNumUnique();
	const double LogLargest = state->LogLargestDouble;

	SEXP Rlik;

	if (state->grp.patternLik.size() != numUnique) {
		refreshPatternLikelihood(state, oo->dynamicDataSource);
	}

	Rf_protect(Rlik = Rf_allocVector(REALSXP, numUnique));
	memcpy(REAL(Rlik), state->grp.patternLik.data(), sizeof(double) * numUnique);
	double *lik_out = REAL(Rlik);
	for (int px=0; px < numUnique; ++px) {
		// Must return value in log units because it may not be representable otherwise
		lik_out[px] = log(lik_out[px]) - LogLargest;
	}

	MxRList dbg;
	dbg.add("patternLikelihood", Rlik);

	if (quad.getEstepTableSize(0)) {
		SEXP Rexpected;
		Rf_protect(Rexpected = Rf_allocVector(REALSXP, quad.getEstepTableSize(0)));
		Eigen::Map< Eigen::ArrayXd > box(REAL(Rexpected), quad.getEstepTableSize(0));
		quad.exportEstepTable(0, box);
		dbg.add("em.expected", Rexpected);
	}

	SEXP Rmean, Rcov;
	if (state->estLatentMean) {
		Rf_protect(Rmean = Rf_allocVector(REALSXP, maxAbilities));
		memcpy(REAL(Rmean), state->estLatentMean->data, maxAbilities * sizeof(double));
		dbg.add("mean", Rmean);
	}
	if (state->estLatentCov) {
		Rf_protect(Rcov = Rf_allocMatrix(REALSXP, maxAbilities, maxAbilities));
		memcpy(REAL(Rcov), state->estLatentCov->data, maxAbilities * maxAbilities * sizeof(double));
		dbg.add("cov", Rcov);
	}

	Rf_setAttrib(robj, Rf_install("debug"), dbg.asR());
}
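/*
 * A note on the scaling above (a sketch, inferred from LogLargestDouble and
 * the log-unit export): pattern likelihoods appear to be stored multiplied
 * by the largest representable double so that products of many small item
 * probabilities do not underflow, and are converted back to log units on
 * the way out. With hypothetical values:
 *
 *   double stored = rawLik * LargestDouble;   // scaled, representable form
 *   double logLik = log(stored) - LogLargest; // recovers log(rawLik)
 */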
static void gradCov(omxFitFunction *oo, FitContext *fc)
{
	const double Scale = Global->llScale;
	omxExpectation *expectation = oo->expectation;
	BA81FitState *state = (BA81FitState*) oo->argStruct;
	BA81Expect *estate = (BA81Expect*) expectation->argStruct;
	if (estate->verbose >= 1) mxLog("%s: cross product approximation", oo->name());

	estate->grp.ba81OutcomeProb(estate->itemParam->data, FALSE);

	const int numThreads = Global->numThreads;
	const int numUnique = estate->getNumUnique();
	ba81NormalQuad &quad = estate->getQuad();
	const int numSpecific = quad.numSpecific;
	const int maxDims = quad.maxDims;
	const int pDims = numSpecific? maxDims-1 : maxDims;
	const int maxAbilities = quad.maxAbilities;
	Eigen::MatrixXd icovMat(pDims, pDims);
	if (maxAbilities) {
		Eigen::VectorXd mean;
		Eigen::MatrixXd srcMat;
		estate->getLatentDistribution(fc, mean, srcMat);
		icovMat = srcMat.topLeftCorner(pDims, pDims);
		Matrix tmp(icovMat.data(), pDims, pDims);
		int info = InvertSymmetricPosDef(tmp, 'U');
		if (info) {
			omxRaiseErrorf("%s: latent covariance matrix is not positive definite", oo->name());
			return;
		}
		icovMat.triangularView<Eigen::Lower>() = icovMat.transpose().triangularView<Eigen::Lower>();
	}
	std::vector<int> &rowMap = estate->grp.rowMap;
	double *rowWeight = estate->grp.rowWeight;
	std::vector<bool> &rowSkip = estate->grp.rowSkip;
	const int totalQuadPoints = quad.totalQuadPoints;
	omxMatrix *itemParam = estate->itemParam;
	omxBuffer<double> patternLik(numUnique);

	const int priDerivCoef = pDims + triangleLoc1(pDims);
	const int numLatents = maxAbilities + triangleLoc1(maxAbilities);
	const int thrDerivSize = itemParam->cols * state->itemDerivPadSize;
	const int totalOutcomes = estate->totalOutcomes();
	const int numItems = state->freeItemParams? estate->numItems() : 0;
	const size_t numParam = fc->varGroup->vars.size();
	std::vector<double> thrGrad(numThreads * numParam);
	std::vector<double> thrMeat(numThreads * numParam * numParam);
	const double *wherePrep = quad.wherePrep.data();

	if (numSpecific == 0) {
		omxBuffer<double> thrLxk(totalQuadPoints * numThreads);
		omxBuffer<double> derivCoef(totalQuadPoints * priDerivCoef);

		if (state->freeLatents) {
#pragma omp parallel for num_threads(numThreads)
			for (int qx=0; qx < totalQuadPoints; qx++) {
				const double *where = wherePrep + qx * maxDims;
				calcDerivCoef(fc, state, estate, icovMat.data(), where,
					      derivCoef.data() + qx * priDerivCoef);
			}
		}

#pragma omp parallel for num_threads(numThreads)
		for (int px=0; px < numUnique; px++) {
			if (rowSkip[px]) continue;
			int thrId = omx_absolute_thread_num();
			double *lxk = thrLxk.data() + thrId * totalQuadPoints;
			omxBuffer<double> expected(totalOutcomes); // can use maxOutcomes instead TODO
			std::vector<double> deriv0(thrDerivSize);
			std::vector<double> latentGrad(numLatents);
			std::vector<double> patGrad(numParam);
			double *grad = thrGrad.data() + thrId * numParam;
			double *meat = thrMeat.data() + thrId * numParam * numParam;
			estate->grp.ba81LikelihoodSlow2(px, lxk);

			// If patternLik is already valid, maybe could avoid this loop TODO
			double patternLik1 = 0;
			for (int qx=0; qx < totalQuadPoints; qx++) {
				patternLik1 += lxk[qx];
			}
			patternLik[px] = patternLik1;

			// if (!validPatternLik(state, patternLik1)) complain, TODO

			for (int qx=0; qx < totalQuadPoints; qx++) {
				double tmp = lxk[qx];
				mapLatentDeriv(state, estate, tmp, derivCoef.data() + qx * priDerivCoef,
					       latentGrad.data());

				for (int ix=0; ix < numItems; ++ix) {
					int pick = estate->grp.dataColumns[ix][rowMap[px]];
					if (pick == NA_INTEGER) continue;
					OMXZERO(expected.data(), estate->itemOutcomes(ix));
					expected[pick-1] = tmp;
					const double *spec = estate->itemSpec(ix);
					double *iparam = omxMatrixColumn(itemParam, ix);
					const int id = spec[RPF_ISpecID];
					double *myDeriv = deriv0.data() + ix * state->itemDerivPadSize;
					(*Glibrpf_model[id].dLL1)(spec, iparam, wherePrep + qx * maxDims,
								  expected.data(), myDeriv);
				}
			}

			gradCov_finish_1pat(1 / patternLik1, rowWeight[px], numItems, numLatents, numParam,
					    state, estate, itemParam, deriv0, latentGrad, Scale, patGrad, grad, meat);
		}
	} else {
		const int totalPrimaryPoints = quad.totalPrimaryPoints;
		const int specificPoints = quad.quadGridSize;
		omxBuffer<double> thrLxk(totalQuadPoints * numSpecific * numThreads);
		omxBuffer<double> thrEi(totalPrimaryPoints * numThreads);
		omxBuffer<double> thrEis(totalPrimaryPoints * numSpecific * numThreads);
		const int derivPerPoint = priDerivCoef + 2 * numSpecific;
		omxBuffer<double> derivCoef(totalQuadPoints * derivPerPoint);

		if (state->freeLatents) {
#pragma omp parallel for num_threads(numThreads)
			for (int qx=0; qx < totalQuadPoints; qx++) {
				const double *where = wherePrep + qx * maxDims;
				calcDerivCoef(fc, state, estate, icovMat.data(), where,
					      derivCoef.data() + qx * derivPerPoint);
				for (int Sgroup=0; Sgroup < numSpecific; ++Sgroup) {
					calcDerivCoef1(fc, state, estate, where, Sgroup,
						       derivCoef.data() + qx * derivPerPoint + priDerivCoef + 2 * Sgroup);
				}
			}
		}

#pragma omp parallel for num_threads(numThreads)
		for (int px=0; px < numUnique; px++) {
			if (rowSkip[px]) continue;
			int thrId = omx_absolute_thread_num();
			double *lxk = thrLxk.data() + totalQuadPoints * numSpecific * thrId;
			double *Ei = thrEi.data() + totalPrimaryPoints * thrId;
			double *Eis = thrEis.data() + totalPrimaryPoints * numSpecific * thrId;
			omxBuffer<double> expected(totalOutcomes); // can use maxOutcomes instead TODO
			std::vector<double> deriv0(thrDerivSize);
			std::vector<double> latentGrad(numLatents);
			std::vector<double> patGrad(numParam);
			double *grad = thrGrad.data() + thrId * numParam;
			double *meat = thrMeat.data() + thrId * numParam * numParam;
			estate->grp.cai2010EiEis(px, lxk, Eis, Ei);

			for (int qx=0, qloc = 0; qx < totalPrimaryPoints; qx++) {
				for (int sgroup=0; sgroup < numSpecific; ++sgroup) {
					Eis[qloc] = Ei[qx] / Eis[qloc];
					++qloc;
				}
			}

			for (int qloc=0, eisloc=0, qx=0; eisloc < totalPrimaryPoints * numSpecific; eisloc += numSpecific) {
				for (int sx=0; sx < specificPoints; sx++) {
					mapLatentDeriv(state, estate, Eis[eisloc] * lxk[qloc],
						       derivCoef.data() + qx * derivPerPoint,
						       latentGrad.data());

					for (int Sgroup=0; Sgroup < numSpecific; Sgroup++) {
						double lxk1 = lxk[qloc];
						double Eis1 = Eis[eisloc + Sgroup];
						double tmp = Eis1 * lxk1;
						mapLatentDerivS(state, estate, Sgroup, tmp,
								derivCoef.data() + qx * derivPerPoint + priDerivCoef + 2 * Sgroup,
								latentGrad.data());

						for (int ix=0; ix < numItems; ++ix) {
							if (estate->grp.Sgroup[ix] != Sgroup) continue;
							int pick = estate->grp.dataColumns[ix][rowMap[px]];
							if (pick == NA_INTEGER) continue;
							OMXZERO(expected.data(), estate->itemOutcomes(ix));
							expected[pick-1] = tmp;
							const double *spec = estate->itemSpec(ix);
							double *iparam = omxMatrixColumn(itemParam, ix);
							const int id = spec[RPF_ISpecID];
							const int dims = spec[RPF_ISpecDims];
							double *myDeriv = deriv0.data() + ix * state->itemDerivPadSize;
							const double *where = wherePrep + qx * maxDims;
							Eigen::VectorXd ptheta(dims);
							for (int dx=0; dx < dims; dx++) {
								ptheta[dx] = where[std::min(dx, maxDims-1)];
							}
							(*Glibrpf_model[id].dLL1)(spec, iparam, ptheta.data(),
										  expected.data(), myDeriv);
						}
						++qloc;
					}
					++qx;
				}
			}

			// If patternLik is already valid, maybe could avoid this loop TODO
			double patternLik1 = 0;
			for (int qx=0; qx < totalPrimaryPoints; ++qx) {
				patternLik1 += Ei[qx];
			}
			patternLik[px] = patternLik1;

			gradCov_finish_1pat(1 / patternLik1, rowWeight[px], numItems, numLatents, numParam,
					    state, estate, itemParam, deriv0, latentGrad, Scale, patGrad, grad, meat);
		}
	}

	for (int tx=1; tx < numThreads; ++tx) {
		double *th = thrGrad.data() + tx * numParam;
		for (size_t en=0; en < numParam; ++en) {
			thrGrad[en] += th[en];
		}
	}
	for (int tx=1; tx < numThreads; ++tx) {
		double *th = thrMeat.data() + tx * numParam * numParam;
		for (size_t en=0; en < numParam * numParam; ++en) {
			thrMeat[en] += th[en];
		}
	}
	for (size_t d1=0; d1 < numParam; ++d1) {
		fc->grad(d1) += thrGrad[d1];
	}
	if (fc->infoB) {
		for (size_t d1=0; d1 < numParam; ++d1) {
			for (size_t d2=0; d2 < numParam; ++d2) {
				int cell = d1 * numParam + d2;
				fc->infoB[cell] += thrMeat[cell];
			}
		}
	}
}
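/*
 * For reference, the cross product ("meat") approximation accumulated by
 * gradCov amounts to summing weighted outer products of per-pattern score
 * vectors: infoB += sum over px of rowWeight[px] * patGrad * patGrad'.
 * The helper below is a hypothetical dense equivalent of that accumulation
 * (a sketch for exposition only; the real work presumably happens inside
 * gradCov_finish_1pat):
 */
static inline void sketchAccumulateMeat(double rowWeight, const double *patGrad,
					size_t numParam, double *meat)
{
	for (size_t d1=0; d1 < numParam; ++d1) {
		for (size_t d2=0; d2 < numParam; ++d2) {
			meat[d1 * numParam + d2] += rowWeight * patGrad[d1] * patGrad[d2];
		}
	}
}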
static void sandwich(omxFitFunction *oo, FitContext *fc)
{
	const double abScale = fabs(Global->llScale);
	omxExpectation *expectation = oo->expectation;
	BA81FitState *state = (BA81FitState*) oo->argStruct;
	BA81Expect *estate = (BA81Expect*) expectation->argStruct;
	if (estate->verbose >= 1) mxLog("%s: sandwich", oo->name());

	estate->grp.ba81OutcomeProb(estate->itemParam->data, FALSE);

	const int numThreads = Global->numThreads;
	const int numUnique = estate->getNumUnique();
	ba81NormalQuad &quad = estate->getQuad();
	const int numSpecific = quad.numSpecific;
	const int maxDims = quad.maxDims;
	std::vector<int> &rowMap = estate->grp.rowMap;
	double *rowWeight = estate->grp.rowWeight;
	std::vector<bool> &rowSkip = estate->grp.rowSkip;
	const int totalQuadPoints = quad.totalQuadPoints;
	omxMatrix *itemParam = estate->itemParam;
	omxBuffer<double> patternLik(numUnique);

	std::vector<const double*> &itemSpec = estate->grp.spec;
	const int totalOutcomes = estate->totalOutcomes();
	const int numItems = estate->grp.numItems();
	const size_t numParam = fc->varGroup->vars.size();
	const double *wherePrep = quad.wherePrep.data();
	std::vector<double> thrBreadG(numThreads * numParam * numParam);
	std::vector<double> thrBreadH(numThreads * numParam * numParam);
	std::vector<double> thrMeat(numThreads * numParam * numParam);

	if (numSpecific == 0) {
		omxBuffer<double> thrLxk(totalQuadPoints * numThreads);

#pragma omp parallel for num_threads(numThreads)
		for (int px=0; px < numUnique; px++) {
			if (rowSkip[px]) continue;
			int thrId = omx_absolute_thread_num();
			double *lxk = thrLxk.data() + thrId * totalQuadPoints;
			omxBuffer<double> itemDeriv(state->itemDerivPadSize);
			omxBuffer<double> expected(totalOutcomes); // can use maxOutcomes instead TODO
			double *breadG = thrBreadG.data() + thrId * numParam * numParam;   //a
			double *breadH = thrBreadH.data() + thrId * numParam * numParam;   //a
			double *meat = thrMeat.data() + thrId * numParam * numParam;       //b
			std::vector<double> patGrad(numParam);
			estate->grp.ba81LikelihoodSlow2(px, lxk);

			// If patternLik is already valid, maybe could avoid this loop TODO
			double patternLik1 = 0;
			for (int qx=0; qx < totalQuadPoints; qx++) {
				patternLik1 += lxk[qx];
			}
			patternLik[px] = patternLik1;

			// if (!validPatternLik(state, patternLik1)) complain

			double weight = 1 / patternLik[px];
			for (int qx=0; qx < totalQuadPoints; qx++) {
				double tmp = lxk[qx] * weight;
				double sqrtTmp = sqrt(tmp);

				std::vector<double> gradBuf(numParam);
				int gradOffset = 0;

				for (int ix=0; ix < numItems; ++ix) {
					if (ix) gradOffset += state->paramPerItem[ix-1];
					int pick = estate->grp.dataColumns[ix][rowMap[px]];
					if (pick == NA_INTEGER) continue;
					pick -= 1;

					const int iOutcomes = estate->itemOutcomes(ix);
					OMXZERO(expected.data(), iOutcomes);
					expected[pick] = 1;
					const double *spec = itemSpec[ix];
					double *iparam = omxMatrixColumn(itemParam, ix);
					const int id = spec[RPF_ISpecID];
					OMXZERO(itemDeriv.data(), state->itemDerivPadSize);
					(*Glibrpf_model[id].dLL1)(spec, iparam, wherePrep + qx * maxDims,
								  expected.data(), itemDeriv.data());
					(*Glibrpf_model[id].dLL2)(spec, iparam, itemDeriv.data());

					for (int par = 0; par < state->paramPerItem[ix]; ++par) {
						int to = state->itemGradMap[gradOffset + par];
						if (to >= 0) {
							gradBuf[to] -= itemDeriv[par] * sqrtTmp;
							patGrad[to] -= itemDeriv[par] * tmp;
						}
					}
					int derivBase = ix * state->itemDerivPadSize;
					for (int ox=0; ox < state->itemDerivPadSize; ox++) {
						int to = state->paramMap[derivBase + ox];
						if (to >= int(numParam)) {
							int Hto = to - numParam;
							breadH[Hto] += abScale * itemDeriv[ox] * tmp * rowWeight[px];
						}
					}
				}
				addSymOuterProd(abScale * rowWeight[px], gradBuf.data(), numParam, breadG);
			}
			addSymOuterProd(abScale * rowWeight[px], patGrad.data(), numParam, meat);
		}
	} else {
		Rf_error("Sandwich information matrix method is not implemented for bifactor models");

		const int totalPrimaryPoints = quad.totalPrimaryPoints;
		const int specificPoints = quad.quadGridSize;
		omxBuffer<double> thrLxk(totalQuadPoints * numSpecific * numThreads);
		omxBuffer<double> thrEi(totalPrimaryPoints * numThreads);
		omxBuffer<double> thrEis(totalPrimaryPoints * numSpecific * numThreads);

#pragma omp parallel for num_threads(numThreads)
		for (int px=0; px < numUnique; px++) {
			if (rowSkip[px]) continue;
			int thrId = omx_absolute_thread_num();
			omxBuffer<double> expected(totalOutcomes); // can use maxOutcomes instead TODO
			omxBuffer<double> itemDeriv(state->itemDerivPadSize);
			double *breadG = thrBreadG.data() + thrId * numParam * numParam;   //a
			double *breadH = thrBreadH.data() + thrId * numParam * numParam;   //a
			double *meat = thrMeat.data() + thrId * numParam * numParam;       //b
			std::vector<double> patGrad(numParam);

			double *lxk = thrLxk.data() + totalQuadPoints * numSpecific * thrId;
			double *Ei = thrEi.data() + totalPrimaryPoints * thrId;
			double *Eis = thrEis.data() + totalPrimaryPoints * numSpecific * thrId;
			estate->grp.cai2010EiEis(px, lxk, Eis, Ei);

			// If patternLik is already valid, maybe could avoid this loop TODO
			double patternLik1 = 0;
			for (int qx=0; qx < totalPrimaryPoints; ++qx) {
				patternLik1 += Ei[qx];
			}
			patternLik[px] = patternLik1;

			for (int qx=0, qloc = 0; qx < totalPrimaryPoints; qx++) {
				for (int sgroup=0; sgroup < numSpecific; ++sgroup) {
					Eis[qloc] = Ei[qx] / Eis[qloc];
					++qloc;
				}
			}

			// WARNING: I didn't work out the math. I just coded this the way
			// it seems to make sense.
			for (int qloc=0, eisloc=0, qx=0; eisloc < totalPrimaryPoints * numSpecific; eisloc += numSpecific) {
				for (int sx=0; sx < specificPoints; sx++) {
					for (int Sgroup=0; Sgroup < numSpecific; Sgroup++) {
						std::vector<double> gradBuf(numParam);
						int gradOffset = 0;
						double lxk1 = lxk[qloc + Sgroup];
						double Eis1 = Eis[eisloc + Sgroup];
						double tmp = Eis1 * lxk1 / patternLik1;
						double sqrtTmp = sqrt(tmp);
						for (int ix=0; ix < numItems; ++ix) {
							if (ix) gradOffset += state->paramPerItem[ix-1];
							if (estate->grp.Sgroup[ix] != Sgroup) continue;
							int pick = estate->grp.dataColumns[ix][rowMap[px]];
							if (pick == NA_INTEGER) continue;
							OMXZERO(expected.data(), estate->itemOutcomes(ix));
							expected[pick-1] = 1;
							const double *spec = itemSpec[ix];
							double *iparam = omxMatrixColumn(itemParam, ix);
							const int id = spec[RPF_ISpecID];
							const int dims = spec[RPF_ISpecDims];
							OMXZERO(itemDeriv.data(), state->itemDerivPadSize);
							const double *where = wherePrep + qx * maxDims;
							Eigen::VectorXd ptheta(dims);
							for (int dx=0; dx < dims; dx++) {
								ptheta[dx] = where[std::min(dx, maxDims-1)];
							}
							(*Glibrpf_model[id].dLL1)(spec, iparam, ptheta.data(),
										  expected.data(), itemDeriv.data());
							(*Glibrpf_model[id].dLL2)(spec, iparam, itemDeriv.data());
							for (int par = 0; par < state->paramPerItem[ix]; ++par) {
								int to = state->itemGradMap[gradOffset + par];
								if (to >= 0) {
									gradBuf[to] -= itemDeriv[par] * sqrtTmp;
									patGrad[to] -= itemDeriv[par] * tmp;
								}
							}
							int derivBase = ix * state->itemDerivPadSize;
							for (int ox=0; ox < state->itemDerivPadSize; ox++) {
								int to = state->paramMap[derivBase + ox];
								if (to >= int(numParam)) {
									int Hto = to - numParam;
									breadH[Hto] += (abScale * itemDeriv[ox] * tmp * rowWeight[px]);
								}
							}
						}
						addSymOuterProd(abScale * rowWeight[px], gradBuf.data(), numParam, breadG);
					}
					qloc += numSpecific;
					++qx;
				}
			}
			addSymOuterProd(abScale * rowWeight[px], patGrad.data(), numParam, meat);
		}
	}

	// only need upper triangle TODO
	for (int tx=1; tx < numThreads; ++tx) {
		double *th = thrBreadG.data() + tx * numParam * numParam;
		for (size_t en=0; en < numParam * numParam; ++en) {
			thrBreadG[en] += th[en];
		}
	}
	for (int tx=1; tx < numThreads; ++tx) {
		double *th = thrBreadH.data() + tx * numParam * numParam;
		for (size_t en=0; en < numParam * numParam; ++en) {
			thrBreadH[en] += th[en];
		}
	}
	for (int tx=1; tx < numThreads; ++tx) {
		double *th = thrMeat.data() + tx * numParam * numParam;
		for (size_t en=0; en < numParam * numParam; ++en) {
			thrMeat[en] += th[en];
		}
	}
	//pda(thrBreadG.data(), numParam, numParam);
	//pda(thrBreadH.data(), numParam, numParam);
	//pda(thrMeat.data(), numParam, numParam);
	if (fc->infoA) {
		for (size_t d1=0; d1 < numParam; ++d1) {
			for (size_t d2=0; d2 < numParam; ++d2) {
				int cell = d1 * numParam + d2;
				fc->infoA[cell] += thrBreadH[cell] - thrBreadG[cell] + thrMeat[cell];
			}
		}
	}
	if (fc->infoB) {
		for (size_t d1=0; d1 < numParam; ++d1) {
			for (size_t d2=0; d2 < numParam; ++d2) {
				int cell = d1 * numParam + d2;
				fc->infoB[cell] += thrMeat[cell];
			}
		}
	}
}
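/*
 * Note on the accumulators in sandwich() (an interpretive sketch, not
 * verified math): thrMeat collects outer products of per-pattern score
 * vectors, thrBreadG collects outer products of sqrt-weighted
 * per-quadrature gradients, and thrBreadH collects weighted
 * second-derivative terms. The combination stored in infoA,
 *
 *   infoA += breadH - breadG + meat,
 *
 * appears to follow the usual missing-information decomposition of the
 * observed information for a marginal likelihood, though (per the warning
 * above) the bifactor variant of this math was never worked out.
 */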
static void ba81ComputeFit(omxFitFunction* oo, int want, FitContext *fc)
{
	BA81FitState *state = (BA81FitState*) oo->argStruct;
	BA81Expect *estate = (BA81Expect*) oo->expectation->argStruct;
	if (fc) state->numFreeParam = fc->varGroup->vars.size();

	if (want & FF_COMPUTE_INITIAL_FIT) return;

	if (estate->type == EXPECTATION_AUGMENTED) {
		buildItemParamMap(oo, fc);

		if (want & FF_COMPUTE_PARAMFLAVOR) {
			for (size_t px=0; px < state->numFreeParam; ++px) {
				if (state->paramFlavor[px] == NULL) continue;
				fc->flavor[px] = state->paramFlavor[px];
			}
			return;
		}

		if (want & FF_COMPUTE_PREOPTIMIZE) {
			omxExpectationCompute(fc, oo->expectation, NULL);
			return;
		}

		if (want & FF_COMPUTE_INFO) {
			buildLatentParamMap(oo, fc);
			if (!state->freeItemParams) {
				omxRaiseErrorf("%s: no free parameters", oo->name());
				return;
			}
			ba81SetupQuadrature(oo->expectation);

			if (fc->infoMethod == INFO_METHOD_HESSIAN) {
				ba81ComputeEMFit(oo, FF_COMPUTE_HESSIAN, fc);
			} else {
				omxRaiseErrorf("Information matrix approximation method %d is not available",
					       fc->infoMethod);
				return;
			}
			return;
		}

		double got = ba81ComputeEMFit(oo, want, fc);
		oo->matrix->data[0] = got;
		return;
	} else if (estate->type == EXPECTATION_OBSERVED) {

		if (want == FF_COMPUTE_STARTING) {
			buildLatentParamMap(oo, fc);
			if (state->freeLatents) setLatentStartingValues(oo, fc);
			return;
		}

		if (want & (FF_COMPUTE_INFO | FF_COMPUTE_GRADIENT)) {
			buildLatentParamMap(oo, fc); // only to check state->freeLatents
			buildItemParamMap(oo, fc);
			if (!state->freeItemParams && !state->freeLatents) {
				omxRaiseErrorf("%s: no free parameters", oo->name());
				return;
			}
			ba81SetupQuadrature(oo->expectation);

			if (want & FF_COMPUTE_GRADIENT ||
			    (want & FF_COMPUTE_INFO && fc->infoMethod == INFO_METHOD_MEAT)) {
				gradCov(oo, fc);
			} else {
				if (state->freeLatents) {
					omxRaiseErrorf("Information matrix approximation method %d is not available",
						       fc->infoMethod);
					return;
				}
				if (!state->freeItemParams) {
					omxRaiseErrorf("%s: no free parameters", oo->name());
					return;
				}
				sandwich(oo, fc);
			}
		}
		if (want & (FF_COMPUTE_HESSIAN | FF_COMPUTE_IHESSIAN)) {
			omxRaiseErrorf("%s: Hessian is not available for observed data", oo->name());
		}

		if (want & FF_COMPUTE_MAXABSCHANGE) {
			double mac = std::max(omxMaxAbsDiff(state->itemParam, estate->itemParam),
					      omxMaxAbsDiff(state->latentMean, estate->_latentMeanOut));
			fc->mac = std::max(mac, omxMaxAbsDiff(state->latentCov, estate->_latentCovOut));
			state->copyEstimates(estate);
		}

		if (want & FF_COMPUTE_FIT) {
			omxExpectationCompute(fc, oo->expectation, NULL);

			Eigen::ArrayXd &patternLik = estate->grp.patternLik;
			const int numUnique = estate->getNumUnique();
			if (state->returnRowLikelihoods) {
				const double OneOverLargest = estate->grp.quad.getReciprocalOfOne();
				omxData *data = estate->data;
				for (int rx=0; rx < numUnique; rx++) {
					int dups = omxDataNumIdenticalRows(data, estate->grp.rowMap[rx]);
					for (int dup=0; dup < dups; dup++) {
						int dest = omxDataIndex(data, estate->grp.rowMap[rx]+dup);
						oo->matrix->data[dest] = patternLik[rx] * OneOverLargest;
					}
				}
			} else {
				double *rowWeight = estate->grp.rowWeight;
				const double LogLargest = estate->LogLargestDouble;
				double got = 0;
#pragma omp parallel for num_threads(Global->numThreads) reduction(+:got)
				for (int ux=0; ux < numUnique; ux++) {
					if (patternLik[ux] == 0) continue;
					got += rowWeight[ux] * (log(patternLik[ux]) - LogLargest);
				}
				double fit = nan("infeasible");
				if (estate->grp.excludedPatterns < numUnique) {
					fit = Global->llScale * got;
					// add in some badness for excluded patterns
					fit += fit * estate->grp.excludedPatterns;
				}
				if (estate->verbose >= 1) mxLog("%s: observed fit %.4f (%d/%d excluded)",
								oo->name(), fit, estate->grp.excludedPatterns, numUnique);
				oo->matrix->data[0] = fit;
			}
		}
	} else {
		Rf_error("%s: Predict nothing or scores before computing %d", oo->name(), want);
	}
}
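/*
 * Sketch of the fit computed in the FF_COMPUTE_FIT branch above, assuming
 * row weights w_u and pattern likelihoods L_u stored in scaled form (see
 * ba81PopulateAttributes):
 *
 *   fit = llScale * sum over u of w_u * (log(L_u) - LogLargest)
 *
 * with fit further inflated by fit += fit * excludedPatterns when any
 * response patterns were excluded, as a rough infeasibility penalty.
 */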
static void ba81compute(omxExpectation *oo, FitContext *fc, const char *what, const char *how)
{
	BA81Expect *state = (BA81Expect *) oo->argStruct;

	if (what) {
		if (strcmp(what, "latentDistribution")==0 && how && strcmp(how, "copy")==0) {
			omxCopyMatrix(state->_latentMeanOut, state->estLatentMean);
			omxCopyMatrix(state->_latentCovOut, state->estLatentCov);

			double sampleSizeAdj = (state->weightSum - 1.0) / state->weightSum;
			int covSize = state->_latentCovOut->rows * state->_latentCovOut->cols;
			for (int cx=0; cx < covSize; ++cx) {
				state->_latentCovOut->data[cx] *= sampleSizeAdj;
			}
			return;
		}

		if (strcmp(what, "scores")==0) {
			state->expectedUsed = true;
			state->type = EXPECTATION_AUGMENTED;
		} else if (strcmp(what, "nothing")==0) {
			state->type = EXPECTATION_OBSERVED;
		} else {
			omxRaiseErrorf("%s: don't know how to predict '%s'", oo->name, what);
		}

		if (state->verbose >= 1) {
			mxLog("%s: predict %s", oo->name, what);
		}
		return;
	}

	bool latentClean = state->latentParamVersion == getLatentVersion(state);
	bool itemClean = state->itemParamVersion == omxGetMatrixVersion(state->itemParam) && latentClean;

	ba81NormalQuad &quad = state->getQuad();

	if (state->verbose >= 1) {
		mxLog("%s: Qinit %d itemClean %d latentClean %d (1=clean) expectedUsed=%d",
		      oo->name, (int)quad.isAllocated(), itemClean, latentClean, state->expectedUsed);
	}

	if (!latentClean) {
		ba81RefreshQuadrature(oo);
		state->latentParamVersion = getLatentVersion(state);
	}

	if (!itemClean) {
		double *param = state->EitemParam? state->EitemParam : state->itemParam->data;
		state->grp.quad.cacheOutcomeProb(param, FALSE);

		bool estep = state->expectedUsed;
		if (estep) {
			if (oo->dynamicDataSource) {
				BA81Engine<BA81Expect*, BA81LatentSummary, BA81Estep> engine;
				engine.ba81Estep1(&state->grp, state);
			} else {
				BA81Engine<BA81Expect*, BA81LatentFixed, BA81Estep> engine;
				engine.ba81Estep1(&state->grp, state);
			}
		} else {
			state->grp.quad.releaseEstep();
			refreshPatternLikelihood(state, oo->dynamicDataSource);
		}
		if (oo->dynamicDataSource && state->verbose >= 2) {
			mxLog("%s: empirical distribution mean and cov:", state->name);
			omxPrint(state->estLatentMean, "mean");
			omxPrint(state->estLatentCov, "cov");
		}
		if (state->verbose >= 1) {
			const int numUnique = state->getNumUnique();
			mxLog("%s: estep<%s, %s> %d/%d rows excluded",
			      state->name,
			      (estep && oo->dynamicDataSource? "summary":"fixed"),
			      (estep? "estep":"omitEstep"),
			      state->grp.excludedPatterns, numUnique);
		}
	}

	state->itemParamVersion = omxGetMatrixVersion(state->itemParam);
}
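/*
 * Recompute policy in ba81compute, in brief: the quadrature is refreshed
 * only when the latent parameters' version stamp changes, and the cached
 * outcome probabilities (plus the E-step, when scores were requested) only
 * when the item parameter matrix's version changes. itemClean additionally
 * requires latentClean, presumably because the outcome probabilities are
 * cached at the quadrature points, so moving the latent distribution
 * invalidates them even when the item parameters are unchanged.
 */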