static void setLatentStartingValues(omxFitFunction *oo, FitContext *fc) //remove? TODO
{
	BA81FitState *state = (BA81FitState*) oo->argStruct;
	BA81Expect *estate = (BA81Expect*) oo->expectation->argStruct;
	std::vector<int> &latentMap = state->latentMap;
	ba81NormalQuad &quad = estate->getQuad();
	int maxAbilities = quad.maxAbilities;
	omxMatrix *estMean = estate->estLatentMean;
	omxMatrix *estCov = estate->estLatentCov;

	for (int a1 = 0; a1 < maxAbilities; ++a1) {
		if (latentMap[a1] >= 0) {
			int to = latentMap[a1];
			fc->est[to] = omxVectorElement(estMean, a1);
		}

		for (int a2 = 0; a2 <= a1; ++a2) {
			int to = latentMap[maxAbilities + triangleLoc1(a1) + a2];
			if (to < 0) continue;
			fc->est[to] = omxMatrixElement(estCov, a1, a2);
		}
	}

	if (estate->verbose >= 1) {
		mxLog("%s: set latent parameters for version %d",
		      oo->name(), estate->ElatentVersion);
	}
}
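// Note on the latentMap layout used above: entries [0, maxAbilities) index
// the free parameters for the latent means, and the remaining entries index
// the packed lower triangle of the latent covariance, with
// maxAbilities + triangleLoc1(a1) + a2 locating element (a1, a2). Assuming
// triangleLoc1(n) is the usual n*(n+1)/2 triangle offset, a 2-ability model
// maps indices 0-1 to the means and 2-4 to cov(0,0), cov(1,0), cov(1,1);
// a value of -1 marks a latent parameter that is fixed rather than free.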
void ba81AggregateDistributions(std::vector<struct omxExpectation *> &expectation,
				int *version, omxMatrix *meanMat, omxMatrix *covMat)
{
	int allVer = 0;
	for (size_t ex=0; ex < expectation.size(); ++ex) {
		BA81Expect *ba81 = (BA81Expect *) expectation[ex]->argStruct;
		allVer += ba81->ElatentVersion;
	}
	if (*version == allVer) return;
	*version = allVer;

	BA81Expect *exemplar = (BA81Expect *) expectation[0]->argStruct;
	ba81NormalQuad &quad = exemplar->getQuad();
	ba81NormalQuad combined(quad);

	int got = 0;
	for (size_t ex=0; ex < expectation.size(); ++ex) {
		BA81Expect *ba81 = (BA81Expect *) expectation[ex]->argStruct;
		// double weight = 1/ba81->weightSum; ?
		combined.addSummary(ba81->grp.quad);
		++got;
	}
	if (got == 0) return;

	int dim = quad.abilities();
	int numLatents = dim + triangleLoc1(dim);
	Eigen::ArrayXd latentDist(numLatents);
	combined.EAP(got, latentDist);
	for (int d1=quad.abilities(); d1 < numLatents; d1++) {
		latentDist[d1] *= got / (got - 1.0);
	}
	exportLatentDistToOMX(quad, latentDist.data(), meanMat, covMat);
}
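// Note on the rescaling above: only the covariance entries of latentDist
// (those past the first quad.abilities() means) are multiplied by
// got/(got-1), where got counts the contributing expectations. This reads
// like a small-sample bias correction in the spirit of Bessel's correction,
// applied per aggregated group; that interpretation is inferred from the
// code itself (the commented-out weightSum line suggests a per-group
// weighting scheme was also considered).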
/**
 * MAP is not affected by the number of items. EAP is. Likelihood can
 * get concentrated in a single quadrature ordinate. For 3PL, response
 * patterns can have a bimodal likelihood. This will confuse MAP;
 * robustness to such patterns is a key advantage of EAP (Thissen &
 * Orlando, 2001, p. 136).
 *
 * Thissen, D. & Orlando, M. (2001). IRT for items scored in two
 * categories. In D. Thissen & H. Wainer (Eds.), \emph{Test scoring}
 * (pp. 73-140). Lawrence Erlbaum Associates, Inc.
 */
static void ba81PopulateAttributes(omxExpectation *oo, SEXP robj)
{
	BA81Expect *state = (BA81Expect *) oo->argStruct;
	if (!state->debugInternal) return;

	ba81NormalQuad &quad = state->getQuad();
	int maxAbilities = quad.abilities();
	const int numUnique = state->getNumUnique();
	const double LogLargest = state->LogLargestDouble;

	SEXP Rlik;
	if (state->grp.patternLik.size() != (size_t) numUnique) {
		refreshPatternLikelihood(state, oo->dynamicDataSource);
	}

	Rf_protect(Rlik = Rf_allocVector(REALSXP, numUnique));
	memcpy(REAL(Rlik), state->grp.patternLik.data(), sizeof(double) * numUnique);
	double *lik_out = REAL(Rlik);
	for (int px=0; px < numUnique; ++px) {
		// Must return value in log units because it may not be representable otherwise
		lik_out[px] = log(lik_out[px]) - LogLargest;
	}

	MxRList dbg;
	dbg.add("patternLikelihood", Rlik);

	if (quad.getEstepTableSize(0)) {
		SEXP Rexpected;
		Rf_protect(Rexpected = Rf_allocVector(REALSXP, quad.getEstepTableSize(0)));
		Eigen::Map< Eigen::ArrayXd > box(REAL(Rexpected), quad.getEstepTableSize(0));
		quad.exportEstepTable(0, box);
		dbg.add("em.expected", Rexpected);
	}

	SEXP Rmean, Rcov;
	if (state->estLatentMean) {
		Rf_protect(Rmean = Rf_allocVector(REALSXP, maxAbilities));
		memcpy(REAL(Rmean), state->estLatentMean->data, maxAbilities * sizeof(double));
		dbg.add("mean", Rmean);
	}
	if (state->estLatentCov) {
		Rf_protect(Rcov = Rf_allocMatrix(REALSXP, maxAbilities, maxAbilities));
		memcpy(REAL(Rcov), state->estLatentCov->data, maxAbilities * maxAbilities * sizeof(double));
		dbg.add("cov", Rcov);
	}

	Rf_setAttrib(robj, Rf_install("debug"), dbg.asR());
}
// Attempt G-H grid? http://dbarajassolano.wordpress.com/2012/01/26/on-sparse-grid-quadratures/
void ba81RefreshQuadrature(omxExpectation* oo)
{
	BA81Expect *state = (BA81Expect *) oo->argStruct;
	ba81NormalQuad &quad = state->getQuad();

	Eigen::VectorXd mean;
	Eigen::MatrixXd fullCov;
	state->getLatentDistribution(NULL, mean, fullCov);

	if (state->verbose >= 1) {
		mxLog("%s: refresh quadrature", oo->name);
		if (state->verbose >= 2) {
			int dim = mean.rows();
			pda(mean.data(), 1, dim);
			pda(fullCov.data(), dim, dim);
		}
	}

	quad.refresh(mean, fullCov);
}
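// gradCov below computes what its log message calls the "cross product
// approximation": per-pattern gradients g_r of the marginal log-likelihood
// are accumulated, and their weighted outer products are summed into
// fc->infoB, i.e. infoB ~= sum_r w_r g_r g_r^T with w_r the row weight.
// The per-pattern assembly is delegated to gradCov_finish_1pat; the
// derivCoef tables precompute, per quadrature point, the coefficients
// needed for the latent mean/covariance derivatives when those are free.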
static void gradCov(omxFitFunction *oo, FitContext *fc)
{
	const double Scale = Global->llScale;
	omxExpectation *expectation = oo->expectation;
	BA81FitState *state = (BA81FitState*) oo->argStruct;
	BA81Expect *estate = (BA81Expect*) expectation->argStruct;
	if (estate->verbose >= 1) mxLog("%s: cross product approximation", oo->name());

	estate->grp.ba81OutcomeProb(estate->itemParam->data, FALSE);

	const int numThreads = Global->numThreads;
	const int numUnique = estate->getNumUnique();
	ba81NormalQuad &quad = estate->getQuad();
	const int numSpecific = quad.numSpecific;
	const int maxDims = quad.maxDims;
	const int pDims = numSpecific? maxDims-1 : maxDims;
	const int maxAbilities = quad.maxAbilities;
	Eigen::MatrixXd icovMat(pDims, pDims);
	if (maxAbilities) {
		Eigen::VectorXd mean;
		Eigen::MatrixXd srcMat;
		estate->getLatentDistribution(fc, mean, srcMat);
		icovMat = srcMat.topLeftCorner(pDims, pDims);
		Matrix tmp(icovMat.data(), pDims, pDims);
		int info = InvertSymmetricPosDef(tmp, 'U');
		if (info) {
			omxRaiseErrorf("%s: latent covariance matrix is not positive definite", oo->name());
			return;
		}
		icovMat.triangularView<Eigen::Lower>() = icovMat.transpose().triangularView<Eigen::Lower>();
	}
	std::vector<int> &rowMap = estate->grp.rowMap;
	double *rowWeight = estate->grp.rowWeight;
	std::vector<bool> &rowSkip = estate->grp.rowSkip;
	const int totalQuadPoints = quad.totalQuadPoints;
	omxMatrix *itemParam = estate->itemParam;
	omxBuffer<double> patternLik(numUnique);

	const int priDerivCoef = pDims + triangleLoc1(pDims);
	const int numLatents = maxAbilities + triangleLoc1(maxAbilities);
	const int thrDerivSize = itemParam->cols * state->itemDerivPadSize;
	const int totalOutcomes = estate->totalOutcomes();
	const int numItems = state->freeItemParams? estate->numItems() : 0;
	const size_t numParam = fc->varGroup->vars.size();
	std::vector<double> thrGrad(numThreads * numParam);
	std::vector<double> thrMeat(numThreads * numParam * numParam);
	const double *wherePrep = quad.wherePrep.data();

	if (numSpecific == 0) {
		omxBuffer<double> thrLxk(totalQuadPoints * numThreads);
		omxBuffer<double> derivCoef(totalQuadPoints * priDerivCoef);

		if (state->freeLatents) {
#pragma omp parallel for num_threads(numThreads)
			for (int qx=0; qx < totalQuadPoints; qx++) {
				const double *where = wherePrep + qx * maxDims;
				calcDerivCoef(fc, state, estate, icovMat.data(), where,
					      derivCoef.data() + qx * priDerivCoef);
			}
		}

#pragma omp parallel for num_threads(numThreads)
		for (int px=0; px < numUnique; px++) {
			if (rowSkip[px]) continue;
			int thrId = omx_absolute_thread_num();
			double *lxk = thrLxk.data() + thrId * totalQuadPoints;
			omxBuffer<double> expected(totalOutcomes); // can use maxOutcomes instead TODO
			std::vector<double> deriv0(thrDerivSize);
			std::vector<double> latentGrad(numLatents);
			std::vector<double> patGrad(numParam);
			double *grad = thrGrad.data() + thrId * numParam;
			double *meat = thrMeat.data() + thrId * numParam * numParam;
			estate->grp.ba81LikelihoodSlow2(px, lxk);

			// If patternLik is already valid, maybe could avoid this loop TODO
			double patternLik1 = 0;
			for (int qx=0; qx < totalQuadPoints; qx++) {
				patternLik1 += lxk[qx];
			}
			patternLik[px] = patternLik1;

			// if (!validPatternLik(state, patternLik1)) complain, TODO

			for (int qx=0; qx < totalQuadPoints; qx++) {
				double tmp = lxk[qx];
				mapLatentDeriv(state, estate, tmp,
					       derivCoef.data() + qx * priDerivCoef,
					       latentGrad.data());

				for (int ix=0; ix < numItems; ++ix) {
					int pick = estate->grp.dataColumns[ix][rowMap[px]];
					if (pick == NA_INTEGER) continue;
					OMXZERO(expected.data(), estate->itemOutcomes(ix));
					expected[pick-1] = tmp;
					const double *spec = estate->itemSpec(ix);
					double *iparam = omxMatrixColumn(itemParam, ix);
					const int id = spec[RPF_ISpecID];
					double *myDeriv = deriv0.data() + ix * state->itemDerivPadSize;
					(*Glibrpf_model[id].dLL1)(spec, iparam, wherePrep + qx * maxDims,
								  expected.data(), myDeriv);
				}
			}

			gradCov_finish_1pat(1 / patternLik1, rowWeight[px], numItems, numLatents, numParam,
					    state, estate, itemParam, deriv0, latentGrad,
					    Scale, patGrad, grad, meat);
		}
	} else {
		const int totalPrimaryPoints = quad.totalPrimaryPoints;
		const int specificPoints = quad.quadGridSize;
		omxBuffer<double> thrLxk(totalQuadPoints * numSpecific * numThreads);
		omxBuffer<double> thrEi(totalPrimaryPoints * numThreads);
		omxBuffer<double> thrEis(totalPrimaryPoints * numSpecific * numThreads);
		const int derivPerPoint = priDerivCoef + 2 * numSpecific;
		omxBuffer<double> derivCoef(totalQuadPoints * derivPerPoint);

		if (state->freeLatents) {
#pragma omp parallel for num_threads(numThreads)
			for (int qx=0; qx < totalQuadPoints; qx++) {
				const double *where = wherePrep + qx * maxDims;
				calcDerivCoef(fc, state, estate, icovMat.data(), where,
					      derivCoef.data() + qx * derivPerPoint);
				for (int Sgroup=0; Sgroup < numSpecific; ++Sgroup) {
					calcDerivCoef1(fc, state, estate, where, Sgroup,
						       derivCoef.data() + qx * derivPerPoint + priDerivCoef + 2 * Sgroup);
				}
			}
		}

#pragma omp parallel for num_threads(numThreads)
		for (int px=0; px < numUnique; px++) {
			if (rowSkip[px]) continue;
			int thrId = omx_absolute_thread_num();
			double *lxk = thrLxk.data() + totalQuadPoints * numSpecific * thrId;
			double *Ei = thrEi.data() + totalPrimaryPoints * thrId;
			double *Eis = thrEis.data() + totalPrimaryPoints * numSpecific * thrId;
			omxBuffer<double> expected(totalOutcomes); // can use maxOutcomes instead TODO
			std::vector<double> deriv0(thrDerivSize);
			std::vector<double> latentGrad(numLatents);
			std::vector<double> patGrad(numParam);
			double *grad = thrGrad.data() + thrId * numParam;
			double *meat = thrMeat.data() + thrId * numParam * numParam;
			estate->grp.cai2010EiEis(px, lxk, Eis, Ei);

			for (int qx=0, qloc = 0; qx < totalPrimaryPoints; qx++) {
				for (int sgroup=0; sgroup < numSpecific; ++sgroup) {
					Eis[qloc] = Ei[qx] / Eis[qloc];
					++qloc;
				}
			}

			for (int qloc=0, eisloc=0, qx=0; eisloc < totalPrimaryPoints * numSpecific; eisloc += numSpecific) {
				for (int sx=0; sx < specificPoints; sx++) {
					mapLatentDeriv(state, estate, Eis[eisloc] * lxk[qloc],
						       derivCoef.data() + qx * derivPerPoint,
						       latentGrad.data());

					for (int Sgroup=0; Sgroup < numSpecific; Sgroup++) {
						double lxk1 = lxk[qloc];
						double Eis1 = Eis[eisloc + Sgroup];
						double tmp = Eis1 * lxk1;
						mapLatentDerivS(state, estate, Sgroup, tmp,
								derivCoef.data() + qx * derivPerPoint + priDerivCoef + 2 * Sgroup,
								latentGrad.data());

						for (int ix=0; ix < numItems; ++ix) {
							if (estate->grp.Sgroup[ix] != Sgroup) continue;
							int pick = estate->grp.dataColumns[ix][rowMap[px]];
							if (pick == NA_INTEGER) continue;
							OMXZERO(expected.data(), estate->itemOutcomes(ix));
							expected[pick-1] = tmp;
							const double *spec = estate->itemSpec(ix);
							double *iparam = omxMatrixColumn(itemParam, ix);
							const int id = spec[RPF_ISpecID];
							const int dims = spec[RPF_ISpecDims];
							double *myDeriv = deriv0.data() + ix * state->itemDerivPadSize;
							const double *where = wherePrep + qx * maxDims;
							Eigen::VectorXd ptheta(dims);
							for (int dx=0; dx < dims; dx++) {
								ptheta[dx] = where[std::min(dx, maxDims-1)];
							}
							(*Glibrpf_model[id].dLL1)(spec, iparam, ptheta.data(),
										  expected.data(), myDeriv);
						}
						++qloc;
					}
					++qx;
				}
			}

			// If patternLik is already valid, maybe could avoid this loop TODO
			double patternLik1 = 0;
			for (int qx=0; qx < totalPrimaryPoints; ++qx) {
				patternLik1 += Ei[qx];
			}
			patternLik[px] = patternLik1;

			gradCov_finish_1pat(1 / patternLik1, rowWeight[px], numItems, numLatents, numParam,
					    state, estate, itemParam, deriv0, latentGrad,
					    Scale, patGrad, grad, meat);
		}
	}

	for (int tx=1; tx < numThreads; ++tx) {
		double *th = thrGrad.data() + tx * numParam;
		for (size_t en=0; en < numParam; ++en) {
			thrGrad[en] += th[en];
		}
	}
	for (int tx=1; tx < numThreads; ++tx) {
		double *th = thrMeat.data() + tx * numParam * numParam;
		for (size_t en=0; en < numParam * numParam; ++en) {
			thrMeat[en] += th[en];
		}
	}
	for (size_t d1=0; d1 < numParam; ++d1) {
		fc->grad(d1) += thrGrad[d1];
	}
	if (fc->infoB) {
		for (size_t d1=0; d1 < numParam; ++d1) {
			for (size_t d2=0; d2 < numParam; ++d2) {
				int cell = d1 * numParam + d2;
				fc->infoB[cell] += thrMeat[cell];
			}
		}
	}
}
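// sandwich below assembles the ingredients of a sandwich-type (robust)
// information estimate. Per thread it accumulates: breadH, Hessian-style
// second-derivative terms; breadG, outer products of per-quadrature-point
// gradient contributions (note the sqrt(tmp) weighting, so each outer
// product carries weight tmp); and meat, outer products of complete
// per-pattern gradients. These are combined as infoA += breadH - breadG +
// meat and infoB += meat. Reading infoA as the "bread" A and infoB as the
// "meat" B of the usual A^{-1} B A^{-1} estimator is an interpretation
// from the variable names; any inversion happens downstream.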
static void sandwich(omxFitFunction *oo, FitContext *fc)
{
	const double abScale = fabs(Global->llScale);
	omxExpectation *expectation = oo->expectation;
	BA81FitState *state = (BA81FitState*) oo->argStruct;
	BA81Expect *estate = (BA81Expect*) expectation->argStruct;
	if (estate->verbose >= 1) mxLog("%s: sandwich", oo->name());

	estate->grp.ba81OutcomeProb(estate->itemParam->data, FALSE);

	const int numThreads = Global->numThreads;
	const int numUnique = estate->getNumUnique();
	ba81NormalQuad &quad = estate->getQuad();
	const int numSpecific = quad.numSpecific;
	const int maxDims = quad.maxDims;
	std::vector<int> &rowMap = estate->grp.rowMap;
	double *rowWeight = estate->grp.rowWeight;
	std::vector<bool> &rowSkip = estate->grp.rowSkip;
	const int totalQuadPoints = quad.totalQuadPoints;
	omxMatrix *itemParam = estate->itemParam;
	omxBuffer<double> patternLik(numUnique);

	std::vector<const double*> &itemSpec = estate->grp.spec;
	const int totalOutcomes = estate->totalOutcomes();
	const int numItems = estate->grp.numItems();
	const size_t numParam = fc->varGroup->vars.size();
	const double *wherePrep = quad.wherePrep.data();
	std::vector<double> thrBreadG(numThreads * numParam * numParam);
	std::vector<double> thrBreadH(numThreads * numParam * numParam);
	std::vector<double> thrMeat(numThreads * numParam * numParam);

	if (numSpecific == 0) {
		omxBuffer<double> thrLxk(totalQuadPoints * numThreads);

#pragma omp parallel for num_threads(numThreads)
		for (int px=0; px < numUnique; px++) {
			if (rowSkip[px]) continue;
			int thrId = omx_absolute_thread_num();
			double *lxk = thrLxk.data() + thrId * totalQuadPoints;
			omxBuffer<double> itemDeriv(state->itemDerivPadSize);
			omxBuffer<double> expected(totalOutcomes); // can use maxOutcomes instead TODO
			double *breadG = thrBreadG.data() + thrId * numParam * numParam; //a
			double *breadH = thrBreadH.data() + thrId * numParam * numParam; //a
			double *meat = thrMeat.data() + thrId * numParam * numParam;     //b
			std::vector<double> patGrad(numParam);

			estate->grp.ba81LikelihoodSlow2(px, lxk);

			// If patternLik is already valid, maybe could avoid this loop TODO
			double patternLik1 = 0;
			for (int qx=0; qx < totalQuadPoints; qx++) {
				patternLik1 += lxk[qx];
			}
			patternLik[px] = patternLik1;

			// if (!validPatternLik(state, patternLik1)) complain

			double weight = 1 / patternLik[px];
			for (int qx=0; qx < totalQuadPoints; qx++) {
				double tmp = lxk[qx] * weight;
				double sqrtTmp = sqrt(tmp);

				std::vector<double> gradBuf(numParam);
				int gradOffset = 0;

				for (int ix=0; ix < numItems; ++ix) {
					if (ix) gradOffset += state->paramPerItem[ix-1];
					int pick = estate->grp.dataColumns[ix][rowMap[px]];
					if (pick == NA_INTEGER) continue;
					pick -= 1;

					const int iOutcomes = estate->itemOutcomes(ix);
					OMXZERO(expected.data(), iOutcomes);
					expected[pick] = 1;
					const double *spec = itemSpec[ix];
					double *iparam = omxMatrixColumn(itemParam, ix);
					const int id = spec[RPF_ISpecID];
					OMXZERO(itemDeriv.data(), state->itemDerivPadSize);
					(*Glibrpf_model[id].dLL1)(spec, iparam, wherePrep + qx * maxDims,
								  expected.data(), itemDeriv.data());
					(*Glibrpf_model[id].dLL2)(spec, iparam, itemDeriv.data());

					for (int par = 0; par < state->paramPerItem[ix]; ++par) {
						int to = state->itemGradMap[gradOffset + par];
						if (to >= 0) {
							gradBuf[to] -= itemDeriv[par] * sqrtTmp;
							patGrad[to] -= itemDeriv[par] * tmp;
						}
					}
					int derivBase = ix * state->itemDerivPadSize;
					for (int ox=0; ox < state->itemDerivPadSize; ox++) {
						int to = state->paramMap[derivBase + ox];
						if (to >= int(numParam)) {
							int Hto = to - numParam;
							breadH[Hto] += abScale * itemDeriv[ox] * tmp * rowWeight[px];
						}
					}
				}
				addSymOuterProd(abScale * rowWeight[px], gradBuf.data(), numParam, breadG);
			}
			addSymOuterProd(abScale * rowWeight[px], patGrad.data(), numParam, meat);
		}
	} else {
		Rf_error("Sandwich information matrix method is not implemented for bifactor models");

		const int totalPrimaryPoints = quad.totalPrimaryPoints;
		const int specificPoints = quad.quadGridSize;
		omxBuffer<double> thrLxk(totalQuadPoints * numSpecific * numThreads);
		omxBuffer<double> thrEi(totalPrimaryPoints * numThreads);
		omxBuffer<double> thrEis(totalPrimaryPoints * numSpecific * numThreads);

#pragma omp parallel for num_threads(numThreads)
		for (int px=0; px < numUnique; px++) {
			if (rowSkip[px]) continue;
			int thrId = omx_absolute_thread_num();
			omxBuffer<double> expected(totalOutcomes); // can use maxOutcomes instead TODO
			omxBuffer<double> itemDeriv(state->itemDerivPadSize);
			double *breadG = thrBreadG.data() + thrId * numParam * numParam; //a
			double *breadH = thrBreadH.data() + thrId * numParam * numParam; //a
			double *meat = thrMeat.data() + thrId * numParam * numParam;     //b
			std::vector<double> patGrad(numParam);
			double *lxk = thrLxk.data() + totalQuadPoints * numSpecific * thrId;
			double *Ei = thrEi.data() + totalPrimaryPoints * thrId;
			double *Eis = thrEis.data() + totalPrimaryPoints * numSpecific * thrId;
			estate->grp.cai2010EiEis(px, lxk, Eis, Ei);

			// If patternLik is already valid, maybe could avoid this loop TODO
			double patternLik1 = 0;
			for (int qx=0; qx < totalPrimaryPoints; ++qx) {
				patternLik1 += Ei[qx];
			}
			patternLik[px] = patternLik1;

			for (int qx=0, qloc = 0; qx < totalPrimaryPoints; qx++) {
				for (int sgroup=0; sgroup < numSpecific; ++sgroup) {
					Eis[qloc] = Ei[qx] / Eis[qloc];
					++qloc;
				}
			}

			// WARNING: I didn't work out the math. I just coded this the way
			// it seems to make sense.
			for (int qloc=0, eisloc=0, qx=0; eisloc < totalPrimaryPoints * numSpecific; eisloc += numSpecific) {
				for (int sx=0; sx < specificPoints; sx++) {
					for (int Sgroup=0; Sgroup < numSpecific; Sgroup++) {
						std::vector<double> gradBuf(numParam);
						int gradOffset = 0;
						double lxk1 = lxk[qloc + Sgroup];
						double Eis1 = Eis[eisloc + Sgroup];
						double tmp = Eis1 * lxk1 / patternLik1;
						double sqrtTmp = sqrt(tmp);
						for (int ix=0; ix < numItems; ++ix) {
							if (ix) gradOffset += state->paramPerItem[ix-1];
							if (estate->grp.Sgroup[ix] != Sgroup) continue;
							int pick = estate->grp.dataColumns[ix][rowMap[px]];
							if (pick == NA_INTEGER) continue;
							OMXZERO(expected.data(), estate->itemOutcomes(ix));
							expected[pick-1] = 1;
							const double *spec = itemSpec[ix];
							double *iparam = omxMatrixColumn(itemParam, ix);
							const int id = spec[RPF_ISpecID];
							const int dims = spec[RPF_ISpecDims];
							OMXZERO(itemDeriv.data(), state->itemDerivPadSize);
							const double *where = wherePrep + qx * maxDims;
							Eigen::VectorXd ptheta(dims);
							for (int dx=0; dx < dims; dx++) {
								ptheta[dx] = where[std::min(dx, maxDims-1)];
							}
							(*Glibrpf_model[id].dLL1)(spec, iparam, ptheta.data(),
										  expected.data(), itemDeriv.data());
							(*Glibrpf_model[id].dLL2)(spec, iparam, itemDeriv.data());
							for (int par = 0; par < state->paramPerItem[ix]; ++par) {
								int to = state->itemGradMap[gradOffset + par];
								if (to >= 0) {
									gradBuf[to] -= itemDeriv[par] * sqrtTmp;
									patGrad[to] -= itemDeriv[par] * tmp;
								}
							}
							int derivBase = ix * state->itemDerivPadSize;
							for (int ox=0; ox < state->itemDerivPadSize; ox++) {
								int to = state->paramMap[derivBase + ox];
								if (to >= int(numParam)) {
									int Hto = to - numParam;
									breadH[Hto] += (abScale * itemDeriv[ox] * tmp * rowWeight[px]);
								}
							}
						}
						addSymOuterProd(abScale * rowWeight[px], gradBuf.data(), numParam, breadG);
					}
					qloc += numSpecific;
					++qx;
				}
			}
			addSymOuterProd(abScale * rowWeight[px], patGrad.data(), numParam, meat);
		}
	}

	// only need upper triangle TODO
	for (int tx=1; tx < numThreads; ++tx) {
		double *th = thrBreadG.data() + tx * numParam * numParam;
		for (size_t en=0; en < numParam * numParam; ++en) {
			thrBreadG[en] += th[en];
		}
	}
	for (int tx=1; tx < numThreads; ++tx) {
		double *th = thrBreadH.data() + tx * numParam * numParam;
		for (size_t en=0; en < numParam * numParam; ++en) {
			thrBreadH[en] += th[en];
		}
	}
	for (int tx=1; tx < numThreads; ++tx) {
		double *th = thrMeat.data() + tx * numParam * numParam;
		for (size_t en=0; en < numParam * numParam; ++en) {
			thrMeat[en] += th[en];
		}
	}
	//pda(thrBreadG.data(), numParam, numParam);
	//pda(thrBreadH.data(), numParam, numParam);
	//pda(thrMeat.data(), numParam, numParam);
	if (fc->infoA) {
		for (size_t d1=0; d1 < numParam; ++d1) {
			for (size_t d2=0; d2 < numParam; ++d2) {
				int cell = d1 * numParam + d2;
				fc->infoA[cell] += thrBreadH[cell] - thrBreadG[cell] + thrMeat[cell];
			}
		}
	}
	if (fc->infoB) {
		for (size_t d1=0; d1 < numParam; ++d1) {
			for (size_t d2=0; d2 < numParam; ++d2) {
				int cell = d1 * numParam + d2;
				fc->infoB[cell] += thrMeat[cell];
			}
		}
	}
}
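// ba81ComputeEMFit below evaluates the complete-data fit used in the EM
// M-step. For each item and quadrature point it crosses the E-step table
// of expected outcome counts (weight) with the cached outcome values
// (oProb); since the accumulated ll is returned as Scale * ll and treated
// as a log-likelihood, the TRUE flag passed to ba81OutcomeProb presumably
// requests probabilities on the log scale. Item-parameter derivatives are
// accumulated per thread through the librpf dLL1/dLL2 callbacks and then
// folded into the gradient and per-item Hessian blocks.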
static double ba81ComputeEMFit(omxFitFunction* oo, int want, FitContext *fc)
{
	const double Scale = Global->llScale;
	BA81FitState *state = (BA81FitState*) oo->argStruct;
	BA81Expect *estate = (BA81Expect*) oo->expectation->argStruct;
	omxMatrix *itemParam = estate->itemParam;
	std::vector<const double*> &itemSpec = estate->grp.spec;
	std::vector<int> &cumItemOutcomes = estate->grp.cumItemOutcomes;
	ba81NormalQuad &quad = estate->getQuad();
	const int maxDims = quad.maxDims;
	const size_t numItems = itemSpec.size();
	const int do_fit = want & FF_COMPUTE_FIT;
	const int do_deriv = want & (FF_COMPUTE_GRADIENT | FF_COMPUTE_HESSIAN | FF_COMPUTE_IHESSIAN);

	if (do_deriv && !state->freeItemParams) {
		omxRaiseErrorf("%s: no free parameters", oo->name());
		return NA_REAL;
	}
	if (state->returnRowLikelihoods) {
		omxRaiseErrorf("%s: vector=TRUE not implemented", oo->name());
		return NA_REAL;
	}

	if (estate->verbose >= 3) mxLog("%s: complete data fit(want fit=%d deriv=%d)",
					oo->name(), do_fit, do_deriv);

	if (do_fit) estate->grp.ba81OutcomeProb(itemParam->data, TRUE);

	const int thrDerivSize = itemParam->cols * state->itemDerivPadSize;
	std::vector<double> thrDeriv(thrDerivSize * Global->numThreads);
	double *wherePrep = quad.wherePrep.data();

	double ll = 0;
#pragma omp parallel for num_threads(Global->numThreads) reduction(+:ll)
	for (size_t ix=0; ix < numItems; ix++) {
		const int thrId = omx_absolute_thread_num();
		const double *spec = itemSpec[ix];
		const int id = spec[RPF_ISpecID];
		const int dims = spec[RPF_ISpecDims];
		Eigen::VectorXd ptheta(dims);
		const rpf_dLL1_t dLL1 = Glibrpf_model[id].dLL1;
		const int iOutcomes = estate->grp.itemOutcomes[ix];
		const int outcomeBase = cumItemOutcomes[ix] * quad.totalQuadPoints;
		const double *weight = estate->expected + outcomeBase;
		const double *oProb = estate->grp.outcomeProb + outcomeBase;
		const double *iparam = omxMatrixColumn(itemParam, ix);
		double *myDeriv = thrDeriv.data() + thrDerivSize * thrId + ix * state->itemDerivPadSize;

		for (int qx=0; qx < quad.totalQuadPoints; qx++) {
			if (do_fit) {
				for (int ox=0; ox < iOutcomes; ox++) {
					ll += weight[ox] * oProb[ox];
				}
			}
			if (do_deriv) {
				double *where = wherePrep + qx * maxDims;
				for (int dx=0; dx < dims; dx++) {
					ptheta[dx] = where[std::min(dx, maxDims-1)];
				}
				(*dLL1)(spec, iparam, ptheta.data(), weight, myDeriv);
			}
			weight += iOutcomes;
			oProb += iOutcomes;
		}
	}

	size_t excluded = 0;

	if (do_deriv) {
		double *deriv0 = thrDeriv.data();

		int perThread = itemParam->cols * state->itemDerivPadSize;
		for (int th=1; th < Global->numThreads; th++) {
			double *thrD = thrDeriv.data() + th * perThread;
			for (int ox=0; ox < perThread; ox++) deriv0[ox] += thrD[ox];
		}

		int numFreeParams = int(state->numFreeParam);
		int ox=-1;
		for (size_t ix=0; ix < numItems; ix++) {
			const double *spec = itemSpec[ix];
			int id = spec[RPF_ISpecID];
			double *iparam = omxMatrixColumn(itemParam, ix);
			double *pad = deriv0 + ix * state->itemDerivPadSize;
			(*Glibrpf_model[id].dLL2)(spec, iparam, pad);

			HessianBlock *hb = state->hBlocks[ix].clone();
			hb->mat.triangularView<Eigen::Upper>().setZero();

			for (int dx=0; dx < state->itemDerivPadSize; ++dx) {
				int to = state->paramMap[++ox];
				if (to == -1) continue;

				// Need to check because this can happen if
				// lbounds/ubounds are not set appropriately.
				if (0 && !std::isfinite(deriv0[ox])) {
					int item = ox / itemParam->rows;
					mxLog("item parameters:\n");
					const double *spec = itemSpec[item];
					int id = spec[RPF_ISpecID];
					int numParam = (*Glibrpf_model[id].numParam)(spec);
					double *iparam = omxMatrixColumn(itemParam, item);
					pda(iparam, numParam, 1);
					// Perhaps bounds can be pulled in from librpf? TODO
					Rf_error("Deriv %d for item %d is %f; are you missing a lbound/ubound?",
						 ox, item, deriv0[ox]);
				}

				if (to < numFreeParams) {
					if (want & FF_COMPUTE_GRADIENT) {
						fc->grad(to) -= Scale * deriv0[ox];
					}
				} else {
					if (want & (FF_COMPUTE_HESSIAN | FF_COMPUTE_IHESSIAN)) {
						int Hto = state->hbMap[ox];
						if (Hto >= 0) hb->mat.data()[Hto] -= Scale * deriv0[ox];
					}
				}
			}
			fc->queue(hb);
		}
	}

	if (excluded && estate->verbose >= 1) {
		mxLog("%s: Hessian not positive definite for %d/%d items",
		      oo->name(), (int) excluded, (int) numItems);
	}
	if (excluded == numItems) {
		omxRaiseErrorf("Hessian not positive definite for %d/%d items",
			       (int) excluded, (int) numItems);
	}

	return Scale * ll;
}
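// Note on the HessianBlock queueing above: conditional on the E-step
// tables, the complete-data derivatives couple only parameters within the
// same item, so the Hessian is block diagonal across items. Queueing one
// block per item via fc->queue(hb) appears to let the FitContext assemble
// the full sparse Hessian without materializing the cross-item zero
// blocks; the hbMap lookup translates each pad entry to its offset within
// the block.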
void omxInitExpectationBA81(omxExpectation* oo)
{
	omxState* currentState = oo->currentState;
	SEXP rObj = oo->rObj;
	SEXP tmp;

	if (OMX_DEBUG) {
		mxLog("Initializing %s.", oo->name);
	}
	if (!Glibrpf_model) {
#if USE_EXTERNAL_LIBRPF
		get_librpf_t get_librpf = (get_librpf_t) R_GetCCallable("rpf", "get_librpf_model_GPL");
		(*get_librpf)(LIBIFA_RPF_API_VERSION, &Glibrpf_numModels, &Glibrpf_model);
#else
		// if linking against included source code
		Glibrpf_numModels = librpf_numModels;
		Glibrpf_model = librpf_model;
#endif
	}

	BA81Expect *state = new BA81Expect;

	// These two constants should be as identical as possible
	state->name = oo->name;
	if (0) {
		state->LogLargestDouble = 0.0;
		state->LargestDouble = 1.0;
	} else {
		state->LogLargestDouble = log(std::numeric_limits<double>::max()) - 1;
		state->LargestDouble = exp(state->LogLargestDouble);
		ba81NormalQuad &quad = state->getQuad();
		quad.setOne(state->LargestDouble);
	}

	state->expectedUsed = false;

	state->estLatentMean = NULL;
	state->estLatentCov = NULL;
	state->type = EXPECTATION_OBSERVED;
	state->itemParam = NULL;
	state->EitemParam = NULL;
	state->itemParamVersion = 0;
	state->latentParamVersion = 0;
	oo->argStruct = (void*) state;

	{ScopedProtect p1(tmp, R_do_slot(rObj, Rf_install("data")));
	state->data = omxDataLookupFromState(tmp, currentState);
	}

	if (strcmp(omxDataType(state->data), "raw") != 0) {
		omxRaiseErrorf("%s unable to handle data type %s", oo->name, omxDataType(state->data));
		return;
	}

	{ScopedProtect p1(tmp, R_do_slot(rObj, Rf_install("verbose")));
	state->verbose = Rf_asInteger(tmp);
	}

	int targetQpoints;
	{ScopedProtect p1(tmp, R_do_slot(rObj, Rf_install("qpoints")));
	targetQpoints = Rf_asInteger(tmp);
	}

	{ScopedProtect p1(tmp, R_do_slot(rObj, Rf_install("qwidth")));
	state->grp.setGridFineness(Rf_asReal(tmp), targetQpoints);
	}

	{ScopedProtect p1(tmp, R_do_slot(rObj, Rf_install("ItemSpec")));
	state->grp.importSpec(tmp);
	if (state->verbose >= 2) mxLog("%s: found %d item specs", oo->name, state->numItems());
	}

	state->_latentMeanOut = omxNewMatrixFromSlot(rObj, currentState, "mean");
	state->_latentCovOut = omxNewMatrixFromSlot(rObj, currentState, "cov");

	state->itemParam = omxNewMatrixFromSlot(rObj, currentState, "item");
	state->grp.param = state->itemParam->data; // algebra not allowed yet TODO

	const int numItems = state->itemParam->cols;
	if (state->numItems() != numItems) {
		omxRaiseErrorf("ItemSpec length %d must match the number of item columns (%d)",
			       state->numItems(), numItems);
		return;
	}
	if (state->itemParam->rows != state->grp.impliedParamRows) {
		omxRaiseErrorf("item matrix must have %d rows", state->grp.impliedParamRows);
		return;
	}
	state->grp.paramRows = state->itemParam->rows;

	// for algebra item param, will need to defer until later?
	state->grp.learnMaxAbilities();

	int maxAbilities = state->grp.itemDims;
	state->grp.setFactorNames(state->itemParam->rownames);

	{
		ProtectedSEXP tmp2(R_do_slot(rObj, Rf_install(".detectIndependence")));
		state->grp.detectIndependence = Rf_asLogical(tmp2);
	}

	{ScopedProtect p1(tmp, R_do_slot(rObj, Rf_install("EstepItem")));
	if (!Rf_isNull(tmp)) {
		int rows, cols;
		getMatrixDims(tmp, &rows, &cols);
		if (rows != state->itemParam->rows || cols != state->itemParam->cols) {
			Rf_error("EstepItem must have the same dimensions as the item MxMatrix");
		}
		state->EitemParam = REAL(tmp);
	}
	}

	oo->computeFun = ba81compute;
	oo->setVarGroup = ignoreSetVarGroup;
	oo->destructFun = ba81Destroy;
	oo->populateAttrFun = ba81PopulateAttributes;
	oo->componentFun = getComponent;
	oo->canDuplicate = false;

	// TODO: Exactly identical rows do not contribute any information.
	// The sorting algorithm ought to remove them so we get better cache behavior.
	// The following summary stats would be cheaper to calculate too.

	omxData *data = state->data;
	if (data->hasDefinitionVariables()) Rf_error("%s: not implemented yet", oo->name);

	std::vector<int> &rowMap = state->grp.rowMap;

	int weightCol;
	{ScopedProtect p1(tmp, R_do_slot(rObj, Rf_install("weightColumn")));
	weightCol = INTEGER(tmp)[0];
	}

	if (weightCol == NA_INTEGER) {
		// Should rowMap be part of omxData? This is essentially a
		// generic compression step that shouldn't be specific to IFA models.
		state->grp.rowWeight = (double*) R_alloc(data->rows, sizeof(double));
		rowMap.resize(data->rows);
		int numUnique = 0;
		for (int rx=0; rx < data->rows; ) {
			int rw = 1;
			state->grp.rowWeight[numUnique] = rw;
			rowMap[numUnique] = rx;
			rx += rw;
			++numUnique;
		}
		rowMap.resize(numUnique);
		state->weightSum = state->data->rows;
	} else {
		if (omxDataColumnIsFactor(data, weightCol)) {
			omxRaiseErrorf("%s: weightColumn %d is a factor", oo->name, 1 + weightCol);
			return;
		}
		state->grp.rowWeight = omxDoubleDataColumn(data, weightCol);
		state->weightSum = 0;
		for (int rx=0; rx < data->rows; ++rx) {
			state->weightSum += state->grp.rowWeight[rx];
		}
		rowMap.resize(data->rows);
		for (size_t rx=0; rx < rowMap.size(); ++rx) {
			rowMap[rx] = rx;
		}
	}
	// complain about non-integral rowWeights (EAP can't work) TODO

	auto colMap = oo->getDataColumns();

	for (int cx = 0; cx < numItems; cx++) {
		int *col = omxIntDataColumnUnsafe(data, colMap[cx]);
		state->grp.dataColumns.push_back(col);
	}

	// sanity check data
	for (int cx = 0; cx < numItems; cx++) {
		if (!omxDataColumnIsFactor(data, colMap[cx])) {
			data->omxPrintData("diagnostic", 3);
			omxRaiseErrorf("%s: column %d is not a factor", oo->name, int(1 + colMap[cx]));
			return;
		}
	}

	// TODO the max outcome should be available from omxData
	for (int rx=0; rx < data->rows; rx++) {
		int cols = 0;
		for (int cx = 0; cx < numItems; cx++) {
			const int *col = state->grp.dataColumns[cx];
			int pick = col[rx];
			if (pick == NA_INTEGER) continue;
			++cols;
			const int no = state->grp.itemOutcomes[cx];
			if (pick > no) {
				Rf_error("Data for item '%s' has at least %d outcomes, not %d",
					 state->itemParam->colnames[cx], pick, no);
			}
		}
		if (cols == 0) {
			Rf_error("Row %d has all NAs", 1+rx);
		}
	}

	if (state->_latentMeanOut && state->_latentMeanOut->rows * state->_latentMeanOut->cols != maxAbilities) {
		Rf_error("The mean matrix '%s' must be a row or column vector of size %d",
			 state->_latentMeanOut->name(), maxAbilities);
	}
	if (state->_latentCovOut && (state->_latentCovOut->rows != maxAbilities ||
				     state->_latentCovOut->cols != maxAbilities)) {
		Rf_error("The cov matrix '%s' must be %dx%d",
			 state->_latentCovOut->name(), maxAbilities, maxAbilities);
	}

	state->grp.setLatentDistribution(state->_latentMeanOut? state->_latentMeanOut->data : NULL,
					 state->_latentCovOut? state->_latentCovOut->data : NULL);

	{
		EigenArrayAdaptor Eparam(state->itemParam);
		Eigen::Map< Eigen::VectorXd > meanVec(state->grp.mean, maxAbilities);
		Eigen::Map< Eigen::MatrixXd > covMat(state->grp.cov, maxAbilities, maxAbilities);
		state->grp.quad.setStructure(state->grp.qwidth, state->grp.qpoints,
					     Eparam, meanVec, covMat);
	}

	{ScopedProtect p1(tmp, R_do_slot(rObj, Rf_install("minItemsPerScore")));
	state->grp.setMinItemsPerScore(Rf_asInteger(tmp));
	}

	state->grp.buildRowSkip();

	if (isErrorRaised()) return;

	{ScopedProtect p1(tmp, R_do_slot(rObj, Rf_install("debugInternal")));
	state->debugInternal = Rf_asLogical(tmp);
	}

	state->ElatentVersion = 0;
	if (state->_latentMeanOut) {
		state->estLatentMean = omxInitMatrix(maxAbilities, 1, TRUE, currentState);
		omxCopyMatrix(state->estLatentMean, state->_latentMeanOut); // rename matrices TODO
	}
	if (state->_latentCovOut) {
		state->estLatentCov = omxInitMatrix(maxAbilities, maxAbilities, TRUE, currentState);
		omxCopyMatrix(state->estLatentCov, state->_latentCovOut);
	}
}
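// ba81compute below relies on a version-stamp cache: latentParamVersion and
// itemParamVersion record which versions of the latent distribution and
// item parameter matrix the cached quadrature and outcome probabilities
// were built from, so the quadrature refresh and E-step run only when the
// corresponding inputs have actually changed since the last call.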
static void ba81compute(omxExpectation *oo, FitContext *fc, const char *what, const char *how)
{
	BA81Expect *state = (BA81Expect *) oo->argStruct;

	if (what) {
		if (strcmp(what, "latentDistribution")==0 && how && strcmp(how, "copy")==0) {
			omxCopyMatrix(state->_latentMeanOut, state->estLatentMean);
			omxCopyMatrix(state->_latentCovOut, state->estLatentCov);

			double sampleSizeAdj = (state->weightSum - 1.0) / state->weightSum;
			int covSize = state->_latentCovOut->rows * state->_latentCovOut->cols;
			for (int cx=0; cx < covSize; ++cx) {
				state->_latentCovOut->data[cx] *= sampleSizeAdj;
			}
			return;
		}

		if (strcmp(what, "scores")==0) {
			state->expectedUsed = true;
			state->type = EXPECTATION_AUGMENTED;
		} else if (strcmp(what, "nothing")==0) {
			state->type = EXPECTATION_OBSERVED;
		} else {
			omxRaiseErrorf("%s: don't know how to predict '%s'",
				       oo->name, what);
		}

		if (state->verbose >= 1) {
			mxLog("%s: predict %s", oo->name, what);
		}
		return;
	}

	bool latentClean = state->latentParamVersion == getLatentVersion(state);
	bool itemClean = state->itemParamVersion == omxGetMatrixVersion(state->itemParam) && latentClean;

	ba81NormalQuad &quad = state->getQuad();

	if (state->verbose >= 1) {
		mxLog("%s: Qinit %d itemClean %d latentClean %d (1=clean) expectedUsed=%d",
		      oo->name, (int)quad.isAllocated(), itemClean, latentClean, state->expectedUsed);
	}

	if (!latentClean) {
		ba81RefreshQuadrature(oo);
		state->latentParamVersion = getLatentVersion(state);
	}

	if (!itemClean) {
		double *param = state->EitemParam? state->EitemParam : state->itemParam->data;
		state->grp.quad.cacheOutcomeProb(param, FALSE);

		bool estep = state->expectedUsed;
		if (estep) {
			if (oo->dynamicDataSource) {
				BA81Engine<BA81Expect*, BA81LatentSummary, BA81Estep> engine;
				engine.ba81Estep1(&state->grp, state);
			} else {
				BA81Engine<BA81Expect*, BA81LatentFixed, BA81Estep> engine;
				engine.ba81Estep1(&state->grp, state);
			}
		} else {
			state->grp.quad.releaseEstep();
			refreshPatternLikelihood(state, oo->dynamicDataSource);
		}
		if (oo->dynamicDataSource && state->verbose >= 2) {
			mxLog("%s: empirical distribution mean and cov:", state->name);
			omxPrint(state->estLatentMean, "mean");
			omxPrint(state->estLatentCov, "cov");
		}
		if (state->verbose >= 1) {
			const int numUnique = state->getNumUnique();
			mxLog("%s: estep<%s, %s> %d/%d rows excluded",
			      state->name,
			      (estep && oo->dynamicDataSource? "summary":"fixed"),
			      (estep? "estep":"omitEstep"),
			      state->grp.excludedPatterns, numUnique);
		}
	}

	state->itemParamVersion = omxGetMatrixVersion(state->itemParam);
}