int omxExpectation::numSummaryStats() { omxMatrix *cov = getComponent("cov"); if (!cov) { Rf_error("%s::numSummaryStats is not implemented (for object '%s')", expType, name); } omxMatrix *mean = getComponent("means"); int count = 0; omxMatrix *slope = getComponent("slope"); if (slope) count += slope->rows * slope->cols; auto &ti = getThresholdInfo(); if (ti.size() == 0) { // all continuous count += triangleLoc1(cov->rows); if (mean) count += cov->rows; return count; } count += triangleLoc1(cov->rows - 1); // covariances for (auto &th : ti) { // mean + variance count += th.numThresholds? th.numThresholds : 2; } return count; }
// Build state->latentMap: for each slot in the latent distribution
// (maxAbilities means followed by the lower triangle of the covariance),
// record the index of the free parameter occupying that slot, or -1.
// Also forces a positive lower bound on free variance parameters and
// errors out if the covariance matrix's symmetry is not preserved by the
// free-parameter layout. Cached per var group via state->haveLatentMap.
static void buildLatentParamMap(omxFitFunction* oo, FitContext *fc)
{
	FreeVarGroup *fvg = fc->varGroup;
	BA81FitState *state = (BA81FitState *) oo->argStruct;
	std::vector<int> &latentMap = state->latentMap;
	BA81Expect *estate = (BA81Expect*) oo->expectation->argStruct;
	int maxAbilities = estate->grp.maxAbilities;

	// Already built for this var group; nothing to do.
	if (state->haveLatentMap == fc->varGroup->id[0]) return;
	if (estate->verbose >= 1) mxLog("%s: rebuild latent parameter map for var group %d",
					oo->name(), fc->varGroup->id[0]);

	state->freeLatents = false;

	// means first, then the covariance lower triangle
	int numLatents = maxAbilities + triangleLoc1(maxAbilities);
	latentMap.assign(numLatents, -1);

	// Matrix numbers are stored bit-complemented (~); 0 is a safe
	// "absent" value because it is only compared when the output
	// matrix pointer is non-NULL.
	int meanNum = 0;
	if (estate->_latentMeanOut) meanNum = ~estate->_latentMeanOut->matrixNumber;
	int covNum = 0;
	if (estate->_latentCovOut) covNum = ~estate->_latentCovOut->matrixNumber;

	int numParam = int(fvg->vars.size());
	for (int px=0; px < numParam; px++) {
		omxFreeVar *fv = fvg->vars[px];
		for (size_t lx=0; lx < fv->locations.size(); lx++) {
			omxFreeVarLocation *loc = &fv->locations[lx];
			int matNum = loc->matrix;
			if (matNum == meanNum && estate->_latentMeanOut) {
				// Mean is a vector, so one of row/col is zero and
				// row + col is the ability index.
				latentMap[loc->row + loc->col] = px;
				state->freeLatents = true;
			} else if (matNum == covNum && estate->_latentCovOut) {
				int a1 = loc->row;
				int a2 = loc->col;
				if (a1 < a2) std::swap(a1, a2);  // canonicalize to lower triangle
				int cell = maxAbilities + triangleLoc1(a1) + a2;
				if (latentMap[cell] == -1) {
					latentMap[cell] = px;
					if (a1 == a2 && fv->lbound == NEG_INF) {
						fv->lbound = BA81_MIN_VARIANCE;  // variance must be positive
						Global->boundsUpdated = true;
						if (fc->est[px] < fv->lbound) {
							Rf_error("Starting value for variance %s is not positive", fv->name);
						}
					}
				} else if (latentMap[cell] != px) {
					// Both (a1,a2) and (a2,a1) must map to the same free
					// parameter, otherwise the covariance can go asymmetric.
					// doesn't detect similar problems in multigroup constraints TODO
					Rf_error("Covariance matrix must be constrained to preserve symmetry");
				}
				state->freeLatents = true;
			}
		}
	}
	state->haveLatentMap = fc->varGroup->id[0];
}
// Copy the estimated latent mean and covariance back into the free
// parameter vector, for every latent slot that buildLatentParamMap
// mapped to a free parameter (entries of -1 are skipped).
static void setLatentStartingValues(omxFitFunction *oo, FitContext *fc) //remove? TODO
{
	BA81FitState *state = (BA81FitState*) oo->argStruct;
	BA81Expect *estate = (BA81Expect*) oo->expectation->argStruct;
	std::vector<int> &latentMap = state->latentMap;
	ba81NormalQuad &quad = estate->getQuad();
	const int numAb = quad.maxAbilities;
	omxMatrix *srcMean = estate->estLatentMean;
	omxMatrix *srcCov = estate->estLatentCov;

	for (int row = 0; row < numAb; ++row) {
		// Mean parameter for this ability, if free.
		int meanDest = latentMap[row];
		if (meanDest >= 0) {
			fc->est[meanDest] = omxVectorElement(srcMean, row);
		}
		// Lower-triangle covariance entries for this ability.
		for (int col = 0; col <= row; ++col) {
			int covDest = latentMap[numAb + triangleLoc1(row) + col];
			if (covDest >= 0) {
				fc->est[covDest] = omxMatrixElement(srcCov, row, col);
			}
		}
	}

	if (estate->verbose >= 1) {
		mxLog("%s: set latent parameters for version %d",
		      oo->name(), estate->ElatentVersion);
	}
}
void ba81AggregateDistributions(std::vector<struct omxExpectation *> &expectation, int *version, omxMatrix *meanMat, omxMatrix *covMat) { int allVer = 0; for (size_t ex=0; ex < expectation.size(); ++ex) { BA81Expect *ba81 = (BA81Expect *) expectation[ex]->argStruct; allVer += ba81->ElatentVersion; } if (*version == allVer) return; *version = allVer; BA81Expect *exemplar = (BA81Expect *) expectation[0]->argStruct; ba81NormalQuad &quad = exemplar->getQuad(); ba81NormalQuad combined(quad); int got = 0; for (size_t ex=0; ex < expectation.size(); ++ex) { BA81Expect *ba81 = (BA81Expect *) expectation[ex]->argStruct; // double weight = 1/ba81->weightSum; ? combined.addSummary(ba81->grp.quad); ++got; } if (got == 0) return; int dim = quad.abilities(); int numLatents = dim + triangleLoc1(dim); Eigen::ArrayXd latentDist(numLatents); combined.EAP(got, latentDist); for (int d1=quad.abilities(); d1 < numLatents; d1++) { latentDist[d1] *= got / (got - 1.0); } exportLatentDistToOMX(quad, latentDist.data(), meanMat, covMat); }
// Compute per-quadrature-point derivative coefficients of the (primary)
// latent normal density with respect to the latent mean and covariance,
// evaluated at point 'where'. Output layout in derivCoef: pDims entries
// for the mean part, followed by triangleLoc1(pDims) entries for the
// lower triangle of the covariance part.
//
// icov is the inverse of the (primary) latent covariance, stored dense
// pDims x pDims. Uses BLAS dsymv/dsymm for the matrix products.
static void calcDerivCoef(FitContext *fc, BA81FitState *state, BA81Expect *estate,
			  double *icov, const double *where, double *derivCoef)
{
	ba81NormalQuad &quad = estate->getQuad();
	Eigen::VectorXd mean;
	Eigen::MatrixXd cov;
	estate->getLatentDistribution(fc, mean, cov);
	// With specific factors, the last dimension is the specific one;
	// only the primary dimensions participate here.
	const int pDims = quad.numSpecific? quad.maxDims - 1 : quad.maxDims;
	const char R='R';
	const char L='L';
	const char U='U';
	const double alpha = 1;
	const double beta = 0;
	const int one = 1;

	// whereDiff = where - mean; whereGram = lower triangle of
	// whereDiff * whereDiff' (packed).
	std::vector<double> whereDiff(pDims);
	std::vector<double> whereGram(triangleLoc1(pDims));
	for (int d1=0; d1 < pDims; ++d1) {
		whereDiff[d1] = where[d1] - mean[d1];
	}
	gramProduct(whereDiff.data(), whereDiff.size(), whereGram.data());

	// Mean part: derivCoef[0..pDims) = icov * whereDiff  (symmetric mat-vec)
	F77_CALL(dsymv)(&U, &pDims, &alpha, icov, &pDims, whereDiff.data(), &one,
			&beta, derivCoef, &one);

	std::vector<double> covGrad1(pDims * pDims);
	std::vector<double> covGrad2(pDims * pDims);
	// covGrad1 (upper triangle in column-major) = cov - whereGram
	int cx=0;
	for (int d1=0; d1 < pDims; ++d1) {
		for (int d2=0; d2 <= d1; ++d2) {
			covGrad1[d2 * pDims + d1] = cov(d2,d1) - whereGram[cx];
			++cx;
		}
	}
	// covGrad1 <- icov * (cov - whereGram) * icov, via two symmetric
	// right-multiplies (dsymm with side='R').
	F77_CALL(dsymm)(&R, &L, &pDims, &pDims, &alpha, covGrad1.data(), &pDims,
			icov, &pDims, &beta, covGrad2.data(), &pDims);
	F77_CALL(dsymm)(&R, &L, &pDims, &pDims, &alpha, icov, &pDims,
			covGrad2.data(), &pDims, &beta, covGrad1.data(), &pDims);
	// Diagonal entries are halved (they appear once, off-diagonals twice).
	for (int d1=0; d1 < pDims; ++d1) {
		covGrad1[d1 * pDims + d1] /= 2.0;
	}
	// Pack the (negated) lower triangle after the mean part.
	cx = pDims;
	for (int d1=0; d1 < pDims; ++d1) {
		int cell = d1 * pDims;
		for (int d2=0; d2 <= d1; ++d2) {
			derivCoef[cx] = -covGrad1[cell + d2];
			++cx;
		}
	}
}
// Finish a latent-distribution summary pass: compute the weighted EAP
// estimates of the latent mean and covariance, apply the sample-size
// correction weightSum/(weightSum-1) to the covariance entries, and
// export the result into extraData's estLatentMean/estLatentCov.
// Bumps ElatentVersion so downstream caches notice the change.
// (Template header for T is outside this chunk's view.)
void BA81LatentSummary<T>::end(class ifaGroup *grp, T extraData)
{
	ba81NormalQuad &quad = grp->quad;
	int dim = quad.abilities();
	// means first, then the covariance lower triangle
	int numLatents = dim + triangleLoc1(dim);
	Eigen::ArrayXd latentDist(numLatents);
	quad.EAP(extraData->weightSum, latentDist);
	// Rescale only the covariance portion (indices >= dim).
	for (int d1=quad.abilities(); d1 < numLatents; d1++) {
		latentDist[d1] *= extraData->weightSum / (extraData->weightSum - 1.0);
	}
	exportLatentDistToOMX(quad, latentDist.data(), extraData->estLatentMean, extraData->estLatentCov);
	++extraData->ElatentVersion;
}
// Unpack a flattened latent distribution (abilities() means followed by
// the packed lower triangle of the covariance) into omxMatrix outputs.
// Either output pointer may be NULL to skip that part; the covariance is
// mirrored into the upper triangle to keep it symmetric.
static void exportLatentDistToOMX(ba81NormalQuad &quad, double *latentDist1,
				  omxMatrix *meanOut, omxMatrix *covOut)
{
	const int numAb = quad.abilities();

	if (meanOut) {
		for (int ax=0; ax < numAb; ++ax) {
			omxSetVectorElement(meanOut, ax, latentDist1[ax]);
		}
	}

	if (!covOut) return;

	for (int rx=0; rx < numAb; ++rx) {
		// Row rx's entries start after the means and the triangles
		// of all earlier rows.
		int src = numAb + triangleLoc1(rx);
		for (int cx=0; cx <= rx; ++cx, ++src) {
			double v = latentDist1[src];
			omxSetMatrixElement(covOut, rx, cx, v);
			if (rx != cx) omxSetMatrixElement(covOut, cx, rx, v);  // mirror
		}
	}
}
// Initialize the BA81 fit function: wire up compute/destroy hooks, size
// the per-item derivative workspace, validate every item's model id, and
// allocate the matrices used to snapshot parameter estimates.
void omxInitFitFunctionBA81(omxFitFunction* oo)
{
	if (!oo->argStruct) { // ugh!
		// State may already exist when re-initializing; allocate lazily.
		BA81FitState *state = new BA81FitState;
		oo->argStruct = state;
	}
	omxState *currentState = oo->matrix->currentState;
	BA81FitState *state = (BA81FitState*) oo->argStruct;

	omxExpectation *expectation = oo->expectation;
	BA81Expect *estate = (BA81Expect*) expectation->argStruct;
	estate->fit = oo;  // back-pointer from expectation to fit function

	oo->computeFun = ba81Compute;
	oo->setVarGroup = ba81SetFreeVarGroup;
	oo->destructFun = ba81Destroy;
	oo->gradientAvailable = TRUE;
	oo->hessianAvailable = TRUE;

	// Per-item derivative pad: gradient entries plus the packed lower
	// triangle of the per-item Hessian.
	int maxParam = estate->itemParam->rows;
	state->itemDerivPadSize = maxParam + triangleLoc1(maxParam);

	// Reject item specs that reference an unknown rpf item model.
	int numItems = estate->itemParam->cols;
	for (int ix=0; ix < numItems; ix++) {
		const double *spec = estate->itemSpec(ix);
		int id = spec[RPF_ISpecID];
		if (id < 0 || id >= Glibrpf_numModels) {
			Rf_error("ItemSpec %d has unknown item model %d", ix, id);
		}
	}

	// Snapshot holders; copyEstimates sizes and fills them from estate.
	state->itemParam = omxInitMatrix(0, 0, TRUE, currentState);
	state->latentMean = omxInitMatrix(0, 0, TRUE, currentState);
	state->latentCov = omxInitMatrix(0, 0, TRUE, currentState);
	state->copyEstimates(estate);

	// R-side "vector" slot selects row-likelihood output mode.
	state->returnRowLikelihoods = Rf_asInteger(R_do_slot(oo->rObj, Rf_install("vector")));
}
// Report progress as "done/total", where total is the size of the packed
// triangle over numParams (the number of work units being tracked).
void reportProgress(int numDone)
{
	const int total = triangleLoc1(numParams);
	std::string detail = std::to_string(numDone);
	detail += "/";
	detail += std::to_string(total);
	Global->reportProgress1(cnd.name, detail);
}
// Cross-product (outer-product-of-gradients) approximation of the
// information matrix, plus accumulation of the analytic gradient.
//
// For every unique response pattern it computes the per-pattern gradient
// with respect to free item parameters and (when free) latent mean and
// covariance parameters, then folds them into fc->grad and fc->infoB via
// gradCov_finish_1pat. Two code paths: numSpecific == 0 (single primary
// quadrature) and the bifactor/"specific factors" case using the
// Cai (2010) Ei/Eis decomposition. Work is parallelized over patterns
// with per-thread scratch buffers that are reduced at the end.
static void gradCov(omxFitFunction *oo, FitContext *fc)
{
	const double Scale = Global->llScale;
	omxExpectation *expectation = oo->expectation;
	BA81FitState *state = (BA81FitState*) oo->argStruct;
	BA81Expect *estate = (BA81Expect*) expectation->argStruct;
	if (estate->verbose >= 1) mxLog("%s: cross product approximation", oo->name());

	estate->grp.ba81OutcomeProb(estate->itemParam->data, FALSE);

	const int numThreads = Global->numThreads;
	const int numUnique = estate->getNumUnique();
	ba81NormalQuad &quad = estate->getQuad();
	const int numSpecific = quad.numSpecific;
	const int maxDims = quad.maxDims;
	// Primary dims exclude the trailing specific dimension, if any.
	const int pDims = numSpecific? maxDims-1 : maxDims;
	const int maxAbilities = quad.maxAbilities;

	// Invert the primary latent covariance (needed by calcDerivCoef).
	Eigen::MatrixXd icovMat(pDims, pDims);
	if (maxAbilities) {
		Eigen::VectorXd mean;
		Eigen::MatrixXd srcMat;
		estate->getLatentDistribution(fc, mean, srcMat);
		icovMat = srcMat.topLeftCorner(pDims, pDims);
		Matrix tmp(icovMat.data(), pDims, pDims);
		int info = InvertSymmetricPosDef(tmp, 'U');
		if (info) {
			omxRaiseErrorf("%s: latent covariance matrix is not positive definite", oo->name());
			return;
		}
		// The inverse only filled the upper triangle; mirror it.
		icovMat.triangularView<Eigen::Lower>() = icovMat.transpose().triangularView<Eigen::Lower>();
	}

	std::vector<int> &rowMap = estate->grp.rowMap;
	double *rowWeight = estate->grp.rowWeight;
	std::vector<bool> &rowSkip = estate->grp.rowSkip;
	const int totalQuadPoints = quad.totalQuadPoints;
	omxMatrix *itemParam = estate->itemParam;
	omxBuffer<double> patternLik(numUnique);

	// Sizes: latent derivative coefficients per point, latent slots,
	// and per-thread item derivative scratch.
	const int priDerivCoef = pDims + triangleLoc1(pDims);
	const int numLatents = maxAbilities + triangleLoc1(maxAbilities);
	const int thrDerivSize = itemParam->cols * state->itemDerivPadSize;
	const int totalOutcomes = estate->totalOutcomes();
	const int numItems = state->freeItemParams? estate->numItems() : 0;
	const size_t numParam = fc->varGroup->vars.size();
	std::vector<double> thrGrad(numThreads * numParam);
	std::vector<double> thrMeat(numThreads * numParam * numParam);
	const double *wherePrep = quad.wherePrep.data();

	if (numSpecific == 0) {
		omxBuffer<double> thrLxk(totalQuadPoints * numThreads);
		omxBuffer<double> derivCoef(totalQuadPoints * priDerivCoef);

		// Precompute latent derivative coefficients per quadrature point.
		if (state->freeLatents) {
#pragma omp parallel for num_threads(numThreads)
			for (int qx=0; qx < totalQuadPoints; qx++) {
				const double *where = wherePrep + qx * maxDims;
				calcDerivCoef(fc, state, estate, icovMat.data(), where,
					      derivCoef.data() + qx * priDerivCoef);
			}
		}

#pragma omp parallel for num_threads(numThreads)
		for (int px=0; px < numUnique; px++) {
			if (rowSkip[px]) continue;
			int thrId = omx_absolute_thread_num();
			double *lxk = thrLxk.data() + thrId * totalQuadPoints;
			omxBuffer<double> expected(totalOutcomes); // can use maxOutcomes instead TODO
			std::vector<double> deriv0(thrDerivSize);
			std::vector<double> latentGrad(numLatents);
			std::vector<double> patGrad(numParam);
			double *grad = thrGrad.data() + thrId * numParam;
			double *meat = thrMeat.data() + thrId * numParam * numParam;
			estate->grp.ba81LikelihoodSlow2(px, lxk);

			// Pattern likelihood = sum of per-point likelihoods.
			// If patternLik is already valid, maybe could avoid this loop TODO
			double patternLik1 = 0;
			for (int qx=0; qx < totalQuadPoints; qx++) {
				patternLik1 += lxk[qx];
			}
			patternLik[px] = patternLik1;

			// if (!validPatternLik(state, patternLik1)) complain, TODO

			for (int qx=0; qx < totalQuadPoints; qx++) {
				double tmp = lxk[qx];
				mapLatentDeriv(state, estate, tmp,
					       derivCoef.data() + qx * priDerivCoef,
					       latentGrad.data());

				for (int ix=0; ix < numItems; ++ix) {
					int pick = estate->grp.dataColumns[ix][rowMap[px]];
					if (pick == NA_INTEGER) continue;  // missing response
					OMXZERO(expected.data(), estate->itemOutcomes(ix));
					expected[pick-1] = tmp;  // observed outcome weighted by likelihood
					const double *spec = estate->itemSpec(ix);
					double *iparam = omxMatrixColumn(itemParam, ix);
					const int id = spec[RPF_ISpecID];
					double *myDeriv = deriv0.data() + ix * state->itemDerivPadSize;
					(*Glibrpf_model[id].dLL1)(spec, iparam,
								  wherePrep + qx * maxDims,
								  expected.data(), myDeriv);
				}
			}

			gradCov_finish_1pat(1 / patternLik1, rowWeight[px], numItems, numLatents,
					    numParam, state, estate, itemParam, deriv0, latentGrad,
					    Scale, patGrad, grad, meat);
		}
	} else {
		const int totalPrimaryPoints = quad.totalPrimaryPoints;
		const int specificPoints = quad.quadGridSize;
		omxBuffer<double> thrLxk(totalQuadPoints * numSpecific * numThreads);
		omxBuffer<double> thrEi(totalPrimaryPoints * numThreads);
		omxBuffer<double> thrEis(totalPrimaryPoints * numSpecific * numThreads);
		// Per point: primary coefficients plus 2 (mean, variance) per
		// specific group.
		const int derivPerPoint = priDerivCoef + 2 * numSpecific;
		omxBuffer<double> derivCoef(totalQuadPoints * derivPerPoint);

		if (state->freeLatents) {
#pragma omp parallel for num_threads(numThreads)
			for (int qx=0; qx < totalQuadPoints; qx++) {
				const double *where = wherePrep + qx * maxDims;
				calcDerivCoef(fc, state, estate, icovMat.data(), where,
					      derivCoef.data() + qx * derivPerPoint);
				for (int Sgroup=0; Sgroup < numSpecific; ++Sgroup) {
					calcDerivCoef1(fc, state, estate, where, Sgroup,
						       derivCoef.data() + qx * derivPerPoint +
						       priDerivCoef + 2 * Sgroup);
				}
			}
		}

#pragma omp parallel for num_threads(numThreads)
		for (int px=0; px < numUnique; px++) {
			if (rowSkip[px]) continue;
			int thrId = omx_absolute_thread_num();
			double *lxk = thrLxk.data() + totalQuadPoints * numSpecific * thrId;
			double *Ei = thrEi.data() + totalPrimaryPoints * thrId;
			double *Eis = thrEis.data() + totalPrimaryPoints * numSpecific * thrId;
			omxBuffer<double> expected(totalOutcomes); // can use maxOutcomes instead TODO
			std::vector<double> deriv0(thrDerivSize);
			std::vector<double> latentGrad(numLatents);
			std::vector<double> patGrad(numParam);
			double *grad = thrGrad.data() + thrId * numParam;
			double *meat = thrMeat.data() + thrId * numParam * numParam;
			estate->grp.cai2010EiEis(px, lxk, Eis, Ei);

			// Replace Eis by Ei/Eis in place (Cai 2010 ratio).
			for (int qx=0, qloc = 0; qx < totalPrimaryPoints; qx++) {
				for (int sgroup=0; sgroup < numSpecific; ++sgroup) {
					Eis[qloc] = Ei[qx] / Eis[qloc];
					++qloc;
				}
			}

			// Walk primary points (qx), each with specificPoints
			// specific-dimension points (qloc indexes lxk).
			for (int qloc=0, eisloc=0, qx=0; eisloc < totalPrimaryPoints * numSpecific;
			     eisloc += numSpecific) {
				for (int sx=0; sx < specificPoints; sx++) {
					mapLatentDeriv(state, estate, Eis[eisloc] * lxk[qloc],
						       derivCoef.data() + qx * derivPerPoint,
						       latentGrad.data());

					for (int Sgroup=0; Sgroup < numSpecific; Sgroup++) {
						double lxk1 = lxk[qloc];
						double Eis1 = Eis[eisloc + Sgroup];
						double tmp = Eis1 * lxk1;
						mapLatentDerivS(state, estate, Sgroup, tmp,
								derivCoef.data() + qx * derivPerPoint +
								priDerivCoef + 2 * Sgroup,
								latentGrad.data());

						for (int ix=0; ix < numItems; ++ix) {
							if (estate->grp.Sgroup[ix] != Sgroup) continue;
							int pick = estate->grp.dataColumns[ix][rowMap[px]];
							if (pick == NA_INTEGER) continue;
							OMXZERO(expected.data(), estate->itemOutcomes(ix));
							expected[pick-1] = tmp;
							const double *spec = estate->itemSpec(ix);
							double *iparam = omxMatrixColumn(itemParam, ix);
							const int id = spec[RPF_ISpecID];
							const int dims = spec[RPF_ISpecDims];
							double *myDeriv = deriv0.data() + ix * state->itemDerivPadSize;
							const double *where = wherePrep + qx * maxDims;
							// Item sees its primary dims plus the specific
							// dim (clamped to the last coordinate).
							Eigen::VectorXd ptheta(dims);
							for (int dx=0; dx < dims; dx++) {
								ptheta[dx] = where[std::min(dx, maxDims-1)];
							}
							(*Glibrpf_model[id].dLL1)(spec, iparam, ptheta.data(),
										  expected.data(), myDeriv);
						}
						++qloc;
					}
					++qx;
				}
			}

			// If patternLik is already valid, maybe could avoid this loop TODO
			double patternLik1 = 0;
			for (int qx=0; qx < totalPrimaryPoints; ++qx) {
				patternLik1 += Ei[qx];
			}
			patternLik[px] = patternLik1;

			gradCov_finish_1pat(1 / patternLik1, rowWeight[px], numItems, numLatents,
					    numParam, state, estate, itemParam, deriv0, latentGrad,
					    Scale, patGrad, grad, meat);
		}
	}

	// Reduce per-thread accumulators into thread 0's slice.
	for (int tx=1; tx < numThreads; ++tx) {
		double *th = thrGrad.data() + tx * numParam;
		for (size_t en=0; en < numParam; ++en) {
			thrGrad[en] += th[en];
		}
	}
	for (int tx=1; tx < numThreads; ++tx) {
		double *th = thrMeat.data() + tx * numParam * numParam;
		for (size_t en=0; en < numParam * numParam; ++en) {
			thrMeat[en] += th[en];
		}
	}
	// Fold into the fit context's gradient and (optionally) info matrix.
	for (size_t d1=0; d1 < numParam; ++d1) {
		fc->grad(d1) += thrGrad[d1];
	}
	if (fc->infoB) {
		for (size_t d1=0; d1 < numParam; ++d1) {
			for (size_t d2=0; d2 < numParam; ++d2) {
				int cell = d1 * numParam + d2;
				fc->infoB[cell] += thrMeat[cell];
			}
		}
	}
}
// Build the mappings from item-parameter matrix cells to free parameters:
//   paramMap   — per (item, row) slot: free-param index, and per packed
//                Hessian slot: numFreeParam + offset into the dense Hessian;
//   itemGradMap — flattened per-item gradient slots to free-param indices;
//   hBlocks/hbMap — per-item HessianBlock membership and packed offsets.
// Also propagates rpf-supplied parameter bounds onto the free variables
// and checks that equated parameters share the same "flavor".
// Cached per var group via state->haveItemMap.
static void buildItemParamMap(omxFitFunction* oo, FitContext *fc)
{
	FreeVarGroup *fvg = fc->varGroup;
	BA81FitState *state = (BA81FitState *) oo->argStruct;
	BA81Expect *estate = (BA81Expect*) oo->expectation->argStruct;
	std::vector<const double*> &itemSpec = estate->grp.spec;

	if (state->haveItemMap == fc->varGroup->id[0]) return;
	if (estate->verbose >= 1) mxLog("%s: rebuild item parameter map for var group %d",
					oo->name(), fc->varGroup->id[0]);

	omxMatrix *itemParam = estate->itemParam;
	int size = itemParam->cols * state->itemDerivPadSize;
	state->freeItemParams = false;
	state->hBlocks.clear();
	state->hBlocks.resize(itemParam->cols);  // one Hessian block per item
	state->hbMap.assign(size, -1);     // matrix location to HessianBlock offset
	state->paramMap.assign(size, -1);  // matrix location to free param index
	state->itemParamFree.assign(itemParam->rows * itemParam->cols, FALSE);

	const size_t numFreeParam = state->numFreeParam;
	state->paramFlavor.assign(numFreeParam, NULL);

	// Count the actual number of parameters per item (may be fewer than
	// the itemParam matrix rows, which are padded to the max).
	int totalParam = 0;
	state->paramPerItem.resize(itemParam->cols);
	for (int cx=0; cx < itemParam->cols; ++cx) {
		const double *spec = itemSpec[cx];
		const int id = spec[RPF_ISpecID];
		const int numParam = (*Glibrpf_model[id].numParam)(spec);
		state->paramPerItem[cx] = numParam;
		totalParam += numParam;
	}
	state->itemGradMap.assign(totalParam, -1);

	for (size_t px=0; px < numFreeParam; px++) {
		omxFreeVar *fv = fvg->vars[px];
		for (size_t lx=0; lx < fv->locations.size(); lx++) {
			omxFreeVarLocation *loc = &fv->locations[lx];
			// Matrix numbers are stored bit-complemented.
			int matNum = ~loc->matrix;
			if (matNum != itemParam->matrixNumber) continue;

			int at = loc->col * state->itemDerivPadSize + loc->row;
			state->paramMap[at] = px;
			std::vector<int> &varMap = state->hBlocks[loc->col].vars;
			// A free var can occupy several cells of the same item;
			// record it once per block.
			if (std::find(varMap.begin(), varMap.end(), px) == varMap.end()) {
				varMap.push_back(px);
			}
			state->itemParamFree[loc->col * itemParam->rows + loc->row] = TRUE;
			state->freeItemParams = true;

			const double *spec = itemSpec[loc->col];
			int id = spec[RPF_ISpecID];
			const char *flavor;
			double upper, lower;
			(*Glibrpf_model[id].paramInfo)(spec, loc->row, &flavor, &upper, &lower);
			// Equated parameters must agree on flavor across items.
			if (state->paramFlavor[px] == 0) {
				state->paramFlavor[px] = flavor;
			} else if (strcmp(state->paramFlavor[px], flavor) != 0) {
				Rf_error("Cannot equate %s with %s[%d,%d]", fv->name,
					 itemParam->name(), loc->row, loc->col);
			}
			// Tighten bounds from the item model, but only where the
			// user left them unbounded; validate the starting value.
			if (fv->lbound == NEG_INF && std::isfinite(lower)) {
				fv->lbound = lower;
				Global->boundsUpdated = true;
				if (fc->est[px] < fv->lbound) {
					Rf_error("Starting value %s %f less than lower bound %f",
						 fv->name, fc->est[px], lower);
				}
			}
			if (fv->ubound == INF && std::isfinite(upper)) {
				fv->ubound = upper;
				Global->boundsUpdated = true;
				if (fc->est[px] > fv->ubound) {
					Rf_error("Starting value %s %f greater than upper bound %f",
						 fv->name, fc->est[px], upper);
				}
			}
		}
	}

	// Flatten per-item gradient slots into itemGradMap.
	int gradOffset = 0;
	for (int cx=0; cx < itemParam->cols; ++cx) {
		for (int rx=0; rx < state->paramPerItem[cx]; ++rx) {
			int at = cx * state->itemDerivPadSize + rx;
			int px = state->paramMap[at];
			if (px >= 0) state->itemGradMap[gradOffset] = px;
			++gradOffset;
		}
	}

	// Map packed per-item Hessian entries (lower triangle after the
	// gradient pad) to dense-Hessian and HessianBlock offsets.
	for (int cx=0; cx < itemParam->cols; ++cx) {
		HessianBlock &hb = state->hBlocks[cx];
		int numParam = state->paramPerItem[cx];
		for (int p1=0; p1 < numParam; p1++) {
			const int outer_at1 = state->paramMap[cx * state->itemDerivPadSize + p1];
			if (outer_at1 < 0) continue;
			// hb.vars is kept sorted; locate the var's block index.
			const int outer_hb1 = std::lower_bound(hb.vars.begin(), hb.vars.end(), outer_at1) - hb.vars.begin();
			if (hb.vars[outer_hb1] != outer_at1) Rf_error("oops");

			for (int p2=0; p2 <= p1; p2++) {
				int at1 = outer_at1;
				int hb1 = outer_hb1;
				int at2 = state->paramMap[cx * state->itemDerivPadSize + p2];
				if (at2 < 0) continue;
				if (p1 == p2 && at1 != at2) Rf_error("oops");
				int hb2 = std::lower_bound(hb.vars.begin(), hb.vars.end(), at2) - hb.vars.begin();
				if (hb.vars[hb2] != at2) Rf_error("oops");
				// Canonicalize to the lower triangle.
				if (at1 < at2) std::swap(at1, at2);  // outer_at1 unaffected
				if (hb1 < hb2) std::swap(hb1, hb2);  // outer_hb1 unaffected
				// mxLog("Item %d param(%d,%d) -> H[%d,%d] B[%d,%d]",
				//       cx, p1, p2, at1, at2, hb1, hb2);
				int at = cx * state->itemDerivPadSize + numParam + triangleLoc1(p1) + p2;
				int hoffset = at1 * numFreeParam + at2;
				state->paramMap[at] = numFreeParam + hoffset;
				state->hbMap[at] = hb1 * hb.vars.size() + hb2;
			}
		}
	}

	state->haveItemMap = fc->varGroup->id[0];
	//pia(state->paramMap.data(), state->itemDerivPadSize, itemParam->cols);
}
// Set up the quadrature grid for an equal-interval rule on [-Qwidth, Qwidth]
// with Qpoints points per dimension. Computes, for primary dimensions, the
// normalized density weights (priQarea) under N(means, priCov) and, for each
// specific factor, the 1-D weights (speQarea) under its own normal. Also
// caches every quadrature point's coordinates (wherePrep) and Gram products
// (whereGram). With no abilities at all, delegates to setup0().
void ba81NormalQuad::setup(double Qwidth, int Qpoints, double *means,
			   Eigen::MatrixXd &priCov, Eigen::VectorXd &sVar)
{
	quadGridSize = Qpoints;
	numSpecific = sVar.rows() * sVar.cols();
	primaryDims = priCov.rows();
	// All specific factors share one extra grid dimension.
	maxDims = primaryDims + (numSpecific? 1 : 0);
	maxAbilities = primaryDims + numSpecific;

	if (maxAbilities == 0) {
		setup0();
		return;
	}

	// Full grid: quadGridSize^maxDims points.
	totalQuadPoints = 1;
	for (int dx=0; dx < maxDims; dx++) {
		totalQuadPoints *= quadGridSize;
	}

	// Equally spaced 1-D abscissae on [-Qwidth, Qwidth]; rebuilt only when
	// the grid size changed.
	// NOTE(review): quadGridSize == 1 would divide by zero here — presumably
	// callers always request >= 2 points; confirm.
	if (int(Qpoint.size()) != quadGridSize) {
		Qpoint.clear();
		Qpoint.reserve(quadGridSize);
		double qgs = quadGridSize-1;
		for (int px=0; px < quadGridSize; ++px) {
			Qpoint.push_back(px * 2 * Qwidth / qgs - Qwidth);
		}
	}

	// Decode every grid index into coordinates, into a temp buffer
	// (final ordering is decided below).
	std::vector<double> tmpWherePrep(totalQuadPoints * maxDims);
	Eigen::VectorXi quad(maxDims);
	for (int qx=0; qx < totalQuadPoints; qx++) {
		double *wh = tmpWherePrep.data() + qx * maxDims;
		decodeLocation(qx, maxDims, quad.data());
		pointToWhere(quad.data(), wh, maxDims);
	}

	totalPrimaryPoints = totalQuadPoints;
	weightTableSize = totalQuadPoints;
	if (numSpecific) {
		// The last dimension belongs to the specific factors.
		totalPrimaryPoints /= quadGridSize;
		speQarea.resize(quadGridSize * numSpecific);
		weightTableSize *= numSpecific;
	}

	// Unnormalized multivariate-normal density at each primary point.
	std::vector<double> tmpPriQarea;
	tmpPriQarea.reserve(totalPrimaryPoints);
	{
		Eigen::VectorXd where(primaryDims);
		for (int qx=0; qx < totalPrimaryPoints; qx++) {
			decodeLocation(qx, primaryDims, quad.data());
			pointToWhere(quad.data(), where.data(), primaryDims);
			double den = exp(dmvnorm(primaryDims, where.data(), means, priCov.data()));
			tmpPriQarea.push_back(den);
		}
	}

	// Point ordering; the density sort is currently disabled (if (0)).
	std::vector<int> priOrder;
	priOrder.reserve(totalPrimaryPoints);
	for (int qx=0; qx < totalPrimaryPoints; qx++) {
		priOrder.push_back(qx);
	}
	if (0) {
		sortAreaHelper priCmp(tmpPriQarea);
		std::sort(priOrder.begin(), priOrder.end(), priCmp);
	}

	// Normalize the primary weights to sum to One.
	priQarea.clear();
	priQarea.reserve(totalPrimaryPoints);
	double totalArea = 0;
	for (int qx=0; qx < totalPrimaryPoints; qx++) {
		double den = tmpPriQarea[priOrder[qx]];
		priQarea.push_back(den);
		//double prevTotalArea = totalArea;
		totalArea += den;
		// if (totalArea == prevTotalArea) {
		// 	mxLog("%.4g / %.4g = %.4g", den, totalArea, den / totalArea);
		// }
	}
	for (int qx=0; qx < totalPrimaryPoints; qx++) {
		priQarea[qx] *= One;
		priQarea[qx] /= totalArea;
		//mxLog("%.5g,", priQarea[qx]);
	}

	// Per-specific-group 1-D weights, each normalized then scaled by One.
	for (int sgroup=0; sgroup < numSpecific; sgroup++) {
		totalArea = 0;
		double mean = means[primaryDims + sgroup];
		double var = sVar(sgroup);
		for (int qx=0; qx < quadGridSize; qx++) {
			double den = exp(dmvnorm(1, &Qpoint[qx], &mean, &var));
			speQarea[sIndex(sgroup, qx)] = den;
			totalArea += den;
		}
		for (int qx=0; qx < quadGridSize; qx++) {
			speQarea[sIndex(sgroup, qx)] /= totalArea;
		}
	}
	//pda(speQarea.data(), numSpecific, quadGridSize);
	for (int sx=0; sx < int(speQarea.size()); ++sx) {
		speQarea[sx] *= One;
	}
	//pda(speQarea.data(), numSpecific, quadGridSize);

	// Copy coordinates into wherePrep in (possibly re-)ordered form.
	wherePrep.clear();
	wherePrep.reserve(totalQuadPoints * maxDims);
	if (numSpecific == 0) {
		for (int qx=0; qx < totalPrimaryPoints; qx++) {
			int sortq = priOrder[qx] * maxDims;
			for (int dx=0; dx < maxDims; ++dx) {
				wherePrep.push_back(tmpWherePrep[sortq + dx]);
			}
		}
	} else {
		// Keep each primary point's block of specific-dim points together.
		for (int qx=0; qx < totalPrimaryPoints; ++qx) {
			int sortq = priOrder[qx] * quadGridSize;
			for (int sx=0; sx < quadGridSize; ++sx) {
				int base = (sortq + sx) * maxDims;
				for (int dx=0; dx < maxDims; ++dx) {
					wherePrep.push_back(tmpWherePrep[base + dx]);
				}
			}
		}
	}

	// recompute whereGram because the order might have changed
	whereGram.resize(triangleLoc1(maxDims), totalQuadPoints);
	for (int qx=0; qx < totalQuadPoints; qx++) {
		double *wh = wherePrep.data() + qx * maxDims;
		gramProduct(wh, maxDims, &whereGram.coeffRef(0, qx));
	}
}