Example #1
// Given square matrix of Prob[i>j] returns a column vector for Prob[i].
// Uses Markov process, not 1-step conditional probability.
// Challenges have uniform probability 1/N
KMatrix Model::markovUniformPCE(const KMatrix & pv) {
  const double pTol = 1E-6;
  unsigned int numOpt = pv.numR();
  auto p = KMatrix(numOpt, 1, 1.0) / numOpt;  // all 1/n
  auto q = p;
  unsigned int iMax = 1000;  // 10-30 is typical
  unsigned int iter = 0;
  double change = 1.0;
  while (pTol < change) {
    change = 0;
    for (unsigned int i = 0; i < numOpt; i++) {
      double pi = 0.0;
      for (unsigned int j = 0; j < numOpt; j++) {
        pi = pi + pv(i, j)*(p(i, 0) + p(j, 0));
      }
      assert(0 <= pi); // double-check
      q(i, 0) = pi / numOpt;
      double c = fabs(q(i, 0) - p(i, 0));
      change = (c > change) ? c : change;
    }
    // Newton method improves convergence.
    p = (p + q) / 2.0;
    iter++;
    assert(fabs(sum(p) - 1.0) < pTol); // double-check
  }
  assert(iter < iMax); // no way to recover
  return p;
}
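
A minimal usage sketch, assuming markovUniformPCE is reachable here as a static member of Model (Example #11 shows it being dispatched from Model::probCE2). The input must be square with pv(i,j) + pv(j,i) = 1, as produced by Model::vProb:

KMatrix pv = KMatrix(3, 3, 0.5);          // diagonal entries stay at 1/2
pv(0, 1) = 0.70; pv(1, 0) = 0.30;         // option 0 usually beats option 1
pv(0, 2) = 0.60; pv(2, 0) = 0.40;
pv(1, 2) = 0.55; pv(2, 1) = 0.45;
KMatrix p = Model::markovUniformPCE(pv);  // 3x1 column vector; entries sum to 1
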
Example #2
void RP2Model::setRP2(const KMatrix & pm0) {
  const unsigned int nr = pm0.numR();
  const unsigned int nc = pm0.numC();
  if (0 < numAct) {
    assert(nr == numAct);
  }
  else {
    numAct = nr;
  }

  if (0 < numOptions()) {
    assert(nc == numOptions());
  }
  else {
    theta.resize(nc); // was size zero
    for (unsigned int i = 0; i < nc; i++) {
      theta[i] = i;
    }
  }

  assert(minNumActor <= numAct);
  assert(numAct <= maxNumActor);

  assert(minNumOptions <= numOptions());

  for (auto u : pm0) {
    assert(0.0 <= u);
    assert(u <= 1.0);
  }

  // if all OK, set it
  polUtilMat = pm0;

  return;
}
Example #3
KMatrix rescaleRows(const KMatrix& m1, const double vMin, const double vMax) {
    assert(vMin < vMax);
    const unsigned int nr = m1.numR();
    const unsigned int nc = m1.numC();
    KMatrix m2 = KMatrix(nr, nc);

    for (unsigned int i = 0; i < nr; i++) {
        double rowMin = m1(i, 0);
        double rowMax = m1(i, 0);
        for (unsigned int j = 0; j < nc; j++) {
            const double mij = m1(i, j);
            if (mij < rowMin) {
                rowMin = mij;
            }
            if (mij > rowMax) {
                rowMax = mij;
            }
        }
        const double rowRange = rowMax - rowMin;
        assert(0 < rowRange);

        for (unsigned int j = 0; j < nc; j++) {
            const double mij = m1(i, j);
            const double nij = (mij - rowMin) / rowRange; // normalize into [0, 1]
            const double rij = vMin + (vMax - vMin)*nij; // rescale into [vMin, vMax]
            m2(i, j) = rij;
        }
    }


    return m2;
}
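
A minimal usage sketch; Example #13 calls this function as KBase::rescaleRows, so a namespace qualifier may be needed outside that namespace:

KMatrix m = KMatrix(2, 3);
m(0, 0) = 1.0;  m(0, 1) = 2.0;  m(0, 2) = 3.0;
m(1, 0) = 10.0; m(1, 1) = 20.0; m(1, 2) = 40.0;
KMatrix r = rescaleRows(m, 0.0, 10.0);
// row 0 becomes 0.0, 5.0, 10.0; row 1 becomes 0.0, 3.33, 10.0
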
Example #4
KMatrix Model::bigRfromProb(const KMatrix & p, BigRRange rr) {
  double pMin = 1.0;
  double pMax = 0.0;
  for (double pi : p) {
    assert(0.0 <= pi);
    pMin = (pi < pMin) ? pi : pMin;
    pMax = (pi > pMax) ? pi : pMax;
  }

  const double pTol = 1E-8;
  assert(fabs(1 - KBase::sum(p)) < pTol);

  function<double(unsigned int, unsigned int)> rfn = nullptr;
  switch (rr) {
  case BigRRange::Min:
    rfn = [pMin, pMax, p](unsigned int i, unsigned int j) {
      return (p(i, j) - pMin) / (pMax - pMin);
    };
    break;
  case BigRRange::Mid:
    rfn = [pMin, pMax, p](unsigned int i, unsigned int j) {
      return (3 * p(i, j) - (pMax + 2 * pMin)) / (2 * (pMax - pMin));
    };
    break;
  case BigRRange::Max:
    rfn = [pMin, pMax, p](unsigned int i, unsigned int j) {
      return (2 * p(i, j) - (pMax + pMin)) / (pMax - pMin);
    };
    break;
  }
  auto rMat = KMatrix::map(rfn, p.numR(), p.numC());
  return rMat;
}
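
From the three formulas above, the most likely option always maps to R = 1, while the least likely maps to 0 (Min), -1/2 (Mid), or -1 (Max). A minimal usage sketch, assuming bigRfromProb is reachable as a static member of Model like vProb and probCE:

KMatrix p = KMatrix(3, 1);
p(0, 0) = 0.2; p(1, 0) = 0.3; p(2, 0) = 0.5;      // sums to 1, as the assert requires
KMatrix r = Model::bigRfromProb(p, BigRRange::Mid);
// r = [-0.5, 0.0, +1.0]: pMin maps to -1/2 and pMax maps to +1
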
Example #5
// returns a square matrix of prob(OptI > OptJ)
// these are assumed to be unique options.
// w is a [1,actor] row-vector of actor strengths, u is [act,option] utilities.
KMatrix Model::vProb(VotingRule vr, VPModel vpm, const KMatrix & w, const KMatrix & u) {
  // u_ij is utility to actor i of the position advocated by actor j
  unsigned int numAct = u.numR();
  unsigned int numOpt = u.numC();
  // w_j is row-vector of actor weights, for simple voting
  assert(numAct == w.numC()); // require 1-to-1 matching of actors and strengths
  assert(1 == w.numR()); // weights must be a row-vector

  auto vfn = [vr, &w, &u](unsigned int k, unsigned int i, unsigned int j) {
    double vkij = vote(vr, w(0, k), u(k, i), u(k, j));
    return vkij;
  };

  auto c = coalitions(vfn, numAct, numOpt); // c(i,j) = strength of coalition for i against j
  KMatrix p = vProb(vpm, c);  // p(i,j) = prob Ai defeats Aj
  return p;
}
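
A minimal usage sketch with two actors and three options; VPModel::Linear appears in Examples #7 and #12, and VotingRule::Proportional is assumed to be one of the available voting rules:

KMatrix w = KMatrix(1, 2);                 // 1 x numAct row-vector of strengths
w(0, 0) = 10.0; w(0, 1) = 20.0;
KMatrix u = KMatrix(2, 3, 0.5);            // numAct x numOpt utilities in [0, 1]
u(0, 0) = 1.0;                             // actor 0 strictly prefers option 0
u(1, 2) = 1.0;                             // actor 1 strictly prefers option 2
KMatrix pv = Model::vProb(VotingRule::Proportional, VPModel::Linear, w, u);
// pv is 3 x 3 with pv(i,j) + pv(j,i) = 1 and 0.5 on the diagonal
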
Example #6
vector<KMatrix> VHCSearch::vn1(const KMatrix & m0, double s) {
  unsigned int n = m0.numR();
  auto nghbrs = vector<KMatrix>();
  double pms[] = { -1, +1 };
  for (unsigned int i = 0; i < n; i++) {
    for (double si : pms) {
      KMatrix m1 = m0;
      m1(i, 0) = m0(i, 0) + (si*s);
      nghbrs.push_back(m1);
    }
  }
  return nghbrs;
}
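
A minimal usage sketch, assuming vn1 can be called without a VHCSearch instance; it returns 2n neighbors of an n x 1 column vector, stepping +/- s in each coordinate:

KMatrix m0 = KMatrix(2, 1, 0.5);
auto nbrs = VHCSearch::vn1(m0, 0.1);
// four neighbors: (0.4, 0.5), (0.6, 0.5), (0.5, 0.4), (0.5, 0.6)
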
Example #7
tuple <KMatrix, VUI> RPState::pDist(int persp) const {
    /// Calculate the probability distribution over the unique positions, from this perspective

    // TODO: convert this to a single, commonly used setup function

    const unsigned int numA = model->numAct;
    const unsigned int numP = numA; // for this demo, the number of positions is exactly the number of actors

    // get unique indices and their probability
    assert (0 < uIndices.size()); // should have been set with setUENdx();
    //auto uNdx2 = uniqueNdx(); // get the indices to unique positions

    const unsigned int numU = uIndices.size();
    assert(numU <= numP); // might have dropped some duplicates

    cout << "Number of aUtils: " << aUtil.size() << endl << flush;

    const KMatrix u = aUtil[0]; // all have same beliefs in this demo

    auto uufn = [u, this](unsigned int i, unsigned int j1) {
        return u(i, uIndices[j1]);
    };

    auto uMat = KMatrix::map(uufn, numA, numU);
    auto vpm = VPModel::Linear;
    assert(uMat.numR() == numA); // must include all actors
    assert(uMat.numC() == numU);

    // vote_k ( i : j )
    auto vkij = [this, uMat](unsigned int k, unsigned int i, unsigned int j) {
        auto ak = (RPActor*)(model->actrs[k]);
        auto v_kij = Model::vote(ak->vr, ak->sCap, uMat(k, i), uMat(k, j));
        return v_kij;
    };

    // the following uses exactly the values in the given uMat,
    // which may or may not be square
    const KMatrix c = Model::coalitions(vkij, uMat.numR(), uMat.numC());
    const KMatrix pv = Model::vProb(vpm, c); // square
    const KMatrix p = Model::probCE(PCEModel::ConditionalPCM, pv); // column
    const KMatrix eu = uMat*p; // column

    assert(numA == eu.numR());
    assert(1 == eu.numC());

    return tuple <KMatrix, VUI>(p, uIndices);
}
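
A minimal usage sketch; the persp argument is unused in this demo body because all actors share aUtil[0]. Here st is an assumed RPState whose aUtil has been computed and whose setUENdx() has already been called:

auto pd = st->pDist(0);
const KMatrix p = get<0>(pd);   // column of probabilities over the unique positions
const VUI ndx   = get<1>(pd);   // indices of those unique positions
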
Example #8
// Given square matrix of Prob[i>j] returns a column vector for Prob[i].
// Uses 1-step conditional probabilities, not Markov process
KMatrix Model::condPCE(const KMatrix & pv) {
  unsigned int numOpt = pv.numR();
  auto p = KMatrix(numOpt, 1);
  for (unsigned int i = 0; i < numOpt; i++) {
    double pi = 1.0;
    for (unsigned int j = 0; j < numOpt; j++) {
      pi = pi * pv(i, j);
    }
    // double-check
    assert(0 <= pi);
    assert(pi <= 1);
    p(i, 0) = pi; // probability that i beats all alternatives
  }
  double probOne = sum(p); // probability that one option, any option, beats all alternatives
  p = (p / probOne); // conditional probability that i is that one.
  return p;
}
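
A minimal numerical sketch; the common factor pv(i,i) = 1/2 appears in every row's product, so it cancels in the normalization. condPCE is assumed to be accessible from the calling context (the other examples reach it via Model::probCE and Model::probCE2):

KMatrix pv = KMatrix(2, 2, 0.5);
pv(0, 1) = 0.8; pv(1, 0) = 0.2;
KMatrix p = Model::condPCE(pv);
// raw products are 0.5*0.8 = 0.4 and 0.2*0.5 = 0.1, so p = [0.8, 0.2]
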
Example #9
// note that while the C_ij can be any arbitrary positive matrix
// with C_kk = 0, the p_ij matrix has the symmetry pij + pji = 1
// (and hence pkk = 1/2).
KMatrix Model::vProb(VPModel vpm, const KMatrix & c) {
  unsigned int numOpt = c.numR();
  assert(numOpt == c.numC());
  auto p = KMatrix(numOpt, numOpt);
  for (unsigned int i = 0; i < numOpt; i++) {
    for (unsigned int j = 0; j < i; j++) {
      double cij = c(i, j);
      assert(0 <= cij);
      double cji = c(j, i);
      assert(0 <= cji);
      assert((0 < cij) || (0 < cji));
      auto ppr = vProb(vpm, cij, cji);
      p(i, j) = get<0>(ppr); // set the lower left  probability: if Linear, cij / (cij + cji)
      p(j, i) = get<1>(ppr); // set the upper right probability: if Linear, cji / (cij + cji)
    }
    p(i, i) = 0.5; // set the diagonal probability
  }
  return p;
}
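
A minimal numerical sketch of the Linear case, where p(i,j) = c(i,j) / (c(i,j) + c(j,i)):

KMatrix c = KMatrix(2, 2, 0.0);            // zero diagonal
c(0, 1) = 30.0; c(1, 0) = 10.0;            // coalition strengths for 0-over-1 and 1-over-0
KMatrix pv = Model::vProb(VPModel::Linear, c);
// pv(0,1) = 0.75, pv(1,0) = 0.25, and both diagonal entries are 0.5
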
Example #10
void RP2Model::setWeights(const KMatrix & w0) {
  const unsigned int nr = w0.numR();
  const unsigned int nc = w0.numC();

  assert(1 == nr);

  if (0 < numAct) {
    assert(nc == numAct);
  }
  else {
    numAct = nc;
  }

  for (auto w : w0) {
    assert(0.0 <= w);
  }
  assert(minNumActor <= numAct);
  assert(numAct <= maxNumActor);

  // if it is OK, set it
  wghtVect = w0;
  return;
}
Example #11
tuple<KMatrix, KMatrix> Model::probCE2(PCEModel pcm, VPModel vpm, const KMatrix & cltnStrngth) {
  const double pTol = 1E-8;
  unsigned int numOpt = cltnStrngth.numR();
  auto p = KMatrix(numOpt, 1);
  const auto victProb = Model::vProb(vpm, cltnStrngth); // prob of victory, square
  switch (pcm) {
  case PCEModel::ConditionalPCM:
    p = condPCE(victProb);
    break;
  case PCEModel::MarkovIPCM:
    p = markovIncentivePCE(cltnStrngth, vpm);
    break;
  case PCEModel::MarkovUPCM:
    p = markovUniformPCE(victProb);
    break;
  default:
    throw KException("Model::probCE2 unrecognized PCEModel");
    break;
  }
  assert(numOpt == p.numR());
  assert(1 == p.numC());
  assert(fabs(sum(p) - 1.0) < pTol);
  return tuple<KMatrix, KMatrix>(p, victProb);
}
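
A minimal usage sketch, reusing the coalition matrix from the Example #9 sketch and assuming probCE2 is reachable as a static member like Model::vProb:

KMatrix c = KMatrix(2, 2, 0.0);
c(0, 1) = 30.0; c(1, 0) = 10.0;
auto pr = Model::probCE2(PCEModel::MarkovUPCM, VPModel::Linear, c);
const KMatrix p  = get<0>(pr);   // 2x1 column of P[Opt_i], sums to 1
const KMatrix pv = get<1>(pr);   // 2x2 matrix of P[Opt_i > Opt_j]
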
Example #12
RPState* RPState::doSUSN(ReportingLevel rl) const {
    RPState* s2 = nullptr;
    const unsigned int numA = model->numAct;
    assert(numA == rpMod->actrs.size());

    const unsigned int numU = uIndices.size();
    assert ((0 < numU) && (numU <= numA));
    assert (numA == eIndices.size());

    // TODO: filter out essentially-duplicate positions
    //printf("RPState::doSUSN: numA %i \n", numA);
    //printf("RPState::doSUSN: numP %i \n", numP);
    //cout << endl << flush;

    const KMatrix u = aUtil[0]; // all have same beliefs in this demo

    auto vpm = VPModel::Linear;
    const unsigned int numP = pstns.size();
    // Given the utility matrix, uMat, calculate the expected utility to each actor,
    // as a column-vector. Again, this is from the perspective of whoever developed uMat.
    auto euMat = [rl, numA, numP, vpm, this](const KMatrix & uMat) {
        // BTW, be sure to lambda-bind uMat *after* it is modified.
        assert(uMat.numR() == numA); // must include all actors
        assert(uMat.numC() <= numP); // might have dropped some duplicates
        auto uRng = [uMat](unsigned int i, unsigned int j) {
            if ((uMat(i, j) < 0.0) || (1.0 < uMat(i, j))) {
                printf("%f  %i  %i  \n", uMat(i, j), i, j);
                cout << flush;
                cout << flush;

            }
            assert(0.0 <= uMat(i, j));
            assert(uMat(i, j) <= 1.0);
            return;
        };
        KMatrix::mapV(uRng, uMat.numR(), uMat.numC());
        // vote_k ( i : j )
        auto vkij = [this, uMat](unsigned int k, unsigned int i, unsigned int j) {
            auto ak = (RPActor*)(rpMod->actrs[k]);
            auto v_kij = Model::vote(ak->vr, ak->sCap, uMat(k, i), uMat(k, j));
            return v_kij;
        };

        // the following uses exactly the values in the given uMat,
        // which may or may not be square
        const KMatrix c = Model::coalitions(vkij, uMat.numR(), uMat.numC());
        const KMatrix pv = Model::vProb(vpm, c); // square
        const KMatrix p = Model::probCE(PCEModel::ConditionalPCM, pv); // column
        const KMatrix eu = uMat*p; // column

        assert(numA == eu.numR());
        assert(1 == eu.numC());
        auto euRng = [eu](unsigned int i, unsigned int j) {
            // due to round-off error, we must have a tolerance factor
            const double tol = 1E-10;
            const double euij = eu(i, j);
            assert(0.0 <= euij+tol);
            assert(euij <= 1.0+tol);
            return;
        };
        KMatrix::mapV(euRng, eu.numR(), eu.numC());


        if (ReportingLevel::Low < rl) {
            printf("Util matrix is %i x %i \n", uMat.numR(), uMat.numC());
            cout << "Assessing EU from util matrix: " << endl;
            uMat.mPrintf(" %.6f ");
            cout << endl << flush;

            cout << "Coalition strength matrix" << endl;
            c.mPrintf(" %12.6f ");
            cout << endl << flush;

            cout << "Probability Opt_i > Opt_j" << endl;
            pv.mPrintf(" %.6f ");
            cout << endl << flush;

            cout << "Probability Opt_i" << endl;
            p.mPrintf(" %.6f ");
            cout << endl << flush;

            cout << "Expected utility to actors: " << endl;
            eu.mPrintf(" %.6f ");
            cout << endl << flush;
        }

        return eu;
    };
    // end of euMat

    auto euState = euMat(u);
    cout << "Actor expected utilities: ";
    KBase::trans(euState).mPrintf("%6.4f, ");
    cout << endl << flush;

    if (ReportingLevel::Low < rl) {
        printf("--------------------------------------- \n");
        printf("Assessing utility of actual state to all actors \n");
        for (unsigned int h = 0; h < numA; h++) {
            cout << "not available" << endl;
        }
        cout << endl << flush;
        printf("Out of %u positions, %u were unique: ", numA, numU);
        cout << flush;
        for (auto i : uIndices) {
            printf("%2i ", i);
        }
        cout << endl;
        cout << flush;
    }


    auto uufn = [u, this](unsigned int i, unsigned int j1) {
        return u(i, uIndices[j1]);
    };
    auto uUnique = KMatrix::map(uufn, numA, numU);


    // Get expected-utility vector, one entry for each actor, in the current state.
    const KMatrix eu0 = euMat(uUnique); // 'u' with duplicates, 'uUnique' without duplicates

    s2 = new RPState(model);

    //s2->pstns = vector<KBase::Position*>();
    for (unsigned int h = 0; h < numA; h++) {
        s2->pstns.push_back(nullptr);
    }

    // TODO: clean up the nesting of lambda-functions.
    // need to create a hypothetical state and run setOneAUtil(h,Silent) on it
    //
    // The newPosFn does a GA optimization to find the best next position for actor h,
    // and stores it in s2. To do that, it defines three functions for evaluation, neighbors, and show:
    // efn, nfn, and sfn.
    auto newPosFn = [this, rl, euMat, u, eu0, s2](const unsigned int h) {
        s2->pstns[h] = nullptr;

        auto ph = ((const MtchPstn *)(pstns[h]));

        // Evaluate h's estimate of the expected utility, to h, of
        // advocating position mp. To do this, build a hypothetical utility matrix representing
        // h's estimates of the direct utilities to all other actors of h adopting this
        // Position. Do that by modifying the h-column of h's matrix.
        // Then compute the expected probability distribution, over h's hypothetical position
        // and everyone else's actual position. Finally, compute the expected utility to
        // each actor, given that distribution, and pick out the value for h's expected utility.
        // That is the expected value to h of adopting the position.
        auto efn = [this, euMat, rl, u, h](const MtchPstn & mph) {
            // This correctly handles duplicated/unique options
            // We modify the given euMat so that the h-column
            // corresponds to the given mph, but we need to prune duplicates as well.
            // This entails some type-juggling.
            const KMatrix uh0 = aUtil[h];
            assert(KBase::maxAbs(u - uh0) < 1E-10); // all have same beliefs in this demo
            if (mph.match.size() != rpMod->numItm) {
                cout << mph.match.size() << endl << flush;
                cout << rpMod->numItm << endl << flush;
                cout << flush << flush;
            }
            assert(mph.match.size() == rpMod->numItm);
            auto uh = uh0;
            for (unsigned int i = 0; i < rpMod->numAct; i++) {
                auto ai = (RPActor*)(rpMod->actrs[i]);
                double uih = ai->posUtil(&mph);
                uh(i, h) = uih; // utility to actor i of this hypothetical position by h
            }

            // 'uh' now has the correct h-column. Now we need to see how many options
            // are unique in the hypothetical state, and keep only those columns.
            // This entails juggling back and forth between the all current positions
            // and the one hypothetical position (mph at h).
            // Thus, the next call to euMat will consider only unique options.
            auto equivHNdx = [this, h, mph](const unsigned int i, const unsigned int j) {
                // this little function takes care of the different types needed to compare
                // dynamic pointers to positions (all but h) with a constant position (h itself).
                // In other words, the comparisons for index 'h' use the hypothetical mph, not pstns[h]
                bool rslt = false;
                auto mpi = ((const MtchPstn *)(pstns[i]));
                auto mpj = ((const MtchPstn *)(pstns[j]));
                assert(mpi != nullptr);
                assert(mpj != nullptr);
                if (i == j) {
                    rslt = true; // Pi == Pj, always
                }
                else if (h == i) {
                    rslt = (mph == (*mpj));
                }
                else if (h == j) {
                    rslt = ((*mpi) == mph);
                }
                else {
                    rslt = ((*mpi) == (*mpj));
                }
                return rslt;
            };

            auto ns = KBase::uiSeq(0, model->numAct - 1);
            const VUI uNdx = get<0>(KBase::ueIndices<unsigned int>(ns, equivHNdx));
            const unsigned int numU = uNdx.size();
            auto hypUtil = KMatrix(rpMod->numAct, numU);
            // we need now to go through 'uh', copying column J the first time
            // the J-th position is determined to be equivalent to something in the unique list
            for (unsigned int i = 0; i < rpMod->numAct; i++) {
                for (unsigned int j1 = 0; j1 < numU; j1++) {
                    unsigned int j2 = uNdx[j1];
                    hypUtil(i, j1) = uh(i, j2); // hypothetical utility in column h
                }
            }

            if (false) {
                cout << "constructed hypUtil matrix:" << endl << flush;
                hypUtil.mPrintf(" %8.2f ");
                cout << endl << flush;
            }


            if (ReportingLevel::Low < rl) {
                printf("--------------------------------------- \n");
                printf("Assessing utility to %2i of hypo-pos: ", h);
                printPerm(mph.match);
                cout << endl << flush;
                printf("Hypo-util minus base util: \n");
                (uh - uh0).mPrintf(" %+.4E ");
                cout << endl << flush;
            }
            const KMatrix eu = euMat(hypUtil); // uh or hypUtil
            // BUG: If we use 'uh' here, it passes the (0 <= delta-EU) test, because
            // both hypothetical and actual are then calculated without dropping duplicates.
            // If we use 'hypUtil' here, it sometimes gets (delta-EU < 0), because
            // the hypothetical drops duplicates but the actual (computed elsewhere) does not.
            // FIX: fix  the 'elsewhere'
            const double euh = eu(h, 0);
            assert(0 < euh);
            //cout << euh << endl << flush;
            //printPerm(mp.match);
            //cout << endl << flush;
            //cout << flush;
            return euh;
        }; // end of efn

        /*
                // I do not actually use prevMP, but it is still an example for std::set
                auto prevMP = [](const MtchPstn & mp1, const MtchPstn & mp2) {
                    bool r = std::lexicographical_compare(
                                 mp1.match.begin(), mp1.match.end(),
                                 mp2.match.begin(), mp2.match.end());
                    return r;
                };
                std::set<MtchPstn, bool(*)(const MtchPstn &, const MtchPstn &)> mpSet(prevMP);
        */

        // return vector of neighboring 1-permutations
        auto nfn = [](const MtchPstn & mp0) {
            const unsigned int numI = mp0.match.size();
            auto mpVec = vector <MtchPstn>();
            mpVec.push_back(MtchPstn(mp0));

            // one-permutations
            for (unsigned int i = 0; i < numI; i++) {
                for (unsigned int j = i + 1; j < numI; j++) {
                    unsigned int ei = mp0.match[i];
                    unsigned int ej = mp0.match[j];

                    auto mij = MtchPstn(mp0);
                    mij.match[i] = ej;
                    mij.match[j] = ei;
                    mpVec.push_back(mij);
                }
            }


            // two-permutations
            for (unsigned int i = 0; i < numI; i++) {
                for (unsigned int j = i + 1; j < numI; j++) {
                    for (unsigned int k = j + 1; k < numI; k++) {
                        unsigned int ei = mp0.match[i];
                        unsigned int ej = mp0.match[j];
                        unsigned int ek = mp0.match[k];

                        auto mjki = MtchPstn(mp0);
                        mjki.match[i] = ej;
                        mjki.match[j] = ek;
                        mjki.match[k] = ei;
                        mpVec.push_back(mjki);

                        auto mkij = MtchPstn(mp0);
                        mkij.match[i] = ek;
                        mkij.match[j] = ei;
                        mkij.match[k] = ej;
                        mpVec.push_back(mkij);

                    }
                }
            }
            //unsigned int mvs = mpVec.size() ;
            //cout << mvs << endl << flush;
            //cout << flush;
            return mpVec;
        }; // end of nfn

        // show some representation of this position on cout
        auto sfn = [](const MtchPstn & mp0) {
            printPerm(mp0.match);
            return;
        };

        auto ghc = new KBase::GHCSearch<MtchPstn>();
        ghc->eval = efn;
        ghc->nghbrs = nfn;
        ghc->show = sfn;

        auto rslt = ghc->run(*ph, // start from h's current positions
                             ReportingLevel::Silent,
                             100, // iter max
                             3, 0.001); // stable-max, stable-tol

        if (ReportingLevel::Low < rl) {
            printf("---------------------------------------- \n");
            printf("Search for best next-position of actor %2i \n", h);
            //printf("Search for best next-position of actor %2i starting from ", h);
            //trans(*aPos).printf(" %+.6f ");
            cout << flush;
        }

        double vBest = get<0>(rslt);
        MtchPstn pBest = get<1>(rslt);
        unsigned int iterN = get<2>(rslt);
        unsigned int stblN = get<3>(rslt);

        delete ghc;
        ghc = nullptr;
        if (ReportingLevel::Medium < rl) {
            printf("Iter: %u  Stable: %u \n", iterN, stblN);
            printf("Best value for %2i: %+.6f \n", h, vBest);
            cout << "Best position:    " << endl;
            cout << "numCat: " << pBest.numCat << endl;
            cout << "numItm: " << pBest.numItm << endl;
            cout << "perm: ";
            printPerm(pBest.match);
            cout << endl << flush;
        }
        MtchPstn * posBest = new MtchPstn(pBest);
        s2->pstns[h] = posBest;
        // no need for mutex, as s2->pstns is the only shared var,
        // and each h is different.

        double du = vBest - eu0(h, 0); // (hypothetical, future) - (actual, current)
        if (ReportingLevel::Low < rl) {
            printf("EU improvement for %2i of %+.4E \n", h, du);
        }
        //printf("  vBest = %+.6f \n", vBest);
        //printf("  eu0(%i, 0) for %i = %+.6f \n", h, h, eu0(h,0));
        //cout << endl << flush;
        // Logically, du should always be non-negative, as GHC never returns a worse value than the starting point.
        // However, actors plan on the assumption that all others do not change - yet they do.
        const double eps = 0.05; // 0.025; // enough to avoid problems with round-off error
        assert(-eps <= du);
        return;
    }; // end of newPosFn

    const bool par = true;
    auto ts = vector<thread>();
    // Each actor, h, finds the position which maximizes their EU in this situation.
    for (unsigned int h = 0; h < numA; h++) {
        if (par) { // launch all, concurrent
            ts.push_back(thread([newPosFn, h]() {
                newPosFn(h);
                return;
            }));
        }
        else { // do each, sequential
            newPosFn(h);
        }
    }

    if (par) { // now join them all before continuing
        for (auto& t : ts) {
            t.join();
        }
    }

    assert(nullptr != s2);
    assert(numP == s2->pstns.size());
    assert(numA == s2->model->numAct);
    for (auto p : s2->pstns) {
        assert(nullptr != p);
    }
    s2->setUENdx();
    return s2;
}
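
A minimal usage sketch; st is an assumed, fully initialized RPState (positions and aUtil set, setUENdx() called):

RPState* s2 = st->doSUSN(ReportingLevel::Low);
// s2->pstns[h] holds actor h's EU-maximizing next position
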
Example #13
// return the list of the most self-interested position of each actor,
// with the CP last.
// As a side-effect, set each actor's min/max permutation values so as to
// compute normalized utilities later.
vector<VUI> scanAllPossiblePositions(const RPModel * rpm) {
  unsigned int numA = rpm->numAct;
  unsigned int numRefItem = rpm->numItm;
  assert(numRefItem == rpm->numCat);

  LOG(INFO) << "There are" << numA << "actors and" << numRefItem << "reform items";



  KMatrix aCap = KMatrix(1, numA);
  for (unsigned int i = 0; i < numA; i++) {
    auto ri = ((const RPActor *)(rpm->actrs[i]));
    aCap(0, i) = ri->sCap;
  }
  LOG(INFO) << "Actor capabilities: ";
  aCap.mPrintf(" %.2f ");


  LOG(INFO) << "Effective gov cost of items:";
  (rpm->govCost).mPrintf("%.3f ");
  LOG(INFO) << "Government budget: " << rpm->govBudget;
  assert(0 < rpm->govBudget);


  string log("Value to actors (rows) of individual reform items (columns):");
  for (unsigned int i = 0; i < rpm->actrs.size(); i++) {
    auto rai = ((const RPActor*)(rpm->actrs[i]));
    for (unsigned int j = 0; j < numRefItem; j++) {
      double vij = rai->riVals[j];
      log += KBase::getFormattedString(" %6.2f", vij);
    }
  }
  LOG(INFO) << log;

  LOG(INFO) << "Computing positions ... ";
  vector<VUI> allPositions; // list of all possible positions
  VUI pstn;
  // build the first permutation: 0,1,2,3,...
  for (unsigned int i = 0; i < numRefItem; i++) {
    pstn.push_back(i);
  }
  allPositions.push_back(pstn);
  while (next_permutation(pstn.begin(), pstn.end())) {
    allPositions.push_back(pstn);
  }
  const unsigned int numPos = allPositions.size();
  LOG(INFO) << "For" << numRefItem << "reform items there are"
   << numPos << "positions";


  // -------------------------------------------------
  // The next section sets up actor utilities.
  // First, we compute the unnormalized, raw utilities. The 'utilActorPos' checks
  // to see if pvMin/pvMax have been set, and returns the raw scores if not.
  // Then we scan across rows to find that actor's pvMin/pvMax, and record that
  // so utilActorPos can use it in the future. Finally, we normalize the rows and
  // display the normalized utility matrix.
  auto ruFn = [allPositions, rpm](unsigned int ai, unsigned int pj) {
    auto pstn = allPositions[pj];
    double uip = rpm->utilActorPos(ai, pstn);
    return uip;
  };

  LOG(INFO) << "Computing utilities of positions ... ";
  // rows are actors, columns are all possible positions
  auto rawUij = KMatrix::map(ruFn, numA, numPos);

  // set the min/max for each actor
  for (unsigned int i = 0; i < numA; i++) {
    double pvMin = rawUij(i, 0);
    double pvMax = rawUij(i, 0);
    for (unsigned int j = 0; j < numPos; j++) {
      double rij = rawUij(i, j);
      if (rij < pvMin) {
        pvMin = rij;
      }
      if (rij > pvMax) {
        pvMax = rij;
      }
    }
    assert(0 <= pvMin);
    assert(pvMin < pvMax);
    auto ai = ((RPActor*)(rpm->actrs[i]));
    ai->posValMin = pvMin;
    ai->posValMax = pvMax;
  }
  LOG(INFO) << "Normalizing utilities of positions ... ";
  KMatrix uij = KBase::rescaleRows(rawUij, 0.0, 1.0); // von Neumann utility scale

  string utilMtx("Complete (normalized) utility matrix of all possible positions (rows) versus actors (columns) \n");
  for (unsigned int pj = 0; pj < numPos; pj++) {
    utilMtx += KBase::getFormattedString("%3u  ", pj);
    auto pstn = allPositions[pj];
    //printVUI(pstn);
    utilMtx += KBase::stringVUI(pstn);
    utilMtx += "  ";
    for (unsigned int ai = 0; ai < numA; ai++) {
      double uap = uij(ai, pj);
      utilMtx += KBase::getFormattedString("%6.4f, ", uap);
    }
    utilMtx += KBase::getFormattedString("\n");
  }
  LOG(INFO) << utilMtx;

  // -------------------------------------------------
  // The next section determines the most self-interested positions for each actor,
  // as well as the 'central position' over all possible reform priorities
  // (which 'office-seeking politicians' would adopt IF voting were proportional).
  LOG(INFO) << "Computing best position for each actor";
  vector<VUI> bestAP; // list of each actor's best position (followed by CP)
  for (unsigned int ai = 0; ai < numA; ai++) {
    unsigned int bestJ = 0;
    double bestV = 0;
    for (unsigned int pj = 0; pj < numPos; pj++) {
      if (bestV < uij(ai, pj)) {
        bestJ = pj;
        bestV = uij(ai, pj);
      }
    }
    string bestMtx("Best position for ");
    string ais = std::to_string(ai);
    //string bjs = std::to_string(bestJ);
    string ps = KBase::stringVUI(allPositions[bestJ]);
    bestMtx += ais + " is " + ps;
    LOG(INFO) << bestMtx; 
    //LOG(INFO) << "Best for" << ai << "is ";
    //printVUI(positions[bestJ]);
    bestAP.push_back(allPositions[bestJ]);
  }


  LOG(INFO) << "Computing zeta ... ";
  KMatrix zeta = aCap * uij;
  assert((1 == zeta.numR()) && (numPos == zeta.numC()));


  LOG(INFO) << "Sorting positions from most to least net support ...";

  auto betterPR = [](tuple<unsigned int, double, VUI> pr1,
      tuple<unsigned int, double, VUI> pr2) {
    double v1 = get<1>(pr1);
    double v2 = get<1>(pr2);
    bool better = (v1 > v2);
    return better;
  };

  auto pairs = vector<tuple<unsigned int, double, VUI>>();
  for (unsigned int i = 0; i < numPos; i++) {
    auto pri = tuple<unsigned int, double, VUI>(i, zeta(0, i), allPositions[i]);
    pairs.push_back(pri);
  }

  sort(pairs.begin(), pairs.end(), betterPR);

  const unsigned int maxDisplayed = 720; // factorial(6)
  unsigned int  numPr = (pairs.size() < maxDisplayed) ? pairs.size() : maxDisplayed;

  LOG(INFO) << "Displaying highest" << numPr;
  for (unsigned int i = 0; i < numPr; i++) {
    auto pri = pairs[i];
    unsigned int ni = get<0>(pri);
    double zi = get<1>(pri);
    VUI pi = get<2>(pri);
    string ps = KBase::stringVUI(pi);
    LOG(INFO) << KBase::getFormattedString(" %3u: %4u  %.2f  %s", i, ni, zi, ps.c_str());
    //printVUI(pi);
  }

  VUI bestPerm = get<2>(pairs[0]);

  bestAP.push_back(bestPerm);
  return bestAP;
}
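
A minimal usage sketch; rpm is an assumed RPModel whose actors, reform items, and government budget have already been configured:

vector<VUI> startPos = scanAllPossiblePositions(rpm);
// startPos[i] is actor i's most self-interested permutation;
// startPos.back() is the central position with the highest zeta score
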
Example #14
// Given square matrix of strengths, Coalition[i over j] returns a column vector for Prob[i].
// Uses Markov process, not 1-step conditional probability.
// Challenge probabilities are proportional to influence promoting a challenge
KMatrix Model::markovIncentivePCE(const KMatrix & coalitions, VPModel vpm) {
  using KBase::sqr;
  using KBase::qrtc;
  const bool printP = false;
  const double pTol = 1E-8;
  const unsigned int numOpt = coalitions.numR();
  assert(numOpt == coalitions.numC());

  const auto victProbMatrix = vProb(vpm, coalitions);

  // given coalitions, calculate the total incentive for i to challenge j
  // This is n[ i -> j] in the "Markov Voting with Incentives in KTAB" paper
  auto iFn = [victProbMatrix, coalitions](unsigned int i, unsigned int j) {
    const double epsSupport = 1E-10;
    const double sij = coalitions(i, j);
    double inctv = sij * victProbMatrix(i,j);
    if (i == j) {
      inctv = inctv + epsSupport;
    }
    return inctv;
  };

  const auto inctvMatrix = KMatrix::map(iFn, numOpt, numOpt);

  // Using the incentives, calculate the probability of i challenging j,
  // given that j is the current favorite proposal.
  // This is P[ i -> j] in the "Markov Voting with Incentives in KTAB" paper
  // Note that if every actor prefers j to every other option,
  // then all incentive(i,j) will be zero, except incentive(j,j) = eps.
  // Even in this case, we will not get a division by zero error,
  // and it will correctly return that the only "challenger" is j itself,
  // with guaranteed success.
  //
  auto cpFn = [inctvMatrix, numOpt](unsigned int i, unsigned int j) {
    double sum = 0.0;
    for (unsigned int k = 0; k < numOpt; k++) {
      sum = sum + inctvMatrix(k, j);
    }
    const double pij = inctvMatrix(i, j) / sum;
    return pij;
  };

  const auto chlgProbMatrix = KMatrix::map(cpFn, numOpt, numOpt);

  // probability starts as uniform distribution (column vector)
  auto p = KMatrix(numOpt, 1, 1.0) / numOpt;  // all 1/n
  auto q = p;
  unsigned int iMax = 1000;  // 10-30 is typical
  unsigned int iter = 0;
  double change = 1.0;

  // do the markov calculation
  while (pTol < change)  { // && (iter < iMax)
    if (printP) {
      printf("Iteration  %u / %u \n", iter, iMax);
      cout << "pDist:" << endl;
      trans(p).mPrintf(" %.4f");
      cout << endl;
      printf("change: %.4e \n", change);
      cout << endl << flush;
    }
    auto ct = KMatrix(numOpt, numOpt);
    for (unsigned int i = 0; i < numOpt; i++) {
      for (unsigned int j = 0; j < numOpt; j++) {
        // See "Markov Voting with Incentives in KTAB" paper
        ct(i, j) = p(i, 0) * chlgProbMatrix(j, i);
      }
    }
    if (printP) {
      cout << "Ct:" << endl;
      ct.mPrintf("  %.3f");
      cout << endl << flush;
    }
    change = 0.0;
    for (unsigned int i = 0; i < numOpt; i++) {
      double qi = 0.0;
      for (unsigned int j = 0; j < numOpt; j++) {
        double vij = victProbMatrix(i, j);
        double cj = ct(i, j) + ct(j, i);
        qi = qi + vij* cj;
      }
      assert(0 <= qi); // double-check
      q(i, 0) = qi;
      double c = fabs(q(i, 0) - p(i, 0));
      change = (c > change) ? c : change;
    }
    // Newton method improves convergence.
    p = (p+q)/2.0;
    iter++;
    assert(fabs(sum(p) - 1.0) < pTol); // double-check
  }

  assert(iter < iMax); // no way to recover
  return p;
}
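
A minimal usage sketch, parallel to the Example #9 data and assuming the function is reachable as a static member like its siblings dispatched from Model::probCE2:

KMatrix c = KMatrix(2, 2, 0.0);
c(0, 1) = 30.0; c(1, 0) = 10.0;
KMatrix p = Model::markovIncentivePCE(c, VPModel::Linear);
// p is 2x1 and sums to 1; option 0, backed by the stronger coalition, gets the larger share
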