Indexes Project::keys() {
    Indexes keys;
    for (Indexes k = source->keys(); ! nil(k); ++k)
        if (subset(flds, *k))
            keys.push(*k);
    return nil(keys) ? Indexes(flds) : keys;
}
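Project::keys() keeps only the source keys whose columns are a subset of the projected columns, falling back to the projection itself when none qualify. Here Fields and Indexes appear to be Suneido's Lisp-based list types; below is a minimal sketch of the same containment logic using standard-library stand-ins rather than the real Lisp/gcstring machinery:

#include <algorithm>
#include <set>
#include <string>
#include <vector>

using Field = std::string;
using Fields = std::vector<Field>;      // stand-in for Suneido's Lisp<gcstring>
using IndexList = std::vector<Fields>;  // stand-in for Indexes (a list of keys)

// Keep only the source keys whose columns all survive the projection;
// if none do, the projected columns themselves serve as the key.
IndexList projectKeys(const IndexList& sourceKeys, const Fields& projected) {
    std::set<Field> cols(projected.begin(), projected.end());
    IndexList keys;
    for (const Fields& k : sourceKeys)
        if (std::all_of(k.begin(), k.end(),
                        [&](const Field& f) { return cols.count(f) != 0; }))
            keys.push_back(k);
    if (keys.empty())
        keys.push_back(projected);  // mirrors the `Indexes(flds)` fallback
    return keys;
}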
SkinRegistry::Indexes SkinRegistry::FindIndexes(const std::wstring& folderPath, const std::wstring& file) {
    const int folderIndex = FindFolderIndex(folderPath);
    if (folderIndex != -1) {
        const Folder& skinFolder = m_Folders[folderIndex];
        const WCHAR* fileSz = file.c_str();
        for (size_t i = 0, isize = skinFolder.files.size(); i < isize; ++i) {
            if (_wcsicmp(skinFolder.files[i].filename.c_str(), fileSz) == 0) {
                return Indexes(folderIndex, (int)i);
            }
        }
    }

    return Indexes::Invalid();  // Not found.
}
SkinRegistry::Indexes SkinRegistry::FindIndexesForID(UINT id) {
    if (id >= ID_CONFIG_FIRST && id <= ID_CONFIG_LAST) {
        // Check which skin was selected
        for (size_t i = 0, isize = m_Folders.size(); i < isize; ++i) {
            const Folder& skinFolder = m_Folders[i];
            if (id >= skinFolder.baseID &&
                id < (skinFolder.baseID + skinFolder.files.size())) {
                return Indexes((int)i, (int)(id - skinFolder.baseID));
            }
        }
    }

    return Indexes::Invalid();  // Not found.
}
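Both lookups return a small SkinRegistry::Indexes value pairing a folder index with a file index, with Indexes::Invalid() as the not-found sentinel. A minimal sketch of what such a pair type could look like (the member names here are assumptions, not Rainmeter's actual declaration):

// Hypothetical sketch of a folder/file index pair with an "invalid" sentinel.
struct Indexes {
    int folder;  // index into m_Folders
    int file;    // index into m_Folders[folder].files

    Indexes(int folderIndex, int fileIndex) : folder(folderIndex), file(fileIndex) {}

    bool IsValid() const { return folder != -1 && file != -1; }

    static Indexes Invalid() { return Indexes(-1, -1); }
};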
void OneDimMomDist::updateDistribution(ParticleSet& PtclSet, TrialWaveFunction& Psi, IndexType NumCycles) {
    for (IndexType cycle = 0; cycle < NumCycles; cycle++) {
        pcp->NewWalker();
        TinyVector<IndexType, 1> Indexes(0.0);
        PosType dr(0, 0, 0);
        placeIntsInBin(Indexes, 1.0);
        totalNumSamples++;
        for (Indexes[0] = 1; Indexes[0] < NumPts[0]; Indexes[0]++) {
            dr[0] = dr[1] = dr[2] += Spacing[0]; // dr[0] += Spacing[0];
            IndexType partToDisplace = (*pcp)();
            PtclSet.makeMove(partToDisplace, dr);
            placeIntsInBin(Indexes, Psi.ratio(PtclSet, partToDisplace));
            totalNumSamples++;
            PtclSet.rejectMove(partToDisplace);
        }
    }
}
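The inner loop steps one randomly chosen particle along a diagonal grid of displacements, accumulates the wavefunction ratio for each bin via placeIntsInBin, and then rejects the move so the walker is left unchanged. A hedged sketch of that propose/evaluate/reject step in isolation, assuming the same ParticleSet and TrialWaveFunction interfaces as the snippet and a real-valued ratio:

// Hypothetical helper illustrating the sampling pattern used above: make a
// trial displacement, record the wavefunction ratio, restore the walker.
double sampleRatioAt(ParticleSet& PtclSet, TrialWaveFunction& Psi,
                     IndexType particle, const PosType& dr) {
    PtclSet.makeMove(particle, dr);               // trial move only
    double ratio = Psi.ratio(PtclSet, particle);  // Psi(r + dr) / Psi(r)
    PtclSet.rejectMove(particle);                 // leave the configuration unchanged
    return ratio;
}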
void DurRecoveryUnit::commitUnitOfWork() {
    invariant(inAUnitOfWork());
    invariant(!_mustRollback);

    if (!inOutermostUnitOfWork()) {
        // If we are nested, make all changes for this level part of the containing UnitOfWork.
        // They will be added to the global damages list once the outermost UnitOfWork commits,
        // which it must now do.
        if (haveUncommitedChangesAtCurrentLevel()) {
            _startOfUncommittedChangesForLevel.back() =
                Indexes(_changes.size(), _writes.size());
        }
        return;
    }

    commitChanges();

    // global journal flush opportunity
    getDur().commitIfNeeded();
}
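At each nesting level the recovery unit records how many entries _changes and _writes held when the level began, so a nested commit can hand its work to the containing UnitOfWork. A hedged sketch of that bookkeeping pair (the real DurRecoveryUnit::Indexes declaration may differ):

#include <cstddef>

// Hypothetical sketch of the per-level bookkeeping pair: the sizes of the
// change and write logs at the moment a nesting level began.
struct Indexes {
    Indexes(size_t changeIndex, size_t writeIndex)
        : change(changeIndex), write(writeIndex) {}
    size_t change;  // first _changes entry belonging to this level
    size_t write;   // first _writes entry belonging to this level
};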
double Project::optimize2(const Fields& index, const Fields& needs,
    const Fields& firstneeds, bool is_cursor, bool freeze) {
    if (strategy == COPY)
        return source->optimize(index, needs, firstneeds, is_cursor, freeze);
    // look for index containing result key columns as prefix
    Fields best_index;
    double best_cost = IMPOSSIBLE;
    Indexes idxs = nil(index) ? source->indexes() : Indexes(index);
    Lisp<Fixed> fix = source->fixed();
    Fields fldswof = withoutFixed(flds, fix);
    for (; ! nil(idxs); ++idxs) {
        Fields ix = *idxs;
        if (prefix_set(withoutFixed(ix, fix), fldswof)) {
            double cost = source->optimize1(ix, needs, firstneeds, is_cursor, false);
            if (cost < best_cost) {
                best_cost = cost;
                best_index = ix;
            }
        }
    }
    if (nil(best_index)) {
        if (is_cursor)
            return IMPOSSIBLE;
        if (freeze)
            strategy = LOOKUP;
        return 2 * source->optimize(index, needs, firstneeds, is_cursor, freeze); // 2* for lookups
    } else {
        if (! freeze)
            return best_cost;
        strategy = SEQUENTIAL;
        via = best_index;
        // NOTE: optimize1 to avoid tempindex
        return source->optimize1(best_index, needs, firstneeds, is_cursor, freeze);
    }
}
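optimize2 scans the candidate indexes, keeps those whose non-fixed columns form a prefix-set of the projected columns, and remembers the cheapest one. A simplified sketch of that select-the-minimum-cost pattern, with callbacks standing in for the prefix_set/withoutFixed test and for source->optimize1():

#include <functional>
#include <limits>
#include <string>
#include <utility>
#include <vector>

using Fields = std::vector<std::string>;

// Minimal sketch of the "pick the cheapest eligible index" loop above.
std::pair<Fields, double> cheapestIndex(
    const std::vector<Fields>& candidates,
    const std::function<bool(const Fields&)>& eligible,
    const std::function<double(const Fields&)>& cost) {
    Fields best;
    double bestCost = std::numeric_limits<double>::infinity();  // stands in for IMPOSSIBLE
    for (const Fields& ix : candidates) {
        if (!eligible(ix))
            continue;
        double c = cost(ix);
        if (c < bestCost) {
            bestCost = c;
            best = ix;
        }
    }
    return {best, bestCost};
}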
void DurRecoveryUnit::beginUnitOfWork() {
    _startOfUncommittedChangesForLevel.push_back(Indexes(_changes.size(), _writes.size()));
}
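beginUnitOfWork pushes a snapshot of the current log sizes; commitUnitOfWork (above) either folds a nested level into its parent or, at the outermost level, commits and offers a journal flush. A hypothetical sketch of how the two nest, using only the methods shown here; real callers typically go through an RAII wrapper, and rollback/level pop-off is handled elsewhere and omitted:

void nestedWork(DurRecoveryUnit& ru) {
    ru.beginUnitOfWork();   // outer level: snapshot (_changes, _writes) sizes
    ru.beginUnitOfWork();   // inner level: another snapshot
    // ... register changes and writes ...
    ru.commitUnitOfWork();  // inner: changes become part of the containing level
    ru.commitUnitOfWork();  // outer: commitChanges() and a journal flush opportunity
}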