void indri::collection::Repository::_merge(index_state& state) {
  // this is only legal if we're not readOnly
  if (_readOnly)
    return;

  size_t memoryBound = (size_t) (0.75 * _memory);
  std::vector<indri::index::Index*>* result = new std::vector<indri::index::Index*>;

  if (state->size() <= 2 ||
      (_mergeMemory(*state) < memoryBound && _mergeFiles(*state) < MERGE_FILE_LIMIT)) {
    indri::index::Index* index = _mergeStage(state);
    result->push_back(index);
  } else {
    // divide and conquer
    index_state first = new std::vector<indri::index::Index*>;
    index_state second = new std::vector<indri::index::Index*>;

    first->assign(state->begin(), state->begin() + state->size() / 2);
    second->assign(state->begin() + state->size() / 2, state->end());

    // release the previous state object
    state = 0;

    _merge(second);
    _merge(first);

    std::copy(first->begin(), first->end(), std::back_inserter(*result));
    std::copy(second->begin(), second->end(), std::back_inserter(*result));
  }

  state = result;
}
// a step
void epoch() {
  // this->_pop.clear();
  // _pareto_front.clear();
  _selection(_parent_pop, _child_pop);

  parallel::p_for(parallel::range_t(0, _child_pop.size()),
                  mutate<crowd::Indiv<Phen> >(_child_pop));

#ifndef EA_EVAL_ALL
  _eval_subpop(_child_pop);
  _merge(_parent_pop, _child_pop, _mixed_pop);
#else
  _merge(_parent_pop, _child_pop, _mixed_pop);
  _eval_subpop(_mixed_pop);
#endif

  _apply_modifier(_mixed_pop);

#ifndef NDEBUG
  BOOST_FOREACH(indiv_t& ind, _mixed_pop)
    for (size_t i = 0; i < ind->fit().objs().size(); ++i) {
      assert(!std::isnan(ind->fit().objs()[i]));
    }
#endif

  _fill_nondominated_sort(_mixed_pop, _parent_pop);
  _mixed_pop.clear();
  _child_pop.clear();
  _convert_pop(_parent_pop, this->_pop);

  assert(_parent_pop.size() == Params::pop::size);
  assert(_pareto_front.size() <= Params::pop::size * 2);
  assert(_mixed_pop.size() == 0);
  // assert(_child_pop.size() == 0);
  assert(this->_pop.size() == Params::pop::size);
}
int main(int argc, char *argv[]) {
  graph_t gin;
  graph_t gout;
  args_t  args;

  startup("cmerge", argc, argv, NULL, NULL);

  memset(&args, 0, sizeof(args_t));
  _parse_opt(argc, argv, &args);

  if (ngdb_read(args.input, &gin)) {
    printf("Could not read in %s\n", args.input);
    goto fail;
  }

  printf("merging...\n");

  if (_merge(&gin, &gout, args.mergesets, args.nmergesets)) {
    printf("Could not perform merge\n");
    goto fail;
  }

  if (ngdb_write(&gout, args.output)) {
    printf("Could not write to %s\n", args.output);
    goto fail;
  }

  return 0;

fail:
  return 1;
}
void _split(T A[], T temp[], int begin, int end) {
  if ((end - begin) < 2)
    return;

  // Recursively split into halves until the ranges are small,
  // then merge on the way back up.
  int mid = (end + begin) / 2;

  // 16 is an arbitrary small-range cutoff: a power of two, chosen by precedent.
  if ((end - mid) <= 16) {
    insertion_sort(A, end, mid);   // mid included.
    insertion_sort(A, mid, begin); // mid excluded.
  }
  // Split if large enough.
  else {
    _split(A, temp, begin, mid); // mid will be excluded.
    _split(A, temp, mid, end);   // mid will be included.
  }

  _merge(A, temp, begin, mid, end); // Merge the halves back together.
  _copy(A, temp, begin, end);       // Copy the sorted range back to the original array.
}
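// The _merge and _copy helpers are not shown in the snippet above. A minimal
// sketch of what they might look like, assuming temp is a scratch buffer
// indexed like A (signatures inferred from the calls above):
template <typename T>
void _merge(T A[], T temp[], int begin, int mid, int end) {
  int i = begin, j = mid;
  // Take the smaller head of either half until both halves are exhausted.
  for (int k = begin; k < end; ++k) {
    if (i < mid && (j >= end || A[i] <= A[j]))
      temp[k] = A[i++];
    else
      temp[k] = A[j++];
  }
}

template <typename T>
void _copy(T A[], T temp[], int begin, int end) {
  // Copy the merged range from the scratch buffer back into A.
  for (int k = begin; k < end; ++k)
    A[k] = temp[k];
}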
/* Merge sort over the interval [begin, end). */
void _sort(ListNode *begin, ListNode *end) {
  // Zero or one node: already sorted.
  if (begin == NULL || begin->next == end) {
    return;
  }
  // Exactly two nodes: swap the values if they are out of order.
  else if (begin->next->next == end) {
    if (begin->val > begin->next->val) {
      int t = begin->val;
      begin->val = begin->next->val;
      begin->next->val = t;
    }
    return;
  }

  // Find the middle node with slow/fast pointers.
  ListNode *p1 = begin, *p2 = begin->next;
  while (p2 != end) {
    p1 = p1->next;
    p2 = p2->next;
    if (p2 != end) {
      p2 = p2->next;
    }
  }

  // Sort each half, then merge.
  _sort(begin, p1);
  _sort(p1, end);
  _merge(begin, p1, end);
}
static SdbListIter *_merge_sort(SdbListIter *head, SdbListComparator cmp) {
  SdbListIter *second;
  if (!head || !head->n) {
    return head;
  }
  second = _sdb_list_split (head);
  head = _merge_sort (head, cmp);
  second = _merge_sort (second, cmp);
  return _merge (head, second, cmp);
}
void _mergesort(std::vector<T>& arr, int64_t begin, int64_t end, std::vector<T>& scratch) {
  // A range of length <= 1 is trivially sorted.
  if (end - begin <= 1)
    return;

  int64_t mid = (begin + end) / 2;
  _mergesort(arr, begin, mid, scratch);
  _mergesort(arr, mid, end, scratch);
  _merge(arr, begin, mid, end, scratch);
  _copy(scratch, begin, end, arr);
}
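// A caller typically allocates the scratch buffer once and kicks off the
// recursion over the whole vector. A minimal driver sketch; the mergesort
// wrapper name is hypothetical, not part of the snippet above:
template <typename T>
void mergesort(std::vector<T>& arr) {
  std::vector<T> scratch(arr.size()); // one allocation shared by all merges
  _mergesort(arr, 0, static_cast<int64_t>(arr.size()), scratch);
}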
node<V>* _cut(node<V>* heap, node<V>* n) {
  if (n->next == n) {
    n->parent->child = NULL;
  } else {
    n->next->prev = n->prev;
    n->prev->next = n->next;
    n->parent->child = n->next;
  }
  n->parent->degree--; // the parent loses a child
  n->next = n->prev = n;
  n->marked = false;
  return _merge(heap, n);
}
void _MergeSort(int li, int ls) {
  if (li == ls)
    return;
  int middle = (li + ls) >> 1;
  _MergeSort(li, middle);
  _MergeSort(middle + 1, ls);
  _merge(li, middle, ls);
}
/* Substring sort */
void substringsort(const sauchar_t *T, const saidx_t *PA,
                   saidx_t *first, saidx_t *last,
                   saidx_t *buf, saidx_t bufsize,
                   saidx_t depth, saint_t lastsuffix) {
  saidx_t *a, *b;
  saidx_t *curbuf;
  saidx_t i, j, k;
  saidx_t curbufsize;

  if(lastsuffix != 0) { ++first; }

  for(a = first, i = 0; (a + SS_BLOCKSIZE) < last; a += SS_BLOCKSIZE, ++i) {
    _multikey_introsort(T, PA, a, a + SS_BLOCKSIZE, depth);
    curbuf = a + SS_BLOCKSIZE;
    curbufsize = last - (a + SS_BLOCKSIZE);
    if(curbufsize <= bufsize) { curbufsize = bufsize, curbuf = buf; }
    for(b = a, k = SS_BLOCKSIZE, j = i; j & 1; b -= k, k <<= 1, j >>= 1) {
      _merge(T, PA, b - k, b, b + k, curbuf, curbufsize, depth);
    }
  }

  _multikey_introsort(T, PA, a, last, depth);

  for(k = SS_BLOCKSIZE; i != 0; k <<= 1, i >>= 1) {
    if(i & 1) {
      _merge(T, PA, a - k, a, last, buf, bufsize, depth);
      a -= k;
    }
  }

  if(lastsuffix != 0) {
    /* Insert last type B* suffix. */
    for(a = first, i = *(first - 1);
        (a < last) && ((*a < 0) || (0 < _compare(T, PA + i, PA + *a, depth)));
        ++a) {
      *(a - 1) = *a;
    }
    *(a - 1) = i;
  }
}
node<V>* _removeMinimum(node<V>* n) {
  _unMarkAndUnParentAll(n->child);
  if (n->next == n) {
    n = n->child;
  } else {
    n->next->prev = n->prev;
    n->prev->next = n->next;
    n = _merge(n->next, n->child);
  }
  if (n == NULL) return n;

  // Consolidate: link together roots of equal degree.
  node<V>* trees[64] = {NULL};
  while (true) {
    if (trees[n->degree] != NULL) {
      node<V>* t = trees[n->degree];
      if (t == n) break;
      trees[n->degree] = NULL;
      if (n->value < t->value) {
        t->prev->next = t->next;
        t->next->prev = t->prev;
        _addChild(n, t);
      } else {
        t->prev->next = t->next;
        t->next->prev = t->prev;
        if (n->next == n) {
          t->next = t->prev = t;
          _addChild(t, n);
          n = t;
        } else {
          n->prev->next = t;
          n->next->prev = t;
          t->next = n->next;
          t->prev = n->prev;
          _addChild(t, n);
          n = t;
        }
      }
      continue;
    } else {
      trees[n->degree] = n;
    }
    n = n->next;
  }

  // Scan the root list once to find the new minimum.
  node<V>* min = n;
  node<V>* start = n;
  do {
    if (n->value < min->value) min = n;
    n = n->next;
  } while (n != start);
  return min;
}
list_t mergesort_l(list_t src, comparator_t cmp) {
  if (!src || !src->cdr)
    return src;

  /* Deal the nodes alternately into two queues, then sort and merge. */
  _queue_t qs[2] = { {NULL, NULL}, {NULL, NULL} };
  size_t i;
  for (i = 0; src; i++) {
    enqueue(&qs[i & 1], shift(&src), NULL);
  }

  return _merge(mergesort_l(qs[0].head, cmp),
                mergesort_l(qs[1].head, cmp), cmp);
}
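/* The _merge half is not shown above. A minimal sketch, assuming classic cons
 * cells with a `car` payload (only `cdr` appears in the snippet) and a
 * two-argument comparator over payloads: */
list_t _merge(list_t a, list_t b, comparator_t cmp) {
  if (!a) return b;
  if (!b) return a;
  if (cmp(a->car, b->car) <= 0) {
    /* a's head is smaller: keep it, merge the rest behind it. */
    a->cdr = _merge(a->cdr, b, cmp);
    return a;
  }
  b->cdr = _merge(a, b->cdr, cmp);
  return b;
}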
void _mergesort(int left, int right) {
  if (right == left)
    return;
  int middle = (left + right) >> 1;
  _mergesort(left, middle);
  _mergesort(middle + 1, right);
  _merge(left, middle, right);
}
#include <stdlib.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

void mergeSort(int *input, int n) {
  int *bak = malloc(sizeof(int) * n);
  int *handle = input; /* input of the current pass */
  int *store = bak;    /* output of the current pass */
  int width, i;

  for (width = 1; width < n; width = 2 * width) {
    for (i = 0; i < n; i = i + 2 * width) {
      _merge(handle, i, MIN(i + width, n), MIN(i + 2 * width, n), store);
    }
    /* Ping-pong the buffers: the freshly merged data becomes the next pass's input. */
    if (handle == input) handle = bak;
    else if (handle == bak) handle = input;
    if (store == input) store = bak;
    else if (store == bak) store = input;
  }

  /* After an odd number of passes the sorted data ends up in bak. */
  if (handle != input)
    memcpy(input, bak, sizeof(int) * n);

  free(bak);
}
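/* The _merge(src, lo, mid, hi, dst) helper is assumed by the call above but
 * not shown. A minimal sketch consistent with that signature: merge the
 * sorted runs src[lo, mid) and src[mid, hi) into dst[lo, hi). */
void _merge(int *src, int lo, int mid, int hi, int *dst) {
  int i = lo, j = mid, k;
  for (k = lo; k < hi; k++) {
    if (i < mid && (j >= hi || src[i] <= src[j]))
      dst[k] = src[i++];
    else
      dst[k] = src[j++];
  }
}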
void merge_sort(int* const a, const int high) {
  const int LOW = 0;
  const int TMPSIZE = high - LOW;
  const int STACKSIZE = log2(TMPSIZE) * 2 + 3;
  int sort_count = 0;
  strct_prms2 stack_prms[STACKSIZE]; // variable-length array: relies on a compiler extension
  std::unique_ptr<int[]> buff(new int[TMPSIZE]);
  int* _buff = buff.get();

  stack_prms[0].low = LOW;
  stack_prms[0].high = high - 1;
  stack_prms[0].check_ = 0;

  int _low = -1;
  int _high = -1;
  int _mid = -1;

  // Explicit-stack merge sort: each frame is visited twice; the second
  // visit (check_ == 1) merges the two halves sorted by the first visit.
  while (sort_count >= 0) {
    strct_prms2* _ptr = stack_prms + sort_count;
    _low = _ptr->low;
    _high = _ptr->high;
    if (_ptr->check_) {
      _merge(a, _buff, _low, _high);
      _ptr->check_ = 0;
      --sort_count;
    } else {
      if (_low < _high) {
        _ptr->check_ = 1;
        _mid = (_low + _high) >> 1;
        // Push the right half, then the left half, so the left is sorted first.
        ++sort_count; ++_ptr;
        _ptr->low = _mid + 1;
        _ptr->high = _high;
        _ptr->check_ = 0;
        ++sort_count; ++_ptr;
        _ptr->low = _low;
        _ptr->high = _mid;
        _ptr->check_ = 0;
      } else {
        --sort_count;
      }
    }
  }
}
/**
 * \brief Merge pairs of nodes and reverse the order.
 *
 * Given a list of \p src nodes, merge adjacent pairs and reverse the ordering
 * of the result. The resulting merged nodes are stored in \p dst. This is the
 * first merge pass when deleting the minimum node from the heap.
 *
 * \param [in] ph Pairing heap containing the nodes to merge.
 * \param [out] dst List into which the nodes are merged.
 * \param [in] src List from which to merge nodes.
 *
 * \post \c list_isempty(src)
 *
 * \note This operation has a time complexity of O(n) with respect to the
 * length of \p src.
 */
static void _merge_pairs_reverse(const struct pheap *ph, struct list *dst,
                                 struct list *src) {
  struct pheap_elem *left, *right;

  /* Move left to right, merging pairs */
  while (!list_isempty(src)) {
    left = containerof(list_popfront(src), struct pheap_elem, child_le);
    if (!list_isempty(src)) {
      right = containerof(list_popfront(src), struct pheap_elem, child_le);
      left = _merge(ph, left, right);
    }
    /* Push the merged element onto the front of the destination, so that
     * the result is in reverse order. */
    list_pushfront(dst, &left->child_le);
  }
}
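/* For context, a pairing-heap delete-min typically follows the pass above
 * with a second pass that folds the reversed pair list into a single root.
 * A hypothetical sketch using the same assumed list/containerof API as the
 * snippet above (the function name is an assumption, not from the source): */
static struct pheap_elem *_merge_pairs_fold(const struct pheap *ph,
                                            struct list *src) {
  struct pheap_elem *root = NULL, *next;
  while (!list_isempty(src)) {
    next = containerof(list_popfront(src), struct pheap_elem, child_le);
    root = root ? _merge(ph, root, next) : next;
  }
  return root; /* the new minimum of the heap */
}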
void indri::collection::Repository::_merge() {
  // this is only legal if we're not readOnly
  if (_readOnly)
    return;

  // grab a copy of the current state
  index_state state = indexes();
  index_state mergers = state;

  if (state->size() && state->back()->documentCount() == 0) {
    // if the current index is empty, don't need to add a new one; write the others
    mergers = new index_vector;
    mergers->assign(state->begin(), state->end() - 1);
  } else {
    // current index isn't empty, so add a new one and write the old ones
    _addMemoryIndex();
  }

  // no need to merge when there's only one index (or none)
  bool needsWrite = (mergers->size() > 1) ||
                    (mergers->size() == 1 &&
                     dynamic_cast<indri::index::MemoryIndex*>((*mergers)[0]));
  if (!needsWrite)
    return;

  state = 0;

  // merge all the indexes together
  while (needsWrite) {
    _merge(mergers);
    needsWrite = (mergers->size() > 1) ||
                 (mergers->size() == 1 &&
                  dynamic_cast<indri::index::MemoryIndex*>((*mergers)[0]));
  }

  _checkpoint();
}
void indri::collection::Repository::_write() {
  // this is only legal if we're not readOnly
  if (_readOnly)
    return;

  // grab a copy of the current state
  index_state state = indexes();

  // if the current index is empty, don't need to write it
  if (state->size() && state->back()->documentCount() == 0)
    return;

  // make a new MemoryIndex, cutting off the old one from updates
  _addMemoryIndex();

  // if we just added the first, no need to write the "old" one
  if (state->size() == 0)
    return;

  // write out the last index
  index_state lastState = new std::vector<indri::index::Index*>;
  lastState->push_back(state->back());
  state = 0;

  _merge(lastState);
  _checkpoint();
}
void _addChild(node<V>* parent, node<V>* child) {
  child->prev = child->next = child;
  child->parent = parent;
  parent->degree++;
  parent->child = _merge(parent->child, child);
}
void indri::collection::Repository::_trim() {
  // this is only legal if we're not readOnly
  if (_readOnly)
    return;

  // grab a copy of the current state
  index_state state = indexes();

  if (state->size() <= 3)
    return;

  size_t count = state->size();
  int position;

  // here's how this works:
  // we're trying to just 'trim' the indexes so that we merge
  // together the small indexes, leaving the large ones as they are.
  // We merge together a minimum of 3 indexes every time. We may merge
  // more, however. We start at the most recent indexes, and search
  // backward in time. When we get to an index that is significantly
  // larger than the previous index, we stop.

  // have to merge at least the last three indexes
  int firstDocumentCount = (int)(*state)[count-1]->documentCount();
  int lastDocumentCount = (int)(*state)[count-3]->documentCount();
  int documentCount = 0;

  // move back until we find a really big index--don't merge with that one
  for (position = count-4; position >= 0; position--) {
    // look at the document count of the next-older index
    documentCount = (int)(*state)[position]->documentCount();

    // break if we find an index more than 8 times as large
    // as the preceding one.
    if (documentCount > lastDocumentCount * 8.0) {
      position++;
      break;
    }

    lastDocumentCount = documentCount;
  }

  // make sure position is greater than or equal to 0
  position = lemur_compat::max<int>(position, 0);

  // make a new MemoryIndex, cutting off the old one from updates
  _addMemoryIndex();

  // write out the last index
  index_state substate = new std::vector<indri::index::Index*>;
  substate->assign(state->begin() + position, state->end());
  state = 0;

  // substate may be larger than 1 if we didn't have enough
  // memory to merge everything together. That's okay,
  // because we were just trimming.
  _merge(substate);
  _checkpoint();
}
shared_ptr<Way> WayAverager::average() {
  _sumMovement1 = 0.0;
  _sumMovement2 = 0.0;
  _moveCount1 = 0;
  _moveCount2 = 0;
  _maxMovement1 = 0.0;
  _maxMovement2 = 0.0;

  if (DirectionFinder::isSimilarDirection(_map.shared_from_this(), _w1, _w2) == false) {
    if (_w1->isOneWay() == true) {
      _w2->reverseOrder();
    } else {
      _w1->reverseOrder();
    }
  }

  shared_ptr<const LineString> ls1 = ElementConverter(_map.shared_from_this()).
      convertToLineString(_w1);
  shared_ptr<const LineString> ls2 = ElementConverter(_map.shared_from_this()).
      convertToLineString(_w2);

  // All of the fancy stats here are compliments of Mike Porter.

  // calculate standard deviation in meters
  Meters sd1 = _w1->getCircularError() / 2.0;
  Meters sd2 = _w2->getCircularError() / 2.0;
  // calculate variance
  double v1 = sd1 * sd1;
  double v2 = sd2 * sd2;
  // calculate weights for averaging
  double weight1 = 1 - v1 / (v1 + v2);
  double weight2 = 1 - v2 / (v1 + v2);
  Meters newAcc = 2.0 * sqrt(weight1 * weight1 * v1 + weight2 * weight2 * v2);

  shared_ptr<Way> result(new Way(_w1->getStatus(), _map.createNextWayId(), newAcc));
  _map.addWay(result);

  result->addNode(_merge(_w1->getNodeIds()[0], weight1, _w2->getNodeIds()[0], weight2));

  // we're getting the vectors after the above merge because the merge will change node ids.
  const std::vector<long>& ns1 = _w1->getNodeIds();
  const std::vector<long>& ns2 = _w2->getNodeIds();

  size_t i1 = 1;
  size_t i2 = 1;

  // while there is more than one point available in each line
  while (i1 < ns1.size() - 1 || i2 < ns2.size() - 1) {
    // if we're all out of ns1 points
    if (i1 == ns1.size() - 1) {
      result->addNode(_moveToLine(ns2[i2++], weight2, ls1.get(), weight1, 2));
    }
    // if we're all out of ns2 points
    else if (i2 == ns2.size() - 1) {
      result->addNode(_moveToLine(ns1[i1++], weight1, ls2.get(), weight2, 1));
    } else {
      const Coordinate& last = _map.getNode(result->getLastNodeId())->toCoordinate();
      const Coordinate& nc1 = _moveToLineAsCoordinate(ns1[i1], weight1, ls2.get(), weight2);
      const Coordinate& nc2 = _moveToLineAsCoordinate(ns2[i2], weight2, ls1.get(), weight1);
      if (nc1.distance(last) < nc2.distance(last)) {
        result->addNode(_moveToLine(ns1[i1++], weight1, ls2.get(), weight2, 1));
      } else {
        result->addNode(_moveToLine(ns2[i2++], weight2, ls1.get(), weight1, 2));
      }
    }
  }

  // merge the last two nodes and move to the average location
  result->addNode(_merge(ns1[i1], weight1, ns2[i2], weight2));

  // use the default tag merging mechanism
  Tags tags = TagMergerFactory::mergeTags(_w1->getTags(), _w2->getTags(), ElementType::Way);
  result->setTags(tags);

  _map.removeWay(_w1->getId());
  _map.removeWay(_w2->getId());

  _meanMovement1 = _sumMovement1 / (double)_moveCount1;
  _meanMovement2 = _sumMovement2 / (double)_moveCount2;

  return result;
}
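// The weights above reduce to inverse-variance weighting: weight1 == v2/(v1+v2)
// and weight2 == v1/(v1+v2), so the less noisy way pulls the average toward
// itself. A tiny standalone sketch of the arithmetic, with invented example
// values (not from the source):
#include <cmath>
#include <cstdio>

int main() {
  // Invented circular errors: way 1 (5 m) is twice as accurate as way 2 (10 m).
  double sd1 = 5.0 / 2.0, sd2 = 10.0 / 2.0; // standard deviations in meters
  double v1 = sd1 * sd1, v2 = sd2 * sd2;    // variances

  // Same formulas as in average(): each way is weighted by the
  // other way's share of the total variance.
  double weight1 = 1 - v1 / (v1 + v2); // == v2 / (v1 + v2) == 0.8
  double weight2 = 1 - v2 / (v1 + v2); // == v1 / (v1 + v2) == 0.2
  double newAcc = 2.0 * std::sqrt(weight1 * weight1 * v1 + weight2 * weight2 * v2);

  printf("w1=%.2f w2=%.2f newAcc=%.2f m\n", weight1, weight2, newAcc);
  // The merged error (about 4.47 m) is smaller than either input error.
  return 0;
}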
void merge(FibonacciHeap& other) {
  heap = _merge(heap, other.heap);
  other.heap = _empty();
}
node<V>* insert(V value) {
  node<V>* ret = _singleton(value);
  heap = _merge(heap, ret);
  return ret;
}
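// All of the Fibonacci heap snippets above (_cut, _removeMinimum, _addChild,
// merge, insert) lean on a node-level _merge that splices two circular doubly
// linked root lists and returns the smaller root. A minimal sketch consistent
// with the node<V> fields those snippets use:
template <typename V>
node<V>* _merge(node<V>* a, node<V>* b) {
  if (a == NULL) return b;
  if (b == NULL) return a;
  if (b->value < a->value) { // keep the smaller root in a
    node<V>* t = a;
    a = b;
    b = t;
  }
  // Splice the two circular lists together: a's successor chain is
  // reconnected behind b's predecessor chain.
  node<V>* an = a->next;
  node<V>* bp = b->prev;
  a->next = b;
  b->prev = a;
  an->prev = bp;
  bp->next = an;
  return a;
}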