// Grow the backing storage so at least __n elements fit without
// reallocation.  No-op when the current capacity already suffices.
// Throws (via _M_throw_length_error) when __n exceeds max_size().
// Existing elements are copied into the new buffer before the old one
// is released.
void _VECTOR_IMPL<_Tp, _Alloc>::reserve(size_type __n) {
  if (capacity() < __n) {
    if (max_size() < __n) {
      this->_M_throw_length_error();
    }
    const size_type __old_size = size();
    pointer __tmp;
    if (this->_M_start) {
      // Copy the live elements into a fresh buffer, then destroy and
      // deallocate the old storage.
      __tmp = _M_allocate_and_copy(__n, this->_M_start, this->_M_finish);
      _M_clear();
    } else {
      // Empty vector: just allocate raw storage.
      __tmp = this->_M_end_of_storage.allocate(__n);
    }
    // Re-anchor start/finish/end-of-storage on the new buffer.
    _M_set(__tmp, __tmp + __old_size, __tmp + __n);
  }
}
// Replace the contents with the first `len` bytes of `str`.
// The buffer is reused in place when it is big enough but not wastefully
// large (no more than 3*(len + 8)); otherwise a fresh string of the right
// size is built and swapped in.  The in-place path uses memmove so `str`
// may alias the current buffer.
TiXmlString& TiXmlString::assign(const char* str, size_type len)
{
	size_type currentCap = capacity();
	bool reuseInPlace = (len <= currentCap) && (currentCap <= 3*(len + 8));
	if (reuseInPlace)
	{
		memmove(start(), str, len);
		set_size(len);
	}
	else
	{
		TiXmlString freshCopy;
		freshCopy.init(len);
		memcpy(freshCopy.start(), str, len);
		swap(freshCopy);
	}
	return *this;
}
// Report in maxQueryCount how many units of `thing` this container
// could accept, and whether adding `count` of them is possible.
ReturnValue Container::__queryMaxCount(int32_t index, const Thing* thing, uint32_t count,
	uint32_t& maxQueryCount, uint32_t flags) const
{
	const Item* item = thing->getItem();
	if(item == NULL){
		maxQueryCount = 0;
		return RET_NOTPOSSIBLE;
	}

	if( ((flags & FLAG_NOLIMIT) == FLAG_NOLIMIT) ){
		// Capacity checks are bypassed: accept everything requested.
		maxQueryCount = std::max((uint32_t)1, count);
		return RET_NOERROR;
	}

	int32_t freeSlots = std::max((int32_t)(capacity() - size()), (int32_t)0);

	if(item->isStackable()){
		uint32_t n = 0;
		if(index != INDEX_WHEREEVER){
			// A specific slot was requested: a matching stack there can
			// absorb items up to the 100-per-stack limit.
			const Thing* destThing = __getThing(index);
			const Item* destItem = NULL;
			if(destThing)
				destItem = destThing->getItem();

			if(destItem && destItem->getID() == item->getID()){
				n = 100 - destItem->getItemCount();
			}
		}

		// Each free slot holds a full stack of 100, plus the room left in
		// the partially filled destination stack.
		maxQueryCount = freeSlots * 100 + n;
		if(maxQueryCount < count){
			return RET_CONTAINERNOTENOUGHROOM;
		}
	}
	else{
		// Non-stackable: one item per free slot.
		maxQueryCount = freeSlots;
		if(maxQueryCount == 0){
			return RET_CONTAINERNOTENOUGHROOM;
		}
	}

	return RET_NOERROR;
}
/* Dump every bucket of the hash map to stdout.  Empty buckets are
 * skipped; occupied buckets print their index followed by each
 * key/value pair in chain order. */
void printMap (struct hashMap * ht)
{
    int bucket;
    struct hashLink *link;
    for (bucket = 0; bucket < capacity(ht); bucket++) {
        link = ht->table[bucket];
        if (link) {
            printf("\nBucket Index %d -> ", bucket);
        }
        for (; link; link = link->next) {
            printf("Key:%s|", link->key);
            printValue(link->value);
            printf(" -> ");
        }
    }
}
void erase( const array_map<K,V,Cmp>& t ) { iterator i = begin(), I = end(); const_iterator j = t.begin(), J = t.end(); data_type* x = new data_type[ capacity() ], *X = x; while( i != I && j != J ) { if( m_cmp( i->first, j->first ) ) *X++ = *i++; else if( m_cmp( j->first, i->first ) ) ++j; else /* *i==*j */ ++i, ++j; } if( x != X ) { while( i != I ) *X++ = *i++; delete[] m_data; m_data = x; m_size = X - x; } else delete[] x; }
// Remove `t` from the open-addressed table, if present.
// Probing advances with stride 5 and wraps with `& max`, which assumes
// the capacity is a power of two -- TODO confirm.  Termination relies
// on the table always containing at least one res_empty slot.
void erase(T t) {
  assert(t != res_empty && t != res_del && "Key cannot be res_empty or res_del!");
  if (size_ == 0) {
    return;
  }
  size_t max = capacity() - 1;
  size_t spot = hash_value(t) & max;
  // Follow the probe chain until we hit the key or a never-used slot.
  while (elements[spot].first != res_empty && elements[spot].first != t) {
    spot = (spot + 5) & max;
  }
  if (elements[spot].first == t) {
    // Tombstone the slot (res_del) instead of emptying it, so probe
    // chains for other keys that passed through here stay intact.
    elements[spot].first = res_del;
    elements[spot].second = V();
    --size_;
  }
}
// Check whether `thing` may be added to this container, walking the
// parent chain and ultimately deferring to the topmost parent cylinder.
ReturnValue Container::__queryAdd(int32_t index, const Thing* thing, uint32_t count,
	uint32_t flags) const
{
	bool childIsOwner = ((flags & FLAG_CHILDISOWNER) == FLAG_CHILDISOWNER);
	if(childIsOwner){
		//a child container is querying, since we are the top container (not carried by a player)
		//just return with no error.
		return RET_NOERROR;
	}

	const Item* item = thing->getItem();
	if(item == NULL){
		return RET_NOTPOSSIBLE;
	}

	if(!item->isPickupable()){
		return RET_CANNOTPICKUP;
	}

	if(item == this){
		return RET_THISISIMPOSSIBLE;
	}

	// Refuse to place a container inside itself: walk up our parent
	// chain looking for `thing`.
	const Cylinder* cylinder = getParent();
	while(cylinder){
		if(cylinder == thing){
			return RET_THISISIMPOSSIBLE;
		}

		cylinder = cylinder->getParent();
	}

	// Capacity only matters for "anywhere" placement without the
	// no-limit override.
	bool skipLimit = ((flags & FLAG_NOLIMIT) == FLAG_NOLIMIT);
	if(index == INDEX_WHEREEVER && !skipLimit){
		if(size() >= capacity())
			return RET_CONTAINERNOTENOUGHROOM;
	}

	// Delegate the final decision to the topmost parent, flagging the
	// recursive query as child-owned so it does not recurse again.
	const Cylinder* topParent = getTopParent();
	if(topParent != this){
		return topParent->__queryAdd(INDEX_WHEREEVER, item, count, flags | FLAG_CHILDISOWNER);
	}
	else
		return RET_NOERROR;
}
// Insert `n` copies of `c` before position `p`.
// When spare capacity suffices, shifts the tail right by `n` (back to
// front so the overlapping ranges copy safely) and fills the gap;
// otherwise delegates to insert_aux_filln, which reallocates.
// NOTE(review): returns p + n (one past the last inserted char) rather
// than the std::string convention of the first inserted char -- confirm
// callers expect this.
string::iterator string::insert(iterator p, size_t n, char c) {
  auto lengthOfLeft = capacity() - size();
  if (n <= lengthOfLeft) {
    // Shift [p, finish_) right by n, starting from the back.
    for (iterator it = finish_ - 1; it >= p; --it) {
      *(it + n) = *(it);
    }
    mystl::uninitialized_fill_n(p, n, c);
    finish_ += n;
    return (p + n);
  } else {
    return insert_aux_filln(p, n, c);
  }
}
// Erase the reserved flash page(s) via the flash controller registers
// (STM32 FPEC-style interface).  Returns false if unlocking fails or if
// post-erase verification finds a byte that is not 0xff.  The whole
// operation runs with interrupts disabled.
bool NonVolatileStorage::erase()
{
    FastInterruptDisableLock dLock;
    if(IRQunlock()==false) return false;
    // Wait for any in-progress flash operation to complete.
    while(FLASH->SR & FLASH_SR_BSY) ;
    FLASH->CR |= FLASH_CR_PER;   // select page-erase mode
    FLASH->AR=baseAddress;       // address of the page to erase
    FLASH->CR |= FLASH_CR_STRT;  // start the erase
    while(FLASH->SR & FLASH_SR_BSY) ;
    FLASH->CR &= ~FLASH_CR_PER;
    FLASH->CR |= FLASH_CR_LOCK;  // re-lock the flash controller
    // Verify: erased flash must read back as all 0xff.
    for(int i=0;i<capacity();i++)
        if(*reinterpret_cast<unsigned char*>(baseAddress+i)!=0xff) return false;
    return true;
}
/* Print the entire map using the caller-supplied printers for keys and
 * values.  Occupied buckets print their index, then every link in the
 * chain in order. */
void printMap (struct hashMap * ht, keyPrinter kp, valPrinter vp)
{
    int idx;
    struct hashLink *cur;
    for (idx = 0; idx < capacity(ht); idx++) {
        cur = ht->table[idx];
        if (cur) {
            printf("\nBucket Index %d -> ", idx);
        }
        for (; cur; cur = cur->next) {
            printf("Key:");
            (*kp)(cur->key);
            printf("| Value: ");
            (*vp)(cur->value);
            printf(" -> ");
        }
    }
}
// Append `c`, growing the buffer geometrically (doubling) when full.
//
// Bug fixed: after copying into the new buffer the old code called
// destroy(start_, end_), running destructors over the raw, never-
// constructed slots in [finish_, end_) as well as the live elements.
// Only the constructed range [start_, finish_) may be destroyed.
void push_back(const_reference &c){
    if (start_ == NULL){
        // First insertion: allocate a single slot.
        start_ = allocate(1);
        finish_ = start_;
        end_ = start_+1;
    }
    else if (finish_ == end_) {
        // Full: copy everything into a buffer twice the size, then
        // release the old storage.
        size_type len = capacity();
        size_type cur_len = size();
        pointer new_start = allocate(len*2);
        std::uninitialized_copy(start_, finish_, new_start);
        destroy(start_, finish_);   // only the constructed elements
        deallocate(start_, len);
        start_ = new_start;
        finish_ = start_+cur_len;
        end_ = start_+len*2;
    }
    construct(finish_++, c);
}
// Reset this region's humongous bookkeeping after the humongous object
// it belonged to has been reclaimed.
void HeapRegion::clear_humongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    // Restore the original end; a "starts humongous" region may have
    // had end() pulled in to the humongous object's end.
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;
}
// Free a StructArray whose refcount has dropped to zero: decref every
// element, detach any strong iterators pointing at it, and return the
// storage (header + one TypedValue per capacity slot) to the memory
// manager.
void StructArray::Release(ArrayData* ad) {
  assert(ad->isRefCounted());
  assert(ad->hasExactlyOneRef());
  auto array = asStructArray(ad);

  auto const size = array->size();
  auto const data = array->data();
  auto const stop = data + size;
  // Drop a reference on each live element.
  for (auto ptr = data; ptr != stop; ++ptr) {
    tvRefcountedDecRef(ptr);
  }
  if (UNLIKELY(strong_iterators_exist())) {
    free_strong_iterators(ad);
  }

  // The allocation covers the full capacity, not just the used size.
  auto const cap = array->capacity();
  MM().objFree(array, sizeof(StructArray) + sizeof(TypedValue) * cap);
}
/// Ensure there's enough total space in the buffer to hold \a n bytes. void reserve(size_t n) { if (n <= capacity()) return; size_t rd_offset = m_rd_ptr - m_begin; size_t wr_offset = m_wr_ptr - m_begin; auto dirty_sz = wr_offset - rd_offset; char* old_begin = m_begin; auto old_sz = max_size(); auto new_sz = dirty_sz + n; m_begin = alloc_t::allocate(new_sz); m_end = m_begin + new_sz; if (dirty_sz > 0) memcpy(m_begin, old_begin+rd_offset, dirty_sz); m_rd_ptr = m_begin; m_wr_ptr = m_begin + dirty_sz; if (old_begin != m_data) alloc_t::deallocate(old_begin, old_sz); }
/// Outputs the results, according to the given flags class_type& write(size_type cchTotal, size_type numResults, ff_string_slice_t const* results, int flags) { const ff_string_slice_t crlf = fastformat_getNewlineForPlatform(); const size_type requiredSize = size() + cchTotal + ((flags::ff_newLine & flags) ? crlf.len : 0) ; if(requiredSize > capacity()) { throw std::out_of_range("character buffer sink capacity exceeded"); } else { char_type* p = &m_buffer[0] + size(); // next we concatenate all the slices { for(size_type i = 0; i < numResults; ++i) { ff_string_slice_t const& slice = results[i]; ::memcpy(p, slice.ptr, slice.len * sizeof(char_type)); p += slice.len; }} m_len += cchTotal; // then append the new line, if required if(flags::ff_newLine & flags) { ::memcpy(p, crlf.ptr, crlf.len * sizeof(char_type)); p += crlf.len; m_len += crlf.len; } FASTFORMAT_CONTRACT_ENFORCE_POSTCONDITION_STATE_APPL_LAYER(p == &m_buffer[0] + size(), "char_buffer sink writing logic failed: write pointer in wrong place"); } return *this; }
// Check whether `thing` may be added to this container (inbox-aware
// variant with an optional acting creature).
ReturnValue Container::__queryAdd(int32_t index, const Thing* thing, uint32_t count,
	uint32_t flags, Creature* actor/* = NULL*/) const
{
	bool childIsOwner = hasBitSet(FLAG_CHILDISOWNER, flags);
	if(childIsOwner)
	{
		//a child container is querying, since we are the top container (not carried by a player)
		//just return with no error.
		return RET_NOERROR;
	}

	const Item* item = thing->getItem();
	if(!item)
		return RET_NOTPOSSIBLE;

	if(!item->isPickupable())
		return RET_CANNOTPICKUP;

	if(item == this)
		return RET_THISISIMPOSSIBLE;

	bool isInbox = false;
	if(const Container* container = item->getContainer())
	{
		// Walk our parent chain: refuse to put a container into one of
		// its own children, and note whether we sit inside an Inbox
		// (only relevant when FLAG_NOLIMIT is not set).
		for(const Cylinder* cylinder = getParent(); cylinder; cylinder = cylinder->getParent())
		{
			if(cylinder == container)
				return RET_THISISIMPOSSIBLE;

			if(!hasBitSet(FLAG_NOLIMIT, flags) && !isInbox && dynamic_cast<const Inbox*>(cylinder))
				isInbox = true;
		}
	}

	// Reject when inside an inbox, or when placing "anywhere" into a
	// full container without the no-limit override.
	if(isInbox || (index == INDEX_WHEREEVER && size() >= capacity() && !hasBitSet(FLAG_NOLIMIT, flags)))
		return RET_CONTAINERNOTENOUGHROOM;

	// Let the topmost parent make the final call, marking the recursive
	// query as child-owned so it does not recurse again.
	const Cylinder* topParent = getTopParent();
	if(topParent != this)
		return topParent->__queryAdd(INDEX_WHEREEVER, item, count, flags | FLAG_CHILDISOWNER, actor);

	return RET_NOERROR;
}
// Resize the young generation, then lay out eden/survivor spaces inside
// it.  When the generation-level resize declines, the spaces are left
// untouched.
void ASParNewGeneration::resize(size_t eden_size, size_t survivor_size) {
  // Resize the generation if needed. If the generation resize
  // reports false, do not attempt to resize the spaces.
  if (!resize_generation(eden_size, survivor_size)) {
    return;
  }

  // Then we lay out the spaces inside the generation
  resize_spaces(eden_size, survivor_size);
  space_invariants();

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("Young generation size: "
      "desired eden: " SIZE_FORMAT " survivor: " SIZE_FORMAT
      " used: " SIZE_FORMAT " capacity: " SIZE_FORMAT
      " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
      eden_size, survivor_size, used(), capacity(),
      max_gen_size(), min_gen_size());
  }
}
// Look up `t` in the open-addressed table, probing with stride 5.
// Returns a default-constructed (not-found) iterator when the key is
// absent or the table is empty.
const_iterator find(T t) const {
  assert(t != res_empty && t != res_del && "Key cannot be res_empty or res_del!");
  const_iterator result;
  if (size_ == 0) {
    return result;   // empty table: nothing to probe
  }
  size_t mask = capacity() - 1;
  size_t idx = hash_value(t) & mask;
  // Follow the probe chain until the key or a never-used slot appears.
  for (; elements[idx].first != res_empty && elements[idx].first != t;
       idx = (idx + 5) & mask) {
  }
  if (elements[idx].first == t) {
    result.fus = this;
    result.i = idx;
  }
  return result;
}
// Grow the vector's backing store to hold `new_capacity` items.
// Returns the resulting capacity, or NO_MEMORY on allocation failure.
// NOTE(review): the guard compares new_capacity against size(), not
// capacity(), so a request between size() and the current capacity
// still reallocates, and a request at or below size() is rejected
// (shrinking is unsupported) -- confirm this matches callers' intent.
ssize_t VectorImpl::setCapacity(size_t new_capacity) {
  size_t current_capacity = capacity();
  ssize_t amount = new_capacity - size();
  if (amount <= 0) {
    // we can't reduce the capacity
    return current_capacity;
  }
  SharedBuffer* sb = SharedBuffer::alloc(new_capacity * mItemSize);
  if (sb) {
    void* array = sb->data();
    // Copy the live items into the new buffer, then drop the old one.
    _do_copy(array, mStorage, size());
    release_storage();
    mStorage = const_cast<void*>(array);
  } else {
    return NO_MEMORY;
  }
  return new_capacity;
}
// Insert the range [first, last) before `position`.
// When the result would exceed capacity, a whole new vector is built
// (prefix + new range + suffix) and swapped in.  With spare capacity
// the range is only handled when position == end_ (plain append).
// NOTE(review): in-place insertion in the middle with sufficient
// capacity is unimplemented and trips assert(false) -- confirm callers
// never hit that path.
void insert(iterator position, InputIterator first, InputIterator last) {
  difference_type diff = std::distance(first, last);
  if (size() + diff > capacity()) {
    // Not enough room: rebuild into a temporary and swap it in.
    copying_vector temp(allocator_);
    temp.auto_reserve(size() + diff);
    temp.insert(temp.end_, begin_, position);
    temp.insert(temp.end_, first, last);
    temp.insert(temp.end_, position, end_);
    swap(temp);
  } else if (position == end_) {
    // Appending in place, one constructed element at a time.
    for (iterator i = first; i != last; ++i) {
      allocator_.construct(end_, *i);
      ++end_;
    }
  } else {
    assert(false);
  }
};
// Append [first, last) at the write cursor of this circular buffer,
// growing the buffer when free space is insufficient and wrapping the
// write cursor to the front when it reaches the physical end.
inline void buffer::insert(char const* first, char const* last)
{
	INVARIANT_CHECK;
	std::size_t n = last - first;
#ifdef TORRENT_BUFFER_DEBUG
	if (m_pending_copy)
	{
		std::copy(m_write_cursor - m_pending_copy, m_write_cursor
			, m_debug.end() - m_pending_copy);
		m_pending_copy = 0;
	}
	m_debug.insert(m_debug.end(), first, last);
#endif
	if (space_left() < n)
	{
		reserve(capacity() + n);
	}
	m_empty = false;

	// First chunk: from the write cursor up to the physical end of the
	// buffer or the end of the input, whichever comes first.
	char const* end = (m_last - m_write_cursor) < (std::ptrdiff_t)n
		? m_last : m_write_cursor + n;
	std::size_t copied = end - m_write_cursor;
	std::memcpy(m_write_cursor, first, copied);

	m_write_cursor += copied;
	if (m_write_cursor > m_read_end) m_read_end = m_write_cursor;
	first += copied;
	n -= copied;
	if (n == 0) return;

	// Wrap around and write the remainder at the front.
	assert(m_write_cursor == m_last);
	m_write_cursor = m_first;

	memcpy(m_write_cursor, first, n);
	m_write_cursor += n;
}
// Build the Gomory-Hu cut tree of an undirected graph: in the returned
// tree, edge (s, p[s]) carries w[s], the value of the s-p[s] min cut.
// Max-flow is computed with Edmonds-Karp (BFS augmenting paths) using
// the RESIDUE macro over `capacity` and `flow`; capacities of parallel
// edges are accumulated.
graph cutTree(const graph &g) {
  int n = g.size();
  Matrix capacity(n, Array(n)), flow(n, Array(n));
  rep(u,n) for(auto &e: g[u]) capacity[e.from][e.to] += e.w;

  vector<int> p(n), prev;
  vector<int> w(n);
  for (int s = 1; s < n; ++s) {
    int t = p[s]; // max-flow(s, t)
    rep(i,n) rep(j,n) flow[i][j] = 0;
    int total = 0;
    while (1) {
      // BFS for an augmenting path in the residual network.
      queue<int> Q; Q.push(s);
      prev.assign(n, -1); prev[s] = s;
      while (!Q.empty() && prev[t] < 0) {
        int u = Q.front(); Q.pop();
        for(auto &e: g[u])
          if (prev[e.to] < 0 && RESIDUE(u, e.to) > 0) {
            prev[e.to] = u;
            Q.push(e.to);
          }
      }
      if (prev[t] < 0) goto esc;
      // Push the bottleneck value along the path found.
      int inc = 1e9;
      for (int j = t; prev[j] != j; j = prev[j]) inc = min(inc, RESIDUE(prev[j], j));
      for (int j = t; prev[j] != j; j = prev[j])
        flow[prev[j]][j] += inc, flow[j][prev[j]] -= inc;
      total += inc;
    }
esc:w[s] = total; // make tree
    // Re-parent vertices that ended up on s's side of the cut.
    rep(u, n) if (u != s && prev[u] != -1 && p[u] == t) p[u] = s;
    if (prev[p[t]] != -1) p[s] = p[t], p[t] = s, w[s] = w[t], w[t] = total;
  }

  graph T(n); // (s, p[s]) is a tree edge of weight w[s]
  rep(s, n) if (s != p[s]) {
    T[ s ].push_back( Edge(s, p[s], w[s]) );
    T[p[s]].push_back( Edge(p[s], s, w[s]) );
  }
  return T;
}
//! //! Add given item to the tail end. Return true if successful (i.e., //! vector is not full). Return false otherwise. //! bool D64Vec::add(item_t item) { // Vector is not full. bool ok; if ((numItems_ < capacity()) || grow()) { item_[numItems_++] = item; ok = true; } // Vector is full. else { ok = false; } // return true if successful. return ok; }
// Initialize the process-wide static empty-string singleton and return
// it.  The character data lives immediately after the header, so the
// result is a flat, static, zero-length, zero-capacity string.
StringData* StringData::MakeEmpty() {
  void* vpEmpty = &s_theEmptyString;

  auto const sd = static_cast<StringData*>(vpEmpty);
  auto const data = reinterpret_cast<char*>(sd + 1);

  sd->m_data = data;
  sd->m_hdr.init(HeaderKind::String, StaticValue);
  sd->m_lenAndHash = 0; // len=0, hash=0
  data[0] = 0;          // NUL terminator for the empty payload
  sd->preCompute();

  assert(sd->m_len == 0);
  assert(sd->capacity() == 0);
  assert(sd->m_hdr.kind == HeaderKind::String);
  assert(sd->isFlat());
  assert(sd->isStatic());
  assert(sd->checkSane());
  return sd;
}
// Return a string with capacity for at least `cap` bytes: `this`
// unchanged when the current capacity suffices, otherwise a new flat
// string (over-reserved by 25%, clamped to MaxSize + 1) holding a copy
// of the contents.
StringData* StringData::reserve(size_t cap) {
  assert(!isImmutable() && !hasMultipleRefs());
  assert(isFlat());

  if (cap <= capacity()) return this;

  // Over-reserve by 25% to amortize repeated reserve calls.
  cap = std::min(cap + cap/4, size_t(MaxSize) + 1);
  auto const sd = Make(cap);
  auto const src = slice();
  auto const dst = sd->mutableData();
  sd->setSize(src.len);

  auto const mcret = memcpy(dst, src.ptr, src.len);
  auto const ret = static_cast<StringData*>(mcret) - 1;
  // Recalculating ret from mcret avoids a spill.

  assert(ret == sd);
  assert(ret->checkSane());
  return ret;
}
void Container::addThing(int32_t index, Thing* thing) { if (index >= static_cast<int32_t>(capacity())) { return /*RETURNVALUE_NOTPOSSIBLE*/; } Item* item = thing->getItem(); if (item == nullptr) { return /*RETURNVALUE_NOTPOSSIBLE*/; } item->setParent(this); itemlist.push_front(item); updateItemWeight(item->getWeight()); //send change to client if (getParent() && (getParent() != VirtualCylinder::virtualCylinder)) { onAddContainerItem(item); } }
// Return a new flat string holding only the first `len` bytes of this
// one.  Used to release excess capacity; `this` is left intact (the
// caller manages its refcount).
StringData* StringData::shrinkImpl(size_t len) {
  assert(!isImmutable() && !hasMultipleRefs());
  assert(isFlat());
  assert(len <= m_len);
  assert(len <= capacity());

  auto const sd = Make(len);
  auto const src = slice();
  auto const dst = sd->mutableData();
  assert(len <= src.len);
  sd->setSize(len);

  auto const mcret = memcpy(dst, src.ptr, len);
  auto const ret = static_cast<StringData*>(mcret) - 1;
  // Recalculating ret from mcret avoids a spill.

  assert(ret == sd);
  assert(ret->checkSane());
  return ret;
}
// Append a single ASCII character (must be < 0x80 so the string stays
// valid UTF-8).  Appending '\0' is a no-op.  May throw bad_alloc from
// reserve() when exceptions are enabled.
RTCString& RTCString::append(char ch)
{
    Assert((unsigned char)ch < 0x80); /* Don't create invalid UTF-8. */
    if (ch)
    {
        // allocate in chunks of 20 in case this gets called several times
        if (m_cch + 1 >= m_cbAllocated)
        {
            reserve(RT_ALIGN_Z(m_cch + 2, IPRT_MINISTRING_APPEND_ALIGNMENT));
            // calls realloc(cbBoth) and sets m_cbAllocated; may throw bad_alloc.
#ifndef RT_EXCEPTIONS_ENABLED
            AssertRelease(capacity() > m_cch + 1);
#endif
        }
        m_psz[m_cch] = ch;
        m_psz[++m_cch] = '\0';   // keep the string NUL-terminated
    }
    return *this;
}
// Copy-assignment: make this vector an element-wise copy of __x.
// Three cases minimize allocations and destructions:
//   1. __x larger than our capacity -> allocate-and-copy, drop old storage.
//   2. we hold at least as many elements -> assign over, destroy the tail.
//   3. we hold fewer -> assign over existing, copy-construct the rest
//      into raw storage.
_VECTOR_IMPL<_Tp,_Alloc>& _VECTOR_IMPL<_Tp,_Alloc>::operator=(const _VECTOR_IMPL<_Tp, _Alloc>& __x)
{
  if (&__x != this) {
    const size_type __xlen = __x.size();
    if (__xlen > capacity()) {
      // Case 1: need a bigger buffer.
      pointer __tmp = _M_allocate_and_copy(__xlen,
                                           __CONST_CAST(const_pointer, __x._M_start)+0,
                                           __CONST_CAST(const_pointer, __x._M_finish)+0);
      _M_clear();
      this->_M_start = __tmp;
      this->_M_end_of_storage._M_data = this->_M_start + __xlen;
    }
    else if (size() >= __xlen) {
      // Case 2: assign into existing elements, destroy the surplus.
      pointer __i = __copy_ptrs(__CONST_CAST(const_pointer, __x._M_start)+0,
                                __CONST_CAST(const_pointer, __x._M_finish)+0,
                                this->_M_start, _TrivialAss());
      _STLP_STD::_Destroy_Range(__i, this->_M_finish);
    }
    else {
      // Case 3: assign over what we have, then copy-construct the rest
      // into the uninitialized tail.
      __copy_ptrs(__CONST_CAST(const_pointer, __x._M_start),
                  __CONST_CAST(const_pointer, __x._M_start) + size(),
                  this->_M_start, _TrivialAss());
      __uninitialized_copy(__CONST_CAST(const_pointer, __x._M_start) + size(),
                           __CONST_CAST(const_pointer, __x._M_finish)+0,
                           this->_M_finish, _TrivialUCpy());
    }
    this->_M_finish = this->_M_start + __xlen;
  }
  return *this;
}
// Compute the axis-aligned bounding box of the vertices in this stream,
// returning the minimum corner in `minxyz` and the maximum in `maxxyz`.
//
// Bug fixed: the max-corner tests for the y and z axes compared against
// maxxyz.x() instead of maxxyz.y()/maxxyz.z(), so the maximum corner's
// y/z were only updated when the vertex's y/z happened to exceed the
// running x maximum.
//
// NOTE(review): iterates capacity() vertices rather than a used-count;
// confirm the stream is fully populated.
void MemVertexstream::calcMinmax(math::Vector3f& minxyz,math::Vector3f& maxxyz)
{
	Vertexiterator viterator(*this);
	// Seed both corners with the first vertex.
	minxyz=maxxyz=(viterator.position());
	for (ion_uint32 v=0;v<capacity();++v) {
		const math::Vector3f &vec=viterator.position();
		if (minxyz.x()>vec.x()) minxyz.x()=vec.x();
		if (minxyz.y()>vec.y()) minxyz.y()=vec.y();
		if (minxyz.z()>vec.z()) minxyz.z()=vec.z();
		if (maxxyz.x()<vec.x()) maxxyz.x()=vec.x();
		if (maxxyz.y()<vec.y()) maxxyz.y()=vec.y();
		if (maxxyz.z()<vec.z()) maxxyz.z()=vec.z();
		++viterator;
	}
}