// Append to `primes` every cached prime that is <= `limit`, extending the
// internal cache first so the answer is complete up to `limit`.
void Sieve::generate_primes(unsigned limit, std::vector<unsigned> &primes)
{
    // Make sure the cache covers the requested range.
    _extend(limit);

    // First cached prime strictly greater than `limit` marks the end of
    // the range we hand back to the caller.
    const auto past_limit
        = std::upper_bound(_primes.begin(), _primes.end(), limit);

    primes.reserve(static_cast<std::size_t>(past_limit - _primes.begin()));
    primes.insert(primes.end(), _primes.begin(), past_limit);
}
/*
 * Render a millisecond interval as a compact human-readable string
 * ("500ms", "2h5m3s", ...) into buf (capacity `size` bytes); returns buf.
 *
 * Sub-second intervals are printed in milliseconds; anything larger is
 * rounded UP to whole seconds and decomposed into d/h/m/s components.
 * `avail` presumably tracks remaining space and is updated by the
 * project's _extend() macro — TODO confirm against its definition.
 */
static char *_prettyprint(char *buf, int size, uint32_t interval)
{
    int avail = size;

    if (interval == 0) {
        /* we need at least 2 bytes in buf */
        strcpy(buf, "0");
        return buf;
    }
    if (interval < 1000) {
        _extend(buf, size, avail, "%ims", interval);
    } else {
        /* round up to whole seconds */
        if (interval % 1000) {
            interval = (interval / 1000) + 1;
        } else {
            interval /= 1000;
        }
        /*
         * BUGFIX: use >= so an exact multiple collapses into the larger
         * unit: 86400 s -> "1d" (was "24h"), 3600 s -> "1h" (was "60m"),
         * 60 s -> "1m" (was "60s").
         */
        if (interval >= 86400) {
            _extend(buf, size, avail, "%ud", interval / 86400);
            interval %= 86400;
        }
        if (interval >= 3600) {
            _extend(buf, size, avail, "%uh", interval / 3600);
            interval %= 3600;
        }
        if (interval >= 60) {
            _extend(buf, size, avail, "%um", interval / 60);
            interval %= 60;
        }
        if (interval)
            _extend(buf, size, avail, "%us", interval);
    }
    return buf;
}
// Return the next prime in sequence, growing the shared cache on demand.
// When a _limit is set and exhausted, returns _limit + 1 as a sentinel.
unsigned Sieve::iterator::next_prime()
{
    if (_index >= _primes.size()) {
        // Aim to roughly double the covered range; never past the limit.
        unsigned target = _primes[_index - 1] * 2;
        if (_limit > 0 && _limit < target)
            target = _limit;
        _extend(target);
        if (_index >= _primes.size())
            return _limit + 1; // no prime <= _limit remains
    }
    return _primes[_index++];
}
std::vector<MultiIndex> _extend(const std::vector<MultiIndex>& source, dim_t start, dim_t len) { assert (len != 0); if (len == 1) { std::vector<MultiIndex> next(source); for (std::size_t i = 0; i < source.size(); i++) { next[i][start] += 1; } return next; } else { // use divide and conquer approach std::vector<MultiIndex> lhs = _extend(source, start, len/2); std::vector<MultiIndex> rhs = _extend(source, start + len/2, len - len/2); std::vector<MultiIndex> sink(lhs.size() + rhs.size()); auto seek = strict_union(lhs.begin(), lhs.end(), rhs.begin(), rhs.end(), sink.begin(), std::less<MultiIndex>{}); sink.resize(seek - sink.begin()); return sink; } }
// Build a new ShapeEnum from `source` with every slice extended by one
// step; the result has one more slice than the input, and each
// per-dimension limit grows by one.
ShapeEnum<D, MultiIndex> extend(const ShapeEnum<D, MultiIndex>* source)
{
    std::vector< ShapeSlice<D, MultiIndex> > slices(source->n_slices() + 1);

    // Slice 0 is carried over unchanged; each further slice is the
    // extension of its predecessor in `source`.
    slices[0] = source->slice(0);
    std::size_t total = 1;
    for (int i = 0; i < source->n_slices(); ++i) {
        slices[i + 1] = _extend(source->slice(i), total);
        total += slices[i + 1].size();
    }

    MultiIndex limits = source->limits();
    for (dim_t d = 0; d < D; ++d)
        limits[d] += 1;

    return {std::move(slices), total, limits};
}
// Grow the cached prime list so it contains all primes up to `limit`.
// Delegates to the primesieve library when available; otherwise runs a
// segmented sieve of Eratosthenes over odd integers only.
void Sieve::_extend(unsigned limit)
{
#ifdef HAVE_SYMENGINE_PRIMESIEVE
    if (_primes.back() < limit)
        primesieve::generate_primes(_primes.back() + 1, limit, &_primes);
#else
    const unsigned sqrt_limit = static_cast<unsigned>(std::sqrt(limit));
    unsigned start = _primes.back() + 1;
    if (limit <= start)
        return; // cache already covers the request
    if (sqrt_limit >= start) {
        // Sieving up to `limit` needs all primes up to sqrt(limit) as
        // divisors; recurse to obtain them first.
        _extend(sqrt_limit);
        start = _primes.back() + 1;
    }
    // Process the range in fixed-size segments to bound memory use.
    // NOTE(review): assumes `start` is odd here (back()+1 with an odd
    // last prime > 2 would be even) — confirm the cache's invariant.
    unsigned segment = _sieve_size;
    std::valarray<bool> is_prime(segment);
    for (; start <= limit; start += 2 * segment) {
        unsigned finish = std::min(start + segment * 2 + 1, limit);
        // Reset the whole segment to "prime" before striking multiples.
        is_prime[std::slice(0, segment, 1)] = true;
        // considering only odd integers. An odd number n corresponds to
        // n-start/2 in the array.
        for (unsigned index = 1; index < _primes.size()
                 and _primes[index] * _primes[index] <= finish;
             ++index) {
            unsigned n = _primes[index];
            // First multiple of n at or after `start`, forced odd (even
            // multiples need not be struck since only odds are stored).
            unsigned multiple = (start / n + 1) * n;
            if (multiple % 2 == 0)
                multiple += n;
            if (multiple > finish)
                continue;
            std::slice sl = std::slice((multiple - start) / 2,
                                       1 + (finish - multiple) / (2 * n), n);
            // starting from n*n, all the odd multiples of n are marked not
            // prime.
            is_prime[sl] = false;
        }
        // Collect the survivors (odd numbers still flagged prime).
        for (unsigned n = start + 1; n <= finish; n += 2) {
            if (is_prime[(n - start) / 2])
                _primes.push_back(n);
        }
    }
#endif
}
/*
 * Initialize block `b` from `n` pre-existing pairs: sort them with the
 * project comparator, copy them into the block's pair array (growing it
 * one slot at a time), and record the file space consumed by the last
 * (highest-offset) pair. Runs under the block's write lock.
 */
void block_init(struct block *b, struct block_pair *pairs, uint32_t n)
{
    uint32_t i;

    nassert(n > 0);
    rwlock_write_lock(&b->rwlock, &b->mtx);

    qsort(pairs, n, sizeof(*pairs), _pair_compare_fun);

    i = 0;
    while (i < n) {
        _extend(b, 1);
        memcpy(&b->pairs[i], &pairs[i], sizeof(*pairs));
        b->pairs_used++;
        i++;
    }

    /* get the last allocated postion of file*/
    b->allocated += pairs[n - 1].offset;
    b->allocated += ALIGN(pairs[n - 1].real_size);

    rwlock_write_unlock(&b->rwlock);
}
/*
 * LZW-compress the byte buffer `in` using codes of up to `max_bits` bits.
 * NOTE(review): only the setup and the nested bit-writer are visible in
 * this chunk — the encoding loop and the function's closing brace continue
 * beyond this view.
 */
byte* lzw_encode(byte *in, int max_bits)
{
    int len = _len(in), bits = 9, next_shift = 512;
    ushort code, c, nc, next_code = M_NEW;
    lzw_enc_t *d = _new(lzw_enc_t, 512);

    /* clamp the code width; NOTE(review): values below 9 are reset to 12,
     * not 9 — presumably intentional, but worth confirming. */
    if (max_bits > 16) max_bits = 16;
    if (max_bits < 9 ) max_bits = 12;

    byte *out = _new(ushort, 4);
    int out_len = 0, o_bits = 0;
    uint32_t tmp = 0;

    /* GCC nested function: pack `bits`-wide code x into the output stream,
     * flushing whole bytes as they become available and growing `out`
     * via the project's _extend when full. */
    inline void write_bits(ushort x) {
        tmp = (tmp << bits) | x;
        o_bits += bits;
        if (_len(out) <= out_len) _extend(out);
        while (o_bits >= 8) {
            o_bits -= 8;
            out[out_len++] = tmp >> o_bits;
            tmp &= (1 << o_bits) - 1;
        }
    }
void VBO::append( void * mem_ptr, unsigned int mem_size ) { if ( !_support ) { return; } _start = _end; _end = _end + mem_size; if( _end > _size ) { // Increase Buffer Size if( !_extend( mem_size ) ) { assert( false ); // Extend buffer failed. return; } } glBindBuffer( _buffer_Type, _id ); glBufferSubData( _buffer_Type, _start, _end - _start, mem_ptr ); glBindBuffer( _buffer_Type, 0 ); }
/*
 * Return `in_string`'s item list with `in_item` appended, unless a
 * (trimmed) match is already present. The returned string is owned by
 * function-static storage, reused on every call — not reentrant and not
 * thread-safe; callers must copy it if they need it to persist.
 */
const char* cstr_item_add(const char *in_string, const char *in_item)
{
    char **items;
    int count;
    int found = 0;
    int i;

    /* split the input into individually allocated items */
    _itemize((char*)in_string, &items, &count);

    /* scan for an existing (trimmed) match */
    for (i = 0; i < count && !found; i++) {
        if (_streq(_inplace_trim(items[i]), (char*)in_item))
            found = 1;
    }

    /* append a fresh copy only when the item is not yet present */
    if (!found) {
        _extend(&items, &count);
        items[count - 1] = _cstr_clone((char*)in_item);
    }

    static char *result = NULL;
    if (result)
        free(result);
    result = _items_join(items, count);

    _items_cleanup(items, count);
    return result;
}
// Extend the slice's index table across all D dimensions and rewrap the
// result with the caller-supplied offset.
ShapeSlice<D, MultiIndex> _extend(const ShapeSlice<D, MultiIndex>& slice, std::size_t offset)
{
    return {_extend(slice._table(), 0, D), offset};
}
/*
 * Allocate a file region of `real_size` bytes for node `nid` and record it
 * in the block's pair table. Prefers reusing an aligned gap between two
 * existing pairs; otherwise appends at the end of the allocated area.
 * Any prior pair for the same nid is marked unused (reclaimed later by
 * 'block_shrink'). Runs under the block's write lock; returns the chosen
 * disk offset.
 */
DISKOFF block_alloc_off(struct block *b,
                        uint64_t nid,
                        uint32_t real_size,
                        uint32_t skeleton_size,
                        uint32_t height)
{
    DISKOFF r;
    uint32_t i;
    int found = 0;
    uint32_t pos = 0;

    rwlock_write_lock(&b->rwlock, &b->mtx);
    /* default placement: the aligned end of the allocated area */
    r = ALIGN(b->allocated);
    /* make room for one more pair before any insertion below */
    _extend(b, 1);

    /*
     * set old hole to fly
     * it is not visible until you call 'block_shrink'
     */
    for (i = 0; i < b->pairs_used; i++) {
        if (b->pairs[i].nid == nid) {
            b->pairs[i].used = 0;
            break;
        }
    }

    /* find the not-fly hole to reuse */
    if (b->pairs_used > 0) {
        /* scan each adjacent pair for an aligned gap large enough */
        for (pos = 0; pos < (b->pairs_used - 1); pos++) {
            DISKOFF off_aligned;
            struct block_pair *p;
            struct block_pair *nxtp;

            p = &b->pairs[pos];
            nxtp = &b->pairs[pos + 1];
            off_aligned = (ALIGN(p->offset) + ALIGN(p->real_size));
            if ((off_aligned + ALIGN(real_size)) <= nxtp->offset) {
                r = off_aligned;
                /* shift the tail one slot right to open index pos+1 */
                memmove(&b->pairs[pos + 1 + 1],
                        &b->pairs[pos + 1],
                        sizeof(*b->pairs) * (b->pairs_used - pos - 1));
                found = 1;
                break;
            }
        }
    }

    /* found the reuse hole */
    if (found) {
        pos += 1;
    } else {
        /* append: grow the allocated area by the aligned request */
        pos = b->pairs_used;
        b->allocated = (ALIGN(b->allocated) + ALIGN(real_size));
    }
    b->pairs[pos].offset = r;
    b->pairs[pos].height = height;
    b->pairs[pos].real_size = real_size;
    b->pairs[pos].skeleton_size = skeleton_size;
    b->pairs[pos].nid = nid;
    b->pairs[pos].used = 1;
    b->pairs_used++;

    rwlock_write_unlock(&b->rwlock);

    return r;
}
// Resize the display group to `newSize`: first adjust the group itself,
// then extend it — presumably so contents cover the new geometry; confirm
// against adjust()/_extend() definitions.
void DisplayGroupController::reshape(const QSizeF& newSize)
{
    adjust(newSize);
    _extend(newSize);
}