/*
 * invalidate_buckets_lru - refill ca->free_inc with invalidated buckets,
 * picking candidates via a bounded heap built over all buckets.
 *
 * Pass 1 scans every bucket, diverting empty ones to the unused list and
 * keeping the best invalidation candidates in ca->heap (ordered by
 * bucket_max_cmp).  Pass 2 re-heapifies with bucket_min_cmp and pops
 * candidates into free_inc until it is full or the heap runs dry; in the
 * latter case garbage collection is requested instead.
 */
static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		/*
		 * If we fill up the unused list, if we then return before
		 * adding anything to the free_inc list we'll skip writing
		 * prios/gens and just go back to allocating from the unused
		 * list:
		 */
		if (fifo_full(&ca->unused))
			return;

		if (!can_invalidate_bucket(ca, b))
			continue;

		/* No live sectors: park the bucket on the unused list. */
		if (!GC_SECTORS_USED(b) &&
		    bch_bucket_add_unused(ca, b))
			continue;

		/*
		 * Bounded candidate heap: fill it first, then replace the
		 * root whenever bucket_max_cmp prefers the new bucket.
		 * NOTE(review): bucket_max_cmp's ordering is defined
		 * elsewhere — confirm which direction "better candidate"
		 * sorts.
		 */
		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	/* Re-heapify under the opposite ordering so heap_pop() below
	 * hands candidates back best-first. */
	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		invalidate_one_bucket(ca, b);
	}
}
/*
 * bch_extent_sort_fixup - resolve overlaps before emitting the next key
 * in a heap-based merge of btree iterator sets.
 *
 * @iter: merge heap; iter->data[0] ("top") holds the next key to emit.
 * @tmp:  caller-supplied scratch key, used when top must be split.
 *
 * While the runner-up key overlaps top, one of the two is trimmed so the
 * emitted stream contains no overlapping extents.
 *
 * Returns @tmp when top had to be split (caller emits @tmp first),
 * NULL when no split was needed.
 */
static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
					  struct bkey *tmp)
{
	while (iter->used > 1) {
		struct btree_iter_set *top = iter->data, *i = top + 1;

		/* Of top's two heap children, pick the one preferred by
		 * bch_extent_sort_cmp — the true runner-up. */
		if (iter->used > 2 &&
		    bch_extent_sort_cmp(i[0], i[1]))
			i++;

		/* Runner-up starts at or after top ends: no overlap left. */
		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
			break;

		if (!KEY_SIZE(i->k)) {
			/* Zero-size key: nothing to emit, just advance. */
			sort_key_next(iter, i);
			heap_sift(iter, i - top, bch_extent_sort_cmp);
			continue;
		}

		if (top->k > i->k) {
			/*
			 * NOTE(review): the raw pointer comparison appears to
			 * decide which key is newer (later sets presumably at
			 * higher addresses) — confirm against the btree node
			 * layout.  Here top wins: drop i entirely if top
			 * covers it, otherwise trim i's overlapped front.
			 */
			if (bkey_cmp(top->k, i->k) >= 0)
				sort_key_next(iter, i);
			else
				bch_cut_front(top->k, i->k);

			heap_sift(iter, i - top, bch_extent_sort_cmp);
		} else {
			/* can't happen because of comparison func */
			BUG_ON(!bkey_cmp(&START_KEY(top->k),
					 &START_KEY(i->k)));

			if (bkey_cmp(i->k, top->k) < 0) {
				/*
				 * i wins and ends inside top: split top into
				 * the part before i (handed back via @tmp)
				 * and the part after i (stays on the heap).
				 */
				bkey_copy(tmp, top->k);

				bch_cut_back(&START_KEY(i->k), tmp);
				bch_cut_front(i->k, top->k);
				heap_sift(iter, 0, bch_extent_sort_cmp);

				return tmp;
			} else {
				/* i wins and covers top's tail: trim top. */
				bch_cut_back(&START_KEY(i->k), top->k);
			}
		}
	}

	return NULL;
}
/*
 * generate_len_table - compute Huffman code lengths for @size symbols,
 * limited to at most 31 bits per code.
 *
 * @dst:   output, one code length (in bits) per symbol
 * @stats: occurrence count per symbol
 * @size:  number of symbols
 *
 * Builds a Huffman tree with a binary min-heap.  Counts are scaled by
 * 256 and biased by @offset; if any resulting code length reaches 32
 * bits the construction is retried with the bias doubled, which
 * progressively flattens the length distribution until all codes fit.
 */
static void generate_len_table(uint8_t *dst, uint64_t *stats, int size)
{
#if __STDC_VERSION__ >= 199901L
    /* C99: variable-length arrays on the stack. */
    HeapElem h[size];
    int up[2*size];
    int len[2*size];
#else
    /* Pre-C99 compilers (e.g. MSVC): nonstandard _alloca fallback. */
    HeapElem *h=_alloca(sizeof(HeapElem)*size);
    int *up=_alloca(sizeof(int)*2*size);
    int *len=_alloca(sizeof(int)*2*size);
#endif
    int offset, i, next;

    for(offset=1; ; offset<<=1) {
        /* One heap leaf per symbol. */
        for(i=0; i<size; i++) {
            h[i].name = i;
            h[i].val = (stats[i] << 8) + offset;
        }
        /* Bottom-up heapify (min-heap on val). */
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);

        for(next=size; next<size*2-1; next++) {
            // merge the two smallest entries, and put it back in the heap
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;        /* pop the first minimum */
            heap_sift(h, 0, size);
            up[h[0].name] = next;        /* second minimum becomes the merge */
            h[0].name = next;
            h[0].val += min1v;
            heap_sift(h, 0, size);
        }

        /* Depth of each internal node, walking down from the root
         * (node 2*size-2). */
        len[2*size-2] = 0;
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;

        /* Leaf length = parent depth + 1; abort and retry on >= 32. */
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            if(dst[i] >= 32) break;
        }
        if(i==size) break;
    }
}
/**
 * Compute Huffman code lengths for 256 symbols, capped below 32 bits.
 *
 * @dst   output: one code length (in bits) per symbol
 * @stats occurrence count per symbol (256 entries)
 *
 * A binary min-heap drives standard Huffman tree construction.  Each
 * symbol's count is scaled by 256 and offset by a bias; whenever any
 * code length reaches 32 bits, the whole build is repeated with the
 * bias doubled, flattening the tree until every length fits.
 */
static void generate_len_table(uint8_t *dst, const uint64_t *stats){
    enum { NSYM = 256 };
    HeapElem heap[NSYM];
    int parent[2 * NSYM];
    int depth[2 * NSYM];
    int bias;

    for (bias = 1; ; bias <<= 1) {
        int sym, node, overflow;

        /* Seed one leaf per symbol with its biased, scaled count. */
        for (sym = 0; sym < NSYM; sym++) {
            heap[sym].name = sym;
            heap[sym].val  = (stats[sym] << 8) + bias;
        }

        /* Bottom-up heapify (min-heap on val). */
        for (sym = NSYM / 2 - 1; sym >= 0; sym--)
            heap_sift(heap, sym, NSYM);

        /* Huffman: repeatedly fuse the two smallest nodes into a new
         * internal node until a single root remains. */
        for (node = NSYM; node < 2 * NSYM - 1; node++) {
            uint64_t smallest = heap[0].val;

            parent[heap[0].name] = node;
            heap[0].val = INT64_MAX;      /* pop the first minimum */
            heap_sift(heap, 0, NSYM);

            parent[heap[0].name] = node;  /* second minimum joins the merge */
            heap[0].name = node;
            heap[0].val += smallest;
            heap_sift(heap, 0, NSYM);
        }

        /* Depth of every internal node, root (2*NSYM-2) downward. */
        depth[2 * NSYM - 2] = 0;
        for (node = 2 * NSYM - 3; node >= NSYM; node--)
            depth[node] = depth[parent[node]] + 1;

        /* Leaf length = parent depth + 1; flag any that hit the cap. */
        overflow = 0;
        for (sym = 0; sym < NSYM; sym++) {
            dst[sym] = depth[parent[sym]] + 1;
            if (dst[sym] >= 32) {
                overflow = 1;
                break;
            }
        }
        if (!overflow)
            return;
    }
}
/*
 * invalidate_buckets_lru - refill ca->free_inc by invalidating the best
 * candidate buckets, found with a two-phase heap pass.
 *
 * Phase 1 scans all buckets and keeps the best invalidation candidates
 * in the bounded ca->heap (ordered by bucket_max_cmp).  Phase 2
 * re-heapifies under bucket_min_cmp and pops candidates into free_inc;
 * if the heap empties before free_inc fills, garbage collection is
 * requested instead of retrying fruitlessly.
 */
static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		/*
		 * Bounded candidate heap: fill first, then replace the root
		 * whenever bucket_max_cmp prefers the new bucket.
		 * NOTE(review): bucket_max_cmp's ordering is defined
		 * elsewhere — confirm which direction "better candidate"
		 * sorts.
		 */
		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	/* Re-heapify under the opposite ordering so heap_pop() below
	 * returns candidates best-first. */
	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		bch_invalidate_one_bucket(ca, b);
	}
}
/*!
  \brief Discard the smallest element and contract the heap.

  On entry, the numElems of the heap are stored in
  x[0],...,x[numElems-1], and the smallest element is x[0] (min-heap —
  consistent with the swap below and with heap_build/heap_sift usage).

  The following operations are performed:
    -# Swap the first and last elements of the heap
    -# Shorten the length of the heap by one.
    -# Restore the heap property to the contracted heap.

  This effectively makes x[0] the next smallest element in the list;
  the discarded minimum is parked in x[numElems-1], just past the
  contracted heap.

  \param[in]     numElems The number of elements in the current heap.
  \param[in,out] x        The array to be modified.

  \return The number of elements in the heap after it has been
          contracted.
*/
static int heap_del_min(int numElems, double x[])
{
    int lastChild = numElems - 1;

    assert(numElems > 0);

    /* Swap the smallest element with the lastChild. */
    swap_double(x[0], x[lastChild]);

    /* Contract the heap size, thereby discarding the smallest element. */
    lastChild--;

    /* Restore the heap property of the contracted heap. */
    heap_sift(0, lastChild, x);

    return numElems - 1;
}
/*!
  \brief Build a heap bottom-up by sifting down each internal node.

  \param[in]     n The length of x.
  \param[in,out] x The array to be heapified.
*/
static void heap_build(
    int    n,
    double x[]
)
{
    int i;

    /* Sift down from the last parent up to the root.  Starting at n/2
       (rather than n/2 - 1) sifts one extra node that may be a leaf,
       which is a harmless no-op. */
    for (i = n/2; i >= 0; i--)
        heap_sift( i, n-1, x );
}