struct keyprint_hack bch_pkey(const struct bkey *k)
{
	unsigned i = 0;
	struct keyprint_hack r;
	char *out = r.s, *end = r.s + KEYHACK_SIZE;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k));

	if (KEY_PTRS(k))
		while (1) {
			p("%llu:%llu gen %llu",
			  PTR_DEV(k, i), PTR_OFFSET(k, i), PTR_GEN(k, i));

			if (++i == KEY_PTRS(k))
				break;

			p(", ");
		}

	p("]");

	if (KEY_DIRTY(k))
		p(" dirty");
	if (KEY_CSUM(k))
		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
	return r;
}
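/*
 * A minimal userspace sketch of the bounded-append pattern behind the p()
 * macro above.  scnprintf() is kernel-only; demo_scnprintf() below is a
 * hypothetical stand-in that emulates its "return chars actually written"
 * contract on top of vsnprintf(), so repeated appends can never run past
 * the end of the buffer.  All demo_* names are illustrative, not bcache.
 */
#include <stdarg.h>
#include <stdio.h>

static int demo_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	if (!size)
		return 0;

	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);

	/* vsnprintf() reports what *would* have fit; clamp to what was written */
	return n >= (int)size ? (int)size - 1 : n;
}

int main(void)
{
	char s[32];
	char *out = s, *end = s + sizeof(s);

	out += demo_scnprintf(out, end - out, "%u:%u len %u -> [", 5, 1024, 128);
	out += demo_scnprintf(out, end - out, "%u:%u gen %u", 0, 512, 3);
	out += demo_scnprintf(out, end - out, "]");

	/* Prints "5:1024 len 128 -> [0:512 gen 3]", safely truncated if long */
	puts(s);
	return 0;
}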
static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
				       struct bkey *insert,
				       struct btree_iter *iter,
				       struct bkey *replace_key)
{
	struct btree *b = container_of(bk, struct btree, keys);

	if (!KEY_OFFSET(insert))
		btree_current_write(b)->prio_blocked++;

	return false;
}
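/*
 * A minimal sketch of the container_of() idiom used throughout these
 * callbacks to recover the enclosing struct btree from its embedded
 * struct btree_keys: subtract the member's offset from the member's
 * address.  The demo_* names are hypothetical, not bcache types.
 */
#include <stddef.h>

struct demo_keys { int nsets; };
struct demo_btree { int level; struct demo_keys keys; };

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct demo_btree *demo_to_btree(struct demo_keys *bk)
{
	/* bk points at ->keys inside a demo_btree; step back to its start */
	return demo_container_of(bk, struct demo_btree, keys);
}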
static bool bch_extent_merge(struct btree_keys *bk,
			     struct bkey *l,
			     struct bkey *r)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned i;

	if (key_merging_disabled(b->c))
		return false;

	for (i = 0; i < KEY_PTRS(l); i++)
		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
			return false;

	/*
	 * Keys with no pointers aren't restricted to one bucket and could
	 * overflow KEY_SIZE
	 */
	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
		SET_KEY_SIZE(l, USHRT_MAX);

		bch_cut_front(l, r);
		return false;
	}

	if (KEY_CSUM(l)) {
		if (KEY_CSUM(r))
			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
		else
			SET_KEY_CSUM(l, 0);
	}

	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));

	return true;
}
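/*
 * A minimal sketch of the merge rule above, with the bkey packing stripped
 * out: two extents merge only when the right one begins exactly where the
 * left one ends, and the combined length must still fit the 16-bit size
 * field (USHRT_MAX).  Unlike bch_extent_merge(), which performs a partial
 * merge on overflow, this sketch simply refuses.  struct demo_extent and
 * demo_extent_merge() are hypothetical names, not bcache types.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>

struct demo_extent {
	uint64_t offset;	/* end of the extent, like KEY_OFFSET() */
	unsigned size;		/* length in sectors, like KEY_SIZE() */
};

static bool demo_extent_merge(struct demo_extent *l,
			      const struct demo_extent *r)
{
	/* The right extent must start exactly at the left extent's end */
	if (r->offset - r->size != l->offset)
		return false;

	/* The combined size must still fit in the 16-bit size field */
	if (l->size + r->size > USHRT_MAX)
		return false;

	l->offset += r->size;
	l->size += r->size;
	return true;
}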
bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_SIZE(k))
		return true;

	if (KEY_SIZE(k) > KEY_OFFSET(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
	return true;
}
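/*
 * A minimal sketch of the two size checks above: an extent key stores its
 * *end* offset, so its start is KEY_OFFSET() - KEY_SIZE(); a size larger
 * than the offset would place the start before sector 0.  A zero size
 * means the key carries no data.  demo_extent_invalid() is a hypothetical
 * illustration, not the bcache function.
 */
#include <stdbool.h>
#include <stdint.h>

static bool demo_extent_invalid(uint64_t offset, uint64_t size)
{
	if (!size)		/* zero-size extents carry no data */
		return true;

	if (size > offset)	/* start (offset - size) would underflow */
		return true;

	return false;
}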
static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);

	return __bch_extent_invalid(b->c, k);
}
static bool bch_extent_insert_fixup(struct btree_keys *b,
				    struct bkey *insert,
				    struct btree_iter *iter,
				    struct bkey *replace_key)
{
	struct cache_set *c = container_of(b, struct btree, keys)->c;

	uint64_t old_offset;
	unsigned old_size, sectors_found = 0;

	BUG_ON(!KEY_OFFSET(insert));
	BUG_ON(!KEY_SIZE(insert));

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);

		if (!k)
			break;

		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
			if (KEY_SIZE(k))
				break;
			else
				continue;
		}

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_offset = KEY_START(k);
		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for replace
		 * operations.
		 */

		if (replace_key && KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned i;
			uint64_t offset = KEY_START(k) -
				KEY_START(replace_key);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(replace_key) ||
			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (!bch_bkey_equal_header(k, replace_key))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(replace_key));

			for (i = 0; i < KEY_PTRS(replace_key); i++)
				if (k->ptr[i] != replace_key->ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */

			struct bkey *top;

			bch_subtract_dirty(k, c, KEY_START(insert),
					   KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, bset_tree_last(b),
						      insert);
				bch_bset_insert(b, top, k);
			} else {
				BKEY_PADDED(key) temp;
				bkey_copy(&temp.key, k);
				bch_bset_insert(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			goto out;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
				old_offset = KEY_START(insert);

			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
	}

check_failed:
	if (replace_key) {
		if (!sectors_found) {
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}
out:
	if (KEY_DIRTY(insert))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
					     KEY_START(insert),
					     KEY_SIZE(insert));

	return false;
}
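/*
 * A minimal sketch of the middle-overlap case handled above: when the new
 * extent lands strictly inside an old one, the old key is split into a
 * bottom piece (cut back to the insert's start) and a top piece (cut front
 * to the insert's end).  struct demo_ext and demo_split() are hypothetical;
 * the real code does this in place inside the bset and must also handle
 * the written/unwritten distinction.
 */
#include <assert.h>
#include <stdint.h>

struct demo_ext {
	uint64_t start, end;	/* [start, end) in sectors */
};

static void demo_split(const struct demo_ext *old, const struct demo_ext *ins,
		       struct demo_ext *bottom, struct demo_ext *top)
{
	/* The insert must overlap strictly inside the old extent */
	assert(old->start < ins->start && ins->end < old->end);

	bottom->start = old->start;	/* like bch_cut_back(&START_KEY(insert), k) */
	bottom->end = ins->start;

	top->start = ins->end;		/* like bch_cut_front(insert, top) */
	top->end = old->end;
}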
/* Processes the next pending connection from either a server or a proxy, and
 * returns a strictly positive value on success (see below). If no pending
 * connection is found, 0 is returned. Note that neither <srv> nor <px> may be
 * NULL. Priority is given to the oldest request in the queue if both <srv> and
 * <px> have pending requests. This ensures that no request will be left
 * unserved. The <px> queue is not considered if the server (or a tracked
 * server) is not RUNNING, is disabled, or has a null weight (server going
 * down). The <srv> queue is still considered in this case, because if some
 * connections remain there, it means that some requests have been forced there
 * after it was seen down (eg: due to option persist). The stream is
 * immediately marked as "assigned", and both its <srv> and <srv_conn> are set
 * to <srv>.
 *
 * This function must only be called if the server queue _AND_ the proxy queue
 * are locked. Today it is only called by process_srv_queue. When a pending
 * connection is dequeued, this function returns 1.
 */
static int pendconn_process_next_strm(struct server *srv, struct proxy *px)
{
	struct pendconn *p = NULL;
	struct pendconn *pp = NULL;
	struct server *rsrv;
	u32 pkey, ppkey;

	rsrv = srv->track;
	if (!rsrv)
		rsrv = srv;

	p = NULL;
	if (srv->nbpend)
		p = pendconn_first(&srv->pendconns);

	pp = NULL;
	if (srv_currently_usable(rsrv) && px->nbpend &&
	    (!(srv->flags & SRV_F_BACKUP) ||
	     (!px->srv_act &&
	      (srv == px->lbprm.fbck || (px->options & PR_O_USE_ALL_BK)))))
		pp = pendconn_first(&px->pendconns);

	if (!p && !pp)
		return 0;
	else if (!pp)
		goto use_p;  /*  p != NULL */
	else if (!p)
		goto use_pp; /* pp != NULL */

	/* p != NULL && pp != NULL */

	if (KEY_CLASS(p->node.key) < KEY_CLASS(pp->node.key))
		goto use_p;

	if (KEY_CLASS(pp->node.key) < KEY_CLASS(p->node.key))
		goto use_pp;

	pkey  = KEY_OFFSET(p->node.key);
	ppkey = KEY_OFFSET(pp->node.key);

	if (pkey < NOW_OFFSET_BOUNDARY())
		pkey += 0x100000; // key in the future

	if (ppkey < NOW_OFFSET_BOUNDARY())
		ppkey += 0x100000; // key in the future

	if (pkey <= ppkey)
		goto use_p;

 use_pp:
	/* Let's switch from the server pendconn to the proxy pendconn */
	p = pp;
 use_p:
	__pendconn_unlink(p);
	p->strm_flags |= SF_ASSIGNED;
	p->target = srv;

	if (p != pp)
		srv->queue_idx++;
	else
		px->queue_idx++;

	_HA_ATOMIC_ADD(&srv->served, 1);
	_HA_ATOMIC_ADD(&srv->proxy->served, 1);
	__ha_barrier_atomic_store();
	if (px->lbprm.server_take_conn)
		px->lbprm.server_take_conn(srv);
	__stream_add_srv_conn(p->strm, srv);

	task_wakeup(p->strm->task, TASK_WOKEN_RES);

	return 1;
}
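/*
 * A minimal sketch of the wrap-safe key comparison above: the queue keys
 * carry a time offset that wraps at 0x100000 (2^20), so any value below the
 * current boundary is assumed to have wrapped and is lifted by one full
 * period before comparing, keeping the ordering correct across the wrap.
 * demo_key_older() and 'boundary' are hypothetical stand-ins for
 * KEY_OFFSET(p->node.key) and NOW_OFFSET_BOUNDARY().
 */
#include <stdbool.h>
#include <stdint.h>

static bool demo_key_older(uint32_t pkey, uint32_t ppkey, uint32_t boundary)
{
	if (pkey < boundary)
		pkey += 0x100000;	/* already wrapped: key is in the future */
	if (ppkey < boundary)
		ppkey += 0x100000;

	return pkey <= ppkey;		/* ties go to the server-local queue */
}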