void t5() { // // Now integer functions: // ref_type z1, z2; test_type t1, t2; divide_qr(a, b, z1, z2); divide_qr(a1, b1, t1, t2); BOOST_TEST_EQ(z1.str(), t1.str()); BOOST_TEST_EQ(z2.str(), t2.str()); BOOST_TEST_EQ(integer_modulus(a, si), integer_modulus(a1, si)); BOOST_TEST_EQ(lsb(a), lsb(a1)); for(unsigned i = 0; i < 1000; i += 13) { BOOST_TEST_EQ(bit_test(a, i), bit_test(a1, i)); } // We have to take care that our powers don't grow too large, otherwise this takes "forever", // also don't test for modulo types, as these may give a different result from arbitrary // precision types: BOOST_TEST_EQ(ref_type(pow(d, ui % 19)).str(), test_type(pow(d1, ui % 19)).str()); BOOST_TEST_EQ(ref_type(powm(a, b, c)).str(), test_type(powm(a1, b1, c1)).str()); BOOST_TEST_EQ(ref_type(powm(a, b, ui)).str(), test_type(powm(a1, b1, ui)).str()); BOOST_TEST_EQ(ref_type(powm(a, ui, c)).str(), test_type(powm(a1, ui, c1)).str()); }
/*
 * Append to `sb` the name of `refname` as addressed from worktree `wt`:
 * per-worktree refs viewed from a different worktree get a
 * "main-worktree/" or "worktrees/<id>/" prefix; everything else is
 * emitted verbatim.
 */
void strbuf_worktree_ref(const struct worktree *wt, struct strbuf *sb,
			 const char *refname)
{
	int type = ref_type(refname);

	if ((type == REF_TYPE_PSEUDOREF || type == REF_TYPE_PER_WORKTREE) &&
	    wt && !wt->is_current) {
		/* qualify a per-worktree ref seen from another worktree */
		if (is_main_worktree(wt))
			strbuf_addstr(sb, "main-worktree/");
		else
			strbuf_addf(sb, "worktrees/%s/", wt->id);
	}
	/*
	 * REF_TYPE_MAIN_PSEUDOREF, REF_TYPE_OTHER_PSEUDOREF and
	 * REF_TYPE_NORMAL take no prefix. For shared refs, don't prefix
	 * worktrees/ or main-worktree/: it's not necessary and
	 * files-backend.c can't handle it anyway.
	 */
	strbuf_addstr(sb, refname);
}
int refs_delete_ref(struct ref_store *refs, const char *msg, const char *refname, const unsigned char *old_sha1, unsigned int flags) { struct ref_transaction *transaction; struct strbuf err = STRBUF_INIT; if (ref_type(refname) == REF_TYPE_PSEUDOREF) { assert(refs == get_main_ref_store()); return delete_pseudoref(refname, old_sha1); } transaction = ref_store_transaction_begin(refs, &err); if (!transaction || ref_transaction_delete(transaction, refname, old_sha1, flags, msg, &err) || ref_transaction_commit(transaction, &err)) { error("%s", err.buf); ref_transaction_free(transaction); strbuf_release(&err); return 1; } ref_transaction_free(transaction); strbuf_release(&err); return 0; }
/* theta_type -- compute type of a theta-exp or theta-select */
/*
 * t   : the theta expression node (carries name, decoration, renaming).
 * e   : current environment.
 * a   : NULL for a plain theta-exp; for a theta-select, the schema-product
 *       type of the selected-from expression (components are then looked
 *       up in `a` rather than in the environment).
 * cxt : context node used for error reporting in comp_type.
 *
 * Builds a schema product whose components are the (possibly renamed)
 * components of the referenced schema, each typed from `e` or `a`.
 * Returns err_type if the schema name is unknown.
 */
PRIVATE type theta_type(tree t, env e, type a, tree cxt)
{
     def d = get_schema((tok) t->x_the_name, t->x_loc);
     schema s;
     env e1 = new_env(e);
     type b;
     int i;

     if (d == NULL)
          /* unknown schema name: error already reported by get_schema
             (presumably) -- propagate the error type */
          return err_type;
     s = d->d_schema;
     check_rename(s, (tok) t->x_the_decor, t->x_the_rename, t);
     for (i = 0; i < s->z_ncomps; i++) {
          sym x = s->z_comp[i].z_name;
          /* the component as seen through decoration + renaming */
          sym xp = get_rename(x, (tok) t->x_the_decor, t->x_the_rename);
          /* theta-exp: type comes from the environment;
             theta-select: type comes from the selected schema product */
          type tt = (a == NULL ? ref_type(xp, nil, e, t) : comp_type(a, xp, cxt, t->x_loc));
          add_def(VAR, x, tt, e1);
     }
     b = mk_sproduct(mk_schema(e1));
     /* If the theta reproduces the schema's own type exactly and the
        schema is a parameterless abbreviation, return the abbreviation
        (keeps types printable by name) unless -a disables this. */
     if (! aflag && d->d_abbrev && d->d_nparams == 0 && type_equal(b, arid, mk_sproduct(s), arid))
          return mk_abbrev(d, arid);
     else
          return b;
}
/// Convert a signed 64-bit value to a ref_type.
///
/// Debug builds assert that the value fits in ref_type without overflow
/// and that it is 8-byte aligned (refs address 64-bit-aligned storage).
inline ref_type to_ref(int_fast64_t v) noexcept
{
    REALM_ASSERT_DEBUG(!util::int_cast_has_overflow<ref_type>(v));
    // Check that v is divisible by 8 (64-bit aligned).
    REALM_ASSERT_DEBUG(v % 8 == 0);
    // Keep this variant consistent with the sibling to_ref() definition:
    // the conversion below relies on well-defined unsigned narrowing.
    static_assert(std::is_unsigned<ref_type>::value,
                  "If ref_type changes, from_ref and to_ref should probably be updated");
    return ref_type(v);
}
/*
 * Return 1 if `refname` denotes HEAD, stripping any worktree
 * qualification ("worktrees/<id>/" or "main-worktree/") first.
 */
static int is_head(const char *refname)
{
	int type = ref_type(refname);

	if (type == REF_TYPE_OTHER_PSEUDOREF ||
	    type == REF_TYPE_MAIN_PSEUDOREF) {
		/* peel off the worktree prefix to get the bare ref name */
		if (parse_worktree_ref(refname, NULL, NULL, &refname))
			BUG("not a worktree ref: %s", refname);
	}
	return !strcmp(refname, "HEAD");
}
void t2() { // bitwise ops: BOOST_TEST_EQ(ref_type(a|b).str(), test_type(a1 | b1).str()); BOOST_TEST_EQ((ref_type(a)|=b).str(), (test_type(a1) |= b1).str()); BOOST_TEST_EQ(ref_type(a&b).str(), test_type(a1 & b1).str()); BOOST_TEST_EQ((ref_type(a)&=b).str(), (test_type(a1) &= b1).str()); BOOST_TEST_EQ(ref_type(a^b).str(), test_type(a1 ^ b1).str()); BOOST_TEST_EQ((ref_type(a)^=b).str(), (test_type(a1) ^= b1).str()); // Shift ops: for (unsigned i = 0; i < 128; ++i) { BOOST_TEST_EQ(ref_type(a << i).str(), test_type(a1 << i).str()); BOOST_TEST_EQ(ref_type(a >> i).str(), test_type(a1 >> i).str()); } // gcd/lcm BOOST_TEST_EQ(ref_type(gcd(a, b)).str(), test_type(gcd(a1, b1)).str()); BOOST_TEST_EQ(ref_type(lcm(c, d)).str(), test_type(lcm(c1, d1)).str()); }
/// Convert a signed 64-bit value to a ref_type.
inline ref_type to_ref(int_fast64_t v) noexcept
{
    // Check that v is divisible by 8 (64-bit aligned).
    REALM_ASSERT_DEBUG(v % 8 == 0);

    // The conversion below relies on the guarantee of C++11 4.7.2
    // [conv.integral]: converting to an unsigned type yields the least
    // unsigned integer congruent to the source modulo 2^n (n = bit width
    // of the destination). In a two's complement representation this is
    // purely conceptual -- the bit pattern is unchanged when nothing is
    // truncated. The static_assert pins the "unsigned" precondition.
    static_assert(std::is_unsigned<ref_type>::value,
                  "If ref_type changes, from_ref and to_ref should probably be updated");
    return ref_type(v);
}
/*
 * Update `refname` in `refs` to `new_sha1` (optionally verifying it
 * currently points at `old_sha1`), logging `msg`. Pseudorefs are written
 * directly, bypassing the transaction machinery, and must come from the
 * main ref store. On failure the reaction is chosen by `onerr` (report,
 * die, or stay quiet). Returns 0 on success, 1 on failure.
 */
int refs_update_ref(struct ref_store *refs, const char *msg,
		    const char *refname, const unsigned char *new_sha1,
		    const unsigned char *old_sha1, unsigned int flags,
		    enum action_on_err onerr)
{
	struct ref_transaction *t = NULL;
	struct strbuf err = STRBUF_INIT;
	int ret = 0;

	if (ref_type(refname) == REF_TYPE_PSEUDOREF) {
		/* pseudorefs are only meaningful in the main ref store */
		assert(refs == get_main_ref_store());
		ret = write_pseudoref(refname, new_sha1, old_sha1, &err);
	} else {
		t = ref_store_transaction_begin(refs, &err);
		if (!t ||
		    ref_transaction_update(t, refname, new_sha1, old_sha1,
					   flags, msg, &err) ||
		    ref_transaction_commit(t, &err)) {
			ret = 1;
			/*
			 * Freed here; the `return 1` in the ret block below
			 * is taken before the tail `if (t)` free, so there
			 * is no double free.
			 */
			ref_transaction_free(t);
		}
	}
	if (ret) {
		const char *str = "update_ref failed for ref '%s': %s";
		switch (onerr) {
		case UPDATE_REFS_MSG_ON_ERR:
			error(str, refname, err.buf);
			break;
		case UPDATE_REFS_DIE_ON_ERR:
			die(str, refname, err.buf);
			break;
		case UPDATE_REFS_QUIET_ON_ERR:
			break;
		}
		strbuf_release(&err);
		return 1;
	}
	strbuf_release(&err);
	if (t)
		ref_transaction_free(t);
	return 0;
}
static int collect_reflog(const char *ref, const struct object_id *oid, int unused, void *cb_data) { struct collected_reflog *e; struct collect_reflog_cb *cb = cb_data; struct strbuf newref = STRBUF_INIT; /* * Avoid collecting the same shared ref multiple times because * they are available via all worktrees. */ if (!cb->wt->is_current && ref_type(ref) == REF_TYPE_NORMAL) return 0; strbuf_worktree_ref(cb->wt, &newref, ref); FLEX_ALLOC_STR(e, reflog, newref.buf); strbuf_release(&newref); oidcpy(&e->oid, oid); ALLOC_GROW(cb->e, cb->nr + 1, cb->alloc); cb->e[cb->nr++] = e; return 0; }
int delete_ref(const char *refname, const unsigned char *old_sha1, unsigned int flags) { struct ref_transaction *transaction; struct strbuf err = STRBUF_INIT; if (ref_type(refname) == REF_TYPE_PSEUDOREF) return delete_pseudoref(refname, old_sha1); transaction = ref_transaction_begin(&err); if (!transaction || ref_transaction_delete(transaction, refname, old_sha1, flags, NULL, &err) || ref_transaction_commit(transaction, &err)) { error("%s", err.buf); ref_transaction_free(transaction); strbuf_release(&err); return 1; } ref_transaction_free(transaction); strbuf_release(&err); return 0; }
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator) { struct packed_ref_iterator *iter = (struct packed_ref_iterator *)ref_iterator; int ok; while ((ok = next_record(iter)) == ITER_OK) { if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY && ref_type(iter->base.refname) != REF_TYPE_PER_WORKTREE) continue; if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) && !ref_resolves_to_object(iter->base.refname, &iter->oid, iter->flags)) continue; return ITER_OK; } if (ref_iterator_abort(ref_iterator) != ITER_DONE) ok = ITER_ERROR; return ok; }
void t3() { // Now check operations involving signed integers: BOOST_TEST_EQ(ref_type(a + si).str(), test_type(a1 + si).str()); BOOST_TEST_EQ(ref_type(si + a).str(), test_type(si + a1).str()); BOOST_TEST_EQ((ref_type(a)+=si).str(), (test_type(a1) += si).str()); if (a >= si) { BOOST_TEST_EQ(ref_type(a - si).str(), test_type(a1 - si).str()); BOOST_TEST_EQ((ref_type(a)-=si).str(), (test_type(a1) -= si).str()); } else { //BOOST_TEST_EQ(ref_type(si - a).str(), test_type(si - a1).str()); } BOOST_TEST_EQ(ref_type(b * si).str(), test_type(b1 * si).str()); BOOST_TEST_EQ(ref_type(si * b).str(), test_type(si * b1).str()); BOOST_TEST_EQ((ref_type(a)*=si).str(), (test_type(a1) *= si).str()); BOOST_TEST_EQ(ref_type(a / si).str(), test_type(a1 / si).str()); BOOST_TEST_EQ((ref_type(a)/=si).str(), (test_type(a1) /= si).str()); BOOST_TEST_EQ(ref_type(a % si).str(), test_type(a1 % si).str()); BOOST_TEST_EQ((ref_type(a)%=si).str(), (test_type(a1) %= si).str()); BOOST_TEST_EQ(ref_type(a|si).str(), test_type(a1 | si).str()); BOOST_TEST_EQ((ref_type(a)|=si).str(), (test_type(a1) |= si).str()); BOOST_TEST_EQ(ref_type(a&si).str(), test_type(a1 & si).str()); BOOST_TEST_EQ((ref_type(a)&=si).str(), (test_type(a1) &= si).str()); BOOST_TEST_EQ(ref_type(a^si).str(), test_type(a1 ^ si).str()); BOOST_TEST_EQ((ref_type(a)^=si).str(), (test_type(a1) ^= si).str()); BOOST_TEST_EQ(ref_type(si|a).str(), test_type(si|a1).str()); BOOST_TEST_EQ(ref_type(si&a).str(), test_type(si&a1).str()); BOOST_TEST_EQ(ref_type(si^a).str(), test_type(si^a1).str()); BOOST_TEST_EQ(ref_type(gcd(a, si)).str(), test_type(gcd(a1, si)).str()); BOOST_TEST_EQ(ref_type(gcd(si, b)).str(), test_type(gcd(si, b1)).str()); BOOST_TEST_EQ(ref_type(lcm(c, si)).str(), test_type(lcm(c1, si)).str()); BOOST_TEST_EQ(ref_type(lcm(si, d)).str(), test_type(lcm(si, d1)).str()); }
void t4() { // Now check operations involving unsigned integers: BOOST_TEST_EQ(ref_type(a + ui).str(), test_type(a1 + ui).str()); BOOST_TEST_EQ(ref_type(ui + a).str(), test_type(ui + a1).str()); BOOST_TEST_EQ((ref_type(a)+=ui).str(), (test_type(a1) += ui).str()); if (a >= ui) { BOOST_TEST_EQ(ref_type(a - ui).str(), test_type(a1 - ui).str()); BOOST_TEST_EQ((ref_type(a)-=ui).str(), (test_type(a1) -= ui).str()); } else { //BOOST_TEST_EQ(ref_type(ui - a).str(), test_type(ui - a1).str()); } BOOST_TEST_EQ(ref_type(b * ui).str(), test_type(b1 * ui).str()); BOOST_TEST_EQ(ref_type(ui * b).str(), test_type(ui * b1).str()); BOOST_TEST_EQ((ref_type(a)*=ui).str(), (test_type(a1) *= ui).str()); BOOST_TEST_EQ(ref_type(a / ui).str(), test_type(a1 / ui).str()); BOOST_TEST_EQ((ref_type(a)/=ui).str(), (test_type(a1) /= ui).str()); BOOST_TEST_EQ(ref_type(a % ui).str(), test_type(a1 % ui).str()); BOOST_TEST_EQ((ref_type(a)%=ui).str(), (test_type(a1) %= ui).str()); BOOST_TEST_EQ(ref_type(a|ui).str(), test_type(a1 | ui).str()); BOOST_TEST_EQ((ref_type(a)|=ui).str(), (test_type(a1) |= ui).str()); BOOST_TEST_EQ(ref_type(a&ui).str(), test_type(a1 & ui).str()); BOOST_TEST_EQ((ref_type(a)&=ui).str(), (test_type(a1) &= ui).str()); BOOST_TEST_EQ(ref_type(a^ui).str(), test_type(a1 ^ ui).str()); BOOST_TEST_EQ((ref_type(a)^=ui).str(), (test_type(a1) ^= ui).str()); BOOST_TEST_EQ(ref_type(ui|a).str(), test_type(ui|a1).str()); BOOST_TEST_EQ(ref_type(ui&a).str(), test_type(ui&a1).str()); BOOST_TEST_EQ(ref_type(ui^a).str(), test_type(ui^a1).str()); BOOST_TEST_EQ(ref_type(gcd(a, ui)).str(), test_type(gcd(a1, ui)).str()); BOOST_TEST_EQ(ref_type(gcd(ui, b)).str(), test_type(gcd(ui, b1)).str()); BOOST_TEST_EQ(ref_type(lcm(c, ui)).str(), test_type(lcm(c1, ui)).str()); BOOST_TEST_EQ(ref_type(lcm(ui, d)).str(), test_type(lcm(ui, d1)).str()); }
PUBLIC type tc_expr(tree t, env e) #endif { switch (t->x_kind) { case REF: return ref_type((sym) t->x_tag, t->x_params, e, t); case INGEN: return ref_type((sym) t->x_tag, list2(t->x_param1, t->x_param2), e, t); case PREGEN: return ref_type((sym) t->x_tag, list1(t->x_param), e, t); case NUMBER: return nat_type; case SEXPR: { def d; frame params; if (! open_sref(t->x_ref, e, &d, ¶ms)) return err_type; if ((tok) t->x_ref->x_sref_decor != empty) { tc_error(t->x_loc, "Decoration ignored in schema reference"); tc_e_etc("Expression: %z", t); tc_e_end(); } if (t->x_ref->x_sref_renames != nil) { tc_error(t->x_loc, "Renaming ignored in schema reference"); tc_e_etc("Expression: %z", t); tc_e_end(); } if (! aflag && d->d_abbrev) return mk_power(mk_abbrev(d, params)); else return mk_power(seal(mk_sproduct(d->d_schema), params)); } case POWER: { type tt1, tt2; if (! anal_power(tt1 = tc_expr(t->x_arg, e), &tt2, t->x_arg)) { tc_error(t->x_loc, "Argument of \\power must be a set"); tc_e_etc("Expression: %z", t); tc_e_etc("Arg type: %t", tt1); tc_e_end(); } return mk_power(mk_power(tt2)); } case TUPLE : { type a[MAX_ARGS]; int n = 0; tree u; for (u = t->x_elements; u != nil; u = cdr(u)) { if (n >= MAX_ARGS) panic("tc_expr - tuple too big"); a[n++] = tc_expr(car(u), e); } return mk_cproduct(n, a); } case CROSS: { type a[MAX_ARGS]; type tt1, tt2; int n = 0; tree u; for (u = t->x_factors; u != nil; u = cdr(u)) { if (n >= MAX_ARGS) panic("tc_expr - product too big"); tt1 = tc_expr(car(u), e); if (! 
anal_power(tt1, &tt2, car(u))) { tc_error(t->x_loc, "Argument %d of \\cross must be a set", n+1); tc_e_etc("Expression: %z", t); tc_e_etc("Arg %d type: %t", n+1, tt1); tc_e_end(); } a[n++] = tt2; } return mk_power(mk_cproduct(n, a)); } case EXT: case SEQ: case BAG: { type elem_type; type tt; tree u; if (t->x_elements == nil) elem_type = new_typevar(t); else { elem_type = tc_expr(car(t->x_elements), e); for (u = cdr(t->x_elements); u != nil; u = cdr(u)) { if (unify(elem_type, tt = tc_expr(car(u), e))) elem_type = type_union(elem_type, arid, tt, arid); else { tc_error(t->x_loc, "Type mismatch in %s display", (t->x_kind == EXT ? "set" : t->x_kind == SEQ ? "sequence" : "bag")); tc_e_etc("Expression: %z", car(u)); tc_e_etc("Has type: %t", tt); tc_e_etc("Expected: %t", elem_type); tc_e_end(); } } } switch (t->x_kind) { case EXT: return mk_power(elem_type); case SEQ: return (aflag ? rel_type(num_type, elem_type) : mk_seq(elem_type)); case BAG: return (aflag ? rel_type(elem_type, num_type) : mk_bag(elem_type)); } } case THETA: return theta_type(t, e, (type) NULL, t); case BINDING: { tree u; env e1 = new_env(e); for (u = t->x_elements; u != nil; u = cdr(u)) add_def(VAR, (sym) car(u)->x_lhs, tc_expr(car(u)->x_rhs, e), e1); return mk_sproduct(mk_schema(e1)); } case SELECT: { type a = tc_expr(t->x_arg, e); if (type_kind(a) != SPRODUCT) { tc_error(t->x_loc, "Argument of selection must have schema type"); tc_e_etc("Expression: %z", t); tc_e_etc("Arg type: %t", a); tc_e_end(); mark_error(); return err_type; } switch (t->x_field->x_kind) { case IDENT: return (comp_type(a, (sym) t->x_field, t, t->x_loc)); case THETA: return (theta_type(t->x_field, e, a, t)); default: bad_tag("tc_expr.SELECT", t->x_field->x_kind); return (type) NULL; } } case APPLY: return tc_apply(APPLY, t, t->x_arg1, t->x_arg2, e); case INOP: return tc_apply(INOP, t, simply(t->x_op, t->x_loc), pair(t->x_rand1, t->x_rand2), e); case POSTOP: return tc_apply(POSTOP, t, simply(t->x_op, t->x_loc), t->x_rand, e); case 
LAMBDA: { env e1 = tc_schema(t->x_bvar, e); type dom = tc_expr(char_tuple(t->x_bvar), e1); type ran = tc_expr(t->x_body, e1); return (aflag ? rel_type(dom, ran) : mk_pfun(dom, ran)); } case COMP: case MU: { env e1 = tc_schema(t->x_bvar, e); type a = tc_expr(exists(t->x_body) ? the(t->x_body) : char_tuple(t->x_bvar), e1); return (t->x_kind == COMP ? mk_power(a) : a); } case LETEXPR: return tc_expr(t->x_body, tc_letdefs(t->x_defs, e)); case IF: { type a, b; tc_pred(t->x_if, e); a = tc_expr(t->x_then, e); b = tc_expr(t->x_else, e); if (unify(a, b)) return type_union(a, arid, b, arid); else { tc_error(t->x_loc, "Type mismatch in conditional expression"); tc_e_etc("Expression: %z", t); tc_e_etc("Then type: %t", a); tc_e_etc("Else type: %t", b); tc_e_end(); return err_type; } } default: bad_tag("tc_expr", t->x_kind); /* dummy */ return (type) NULL; } }