/**
 * List the hashes supported by each repo implementation.
 *
 * NOTE: I don't have a way to verify that the list is complete or verify
 * NOTE: what it should contain, so I just print the list.
 */
void MyFn(list_hashes)(SG_context * pCtx)
{
	SG_repo * pRepo = NULL;
	SG_vhash * pvh_vtables = NULL;
	SG_vhash * pvh_HashMethods = NULL;
	SG_uint32 k, count_vtables;

	// Enumerate the installed repo implementations (vtables).
	VERIFY_ERR_CHECK( SG_repo__query_implementation(pCtx,NULL,
													SG_REPO__QUESTION__VHASH__LIST_REPO_IMPLEMENTATIONS,
													NULL,NULL,NULL,0,
													&pvh_vtables) );
	VERIFY_ERR_CHECK( SG_vhash__count(pCtx,pvh_vtables,&count_vtables) );
	for (k=0; k<count_vtables; k++)
	{
		const char * pszKey_vtable_k;
		SG_uint32 j, count_HashMethods;

		VERIFY_ERR_CHECK( SG_vhash__get_nth_pair(pCtx,pvh_vtables,k,&pszKey_vtable_k,NULL) );
		INFOP("vtable",("Repo Implementation[%d]: [%s]",k,pszKey_vtable_k));

		// Instantiate a repo object of this implementation (no repo is opened
		// on disk) so we can ask it which hash methods it supports.
		VERIFY_ERR_CHECK( SG_repo__alloc(pCtx,&pRepo,pszKey_vtable_k) );
		VERIFY_ERR_CHECK( SG_repo__query_implementation(pCtx,pRepo,
														SG_REPO__QUESTION__VHASH__LIST_HASH_METHODS,
														NULL,NULL,NULL,0,
														&pvh_HashMethods) );
		VERIFY_ERR_CHECK( SG_vhash__count(pCtx,pvh_HashMethods,&count_HashMethods) );
		for (j=0; j<count_HashMethods; j++)
		{
			const char * pszKey_HashMethod_j;
			const SG_variant * pVariant;
			SG_int64 i64;
			SG_uint32 strlen_Hash_j;

			// Each pair maps hash-method name -> hash length, stored as int64.
			VERIFY_ERR_CHECK( SG_vhash__get_nth_pair(pCtx,pvh_HashMethods,j,&pszKey_HashMethod_j,&pVariant) );
			VERIFY_ERR_CHECK( SG_variant__get__int64(pCtx,pVariant,&i64) );
			strlen_Hash_j = (SG_uint32)i64;
			INFOP("vtable.hash_method",("Repo [%s] Hash [%s] Length [%d]",pszKey_vtable_k,pszKey_HashMethod_j,strlen_Hash_j));
		}

		// Release per-implementation resources before the next iteration.
		SG_VHASH_NULLFREE(pCtx, pvh_HashMethods);
		SG_REPO_NULLFREE(pCtx, pRepo);
	}

fail:
	SG_VHASH_NULLFREE(pCtx, pvh_HashMethods);
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_VHASH_NULLFREE(pCtx, pvh_vtables);
}
/**
 * Re-encode every blob currently stored with the given encoding back to
 * FULL encoding.  Each blob is changed in its own repo tx.
 *
 * FIX: pTx was uninitialized and the fail path never aborted an open tx,
 * so an error between begin_tx and commit_tx leaked the transaction.
 */
void SG_repo__unpack(SG_context* pCtx, SG_repo * pRepo, SG_blob_encoding blob_encoding)
{
	SG_vhash* pvh = NULL;
	SG_uint32 count = 0;
	SG_uint32 i = 0;
	SG_repo_tx_handle* pTx = NULL;	// FIX: initialize so the fail path can test it

	SG_ERR_CHECK( SG_repo__list_blobs(pCtx, pRepo, blob_encoding, SG_FALSE, SG_FALSE, 500, 0, &pvh) );
	SG_ERR_CHECK( SG_vhash__count(pCtx, pvh, &count) );
	for (i=0; i<count; i++)
	{
		const char* psz_hid = NULL;
		const SG_variant* pv = NULL;

		SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh, i, &psz_hid, &pv) );

		// Not a lot of thought went into doing each of these in its own repo tx. Consider alternatives.
		SG_ERR_CHECK( SG_repo__begin_tx(pCtx, pRepo, &pTx) );
		SG_ERR_CHECK( SG_repo__change_blob_encoding(pCtx, pRepo, pTx, psz_hid, SG_BLOBENCODING__FULL, NULL, NULL, NULL, NULL, NULL) );
		SG_ERR_CHECK( SG_repo__commit_tx(pCtx, pRepo, &pTx) );	// commit_tx nulls pTx
	}

	SG_VHASH_NULLFREE(pCtx, pvh);
	return;

fail:
	// FIX: abort a tx left open by an error; previously it leaked.
	if (pTx)
		SG_ERR_IGNORE( SG_repo__abort_tx(pCtx, pRepo, &pTx) );
	SG_VHASH_NULLFREE(pCtx, pvh);
}
/**
 * Validate that every pair in the record's vhash is well-formed:
 * a non-null key and a non-null value of string type.
 * Throws SG_ERR_DBRECORD_VALIDATION_FAILED otherwise.
 */
static void _sg_dbrecord__validate_vhash(SG_context* pCtx, const SG_dbrecord* prec)
{
	SG_uint32 nPairs;
	SG_uint32 idx;

	SG_ERR_CHECK( SG_vhash__count(pCtx, prec->pvh, &nPairs) );
	for (idx = 0; idx < nPairs; idx++)
	{
		const char* pszName = NULL;
		const SG_variant* pValue = NULL;
		SG_bool bWellFormed;

		SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, prec->pvh, idx, &pszName, &pValue) );

		bWellFormed = (pszName != NULL)
			&& (pValue != NULL)
			&& (pValue->type == SG_VARIANT_TYPE_SZ);
		if (!bWellFormed)
			SG_ERR_THROW_RETURN(SG_ERR_DBRECORD_VALIDATION_FAILED);
	}
	return;

fail:
	return;
}
// TODO consider the possible perf benefits of changing this routine // to accept lots of changeset ids instead of just one, so it // can handle them all at once. void SG_treendx__update__multiple( SG_context* pCtx, SG_treendx* pTreeNdx, SG_stringarray* psa ) { SG_changeset* pcs = NULL; sqlite3_stmt* pStmt = NULL; SG_vhash* pvh_treepaths = NULL; SG_uint32 count_treepaths = 0; SG_uint32 count_changesets = 0; SG_uint32 ics = 0; SG_NULLARGCHECK_RETURN(psa); SG_NULLARGCHECK_RETURN(pTreeNdx); SG_ERR_CHECK( SG_stringarray__count(pCtx, psa, &count_changesets) ); SG_ERR_CHECK( sg_sqlite__exec__va(pCtx, pTreeNdx->psql, "BEGIN TRANSACTION; ") ); SG_ERR_CHECK( sg_sqlite__prepare(pCtx, pTreeNdx->psql, &pStmt, "INSERT OR IGNORE INTO treendx (gid, strpath) VALUES (?, ?)") ); for (ics=0; ics<count_changesets; ics++) { const char* psz_hid = NULL; SG_uint32 i = 0; SG_ERR_CHECK( SG_stringarray__get_nth(pCtx, psa, ics, &psz_hid) ); SG_ERR_CHECK( SG_changeset__load_from_repo(pCtx, pTreeNdx->pRepo, psz_hid, &pcs) ); SG_ERR_CHECK( SG_changeset__get_treepaths(pCtx, pcs, &pvh_treepaths) ); if (pvh_treepaths) { SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_treepaths, &count_treepaths) ); for (i=0; i<count_treepaths; i++) { const char* psz_gid = NULL; const SG_variant* pv = NULL; const char* psz_path = NULL; SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_treepaths, i, &psz_gid, &pv) ); SG_ERR_CHECK( SG_variant__get__sz(pCtx, pv, &psz_path) ); SG_ERR_CHECK( sg_sqlite__reset(pCtx, pStmt) ); SG_ERR_CHECK( sg_sqlite__clear_bindings(pCtx, pStmt) ); SG_ERR_CHECK( sg_sqlite__bind_text(pCtx, pStmt, 1, psz_gid) ); SG_ERR_CHECK( sg_sqlite__bind_text(pCtx, pStmt, 2, psz_path) ); SG_ERR_CHECK( sg_sqlite__step(pCtx, pStmt, SQLITE_DONE) ); } } SG_CHANGESET_NULLFREE(pCtx, pcs); } SG_ERR_CHECK( sg_sqlite__nullfinalize(pCtx, &pStmt) ); SG_ERR_CHECK( sg_sqlite__exec__va(pCtx, pTreeNdx->psql, "COMMIT TRANSACTION; ") ); fail: SG_CHANGESET_NULLFREE(pCtx, pcs); }
/**
 * Print to the console every branch name attached to the changeset pszRefHid,
 * tagging closed branches; closed branches are suppressed entirely when
 * bShowOnlyOpenBranchNames is set.  Both vhash arguments are optional.
 */
static void _dump_branch_name(
	SG_context* pCtx,
	SG_console_stream cs,
	const char* pszRefHid,
	SG_bool bShowOnlyOpenBranchNames,
	const SG_vhash* pvhRefBranchValues,
	const SG_vhash* pvhRefClosedBranches)
{
	SG_vhash* pvhRefBranchNames = NULL;

	/* Look up the set of branch names recorded for this changeset, if any. */
	if (pvhRefBranchValues)
	{
		SG_bool bFound = SG_FALSE;

		SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRefBranchValues, pszRefHid, &bFound) );
		if (bFound)
			SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhRefBranchValues, pszRefHid, &pvhRefBranchNames) );
	}

	if (pvhRefBranchNames)
	{
		SG_uint32 nNames = 0;
		SG_uint32 idx;

		SG_ERR_CHECK( SG_vhash__count(pCtx, pvhRefBranchNames, &nNames) );
		for (idx = 0; idx < nNames; idx++)
		{
			const char* pszBranch = NULL;
			SG_bool bClosed = SG_FALSE;

			SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvhRefBranchNames, idx, &pszBranch, NULL) );
			if (pvhRefClosedBranches)
				SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRefClosedBranches, pszBranch, &bClosed) );

			/* Show the branch unless we're filtering to open branches and this one is closed.
			 * (De Morgan-simplified form of the original condition.) */
			if (!bShowOnlyOpenBranchNames || !bClosed)
				SG_ERR_CHECK( SG_console(pCtx, cs, "\t%8s: %s%s\n", "branch", pszBranch, bClosed ? " (closed)" : "") );
		}
	}

fail:
	;
}
/**
 * Exercise the closet descriptor add/remove/get/list API (two-phase
 * add_begin/add_commit variant), including the expected error cases.
 */
void u0038_test_ridesc(SG_context * pCtx)
{
	SG_closet_descriptor_handle* ph = NULL;
	SG_vhash* pvh = NULL;
	SG_vhash* pvh_all = NULL;
	SG_uint32 count = 0;

	// Ensure descriptor "1" is absent, then add it and remove it again.
	SG_ERR_IGNORE( SG_closet__descriptors__remove(pCtx, "1") );
	VERIFY_ERR_CHECK_DISCARD( SG_closet__descriptors__add_begin(pCtx, "1", NULL, NULL, NULL, NULL, &pvh, &ph) );
	VERIFY_ERR_CHECK_DISCARD( SG_closet__descriptors__add_commit(pCtx, &ph, pvh, SG_REPO_STATUS__NORMAL));
	SG_VHASH_NULLFREE(pCtx, pvh);
	VERIFY_ERR_CHECK_DISCARD( SG_closet__descriptors__remove(pCtx, "1") );
	SG_ERR_IGNORE( SG_closet__descriptors__remove(pCtx, "2") ); /* This may or may not be an error */

	/* delete one that is not there should be an error */
	VERIFY_ERR_CHECK_HAS_ERR_DISCARD( SG_closet__descriptors__remove(pCtx, "2") );

	/* fetch one that is not there should be an error */
	VERIFY_ERR_CHECK_HAS_ERR_DISCARD( SG_closet__descriptors__get(pCtx, "2", NULL, &pvh) );
	VERIFY_COND("", pvh==NULL);

	// Create descriptors "3" and "4" so the list below has at least two entries.
	SG_ERR_IGNORE( SG_closet__descriptors__remove(pCtx, "3") );
	SG_ERR_IGNORE( SG_closet__descriptors__remove(pCtx, "4") );
	VERIFY_ERR_CHECK_DISCARD( SG_closet__descriptors__add_begin(pCtx, "3", NULL, NULL, NULL, NULL, &pvh, &ph) );
	VERIFY_ERR_CHECK_DISCARD( SG_closet__descriptors__add_commit(pCtx, &ph, pvh, SG_REPO_STATUS__NORMAL) );
	SG_VHASH_NULLFREE(pCtx, pvh);
	VERIFY_ERR_CHECK_DISCARD( SG_closet__descriptors__add_begin(pCtx, "4", NULL, NULL, NULL, NULL, &pvh, &ph) );
	VERIFY_ERR_CHECK_DISCARD( SG_closet__descriptors__add_commit(pCtx, &ph, pvh, SG_REPO_STATUS__NORMAL) );
	SG_VHASH_NULLFREE(pCtx, pvh);
	VERIFY_ERR_CHECK_DISCARD( SG_closet__descriptors__get(pCtx, "3", NULL, &pvh) );
	VERIFY_ERR_CHECK_DISCARD( SG_closet__descriptors__list(pCtx, &pvh_all) );
	VERIFY_ERR_CHECK_DISCARD( SG_vhash__count(pCtx, pvh_all, &count) );

	/* adding a duplicate name should be an error */
	VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_closet__descriptors__add_begin(pCtx, "3", NULL, NULL, NULL, NULL, NULL, &ph), SG_ERR_REPO_ALREADY_EXISTS);

	// ">= 2" rather than "== 2" because the closet may hold other descriptors.
	VERIFY_COND("count", (count >= 2));

	// Clean up the descriptors created above and free everything.
	SG_ERR_IGNORE( SG_closet__descriptors__remove(pCtx, "3") );
	SG_ERR_IGNORE( SG_closet__descriptors__remove(pCtx, "4") );
	SG_VHASH_NULLFREE(pCtx, pvh_all);
	SG_VHASH_NULLFREE(pCtx, pvh);
}
/**
 * Merge every key of pvh_list into pvh_blobs with a null value
 * (update semantics: already-present keys are overwritten, not errors).
 */
static void _copy_keys_into(
	SG_context* pCtx,
	SG_vhash* pvh_list,
	SG_vhash* pvh_blobs
	)
{
	SG_uint32 nKeys = 0;
	SG_uint32 idx;

	SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_list, &nKeys) );
	for (idx = 0; idx < nKeys; idx++)
	{
		const char* pszKey = NULL;

		SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_list, idx, &pszKey, NULL) );
		SG_ERR_CHECK( SG_vhash__update__null(pCtx, pvh_blobs, pszKey) );
	}

fail:
	;
}
/**
 * List all of the installed repo implementations.
 *
 * NOTE: I don't have a way to verify that the list is complete or verify
 * NOTE: what it should contain, so I just print the list.
 */
void MyFn(list_vtables)(SG_context * pCtx)
{
	SG_vhash * pvh_vtables = NULL;
	SG_uint32 nVTables;
	SG_uint32 idx;

	// Ask the repo layer (no specific repo instance) for its implementation list.
	VERIFY_ERR_CHECK( SG_repo__query_implementation(pCtx, NULL,
													SG_REPO__QUESTION__VHASH__LIST_REPO_IMPLEMENTATIONS,
													NULL, NULL, NULL, 0,
													&pvh_vtables) );

	VERIFY_ERR_CHECK( SG_vhash__count(pCtx, pvh_vtables, &nVTables) );
	for (idx = 0; idx < nVTables; idx++)
	{
		const char * pszImplName;

		VERIFY_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_vtables, idx, &pszImplName, NULL) );
		INFOP("vtable",("Repo Implementation[%d]: [%s]",idx,pszImplName));
	}

fail:
	SG_VHASH_NULLFREE(pCtx, pvh_vtables);
}
/**
 * Append the blobs named by the keys of pvh_missing_blobs to the fragball
 * file at pPath_fragball.  Values in the vhash are ignored.
 */
void SG_sync__add_blobs_to_fragball(SG_context* pCtx, SG_repo* pRepo, SG_pathname* pPath_fragball, SG_vhash* pvh_missing_blobs)
{
	SG_uint32 nBlobs;
	const char** ppszHids = NULL;

	SG_NULLARGCHECK_RETURN(pPath_fragball);
	SG_NULLARGCHECK_RETURN(pvh_missing_blobs);

	SG_ERR_CHECK_RETURN( SG_vhash__count(pCtx, pvh_missing_blobs, &nBlobs) );

	/* Flatten the vhash keys into the hid array the fragball API wants. */
	if (nBlobs > 0)
	{
		SG_uint32 idx;

		SG_ERR_CHECK( SG_allocN(pCtx, nBlobs, ppszHids) );
		for (idx = 0; idx < nBlobs; idx++)
			SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_missing_blobs, idx, &ppszHids[idx], NULL) );
	}

	SG_ERR_CHECK( SG_fragball__append__blobs(pCtx, pPath_fragball, pRepo, ppszHids, nBlobs) );

	/* fall through */
fail:
	SG_NULLFREE(pCtx, ppszHids);
}
/**
 * Compress FULL-encoded blobs in the repo to ZLIB encoding, inside one
 * repo tx.  Blobs that report SG_ERR_REPO_BUSY are skipped rather than
 * failing the whole pack.
 *
 * FIX: pTx was uninitialized and the fail path never aborted an open tx,
 * so any error between begin_tx and commit_tx leaked the transaction.
 */
void SG_repo__pack__zlib(SG_context* pCtx, SG_repo * pRepo)
{
	SG_vhash* pvh = NULL;
	SG_uint32 count = 0;
	SG_uint32 i = 0;
	SG_repo_tx_handle* pTx = NULL;	// FIX: initialize so the fail path can test it

	SG_ERR_CHECK( SG_repo__list_blobs(pCtx, pRepo, SG_BLOBENCODING__FULL, SG_TRUE, SG_TRUE, 500, 0, &pvh) );
	SG_ERR_CHECK( SG_vhash__count(pCtx, pvh, &count) );
	SG_ERR_CHECK( SG_repo__begin_tx(pCtx, pRepo, &pTx) );
	for (i=0; i<count; i++)
	{
		const char* psz_hid = NULL;
		const SG_variant* pv = NULL;

		SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh, i, &psz_hid, &pv) );

		// Called without SG_ERR_CHECK on purpose: a busy blob is tolerated.
		SG_repo__change_blob_encoding(pCtx, pRepo, pTx, psz_hid, SG_BLOBENCODING__ZLIB, NULL, NULL, NULL, NULL, NULL);
		if (SG_context__has_err(pCtx))
		{
			if (!SG_context__err_equals(pCtx, SG_ERR_REPO_BUSY))
			{
				SG_ERR_RETHROW;
			}
			else
			{
				// Skip the busy blob and keep packing the rest.
				SG_context__err_reset(pCtx);
			}
		}
	}
	SG_ERR_CHECK( SG_repo__commit_tx(pCtx, pRepo, &pTx) );	// commit_tx nulls pTx

	SG_VHASH_NULLFREE(pCtx, pvh);
	return;

fail:
	// FIX: abort a tx left open by an error; previously it leaked.
	if (pTx)
		SG_ERR_IGNORE( SG_repo__abort_tx(pCtx, pRepo, &pTx) );
	SG_VHASH_NULLFREE(pCtx, pvh);
}
/**
 * Exercise the closet descriptor add/remove/get/list API (single-call
 * add variant), including the expected error cases.
 */
void u0038_test_ridesc(SG_context * pCtx)
{
	SG_vhash* pvh = NULL;
	SG_vhash* pvh2 = NULL;
	SG_vhash* pvh_all = NULL;
	SG_uint32 count = 0;

	// Build a small descriptor vhash to store under several names.
	VERIFY_ERR_CHECK_DISCARD( SG_VHASH__ALLOC(pCtx, &pvh) );
	VERIFY_ERR_CHECK_DISCARD( SG_vhash__add__string__sz(pCtx, pvh, "hello", "world") );
	VERIFY_ERR_CHECK_DISCARD( SG_vhash__add__string__sz(pCtx, pvh, "hola", "mundo") );

	// Ensure descriptor "1" is absent, then add it and remove it again.
	SG_ERR_IGNORE( SG_closet__descriptors__remove(pCtx, "1") );
	VERIFY_ERR_CHECK_DISCARD( SG_closet__descriptors__add(pCtx, "1", pvh) );
	VERIFY_ERR_CHECK_DISCARD( SG_closet__descriptors__remove(pCtx, "1") );
	SG_ERR_IGNORE( SG_closet__descriptors__remove(pCtx, "2") ); /* This may or may not be an error */

	/* delete one that is not there should be an error */
	VERIFY_ERR_CHECK_HAS_ERR_DISCARD( SG_closet__descriptors__remove(pCtx, "2") );

	/* fetch one that is not there should be an error */
	VERIFY_ERR_CHECK_HAS_ERR_DISCARD( SG_closet__descriptors__get(pCtx, "2", &pvh2) );

	// Add descriptors "3" and "4", fetch one back, then list them all.
	SG_ERR_IGNORE( SG_closet__descriptors__remove(pCtx, "3") );
	SG_ERR_IGNORE( SG_closet__descriptors__remove(pCtx, "4") );
	VERIFY_ERR_CHECK_DISCARD( SG_closet__descriptors__add(pCtx, "3", pvh) );
	VERIFY_ERR_CHECK_DISCARD( SG_closet__descriptors__add(pCtx, "4", pvh) );
	VERIFY_ERR_CHECK_DISCARD( SG_closet__descriptors__get(pCtx, "3", &pvh2) );
	VERIFY_ERR_CHECK_DISCARD( SG_closet__descriptors__list(pCtx, &pvh_all) );
	VERIFY_ERR_CHECK_DISCARD( SG_vhash__count(pCtx, pvh_all, &count) );

	// ">= 2" rather than "== 2" because the closet may hold other descriptors.
	VERIFY_COND("count", (count >= 2));

	// Clean up the descriptors created above and free everything.
	SG_ERR_IGNORE( SG_closet__descriptors__remove(pCtx, "3") );
	SG_ERR_IGNORE( SG_closet__descriptors__remove(pCtx, "4") );
	SG_VHASH_NULLFREE(pCtx, pvh_all);
	SG_VHASH_NULLFREE(pCtx, pvh);
	SG_VHASH_NULLFREE(pCtx, pvh2);
}
/**
 * Build a dagfrag starting at the nodes in prbStartFromHids that reaches
 * back enough generations to (best-guess) connect with the other side's
 * known nodes.
 *
 * pvhConnectToHidsAndGens (optional) maps hid -> generation for the nodes
 * the other side already has; the fetch depth is the span between the
 * newest start node and the oldest connect-to node.  When no connect-to
 * info is available, a fixed fallback depth is used.
 *
 * On success *ppFrag receives the new frag (caller frees).
 */
void SG_sync__build_best_guess_dagfrag(
	SG_context* pCtx,
	SG_repo* pRepo,
	SG_uint64 iDagNum,
	SG_rbtree* prbStartFromHids,
	SG_vhash* pvhConnectToHidsAndGens,
	SG_dagfrag** ppFrag)
{
	SG_uint32 i, countConnectTo;
	SG_rbtree_iterator* pit = NULL;
	SG_dagnode* pdn = NULL;
	SG_dagfrag* pFrag = NULL;
	SG_repo_fetch_dagnodes_handle* pdh = NULL;
	SG_int32 minGen = SG_INT32_MAX;
	SG_int32 maxGen = 0;
	SG_uint32 gensToFetch = 0;
	char* psz_repo_id = NULL;
	char* psz_admin_id = NULL;
	SG_bool bNextHid;
	const char* pszRefHid;
	SG_int32 gen;
#if TRACE_SYNC
	SG_int64 startTime;
	SG_int64 endTime;
#endif

	SG_NULLARGCHECK_RETURN(prbStartFromHids);

	/* Find the minimum generation in pertinent "connect to" nodes. */
	if (pvhConnectToHidsAndGens)
	{
		SG_ERR_CHECK( SG_vhash__count(pCtx, pvhConnectToHidsAndGens, &countConnectTo) );
		for (i = 0; i < countConnectTo; i++)
		{
			SG_int32 gen;	// NOTE(review): shadows the outer 'gen'; harmless but worth knowing

			SG_ERR_CHECK( SG_vhash__get_nth_pair__int32(pCtx, pvhConnectToHidsAndGens, i, &pszRefHid, &gen) );
			if (gen < minGen)
				minGen = gen;
		}
	}

	/* Happens when pulling into an empty repo, or when an entire dag is specifically requested. */
	if (minGen == SG_INT32_MAX)
		minGen = -1;

	SG_ERR_CHECK( SG_repo__fetch_dagnodes__begin(pCtx, pRepo, iDagNum, &pdh) );

	/* Find the maximum generation in pertinent "start from" nodes. */
	SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &pit, prbStartFromHids, &bNextHid, &pszRefHid, NULL) );
	while (bNextHid)
	{
		SG_ERR_CHECK( SG_repo__fetch_dagnodes__one(pCtx, pRepo, pdh, pszRefHid, &pdn) );
		SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pdn, &gen) );
		if (gen > maxGen)
			maxGen = gen;
		SG_DAGNODE_NULLFREE(pCtx, pdn);
		SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, pit, &bNextHid, &pszRefHid, NULL) );
	}
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);

	// Fetch the generation span between the newest start node and the oldest
	// connect-to node; fall back to a fixed depth when that span is unknown
	// or non-positive.
	if (maxGen <= minGen)
		gensToFetch = FALLBACK_GENS_PER_ROUNDTRIP;
	else
		gensToFetch = maxGen - minGen;

#if TRACE_SYNC
	{
		char buf_dagnum[SG_DAGNUM__BUF_MAX__HEX];
		SG_uint32 count;

		SG_ERR_CHECK( SG_dagnum__to_sz__hex(pCtx, iDagNum, buf_dagnum, sizeof(buf_dagnum)) );
		SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Building best guess dagfrag for dag %s...\n", buf_dagnum) );
		SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Starting from nodes:\n") );
		SG_ERR_CHECK( SG_rbtree_debug__dump_keys_to_console(pCtx, prbStartFromHids) );
		SG_ERR_CHECK( SG_vhash_debug__dump_to_console__named(pCtx, pvhConnectToHidsAndGens, "Connecting to nodes") );
		SG_ERR_CHECK( SG_rbtree__count(pCtx, prbStartFromHids, &count) );
		SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "result has %d generations from %u starting nodes.\n", gensToFetch, count) );
		SG_ERR_CHECK( SG_console__flush(pCtx, SG_CS_STDERR) );
		SG_ERR_CHECK( SG_time__get_milliseconds_since_1970_utc(pCtx, &startTime) );
	}
#endif

	/* Return a frag with the corresponding generations filled in. */
	SG_ERR_CHECK( SG_repo__get_repo_id(pCtx, pRepo, &psz_repo_id) );
	SG_ERR_CHECK( SG_repo__get_admin_id(pCtx, pRepo, &psz_admin_id) );
	SG_ERR_CHECK( SG_dagfrag__alloc(pCtx, &pFrag, psz_repo_id, psz_admin_id, iDagNum) );
	SG_ERR_CHECK( SG_dagfrag__load_from_repo__multi(pCtx, pFrag, pRepo, prbStartFromHids, gensToFetch) );

#if TRACE_SYNC
	SG_ERR_CHECK( SG_time__get_milliseconds_since_1970_utc(pCtx, &endTime) );
	{
		SG_uint32 dagnodeCount;
		double seconds = ((double)endTime-(double)startTime)/1000;

		SG_ERR_CHECK( SG_dagfrag__dagnode_count(pCtx, pFrag, &dagnodeCount) );
		SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, " - %u nodes in frag, built in %1.3f seconds\n", dagnodeCount, seconds) );
		SG_ERR_CHECK( SG_dagfrag_debug__dump__console(pCtx, pFrag, "best-guess dagfrag", 0, SG_CS_STDERR) );
	}
#endif

	// Transfer ownership of the frag to the caller.
	*ppFrag = pFrag;
	pFrag = NULL;

	/* Common cleanup */
fail:
	SG_NULLFREE(pCtx, psz_repo_id);
	SG_NULLFREE(pCtx, psz_admin_id);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_DAGNODE_NULLFREE(pCtx, pdn);
	SG_DAGFRAG_NULLFREE(pCtx, pFrag);
	SG_ERR_IGNORE( SG_repo__fetch_dagnodes__end(pCtx, pRepo, &pdh) );
}
/**
 * Compute the db delta between the dag root and psz_csid_to.
 * On success *ppvh holds the surviving "add" set (adds along the direct
 * path minus removes); it is NULL when no direct path exists.
 * Caller frees *ppvh.
 */
void SG_repo__db__calc_delta_from_root(
	SG_context * pCtx,
	SG_repo* pRepo,
	SG_uint64 dagnum,
	const char* psz_csid_to,
	SG_uint32 flags,
	SG_vhash** ppvh
	)
{
	SG_varray* pva_path = NULL;
	SG_vhash* pvh_added = NULL;
	SG_vhash* pvh_removed = NULL;

	SG_NULLARGCHECK_RETURN(psz_csid_to);
	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(ppvh);

	SG_ERR_CHECK( SG_repo__dag__find_direct_path_from_root(pCtx, pRepo, dagnum, psz_csid_to, &pva_path) );
	if (pva_path)
	{
		SG_uint32 nRemoved = 0;
		SG_uint32 idx;

		SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_added) );
		SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_removed) );
		SG_ERR_CHECK( SG_db__make_delta_from_path(pCtx, pRepo, dagnum, pva_path, flags, pvh_removed, pvh_added) );

		/* Subtract each removed key from the added set.
		 * TODO it would be nice to have a batch remove so vhash would only have to rehash once */
		SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_removed, &nRemoved) );
		for (idx = 0; idx < nRemoved; idx++)
		{
			const char* pszKey = NULL;

			SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_removed, idx, &pszKey, NULL) );
			SG_ERR_CHECK( SG_vhash__remove(pCtx, pvh_added, pszKey) );
		}
	}

	/* Hand the surviving add-set to the caller. */
	*ppvh = pvh_added;
	pvh_added = NULL;

fail:
	SG_VHASH_NULLFREE(pCtx, pvh_added);
	SG_VHASH_NULLFREE(pCtx, pvh_removed);
	SG_VARRAY_NULLFREE(pCtx, pva_path);
}
/**
 * Service a sync fragball request: build a fragball file in
 * pFragballDirPathname and return its (generated) filename in
 * ppszFragballName (caller frees).
 *
 * pvhRequest == NULL means "send the leaves of every dag".  Otherwise the
 * request vhash may carry clone / since / dags / blobs / leaves sections,
 * handled in turn below.
 *
 * FIX: in the LEAVES handling, the per-dag lookup called SG_vhash__has on
 * pvhRequest instead of the leaves vhash and then ignored the result, so a
 * requested dag with no entry under "leaves" made the unconditional
 * get__vhash that followed throw.  The lookup now targets the leaves vhash
 * and a missing entry falls back to a NULL connect-to set.
 */
void SG_sync_remote__request_fragball(
	SG_context* pCtx,
	SG_repo* pRepo,
	const SG_pathname* pFragballDirPathname,
	SG_vhash* pvhRequest,
	char** ppszFragballName)
{
	SG_pathname* pFragballPathname = NULL;
	SG_uint64* paDagNums = NULL;
	SG_string* pstrFragballName = NULL;
	SG_rbtree* prbDagnodes = NULL;
	SG_rbtree_iterator* pit = NULL;
	SG_rev_spec* pRevSpec = NULL;
	SG_stringarray* psaFullHids = NULL;
	SG_rbtree* prbDagnums = NULL;
	SG_dagfrag* pFrag = NULL;
	char* pszRepoId = NULL;
	char* pszAdminId = NULL;
	SG_fragball_writer* pfb = NULL;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pFragballDirPathname);

	// Generate a unique filename for the new fragball.
	{
		char buf_filename[SG_TID_MAX_BUFFER_LENGTH];
		SG_ERR_CHECK( SG_tid__generate(pCtx, buf_filename, sizeof(buf_filename)) );
		SG_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pFragballPathname, pFragballDirPathname, buf_filename) );
	}

	if (!pvhRequest)
	{
		// Add leaves from every dag to the fragball.
		SG_uint32 count_dagnums;
		SG_uint32 i;

		SG_ERR_CHECK( SG_fragball_writer__alloc(pCtx, pRepo, pFragballPathname, SG_TRUE, 2, &pfb) );
		SG_ERR_CHECK( SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums) );

		for (i=0; i<count_dagnums; i++)
		{
			SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo, paDagNums[i], &prbDagnodes) );
			SG_ERR_CHECK( SG_fragball__write__dagnodes(pCtx, pfb, paDagNums[i], prbDagnodes) );
			SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
		}

		SG_ERR_CHECK( SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName) );
		SG_ERR_CHECK( SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName) );
		SG_ERR_CHECK( SG_fragball_writer__close(pCtx, pfb) );
	}
	else
	{
		// Specific dags/nodes were requested. Build that fragball.
		SG_bool found;

#if TRACE_SYNC_REMOTE && 0
		SG_ERR_CHECK( SG_vhash_debug__dump_to_console__named(pCtx, pvhRequest, "fragball request") );
#endif

		SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE, &found) );
		if (found)
		{
			// SG_SYNC_STATUS_KEY__CLONE_REQUEST is currently ignored
			SG_ERR_CHECK( SG_repo__fetch_repo__fragball(pCtx, pRepo, 3, pFragballDirPathname, ppszFragballName) );
		}
		else
		{
			// Not a full clone.
			SG_ERR_CHECK( SG_fragball_writer__alloc(pCtx, pRepo, pFragballPathname, SG_TRUE, 2, &pfb) );

			SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__SINCE, &found) );
			if (found)
			{
				SG_vhash* pvh_since = NULL;

				SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__SINCE, &pvh_since) );
				SG_ERR_CHECK( _do_since(pCtx, pRepo, pvh_since, pfb) );
			}

			SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &found) );
			if (found)
			{
				// Specific Dagnodes were requested. Add just those nodes to our "start from" rbtree.
				SG_vhash* pvhDags;
				SG_uint32 count_requested_dagnums;
				SG_uint32 i;
				const SG_variant* pvRevSpecs = NULL;
				SG_vhash* pvhRevSpec = NULL;

				// For each requested dag, get rev spec request.
				SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags) );
				SG_ERR_CHECK( SG_vhash__count(pCtx, pvhDags, &count_requested_dagnums) );
				if (count_requested_dagnums)
					SG_ERR_CHECK( SG_repo__list_dags__rbtree(pCtx, pRepo, &prbDagnums) );

				for (i=0; i<count_requested_dagnums; i++)
				{
					SG_bool isValidDagnum = SG_FALSE;
					SG_bool bSpecificNodesRequested = SG_FALSE;
					const char* pszRefDagNum = NULL;
					SG_uint64 iDagnum;

					// Get the dag's missing node vhash.
					SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvhDags, i, &pszRefDagNum, &pvRevSpecs) );

					// Verify that requested dagnum exists
					SG_ERR_CHECK( SG_rbtree__find(pCtx, prbDagnums, pszRefDagNum, &isValidDagnum, NULL) );
					if (!isValidDagnum)
						continue;

					SG_ERR_CHECK( SG_dagnum__from_sz__hex(pCtx, pszRefDagNum, &iDagnum) );

					if (pvRevSpecs && pvRevSpecs->type != SG_VARIANT_TYPE_NULL)
					{
						SG_uint32 countRevSpecs = 0;

						SG_ERR_CHECK( SG_variant__get__vhash(pCtx, pvRevSpecs, &pvhRevSpec) );
						SG_ERR_CHECK( SG_rev_spec__from_vash(pCtx, pvhRevSpec, &pRevSpec) );

						// Process the rev spec for each dag
						SG_ERR_CHECK( SG_rev_spec__count(pCtx, pRevSpec, &countRevSpecs) );
						if (countRevSpecs > 0)
						{
							bSpecificNodesRequested = SG_TRUE;

							SG_ERR_CHECK( SG_rev_spec__get_all__repo(pCtx, pRepo, pRevSpec, SG_TRUE, &psaFullHids, NULL) );
							SG_ERR_CHECK( SG_stringarray__to_rbtree_keys(pCtx, psaFullHids, &prbDagnodes) );
							SG_STRINGARRAY_NULLFREE(pCtx, psaFullHids);
						}
						SG_REV_SPEC_NULLFREE(pCtx, pRevSpec);
					}

					if (!bSpecificNodesRequested)
					{
						// When no specific nodes are in the request, add all leaves.
						SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagnum, &prbDagnodes) );
					}

					if (prbDagnodes) // can be null when leaves of an empty dag are requested
					{
						// Get the leaves of the other repo, which we need to connect to.
						SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__LEAVES, &found) );
						if (found)
						{
							SG_vhash* pvhRefAllLeaves;
							SG_vhash* pvhRefDagLeaves = NULL;

							SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__LEAVES, &pvhRefAllLeaves) );

							// FIX: look the dagnum up in the leaves vhash (the old code
							// queried pvhRequest and ignored the result, so a dag absent
							// from "leaves" made the following get__vhash throw).
							SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRefAllLeaves, pszRefDagNum, &found) );
							if (found)
								SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhRefAllLeaves, pszRefDagNum, &pvhRefDagLeaves) );

							// A NULL connect-to set makes build_best_guess fall back
							// to its fixed fetch depth.
							SG_ERR_CHECK( SG_sync__build_best_guess_dagfrag(pCtx, pRepo, iDagnum, prbDagnodes, pvhRefDagLeaves, &pFrag) );
						}
						else
						{
							// The other repo's leaves weren't provided: add just the requested nodes, make no attempt to connect.
							SG_ERR_CHECK( SG_repo__get_repo_id(pCtx, pRepo, &pszRepoId) );
							SG_ERR_CHECK( SG_repo__get_admin_id(pCtx, pRepo, &pszAdminId) );
							SG_ERR_CHECK( SG_dagfrag__alloc(pCtx, &pFrag, pszRepoId, pszAdminId, iDagnum) );
							SG_ERR_CHECK( SG_dagfrag__load_from_repo__simple(pCtx, pFrag, pRepo, prbDagnodes) );
							SG_NULLFREE(pCtx, pszRepoId);
							SG_NULLFREE(pCtx, pszAdminId);
						}

						SG_ERR_CHECK( SG_fragball__write__frag(pCtx, pfb, pFrag) );

						SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
						SG_DAGFRAG_NULLFREE(pCtx, pFrag);
					}
				} // dagnum loop
			} // if "dags" exists

			/* Add requested blobs to the fragball */
			SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &found) );
			if (found)
			{
				// Blobs were requested.
				SG_vhash* pvhBlobs;
				SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &pvhBlobs) );
				SG_ERR_CHECK( SG_sync__add_blobs_to_fragball(pCtx, pfb, pvhBlobs) );
			}

			SG_ERR_CHECK( SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName) );
			SG_ERR_CHECK( SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName) );
		}
		SG_ERR_CHECK( SG_fragball_writer__close(pCtx, pfb) );
	}

	/* fallthru */
fail:
	// If we had an error, delete the half-baked fragball.
	if (pFragballPathname && SG_context__has_err(pCtx))
		SG_ERR_IGNORE( SG_fsobj__remove__pathname(pCtx, pFragballPathname) );

	SG_PATHNAME_NULLFREE(pCtx, pFragballPathname);
	SG_NULLFREE(pCtx, paDagNums);
	SG_STRING_NULLFREE(pCtx, pstrFragballName);
	SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_RBTREE_NULLFREE(pCtx, prbDagnums);
	SG_REV_SPEC_NULLFREE(pCtx, pRevSpec);
	SG_STRINGARRAY_NULLFREE(pCtx, psaFullHids);
	SG_DAGFRAG_NULLFREE(pCtx, pFrag);
	SG_NULLFREE(pCtx, pszRepoId);
	SG_NULLFREE(pCtx, pszAdminId);
	SG_FRAGBALL_WRITER_NULLFREE(pCtx, pfb);
}
/**
 * Handle the "since" section of a fragball request: for each dag listed in
 * pvh_since (dagnum-hex -> varray of nodes the requester already has),
 * find the dagnodes added since, and write those nodes plus every blob
 * they require into the fragball.
 */
static void _do_since(
	SG_context* pCtx,
	SG_repo* pRepo,
	SG_vhash* pvh_since,
	SG_fragball_writer* pfb
	)
{
	SG_uint32 count_dagnums = 0;
	SG_uint32 i_dagnum = 0;
	SG_ihash* pih_new = NULL;
	SG_rbtree* prb = NULL;
	SG_vhash* pvh_blobs = NULL;
	SG_changeset* pcs = NULL;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pvh_since);
	SG_NULLARGCHECK_RETURN(pfb);

	// TODO do we need to deal with dags which are present here but not in pvh_since?

	SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_since, &count_dagnums) );
	for (i_dagnum=0; i_dagnum<count_dagnums; i_dagnum++)
	{
		const char* psz_dagnum = NULL;
		SG_varray* pva_nodes = NULL;
		SG_uint64 dagnum = 0;

		// Key is the dagnum in hex; value is the requester's known nodes.
		SG_ERR_CHECK( SG_vhash__get_nth_pair__varray(pCtx, pvh_since, i_dagnum, &psz_dagnum, &pva_nodes) );
		SG_ERR_CHECK( SG_dagnum__from_sz__hex(pCtx, psz_dagnum, &dagnum) );

		SG_ERR_CHECK( SG_repo__find_new_dagnodes_since(pCtx, pRepo, dagnum, pva_nodes, &pih_new) );
		if (pih_new)
		{
			SG_uint32 count = 0;

			SG_ERR_CHECK( SG_ihash__count(pCtx, pih_new, &count) );
			if (count)
			{
				SG_uint32 i = 0;

				SG_ERR_CHECK( SG_rbtree__alloc(pCtx, &prb) );
				SG_ERR_CHECK( SG_vhash__alloc(pCtx, &pvh_blobs) );
				for (i=0; i<count; i++)
				{
					const char* psz_node = NULL;

					SG_ERR_CHECK( SG_ihash__get_nth_pair(pCtx, pih_new, i, &psz_node, NULL) );
					SG_ERR_CHECK( SG_rbtree__add(pCtx, prb, psz_node) );

					// Need the changeset blob itself plus everything it references.
					SG_ERR_CHECK( SG_vhash__add__null(pCtx, pvh_blobs, psz_node) );
					SG_ERR_CHECK( SG_changeset__load_from_repo(pCtx, pRepo, psz_node, &pcs) );
					SG_ERR_CHECK( _add_necessary_blobs(pCtx, pcs, pvh_blobs) );
					SG_CHANGESET_NULLFREE(pCtx, pcs);
				}

				// put all these new nodes in the frag
				SG_ERR_CHECK( SG_fragball__write__dagnodes(pCtx, pfb, dagnum, prb) );

				// and the blobs
				SG_ERR_CHECK( SG_sync__add_blobs_to_fragball(pCtx, pfb, pvh_blobs) );

				SG_VHASH_NULLFREE(pCtx, pvh_blobs);
				SG_RBTREE_NULLFREE(pCtx, prb);
			}
			SG_IHASH_NULLFREE(pCtx, pih_new);
		}
	}

fail:
	SG_CHANGESET_NULLFREE(pCtx, pcs);
	SG_VHASH_NULLFREE(pCtx, pvh_blobs);
	SG_RBTREE_NULLFREE(pCtx, prb);
	SG_IHASH_NULLFREE(pCtx, pih_new);
}
/**
 * Add to pvh_blobs (keys = blob hids, null values) every blob the given
 * changeset requires beyond the changeset blob itself: for db dags the
 * record/attachment adds and the template; for tree dags the root treenode
 * and each changed entry's hid.
 */
static void _add_necessary_blobs(
	SG_context* pCtx,
	SG_changeset* pcs,
	SG_vhash* pvh_blobs
	)
{
	SG_uint64 dagnum = 0;

	SG_ERR_CHECK( SG_changeset__get_dagnum(pCtx, pcs, &dagnum) );
	if (SG_DAGNUM__IS_DB(dagnum))
	{
		SG_vhash* pvh_changes = NULL;
		SG_uint32 count_parents = 0;
		SG_uint32 i_parent = 0;

		// The changes vhash holds one sub-vhash per parent changeset.
		SG_ERR_CHECK( SG_changeset__db__get_changes(pCtx, pcs, &pvh_changes) );
		SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_changes, &count_parents) );
		for (i_parent=0; i_parent<count_parents; i_parent++)
		{
			SG_vhash* pvh_changes_for_one_parent = NULL;
			SG_vhash* pvh_add = NULL;

			SG_ERR_CHECK( SG_vhash__get_nth_pair__vhash(pCtx, pvh_changes, i_parent, NULL, &pvh_changes_for_one_parent) );

			// Record blobs added relative to this parent ("add" is optional).
			SG_ERR_CHECK( SG_vhash__check__vhash(pCtx, pvh_changes_for_one_parent, "add", &pvh_add) );
			if (pvh_add)
			{
				SG_ERR_CHECK( _copy_keys_into(pCtx, pvh_add, pvh_blobs) );
			}

			// Attachment blobs added relative to this parent (also optional).
			pvh_add = NULL;
			SG_ERR_CHECK( SG_vhash__check__vhash(pCtx, pvh_changes_for_one_parent, "attach_add", &pvh_add) );
			if (pvh_add)
			{
				SG_ERR_CHECK( _copy_keys_into(pCtx, pvh_add, pvh_blobs) );
			}
		}

		if (!SG_DAGNUM__HAS_HARDWIRED_TEMPLATE(dagnum))
		{
			const char* psz_hid_template = NULL;

			// The db template is itself a blob this changeset depends on.
			SG_ERR_CHECK( SG_changeset__db__get_template(pCtx, pcs, &psz_hid_template) );
			SG_ERR_CHECK( SG_vhash__update__null(pCtx, pvh_blobs, psz_hid_template) );
		}
	}
	if (SG_DAGNUM__IS_TREE(dagnum))
	{
		SG_vhash* pvh_changes = NULL;
		const char* psz_root = NULL;
		SG_uint32 count_parents = 0;
		SG_uint32 i_parent = 0;

		// The root treenode blob is always needed.
		SG_ERR_CHECK( SG_changeset__tree__get_root(pCtx, pcs, &psz_root) );
		SG_ERR_CHECK( SG_vhash__update__null(pCtx, pvh_blobs, psz_root) );

		SG_ERR_CHECK( SG_changeset__tree__get_changes(pCtx, pcs, &pvh_changes) );
		SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_changes, &count_parents) );
		for (i_parent=0; i_parent<count_parents; i_parent++)
		{
			SG_vhash* pvh_changes_for_one_parent = NULL;
			SG_uint32 count_gids = 0;
			SG_uint32 i_gid = 0;

			SG_ERR_CHECK( SG_vhash__get_nth_pair__vhash(pCtx, pvh_changes, i_parent, NULL, &pvh_changes_for_one_parent) );
			SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_changes_for_one_parent, &count_gids) );
			for (i_gid=0; i_gid<count_gids; i_gid++)
			{
				const char* psz_hid = NULL;
				const SG_variant* pv = NULL;

				// Each pair maps a gid to a variant; only vhash values carry
				// an entry-info vhash which may name a new blob hid.
				SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_changes_for_one_parent, i_gid, NULL, &pv) );
				if (SG_VARIANT_TYPE_VHASH == pv->type)
				{
					SG_vhash* pvh_info = NULL;

					SG_ERR_CHECK( SG_variant__get__vhash(pCtx, pv, &pvh_info) );
					// NOTE(review): "CHANGEESET" spelling matches the project-wide
					// constant name; do not "fix" it here alone.
					SG_ERR_CHECK( SG_vhash__check__sz(pCtx, pvh_info, SG_CHANGEESET__TREE__CHANGES__HID, &psz_hid) );
					if (psz_hid)
					{
						SG_ERR_CHECK( SG_vhash__update__null(pCtx, pvh_blobs, psz_hid) );
					}
				}
			}
		}
	}

fail:
	;
}
/**
 * Sync the users dag from pRepoSrcNotMine into every repo in the closet
 * (plus the user-master repo if it exists).  If a pull produces a new
 * leaf in a destination repo, that repo becomes the new source and the
 * loop restarts, so the whole closet converges on one users leaf.
 *
 * pszRefHidLeafSrc (optional) is the source users-dag leaf; computed here
 * when not supplied.  On success, *ppvaSyncedUserList (optional) receives
 * the final user list (caller frees).
 */
void SG_sync__closet_user_dags(
	SG_context* pCtx,
	SG_repo* pRepoSrcNotMine,
	const char* pszRefHidLeafSrc,
	SG_varray** ppvaSyncedUserList)
{
	char* pszSrcAdminId = NULL;
	char* pszHidLeafSrc = NULL;	// leaf hid we own (vs. the borrowed pszRefHidLeafSrc)
	char* pszHidLeafDest = NULL;
	SG_vhash* pvhDescriptors = NULL;
	SG_repo* pRepoDest = NULL;
	SG_repo* pRepoSrcMine = NULL;	// source repo we own (vs. the borrowed pRepoSrcNotMine)
	char* pszDestAdminId = NULL;

	/* Using disallowed characters to ensure no collision with an actual repo name.
	 * Note that this isn't actually stored anywhere--we just use it as a key in the
	 * vhash below where the /real/ repos have descriptor names. */
	const char* pszRefUserMasterFakeName = "\\/USER_MASTER\\/";

	/* The repo routines do a null arg check of pRepoSrcNotMine. The other args are optional. */

	if (!pszRefHidLeafSrc)
	{
		SG_ERR_CHECK( SG_zing__get_leaf(pCtx, pRepoSrcNotMine, NULL, SG_DAGNUM__USERS, &pszHidLeafSrc) );
		pszRefHidLeafSrc = pszHidLeafSrc;
	}

	/* Add all repositories in "normal" status, to the list we'll iterate over. */
	SG_ERR_CHECK( SG_closet__descriptors__list(pCtx, &pvhDescriptors) );

	/* If it exists, add the user master repo to the list. */
	{
		SG_bool bExists = SG_FALSE;
		SG_ERR_CHECK( SG_repo__user_master__exists(pCtx, &bExists) );
		if (bExists)
			SG_ERR_CHECK( SG_vhash__add__null(pCtx, pvhDescriptors, pszRefUserMasterFakeName) );
	}

	/* Iterate over the repositories, syncing the user database. */
	{
		SG_int32 i = 0;	// signed: set to -1 below to restart the loop
		SG_uint32 numDescriptors = 0;

		SG_ERR_CHECK( SG_vhash__count(pCtx, pvhDescriptors, &numDescriptors) );
		for(i = 0; i < (SG_int32)numDescriptors; i++)
		{
			const char* pszRefNameDest = NULL;
			SG_bool bAdminIdsMatch = SG_TRUE;
			const SG_variant* pvRefDest = NULL;

			/* Note that the source repo will be in this loop, too, but we don't need to check for
			 * it, adding another strcmp, because the leaf hid comparison below will effectively
			 * skip it. So we do one extra leaf fetch and comparison, total, rather than an extra
			 * strcmp for every repo in the closet. */
			SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvhDescriptors, i, &pszRefNameDest, &pvRefDest) );

			// A null value marks the fake user-master entry; real repos open by name.
			if (SG_VARIANT_TYPE_NULL == pvRefDest->type)
				SG_ERR_CHECK( SG_REPO__USER_MASTER__OPEN(pCtx, &pRepoDest) );
			else
				SG_ERR_CHECK( SG_REPO__OPEN_REPO_INSTANCE(pCtx, pszRefNameDest, &pRepoDest) );

			SG_ERR_CHECK( SG_zing__get_leaf(pCtx, pRepoDest, NULL, SG_DAGNUM__USERS, &pszHidLeafDest) );
			if (strcmp(pszRefHidLeafSrc, pszHidLeafDest))
			{
				/* Pull from source to dest.
				 * Pull is generally faster than push, so we're using it on purpose. */
				SG_pull__admin__local(pCtx, pRepoDest, pRepoSrcNotMine, NULL);
				if (SG_context__has_err(pCtx))
				{
					/* If there's an admin id mismatch, don't die. Log a warning and move on. */
					if (SG_context__err_equals(pCtx, SG_ERR_ADMIN_ID_MISMATCH))
					{
						const char* pszRefNameSrc = NULL;

						SG_ERR_DISCARD;

						SG_ERR_CHECK( SG_repo__get_descriptor_name(pCtx, pRepoSrcNotMine, &pszRefNameSrc) );
						if (!pszRefNameSrc)
							pszRefNameSrc = pszRefUserMasterFakeName;
						SG_ERR_CHECK( SG_repo__get_admin_id(pCtx, pRepoSrcNotMine, &pszSrcAdminId) );
						SG_ERR_CHECK( SG_repo__get_admin_id(pCtx, pRepoDest, &pszDestAdminId) );

						SG_ERR_CHECK( SG_log__report_warning(pCtx, "admin-id mismatch when syncing users: source repo %s has %s, dest repo %s has %s", pszRefNameSrc, pszSrcAdminId, pszRefNameDest, pszDestAdminId) );

						bAdminIdsMatch = SG_FALSE;

						SG_NULLFREE(pCtx, pszDestAdminId);
						SG_NULLFREE(pCtx, pszSrcAdminId);
					}
					else
						SG_ERR_RETHROW;
				}

				if (bAdminIdsMatch)
				{
					// Re-read the dest leaf to see whether the pull changed it.
					SG_NULLFREE(pCtx, pszHidLeafDest);
					SG_ERR_CHECK( SG_zing__get_leaf(pCtx, pRepoDest, NULL, SG_DAGNUM__USERS, &pszHidLeafDest) );
					if (strcmp(pszRefHidLeafSrc, pszHidLeafDest))
					{
						/* The pull from source to dest resulted in a new leaf.
						 * Use the new leaf and restart the loop.
						 * Ownership transfers: the dest leaf/repo become the new
						 * (owned) source leaf/repo. */
						SG_NULLFREE(pCtx, pszHidLeafSrc);
						pszRefHidLeafSrc = pszHidLeafSrc = pszHidLeafDest;
						pszHidLeafDest = NULL;
						SG_REPO_NULLFREE(pCtx, pRepoSrcMine);
						pRepoSrcNotMine = pRepoSrcMine = pRepoDest;
						pRepoDest = NULL;
						i = -1; /* start again at the first descriptor */
					}
				}
			}

			SG_NULLFREE(pCtx, pszHidLeafDest);
			SG_REPO_NULLFREE(pCtx, pRepoDest);
		}
	}

	if (ppvaSyncedUserList)
		SG_ERR_CHECK( SG_user__list_all(pCtx, pRepoSrcNotMine, ppvaSyncedUserList) );

	/* fall through */
fail:
	SG_NULLFREE(pCtx, pszSrcAdminId);
	SG_NULLFREE(pCtx, pszHidLeafSrc);
	SG_NULLFREE(pCtx, pszHidLeafDest);
	SG_VHASH_NULLFREE(pCtx, pvhDescriptors);
	SG_REPO_NULLFREE(pCtx, pRepoDest);
	SG_REPO_NULLFREE(pCtx, pRepoSrcMine);
	SG_NULLFREE(pCtx, pszDestAdminId);
}
/**
 * Collect, into prb_blobs, every blob referenced (directly or via ancestry)
 * by the given changeset: its tree-user-file blobs, its treenode blobs, the
 * blobs of its directory tree, and then (recursively) the same for each of
 * its parent changesets.
 *
 * pCtx        - error/context object.
 * pRepo       - repo to read the changeset and blobs from.
 * psz_hid_cs  - HID of the changeset to walk.
 * prb_blobs   - caller-owned rbtree accumulating blob HIDs across the
 *               whole recursive walk.
 */
void sg_pack__do_changeset(SG_context* pCtx, SG_repo* pRepo, const char* psz_hid_cs, SG_rbtree* prb_blobs)
{
	SG_changeset* pcs = NULL;
	SG_int32 gen = 0;
	SG_uint32 count_blobs = 0;
	SG_uint32 count_parents = 0;
	SG_varray* pva_parents = NULL;
	SG_uint32 i;
	SG_rbtree* prb_new = NULL;
	const char* psz_hid_root_treenode = NULL;
	const char* psz_key = NULL;
	SG_vhash* pvh_lbl = NULL;
	SG_vhash* pvh_blobs = NULL;

	SG_ERR_CHECK(  SG_changeset__load_from_repo(pCtx, pRepo, psz_hid_cs, &pcs)  );
	SG_ERR_CHECK(  SG_changeset__get_root(pCtx, pcs, &psz_hid_root_treenode)  );
	SG_ERR_CHECK(  SG_changeset__get_generation(pCtx, pcs, &gen)  );

	SG_ERR_CHECK(  SG_changeset__get_list_of_bloblists(pCtx, pcs, &pvh_lbl)  );

	/* add all the tree user file blobs */
	SG_ERR_CHECK(  SG_changeset__get_bloblist_name(pCtx, SG_BLOB_REFTYPE__TREEUSERFILE, &psz_key)  );
	SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvh_lbl, psz_key, &pvh_blobs)  );
	SG_ERR_CHECK(  SG_vhash__count(pCtx, pvh_blobs, &count_blobs)  );

	/* FIX: allocate the rbtree AFTER the first count so the size hint is
	 * meaningful; the original allocated it while count_blobs was still 0. */
	SG_ERR_CHECK(  SG_RBTREE__ALLOC__PARAMS(pCtx, &prb_new, count_blobs, NULL)  );

	for (i=0; i<count_blobs; i++)
	{
		const char* psz_hid = NULL;

		SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvh_blobs, i, &psz_hid, NULL)  );
		SG_ERR_CHECK(  SG_rbtree__add(pCtx, prb_new, psz_hid)  );
	}

	/* and the treenode blobs */
	SG_ERR_CHECK(  SG_changeset__get_bloblist_name(pCtx, SG_BLOB_REFTYPE__TREENODE, &psz_key)  );
	SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvh_lbl, psz_key, &pvh_blobs)  );
	SG_ERR_CHECK(  SG_vhash__count(pCtx, pvh_blobs, &count_blobs)  );

	for (i=0; i<count_blobs; i++)
	{
		const char* psz_hid = NULL;

		/* BUG FIX: the original loop never fetched the nth pair here, so it
		 * passed a NULL hid to SG_rbtree__add for every treenode blob. */
		SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvh_blobs, i, &psz_hid, NULL)  );
		SG_ERR_CHECK(  SG_rbtree__add(pCtx, prb_new, psz_hid)  );
	}

	SG_ERR_CHECK(  sg_pack__do_get_dir__top(pCtx, pRepo, gen, psz_hid_root_treenode, prb_blobs, prb_new)  );
	SG_RBTREE_NULLFREE(pCtx, prb_new);

	/* recurse into each parent changeset */
	SG_ERR_CHECK(  SG_changeset__get_parents(pCtx, pcs, &pva_parents)  );
	if (pva_parents)
	{
		SG_ERR_CHECK(  SG_varray__count(pCtx, pva_parents, &count_parents)  );
		for (i=0; i<count_parents; i++)
		{
			const char* psz_hid = NULL;

			SG_ERR_CHECK(  SG_varray__get__sz(pCtx, pva_parents, i, &psz_hid)  );
			SG_ERR_CHECK(  sg_pack__do_changeset(pCtx, pRepo, psz_hid, prb_blobs)  );
		}
	}

	SG_CHANGESET_NULLFREE(pCtx, pcs);
	return;

fail:
	SG_RBTREE_NULLFREE(pCtx, prb_new);
	/* BUG FIX: the original error path leaked the loaded changeset. */
	SG_CHANGESET_NULLFREE(pCtx, pcs);
}
void SG_dbrecord__count_pairs(SG_context* pCtx, const SG_dbrecord* prec, SG_uint32* piResult) { SG_NULLARGCHECK_RETURN(prec); SG_NULLARGCHECK_RETURN(piResult); SG_ERR_CHECK_RETURN( SG_vhash__count(pCtx, prec->pvh, piResult) ); }
static void loop_innards_make_delta( SG_context* pCtx, SG_repo* pRepo, SG_varray* pva_path, SG_uint32 i_path_step, SG_vhash* pvh_add, SG_vhash* pvh_remove ) { SG_changeset* pcs = NULL; const char* psz_csid_cur = NULL; const char* psz_csid_parent = NULL; SG_vhash* pvh_changes = NULL; SG_vhash* pvh_one_parent_changes = NULL; SG_vhash* pvh_cs_add = NULL; SG_vhash* pvh_cs_remove = NULL; SG_ERR_CHECK( SG_varray__get__sz(pCtx, pva_path, i_path_step, &psz_csid_cur) ); SG_ERR_CHECK( SG_varray__get__sz(pCtx, pva_path, i_path_step + 1, &psz_csid_parent) ); SG_ERR_CHECK( SG_changeset__load_from_repo(pCtx, pRepo, psz_csid_cur, &pcs) ); SG_ERR_CHECK( SG_changeset__db__get_changes(pCtx, pcs, &pvh_changes) ); SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvh_changes, psz_csid_parent, &pvh_one_parent_changes) ); SG_ERR_CHECK( SG_vhash__check__vhash(pCtx, pvh_one_parent_changes, "add", &pvh_cs_remove) ); SG_ERR_CHECK( SG_vhash__check__vhash(pCtx, pvh_one_parent_changes, "remove", &pvh_cs_add) ); if (pvh_cs_add) { SG_uint32 count = 0; SG_uint32 i = 0; SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_cs_add, &count) ); for (i=0; i<count; i++) { const char* psz_hid_rec = NULL; SG_bool b = SG_FALSE; SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_cs_add, i, &psz_hid_rec, NULL) ); SG_ERR_CHECK( SG_vhash__remove_if_present(pCtx, pvh_remove, psz_hid_rec, &b) ); if (!b) { SG_ERR_CHECK( SG_vhash__update__null(pCtx, pvh_add, psz_hid_rec) ); } } } if (pvh_cs_remove) { SG_uint32 count = 0; SG_uint32 i = 0; SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_cs_remove, &count) ); for (i=0; i<count; i++) { const char* psz_hid_rec = NULL; SG_bool b = SG_FALSE; SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_cs_remove, i, &psz_hid_rec, NULL) ); SG_ERR_CHECK( SG_vhash__remove_if_present(pCtx, pvh_add, psz_hid_rec, &b) ); if (!b) { SG_ERR_CHECK( SG_vhash__update__null(pCtx, pvh_remove, psz_hid_rec) ); } } } fail: SG_CHANGESET_NULLFREE(pCtx, pcs); }
/**
 * Build a fragball file answering a pull request.
 *
 * pCtx                 - error/context object.
 * pRepo                - repo to pull data from; must not be NULL.
 * pvhRequest           - the request; NULL means "leaves of every dag".
 * pFragballDirPathname - directory in which the fragball file is created.
 * ppszFragballName     - receives an allocated copy of the fragball's
 *                        file name (caller frees).
 * ppvhStatus           - must not be NULL (checked up front; not otherwise
 *                        written here in the visible code).
 *
 * On any error the partially-written fragball file is deleted before the
 * usual goto-fail cleanup runs.
 */
void SG_server__pull_request_fragball(SG_context* pCtx, SG_repo* pRepo, SG_vhash* pvhRequest, const SG_pathname* pFragballDirPathname, char** ppszFragballName, SG_vhash** ppvhStatus)
{
	SG_pathname* pFragballPathname = NULL;
	SG_uint32* paDagNums = NULL;
	SG_rbtree* prbDagnodes = NULL;
	SG_string* pstrFragballName = NULL;
	char* pszRevFullHid = NULL;
	SG_rbtree_iterator* pit = NULL;
	SG_uint32* repoDagnums = NULL;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pFragballDirPathname);
	SG_NULLARGCHECK_RETURN(ppvhStatus);

#if TRACE_SERVER
	SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvhRequest, "pull fragball request")  );
#endif

	SG_ERR_CHECK(  SG_fragball__create(pCtx, pFragballDirPathname, &pFragballPathname)  );

	if (!pvhRequest)
	{
		// Add leaves from every dag to the fragball.
		SG_uint32 count_dagnums;
		SG_uint32 i;
		SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums)  );

		for (i=0; i<count_dagnums; i++)
		{
			SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, paDagNums[i], &prbDagnodes)  );
			SG_ERR_CHECK(  SG_fragball__append__dagnodes(pCtx, pFragballPathname, pRepo, paDagNums[i], prbDagnodes)  );
			SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
		}

		// Hand the fragball's file name back to the caller as an allocated copy.
		SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
		SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
	}
	else
	{
		// Build the requested fragball.
		SG_bool found;
		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE, &found)  );
		if (found)
		{
			// Full clone requested.
			SG_ERR_CHECK(  SG_repo__fetch_repo__fragball(pCtx, pRepo, pFragballDirPathname, ppszFragballName) );
		}
		else
		{
			// Not a full clone.
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &found)  );
			if (found)
			{
				// Dagnodes were requested.
				SG_uint32 generations = 0;
				SG_vhash* pvhDags;
				SG_uint32 count_requested_dagnums;
				SG_uint32 count_repo_dagnums = 0;
				SG_uint32 i;
				const char* pszDagNum = NULL;
				const SG_variant* pvRequestedNodes = NULL;
				SG_vhash* pvhRequestedNodes = NULL;
				const char* pszHidRequestedDagnode = NULL;

				// Were additional generations requested?
				SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, &found)  );
				if (found)
					SG_ERR_CHECK(  SG_vhash__get__uint32(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, &generations)  );

				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags)  );
				SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhDags, &count_requested_dagnums)  );
				// Only enumerate the repo's dags if there's something to validate against.
				if (count_requested_dagnums)
					SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_repo_dagnums, &repoDagnums)  );

				// For each requested dag, get the requested nodes.
				for (i=0; i<count_requested_dagnums; i++)
				{
					SG_uint32 iMissingNodeCount;
					SG_uint32 iDagnum;
					SG_uint32 j;
					SG_bool isValidDagnum = SG_FALSE;
					SG_bool bSpecificNodesRequested = SG_FALSE;

					// Get the dag's missing node vhash.
					SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhDags, i, &pszDagNum, &pvRequestedNodes)  );
					SG_ERR_CHECK(  SG_dagnum__from_sz__decimal(pCtx, pszDagNum, &iDagnum)  );

					// Verify that requested dagnum exists
					for (j = 0; j < count_repo_dagnums; j++)
					{
						if (repoDagnums[j] == iDagnum)
						{
							isValidDagnum = SG_TRUE;
							break;
						}
					}
					if (!isValidDagnum)
					{
						char buf[SG_DAGNUM__BUF_MAX__NAME];
						SG_ERR_CHECK(  SG_dagnum__to_name(pCtx, iDagnum, buf, sizeof(buf))  );
						SG_ERR_THROW2(SG_ERR_NO_SUCH_DAG, (pCtx, "%s", buf));
					}

					if (pvRequestedNodes)
					{
						SG_ERR_CHECK(  SG_variant__get__vhash(pCtx, pvRequestedNodes, &pvhRequestedNodes)  );

						// Get each node listed for the dag
						SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhRequestedNodes, &iMissingNodeCount)  );
						if (iMissingNodeCount > 0)
						{
							// NOTE(review): this inner 'j' shadows the outer loop's 'j'
							// declared above; harmless here but worth renaming someday.
							SG_uint32 j;
							const SG_variant* pvVal;
							bSpecificNodesRequested = SG_TRUE;

							SG_ERR_CHECK(  SG_RBTREE__ALLOC__PARAMS(pCtx, &prbDagnodes, iMissingNodeCount, NULL)  );
							for (j=0; j<iMissingNodeCount; j++)
							{
								SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhRequestedNodes, j, &pszHidRequestedDagnode, &pvVal)  );
								if (pvVal)
								{
									// The value, when present, says how to resolve the key:
									// a HID prefix to look up, or a tag name.
									const char* pszVal;
									SG_ERR_CHECK(  SG_variant__get__sz(pCtx, pvVal, &pszVal)  );
									if (pszVal)
									{
										if (0 == strcmp(pszVal, SG_SYNC_REQUEST_VALUE_HID_PREFIX))
										{
											SG_ERR_CHECK(  SG_repo__hidlookup__dagnode(pCtx, pRepo, iDagnum, pszHidRequestedDagnode, &pszRevFullHid)  );
											pszHidRequestedDagnode = pszRevFullHid;
										}
										else if (0 == strcmp(pszVal, SG_SYNC_REQUEST_VALUE_TAG))
										{
											SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, pszHidRequestedDagnode, &pszRevFullHid)  );
											if (!pszRevFullHid)
												SG_ERR_THROW(SG_ERR_TAG_NOT_FOUND);
											pszHidRequestedDagnode = pszRevFullHid;
										}
										else
											SG_ERR_THROW(SG_ERR_PULL_INVALID_FRAGBALL_REQUEST);
									}
								}

								SG_ERR_CHECK(  SG_rbtree__update(pCtx, prbDagnodes, pszHidRequestedDagnode)  );
								// Get additional dagnode generations, if requested.
								SG_ERR_CHECK(  SG_sync__add_n_generations(pCtx, pRepo, pszHidRequestedDagnode, prbDagnodes, generations)  );
								// pszHidRequestedDagnode may alias pszRevFullHid; free only
								// after both uses above are done.
								SG_NULLFREE(pCtx, pszRevFullHid);
							}
						}
					}

					if (!bSpecificNodesRequested)
					{
						// When no specific nodes are in the request, add all leaves.
						SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagnum, &prbDagnodes)  );

						// Get additional dagnode generations, if requested.
						if (generations)
						{
							SG_bool found;
							const char* hid;

							SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prbDagnodes, &found, &hid, NULL)  );
							while (found)
							{
								SG_ERR_CHECK(  SG_sync__add_n_generations(pCtx, pRepo, hid, prbDagnodes, generations)  );
								SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &found, &hid, NULL)  );
							}
						}
					}

					if (prbDagnodes) // can be null when leaves of an empty dag are requested
					{
						SG_ERR_CHECK(  SG_fragball__append__dagnodes(pCtx, pFragballPathname, pRepo, iDagnum, prbDagnodes)  );
						SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
					}

				} // dagnum loop
			} // if "dags" exists

			/* Add requested blobs to the fragball */
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &found)  );
			if (found)
			{
				// Blobs were requested.
				SG_vhash* pvhBlobs;
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &pvhBlobs)  );
				SG_ERR_CHECK(  SG_sync__add_blobs_to_fragball(pCtx, pRepo, pFragballPathname, pvhBlobs)  );
			}

			// Hand the fragball's file name back to the caller as an allocated copy.
			SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
		}
	}

	/* fallthru */

fail:
	// If we had an error, delete the half-baked fragball.
	if (pFragballPathname && SG_context__has_err(pCtx))
		SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pFragballPathname)  );

	SG_PATHNAME_NULLFREE(pCtx, pFragballPathname);
	SG_NULLFREE(pCtx, paDagNums);
	SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
	SG_STRING_NULLFREE(pCtx, pstrFragballName);
	SG_NULLFREE(pCtx, pszRevFullHid);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_NULLFREE(pCtx, repoDagnums);
}
/** * Provides each value from a vhash whose name matches a pattern to another callback. * Values that are themselves vhashes are recursed into. * Each value's name is prefixed such that it's fully-qualified when passed to the callback. * Intended for use as a SG_vhash_foreach_callback. */ static void provide_matching_values( SG_context* pCtx, //< Error and context information. void* pCallerData, //< An allocated instance of provide_matching_values__data. const SG_vhash* pHash, //< The hash that the current value is from. const char* szName, //< The name of the current value. const SG_variant* pValue //< The current value. ) { SG_string* pFullName = NULL; SG_string* pScopeName = NULL; SG_string* pSettingName = NULL; SG_uint32 uValueSize = 0u; provide_matching_values__data* pData = NULL; SG_UNUSED(pHash); SG_NULLARGCHECK_RETURN(pCallerData); pData = (provide_matching_values__data*) pCallerData; // build the full name of this value from the incoming prefix and name SG_ERR_CHECK( SG_STRING__ALLOC__COPY(pCtx, &pFullName, pData->pPrefix) ); SG_ERR_CHECK( SG_string__append__sz(pCtx, pFullName, "/") ); SG_ERR_CHECK( SG_string__append__sz(pCtx, pFullName, szName) ); // if this is a vhash, get its size if (pValue->type == SG_VARIANT_TYPE_VHASH) { SG_ERR_CHECK( SG_vhash__count(pCtx, pValue->v.val_vhash, &uValueSize) ); } // if this is a vhash with children, then recurse into it // otherwise provide it to the callback if (uValueSize > 0u) { // use our full name as the prefix during recursion // to accomplish that, we'll swap pData->pPrefix and pFullName // that way if an error occurs during recursion, everything will still be freed SG_string* pTemp = NULL; pTemp = pData->pPrefix; pData->pPrefix = pFullName; pFullName = pTemp; SG_ERR_CHECK( SG_vhash__foreach(pCtx, pValue->v.val_vhash, provide_matching_values, pData) ); pTemp = pData->pPrefix; pData->pPrefix = pFullName; pFullName = pTemp; } else { // if we have a pattern that starts with a slash // then match it against the 
start of the full name // if the name doesn't match, skip this one if (pData->szPattern != NULL && pData->szPattern[0] == '/') { if (strncmp(SG_string__sz(pFullName), pData->szPattern, strlen(pData->szPattern)) != 0) { goto fail; } } // split our full name into a scope name and a local name SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pScopeName) ); SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pSettingName) ); SG_ERR_CHECK( SG_localsettings__split_full_name(pCtx, SG_string__sz(pFullName), NULL, pScopeName, pSettingName) ); // if we have a pattern that doesn't start with a slash // then match it against just the local name of the setting // if the name doesn't match, skip this one if (pData->szPattern != NULL && pData->szPattern[0] != '/') { if (strstr(SG_string__sz(pSettingName), pData->szPattern) == NULL) { goto fail; } } // send the data to the callback pData->pCallback(pCtx, SG_string__sz(pFullName), SG_string__sz(pScopeName), SG_string__sz(pSettingName), pValue, pData->pCallerData); } fail: SG_STRING_NULLFREE(pCtx, pFullName); SG_STRING_NULLFREE(pCtx, pScopeName); SG_STRING_NULLFREE(pCtx, pSettingName); }