// Re-encode to FULL every blob currently stored with the given encoding.
// Blobs are listed in batches of up to 500; each re-encoding runs in its
// own repo transaction.
void SG_repo__unpack(SG_context* pCtx, SG_repo * pRepo, SG_blob_encoding blob_encoding)
{
	SG_vhash* pvh = NULL;
	SG_uint32 count = 0;
	SG_uint32 i = 0;
	SG_repo_tx_handle* pTx = NULL;	// BUG FIX: must start NULL so the fail path can tell whether a tx is open

	SG_ERR_CHECK(  SG_repo__list_blobs(pCtx, pRepo, blob_encoding, SG_FALSE, SG_FALSE, 500, 0, &pvh)  );
	SG_ERR_CHECK(  SG_vhash__count(pCtx, pvh, &count)  );

	for (i=0; i<count; i++)
	{
		const char* psz_hid = NULL;
		const SG_variant* pv = NULL;

		SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvh, i, &psz_hid, &pv)  );

		// Not a lot of thought went into doing each of these in its own repo tx. Consider alternatives.
		SG_ERR_CHECK(  SG_repo__begin_tx(pCtx, pRepo, &pTx)  );
		SG_ERR_CHECK(  SG_repo__change_blob_encoding(pCtx, pRepo, pTx, psz_hid, SG_BLOBENCODING__FULL, NULL, NULL, NULL, NULL, NULL)  );
		SG_ERR_CHECK(  SG_repo__commit_tx(pCtx, pRepo, &pTx)  );
	}

	SG_VHASH_NULLFREE(pCtx, pvh);
	return;

fail:
	// BUG FIX: roll back any transaction left open by a mid-loop failure;
	// previously it was stranded.
	if (pTx)
		SG_ERR_IGNORE(  SG_repo__abort_tx(pCtx, pRepo, &pTx)  );
	SG_VHASH_NULLFREE(pCtx, pvh);
}
// Compare the blobs of two repos by paging through pRepo1's zlib blob list
// and probing pRepo2 for each HID, then checking pRepo2's total blob count.
// Sets *pbIdentical accordingly.
void SG_sync__compare_repo_blobs(SG_context* pCtx, SG_repo* pRepo1, SG_repo* pRepo2, SG_bool* pbIdentical)
{
	const SG_uint32 chunk_size = 1000;
	SG_vhash* pvh = NULL;
	const char* pszBlob1 = NULL;
	SG_uint32 i, j;
	SG_uint32 count_observed = 0;
	SG_uint32 count_returned;

	SG_NULLARGCHECK_RETURN(pRepo1);
	SG_NULLARGCHECK_RETURN(pRepo2);
	SG_NULLARGCHECK_RETURN(pbIdentical);

	// BUG FIX: assume identical until proven otherwise.  Previously the flag
	// was only ever set to SG_FALSE, so on the all-blobs-match path the caller
	// read an indeterminate value.
	*pbIdentical = SG_TRUE;

	for(i = 0; SG_TRUE; i++)
	{
		// Ian TODO: other encodings
		SG_ERR_CHECK(  SG_repo__list_blobs(pCtx, pRepo1, SG_BLOBENCODING__ZLIB, SG_FALSE, SG_FALSE, chunk_size, chunk_size * i, &pvh)  );

		for (j = 0; j < chunk_size; j++)
		{
			SG_bool b = SG_TRUE;

			SG_vhash__get_nth_pair(pCtx, pvh, j, &pszBlob1, NULL);
			if (SG_context__err_equals(pCtx, SG_ERR_ARGUMENT_OUT_OF_RANGE))
			{
				// Walked off the end of this (final, partial) chunk.
				SG_context__err_reset(pCtx);
				break;
			}
			SG_ERR_CHECK_CURRENT;

			SG_ERR_CHECK(  SG_repo__does_blob_exist(pCtx, pRepo2, pszBlob1, &b)  );
			if (!b)
			{
				// A repo1 blob is missing from repo2: not identical.
				// No point scanning further chunks.
				*pbIdentical = SG_FALSE;
				goto fail;
			}
			count_observed++;
		}

		if (!j)
			break;

		SG_VHASH_NULLFREE(pCtx, pvh);
	}

	// Every repo1 blob exists in repo2; make sure repo2 has no extras.
	SG_ERR_CHECK(  SG_repo__get_blob_stats(pCtx, pRepo2, NULL, NULL, &count_returned, NULL, NULL, NULL, NULL, NULL, NULL, NULL)  );
	if (count_returned != count_observed)
		*pbIdentical = SG_FALSE;

	// fall through
fail:
	SG_VHASH_NULLFREE(pCtx, pvh);
}
// Compare the blobs of two repos by listing each repo's blobset and
// comparing the two sets.  Sets *pbIdentical accordingly.
void SG_sync__compare_repo_blobs(SG_context* pCtx, SG_repo* pRepo1, SG_repo* pRepo2, SG_bool* pbIdentical)
{
	SG_blobset* pbs1 = NULL;
	SG_blobset* pbs2 = NULL;

	SG_NULLARGCHECK_RETURN(pRepo1);
	SG_NULLARGCHECK_RETURN(pRepo2);
	SG_NULLARGCHECK_RETURN(pbIdentical);

	SG_ERR_CHECK(  SG_repo__list_blobs(pCtx, pRepo1, 0, 0, 0, &pbs1)  );
	// BUG FIX: the second listing must come from pRepo2.  Previously both
	// calls passed pRepo1, so pbs2 was a listing of repo 1 and the
	// comparison was trivially "identical".
	SG_ERR_CHECK(  SG_repo__list_blobs(pCtx, pRepo2, 0, 0, 0, &pbs2)  );

	SG_ERR_CHECK(  SG_blobset__compare_to(pCtx, pbs1, pbs2, pbIdentical)  );

	// fall through
fail:
	SG_BLOBSET_NULLFREE(pCtx, pbs1);
	SG_BLOBSET_NULLFREE(pCtx, pbs2);
}
// Re-encode up to 500 FULL-encoded blobs to ZLIB in a single repo
// transaction.  A busy repo (SG_ERR_REPO_BUSY) on an individual blob is
// tolerated: that blob is skipped and packing continues.
void SG_repo__pack__zlib(SG_context* pCtx, SG_repo * pRepo)
{
	SG_vhash* pvh = NULL;
	SG_uint32 count = 0;
	SG_uint32 i = 0;
	SG_repo_tx_handle* pTx = NULL;	// BUG FIX: must start NULL so the fail path can tell whether a tx is open

	SG_ERR_CHECK(  SG_repo__list_blobs(pCtx, pRepo, SG_BLOBENCODING__FULL, SG_TRUE, SG_TRUE, 500, 0, &pvh)  );
	SG_ERR_CHECK(  SG_vhash__count(pCtx, pvh, &count)  );

	SG_ERR_CHECK(  SG_repo__begin_tx(pCtx, pRepo, &pTx)  );
	for (i=0; i<count; i++)
	{
		const char* psz_hid = NULL;
		const SG_variant* pv = NULL;

		SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvh, i, &psz_hid, &pv)  );

		SG_repo__change_blob_encoding(pCtx, pRepo, pTx, psz_hid, SG_BLOBENCODING__ZLIB, NULL, NULL, NULL, NULL, NULL);
		if (SG_context__has_err(pCtx))
		{
			if (!SG_context__err_equals(pCtx,SG_ERR_REPO_BUSY))
			{
				SG_ERR_RETHROW;
			}
			else
			{
				// Repo busy: skip this blob, keep packing the rest.
				SG_context__err_reset(pCtx);
			}
		}
	}
	SG_ERR_CHECK(  SG_repo__commit_tx(pCtx, pRepo, &pTx)  );

	SG_VHASH_NULLFREE(pCtx, pvh);
	return;

fail:
	// BUG FIX: roll back the transaction if a failure left it open;
	// previously it was stranded.
	if (pTx)
		SG_ERR_IGNORE(  SG_repo__abort_tx(pCtx, pRepo, &pTx)  );
	SG_VHASH_NULLFREE(pCtx, pvh);
}
// Verify that committing and aborting an empty repo transaction each leave
// the repo's blob statistics unchanged.
void MyFn(empty_tx)(SG_context * pCtx, SG_repo* pRepo)
{
	SG_blobset* pbs = NULL;
	SG_uint32 count_blobs_before = 0;
	SG_uint64 len_encoded_before = 0;
	SG_uint64 len_full_before = 0;
	SG_uint32 count_blobs_after = 0;
	SG_uint64 len_encoded_after = 0;
	SG_uint64 len_full_after = 0;
	SG_repo_tx_handle* pTx = NULL;

	// Baseline stats.
	VERIFY_ERR_CHECK(  SG_repo__list_blobs(pCtx, pRepo, 0, 0, 0, &pbs)  );
	VERIFY_ERR_CHECK(  SG_blobset__get_stats(pCtx, pbs, 0, &count_blobs_before, &len_encoded_before, &len_full_before, NULL, NULL)  );
	SG_BLOBSET_NULLFREE(pCtx, pbs);

	// Commit empty tx.
	VERIFY_ERR_CHECK(  SG_repo__begin_tx(pCtx, pRepo, &pTx)  );
	VERIFY_ERR_CHECK(  SG_repo__commit_tx(pCtx, pRepo, &pTx)  );

	VERIFY_ERR_CHECK(  SG_repo__list_blobs(pCtx, pRepo, 0, 0, 0, &pbs)  );
	VERIFY_ERR_CHECK(  SG_blobset__get_stats(pCtx, pbs, 0, &count_blobs_after, &len_encoded_after, &len_full_after, NULL, NULL)  );
	SG_BLOBSET_NULLFREE(pCtx, pbs);

	VERIFY_COND("blob count mismatch", count_blobs_before == count_blobs_after);
	VERIFY_COND("len_encoded mismatch", len_encoded_before == len_encoded_after);
	VERIFY_COND("len_full mismatch", len_full_before == len_full_after);

	// Abort empty tx.
	VERIFY_ERR_CHECK(  SG_repo__begin_tx(pCtx, pRepo, &pTx)  );
	VERIFY_ERR_CHECK(  SG_repo__abort_tx(pCtx, pRepo, &pTx)  );

	VERIFY_ERR_CHECK(  SG_repo__list_blobs(pCtx, pRepo, 0, 0, 0, &pbs)  );
	VERIFY_ERR_CHECK(  SG_blobset__get_stats(pCtx, pbs, 0, &count_blobs_after, &len_encoded_after, &len_full_after, NULL, NULL)  );
	SG_BLOBSET_NULLFREE(pCtx, pbs);

	VERIFY_COND("blob count mismatch", count_blobs_before == count_blobs_after);
	VERIFY_COND("len_encoded mismatch", len_encoded_before == len_encoded_after);
	VERIFY_COND("len_full mismatch", len_full_before == len_full_after);

	return;

fail:
	// BUG FIX: the fail path previously returned without cleanup, leaking
	// pbs (and any open tx) when a VERIFY_ERR_CHECK jumped here mid-function.
	if (pTx)
		SG_ERR_IGNORE(  SG_repo__abort_tx(pCtx, pRepo, &pTx)  );
	SG_BLOBSET_NULLFREE(pCtx, pbs);
}
// You can only store one blob at a time. void MyFn(multiple_stores_fail)(SG_context * pCtx, SG_repo* pRepo) { SG_blobset* pbs = NULL; SG_uint32 count_blobs_before = 0; SG_uint64 len_encoded_before = 0; SG_uint64 len_full_before = 0; SG_uint32 count_blobs_after = 0; SG_uint64 len_encoded_after = 0; SG_uint64 len_full_after = 0; SG_repo_tx_handle* pTx = NULL; SG_repo_store_blob_handle* pbh; SG_repo_store_blob_handle* pbh2; VERIFY_ERR_CHECK( SG_repo__list_blobs(pCtx, pRepo, 0, 0, 0, &pbs) ); VERIFY_ERR_CHECK( SG_blobset__get_stats( pCtx, pbs, 0, &count_blobs_before, &len_encoded_before, &len_full_before, NULL, NULL ) ); SG_BLOBSET_NULLFREE(pCtx, pbs); VERIFY_ERR_CHECK( SG_repo__begin_tx(pCtx, pRepo, &pTx) ); // Start writing a blob. VERIFY_ERR_CHECK( SG_repo__store_blob__begin(pCtx, pRepo, pTx, SG_BLOBENCODING__FULL, NULL, 500, 0, NULL, &pbh) ); // Start another blob. Should fail. VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__store_blob__begin(pCtx, pRepo, pTx, SG_BLOBENCODING__FULL, NULL, 500, 0, NULL, &pbh2), SG_ERR_INCOMPLETE_BLOB_IN_REPO_TX ); // SG_repo__store_blob__begin didn't fail when previous blob wasn't done. VERIFY_ERR_CHECK( SG_repo__abort_tx(pCtx, pRepo, &pTx) ); VERIFY_ERR_CHECK( SG_repo__list_blobs(pCtx, pRepo, 0, 0, 0, &pbs) ); VERIFY_ERR_CHECK( SG_blobset__get_stats( pCtx, pbs, 0, &count_blobs_after, &len_encoded_after, &len_full_after, NULL, NULL ) ); SG_BLOBSET_NULLFREE(pCtx, pbs); VERIFY_COND("blob count mismatch", count_blobs_before == count_blobs_after); VERIFY_COND("len_encoded mismatch", len_encoded_before == len_encoded_after); VERIFY_COND("len_full mismatch", len_full_before == len_full_after); return; fail: return; }
// Create 8 blobs, commit those whose bit is set in blobMask, abort the rest. Verify results. void MyFn(eight_blobs_commit_masked)(SG_context * pCtx, SG_repo* pRepo, SG_uint8 blobMask) { SG_blobset* pbs = NULL; SG_uint32 count_blobs_before = 0; SG_uint64 len_encoded_before = 0; SG_uint64 len_full_before = 0; SG_uint32 count_blobs_after = 0; SG_uint64 len_encoded_after = 0; SG_uint64 len_full_after = 0; SG_byte* pRandomBuf = NULL; SG_uint32 lenRandomBuf; SG_uint64 lenTotal = 0; SG_repo_tx_handle* pTx = NULL; SG_repo_store_blob_handle* pbh; SG_uint32 i,j; SG_uint32 iLenWritten = 0; char* apszHids[8]; SG_uint8 countBlobsToAdd; SG_uint8 mask = blobMask; // Count the number of bits set in blobMask. for (countBlobsToAdd = 0; mask; mask >>= 1) countBlobsToAdd += mask & 1; VERIFY_ERR_CHECK( SG_repo__list_blobs(pCtx, pRepo, 0, 0, 0, &pbs) ); VERIFY_ERR_CHECK( SG_blobset__get_stats( pCtx, pbs, 0, &count_blobs_before, &len_encoded_before, &len_full_before, NULL, NULL ) ); SG_BLOBSET_NULLFREE(pCtx, pbs); VERIFY_ERR_CHECK( SG_repo__begin_tx(pCtx, pRepo, &pTx) ); for (i=0; i < 8; i++) { VERIFY_ERR_CHECK( MyFn(alloc_random_buffer)(pCtx, &pRandomBuf, &lenRandomBuf) ); lenTotal = lenRandomBuf * 3; VERIFY_ERR_CHECK( SG_repo__store_blob__begin(pCtx, pRepo, pTx, SG_BLOBENCODING__FULL, NULL, lenTotal, 0, NULL, &pbh) ); for (j=0; j < 3; j++) { VERIFY_ERR_CHECK( SG_repo__store_blob__chunk(pCtx, pRepo, pbh, lenRandomBuf, pRandomBuf, &iLenWritten) ); // This chunk is much smaller than SG_STREAMING_BUFFER_SIZE, so the whole thing should be written. VERIFY_COND("SG_repo__store_blob__chunk length mismatch.", iLenWritten == lenRandomBuf); if ((1 == j) && ((1 << i & blobMask) == 0)) // we're mid-blob and blob is to be aborted (bit is unset) break; } if ((1 << i & blobMask) != 0) // blob is to be commit (bit is set) { // Finish the blob. VERIFY_ERR_CHECK( SG_repo__store_blob__end(pCtx, pRepo, pTx, &pbh, &(apszHids[i])) ); } else { // Abort the blob. 
VERIFY_ERR_CHECK( SG_repo__store_blob__abort(pCtx, pRepo, pTx, &pbh) ); apszHids[i] = NULL; } SG_NULLFREE(pCtx, pRandomBuf); } VERIFY_ERR_CHECK( SG_repo__commit_tx(pCtx, pRepo, &pTx) ); VERIFY_ERR_CHECK( SG_repo__list_blobs(pCtx, pRepo, 0, 0, 0, &pbs) ); VERIFY_ERR_CHECK( SG_blobset__get_stats( pCtx, pbs, 0, &count_blobs_after, &len_encoded_after, &len_full_after, NULL, NULL ) ); SG_BLOBSET_NULLFREE(pCtx, pbs); VERIFY_COND("blob count mismatch", (count_blobs_before + countBlobsToAdd) == count_blobs_after); // Verify HIDs we think we added, were. for (i=0; i < 8; i++) { if (apszHids[i]) { SG_uint64 len = 0; VERIFY_ERR_CHECK( SG_repo__fetch_blob_into_memory(pCtx, pRepo, apszHids[i], &pRandomBuf, &len) ); SG_NULLFREE(pCtx, pRandomBuf); } } // Fall through to common cleanup. fail: SG_NULLFREE(pCtx, pRandomBuf); for (i=0; i < 8; i++) SG_NULLFREE(pCtx, apszHids[i]); }
// Commit a repo tx while a blob is being stored. // // We jump through some awkward hoops to clean up memory in this case. If it becomes more trouble than // it's worth, this test might not be worth running. void MyFn(commit_mid_blob)(SG_context * pCtx, SG_repo* pRepo) { SG_blobset* pbs = NULL; SG_uint32 count_blobs_before = 0; SG_uint64 len_encoded_before = 0; SG_uint64 len_full_before = 0; SG_uint32 count_blobs_after = 0; SG_uint64 len_encoded_after = 0; SG_uint64 len_full_after = 0; SG_byte* pRandomBuf = NULL; SG_uint32 lenRandomBuf; SG_uint64 lenTotal = 0; SG_repo_tx_handle* pTx = NULL; SG_repo_store_blob_handle* pbh; SG_uint32 i; SG_uint32 iLenWritten = 0; VERIFY_ERR_CHECK( SG_repo__list_blobs(pCtx, pRepo, 0, 0, 0, &pbs) ); VERIFY_ERR_CHECK( SG_blobset__get_stats( pCtx, pbs, 0, &count_blobs_before, &len_encoded_before, &len_full_before, NULL, NULL ) ); SG_BLOBSET_NULLFREE(pCtx, pbs); VERIFY_ERR_CHECK( MyFn(alloc_random_buffer)(pCtx, &pRandomBuf, &lenRandomBuf) ); lenTotal = lenRandomBuf * 5; // Start writing blob. VERIFY_ERR_CHECK( SG_repo__begin_tx(pCtx, pRepo, &pTx) ); VERIFY_ERR_CHECK( SG_repo__store_blob__begin(pCtx, pRepo, pTx, SG_BLOBENCODING__FULL, NULL, lenTotal, 0, NULL, &pbh) ); for (i=0; i < 3; i++) { VERIFY_ERR_CHECK( SG_repo__store_blob__chunk(pCtx, pRepo, pbh, lenRandomBuf, pRandomBuf, &iLenWritten) ); // This chunk is much smaller than SG_STREAMING_BUFFER_SIZE, so the whole thing should be written. VERIFY_COND("SG_repo__store_blob__chunk length mismatch.", iLenWritten == lenRandomBuf); } // We're not done yet, so this should fail. 
VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__commit_tx(pCtx, pRepo, &pTx), SG_ERR_INCOMPLETE_BLOB_IN_REPO_TX ); // SG_repo__commit_tx should return SG_ERR_INCOMPLETE_BLOB_IN_REPO_TX VERIFY_COND("SG_repo__commit_tx should free the repo transaction.", !pTx); SG_NULLFREE(pCtx, pRandomBuf); VERIFY_ERR_CHECK( SG_repo__list_blobs(pCtx, pRepo, 0, 0, 0, &pbs) ); VERIFY_ERR_CHECK( SG_blobset__get_stats( pCtx, pbs, 0, &count_blobs_after, &len_encoded_after, &len_full_after, NULL, NULL ) ); SG_BLOBSET_NULLFREE(pCtx, pbs); VERIFY_COND("blob count mismatch", count_blobs_before == count_blobs_after); VERIFY_COND("len_encoded mismatch", len_encoded_before == len_encoded_after); VERIFY_COND("len_full mismatch", len_full_before == len_full_after); return; fail: SG_NULLFREE(pCtx, pRandomBuf); }