MyMain()
{
    SG_repo * pRepo = NULL;
    SG_pathname * pPathnameTempDir = NULL;

    TEMPLATE_MAIN_START;

    VERIFY_ERR_CHECK( MyFn(create_repo)(pCtx,&pRepo) );
    VERIFY_ERR_CHECK( MyFn(create_tmp_src_dir)(pCtx,&pPathnameTempDir) );

    BEGIN_TEST( MyFn(create_some_blobs_from_bytes)(pCtx, pRepo) );
    BEGIN_TEST( MyFn(create_some_blobs_from_files)(pCtx, pRepo,pPathnameTempDir) );
    BEGIN_TEST( MyFn(create_zero_byte_blob)(pCtx, pRepo) );

    //////////////////////////////////////////////////////////////////
    // TODO delete repo directory and everything we created under it.
    // TODO delete temp directory and everything we created under it.

    // fall-thru to common cleanup

fail:
    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_PATHNAME_NULLFREE(pCtx, pPathnameTempDir);

    TEMPLATE_MAIN_END;
}
void MyFn(remove__value)(SG_context* pCtx)
{
    SG_vector* pVector = NULL;
    SG_uint32  uSize   = 0u;
    SG_uint32  uIndex  = 0u;

    // create a test vector
    VERIFY_ERR_CHECK( MyFn(_create_test_vector)(pCtx, &pVector, &uSize) );

    // run through each item and remove it by value
    // verify that we remove the number of values expected
    // also verify that the resulting size is as expected
    for (uIndex = 0u; uIndex < SG_NrElements(gaTestItems); ++uIndex)
    {
        test_item* pTestItem = gaTestItems + uIndex;
        SG_uint32  uRemoved  = 0u;

        VERIFY_ERR_CHECK( SG_vector__remove__value(pCtx, pVector, (void*)pTestItem->szValue, &uRemoved, NULL) );
        VERIFYP_COND("Removed wrong number of items.", uRemoved == pTestItem->uCount, ("Value(%s) Expected(%u) Removed(%u)", pTestItem->szValue, pTestItem->uCount, uRemoved));

        uSize -= uRemoved;
        VERIFY_ERR_CHECK( MyFn(_verify_size)(pCtx, pVector, uSize) );
    }

fail:
    SG_VECTOR_NULLFREE(pCtx, pVector);
    return;
}
// Generates a vector from gaTestItems.
void MyFn(_create_test_vector)(
    SG_context* pCtx,
    SG_vector** ppVector,
    SG_uint32*  pSize
    )
{
    SG_vector* pVector        = NULL;
    SG_uint32  uIndex         = 0u;
    SG_uint32  uExpectedTotal = 0u;
    SG_uint32  uPass          = 0u;
    SG_uint32  uPassTotal     = 1u;
    SG_uint32  uActualTotal   = 0u;

    // check how big the final vector will be
    for (uIndex = 0u; uIndex < SG_NrElements(gaTestItems); ++uIndex)
    {
        test_item* pTestItem = gaTestItems + uIndex;
        uExpectedTotal += pTestItem->uCount;
    }

    // allocate the vector
    VERIFY_ERR_CHECK( SG_VECTOR__ALLOC(pCtx, &pVector, uExpectedTotal) );

    // add the items in passes rather than each item sequentially
    // this way values appended multiple times won't be sequential
    while (uPassTotal > 0u)
    {
        uPassTotal = 0u;
        for (uIndex = 0u; uIndex < SG_NrElements(gaTestItems); ++uIndex)
        {
            test_item* pTestItem = gaTestItems + uIndex;
            if (pTestItem->uCount > uPass)
            {
                VERIFY_ERR_CHECK( SG_vector__append(pCtx, pVector, (void*)pTestItem->szValue, NULL) );
                uPassTotal += 1u;
            }
        }
        uActualTotal += uPassTotal;
        uPass += 1u;
    }
    VERIFY_ERR_CHECK( MyFn(_verify_size)(pCtx, pVector, uExpectedTotal) );
    VERIFY_ERR_CHECK( MyFn(_verify_size)(pCtx, pVector, uActualTotal) );

    // return the vector
    if (ppVector != NULL)
    {
        *ppVector = pVector;
        pVector = NULL;
    }
    if (pSize != NULL)
    {
        *pSize = uActualTotal;
    }

fail:
    SG_VECTOR_NULLFREE(pCtx, pVector);
}
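The interleaving that this helper produces is easiest to see with a small standalone example. The sketch below is plain C with hypothetical counts (a=3, b=1, c=2), not the actual gaTestItems table, which is not shown here; it prints the order the pass-based loop appends values in (a b c a c a) rather than the grouped order (a a a b c c).

// Illustration only: pass-based interleaving with hypothetical counts.
#include <stdio.h>

typedef struct { const char* szValue; unsigned uCount; } demo_item;

int main(void)
{
    demo_item aItems[] = { { "a", 3 }, { "b", 1 }, { "c", 2 } };  // hypothetical, not gaTestItems
    unsigned uPass = 0, uPassTotal = 1, i;

    while (uPassTotal > 0)
    {
        uPassTotal = 0;
        for (i = 0; i < sizeof(aItems) / sizeof(aItems[0]); ++i)
        {
            if (aItems[i].uCount > uPass)
            {
                printf("%s ", aItems[i].szValue);  // the real helper calls SG_vector__append here
                uPassTotal += 1;
            }
        }
        uPass += 1;
    }
    printf("\n");  // prints: a b c a c a
    return 0;
}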
MyMain()
{
    TEMPLATE_MAIN_START;

    BEGIN_TEST( MyFn(test1)(pCtx) );
    BEGIN_TEST( MyFn(test_W2771)(pCtx) );

    TEMPLATE_MAIN_END;
}
MyMain()
{
    TEMPLATE_MAIN_START;

    BEGIN_TEST( MyFn(test__parseRfc850)(pCtx) );
    BEGIN_TEST( MyFn(test__parseRfc850nonGmt)(pCtx) );
    BEGIN_TEST( MyFn(test__formatRfc850)(pCtx) );

    TEMPLATE_MAIN_END;
}
MyMain()
{
    TEMPLATE_MAIN_START;

#if defined(MAC) || defined(LINUX)
    BEGIN_TEST( MyFn(test1)(pCtx) );
    BEGIN_TEST( MyFn(test2)(pCtx) );
#else
    VERIFY_COND("Skipping test on Windows platform...", SG_TRUE);
#endif

    TEMPLATE_MAIN_END;
}
void MyFn(alloc__copy__deep)(SG_context * pCtx)
{
    static const SG_uint32 uSize = 100u;

    SG_vector* pVector  = NULL;
    SG_vector* pCopy    = NULL;
    SG_uint32  uIndex   = 0u;
    SG_uint32  uOutput1 = 0u;
    SG_uint32  uOutput2 = 0u;
    void*      pOutput1 = NULL;
    void*      pOutput2 = NULL;

    VERIFY_ERR_CHECK( SG_VECTOR__ALLOC(pCtx, &pVector, uSize) );

    // add some allocated data to the vector
    for (uIndex = 0u; uIndex < uSize; ++uIndex)
    {
        SG_uint32* pValue = NULL;
        VERIFY_ERR_CHECK( SG_alloc1(pCtx, pValue) );
        *pValue = uIndex;
        VERIFY_ERR_CHECK( SG_vector__append(pCtx, pVector, pValue, &uOutput1) );
        VERIFY_COND("Added item has unexpected index.", uOutput1 == uIndex);
    }

    // copy the vector
    VERIFY_ERR_CHECK( SG_VECTOR__ALLOC__COPY(pCtx, pVector, MyFn(copy_uint32), MyFn(free_uint32), &pCopy) );

    // verify that the copy matches the original
    VERIFY_ERR_CHECK( SG_vector__length(pCtx, pVector, &uOutput1) );
    VERIFY_ERR_CHECK( SG_vector__length(pCtx, pCopy, &uOutput2) );
    VERIFY_COND("Copied vector's length doesn't match added item count.", uOutput1 == uSize);
    VERIFY_COND("Copied vector's length doesn't match original.", uOutput1 == uOutput2);
    for (uIndex = 0u; uIndex < uOutput1; ++uIndex)
    {
        SG_uint32 uValue1 = 0u;
        SG_uint32 uValue2 = 0u;

        VERIFY_ERR_CHECK( SG_vector__get(pCtx, pVector, uIndex, &pOutput1) );
        VERIFY_ERR_CHECK( SG_vector__get(pCtx, pCopy, uIndex, &pOutput2) );
        VERIFYP_COND("Copied vector's pointer value matches original after deep copy.", pOutput1 != pOutput2, ("index(%d)", uIndex));

        // compare the pointed-to values using separate locals
        // (don't clobber uOutput1, which also serves as the loop bound above)
        uValue1 = *((SG_uint32*)pOutput1);
        uValue2 = *((SG_uint32*)pOutput2);
        VERIFYP_COND("Copied vector's pointed-to value doesn't match original after deep copy.", uValue1 == uValue2, ("index(%d)", uIndex));
    }

fail:
    SG_context__push_level(pCtx);
    SG_vector__free__with_assoc(pCtx, pVector, MyFn(free_uint32));
    SG_vector__free__with_assoc(pCtx, pCopy, MyFn(free_uint32));
    SG_context__pop_level(pCtx);
}
void MyFn(clear__with_assoc)(SG_context * pCtx)
{
    static const SG_uint32 uSize = 100u;

    SG_vector* pVector = NULL;
    SG_uint32  uIndex  = 0u;
    SG_uint32  uOutput = 0u;

    VERIFY_ERR_CHECK( SG_VECTOR__ALLOC(pCtx, &pVector, uSize) );

    // add some allocated data to the vector
    for (uIndex = 0u; uIndex < uSize; ++uIndex)
    {
        SG_uint32* pValue = NULL;
        VERIFY_ERR_CHECK( SG_alloc1(pCtx, pValue) );
        *pValue = uIndex;
        VERIFY_ERR_CHECK( SG_vector__append(pCtx, pVector, pValue, &uOutput) );
        VERIFY_COND("Added item has unexpected index.", uOutput == uIndex);
    }

    // verify that the length is what we expect
    VERIFY_ERR_CHECK( SG_vector__length(pCtx, pVector, &uOutput) );
    VERIFY_COND("Vector's length doesn't match added item count.", uOutput == uSize);

    // clear the vector using our free callback
    VERIFY_ERR_CHECK( SG_vector__clear__with_assoc(pCtx, pVector, MyFn(free_uint32)) );
    // if we get memory leaks, then the callback wasn't properly called to free the elements

    // verify that the vector is now empty
    VERIFY_ERR_CHECK( SG_vector__length(pCtx, pVector, &uOutput) );
    VERIFY_COND("Vector's length is non-zero after being cleared.", uOutput == 0u);

fail:
    SG_VECTOR_NULLFREE(pCtx, pVector);
}
MyMain()
{
//  char bufTopDir[SG_TID_MAX_BUFFER_LENGTH];
//  SG_pathname* pPathTopDir = NULL;

    TEMPLATE_MAIN_START;

//  BEGIN_TEST( SG_tid__generate2(pCtx, bufTopDir, sizeof(bufTopDir), 32) );
//  BEGIN_TEST( SG_PATHNAME__ALLOC__SZ(pCtx, &pPathTopDir,bufTopDir) );
//  BEGIN_TEST( SG_fsobj__mkdir__pathname(pCtx, pPathTopDir) );

    BEGIN_TEST( MyFn(list_vtables)(pCtx) );
    BEGIN_TEST( MyFn(list_hashes)(pCtx) );

    TEMPLATE_MAIN_END;
}
MyMain()
{
    TEMPLATE_MAIN_START;

    BEGIN_TEST( MyFn(do_tests)(pCtx) );

    TEMPLATE_MAIN_END;
}
MyMain()
{
    TEMPLATE_MAIN_START;

#ifdef SG_NIGHTLY_BUILD
    VERIFY_ERR_CHECK( MyFn(test__big_changeset)(pCtx) );
#endif

    VERIFY_ERR_CHECK( MyFn(test__simple)(pCtx) );
    VERIFY_ERR_CHECK( MyFn(test__long_dag)(pCtx) );
    VERIFY_ERR_CHECK( MyFn(test__wide_dag)(pCtx) );

    // Fall through to common cleanup.
fail:
    TEMPLATE_MAIN_END;
}
MyMain()
{
    TEMPLATE_MAIN_START;

    BEGIN_TEST( MyFn(test1)(pCtx) );
    BEGIN_TEST( MyFn(alloc__copy__shallow)(pCtx) );
    BEGIN_TEST( MyFn(alloc__copy__deep)(pCtx) );
    BEGIN_TEST( MyFn(clear__with_assoc)(pCtx) );
    BEGIN_TEST( MyFn(find)(pCtx) );
    BEGIN_TEST( MyFn(remove__index)(pCtx) );
    BEGIN_TEST( MyFn(remove__value)(pCtx) );
    BEGIN_TEST( MyFn(remove__if)(pCtx) );
    BEGIN_TEST( MyFn(match_value__append)(pCtx) );

    TEMPLATE_MAIN_END;
}
MyMain()
{
    TEMPLATE_MAIN_START;

#if defined(HAVE_EXEC_DEBUG_STACKTRACE)
    BEGIN_TEST( MyFn(test1)(pCtx) );
#else
    INFOP("u0085_crash", ("Skipping crash stacktrace test....") );
#endif

    TEMPLATE_MAIN_END;
}
void MyFn(remove__if)(SG_context* pCtx)
{
    static SG_uint32   uItemCount    = 20u;
    static const char* szItemValue   = "abc";
    static SG_uint32   uStartDivisor = 5u;

    SG_vector* pVector  = NULL;
    SG_uint32  uIndex   = 0u;
    SG_uint32  uCount   = uItemCount;
    SG_uint32  uDivisor = 0u;
    SG_uint32  uRemoved = 0u;

    // create a vector with test data
    VERIFY_ERR_CHECK( SG_VECTOR__ALLOC(pCtx, &pVector, 10u) );
    for (uIndex = 0u; uIndex < uItemCount; ++uIndex)
    {
        VERIFY_ERR_CHECK( SG_vector__append(pCtx, pVector, (void*)szItemValue, NULL) );
    }

    // run tests, removing every Nth, then every (N-1)th item, etc
    // the last iteration (every 1th item) will remove every remaining item
    for (uDivisor = uStartDivisor; uDivisor > 0u; --uDivisor)
    {
        SG_uint32 uExpected = uCount / uDivisor;

        VERIFY_ERR_CHECK( SG_vector__remove__if(pCtx, pVector, MyFn(remove__if__predicate), (void*)&uDivisor, &uRemoved, NULL) );
        VERIFYP_COND("Wrong number of items removed.", uRemoved == uExpected, ("Removed(%u) Expected(%u)", uRemoved, uExpected));

        uCount -= uExpected;
    }
    VERIFY_COND("Expected size should be zero.", uCount == 0u);

    // verify that the vector is empty
    VERIFY_ERR_CHECK( MyFn(_verify_size)(pCtx, pVector, 0u) );

fail:
    SG_VECTOR_NULLFREE(pCtx, pVector);
    return;
}
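For reference, the expected-removal arithmetic in the loop above works out as follows (the predicate MyFn(remove__if__predicate) itself is not shown here; the test only assumes it removes uCount / uDivisor items per pass): starting from 20 items, divisor 5 removes 20/5 = 4, leaving 16; divisor 4 removes 16/4 = 4, leaving 12; divisor 3 removes 12/3 = 4, leaving 8; divisor 2 removes 8/2 = 4, leaving 4; and divisor 1 removes the remaining 4. That is why the final checks expect a count of zero and an empty vector.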
void MyFn(create_some_blobs_from_bytes)(SG_context * pCtx, SG_repo * pRepo)
{
    SG_uint64 k;
    char * szRepoId;

    VERIFY_ERR_CHECK_DISCARD( SG_repo__get_repo_id(pCtx, pRepo, &szRepoId) );

    INFOP("create_some_blobs_from_bytes",("RepoID[%s] ", szRepoId ));
    SG_NULLFREE(pCtx, szRepoId);

    //////////////////////////////////////////////////////////////////
    // create a series of blobs of various known lengths and contents.

    for (k=1; k <= MyMaxFile; k+= MyStepFile)
    {
        SG_ERR_IGNORE( MyFn(create_blob_from_bytes)(pCtx, pRepo,(SG_uint32)k,"Hello World!\nThis is line 2.\n") );
        SG_ERR_IGNORE( MyFn(create_blob_from_bytes)(pCtx, pRepo,(SG_uint32)k,"Welcome to the middle of the film!\n") );
    }
}
// Store a blob.  Make sure it doesn't show up until/unless the repo tx is committed.
void MyFn(one_blob)(SG_context * pCtx, SG_repo* pRepo)
{
    SG_byte* pBufIn = NULL;
    SG_uint32 lenBufIn = 0;
    SG_byte* pBufOut = NULL;
    SG_uint64 lenBufOut = 0;
    SG_repo_tx_handle* pTx;
    char* pszHidReturned = NULL;

    VERIFY_ERR_CHECK( MyFn(alloc_random_buffer)(pCtx, &pBufIn, &lenBufIn) );

    // Start writing blob.
    VERIFY_ERR_CHECK( SG_repo__begin_tx(pCtx, pRepo, &pTx) );
    VERIFY_ERR_CHECK( SG_repo__store_blob_from_memory(pCtx, pRepo, pTx, SG_FALSE, pBufIn, lenBufIn, &pszHidReturned) );

    // Should fail: tx not committed.
    VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__fetch_blob_into_memory(pCtx, pRepo, pszHidReturned, &pBufOut, &lenBufOut),
        SG_ERR_BLOB_NOT_FOUND );  // Blob visible before repo tx committed
    SG_NULLFREE(pCtx, pBufOut);

    // Abort repo tx.
    VERIFY_ERR_CHECK( SG_repo__abort_tx(pCtx, pRepo, &pTx) );
    VERIFY_COND("SG_repo__abort_tx should null/free the repo transaction.", !pTx);

    // Should fail: tx aborted.
    VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__fetch_blob_into_memory(pCtx, pRepo, pszHidReturned, &pBufOut, &lenBufOut),
        SG_ERR_BLOB_NOT_FOUND );  // Blob exists after repo tx abort
    SG_NULLFREE(pCtx, pBufOut);
    SG_NULLFREE(pCtx, pszHidReturned);

    // Write blob, commit tx.
    VERIFY_ERR_CHECK( SG_repo__begin_tx(pCtx, pRepo, &pTx) );
    VERIFY_ERR_CHECK( SG_repo__store_blob_from_memory(pCtx, pRepo, pTx, SG_FALSE, pBufIn, lenBufIn, &pszHidReturned) );
    VERIFY_ERR_CHECK( SG_repo__commit_tx(pCtx, pRepo, &pTx) );
    VERIFY_COND("SG_repo__commit_tx should null/free the repo transaction.", !pTx);

    // Read back the blob.  It should exist now.
    VERIFY_ERR_CHECK( SG_repo__fetch_blob_into_memory(pCtx, pRepo, pszHidReturned, &pBufOut, &lenBufOut) );

    // Just verify the length.  It's another test's job to roundtrip blobs and verify data.
    VERIFY_COND( "blob length mismatch", lenBufOut == lenBufIn );

    // Fall through to common cleanup.
fail:
    SG_NULLFREE(pCtx, pszHidReturned);
    SG_NULLFREE(pCtx, pBufIn);
    SG_NULLFREE(pCtx, pBufOut);
}
void MyFn(match_value__append)(SG_context* pCtx)
{
    SG_vector* pVector = NULL;
    SG_uint32  uSize   = 0u;
    SG_uint32  uIndex  = 0u;
    SG_vector* pTarget = NULL;

    // create a test vector
    VERIFY_ERR_CHECK( MyFn(_create_test_vector)(pCtx, &pVector, &uSize) );

    // run through each test item and copy all indices with that value to another vector
    // verify that the other vector receives the correct number of items
    for (uIndex = 0u; uIndex < SG_NrElements(gaTestItems); ++uIndex)
    {
        test_item* pTestItem = gaTestItems + uIndex;

        // allocate a target vector
        VERIFY_ERR_CHECK( SG_VECTOR__ALLOC(pCtx, &pTarget, uSize) );

        // find all values that match the current item
        // add them to the target vector
        VERIFY_ERR_CHECK( SG_vector__find__all(pCtx, pVector, SG_vector__predicate__match_value, (void*)pTestItem->szValue, SG_vector__callback__append, pTarget) );

        // verify the size of the target vector
        VERIFY_ERR_CHECK( MyFn(_verify_size)(pCtx, pTarget, pTestItem->uCount) );

        // free the target vector
        SG_VECTOR_NULLFREE(pCtx, pTarget);
        pTarget = NULL;
    }

fail:
    SG_VECTOR_NULLFREE(pCtx, pVector);
    SG_VECTOR_NULLFREE(pCtx, pTarget);
    return;
}
// Commit a repo tx while a blob is being stored.
//
// We jump through some awkward hoops to clean up memory in this case.  If it becomes more trouble than
// it's worth, this test might not be worth running.
void MyFn(commit_mid_blob)(SG_context * pCtx, SG_repo* pRepo)
{
    SG_blobset* pbs = NULL;
    SG_uint32 count_blobs_before = 0;
    SG_uint64 len_encoded_before = 0;
    SG_uint64 len_full_before = 0;
    SG_uint32 count_blobs_after = 0;
    SG_uint64 len_encoded_after = 0;
    SG_uint64 len_full_after = 0;

    SG_byte* pRandomBuf = NULL;
    SG_uint32 lenRandomBuf;
    SG_uint64 lenTotal = 0;
    SG_repo_tx_handle* pTx = NULL;
    SG_repo_store_blob_handle* pbh;
    SG_uint32 i;
    SG_uint32 iLenWritten = 0;

    VERIFY_ERR_CHECK( SG_repo__list_blobs(pCtx, pRepo, 0, 0, 0, &pbs) );
    VERIFY_ERR_CHECK( SG_blobset__get_stats( pCtx, pbs, 0, &count_blobs_before, &len_encoded_before, &len_full_before, NULL, NULL ) );
    SG_BLOBSET_NULLFREE(pCtx, pbs);

    VERIFY_ERR_CHECK( MyFn(alloc_random_buffer)(pCtx, &pRandomBuf, &lenRandomBuf) );
    lenTotal = lenRandomBuf * 5;

    // Start writing blob.
    VERIFY_ERR_CHECK( SG_repo__begin_tx(pCtx, pRepo, &pTx) );
    VERIFY_ERR_CHECK( SG_repo__store_blob__begin(pCtx, pRepo, pTx, SG_BLOBENCODING__FULL, NULL, lenTotal, 0, NULL, &pbh) );
    for (i=0; i < 3; i++)
    {
        VERIFY_ERR_CHECK( SG_repo__store_blob__chunk(pCtx, pRepo, pbh, lenRandomBuf, pRandomBuf, &iLenWritten) );
        // This chunk is much smaller than SG_STREAMING_BUFFER_SIZE, so the whole thing should be written.
        VERIFY_COND("SG_repo__store_blob__chunk length mismatch.", iLenWritten == lenRandomBuf);
    }

    // We're not done yet, so this should fail.
    VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__commit_tx(pCtx, pRepo, &pTx),
        SG_ERR_INCOMPLETE_BLOB_IN_REPO_TX );  // SG_repo__commit_tx should return SG_ERR_INCOMPLETE_BLOB_IN_REPO_TX
    VERIFY_COND("SG_repo__commit_tx should free the repo transaction.", !pTx);

    SG_NULLFREE(pCtx, pRandomBuf);

    VERIFY_ERR_CHECK( SG_repo__list_blobs(pCtx, pRepo, 0, 0, 0, &pbs) );
    VERIFY_ERR_CHECK( SG_blobset__get_stats( pCtx, pbs, 0, &count_blobs_after, &len_encoded_after, &len_full_after, NULL, NULL ) );
    SG_BLOBSET_NULLFREE(pCtx, pbs);

    VERIFY_COND("blob count mismatch", count_blobs_before == count_blobs_after);
    VERIFY_COND("len_encoded mismatch", len_encoded_before == len_encoded_after);
    VERIFY_COND("len_full mismatch", len_full_before == len_full_after);

    return;

fail:
    SG_NULLFREE(pCtx, pRandomBuf);
}
void MyFn(remove__index)(SG_context* pCtx)
{
    static const SG_uint32 uSize = 20u;

    SG_vector* pVector = NULL;
    SG_uint32  uIndex  = 0u;
    SG_uint32  uCount  = uSize;

    // create some test data
    // uSize elements, each with an SG_uint32* whose value equals its index
    VERIFY_ERR_CHECK( SG_VECTOR__ALLOC(pCtx, &pVector, uSize) );
    for (uIndex = 0u; uIndex < uSize; ++uIndex)
    {
        SG_uint32* pValue = NULL;
        VERIFY_ERR_CHECK( SG_alloc1(pCtx, pValue) );
        *pValue = uIndex;
        VERIFY_ERR_CHECK( SG_vector__append(pCtx, pVector, pValue, NULL) );
    }
    VERIFY_ERR_CHECK( MyFn(_verify_size)(pCtx, pVector, uCount) );

    // remove the last index and verify
    VERIFY_ERR_CHECK( SG_vector__remove__index(pCtx, pVector, uSize - 1u, MyFn(_free_simple_value)) );
    uCount -= 1u;
    VERIFY_ERR_CHECK( MyFn(_verify_size)(pCtx, pVector, uCount) );
    VERIFY_ERR_CHECK( MyFn(_verify_offset_values)(pCtx, pVector, 0u, uCount, 0) );

    // remove the first index and verify
    VERIFY_ERR_CHECK( SG_vector__remove__index(pCtx, pVector, 0u, MyFn(_free_simple_value)) );
    uCount -= 1u;
    VERIFY_ERR_CHECK( MyFn(_verify_size)(pCtx, pVector, uCount) );
    VERIFY_ERR_CHECK( MyFn(_verify_offset_values)(pCtx, pVector, 0u, uCount, 1) );

    // remove a middle index and verify
    uIndex = uCount / 2u;
    VERIFY_ERR_CHECK( SG_vector__remove__index(pCtx, pVector, uIndex, MyFn(_free_simple_value)) );
    uCount -= 1u;
    VERIFY_ERR_CHECK( MyFn(_verify_size)(pCtx, pVector, uCount) );
    VERIFY_ERR_CHECK( MyFn(_verify_offset_values)(pCtx, pVector, 0u, uIndex, 1) );
    VERIFY_ERR_CHECK( MyFn(_verify_offset_values)(pCtx, pVector, uIndex, uCount, 2) );

fail:
    SG_vector__free__with_assoc(pCtx, pVector, MyFn(_free_simple_value));
    return;
}
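A worked trace of the offsets being verified above, assuming MyFn(_verify_offset_values)(pCtx, pVector, start, end, offset) checks that each slot in [start, end) holds a value equal to its index plus offset (the helper itself is not shown here): the vector starts with slots 0..19 holding values 0..19. Removing the last index leaves slots 0..18 holding 0..18 (offset 0). Removing index 0 shifts everything down, so slots 0..17 hold 1..18 (offset 1). Removing the middle index 9 (which held the value 10) then leaves slots 0..8 holding 1..9 (offset 1) and slots 9..16 holding 11..18 (offset 2).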
void MyFn(test__wide_dag)(SG_context* pCtx)
{
    char bufTopDir[SG_TID_MAX_BUFFER_LENGTH];
    SG_pathname* pPathTopDir = NULL;
    char buf_client_repo_name[SG_TID_MAX_BUFFER_LENGTH];
    char buf_server_repo_name[SG_TID_MAX_BUFFER_LENGTH];
    SG_pathname* pPathWorkingDir = NULL;
    SG_vhash* pvh = NULL;
    SG_repo* pClientRepo = NULL;
    SG_client* pClient = NULL;
    char* pszidFirstChangeset = NULL;
    SG_pathname* pPathCsDir = NULL;
    SG_uint32 lines;
    SG_uint32 i, j;
    SG_repo* pServerRepo = NULL;
    SG_bool bMatch = SG_FALSE;
    char buf_filename[7];
    SG_varray* pvaZingMergeLog = NULL;
    SG_varray* pvaZingMergeErr = NULL;

    VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, bufTopDir, sizeof(bufTopDir), 32) );
    VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__SZ(pCtx,&pPathTopDir,bufTopDir) );
    VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx,pPathTopDir) );

    VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, buf_client_repo_name, sizeof(buf_client_repo_name), 32) );
    VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, buf_server_repo_name, sizeof(buf_server_repo_name), 32) );

    INFOP("test__wide_dag", ("client repo: %s", buf_client_repo_name));
    INFOP("test__wide_dag", ("server repo: %s", buf_server_repo_name));

    /* create the repo */
    VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, buf_server_repo_name) );
    VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir) );
    VERIFY_ERR_CHECK( _ut_pt__new_repo2(pCtx, buf_server_repo_name, pPathWorkingDir, &pszidFirstChangeset) );

    /* open that repo */
    VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, buf_server_repo_name, &pServerRepo) );

    /* create an empty clone to pull into */
    VERIFY_ERR_CHECK( SG_repo__create_empty_clone(pCtx, buf_server_repo_name, buf_client_repo_name) );
    VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, buf_client_repo_name, &pClientRepo) );

    /* add stuff to server repo */
    for (i = 0; i < 20; i++)  // number of changesets
    {
        VERIFY_ERR_CHECK( _ut_pt__set_baseline(pCtx, pPathWorkingDir, pszidFirstChangeset) );

        VERIFY_ERR_CHECK( SG_sprintf(pCtx, buf_filename, sizeof(buf_filename), "%d", i) );
        VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathCsDir, pPathWorkingDir, buf_filename) );
        VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathCsDir) );

        for (j = 0; j < 1; j++)  // number of files added per changeset
        {
            VERIFY_ERR_CHECK( SG_sprintf(pCtx, buf_filename, sizeof(buf_filename), "%d", j) );
            lines = (int)(2500.0 * (rand() / (RAND_MAX + 1.0)));
            VERIFY_ERR_CHECK( MyFn(create_file__numbers)(pCtx, pPathCsDir, buf_filename, lines) );
        }
        SG_PATHNAME_NULLFREE(pCtx, pPathCsDir);

        VERIFY_ERR_CHECK( _ut_pt__addremove(pCtx, pPathWorkingDir) );
        VERIFY_ERR_CHECK( MyFn(commit_all)(pCtx, pPathWorkingDir, NULL) );
    }

    /* verify pre-pull repos are different */
    VERIFY_ERR_CHECK( SG_sync__compare_repo_dags(pCtx, pClientRepo, pServerRepo, &bMatch) );
    VERIFY_COND_FAIL("pre-pull repos differ", !bMatch);

    /* get a client and pull from server repo to empty client repo */
    VERIFY_ERR_CHECK( SG_client__open(pCtx, buf_server_repo_name, NULL_CREDENTIAL, &pClient) );  // TODO Credentials
    VERIFY_ERR_CHECK( SG_pull__all(pCtx, buf_client_repo_name, pClient, &pvaZingMergeErr, &pvaZingMergeLog) );
    VERIFY_COND("", !pvaZingMergeErr);

    /* verify post-pull repos are identical */
    VERIFY_ERR_CHECK( SG_sync__compare_repo_dags(pCtx, pClientRepo, pServerRepo, &bMatch) );
    VERIFY_COND_FAIL("post-pull repo DAGs differ", bMatch);
    VERIFY_ERR_CHECK( SG_sync__compare_repo_blobs(pCtx, pClientRepo, pServerRepo, &bMatch) );
    VERIFY_COND_FAIL("post-pull repo blobs differ", bMatch);
    VERIFY_ERR_CHECK( SG_repo__check_integrity(pCtx, pClientRepo, SG_REPO__CHECK_INTEGRITY__DAG_CONSISTENCY, SG_DAGNUM__VERSION_CONTROL, NULL, NULL) );

    SG_REPO_NULLFREE(pCtx, pClientRepo);

    /* Make another copy with clone */
    VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, buf_client_repo_name, sizeof(buf_client_repo_name), 32) );
    SG_ERR_CHECK( SG_repo__create_empty_clone_from_remote(pCtx, pClient, buf_client_repo_name) );
    VERIFY_ERR_CHECK( SG_pull__clone(pCtx, buf_client_repo_name, pClient) );

    /* verify post-clone repos are identical */
    VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, buf_client_repo_name, &pClientRepo) );
    VERIFY_ERR_CHECK( SG_sync__compare_repo_dags(pCtx, pClientRepo, pServerRepo, &bMatch) );
    VERIFY_COND_FAIL("post-clone repo DAGs differ", bMatch);
    VERIFY_ERR_CHECK( SG_sync__compare_repo_blobs(pCtx, pClientRepo, pServerRepo, &bMatch) );
    VERIFY_COND_FAIL("post-clone repo blobs differ", bMatch);
    VERIFY_ERR_CHECK( SG_repo__check_integrity(pCtx, pClientRepo, SG_REPO__CHECK_INTEGRITY__DAG_CONSISTENCY, SG_DAGNUM__VERSION_CONTROL, NULL, NULL) );

    /* TODO: verify more stuff? */

    /* Fall through to common cleanup */

fail:
    /* close client */
    SG_CLIENT_NULLFREE(pCtx, pClient);

    /* close both repos */
    SG_REPO_NULLFREE(pCtx, pServerRepo);
    SG_REPO_NULLFREE(pCtx, pClientRepo);

    SG_NULLFREE(pCtx, pszidFirstChangeset);
    SG_PATHNAME_NULLFREE(pCtx, pPathTopDir);
    SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
    SG_PATHNAME_NULLFREE(pCtx, pPathCsDir);
    SG_VHASH_NULLFREE(pCtx, pvh);
    SG_VARRAY_NULLFREE(pCtx, pvaZingMergeLog);
    SG_VARRAY_NULLFREE(pCtx, pvaZingMergeErr);
}
void MyFn(test__simple)(SG_context* pCtx)
{
    char bufTopDir[SG_TID_MAX_BUFFER_LENGTH];
    SG_pathname* pPathTopDir = NULL;
    char buf_client_repo_name[SG_TID_MAX_BUFFER_LENGTH];
    char buf_server_repo_name[SG_TID_MAX_BUFFER_LENGTH];
    SG_pathname* pPathWorkingDir = NULL;
    SG_vhash* pvh = NULL;
    SG_repo* pClientRepo = NULL;
    SG_client* pClient = NULL;
    SG_repo* pServerRepo = NULL;
    SG_bool bMatch = SG_FALSE;
    SG_varray* pvaZingMergeLog = NULL;
    SG_varray* pvaZingMergeErr = NULL;

    VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, bufTopDir, sizeof(bufTopDir), 32) );
    VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__SZ(pCtx,&pPathTopDir,bufTopDir) );
    VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx,pPathTopDir) );

    VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, buf_client_repo_name, sizeof(buf_client_repo_name), 32) );
    VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, buf_server_repo_name, sizeof(buf_server_repo_name), 32) );

    INFOP("test__simple", ("client repo: %s", buf_client_repo_name));
    INFOP("test__simple", ("server repo: %s", buf_server_repo_name));

    /* create the repo */
    VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, buf_server_repo_name) );
    VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir) );
    VERIFY_ERR_CHECK( _ut_pt__new_repo2(pCtx, buf_server_repo_name, pPathWorkingDir, NULL) );

    /* open that repo */
    VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, buf_server_repo_name, &pServerRepo) );

    /* create an empty clone to pull into */
    VERIFY_ERR_CHECK( SG_repo__create_empty_clone(pCtx, buf_server_repo_name, buf_client_repo_name) );
    VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, buf_client_repo_name, &pClientRepo) );

    /* add stuff to server repo */
    VERIFY_ERR_CHECK( MyFn(create_file__numbers)(pCtx, pPathWorkingDir, "aaa", 10) );
    VERIFY_ERR_CHECK( _ut_pt__addremove(pCtx, pPathWorkingDir) );
    VERIFY_ERR_CHECK( MyFn(commit_all)(pCtx, pPathWorkingDir, NULL) );

    VERIFY_ERR_CHECK( MyFn(create_file__numbers)(pCtx, pPathWorkingDir, "bbb", 10) );
    VERIFY_ERR_CHECK( _ut_pt__addremove(pCtx, pPathWorkingDir) );
    VERIFY_ERR_CHECK( MyFn(commit_all)(pCtx, pPathWorkingDir, NULL) );

    /* verify pre-pull repos are different */
    VERIFY_ERR_CHECK( SG_sync__compare_repo_dags(pCtx, pClientRepo, pServerRepo, &bMatch) );
    VERIFY_COND_FAIL("pre-pull repos differ", !bMatch);

    /* get a client and pull from server repo to empty client repo */
    VERIFY_ERR_CHECK( SG_client__open(pCtx, buf_server_repo_name, NULL_CREDENTIAL, &pClient) );  // TODO Credentials
    VERIFY_ERR_CHECK( SG_pull__all(pCtx, buf_client_repo_name, pClient, &pvaZingMergeErr, &pvaZingMergeLog) );
    VERIFY_COND("", !pvaZingMergeErr);
    SG_CLIENT_NULLFREE(pCtx, pClient);

    /* verify post-pull repos are identical */
    VERIFY_ERR_CHECK( SG_sync__compare_repo_dags(pCtx, pClientRepo, pServerRepo, &bMatch) );
    VERIFY_COND_FAIL("post-pull repo DAGs differ", bMatch);
    VERIFY_ERR_CHECK( SG_sync__compare_repo_blobs(pCtx, pClientRepo, pServerRepo, &bMatch) );
    VERIFY_COND_FAIL("post-pull repo blobs differ", bMatch);
    VERIFY_ERR_CHECK( SG_repo__check_integrity(pCtx, pClientRepo, SG_REPO__CHECK_INTEGRITY__DAG_CONSISTENCY, SG_DAGNUM__VERSION_CONTROL, NULL, NULL) );

    /* TODO: verify more stuff? */

    /* Fall through to common cleanup */

fail:
    /* close client */
    SG_CLIENT_NULLFREE(pCtx, pClient);

    /* close both repos */
    SG_REPO_NULLFREE(pCtx, pServerRepo);
    SG_REPO_NULLFREE(pCtx, pClientRepo);

    SG_PATHNAME_NULLFREE(pCtx, pPathTopDir);
    SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
    SG_VHASH_NULLFREE(pCtx, pvh);
    SG_VARRAY_NULLFREE(pCtx, pvaZingMergeLog);
    SG_VARRAY_NULLFREE(pCtx, pvaZingMergeErr);
}
void MyFn(no_repo_tx)(SG_context * pCtx, SG_repo* pRepo)
{
    SG_repo_tx_handle* pTx = NULL;
    SG_repo_store_blob_handle* pbh;
    SG_byte* pBufIn = NULL;
    SG_uint32 lenBufIn = 0;
    char* pszHidReturned = NULL;
    char* pId = NULL;
    SG_dagnode* pdnCreated = NULL;
    char buf_tid[SG_TID_MAX_BUFFER_LENGTH];

    VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__commit_tx(pCtx, pRepo, NULL),
        SG_ERR_INVALIDARG );  // commit_tx without repo tx didn't fail with INVALIDARG.
    VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__abort_tx(pCtx, pRepo, NULL),
        SG_ERR_INVALIDARG );  // abort_tx without repo tx didn't fail with INVALIDARG.
    VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__store_blob__begin(pCtx, pRepo, NULL, SG_BLOBENCODING__FULL, NULL, 10, 0, NULL, &pbh),
        SG_ERR_INVALIDARG );  // store_blob__begin without repo tx didn't fail with INVALIDARG.

    // Create a repo tx so we can test the other blob functions.
    VERIFY_ERR_CHECK( SG_repo__begin_tx(pCtx, pRepo, &pTx) );
    VERIFY_ERR_CHECK( MyFn(alloc_random_buffer)(pCtx, &pBufIn, &lenBufIn) );
    VERIFY_ERR_CHECK( SG_repo__store_blob__begin(pCtx, pRepo, pTx, SG_BLOBENCODING__FULL, NULL, lenBufIn, 0, NULL, &pbh) );
    VERIFY_ERR_CHECK( SG_repo__store_blob__chunk(pCtx, pRepo, pbh, lenBufIn, pBufIn, NULL) );

    VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__store_blob__end(pCtx, pRepo, NULL, &pbh, &pszHidReturned),
        SG_ERR_INVALIDARG );  // store_blob__end without repo tx didn't fail with INVALIDARG.
    VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__store_blob__abort(pCtx, pRepo, NULL, &pbh),
        SG_ERR_INVALIDARG );  // store_blob__abort without repo tx didn't fail with INVALIDARG.

    VERIFY_ERR_CHECK( SG_repo__store_blob__end(pCtx, pRepo, pTx, &pbh, &pszHidReturned) );
    VERIFY_ERR_CHECK( SG_repo__commit_tx(pCtx, pRepo, &pTx) );

    // create a TID just to get some random data.  use it to create a HID.
    // use the HID as the HID of a hypothetical changeset so that we can create the dagnode.
    VERIFY_ERR_CHECK( SG_tid__generate(pCtx, buf_tid, sizeof(buf_tid)) );
    VERIFY_ERR_CHECK( SG_repo__alloc_compute_hash__from_bytes(pCtx, pRepo, sizeof(buf_tid), (SG_byte *)buf_tid, &pId) );

    VERIFY_ERR_CHECK( SG_dagnode__alloc(pCtx, &pdnCreated, pId, 1, 0) );
    VERIFY_ERR_CHECK( SG_dagnode__freeze(pCtx, pdnCreated) );

    VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__store_dagnode(pCtx, pRepo, NULL, SG_DAGNUM__TESTING__NOTHING, pdnCreated),
        SG_ERR_INVALIDARG );  // store_dagnode without repo tx didn't fail with INVALIDARG.

    // We're intentionally not testing the higher-level store_blob_from_memory and store_blob_from_file
    // routines here because they're just wrappers for the begin/chunk/end routines we do test.

    // Fall through to common cleanup.
fail:
    SG_NULLFREE(pCtx, pszHidReturned);
    SG_NULLFREE(pCtx, pBufIn);
    SG_NULLFREE(pCtx, pId);
    SG_DAGNODE_NULLFREE(pCtx, pdnCreated);
}
void MyFn(do_tests)(SG_context * pCtx)
{
    SG_repo* pRepo = NULL;
    SG_uint8 blobMask;

    VERIFY_ERR_CHECK( MyFn(create_repo)(pCtx, &pRepo) );

    VERIFY_ERR_CHECK( MyFn(no_repo_tx)(pCtx, pRepo) );
    VERIFY_ERR_CHECK( MyFn(one_blob)(pCtx, pRepo) );
    VERIFY_ERR_CHECK( MyFn(abort_mid_blob)(pCtx, pRepo) );
    VERIFY_ERR_CHECK( MyFn(commit_mid_blob)(pCtx, pRepo) );

    // Abort nothing.
    blobMask = 0xFF;
    VERIFY_ERR_CHECK( MyFn(eight_blobs_commit_masked)(pCtx, pRepo, blobMask) );

    // Abort the odd blobs.
    blobMask = 0xAA;
    VERIFY_ERR_CHECK( MyFn(eight_blobs_commit_masked)(pCtx, pRepo, blobMask) );

    // Abort the even blobs.
    blobMask = (~blobMask);
    VERIFY_ERR_CHECK( MyFn(eight_blobs_commit_masked)(pCtx, pRepo, blobMask) );

    // Abort the last 4 blobs (only the low 4 bits are set, so only the first 4 are committed).
    blobMask = 0x0F;
    VERIFY_ERR_CHECK( MyFn(eight_blobs_commit_masked)(pCtx, pRepo, blobMask) );

    // Abort the first 4 blobs (only the high 4 bits are set, so only the last 4 are committed).
    blobMask = 0xF0;
    VERIFY_ERR_CHECK( MyFn(eight_blobs_commit_masked)(pCtx, pRepo, blobMask) );

    // Abort all the blobs.
    blobMask = 0x0;
    VERIFY_ERR_CHECK( MyFn(eight_blobs_commit_masked)(pCtx, pRepo, blobMask) );

    VERIFY_ERR_CHECK( MyFn(multiple_stores_fail)(pCtx, pRepo) );
    VERIFY_ERR_CHECK( MyFn(empty_tx)(pCtx, pRepo) );
    VERIFY_ERR_CHECK( MyFn(one_dagnode)(pCtx, pRepo) );

    // fall thru
fail:
    SG_repo__free(pCtx, pRepo);
}
// Create 8 blobs, commit those whose bit is set in blobMask, abort the rest.  Verify results.
void MyFn(eight_blobs_commit_masked)(SG_context * pCtx, SG_repo* pRepo, SG_uint8 blobMask)
{
    SG_blobset* pbs = NULL;
    SG_uint32 count_blobs_before = 0;
    SG_uint64 len_encoded_before = 0;
    SG_uint64 len_full_before = 0;
    SG_uint32 count_blobs_after = 0;
    SG_uint64 len_encoded_after = 0;
    SG_uint64 len_full_after = 0;

    SG_byte* pRandomBuf = NULL;
    SG_uint32 lenRandomBuf;
    SG_uint64 lenTotal = 0;
    SG_repo_tx_handle* pTx = NULL;
    SG_repo_store_blob_handle* pbh;
    SG_uint32 i,j;
    SG_uint32 iLenWritten = 0;
    char* apszHids[8] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };  // initialized so the cleanup below is safe after an early failure
    SG_uint8 countBlobsToAdd;
    SG_uint8 mask = blobMask;

    // Count the number of bits set in blobMask.
    for (countBlobsToAdd = 0; mask; mask >>= 1)
        countBlobsToAdd += mask & 1;

    VERIFY_ERR_CHECK( SG_repo__list_blobs(pCtx, pRepo, 0, 0, 0, &pbs) );
    VERIFY_ERR_CHECK( SG_blobset__get_stats( pCtx, pbs, 0, &count_blobs_before, &len_encoded_before, &len_full_before, NULL, NULL ) );
    SG_BLOBSET_NULLFREE(pCtx, pbs);

    VERIFY_ERR_CHECK( SG_repo__begin_tx(pCtx, pRepo, &pTx) );

    for (i=0; i < 8; i++)
    {
        VERIFY_ERR_CHECK( MyFn(alloc_random_buffer)(pCtx, &pRandomBuf, &lenRandomBuf) );
        lenTotal = lenRandomBuf * 3;

        VERIFY_ERR_CHECK( SG_repo__store_blob__begin(pCtx, pRepo, pTx, SG_BLOBENCODING__FULL, NULL, lenTotal, 0, NULL, &pbh) );
        for (j=0; j < 3; j++)
        {
            VERIFY_ERR_CHECK( SG_repo__store_blob__chunk(pCtx, pRepo, pbh, lenRandomBuf, pRandomBuf, &iLenWritten) );
            // This chunk is much smaller than SG_STREAMING_BUFFER_SIZE, so the whole thing should be written.
            VERIFY_COND("SG_repo__store_blob__chunk length mismatch.", iLenWritten == lenRandomBuf);

            if ((1 == j) && ((1 << i & blobMask) == 0))  // we're mid-blob and blob is to be aborted (bit is unset)
                break;
        }

        if ((1 << i & blobMask) != 0)  // blob is to be committed (bit is set)
        {
            // Finish the blob.
            VERIFY_ERR_CHECK( SG_repo__store_blob__end(pCtx, pRepo, pTx, &pbh, &(apszHids[i])) );
        }
        else
        {
            // Abort the blob.
            VERIFY_ERR_CHECK( SG_repo__store_blob__abort(pCtx, pRepo, pTx, &pbh) );
            apszHids[i] = NULL;
        }

        SG_NULLFREE(pCtx, pRandomBuf);
    }

    VERIFY_ERR_CHECK( SG_repo__commit_tx(pCtx, pRepo, &pTx) );

    VERIFY_ERR_CHECK( SG_repo__list_blobs(pCtx, pRepo, 0, 0, 0, &pbs) );
    VERIFY_ERR_CHECK( SG_blobset__get_stats( pCtx, pbs, 0, &count_blobs_after, &len_encoded_after, &len_full_after, NULL, NULL ) );
    SG_BLOBSET_NULLFREE(pCtx, pbs);

    VERIFY_COND("blob count mismatch", (count_blobs_before + countBlobsToAdd) == count_blobs_after);

    // Verify HIDs we think we added, were.
    for (i=0; i < 8; i++)
    {
        if (apszHids[i])
        {
            SG_uint64 len = 0;
            VERIFY_ERR_CHECK( SG_repo__fetch_blob_into_memory(pCtx, pRepo, apszHids[i], &pRandomBuf, &len) );
            SG_NULLFREE(pCtx, pRandomBuf);
        }
    }

    // Fall through to common cleanup.
fail:
    SG_NULLFREE(pCtx, pRandomBuf);
    for (i=0; i < 8; i++)
        SG_NULLFREE(pCtx, apszHids[i]);
}
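The mask convention above (bit i set means blob i is committed, bit i clear means it is aborted mid-store) can be sanity-checked outside the SG_ test harness with a few lines of plain C. This is an illustration only, using the 0xAA mask passed in by the do_tests driver.

// Illustration only: which of the 8 blobs a given mask commits.
#include <stdio.h>

int main(void)
{
    unsigned char blobMask = 0xAA;  // 10101010b: bits 1,3,5,7 set
    unsigned i, committed = 0;

    for (i = 0; i < 8; i++)
    {
        if ((1u << i) & blobMask)
        {
            printf("blob %u: committed (store_blob__end)\n", i);
            committed++;
        }
        else
        {
            printf("blob %u: aborted (store_blob__abort)\n", i);
        }
    }
    printf("%u of 8 committed\n", committed);  // 0xAA has 4 bits set, matching countBlobsToAdd
    return 0;
}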