/**
 * Verify that a dagnode stored inside a repo transaction is not visible
 * until the transaction commits, and is discarded when the tx is aborted.
 *
 * pCtx  - error/context object (test macros record failures here)
 * pRepo - open repo to exercise (caller owns)
 */
void MyFn(one_dagnode)(SG_context * pCtx, SG_repo* pRepo)
{
	char* pId = NULL;
	SG_dagnode* pdnCreated = NULL;
	SG_dagnode* pdnFetched = NULL;
	SG_repo_tx_handle* pTx = NULL;
	char buf_tid[SG_TID_MAX_BUFFER_LENGTH];

	// Create a TID just to get some random data; hash it to get a HID we
	// can use as the id of a hypothetical changeset.
	VERIFY_ERR_CHECK(  SG_tid__generate(pCtx, buf_tid, sizeof(buf_tid))  );
	VERIFY_ERR_CHECK(  SG_repo__alloc_compute_hash__from_bytes(pCtx, pRepo, sizeof(buf_tid), (SG_byte *)buf_tid, &pId)  );

	VERIFY_ERR_CHECK(  SG_dagnode__alloc(pCtx, &pdnCreated, pId, 1, 0)  );
	VERIFY_ERR_CHECK(  SG_dagnode__freeze(pCtx, pdnCreated)  );

	// Add dagnode inside a repo tx.
	VERIFY_ERR_CHECK(  SG_repo__begin_tx(pCtx, pRepo, &pTx)  );
	VERIFY_ERR_CHECK(  SG_repo__store_dagnode(pCtx, pRepo, pTx, SG_DAGNUM__TESTING__NOTHING, pdnCreated)  );
	pdnCreated = NULL;	// ownership passed to the repo by the successful store

	// Should fail: tx not committed.
	VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD(  SG_repo__fetch_dagnode(pCtx, pRepo, SG_DAGNUM__TESTING__NOTHING, pId, &pdnFetched), SG_ERR_NOT_FOUND  ); // Dag node visible before repo tx committed.

	// Abort repo tx.
	VERIFY_ERR_CHECK(  SG_repo__abort_tx(pCtx, pRepo, &pTx)  );
	VERIFY_COND("SG_repo__abort_tx should null/free the repo transaction.", !pTx);

	// Should fail: tx aborted.
	VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD(  SG_repo__fetch_dagnode(pCtx, pRepo, SG_DAGNUM__TESTING__NOTHING, pId, &pdnFetched), SG_ERR_NOT_FOUND  ); // Dag node exists after repo tx abort

	// Write dagnode, commit tx.
	VERIFY_ERR_CHECK(  SG_repo__begin_tx(pCtx, pRepo, &pTx)  );
	VERIFY_ERR_CHECK(  SG_dagnode__alloc(pCtx, &pdnCreated, pId, 1, 0)  );
	VERIFY_ERR_CHECK(  SG_dagnode__freeze(pCtx, pdnCreated)  );
	VERIFY_ERR_CHECK(  SG_repo__store_dagnode(pCtx, pRepo, pTx, SG_DAGNUM__TESTING__NOTHING, pdnCreated)  );
	pdnCreated = NULL;
	VERIFY_ERR_CHECK(  SG_repo__commit_tx(pCtx, pRepo, &pTx)  );
	VERIFY_COND("SG_repo__commit_tx should null/free the repo transaction.", !pTx);

	// Read back the dagnode.  It should exist now.
	VERIFY_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, SG_DAGNUM__TESTING__NOTHING, pId, &pdnFetched)  );

	// Fall through to common cleanup.

fail:
	// If a step failed while a repo tx was still open, abort it so the
	// tx handle is not leaked.  (The original cleanup omitted this.)
	if (pTx)
		SG_ERR_IGNORE(  SG_repo__abort_tx(pCtx, pRepo, &pTx)  );
	SG_NULLFREE(pCtx, pId);
	SG_DAGNODE_NULLFREE(pCtx, pdnCreated);
	SG_DAGNODE_NULLFREE(pCtx, pdnFetched);
}
/**
 * Create a random dagnode (optionally with one parent) and commit it to
 * the given dag in pRepo.
 *
 * ppszid      - [out] receives the newly allocated HID of the node; the
 *               caller owns it and must free it (even on failure, if set).
 * pszidParent - optional HID of a parent node, or NULL for a root node.
 * iDagNum     - dag to store the node in.
 */
void u0048_multidag__add_dagnode(SG_context * pCtx, char** ppszid, const char* pszidParent, SG_uint32 iDagNum, SG_repo* pRepo)
{
	char buf_tid[SG_TID_MAX_BUFFER_LENGTH];
	SG_dagnode* pdn = NULL;
	SG_repo_tx_handle* pTx = NULL;	// was uninitialized; cleanup below needs a known value

	// create a TID just to get some random data.  use it to create a HID.
	// use the HID as the HID of a hypothetical changeset so that we can create the dagnode.
	VERIFY_ERR_CHECK(  SG_tid__generate(pCtx, buf_tid, sizeof(buf_tid))  );
	VERIFY_ERR_CHECK(  SG_repo__alloc_compute_hash__from_bytes(pCtx, pRepo, sizeof(buf_tid), (SG_byte *)buf_tid, ppszid)  );

	VERIFY_ERR_CHECK(  SG_dagnode__alloc(pCtx, &pdn, *ppszid, pszidParent ? 2 : 1)  );
	if (pszidParent)
	{
		VERIFY_ERR_CHECK(  SG_dagnode__add_parent(pCtx, pdn, pszidParent)  );
	}
	VERIFY_ERR_CHECK(  SG_dagnode__freeze(pCtx, pdn)  );

	VERIFY_ERR_CHECK(  SG_repo__begin_tx(pCtx, pRepo, &pTx)  );
	VERIFY_ERR_CHECK(  SG_repo__store_dagnode(pCtx, pRepo, pTx, iDagNum, pdn)  );
	pdn = NULL;	// ownership passed to the repo by the successful store
	VERIFY_ERR_CHECK(  SG_repo__commit_tx(pCtx, pRepo, &pTx)  );

	return;

fail:
	// The original error path was a bare return, leaking pdn and any
	// still-open repo tx.  Release both here.
	if (pTx)
		SG_ERR_IGNORE(  SG_repo__abort_tx(pCtx, pRepo, &pTx)  );
	SG_DAGNODE_NULLFREE(pCtx, pdn);
}
/**
 * Verify that repo-tx-dependent operations (commit/abort/store blob/store
 * dagnode) fail with SG_ERR_INVALIDARG when called without a repo tx.
 *
 * pCtx  - error/context object (test macros record failures here)
 * pRepo - open repo to exercise (caller owns)
 */
void MyFn(no_repo_tx)(SG_context * pCtx, SG_repo* pRepo)
{
	SG_repo_tx_handle* pTx = NULL;
	SG_repo_store_blob_handle* pbh = NULL;	// was uninitialized
	SG_byte* pBufIn = NULL;
	SG_uint32 lenBufIn = 0;
	char* pszHidReturned = NULL;
	char* pId = NULL;
	SG_dagnode* pdnCreated = NULL;
	char buf_tid[SG_TID_MAX_BUFFER_LENGTH];

	VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD(  SG_repo__commit_tx(pCtx, pRepo, NULL), SG_ERR_INVALIDARG  ); // commit_tx without repo tx didn't fail with INVALIDARG.
	VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD(  SG_repo__abort_tx(pCtx, pRepo, NULL), SG_ERR_INVALIDARG  ); // abort_tx without repo tx didn't fail with INVALIDARG.
	VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD(  SG_repo__store_blob__begin(pCtx, pRepo, NULL, SG_BLOBENCODING__FULL, NULL, 10, 0, NULL, &pbh), SG_ERR_INVALIDARG  ); // store_blob__begin without repo tx didn't fail with INVALIDARG.

	// Create a repo tx so we can test the other blob functions.
	VERIFY_ERR_CHECK(  SG_repo__begin_tx(pCtx, pRepo, &pTx)  );
	VERIFY_ERR_CHECK(  MyFn(alloc_random_buffer)(pCtx, &pBufIn, &lenBufIn)  );
	VERIFY_ERR_CHECK(  SG_repo__store_blob__begin(pCtx, pRepo, pTx, SG_BLOBENCODING__FULL, NULL, lenBufIn, 0, NULL, &pbh)  );
	VERIFY_ERR_CHECK(  SG_repo__store_blob__chunk(pCtx, pRepo, pbh, lenBufIn, pBufIn, NULL)  );
	VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD(  SG_repo__store_blob__end(pCtx, pRepo, NULL, &pbh, &pszHidReturned), SG_ERR_INVALIDARG  ); // store_blob__end without repo tx didn't fail with INVALIDARG.
	VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD(  SG_repo__store_blob__abort(pCtx, pRepo, NULL, &pbh), SG_ERR_INVALIDARG  ); // store_blob__abort without repo tx didn't fail with INVALIDARG.
	VERIFY_ERR_CHECK(  SG_repo__store_blob__end(pCtx, pRepo, pTx, &pbh, &pszHidReturned)  );
	VERIFY_ERR_CHECK(  SG_repo__commit_tx(pCtx, pRepo, &pTx)  );

	// create a TID just to get some random data.  use it to create a HID.
	// use the HID as the HID of a hypothetical changeset so that we can create the dagnode.
	VERIFY_ERR_CHECK(  SG_tid__generate(pCtx, buf_tid, sizeof(buf_tid))  );
	VERIFY_ERR_CHECK(  SG_repo__alloc_compute_hash__from_bytes(pCtx, pRepo, sizeof(buf_tid), (SG_byte *)buf_tid, &pId)  );
	VERIFY_ERR_CHECK(  SG_dagnode__alloc(pCtx, &pdnCreated, pId, 1, 0)  );
	VERIFY_ERR_CHECK(  SG_dagnode__freeze(pCtx, pdnCreated)  );

	VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD(  SG_repo__store_dagnode(pCtx, pRepo, NULL, SG_DAGNUM__TESTING__NOTHING, pdnCreated), SG_ERR_INVALIDARG  ); // store_dagnode without repo tx didn't fail with INVALIDARG.

	// We're intentionally not testing the higher-level store_blob_from_memory and store_blob_from_file
	// routines here because they're just wrappers for the begin/chunk/end routines we do test.

	// Fall through to common cleanup.

fail:
	// Release any in-flight blob store and still-open repo tx so a failed
	// test step doesn't leak them.  (The original cleanup omitted both.)
	if (pbh && pTx)
		SG_ERR_IGNORE(  SG_repo__store_blob__abort(pCtx, pRepo, pTx, &pbh)  );
	if (pTx)
		SG_ERR_IGNORE(  SG_repo__abort_tx(pCtx, pRepo, &pTx)  );
	SG_NULLFREE(pCtx, pszHidReturned);
	SG_NULLFREE(pCtx, pBufIn);
	SG_NULLFREE(pCtx, pId);
	SG_DAGNODE_NULLFREE(pCtx, pdnCreated);
}