/**
 * Unit-test helper: build a vhash of the shape
 *   { "hello":"world",
 *     "a": [31, 51, {"not":"now"}, 71, [81,82,83,84], 91],
 *     "b":"fiddle" }
 * On success *pph owns the result.
 *
 * Fix: the fail path was an empty "TODO free" stub; it now releases
 * every container this function may still own.
 */
void u0025_vhash__create(SG_context * pCtx, SG_vhash** pph)
{
	SG_vhash* ph = NULL;
	SG_varray* pa = NULL;
	SG_vhash* pvhSub = NULL;
	SG_varray* pvaSub = NULL;

	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &ph)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, ph, "hello", "world")  );

	SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pa)  );
	SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pa, 31)  );
	SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pa, 51)  );

	// Nested vhash element inside the array.
	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhSub)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhSub, "not", "now")  );
	SG_ERR_CHECK(  SG_varray__append__vhash(pCtx, pa, &pvhSub)  );

	SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pa, 71)  );

	// Nested varray element inside the array.
	SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pvaSub)  );
	SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pvaSub, 81)  );
	SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pvaSub, 82)  );
	SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pvaSub, 83)  );
	SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pvaSub, 84)  );
	SG_ERR_CHECK(  SG_varray__append__varray(pCtx, pa, &pvaSub)  );

	SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pa, 91)  );

	SG_ERR_CHECK(  SG_vhash__add__varray(pCtx, ph, "a", &pa)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, ph, "b", "fiddle")  );

	*pph = ph;

	return;

fail:
	// The double-pointer append/add calls presumably null the source
	// pointer when they take ownership (convention elsewhere in this
	// codebase), so these frees only release whatever was not yet
	// transferred into a parent container.
	SG_VARRAY_NULLFREE(pCtx, pvaSub);
	SG_VHASH_NULLFREE(pCtx, pvhSub);
	SG_VARRAY_NULLFREE(pCtx, pa);
	SG_VHASH_NULLFREE(pCtx, ph);
}
/**
 * Unit test: create repo descriptor "r1", map the cwd to it, and verify
 * SG_workingdir__find_mapping resolves both the mapped root itself and
 * a deeper (non-existent) path beneath it.
 *
 * Fix: the pszidGid out-parameter returned by each
 * SG_workingdir__find_mapping call was never freed, and the first value
 * was overwritten by the second call -- a memory leak.
 */
void u0038_test_wdmapping(SG_context * pCtx)
{
	SG_vhash* pvh = NULL;
	SG_pathname* pPath = NULL;
	SG_pathname* pMappedPath = NULL;
	SG_string* pstrRepoDescriptorName = NULL;
	char* pszidGid = NULL;

	VERIFY_ERR_CHECK_DISCARD(  SG_PATHNAME__ALLOC(pCtx, &pPath)  );
	VERIFY_ERR_CHECK_DISCARD(  SG_pathname__set__from_cwd(pCtx, pPath)  );

	// Register a throwaway descriptor named "r1".
	VERIFY_ERR_CHECK_DISCARD(  SG_VHASH__ALLOC(pCtx, &pvh)  );
	VERIFY_ERR_CHECK_DISCARD(  SG_vhash__add__string__sz(pCtx, pvh, "hello", "world")  );
	VERIFY_ERR_CHECK_DISCARD(  SG_vhash__add__string__sz(pCtx, pvh, "hola", "mundo")  );
	SG_ERR_IGNORE(  SG_closet__descriptors__remove(pCtx, "r1")  );
	VERIFY_ERR_CHECK_DISCARD(  SG_closet__descriptors__add(pCtx, "r1", pvh)  );

	VERIFY_ERR_CHECK_DISCARD(  SG_workingdir__set_mapping(pCtx, pPath, "r1", NULL)  );

	// Lookup 1: the mapped directory itself.
	VERIFY_ERR_CHECK_DISCARD(  SG_workingdir__find_mapping(pCtx, pPath, &pMappedPath, &pstrRepoDescriptorName, &pszidGid)  );
	VERIFY_COND("ridesc match", (0 == strcmp("r1", SG_string__sz(pstrRepoDescriptorName))));

	VERIFY_ERR_CHECK_DISCARD(  SG_pathname__append__from_sz(pCtx, pPath, "foo")  );
	VERIFY_ERR_CHECK_DISCARD(  SG_pathname__append__from_sz(pCtx, pPath, "bar")  );
	VERIFY_ERR_CHECK_DISCARD(  SG_pathname__append__from_sz(pCtx, pPath, "plok")  );

	// Release the first lookup's results before reusing the out-params.
	SG_STRING_NULLFREE(pCtx, pstrRepoDescriptorName);
	SG_PATHNAME_NULLFREE(pCtx, pMappedPath);
	SG_NULLFREE(pCtx, pszidGid);

	// Lookup 2: a descendant path of the mapped directory.
	VERIFY_ERR_CHECK_DISCARD(  SG_workingdir__find_mapping(pCtx, pPath, &pMappedPath, &pstrRepoDescriptorName, &pszidGid)  );

	SG_STRING_NULLFREE(pCtx, pstrRepoDescriptorName);
	SG_PATHNAME_NULLFREE(pCtx, pMappedPath);
	SG_NULLFREE(pCtx, pszidGid);
	SG_PATHNAME_NULLFREE(pCtx, pPath);
	SG_VHASH_NULLFREE(pCtx, pvh);
}
/**
 * Set or replace a single string key in the JSON file at pPath,
 * creating the file (and its top-level object) if necessary.
 */
void SG_vfile__update__string__sz(
	SG_context* pCtx,
	const SG_pathname* pPath, /**< The path of the file containing the JSON text */
	const char* putf8Key,
	const char* pszVal
	)
{
	SG_vfile* pVFile = NULL;
	SG_vhash* pvhRoot = NULL;

	SG_ERR_CHECK(  SG_vfile__begin(pCtx, pPath, SG_FILE_RDWR | SG_FILE_OPEN_OR_CREATE, &pvhRoot, &pVFile)  );

	// A brand-new or empty file has no top-level object yet; make one.
	if (NULL == pvhRoot)
	{
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhRoot)  );
	}

	SG_ERR_CHECK(  SG_vhash__update__string__sz(pCtx, pvhRoot, putf8Key, pszVal)  );

	// Write the (possibly new) object back and close the vfile.
	SG_ERR_CHECK(  SG_vfile__end(pCtx, &pVFile, pvhRoot)  );
	SG_VHASH_NULLFREE(pCtx, pvhRoot);

	return;

fail:
	SG_ERR_IGNORE(  sg_vfile__dispose(pCtx, pVFile)  );
	SG_VHASH_NULLFREE(pCtx, pvhRoot);
}
/**
 * Build the minimal "heartbeat" info vhash for a repo: repo id,
 * admin id, and hash method.  On success *ppvh owns the result.
 */
void SG_sync_remote__heartbeat(
	SG_context* pCtx,
	SG_repo* pRepo,
	SG_vhash** ppvh)
{
	SG_vhash* pvhInfo = NULL;
	char* pszIdRepo = NULL;
	char* pszIdAdmin = NULL;
	char* pszHash = NULL;

	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhInfo)  );

	// Gather the three identifying strings from the repo...
	SG_ERR_CHECK(  SG_repo__get_repo_id(pCtx, pRepo, &pszIdRepo)  );
	SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepo, &pszIdAdmin)  );
	SG_ERR_CHECK(  SG_repo__get_hash_method(pCtx, pRepo, &pszHash)  );

	// ...and record them under the well-known sync keys.
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhInfo, SG_SYNC_REPO_INFO_KEY__REPO_ID, pszIdRepo)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhInfo, SG_SYNC_REPO_INFO_KEY__ADMIN_ID, pszIdAdmin)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhInfo, SG_SYNC_REPO_INFO_KEY__HASH_METHOD, pszHash)  );

	*ppvh = pvhInfo;
	pvhInfo = NULL;

	/* fall through */
fail:
	SG_VHASH_NULLFREE(pCtx, pvhInfo);
	SG_NULLFREE(pCtx, pszIdRepo);
	SG_NULLFREE(pCtx, pszIdAdmin);
	SG_NULLFREE(pCtx, pszHash);
}
/**
 * Record (in the drawer's repo.json) the mapping between a local working
 * directory and a named repo instance descriptor, optionally anchored to
 * a directory GID within the repo.
 */
void SG_workingdir__set_mapping(
	SG_context* pCtx,
	const SG_pathname* pPathLocalDirectory,
	const char* pszNameRepoInstanceDescriptor, /**< The name of the repo instance descriptor */
	const char* pszidGidAnchorDirectory /**< The GID of the directory within the repo to which this is anchored.  Usually it's user root. */
	)
{
	SG_vhash* pvhNew = NULL;
	SG_vhash* pvh = NULL;
	SG_pathname* pMyPath = NULL;
	SG_pathname* pDrawerPath = NULL;
	SG_pathname* pMappingFilePath = NULL;

	SG_NULLARGCHECK_RETURN(pPathLocalDirectory);
	SG_NULLARGCHECK_RETURN(pszNameRepoInstanceDescriptor);

	/* make a copy of the path so we can modify it (adding the final slash) */
	SG_ERR_CHECK(  SG_PATHNAME__ALLOC__COPY(pCtx, &pMyPath, pPathLocalDirectory)  );

	/* null the original parameter pointer to make sure we don't use it anymore */
	pPathLocalDirectory = NULL;

	/* make sure the path we were given is a directory that exists */
	SG_ERR_CHECK(  SG_fsobj__verify_directory_exists_on_disk__pathname(pCtx, pMyPath)  );

	/* it's a directory, so it should have a final slash */
	SG_ERR_CHECK(  SG_pathname__add_final_slash(pCtx, pMyPath)  );

	/* make sure the name of the repo instance descriptor is valid */
	SG_ERR_CHECK(  SG_closet__descriptors__get(pCtx, pszNameRepoInstanceDescriptor, &pvh)  );
	SG_VHASH_NULLFREE(pCtx, pvh);

	// TODO verify that the anchor GID is valid for that repo?

	// Build the mapping record.
	// NOTE(review): pszidGidAnchorDirectory may be NULL (callers in this
	// file pass NULL); presumably SG_vhash__add__string__sz tolerates a
	// NULL value -- confirm.
	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhNew)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhNew, "descriptor", pszNameRepoInstanceDescriptor)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhNew, "anchor", pszidGidAnchorDirectory)  );

	// Persist the record under the "mapping" key of <drawer>/repo.json.
	SG_ERR_CHECK(  SG_workingdir__verify_drawer_exists(pCtx, pMyPath, &pDrawerPath)  );
	SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pMappingFilePath, pDrawerPath, "repo.json")  );
	SG_ERR_CHECK(  SG_vfile__update__vhash(pCtx, pMappingFilePath, "mapping", pvhNew)  );

	SG_VHASH_NULLFREE(pCtx, pvhNew);
	SG_PATHNAME_NULLFREE(pCtx, pMyPath);
	SG_PATHNAME_NULLFREE(pCtx, pDrawerPath);
	SG_PATHNAME_NULLFREE(pCtx, pMappingFilePath);

	return;

fail:
	SG_PATHNAME_NULLFREE(pCtx, pDrawerPath);
	SG_PATHNAME_NULLFREE(pCtx, pMappingFilePath);
	SG_PATHNAME_NULLFREE(pCtx, pMyPath);
	SG_VHASH_NULLFREE(pCtx, pvhNew);
	SG_VHASH_NULLFREE(pCtx, pvh);
}
/**
 * Allocate a new SG_dbrecord (with its backing vhash).
 * On success *ppResult owns the record; on failure nothing is leaked.
 *
 * Fix: prec was declared uninitialized; initialize it to NULL so the
 * fail path can never free an indeterminate pointer (CERT EXP33-C).
 */
void SG_dbrecord__alloc(SG_context* pCtx, SG_dbrecord** ppResult)
{
	SG_dbrecord * prec = NULL;

	SG_NULLARGCHECK_RETURN(ppResult);

	SG_ERR_CHECK_RETURN(  SG_alloc1(pCtx, prec)  );

	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &prec->pvh)  );

	*ppResult = prec;

	return;

fail:
	SG_NULLFREE(pCtx, prec);
}
/**
 * Query the work-item-tracking "list items" hook (if one is installed)
 * with a search term.  When the hook's result contains an "items"
 * array and pBugs is non-NULL, the items are copied into pBugs.
 */
void SG_vc_hooks__ASK__WIT__LIST_ITEMS(
	SG_context* pCtx,
	SG_repo* pRepo,
	const char * psz_search_term,
	SG_varray *pBugs
	)
{
	SG_vhash* pvhParams = NULL;
	SG_vhash* pvhResult = NULL;
	SG_vhash* pvhHook = NULL;
	const char* pszJs = NULL;
	const char* pszDescriptorName = NULL;
	SG_bool bHaveItems = SG_FALSE;

	SG_ERR_CHECK(  sg_vc_hooks__lookup_by_interface__single_result(pCtx, pRepo, SG_VC_HOOK__INTERFACE__ASK__WIT__LIST_ITEMS, &pvhHook)  );
	if (!pvhHook)
		return;		// no hook installed; nothing to ask

	SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhHook, "js", &pszJs)  );

	// Build the parameter vhash the hook's JS will receive.
	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhParams)  );
	SG_ERR_CHECK(  SG_repo__get_descriptor_name(pCtx, pRepo, &pszDescriptorName)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhParams, "descriptor_name", pszDescriptorName)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhParams, "text", psz_search_term)  );

	SG_ERR_CHECK(  SG_vc_hooks__execute(pCtx, pszJs, pvhParams, &pvhResult)  );

	SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhResult, "items", &bHaveItems)  );
	if (bHaveItems && pBugs)
	{
		SG_varray *pvaItems = NULL;

		SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvhResult, "items", &pvaItems)  );
		SG_ERR_CHECK(  SG_varray__copy_items(pCtx, pvaItems, pBugs)  );
	}

fail:
	SG_VHASH_NULLFREE(pCtx, pvhParams);
	SG_VHASH_NULLFREE(pCtx, pvhResult);
	SG_VHASH_NULLFREE(pCtx, pvhHook);
}
/**
 * Set or replace key putf8Key in the JSON file at pPath with a varray
 * value, creating the file (and its top-level object) when absent.
 * NOTE(review): ppValue is a double pointer -- presumably
 * SG_vhash__update__varray takes ownership and nulls it; confirm.
 */
void SG_vfile__update__varray(SG_context* pCtx, const SG_pathname* pPath, const char* putf8Key, SG_varray ** ppValue)
{
	SG_vfile* pVFile = NULL;
	SG_vhash* pvhRoot = NULL;

	SG_ERR_CHECK(  SG_vfile__begin(pCtx, pPath, SG_FILE_RDWR | SG_FILE_OPEN_OR_CREATE, &pvhRoot, &pVFile)  );

	// An empty or brand-new file yields no top-level object; make one.
	if (NULL == pvhRoot)
	{
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhRoot)  );
	}

	SG_ERR_CHECK(  SG_vhash__update__varray(pCtx, pvhRoot, putf8Key, ppValue)  );

	// Write the object back and close the vfile.
	SG_ERR_CHECK(  SG_vfile__end(pCtx, &pVFile, pvhRoot)  );
	SG_VHASH_NULLFREE(pCtx, pvhRoot);

	return;

fail:
	SG_ERR_IGNORE(  sg_vfile__dispose(pCtx, pVFile)  );
	SG_VHASH_NULLFREE(pCtx, pvhRoot);
}
/**
 * Request a full-repository fragball ("clone") from the server and
 * download it into pStagingPathname.  On success, *ppszFragballName
 * (if requested) receives the generated fragball filename; caller frees.
 *
 * Fix: the pSyncClient check used SG_NULLARGCHECK_RETURN *after*
 * SG_log__push_operation, so a NULL arg returned without the matching
 * pop and left the log operation stack unbalanced.  Check args first.
 */
void sg_sync_client__http__pull_clone(
	SG_context* pCtx,
	SG_sync_client* pSyncClient,
	SG_vhash* pvh_clone_request,
	const SG_pathname* pStagingPathname,
	char** ppszFragballName)
{
	SG_vhash* pvhRequest = NULL;
	char* pszFragballName = NULL;
	SG_pathname* pPathFragball = NULL;
	SG_string* pstrRequest = NULL;
	char* pszUrl = NULL;

	SG_NULLARGCHECK_RETURN(pSyncClient);

	SG_ERR_CHECK(  SG_log__push_operation(pCtx, "Requesting repository from server", SG_LOG__FLAG__NONE)  );

	// Generate a unique (TID-based) local name for the fragball file.
	SG_ERR_CHECK(  SG_allocN(pCtx, SG_TID_MAX_BUFFER_LENGTH, pszFragballName)  );
	SG_ERR_CHECK(  SG_tid__generate(pCtx, pszFragballName, SG_TID_MAX_BUFFER_LENGTH)  );

	SG_ERR_CHECK(  _get_sync_url(pCtx, pSyncClient->psz_remote_repo_spec, SYNC_URL_SUFFIX FRAGBALL_URL_SUFFIX, NULL, NULL, &pszUrl)  );

	// Build the JSON request body: clone marker plus optional details.
	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhRequest)  );
	SG_ERR_CHECK(  SG_vhash__add__null(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE)  );
	if (pvh_clone_request)
	{
		SG_ERR_CHECK(  SG_vhash__addcopy__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE_REQUEST, pvh_clone_request)  );
	}
	SG_ERR_CHECK(  SG_STRING__ALLOC__RESERVE(pCtx, &pstrRequest, 50)  );
	SG_ERR_CHECK(  SG_vhash__to_json(pCtx, pvhRequest, pstrRequest)  );

	// POST the request; the response body streams into the fragball file.
	SG_ERR_CHECK(  SG_pathname__alloc__pathname_sz(pCtx, &pPathFragball, pStagingPathname, (const char*)pszFragballName)  );
	SG_ERR_CHECK(  do_url(pCtx, pszUrl, "POST", SG_string__sz(pstrRequest), pSyncClient->psz_username, pSyncClient->psz_password, NULL, pPathFragball, SG_TRUE)  );

	SG_RETURN_AND_NULL(pszFragballName, ppszFragballName);

	/* fall through */
fail:
	SG_log__pop_operation(pCtx);
	SG_NULLFREE(pCtx, pszFragballName);
	SG_VHASH_NULLFREE(pCtx, pvhRequest);
	SG_PATHNAME_NULLFREE(pCtx, pPathFragball);
	SG_NULLFREE(pCtx, pszUrl);
	SG_STRING_NULLFREE(pCtx, pstrRequest);
}
/**
 * One-time initialization of the global JS context pool.
 * Idempotent: returns immediately if the global state already exists.
 * Loads the collapsed "server" localsettings vhash (falling back to an
 * empty vhash on failure), normalizes several boolean settings, records
 * the application root, starts the SpiderMonkey runtime, and creates
 * the pool lock.
 */
void SG_jscontextpool__init(SG_context * pCtx, const char * szApplicationRoot)
{
	// Already initialized: nothing to do.
	if(gpJSContextPoolGlobalState != NULL)
		return;

	SG_ERR_CHECK_RETURN(  SG_alloc1(pCtx, gpJSContextPoolGlobalState)  );

	// Best-effort settings load: failures are logged and ignored.
	SG_localsettings__get__collapsed_vhash(pCtx, "server", NULL, &gpJSContextPoolGlobalState->pServerConfig);
	if(SG_context__has_err(pCtx))
	{
		SG_log__report_error__current_error(pCtx);
		SG_context__err_reset(pCtx);
	}
	// Fall back to an empty config when no settings were loaded.
	if(gpJSContextPoolGlobalState->pServerConfig==NULL)
	{
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &gpJSContextPoolGlobalState->pServerConfig)  );
	}

	// Coerce these settings to real booleans inside the config vhash;
	// the last one also captures the value into ssjsMutable.
	SG_ERR_CHECK(  _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "enable_diagnostics", NULL)  );
	SG_ERR_CHECK(  _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "readonly", NULL)  );
	SG_ERR_CHECK(  _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "remote_ajax_libs", NULL)  );
	SG_ERR_CHECK(  _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "ssjs_mutable", &gpJSContextPoolGlobalState->ssjsMutable)  );

	if(szApplicationRoot==NULL)
		szApplicationRoot="";
	SG_ERR_CHECK(  SG_vhash__update__string__sz(pCtx, gpJSContextPoolGlobalState->pServerConfig, "application_root", szApplicationRoot)  );

	// Start up SpiderMonkey.
	SG_jscore__new_runtime(pCtx, SG_jsglue__context_callback, NULL, SG_FALSE, NULL);

	//If jscore is already initialized, just move on.
	if (SG_context__err_equals(pCtx, SG_ERR_ALREADY_INITIALIZED))
	{
		SG_context__err_reset(pCtx);
	}

	SG_ERR_CHECK(  SG_mutex__init(pCtx, &gpJSContextPoolGlobalState->lock)  );

	return;

fail:
	SG_VHASH_NULLFREE(pCtx, gpJSContextPoolGlobalState->pServerConfig);
	SG_NULLFREE(pCtx, gpJSContextPoolGlobalState);
}
/**
 * Test helper: create a new repo instance whose storage parent is the
 * current working directory.  Caller owns (and must free) *ppRepo.
 *
 * Fixes: (1) pRepo was uninitialized; (2) the sibling version of this
 * helper guards the STORAGE add with a NULL check -- without it, a
 * missing/failed "new repo driver" setting passes NULL to
 * SG_vhash__add__string__sz.
 */
void MyFn(create_repo)(SG_context * pCtx, SG_repo ** ppRepo)
{
	// caller must free returned value.
	SG_repo * pRepo = NULL;
	SG_pathname * pPathnameRepoDir = NULL;
	SG_vhash* pvhPartialDescriptor = NULL;
	char buf_repo_id[SG_GID_BUFFER_LENGTH];
	char buf_admin_id[SG_GID_BUFFER_LENGTH];
	char* pszRepoImpl = NULL;

	VERIFY_ERR_CHECK(  SG_gid__generate(pCtx, buf_repo_id, sizeof(buf_repo_id))  );
	VERIFY_ERR_CHECK(  SG_gid__generate(pCtx, buf_admin_id, sizeof(buf_admin_id))  );

	VERIFY_ERR_CHECK(  SG_PATHNAME__ALLOC(pCtx, &pPathnameRepoDir)  );
	VERIFY_ERR_CHECK(  SG_pathname__set__from_cwd(pCtx, pPathnameRepoDir)  );

	VERIFY_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhPartialDescriptor)  );

	// The storage driver setting is optional; only record it when present.
	VERIFY_ERR_CHECK_DISCARD(  SG_localsettings__get__sz(pCtx, SG_LOCALSETTING__NEWREPO_DRIVER, NULL, &pszRepoImpl, NULL)  );
	if (pszRepoImpl)
	{
		VERIFY_ERR_CHECK_DISCARD(  SG_vhash__add__string__sz(pCtx, pvhPartialDescriptor, SG_RIDESC_KEY__STORAGE, pszRepoImpl)  );
	}

	VERIFY_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhPartialDescriptor, SG_RIDESC_FSLOCAL__PATH_PARENT_DIR, SG_pathname__sz(pPathnameRepoDir))  );

	VERIFY_ERR_CHECK(  SG_repo__create_repo_instance(pCtx,pvhPartialDescriptor,SG_TRUE,NULL,buf_repo_id,buf_admin_id,&pRepo)  );
	SG_VHASH_NULLFREE(pCtx, pvhPartialDescriptor);

	{
		const SG_vhash * pvhRepoDescriptor = NULL;
		VERIFY_ERR_CHECK(  SG_repo__get_descriptor(pCtx, pRepo,&pvhRepoDescriptor)  );
		//INFOP("open_repo",("Repo is [%s]",SG_string__sz(pstrRepoDescriptor)));
	}

	*ppRepo = pRepo;

fail:
	SG_VHASH_NULLFREE(pCtx, pvhPartialDescriptor);
	SG_PATHNAME_NULLFREE(pCtx, pPathnameRepoDir);
	SG_NULLFREE(pCtx, pszRepoImpl);
}
/**
 * Test helper: create a new repo instance under ./repo in the cwd.
 * Caller owns (and must free) *ppRepo.  Uses the locally-configured
 * storage driver when one is set; otherwise the repo layer's default.
 */
void MyFn(create_repo)(SG_context * pCtx, SG_repo** ppRepo)
{
	SG_repo* pRepo = NULL;
	SG_pathname* pPathRepoDir = NULL;
	char buf_repo_id[SG_GID_BUFFER_LENGTH];
	char buf_admin_id[SG_GID_BUFFER_LENGTH];
	SG_vhash* pvhDesc = NULL;
	char* pszStorage = NULL;

	VERIFY_ERR_CHECK(  SG_gid__generate(pCtx, buf_repo_id, sizeof(buf_repo_id))  );
	VERIFY_ERR_CHECK(  SG_gid__generate(pCtx, buf_admin_id, sizeof(buf_admin_id))  );

	/* Get our paths fixed up */
	VERIFY_ERR_CHECK(  SG_PATHNAME__ALLOC(pCtx, &pPathRepoDir)  );
	VERIFY_ERR_CHECK(  SG_pathname__set__from_cwd(pCtx, pPathRepoDir)  );
	VERIFY_ERR_CHECK(  SG_pathname__append__from_sz(pCtx, pPathRepoDir, "repo")  );

	// Best-effort mkdir: an existing directory (or any error) is ignored.
	SG_fsobj__mkdir__pathname(pCtx, pPathRepoDir);
	SG_context__err_reset(pCtx);

	// Create the repo
	VERIFY_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhDesc)  );
	VERIFY_ERR_CHECK_DISCARD(  SG_localsettings__get__sz(pCtx, SG_LOCALSETTING__NEWREPO_DRIVER, NULL, &pszStorage, NULL)  );
	if (pszStorage)
	{
		VERIFY_ERR_CHECK_DISCARD(  SG_vhash__add__string__sz(pCtx, pvhDesc, SG_RIDESC_KEY__STORAGE, pszStorage)  );
	}
	VERIFY_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhDesc, SG_RIDESC_FSLOCAL__PATH_PARENT_DIR, SG_pathname__sz(pPathRepoDir))  );

	VERIFY_ERR_CHECK(  SG_repo__create_repo_instance(pCtx,NULL,pvhDesc,SG_TRUE,NULL,buf_repo_id,buf_admin_id,&pRepo)  );

	*ppRepo = pRepo;

	// Fall through to common cleanup
fail:
	SG_VHASH_NULLFREE(pCtx, pvhDesc);
	SG_PATHNAME_NULLFREE(pCtx, pPathRepoDir);
	SG_NULLFREE(pCtx, pszStorage);
}
/**
 * Extract a copy of the "account" sub-vhash from a repo-info vhash.
 * *ppvhAccountInfo is set only when account info is present; the
 * caller owns (and must free) the returned vhash.
 *
 * Fix: the fail path used SG_NULLFREE on an SG_vhash*, which does not
 * run the vhash destructor; use SG_VHASH_NULLFREE like the rest of the
 * file so the container's contents are released too.
 */
void SG_sync__get_account_info__from_repo_info(SG_context* pCtx, const SG_vhash* pvhRepoInfo, SG_vhash** ppvhAccountInfo)
{
	SG_vhash* pvhRef = NULL;
	SG_vhash* pvhAccountInfo = NULL;

	SG_NULLARGCHECK_RETURN(ppvhAccountInfo);

	// pvhRef stays owned by pvhRepoInfo; we only copy from it.
	SG_ERR_CHECK(  SG_vhash__check__vhash(pCtx, pvhRepoInfo, SG_SYNC_REPO_INFO_KEY__ACCOUNT, &pvhRef)  );
	if (pvhRef)
	{
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhAccountInfo)  );
		SG_ERR_CHECK(  SG_vhash__copy_items(pCtx, pvhRef, pvhAccountInfo)  );
		*ppvhAccountInfo = pvhAccountInfo;
		pvhAccountInfo = NULL;
	}

	return;

fail:
	SG_VHASH_NULLFREE(pCtx, pvhAccountInfo);
}
/**
 * Unit test: exercise the closet repo-descriptor CRUD API
 * (add / get / remove / list), including the error cases for names
 * that do not exist.  VERIFY_* macros record failures without
 * aborting the test.
 */
void u0038_test_ridesc(SG_context * pCtx)
{
	SG_vhash* pvh = NULL;
	SG_vhash* pvh2 = NULL;
	SG_vhash* pvh_all = NULL;
	SG_uint32 count = 0;

	// Build a small descriptor vhash to store under several names.
	VERIFY_ERR_CHECK_DISCARD(  SG_VHASH__ALLOC(pCtx, &pvh)  );
	VERIFY_ERR_CHECK_DISCARD(  SG_vhash__add__string__sz(pCtx, pvh, "hello", "world")  );
	VERIFY_ERR_CHECK_DISCARD(  SG_vhash__add__string__sz(pCtx, pvh, "hola", "mundo")  );

	// Round-trip: add then remove descriptor "1".
	SG_ERR_IGNORE(  SG_closet__descriptors__remove(pCtx, "1")  );
	VERIFY_ERR_CHECK_DISCARD(  SG_closet__descriptors__add(pCtx, "1", pvh)  );
	VERIFY_ERR_CHECK_DISCARD(  SG_closet__descriptors__remove(pCtx, "1")  );

	SG_ERR_IGNORE(  SG_closet__descriptors__remove(pCtx, "2")  ); /* This may or may not be an error */

	/* delete one that is not there should be an error */
	VERIFY_ERR_CHECK_HAS_ERR_DISCARD(  SG_closet__descriptors__remove(pCtx, "2")  );

	/* fetch one that is not there should be an error */
	VERIFY_ERR_CHECK_HAS_ERR_DISCARD(  SG_closet__descriptors__get(pCtx, "2", &pvh2)  );

	// Add two descriptors and verify the listing reports at least both.
	SG_ERR_IGNORE(  SG_closet__descriptors__remove(pCtx, "3")  );
	SG_ERR_IGNORE(  SG_closet__descriptors__remove(pCtx, "4")  );
	VERIFY_ERR_CHECK_DISCARD(  SG_closet__descriptors__add(pCtx, "3", pvh)  );
	VERIFY_ERR_CHECK_DISCARD(  SG_closet__descriptors__add(pCtx, "4", pvh)  );
	VERIFY_ERR_CHECK_DISCARD(  SG_closet__descriptors__get(pCtx, "3", &pvh2)  );
	VERIFY_ERR_CHECK_DISCARD(  SG_closet__descriptors__list(pCtx, &pvh_all)  );
	VERIFY_ERR_CHECK_DISCARD(  SG_vhash__count(pCtx, pvh_all, &count)  );
	// >= 2 (not == 2) because other tests may have left descriptors behind.
	VERIFY_COND("count", (count >= 2));

	// Clean up the descriptors this test created.
	SG_ERR_IGNORE(  SG_closet__descriptors__remove(pCtx, "3")  );
	SG_ERR_IGNORE(  SG_closet__descriptors__remove(pCtx, "4")  );

	SG_VHASH_NULLFREE(pCtx, pvh_all);
	SG_VHASH_NULLFREE(pCtx, pvh);
	SG_VHASH_NULLFREE(pCtx, pvh2);
}
/**
 * List the names of all registered repo-storage vtables.
 * Returns a vhash whose keys are the vtable names (values are null).
 * On success *pp_vhash owns the result.
 */
void sg_repo__query_implementation__list_vtables(SG_context * pCtx, SG_vhash ** pp_vhash)
{
	SG_vhash * pvhResult = NULL;
	SG_rbtree_iterator* pIter = NULL;
	SG_bool bFound = SG_FALSE;
	const char* pszVTableName = NULL;

	SG_ERR_CHECK_RETURN(  SG_VHASH__ALLOC(pCtx, &pvhResult)  );

	// Walk the global vtable rbtree, recording each name as a null key.
	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pIter, g_prb_repo_vtables, &bFound, &pszVTableName, NULL)  );
	while (bFound)
	{
		SG_ERR_CHECK(  SG_vhash__add__null(pCtx, pvhResult, pszVTableName)  );
		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pIter, &bFound, &pszVTableName, NULL)  );
	}

	*pp_vhash = pvhResult;
	pvhResult = NULL;

fail:
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter);
	SG_VHASH_NULLFREE(pCtx, pvhResult);
}
void SG_sync_remote__get_repo_info( SG_context* pCtx, SG_repo* pRepo, SG_bool bIncludeBranches, SG_bool b_include_areas, SG_vhash** ppvh) { SG_vhash* pvh = NULL; char* pszRepoId = NULL; char* pszAdminId = NULL; char* pszHashMethod = NULL; SG_uint32 count_dagnums = 0; SG_uint64* paDagNums = NULL; SG_uint32 i = 0; SG_vhash* pvh_dags = NULL; SG_vhash* pvh_areas = NULL; SG_vhash* pvhBranchPile = NULL; SG_bool bHasBranchDag = SG_FALSE; SG_NULLARGCHECK_RETURN(pRepo); SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh) ); /* Protocol version */ SG_ERR_CHECK( SG_vhash__add__int64(pCtx, pvh, SG_SYNC_REPO_INFO_KEY__PROTOCOL_VERSION, 1) ); /* Basic repository info */ SG_ERR_CHECK( SG_repo__get_repo_id(pCtx, pRepo, &pszRepoId) ); SG_ERR_CHECK( SG_repo__get_admin_id(pCtx, pRepo, &pszAdminId) ); SG_ERR_CHECK( SG_repo__get_hash_method(pCtx, pRepo, &pszHashMethod) ); SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvh, SG_SYNC_REPO_INFO_KEY__REPO_ID, pszRepoId) ); SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvh, SG_SYNC_REPO_INFO_KEY__ADMIN_ID, pszAdminId) ); SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvh, SG_SYNC_REPO_INFO_KEY__HASH_METHOD, pszHashMethod) ); /* All DAGs in the repository */ SG_ERR_CHECK( SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums) ); SG_ERR_CHECK( SG_vhash__addnew__vhash(pCtx, pvh, "dags", &pvh_dags) ); for (i=0; i<count_dagnums; i++) { char buf_dagnum[SG_DAGNUM__BUF_MAX__HEX]; SG_ERR_CHECK_RETURN( SG_dagnum__to_sz__hex(pCtx, paDagNums[i], buf_dagnum, sizeof(buf_dagnum)) ); SG_ERR_CHECK( SG_vhash__add__null(pCtx, pvh_dags, buf_dagnum) ); /* Asking for a DAG for the first time in a repo will create that DAG. * When pushing into an empty repo, we don't want this initial query to create * empty new DAGs, so we make sure they exist before we query them. 
*/ if (paDagNums[i] == SG_DAGNUM__VC_BRANCHES) bHasBranchDag = SG_TRUE; } // TODO the following code is a problem, because it requires that this repo // instance have indexes, and we would prefer to preserve the ability of // an index-free instance to support push, pull, and clone. /* All areas in the repository */ if (b_include_areas) { SG_ERR_CHECK( SG_area__list(pCtx, pRepo, &pvh_areas) ); if (pvh_areas) { SG_ERR_CHECK( SG_vhash__add__vhash(pCtx, pvh, "areas", &pvh_areas) ); } } /* Branches */ if (bIncludeBranches && bHasBranchDag) { SG_ERR_CHECK( SG_vc_branches__cleanup(pCtx, pRepo, &pvhBranchPile) ); if (pvhBranchPile) { SG_bool bHasBranches; SG_ERR_CHECK( SG_vhash__has(pCtx, pvhBranchPile, "branches", &bHasBranches) ); if (bHasBranches) SG_ERR_CHECK( SG_vhash__add__vhash(pCtx, pvh, "branches", &pvhBranchPile) ); } } *ppvh = pvh; pvh = NULL; /* fall through */ fail: SG_NULLFREE(pCtx, paDagNums); SG_VHASH_NULLFREE(pCtx, pvh); SG_NULLFREE(pCtx, pszRepoId); SG_NULLFREE(pCtx, pszAdminId); SG_NULLFREE(pCtx, pszHashMethod); SG_VHASH_NULLFREE(pCtx, pvh_areas); SG_VHASH_NULLFREE(pCtx, pvhBranchPile); }
// TODO not sure we really want to pass this much stuff to this interface
/**
 * Ask the work-item-tracking hook to validate association ids.
 * Runs the repo's ASK__WIT__VALIDATE_ASSOCIATIONS hook (if installed),
 * passing repo/admin ids plus the ids in paszAssocs.  A hook "error"
 * result is rethrown as SG_ERR_VC_HOOK_REFUSED; a "bugs" result is
 * copied into pBugs (when pBugs is non-NULL).
 */
void SG_vc_hooks__ASK__WIT__VALIDATE_ASSOCIATIONS(
	SG_context* pCtx,
	SG_repo* pRepo,
	const char* const* paszAssocs,
	SG_uint32 count_assocs,
	SG_varray *pBugs
	)
{
	SG_vhash* pvh_params = NULL;
	SG_vhash* pvh_result = NULL;
	char* psz_repo_id = NULL;
	char* psz_admin_id = NULL;
	SG_vhash* pvh_hook = NULL;
	const char* psz_js = NULL;
	SG_uint32 i = 0;
	SG_varray* pva_ids = NULL;
	const char* psz_descriptor_name = NULL;

	SG_ERR_CHECK(  sg_vc_hooks__lookup_by_interface__single_result(pCtx, pRepo, SG_VC_HOOK__INTERFACE__ASK__WIT__VALIDATE_ASSOCIATIONS, &pvh_hook)  );
	// No hook installed: nothing to validate against.
	if (!pvh_hook)
		return;

	SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepo, &psz_admin_id)  );
	SG_ERR_CHECK(  SG_repo__get_repo_id(pCtx, pRepo, &psz_repo_id)  );
	SG_ERR_CHECK(  SG_repo__get_descriptor_name(pCtx, pRepo, &psz_descriptor_name)  );
	SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh_hook, "js", &psz_js)  );

	// Build the parameter vhash the hook's JS will receive.
	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_params)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh_params, "repo_id", psz_repo_id)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh_params, "admin_id", psz_admin_id)  );
	if (psz_descriptor_name)
	{
		SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh_params, "descriptor_name", psz_descriptor_name)  );
	}
	SG_ERR_CHECK(  SG_vhash__addnew__varray(pCtx, pvh_params, "wit_ids", &pva_ids)  );
	for (i=0; i<count_assocs; i++)
	{
		SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pva_ids, paszAssocs[i])  );
	}

	SG_ERR_CHECK(  SG_vc_hooks__execute(pCtx, psz_js, pvh_params, &pvh_result)  );

	// TODO process the result
	if (pvh_result)
	{
		SG_bool hasErrors = SG_FALSE;
		SG_bool hasBugs = SG_FALSE;

		// A hook-reported error refuses the whole operation.
		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvh_result, "error", &hasErrors)  );
		if (hasErrors)
		{
			const char *emsg = NULL;
			SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh_result, "error", &emsg)  );
			SG_ERR_THROW2( SG_ERR_VC_HOOK_REFUSED, (pCtx, "%s", emsg) );
		}

		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvh_result, "bugs", &hasBugs)  );
		if (hasBugs && pBugs)
		{
			SG_varray *bugs = NULL;
			SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvh_result, "bugs", &bugs)  );
			SG_ERR_CHECK(  SG_varray__copy_items(pCtx, bugs, pBugs)  );
		}
	}

fail:
	SG_VHASH_NULLFREE(pCtx, pvh_params);
	SG_VHASH_NULLFREE(pCtx, pvh_result);
	SG_VHASH_NULLFREE(pCtx, pvh_hook);
	SG_NULLFREE(pCtx, psz_repo_id);
	SG_NULLFREE(pCtx, psz_admin_id);
}
/**
 * Run every installed BROADCAST__AFTER_COMMIT hook for a new changeset.
 * For each hook, builds a parameter vhash (csid, repo/admin ids,
 * optional descriptor name, user, comment, branch, the changeset
 * vhash, work-item ids, and stamps) and executes the hook's JS,
 * ignoring any result.
 */
void SG_vc_hooks__BROADCAST__AFTER_COMMIT(
	SG_context* pCtx,
	SG_repo* pRepo,
	SG_changeset* pcs,
	const char* psz_tied_branch_name,
	const SG_audit* pq,
	const char* psz_comment,
	const char* const* paszAssocs,
	SG_uint32 count_assocs,
	const SG_stringarray* psa_stamps
	)
{
	SG_varray* pva_hooks = NULL;
	SG_vhash* pvh_params = NULL;
	char* psz_repo_id = NULL;
	char* psz_admin_id = NULL;

	SG_ERR_CHECK(  SG_vc_hooks__lookup_by_interface(pCtx, pRepo, SG_VC_HOOK__INTERFACE__BROADCAST__AFTER_COMMIT, &pva_hooks)  );
	if (pva_hooks)
	{
		SG_uint32 count_hooks = 0;
		SG_uint32 i_hook = 0;
		const char* psz_descriptor_name = NULL;

		// These are the same for every hook; fetch them once.
		SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepo, &psz_admin_id)  );
		SG_ERR_CHECK(  SG_repo__get_repo_id(pCtx, pRepo, &psz_repo_id)  );
		SG_ERR_CHECK(  SG_repo__get_descriptor_name(pCtx, pRepo, &psz_descriptor_name)  );
		SG_ERR_CHECK(  SG_varray__count(pCtx, pva_hooks, &count_hooks)  );
		for (i_hook=0; i_hook<count_hooks; i_hook++)
		{
			SG_vhash* pvh_hook = NULL;
			const char* psz_js = NULL;
			const char* psz_csid = NULL;
			SG_vhash* pvh_changeset = NULL;

			SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pva_hooks, i_hook, &pvh_hook)  );
			SG_ERR_CHECK(  SG_changeset__get_id_ref(pCtx, pcs, &psz_csid)  );
			SG_ERR_CHECK(  SG_changeset__get_vhash_ref(pCtx, pcs, &pvh_changeset)  );
			SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh_hook, "js", &psz_js)  );

			// Fresh params vhash per hook (freed at the bottom of the loop).
			SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_params)  );
			SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh_params, "csid", psz_csid)  );
			SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh_params, "repo_id", psz_repo_id)  );
			SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh_params, "admin_id", psz_admin_id)  );
			if (psz_descriptor_name)
			{
				SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh_params, "descriptor_name", psz_descriptor_name)  );
			}
			if (pq)
			{
				SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh_params, "userid", pq->who_szUserId)  );
			}
			if (psz_comment)
			{
				SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh_params, "comment", psz_comment)  );
			}
			if (psz_tied_branch_name)
			{
				SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh_params, "branch", psz_tied_branch_name)  );
			}
			SG_ERR_CHECK(  SG_vhash__addcopy__vhash(pCtx, pvh_params, "changeset", pvh_changeset)  );
			// Optional work-item association ids.
			if (paszAssocs && count_assocs)
			{
				SG_uint32 i = 0;
				SG_varray* pva_ids = NULL;

				SG_ERR_CHECK(  SG_vhash__addnew__varray(pCtx, pvh_params, "wit_ids", &pva_ids)  );
				for (i=0; i<count_assocs; i++)
				{
					SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pva_ids, paszAssocs[i])  );
				}
			}
			// Optional stamps.
			if (psa_stamps)
			{
				SG_uint32 count = 0;
				SG_uint32 i = 0;
				SG_varray* pva_stamps = NULL;

				SG_ERR_CHECK(  SG_vhash__addnew__varray(pCtx, pvh_params, "stamps", &pva_stamps)  );
				SG_ERR_CHECK(  SG_stringarray__count(pCtx, psa_stamps, &count)  );
				for (i=0; i<count; i++)
				{
					const char* psz_stamp = NULL;

					SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psa_stamps, i, &psz_stamp)  );
					SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pva_stamps, psz_stamp)  );
				}
			}

			// Broadcast hooks have no result to process.
			SG_ERR_CHECK(  SG_vc_hooks__execute(pCtx, psz_js, pvh_params, NULL)  );
			SG_VHASH_NULLFREE(pCtx, pvh_params);
		}
	}

fail:
	SG_VHASH_NULLFREE(pCtx, pvh_params);
	SG_VARRAY_NULLFREE(pCtx, pva_hooks);
	SG_NULLFREE(pCtx, psz_repo_id);
	SG_NULLFREE(pCtx, psz_admin_id);
}
/**
 * Compute the db delta between the root (empty) state and psz_csid_to.
 * Walks the direct path from root, builds add/remove sets, then
 * subtracts every removed key from the add set.  *ppvh receives the
 * resulting "add" vhash (NULL when no direct path was found).
 */
void SG_repo__db__calc_delta_from_root(
	SG_context * pCtx,
	SG_repo* pRepo,
	SG_uint64 dagnum,
	const char* psz_csid_to,
	SG_uint32 flags,
	SG_vhash** ppvh
	)
{
	SG_varray* pva_direct_forward_path = NULL;
	SG_vhash* pvh_add = NULL;
	SG_vhash* pvh_remove = NULL;

	SG_NULLARGCHECK_RETURN(psz_csid_to);
	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(ppvh);

	SG_ERR_CHECK(  SG_repo__dag__find_direct_path_from_root(pCtx, pRepo, dagnum, psz_csid_to, &pva_direct_forward_path)  );
	if (pva_direct_forward_path)
	{
		SG_uint32 count_remove = 0;

		// Walking forward: the path's "removes" land in pvh_remove and
		// its "adds" in pvh_add (note the argument order).
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_add)  );
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_remove)  );
		SG_ERR_CHECK(  SG_db__make_delta_from_path(pCtx, pRepo, dagnum, pva_direct_forward_path, flags, pvh_remove, pvh_add)  );
		SG_ERR_CHECK(  SG_vhash__count(pCtx, pvh_remove, &count_remove)  );
		if (count_remove)
		{
			SG_uint32 i = 0;

			// TODO it would be nice to have a batch remove so vhash would only have to rehash once
			for (i=0; i<count_remove; i++)
			{
				const char* psz = NULL;

				SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvh_remove, i, &psz, NULL)  );
				SG_ERR_CHECK(  SG_vhash__remove(pCtx, pvh_add, psz)  );
			}
		}
	}

	// Hand the add-set to the caller (NULL if no direct path existed).
	*ppvh = pvh_add;
	pvh_add = NULL;

fail:
	SG_VHASH_NULLFREE(pCtx, pvh_add);
	SG_VHASH_NULLFREE(pCtx, pvh_remove);
	SG_VARRAY_NULLFREE(pCtx, pva_direct_forward_path);
}
/**
 * Compute add/remove db deltas between two changesets of a dag.
 * Three cases, chosen by dagnode generation: a direct backward path
 * from "from" to "to", a direct path the other way, or -- when neither
 * direct path exists (including equal generations) -- a split at the
 * LCA combining the deltas of both legs.  On success the caller owns
 * *ppvh_add and *ppvh_remove.
 */
void SG_repo__db__calc_delta(
	SG_context * pCtx,
	SG_repo* pRepo,
	SG_uint64 dagnum,
	const char* psz_csid_from,
	const char* psz_csid_to,
	SG_uint32 flags,
	SG_vhash** ppvh_add,
	SG_vhash** ppvh_remove
	)
{
	SG_dagnode* pdn_from = NULL;
	SG_dagnode* pdn_to = NULL;
	SG_int32 gen_from = -1;
	SG_int32 gen_to = -1;
	SG_varray* pva_direct_backward_path = NULL;
	SG_varray* pva_direct_forward_path = NULL;
	SG_vhash* pvh_add = NULL;
	SG_vhash* pvh_remove = NULL;
	SG_rbtree* prb_temp = NULL;
	SG_daglca* plca = NULL;
	char* psz_csid_ancestor = NULL;

	SG_NULLARGCHECK_RETURN(psz_csid_from);
	SG_NULLARGCHECK_RETURN(psz_csid_to);
	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(ppvh_add);
	SG_NULLARGCHECK_RETURN(ppvh_remove);

	// Generations tell us which changeset lies deeper in the dag.
	SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_csid_from, &pdn_from)  );
	SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pdn_from, &gen_from)  );
	SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_csid_to, &pdn_to)  );
	SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pdn_to, &gen_to)  );

	if (gen_from > gen_to)
	{
		// "from" is deeper: try a direct path walking back from it to "to".
		SG_ERR_CHECK(  SG_repo__dag__find_direct_backward_path(pCtx, pRepo, dagnum, psz_csid_from, psz_csid_to, &pva_direct_backward_path)  );
		if (pva_direct_backward_path)
		{
			SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_add)  );
			SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_remove)  );
			SG_ERR_CHECK(  SG_db__make_delta_from_path(pCtx, pRepo, dagnum, pva_direct_backward_path, flags, pvh_add, pvh_remove)  );
		}
	}
	else if (gen_from < gen_to)
	{
		// "to" is deeper: direct path the other way.  Note the swapped
		// add/remove arguments, since this path is applied forward.
		SG_ERR_CHECK(  SG_repo__dag__find_direct_backward_path(pCtx, pRepo, dagnum, psz_csid_to, psz_csid_from, &pva_direct_forward_path)  );
		if (pva_direct_forward_path)
		{
			SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_add)  );
			SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_remove)  );
			SG_ERR_CHECK(  SG_db__make_delta_from_path(pCtx, pRepo, dagnum, pva_direct_forward_path, flags, pvh_remove, pvh_add)  );
		}
	}

	if (!pvh_add && !pvh_remove)
	{
		// No direct path found: split at the LCA and combine the deltas
		// of the two backward legs (from->ancestor and to->ancestor).
		SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_temp)  );
		SG_ERR_CHECK(  SG_rbtree__add(pCtx,prb_temp,psz_csid_from)  );
		SG_ERR_CHECK(  SG_rbtree__add(pCtx,prb_temp,psz_csid_to)  );
		SG_ERR_CHECK(  SG_repo__get_dag_lca(pCtx,pRepo,dagnum,prb_temp,&plca)  );
		{
			const char* psz_hid = NULL;
			SG_daglca_node_type node_type = 0;
			SG_int32 gen = -1;

			// The first node produced by the LCA iterator is the ancestor.
			SG_ERR_CHECK(  SG_daglca__iterator__first(pCtx, NULL, plca, SG_FALSE, &psz_hid, &node_type, &gen, NULL)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_hid, &psz_csid_ancestor)  );
		}
		SG_ERR_CHECK(  SG_repo__dag__find_direct_backward_path(pCtx, pRepo, dagnum, psz_csid_from, psz_csid_ancestor, &pva_direct_backward_path)  );
		SG_ERR_CHECK(  SG_repo__dag__find_direct_backward_path(pCtx, pRepo, dagnum, psz_csid_to, psz_csid_ancestor, &pva_direct_forward_path)  );
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_add)  );
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_remove)  );
		SG_ERR_CHECK(  SG_db__make_delta_from_path(pCtx, pRepo, dagnum, pva_direct_backward_path, flags, pvh_add, pvh_remove)  );
		SG_ERR_CHECK(  SG_db__make_delta_from_path(pCtx, pRepo, dagnum, pva_direct_forward_path, flags, pvh_remove, pvh_add)  );
	}

	// Transfer ownership of both sets to the caller.
	*ppvh_add = pvh_add;
	pvh_add = NULL;

	*ppvh_remove = pvh_remove;
	pvh_remove = NULL;

fail:
	SG_NULLFREE(pCtx, psz_csid_ancestor);
	SG_RBTREE_NULLFREE(pCtx, prb_temp);
	SG_DAGLCA_NULLFREE(pCtx, plca);
	SG_VHASH_NULLFREE(pCtx, pvh_add);
	SG_VHASH_NULLFREE(pCtx, pvh_remove);
	SG_VARRAY_NULLFREE(pCtx, pva_direct_backward_path);
	SG_VARRAY_NULLFREE(pCtx, pva_direct_forward_path);
	SG_DAGNODE_NULLFREE(pCtx, pdn_from);
	SG_DAGNODE_NULLFREE(pCtx, pdn_to);
}
/**
 * Build a JSON document into pStr using the jsonwriter API.
 *
 * Exercises scalar pairs (string/int/double/bool/null), a mixed-type
 * array (including a freshly-allocated gid string), a nested sub-object
 * written from a vhash, and a large (6000-element) array written from a
 * varray.
 *
 * On error, all locally-owned objects are released in the fail block.
 */
void u0026_jsonparser__create_2(SG_context* pCtx, SG_string* pStr)
{
    SG_jsonwriter* pjson = NULL;
    SG_vhash* pvh = NULL;
    SG_varray* pva = NULL;
    SG_uint32 i;
    char* pid = NULL;

    SG_ERR_CHECK( SG_jsonwriter__alloc(pCtx, &pjson, pStr) );

    SG_ERR_CHECK( SG_jsonwriter__write_start_object(pCtx, pjson) );
    SG_ERR_CHECK( SG_jsonwriter__write_pair__string__sz(pCtx, pjson, "hello", "world") );
    SG_ERR_CHECK( SG_jsonwriter__write_pair__int64(pCtx, pjson, "x", 5) );
    SG_ERR_CHECK( SG_jsonwriter__write_pair__double(pCtx, pjson, "pi", 3.14159) );
    SG_ERR_CHECK( SG_jsonwriter__write_pair__bool(pCtx, pjson, "b1", SG_TRUE) );
    SG_ERR_CHECK( SG_jsonwriter__write_pair__bool(pCtx, pjson, "b2", SG_FALSE) );

    // A mixed-type array written element by element.
    SG_ERR_CHECK( SG_jsonwriter__write_begin_pair(pCtx, pjson, "furball") );
    SG_ERR_CHECK( SG_jsonwriter__write_start_array(pCtx, pjson) );
    SG_ERR_CHECK( SG_jsonwriter__write_element__string__sz(pCtx, pjson, "plok") );
    SG_ERR_CHECK( SG_jsonwriter__write_element__double(pCtx, pjson, 47.567) );
    SG_ERR_CHECK( SG_jsonwriter__write_element__int64(pCtx, pjson, 22222) );
    SG_ERR_CHECK( SG_jsonwriter__write_element__null(pCtx, pjson) );
    SG_ERR_CHECK( SG_jsonwriter__write_element__bool(pCtx, pjson, SG_TRUE) );
    SG_ERR_CHECK( SG_jsonwriter__write_element__bool(pCtx, pjson, SG_FALSE) );
    SG_ERR_CHECK( SG_gid__alloc(pCtx, &pid) );
    SG_ERR_CHECK( SG_jsonwriter__write_element__string__sz(pCtx, pjson, pid) );
    SG_NULLFREE(pCtx, pid);
    SG_ERR_CHECK( SG_jsonwriter__write_end_array(pCtx, pjson) );

    SG_ERR_CHECK( SG_jsonwriter__write_pair__null(pCtx, pjson, "nope") );
    SG_ERR_CHECK( SG_jsonwriter__write_pair__string__sz(pCtx, pjson, "messy", U0026_MESSY) );

    // A nested object written from a vhash.
    SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh) );
    SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvh, "fried", "tomatoes") );
    SG_ERR_CHECK( SG_vhash__add__int64(pCtx, pvh, "q", 333) );
    SG_ERR_CHECK( SG_jsonwriter__write_pair__vhash(pCtx, pjson, "sub", pvh) );
    SG_VHASH_NULLFREE(pCtx, pvh);

    // A large array (1000 x 6 elements) written from a varray.
    SG_ERR_CHECK( SG_VARRAY__ALLOC(pCtx, &pva) );
    for (i=0; i<1000; i++)
    {
        SG_ERR_CHECK( SG_varray__append__string__sz(pCtx, pva, "plok") );
        SG_ERR_CHECK( SG_varray__append__int64(pCtx, pva, 22) );
        SG_ERR_CHECK( SG_varray__append__double(pCtx, pva, 1.414) );
        SG_ERR_CHECK( SG_varray__append__bool(pCtx, pva, SG_TRUE) );
        SG_ERR_CHECK( SG_varray__append__bool(pCtx, pva, SG_FALSE) );
        SG_ERR_CHECK( SG_varray__append__null(pCtx, pva) );
    }
    SG_ERR_CHECK( SG_jsonwriter__write_pair__varray(pCtx, pjson, "a", pva) );
    SG_VARRAY_NULLFREE(pCtx, pva);

    SG_ERR_CHECK( SG_jsonwriter__write_end_object(pCtx, pjson) );
    SG_JSONWRITER_NULLFREE(pCtx, pjson);

    return;

fail:
    // BUGFIX: 'pid' previously leaked here if a write failed between
    // SG_gid__alloc() and the in-line SG_NULLFREE above.
    SG_NULLFREE(pCtx, pid);
    SG_VHASH_NULLFREE(pCtx, pvh);
    SG_VARRAY_NULLFREE(pCtx, pva);
    SG_JSONWRITER_NULLFREE(pCtx, pjson);
}
// TODO not sure we really want to pass this much stuff to this interface
/**
 * Run the ASK__WIT__ADD_ASSOCIATIONS hook, if one is installed for this
 * repo.  Builds a params vhash (csid, repo/admin ids, optional
 * descriptor name / userid / comment / branch, the changeset vhash,
 * the work-item ids, and optional stamps), executes the hook's js, and
 * throws SG_ERR_VC_HOOK_REFUSED if the hook's result contains "error".
 *
 * If no hook is installed, this is a no-op.
 */
void SG_vc_hooks__ASK__WIT__ADD_ASSOCIATIONS(
    SG_context* pCtx,
    SG_repo* pRepo,
    SG_changeset* pcs,
    const char* psz_tied_branch_name,
    const SG_audit* pq,
    const char* psz_comment,
    const char* const* paszAssocs,
    SG_uint32 count_assocs,
    const SG_stringarray* psa_stamps
    )
{
    SG_vhash* pvh_hook = NULL;
    SG_vhash* pvh_params = NULL;
    SG_vhash* pvh_result = NULL;
    char* psz_repo_id = NULL;
    char* psz_admin_id = NULL;

    // Look up the (single) hook bound to this interface; pvh_hook stays
    // NULL when none is installed.
    SG_ERR_CHECK( sg_vc_hooks__lookup_by_interface__single_result(
                pCtx,
                pRepo,
                SG_VC_HOOK__INTERFACE__ASK__WIT__ADD_ASSOCIATIONS,
                &pvh_hook
                ) );
    if (pvh_hook)
    {
        const char* psz_js = NULL;
        SG_uint32 i = 0;
        SG_varray* pva_ids = NULL;           // borrowed ref, owned by pvh_params
        const char* psz_descriptor_name = NULL;
        const char* psz_csid = NULL;
        SG_vhash* pvh_changeset = NULL;      // borrowed ref, owned by pcs

        SG_ERR_CHECK( SG_repo__get_admin_id(pCtx, pRepo, &psz_admin_id) );
        SG_ERR_CHECK( SG_repo__get_repo_id( pCtx, pRepo, &psz_repo_id ) );
        SG_ERR_CHECK( SG_repo__get_descriptor_name(pCtx, pRepo, &psz_descriptor_name) );
        SG_ERR_CHECK( SG_changeset__get_id_ref(pCtx, pcs, &psz_csid) );
        SG_ERR_CHECK( SG_changeset__get_vhash_ref(pCtx, pcs, &pvh_changeset) );

        // The hook record carries its script under "js".
        SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh_hook, "js", &psz_js) );

        // Assemble the parameters handed to the hook script.
        SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_params) );
        SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvh_params, "csid", psz_csid) );
        SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvh_params, "repo_id", psz_repo_id) );
        SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvh_params, "admin_id", psz_admin_id) );
        if (psz_descriptor_name)
        {
            SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvh_params, "descriptor_name", psz_descriptor_name) );
        }
        if (pq)
        {
            SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvh_params, "userid", pq->who_szUserId) );
        }
        if (psz_comment)
        {
            SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvh_params, "comment", psz_comment) );
        }
        if (psz_tied_branch_name)
        {
            SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvh_params, "branch", psz_tied_branch_name) );
        }
        SG_ERR_CHECK( SG_vhash__addcopy__vhash(pCtx, pvh_params, "changeset", pvh_changeset) );

        // Work-item ids being associated with this changeset.
        SG_ERR_CHECK( SG_vhash__addnew__varray(pCtx, pvh_params, "wit_ids", &pva_ids) );
        for (i=0; i<count_assocs; i++)
        {
            SG_ERR_CHECK( SG_varray__append__string__sz(pCtx, pva_ids, paszAssocs[i]) );
        }

        if (psa_stamps)
        {
            SG_uint32 count = 0;
            SG_uint32 i = 0;
            SG_varray* pva_stamps = NULL;    // borrowed ref, owned by pvh_params

            SG_ERR_CHECK( SG_vhash__addnew__varray(pCtx, pvh_params, "stamps", &pva_stamps) );
            SG_ERR_CHECK( SG_stringarray__count(pCtx, psa_stamps, &count) );
            for (i=0; i<count; i++)
            {
                const char* psz_stamp = NULL;

                SG_ERR_CHECK( SG_stringarray__get_nth(pCtx, psa_stamps, i, &psz_stamp) );
                SG_ERR_CHECK( SG_varray__append__string__sz(pCtx, pva_stamps, psz_stamp) );
            }
        }

        SG_ERR_CHECK( SG_vc_hooks__execute(pCtx, psz_js, pvh_params, &pvh_result) );

        // TODO process the result
        if (pvh_result)
        {
            SG_bool hasErrors = SG_FALSE;

            // A hook that refuses the operation reports via "error".
            SG_ERR_CHECK( SG_vhash__has(pCtx, pvh_result, "error", &hasErrors) );
            if (hasErrors)
            {
                const char *emsg = NULL;

                SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh_result, "error", &emsg) );
                SG_ERR_THROW2( SG_ERR_VC_HOOK_REFUSED, (pCtx, "\n:%s: %s",
                            SG_VC_HOOK__INTERFACE__ASK__WIT__ADD_ASSOCIATIONS,
                            emsg) );
            }
        }
    }

fail:
    SG_VHASH_NULLFREE(pCtx, pvh_params);
    SG_VHASH_NULLFREE(pCtx, pvh_result);
    SG_VHASH_NULLFREE(pCtx, pvh_hook);
    SG_NULLFREE(pCtx, psz_repo_id);
    SG_NULLFREE(pCtx, psz_admin_id);
}
/**
 * Add to the fragball request vhash (see SG_server_prototypes.h for format).
 *
 * Ensures the request vhash exists and contains a "dags" sub-vhash with
 * an entry for iDagnum, then records under that entry:
 *   - each requested dagnode hid (value null),
 *   - each tag (value SG_SYNC_REQUEST_VALUE_TAG),
 *   - each dagnode hid prefix (value SG_SYNC_REQUEST_VALUE_HID_PREFIX).
 *
 * prbDagnodes, prbTags, and prbDagnodePrefixes are each optional.
 */
void SG_pull__add(
    SG_context* pCtx,
    SG_pull* pPull,
    SG_uint32 iDagnum,
    SG_rbtree* prbDagnodes,
    SG_rbtree* prbTags,
    SG_rbtree* prbDagnodePrefixes)
{
    _sg_pull* pMyPull = NULL;
    char bufDagnum[SG_DAGNUM__BUF_MAX__DEC];
    SG_bool found = SG_FALSE;
    SG_vhash* pvhDags = NULL;       // Needs to be freed
    SG_vhash* pvhDagsRef = NULL;    // Does not need to be freed, owned by parent vhash
    SG_vhash* pvhDagnum = NULL;     // Needs to be freed
    SG_vhash* pvhDagnumRef = NULL;  // Does not need to be freed, owned by parent vhash
    SG_rbtree_iterator* pit = NULL;

    SG_NULLARGCHECK_RETURN(pPull);
    SG_ARGCHECK_RETURN(iDagnum, iDagnum);
    pMyPull = (_sg_pull*)pPull;

    // Lazily create the top-level fragball request vhash.
    if (!pMyPull->pvhFragballRequest)
        SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pMyPull->pvhFragballRequest) );

    SG_ERR_CHECK( SG_dagnum__to_sz__decimal(pCtx, iDagnum, bufDagnum, sizeof(bufDagnum)) );

    /* Get dagnum vhash, adding it if necessary. */
    SG_ERR_CHECK( SG_vhash__has(pCtx, pMyPull->pvhFragballRequest, SG_SYNC_STATUS_KEY__DAGS, &found) );
    if (found)
        SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pMyPull->pvhFragballRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDagsRef) );
    else
    {
        // Keep a borrowed ref before add__vhash steals (and NULLs) the
        // owned pointer.
        SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvhDags) );
        pvhDagsRef = pvhDags;
        SG_ERR_CHECK( SG_vhash__add__vhash(pCtx, pMyPull->pvhFragballRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags) );
    }
    SG_ERR_CHECK( SG_vhash__has(pCtx, pvhDagsRef, bufDagnum, &found) );
    if (found)
        SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhDagsRef, bufDagnum, &pvhDagnumRef) );
    if (!pvhDagnumRef)
    {
        // Same borrowed-ref-then-steal pattern for the dagnum entry.
        SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvhDagnum) );
        pvhDagnumRef = pvhDagnum;
        SG_ERR_CHECK( SG_vhash__add__vhash(pCtx, pvhDagsRef, bufDagnum, &pvhDagnum) );
    }

    /* If dagnodes were provided, add them to the dagnum vhash */
    if (prbDagnodes)
    {
        const char* pszHid;

        SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &pit, prbDagnodes, &found, &pszHid, NULL) );
        while (found)
        {
            SG_ERR_CHECK( SG_vhash__update__null(pCtx, pvhDagnumRef, pszHid) );
            SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, pit, &found, &pszHid, NULL) );
        }
        SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
    }

    /* If tags were provided, add them to the dagnum vhash */
    if (prbTags)
    {
        const char* pszTag;

        SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &pit, prbTags, &found, &pszTag, NULL) );
        while (found)
        {
            SG_ERR_CHECK( SG_vhash__update__string__sz(pCtx, pvhDagnumRef, pszTag, SG_SYNC_REQUEST_VALUE_TAG) );
            SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, pit, &found, &pszTag, NULL) );
        }
        SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
    }

    /* If dagnode hid prefixes were provided, add them to the dagnum vhash */
    if (prbDagnodePrefixes)
    {
        const char* pszHidPrefix;

        SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &pit, prbDagnodePrefixes, &found, &pszHidPrefix, NULL) );
        while (found)
        {
            SG_ERR_CHECK( SG_vhash__update__string__sz(pCtx, pvhDagnumRef, pszHidPrefix, SG_SYNC_REQUEST_VALUE_HID_PREFIX) );
            SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, pit, &found, &pszHidPrefix, NULL) );
        }
        SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
    }

    return;

fail:
    SG_VHASH_NULLFREE(pCtx, pvhDagnum);
    SG_VHASH_NULLFREE(pCtx, pvhDags);
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
}
static void _sg_workingdir__get_entry2(SG_context * pCtx, SG_repo * pRepo, const SG_pathname * pPathSub, const char * pszGid, SG_treenode_entry_type type, const char * pszidHidContent, const char * pszidHidXattrs, SG_int64 iAttributeBits, SG_vhash * pvhTimestamps) { SG_file* pFile = NULL; SG_string* pstrLink = NULL; SG_byte* pBytes = NULL; SG_vhash * pvhGid = NULL; if (SG_TREENODEENTRY_TYPE_DIRECTORY == type) { /* create the directory and then recurse into it */ SG_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathSub) ); SG_ERR_CHECK( _sg_workingdir__get_dir(pCtx, pRepo, pPathSub, pszidHidContent, pvhTimestamps) ); } else if (SG_TREENODEENTRY_TYPE_REGULAR_FILE == type) { SG_ERR_CHECK( SG_file__open__pathname(pCtx, pPathSub, SG_FILE_RDWR | SG_FILE_CREATE_NEW, SG_FSOBJ_PERMS__MASK, &pFile) ); SG_ERR_CHECK( SG_repo__fetch_blob_into_file(pCtx, pRepo, pszidHidContent, pFile, NULL) ); SG_ERR_CHECK( SG_file__close(pCtx, &pFile) ); } else if (SG_TREENODEENTRY_TYPE_SYMLINK == type) { SG_uint64 iLenBytes = 0; SG_ERR_CHECK( SG_repo__fetch_blob_into_memory(pCtx, pRepo, pszidHidContent, &pBytes, &iLenBytes) ); SG_ERR_CHECK( SG_STRING__ALLOC__BUF_LEN(pCtx, &pstrLink, pBytes, (SG_uint32) iLenBytes) ); SG_ERR_CHECK( SG_fsobj__symlink(pCtx, pstrLink, pPathSub) ); SG_NULLFREE(pCtx, pBytes); SG_STRING_NULLFREE(pCtx, pstrLink); } else { SG_ERR_THROW(SG_ERR_NOTIMPLEMENTED); } if (pszidHidXattrs) { #ifdef SG_BUILD_FLAG_FEATURE_XATTR SG_ERR_CHECK( _sg_workingdir__set_xattrs(pCtx, pRepo, pPathSub, pszidHidXattrs) ); #else // TODO do we need to stuff something into the pendingtree to remind us // TODO that the entry originally had an XAttr and we just didn't restore // TODO it when we populated the WD on this Windows system? 
#endif } SG_ERR_CHECK( SG_attributes__bits__apply(pCtx, pPathSub, iAttributeBits) ); if (pvhTimestamps && (SG_TREENODEENTRY_TYPE_REGULAR_FILE == type)) { SG_fsobj_stat stat; SG_int64 iTimeNow; SG_ERR_CHECK( SG_fsobj__stat__pathname(pCtx, pPathSub, &stat) ); SG_ERR_CHECK( SG_time__get_milliseconds_since_1970_utc(pCtx, &iTimeNow) ); SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvhGid) ); SG_ERR_CHECK( SG_vhash__add__int64(pCtx, pvhGid, "mtime_ms", stat.mtime_ms) ); SG_ERR_CHECK( SG_vhash__add__int64(pCtx, pvhGid, "clock_ms", iTimeNow) ); SG_ERR_CHECK( SG_vhash__add__vhash(pCtx, pvhTimestamps, pszGid, &pvhGid) ); // this steals our vhash } fail: SG_VHASH_NULLFREE(pCtx, pvhGid); }
/**
 * Deal with the status for 1 item and accumulate
 * the result in pvaStatus.
 *
 * pszInput is optional; when omitted we assume "@/" (the root).
 * ppvhLegend is optional; when requested, it receives a newly-allocated
 * vhash mapping legend keys ("A", "B", "C") to changeset hids taken
 * from the WD's cset table ("A", "L0", "L1" respectively); the caller
 * owns and must free it.
 *
 * Throws SG_ERR_NOT_FOUND if pszInput does not resolve to any known
 * item.
 */
static void _sg_wc_tx__status(SG_context * pCtx,
                              SG_wc_tx * pWcTx,
                              SG_varray * pvaStatus,
                              const char * pszInput,
                              SG_uint32 depth,
                              SG_bool bListUnchanged,
                              SG_bool bNoIgnores,
                              SG_bool bNoTSC,
                              SG_bool bListSparse,
                              SG_bool bListReserved,
                              SG_vhash ** ppvhLegend)
{
    SG_string * pStringRepoPath = NULL;
    sg_wc_liveview_item * pLVI;          // we do not own this
    SG_bool bKnown;
    char chDomain;
    SG_vhash * pvhCSets = NULL;
    SG_vhash * pvhLegend = NULL;
    const char * pszWasLabel_l = "Baseline (B)";
    const char * pszWasLabel_r = "Working";

    SG_NULLARGCHECK_RETURN( pWcTx );
    SG_NULLARGCHECK_RETURN( pvaStatus );
    // pszInput is optional -- if omitted, we assume "@/".
    // ppvhLegend is optional

    // Normalize the user's input into an extended-prefix repo-path.
    SG_ERR_CHECK( sg_wc_db__path__anything_to_repopath(pCtx, pWcTx->pDb, pszInput,
                                                       SG_WC_DB__PATH__IMPORT_FLAGS__TREAT_NULL_AS_ROOT,
                                                       &pStringRepoPath, &chDomain) );
#if TRACE_WC_TX_STATUS
    SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDERR,
                              "SG_wc_tx__status: '%s' normalized to [domain %c] '%s'\n",
                              pszInput, chDomain, SG_string__sz(pStringRepoPath)) );
#endif

    SG_ERR_CHECK( sg_wc_tx__liveview__fetch_item__domain(pCtx, pWcTx, pStringRepoPath,
                                                         &bKnown, &pLVI) );
    if (!bKnown)
    {
        // We only get this if the path is completely bogus and
        // took us off into the weeds (as opposed to reporting
        // something just not-controlled).
        SG_ERR_THROW2( SG_ERR_NOT_FOUND,
                       (pCtx, "Unknown item '%s'.", SG_string__sz(pStringRepoPath)) );
    }

    // Compute the status rows for this item (and, per 'depth', its
    // children) into the caller's pvaStatus.
    SG_ERR_CHECK( sg_wc_tx__rp__status__lvi(pCtx, pWcTx, pLVI,
                                            depth,
                                            bListUnchanged,
                                            bNoIgnores,
                                            bNoTSC,
                                            bListSparse,
                                            bListReserved,
                                            pszWasLabel_l, pszWasLabel_r,
                                            pvaStatus) );

#if TRACE_WC_TX_STATUS
    SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDERR,
                              "SG_wc_tx__status: computed status [depth %d][bListUnchanged %d][bNoIgnores %d][bNoTSC %d][bListSparse %d][bListReserved %d] on '%s':\n",
                              depth, bListUnchanged, bNoIgnores, bNoTSC, bListSparse, bListReserved,
                              SG_string__sz(pStringRepoPath)) );
    SG_ERR_IGNORE( SG_varray_debug__dump_varray_of_vhashes_to_console(pCtx, pvaStatus, "") );
#endif

    if (ppvhLegend)
    {
        const char * pszHid_x;

        // Build the legend from the WD's cset table, renaming
        // "L0"/"L1" to "B"/"C".  Each key is copied only if present.
        SG_ERR_CHECK( SG_wc_tx__get_wc_csets__vhash(pCtx, pWcTx, &pvhCSets) );
        SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvhLegend) );

        SG_ERR_CHECK( SG_vhash__check__sz(pCtx, pvhCSets, "A", &pszHid_x) );
        if (pszHid_x)
            SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhLegend, "A", pszHid_x) );

        SG_ERR_CHECK( SG_vhash__check__sz(pCtx, pvhCSets, "L0", &pszHid_x) );
        if (pszHid_x)
            SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhLegend, "B", pszHid_x) );

        SG_ERR_CHECK( SG_vhash__check__sz(pCtx, pvhCSets, "L1", &pszHid_x) );
        if (pszHid_x)
            SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhLegend, "C", pszHid_x) );

#if TRACE_WC_TX_STATUS
        SG_ERR_IGNORE( SG_vhash_debug__dump_to_console__named(pCtx, pvhLegend, "Legend") );
#endif

        *ppvhLegend = pvhLegend;
        pvhLegend = NULL;
    }

fail:
    SG_STRING_NULLFREE(pCtx, pStringRepoPath);
    SG_VHASH_NULLFREE(pCtx, pvhLegend);
    SG_VHASH_NULLFREE(pCtx, pvhCSets);
}
/**
 * Populate a fresh working directory from the named repo.
 *
 * The baseline changeset is either psz_spec_hid_cs_baseline (when
 * given) or the single leaf of the version-control dag (throws
 * SG_ERR_MULTIPLE_HEADS_FROM_DAGNODE when there is more than one).
 *
 * When bCreateDrawer is set, the drawer state is also created: the
 * repo mapping, the pendingtree with its parent set to the baseline,
 * and a timestamp cache seeded from the files just written.  Otherwise
 * this behaves like an export: files only, no drawer, no timestamps.
 */
void SG_workingdir__create_and_get(
    SG_context* pCtx,
    const char* pszDescriptorName,
    const SG_pathname* pPathDirPutTopLevelDirInHere,
    SG_bool bCreateDrawer,
    const char* psz_spec_hid_cs_baseline
    )
{
    SG_repo* pRepoInstance = NULL;
    SG_rbtree* prbLeaves = NULL;
    SG_uint32 nrLeaves = 0;
    SG_changeset* pChangeset = NULL;
    const char* pszSuperRootId = NULL;
    SG_bool bFound = SG_FALSE;
    char* pszBaselineHid = NULL;
    SG_pendingtree * pPT = NULL;
    SG_vhash * pvhStamps = NULL;

    // Connect to the repo named by the descriptor.
    SG_ERR_CHECK( SG_repo__open_repo_instance(pCtx, pszDescriptorName, &pRepoInstance) );

    if (!psz_spec_hid_cs_baseline)
    {
        const char* psz_hid = NULL;

        // No baseline was specified: this only works when the
        // version-control dag has exactly one leaf, which then
        // becomes the baseline.
        SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepoInstance,SG_DAGNUM__VERSION_CONTROL,&prbLeaves) );
        SG_ERR_CHECK( SG_rbtree__count(pCtx, prbLeaves, &nrLeaves) );
        if (nrLeaves != 1)
            SG_ERR_THROW( SG_ERR_MULTIPLE_HEADS_FROM_DAGNODE );
        SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, prbLeaves, &bFound, &psz_hid, NULL) );
        SG_ERR_CHECK( SG_STRDUP(pCtx, psz_hid, &pszBaselineHid) );
    }
    else
    {
        SG_ERR_CHECK( SG_strdup(pCtx, psz_spec_hid_cs_baseline, &pszBaselineHid) );
    }

    // Load the baseline changeset and look up its super-root directory.
    SG_ERR_CHECK( SG_changeset__load_from_repo(pCtx, pRepoInstance, pszBaselineHid, &pChangeset) );
    SG_ERR_CHECK( SG_changeset__get_root(pCtx, pChangeset, &pszSuperRootId) );

    if (!bCreateDrawer)
    {
        // Export-style GET: files only; no .sgdrawer, no timestamps.
        SG_ERR_CHECK( sg_workingdir__do_get_dir__top(pCtx, pRepoInstance, pPathDirPutTopLevelDirInHere, pszSuperRootId, NULL) );
    }
    else
    {
        SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvhStamps) );

        // Retrieve everything into the WD, capturing the timestamps of
        // the files as we create them.
        SG_ERR_CHECK( sg_workingdir__do_get_dir__top(pCtx, pRepoInstance, pPathDirPutTopLevelDirInHere, pszSuperRootId, pvhStamps) );

        // Write "repo.json" with the repo-descriptor.
        SG_ERR_CHECK( SG_workingdir__set_mapping(pCtx, pPathDirPutTopLevelDirInHere, pszDescriptorName, NULL) );

        // Create the (initially empty) "wd.json" pendingtree state.
        SG_ERR_CHECK( SG_PENDINGTREE__ALLOC(pCtx, pPathDirPutTopLevelDirInHere, SG_TRUE, &pPT) );

        // Force-set the WD's single parent to the baseline changeset.
        SG_ERR_CHECK( SG_pendingtree__set_single_wd_parent(pCtx, pPT, pszBaselineHid) );

        // Seed the timestamp cache with exactly the stamps we just
        // collected (we populated the whole WD, so this is complete).
        // NOTE: this call steals pvhStamps.
        SG_ERR_CHECK( SG_pendingtree__set_wd_timestamp_cache(pCtx, pPT, &pvhStamps) );
        SG_ERR_CHECK( SG_pendingtree__save(pCtx, pPT) );
    }

fail:
    SG_VHASH_NULLFREE(pCtx, pvhStamps);
    SG_NULLFREE(pCtx, pszBaselineHid);
    SG_CHANGESET_NULLFREE(pCtx, pChangeset);
    SG_RBTREE_NULLFREE(pCtx, prbLeaves);
    SG_REPO_NULLFREE(pCtx, pRepoInstance);
    SG_PENDINGTREE_NULLFREE(pCtx, pPT);
}
/**
 * Process the pending node with the highest revno: load its vc parents
 * as display children (adding new nodes, or "stealing" ones already
 * fetched further right in the tree), pick a baseline child to sort to
 * the far left (either hand-picked via pMergeBaselines or heuristically
 * by unique non-matching user), then replace the node in the 'pending'
 * list with its children and mark it no longer pending.
 */
static void _tree__process_next_pending_item(SG_context * pCtx, _tree_t * pTree, SG_vhash * pMergeBaselines)
{
    SG_uint32 i;
    _node_t * pNode = NULL; // The node we are processing.
    SG_uint32 iNode = 0;    // Index of pNode in the 'pending' list.
    SG_uint32 countVcParents = 0;
    const char ** paszVcParentHids = NULL;
    SG_uint32 iVcParent;

    // The first pending node that needs to be processed is always the one with
    // the highest revno. Find it in the list.
    for(i=1; i < pTree->pending.count; ++i)
    {
        if(pTree->pending.p[i]->revno > pTree->pending.p[iNode]->revno)
            iNode = i;
    }
    pNode = pTree->pending.p[iNode];

    // Load in the node's display children/vc parents.
    SG_ASSERT(pNode->displayChildren.count==0);
    SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pNode->pVcParents) );
    SG_ERR_CHECK( SG_dagnode__get_parents__ref(pCtx, pNode->pDagnode, &countVcParents, &paszVcParentHids) );
    for(iVcParent=0; iVcParent<countVcParents; ++iVcParent)
    {
        // Each vc parent is a candidate display child.
        const char * pszHidCandidate = paszVcParentHids[iVcParent];
        _node_t * pNodeRef = NULL;

        // Scan through the list of 'pending' nodes to see if we have already
        // fetched this one...
        SG_uint32 iCandidate = pTree->pending.count;
        for(i=0; i < pTree->pending.count && iCandidate==pTree->pending.count; ++i)
        {
            if(strcmp(pTree->pending.p[i]->pszHidRef, pszHidCandidate)==0)
            {
                iCandidate = i;
                pNodeRef = pTree->pending.p[i];
            }
        }

        if(iCandidate == pTree->pending.count)
        {
            // Node was not found. Add it new.
            SG_ERR_CHECK( _tree__add_new_node(pCtx, pTree, pNode, pszHidCandidate, &pNodeRef) );
        }
        else if(iCandidate > iNode)
        {
            // Node was found further to the right in the tree. Steal it.
            SG_ERR_CHECK( _tree__move_node(pCtx, pTree->pending.p[iCandidate], pNode) );

            // Also, remove it from the pending list. (It gets re-added later.)
            _node_list__remove_at(&pTree->pending, iCandidate);
        }
        else
        {
            // Node was found further to the left. Do nothing.
        }

        // Record the parent's revno under its hid for later use.
        SG_ERR_CHECK( SG_vhash__add__int64(pCtx, pNode->pVcParents, pszHidCandidate, pNodeRef->revno) );
    }

    // We have all this node's display children (still pending--they could get
    // stolen later). Now we need to sort them.
    if(pNode->displayChildren.count>1)
    {
        // First we pick one to go on the far left, if one stands out as most likely
        // to be the "old"/baseline node into which the others were "brought in".
        SG_uint32 iBaseline = pNode->displayChildren.count;

        // Allow the caller to have hand-picked the baseline node:
        if(pMergeBaselines!=NULL)
        {
            SG_int_to_string_buffer sz;
            SG_int64 baseline = 0;
            SG_ERR_CHECK( SG_vhash__check__int64(pCtx, pMergeBaselines, SG_int64_to_sz(pNode->revno, sz), &baseline) );
            if(baseline!=0)
            {
                for(i=0; i<pNode->displayChildren.count; ++i)
                {
                    if(pNode->displayChildren.p[i]->revno==(SG_uint32)baseline)
                    {
                        iBaseline = i;
                        break;
                    }
                }
            }
        }

        if(iBaseline == pNode->displayChildren.count)
        {
            // No baseline node from the user. See if there's one unique node whose
            // user *doesn't* match.
            for(i=0; i<pNode->displayChildren.count; ++i)
            {
                SG_bool match = SG_FALSE;
                SG_ERR_CHECK( _user_match_found(pCtx, pTree->pRepoRef, pNode->displayChildren.p[i], pNode, &match) );
                if(!match)
                {
                    if(iBaseline == pNode->displayChildren.count)
                    {
                        iBaseline = i;
                    }
                    else
                    {
                        // Whoops. "Nevermind."
                        iBaseline = pNode->displayChildren.count;
                        break;
                    }
                }
            }
        }

        // Finally, sort
        _node_list__sort(&pNode->displayChildren, iBaseline);
    }

    // In the 'pending' list, replace this node with its children.
    if(pNode->displayChildren.count == 0)
        _node_list__remove_at(&pTree->pending, iNode);
    else
    {
        pTree->pending.p[iNode] = pNode->displayChildren.p[0];
        if(pNode->displayChildren.count > 1)
        {
            SG_ERR_CHECK( _node_list__insert_at(pCtx, &pTree->pending, iNode+1, &pNode->displayChildren.p[1], pNode->displayChildren.count-1) );
        }
    }

    // This node is no longer pending.
    pNode->isPending = SG_FALSE;

    return;
fail:
    ;
}
/**
 * Request to UNLOCK on one or more files.
 *
 * WARNING: This routine deviates from the model of most
 * WARNING: of the SG_wc__ level-8 and/or SG_wc_tx level-7
 * WARNING: API routines because we cannot just "queue" an
 * WARNING: unlock like we do a RENAME with everything
 * WARNING: contained within the pWcTx; we actually have
 * WARNING: to update the locks dag (which is outside of
 * WARNING: the scope of the WC TX).
 * WARNING:
 * WARNING: So we only have a level-8 API
 * WARNING: so that we can completely control/bound the TX.
 *
 * We also deviate in that we don't take a --test
 * nor --verbose option.  Which means we don't have a
 * JOURNAL to mess with.
 *
 * psz_username / psz_password are optional (assume no auth required);
 * psz_repo_upstream is optional (falls back to the repo's
 * "paths/default" setting, throwing SG_ERR_NO_SERVER_SPECIFIED when
 * neither is available).
 */
void SG_wc__unlock(SG_context * pCtx,
                   const SG_pathname* pPathWc,
                   const SG_stringarray * psaInputs,
                   SG_bool bForce,
                   const char * psz_username,
                   const char * psz_password,
                   const char * psz_repo_upstream)
{
    SG_wc_tx * pWcTx = NULL;
    SG_audit q;
    SG_uint32 nrInputs = 0;
    SG_uint32 k;
    char * psz_tied_branch_name = NULL;
    char * psz_repo_upstream_allocated = NULL;
    SG_vhash * pvh_gids = NULL;
    const char * pszRepoDescriptorName = NULL;  // we do not own this

    if (psaInputs)
        SG_ERR_CHECK( SG_stringarray__count(pCtx, psaInputs, &nrInputs) );
    if (nrInputs == 0)
        SG_ERR_THROW2( SG_ERR_INVALIDARG, (pCtx, "Nothing to unlock") );

    // psz_username is optional (assume no auth required)
    // psz_password is optional (assume no auth required)
    // psz_server is optional (assume default server)

    // Begin a WC TX so that we get all of the good stuff
    // (like mapping the CWD into a REPO handle and mapping
    // the inputs into GIDs).
    //
    // At this point I don't believe that setting a lock
    // will actually make any changes in WC.DB, so I'm
    // making it a READ-ONLY TX.
    //
    // My assumption is that the lock actually gets
    // written to the Locks DAG and shared with the server.
    // But I still want a TX handle for all of the other stuff.
    SG_ERR_CHECK( SG_WC_TX__ALLOC__BEGIN(pCtx, &pWcTx, pPathWc, SG_FALSE) );

    // We need the repo descriptor name later for the push/pull
    // and to optionally look up the default destination for
    // this repo.  The pRepo stores this *IFF* it was properly
    // opened (using a name).
    SG_ERR_CHECK( SG_repo__get_descriptor_name(pCtx, pWcTx->pDb->pRepo, &pszRepoDescriptorName) );
    SG_ASSERT_RELEASE_FAIL2( (pszRepoDescriptorName && *pszRepoDescriptorName),
                             (pCtx, "SG_wc__unlock: Could not get repo descriptor name.") );

    // now we need to know what branch we are tied to.
    // if we're not tied, fail
    SG_ERR_CHECK( SG_wc_tx__branch__get(pCtx, pWcTx, &psz_tied_branch_name) );
    if (!psz_tied_branch_name)
        SG_ERR_THROW( SG_ERR_NOT_TIED );

    // Map each of the caller's inputs to a GID.
    SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_gids) );
    for (k=0; k<nrInputs; k++)
    {
        const char * pszInput_k;

        SG_ERR_CHECK( SG_stringarray__get_nth(pCtx, psaInputs, k, &pszInput_k) );
        SG_ERR_CHECK( _map_input(pCtx, pWcTx, pvh_gids, pszInput_k) );
    }

    if (!psz_repo_upstream)
    {
        // No upstream given: fall back to the repo's configured
        // default path, mapping not-found to a clearer error.
        SG_localsettings__descriptor__get__sz(pCtx, pszRepoDescriptorName,
                                              "paths/default",
                                              &psz_repo_upstream_allocated);
        if (SG_context__err_equals(pCtx, SG_ERR_NOT_FOUND))
            SG_ERR_REPLACE_ANY_RETHROW( SG_ERR_NO_SERVER_SPECIFIED );
        else
            SG_ERR_CHECK_CURRENT;

        psz_repo_upstream = psz_repo_upstream_allocated;
    }

    SG_ERR_CHECK( SG_audit__init(pCtx, &q, pWcTx->pDb->pRepo,
                                 SG_AUDIT__WHEN__NOW,
                                 SG_AUDIT__WHO__FROM_SETTINGS) );

    // OK, we have all the pieces.  Time to call the unlock code
    SG_ERR_CHECK( SG_vc_locks__unlock(
                pCtx,
                pszRepoDescriptorName,
                psz_repo_upstream,
                psz_username,
                psz_password,
                psz_tied_branch_name,
                pvh_gids,
                bForce,
                &q
                ) );

    // Fall through and let the normal fail code discard/cancel
    // the read-only WC TX.  This will not affect the Locks DAG
    // nor the server.

fail:
    SG_ERR_IGNORE( SG_wc_tx__cancel(pCtx, pWcTx) );
    SG_WC_TX__NULLFREE(pCtx, pWcTx);
    SG_NULLFREE(pCtx, psz_tied_branch_name);
    SG_NULLFREE(pCtx, psz_repo_upstream_allocated);
    SG_VHASH_NULLFREE(pCtx, pvh_gids);
}