/**
 * Pop the head item off the work-queue and hand it back for processing.
 *
 * The returned sg_vv2status_od pointer is NOT owned by the caller; only
 * the queue entry is removed, not the associated data.
 */
void sg_vv2__status__remove_first_from_work_queue(SG_context * pCtx,
												  sg_vv2status * pST,
												  SG_bool * pbFound,
												  sg_vv2status_od ** ppOD)
{
	const char * pszKey = NULL;
	sg_vv2status_od * pOD = NULL;
	SG_bool bHaveFirst = SG_FALSE;

	// peek at the first (lowest-key) entry in the queue.
	SG_ERR_CHECK_RETURN(  SG_rbtree__iterator__first(pCtx, NULL, pST->prbWorkQueue,
													 &bHaveFirst, &pszKey, (void **)&pOD)  );
	if (bHaveFirst)
	{
		// detach it from the queue; the assoc data stays alive elsewhere.
		SG_ERR_CHECK_RETURN(  SG_rbtree__remove(pCtx, pST->prbWorkQueue, pszKey)  );

#if TRACE_VV2_STATUS
		SG_console(pCtx,SG_CS_STDERR,"TD_RMQUE %s (head)\n",pszKey);
		SG_ERR_DISCARD;
#endif
	}

	*pbFound = bHaveFirst;
	*ppOD = (bHaveFirst ? pOD : NULL);
}
void sg_wc_db__gid__create_table(SG_context * pCtx, sg_wc_db * pDb)
{
	// must be called inside an open transaction.
	SG_ERR_CHECK_RETURN(  sg_wc_db__tx__assert(pCtx, pDb)  );

	SG_ERR_CHECK_RETURN(  sg_sqlite__exec__va(pCtx, pDb->psql,
											  ("CREATE TABLE tbl_gid"
											   "  ("
											   "    alias_gid INTEGER PRIMARY KEY AUTOINCREMENT,"
											   "    gid       VARCHAR NOT NULL,"
											   "    tmp       INTEGER NOT NULL"
											   "  )"))  );
	SG_ERR_CHECK_RETURN(  sg_sqlite__exec__va(pCtx, pDb->psql,
											  ("CREATE UNIQUE INDEX index_gid ON tbl_gid ( gid )"))  );

	// Seed row (0, "*undefined*"): a mapping for a fictional temporary id
	// (used for FOUND and IGNORED items until we decide they need a
	// real/permanent id).
	SG_ERR_CHECK_RETURN(  sg_wc_db__gid__insert_known(pCtx, pDb, SG_WC_DB__ALIAS_GID__UNDEFINED, SG_WC_DB__GID__UNDEFINED)  );

	// Seed row (1, "*null*"): a mapping for a fictional parent of the root
	// directory.  This lets us keep the NOT NULL constraint on the column
	// and avoid special-casing the root in the SQL.
	SG_ERR_CHECK_RETURN(  sg_wc_db__gid__insert_known(pCtx, pDb, SG_WC_DB__ALIAS_GID__NULL_ROOT, SG_WC_DB__GID__NULL_ROOT)  );
}
Esempio n. 3
0
void SG_dagfrag__foreach_member(SG_context * pCtx,
								SG_dagfrag * pFrag,
								SG_dagfrag__foreach_member_callback * pcb,
								void * pVoidCallerData)
{
	// Visit the START_ and INTERIOR_ MEMBERS held in the cache, using the
	// generation-sorted member cache so that ancestors are presented
	// before descendants.

	struct _fm_data data;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NULLARGCHECK_RETURN(pcb);

	// lazily build the sorted cache on first use.
	if (!pFrag->m_pRB_GenerationSortedMemberCache)
		SG_ERR_CHECK_RETURN(  _my_create_generation_sorted_member_cache(pCtx, pFrag)  );

	// our shim callback unwraps this and munges the arguments that the
	// caller's callback sees.
	data.pFrag = pFrag;
	data.pcb = pcb;
	data.pVoidCallerData = pVoidCallerData;

#if TRACE_DAGFRAG && 0
	SG_ERR_CHECK_RETURN(  SG_console(pCtx, SG_CS_STDERR, "SORTED MEMBER CACHE:\r\n")  );
	SG_ERR_CHECK_RETURN(  SG_rbtree_debug__dump_keys_to_console(pCtx, pFrag->m_pRB_GenerationSortedMemberCache)  );
	SG_ERR_CHECK_RETURN(  SG_console__flush(pCtx, SG_CS_STDERR)  );
#endif

	SG_ERR_CHECK_RETURN(  SG_rbtree__foreach(pCtx,
											 pFrag->m_pRB_GenerationSortedMemberCache,
											 _sg_dagfrag__my_foreach_member_callback,
											 &data)  );
}
void sg_sync_client__bind_vtable(SG_context* pCtx, SG_sync_client * pClient)
{
	SG_bool bUseHttp = SG_FALSE;

	SG_NULLARGCHECK_RETURN(pClient);

	// binding is a one-shot operation.
	if (pClient->p_vtable)
		SG_ERR_THROW2_RETURN(SG_ERR_INVALIDARG, (pCtx, "vtable already bound"));

	// Decide which vtable to use from pClient's remote repo spec, if any,
	// or from the presence of a local repo handle.
	if (pClient->psz_remote_repo_spec)
		SG_ERR_CHECK_RETURN(  SG_sync_client__spec_is_remote(pCtx, pClient->psz_remote_repo_spec, &bUseHttp)  );
	else if (!pClient->pRepoOther)
		SG_ERR_THROW2_RETURN(SG_ERR_INVALIDARG, (pCtx, "a repo spec or a local repo handle must be set"));

	if (bUseHttp)
	{
		pClient->p_vtable = &s_client_vtable__http;
	}
	else
	{
		pClient->p_vtable = &s_client_vtable__c;

		/* It would be better if this were done inside the C vtable, where
		 * pClient->pRepoOther is actually used, but there's not a sane way
		 * to do that now; it would require a significant re-org. */
		if (!pClient->pRepoOther)
		{
			SG_ERR_CHECK_RETURN(  SG_REPO__OPEN_REPO_INSTANCE(pCtx, pClient->psz_remote_repo_spec, &pClient->pRepoOther)  );
			pClient->bRepoOtherIsMine = SG_TRUE;
		}
	}
}
Esempio n. 5
0
void SG_getopt__set_command_name(SG_context * pCtx, SG_getopt * pGetopt, ARGV_CHAR_T * pBuf)
{
	// lazily allocate the holder string the first time a name is set.
	if (!pGetopt->pStringCommandName)
		SG_ERR_CHECK_RETURN(  SG_STRING__ALLOC(pCtx, &pGetopt->pStringCommandName)  );

	// convert from the OS character type, replacing any previous value.
	SG_ERR_CHECK_RETURN(  SG_UTF8__INTERN_FROM_OS_BUFFER(pCtx, pGetopt->pStringCommandName, pBuf)  );
}
Esempio n. 6
0
static void _dagwalk_callback(SG_context* pCtx,
							  SG_UNUSED_PARAM(SG_repo* pRepo),
							  void* pData,
							  SG_dagnode* pCurrentNode,
							  SG_UNUSED_PARAM(SG_rbtree* pDagnodeCache),
							  SG_bool* pbContinue)
{
	_dagwalk_data* pWalkData = (_dagwalk_data*)pData;
	const char* pszHid = NULL;
	SG_int32 gen;

	SG_UNUSED(pRepo);
	SG_UNUSED(pDagnodeCache);

	// stop descending once we drop below the generation limit.
	SG_ERR_CHECK_RETURN(  SG_dagnode__get_generation(pCtx, pCurrentNode, &gen)  );
	if (gen < pWalkData->genLimit)
	{
		*pbContinue = SG_FALSE;
		return;
	}

	// the start node itself is not recorded in the visited set.
	SG_ERR_CHECK_RETURN(  SG_dagnode__get_id_ref(pCtx, (const SG_dagnode*)pCurrentNode, &pszHid)  );
	if (strcmp(pWalkData->pszStartNodeHid, (const char*)pszHid) == 0)
		return;

	SG_ERR_CHECK_RETURN(  SG_rbtree__update(pCtx, pWalkData->prbVisitedNodes, (const char*)pszHid)  );

	// TODO: Stop walking when this node and all it siblings are already in prbVisitedNodes?
}
Esempio n. 7
0
void SG_time__format_local__i64(SG_context* pCtx, SG_int64 iTime,
								  char * pBuf, SG_uint32 lenBuf)
{
	// do simple formatting of time value and generate a localtime-based, human-readable time.
	// this is like ctime(3), asctime(3), gmtime(3), localtime(3) only i don't currently
	// care about all the various formatting options.
	//
	// that is, we don't care about:
	// [] which fields we show,
	// [] localization (which order we show the fields and month names),
	// [] 12 vs 24 hour time.
	// [] UTF8.
	//
	// we are given a buffer that must be SG_TIME_FORMAT_LENGTH+1 characters or larger.
	//
	// output looks like "YYYY/MM/DD hh:mm:ss.mmm +hhmm" where the trailing
	// field is the signed UTC offset split into hours and minutes.

	SG_time tm;

	SG_NULLARGCHECK_RETURN( pBuf );
	SG_ARGCHECK_RETURN( (lenBuf >= SG_TIME_FORMAT_LENGTH+1), lenBuf );

	// convert milliseconds-since-epoch into individual fields.

	SG_ERR_CHECK_RETURN(  SG_time__decode__local(pCtx, iTime,&tm)  );

	// The offset is printed as sign+hours via truncating division, then
	// unsigned minutes via abs().  NOTE(review): presumably tm.offsetUTC is
	// in seconds; a negative offset smaller than one hour (none exist in
	// real timezones) would lose its sign here — confirm if that matters.
	SG_ERR_CHECK_RETURN( SG_sprintf(pCtx, pBuf,lenBuf,"%04d/%02d/%02d %02d:%02d:%02d.%03d %+03d%02d",
					 tm.year,tm.month,tm.mday,
					 tm.hour,tm.min,tm.sec,tm.msec,
					 tm.offsetUTC/3600,abs((tm.offsetUTC/60)%60)) );

}
Esempio n. 8
0
/**
 * Pull an entire repo into the named local repo by requesting one
 * fragball containing everything, then committing it from staging.
 *
 * All failure paths funnel through 'fail' so that pMe and the fragball
 * name are always released.  (The previous version used
 * SG_ERR_CHECK_RETURN for the commit/cleanup steps, which returned
 * immediately and leaked pMe and pszFragballName on error.)
 */
void SG_pull__clone(
	SG_context* pCtx, 
	const char* pszPullIntoRepoDescriptorName, 
	SG_client* pClient)
{
	sg_pull_instance_data* pMe = NULL;
	char* pszFragballName = NULL;
	const SG_pathname* pStagingPathname;		// borrowed from pMe->pStaging

	SG_NULLARGCHECK_RETURN(pszPullIntoRepoDescriptorName);
	SG_NULLARGCHECK_RETURN(pClient);

	SG_ERR_CHECK(  _pull_init(pCtx, pClient, pszPullIntoRepoDescriptorName, &pMe)  );
	SG_ERR_CHECK(  SG_staging__get_pathname(pCtx, pMe->pStaging, &pStagingPathname)  );

	/* Request a fragball containing the entire repo */
	SG_ERR_CHECK(  SG_client__pull_clone(pCtx, pClient, pStagingPathname, &pszFragballName)  );

	/* commit and cleanup */
	SG_ERR_CHECK(  SG_staging__commit_fragball(pCtx, pMe->pStaging, pszFragballName)  );

	SG_ERR_CHECK(  SG_context__msg__emit(pCtx, "Cleaning up...")  );
	SG_ERR_CHECK(  SG_staging__cleanup(pCtx, &pMe->pStaging)  );
	SG_ERR_CHECK(  SG_context__msg__emit(pCtx, "done")  );

	/* fall through */
fail:
	_NULLFREE_INSTANCE_DATA(pCtx, pMe);
	SG_NULLFREE(pCtx, pszFragballName);
	SG_ERR_IGNORE(  SG_context__msg__emit(pCtx, "\n")  );
}
Esempio n. 9
0
/**
 * Allocate a new, empty SG_dagfrag bound to the given repo/admin ids
 * and dag number.  The caller owns the result and should free it with
 * SG_DAGFRAG_NULLFREE.
 */
void SG_dagfrag__alloc(SG_context * pCtx,
					   SG_dagfrag ** ppNew,
					   const char* psz_repo_id,
					   const char* psz_admin_id,
					   SG_uint32 iDagNum)
{
	SG_dagfrag * pFrag = NULL;

	SG_NULLARGCHECK_RETURN(ppNew);
	SG_ARGCHECK_RETURN( (iDagNum != SG_DAGNUM__NONE), iDagNum );
	SG_ERR_CHECK_RETURN(  SG_gid__argcheck(pCtx, psz_repo_id)  );
	SG_ERR_CHECK_RETURN(  SG_gid__argcheck(pCtx, psz_admin_id)  );

	SG_ERR_CHECK(  SG_alloc(pCtx, 1, sizeof(SG_dagfrag), &pFrag)  );

	SG_ERR_CHECK(  SG_strcpy(pCtx, pFrag->m_sz_repo_id, sizeof(pFrag->m_sz_repo_id), psz_repo_id)  );
	SG_ERR_CHECK(  SG_strcpy(pCtx, pFrag->m_sz_admin_id, sizeof(pFrag->m_sz_admin_id), psz_admin_id)  );

	pFrag->m_iDagNum = iDagNum;

	SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &pFrag->m_pRB_Cache)  );

	// the generation-sorted member cache is created lazily, only if needed.
	pFrag->m_pRB_GenerationSortedMemberCache = NULL;

	*ppNew = pFrag;
	return;

fail:
	SG_DAGFRAG_NULLFREE(pCtx, pFrag);
}
Esempio n. 10
0
static void _dump_cb(SG_context * pCtx, const char * szHid, void * pAssocData, void * pVoidDumpData)
{
	struct _dump_data * pDumpData = (struct _dump_data *)pVoidDumpData;
	_my_data * pMyData = (_my_data *)pAssocData;

	// only dump entries in the state the caller asked for.
	if (pMyData->m_state != pDumpData->stateWanted)
		return;

	if (pMyData->m_pDagnode)
	{
		// we have the full dagnode; dump it in detail.
		SG_ERR_CHECK_RETURN(  SG_dagnode_debug__dump(pCtx,
													 pMyData->m_pDagnode,
													 pDumpData->nrDigits,
													 pDumpData->indent,
													 pDumpData->pStringOutput)  );
	}
	else
	{
		// no dagnode loaded; just dump the hid.
		SG_ERR_CHECK_RETURN(  my_dump_id(pCtx,
										 szHid,
										 pDumpData->nrDigits,
										 pDumpData->indent,
										 pDumpData->pStringOutput)  );
	}
}
void SG_sync_remote__push_clone__commit(
	SG_context* pCtx,
	const char* pszCloneId,
	SG_pathname** ppPathFragball)
{
	// refuse unless this server accepts pushed clones.
	SG_ERR_CHECK_RETURN(  _remote_clone_allowed(pCtx)  );

	// hand the staged clone over to be committed.
	SG_ERR_CHECK_RETURN(  SG_staging__clone__commit(pCtx, pszCloneId, ppPathFragball)  );
}
/**
 * Compute the pathname for the working-copy DB in the drawer.
 * This is something of the form:
 *
 *        <root>/.sgdrawer/wc.db
 *
 * The result is stored in pDb->pPathDB.
 */
void sg_wc_db__path__compute_pathname_in_drawer(SG_context * pCtx, sg_wc_db * pDb)
{
	// start from the drawer directory under the working-directory top...
	SG_ERR_CHECK_RETURN(  SG_workingdir__get_drawer_path(pCtx,
														 pDb->pPathWorkingDirectoryTop,
														 &pDb->pPathDB)  );

	// ...and append the fixed DB filename.
	SG_ERR_CHECK_RETURN(  SG_pathname__append__from_sz(pCtx, pDb->pPathDB, "wc.db")  );
}
Esempio n. 13
0
void SG_dbrecord__get_value__int64(SG_context* pCtx, const SG_dbrecord* prec, const char* psz_name, SG_int64* piValue)
{
    const char* psz_raw = NULL;
    SG_int64 parsed = 0;

    // fetch the field as a string, then require it to be fully numeric.
    SG_ERR_CHECK_RETURN(  SG_dbrecord__get_value(pCtx, prec, psz_name, &psz_raw)  );
    SG_ERR_CHECK_RETURN(  SG_int64__parse__strict(pCtx, &parsed, psz_raw)  );

    *piValue = parsed;
}
// libcurl write-callback shim: append the received chunk to the open
// SG_file carried in pVoidState.
static void _write_file_chunk(SG_context* pCtx, SG_curl* pCurl, char* buffer, SG_uint32 bufLen, void* pVoidState, SG_uint32* pLenHandled)
{
	SG_file* pFile = (SG_file*)pVoidState;

	// first check whether this response is really an error body (helper
	// defined elsewhere); if so it raises and we never write the chunk.
	SG_ERR_CHECK_RETURN(  _check_for_and_handle_json_500(pCtx, pCurl, buffer, bufLen)  );

	if (bufLen) // libcurl docs say we may be called with zero length if file is empty
		SG_ERR_CHECK_RETURN(  SG_file__write(pCtx, pFile, bufLen, (SG_byte*)buffer, pLenHandled)  );
	// NOTE(review): when bufLen == 0, *pLenHandled is left untouched here —
	// presumably the calling shim pre-initializes it; verify.
}
Esempio n. 15
0
void SG_workingdir__construct_absolute_path_from_repo_path2(SG_context * pCtx,
															const SG_pendingtree * pPendingTree,
															const char * pszRepoPath,
															SG_pathname ** ppPathAbsolute)
{
	const SG_pathname * pPathTop = NULL;	// borrowed ref; we do not own this

	// look up the working-directory top from the pendingtree, then let the
	// non-"2" variant do the actual path construction.
	SG_ERR_CHECK_RETURN(  SG_pendingtree__get_working_directory_top__ref(pCtx, pPendingTree, &pPathTop)  );
	SG_ERR_CHECK_RETURN(  SG_workingdir__construct_absolute_path_from_repo_path(pCtx, pPathTop, pszRepoPath, ppPathAbsolute)  );
}
Esempio n. 16
0
// Delete the temporary mine/other/ancestor input files for this resolve
// step.  Note that a failure removing an earlier file short-circuits the
// later removes (each macro returns immediately on error).
static void _resolve__step_pathnames__delete_temp_files(SG_context * pCtx,
														_resolve__step_pathnames * pStepPathnames)
{
	SG_ERR_CHECK_RETURN(  SG_fsobj__remove__pathname(pCtx, pStepPathnames->pPath_Mine)  );
	SG_ERR_CHECK_RETURN(  SG_fsobj__remove__pathname(pCtx, pStepPathnames->pPath_Other)  );
	SG_ERR_CHECK_RETURN(  SG_fsobj__remove__pathname(pCtx, pStepPathnames->pPath_Ancestor)  );

	// DO NOT DELETE pPath_Result because it is either the final result
	// or to be used as input to the next step in the plan.
}
Esempio n. 17
0
// Tear down the C-vtable ("local") client instance: free the embedded
// server object, then the instance data itself.
void sg_client__c__close(SG_context * pCtx, SG_client * pClient)
{
	sg_client_c_instance_data* pMe = NULL;

	// closing a NULL client is a no-op.
	if (!pClient)
		return;

	pMe = (sg_client_c_instance_data*)pClient->p_vtable_instance_data;

	// NOTE(review): if SG_server__free fails, the macro returns immediately
	// and pMe itself is never freed — confirm that leak is acceptable here.
	SG_ERR_CHECK_RETURN(  SG_server__free(pCtx, pMe->pServer)  );
	SG_ERR_CHECK_RETURN(  SG_NULLFREE(pCtx, pMe)  );
}
/**
 * Direct the HTTP response body into the provided SG_string, appended
 * to the end.  The caller retains ownership of the string and should
 * free it.
 */
void SG_curl__set__write_string(SG_context* pCtx, SG_curl* pCurl, SG_string* pString)
{
	_sg_curl* pMe = (_sg_curl*)pCurl;

	SG_NULLARGCHECK_RETURN(pCurl);

	pMe->pFnWriteResponse = _write_string_chunk;
	pMe->pWriteState = pString;

	// the shim receives the SG_curl wrapper and dispatches to pFnWriteResponse.
	SG_ERR_CHECK_RETURN(  _setopt__pv(pCtx, pCurl, CURLOPT_WRITEDATA, pCurl)  );
	SG_ERR_CHECK_RETURN(  _setopt__write_cb(pCtx, pCurl, CURLOPT_WRITEFUNCTION, _write_callback_shim)  );
}
/**
 * Create an INDEX over ( alias_gid_parent, entryname ) on the given
 * cset's tne_* table.
 *
 * We probably only need this for the L0 cset since we drive everything
 * from it.  Tables for L1 (or others) tend to be used for random lookups
 * driven from L0, so they don't really need indexes — but we DO allow it.
 * (See the tricks that UPDATE plays with the DB.)
 */
void sg_wc_db__tne__create_index(SG_context * pCtx,
								 sg_wc_db * pDb,
								 const sg_wc_db__cset_row * pCSetRow)
{
	// must be called inside an open transaction.
	SG_ERR_CHECK_RETURN(  sg_wc_db__tx__assert(pCtx, pDb)  );

	SG_ERR_CHECK_RETURN(  sg_sqlite__exec__va(pCtx, pDb->psql,
											  ("CREATE INDEX ndx_%s ON %s ( alias_gid_parent, entryname )"),
											  pCSetRow->psz_tne_table_name,
											  pCSetRow->psz_tne_table_name)  );
}
Esempio n. 20
0
/**
 * Repeatedly request fragballs of blobs from the other side and slurp
 * them into staging until staging reports that no more blobs are needed.
 *
 * All failure paths funnel through 'fail' so the vhashes and fragball
 * name are always released.  (The final "done" emit previously used
 * SG_ERR_CHECK_RETURN, which returned immediately and leaked the live
 * pvhStagingStatus on error.)
 */
static void _add_blobs_until_done(SG_context* pCtx, 
								  SG_staging* pStaging, 
								  SG_client* pClient) 
{
	SG_bool need_blobs = SG_FALSE;
	SG_vhash* pvhFragballRequest = NULL;
	char* pszFragballName = NULL;
	SG_vhash* pvhRequestStatus = NULL;
	const SG_pathname* pStagingPathname = NULL;		// borrowed from pStaging
	SG_vhash* pvhStagingStatus = NULL;

	// nothing allocated yet, so an immediate return here is safe.
	SG_ERR_CHECK_RETURN(  SG_context__msg__emit(pCtx, "Retrieving blobs...")  );

	SG_ERR_CHECK(  SG_staging__check_status(pCtx, pStaging, SG_FALSE, SG_FALSE, SG_FALSE, SG_TRUE, SG_TRUE, &pvhStagingStatus)  );

	SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhStagingStatus, SG_SYNC_STATUS_KEY__BLOBS, &need_blobs)  );

	if (need_blobs)
		SG_ERR_CHECK(  SG_staging__get_pathname(pCtx, pStaging, &pStagingPathname)  );

	while (need_blobs)
	{
		// the previous status doubles as the next fragball request.
		pvhFragballRequest = pvhStagingStatus;
		pvhStagingStatus = NULL;

		SG_ERR_CHECK(  SG_client__pull_request_fragball(pCtx, pClient, pvhFragballRequest, pStagingPathname, &pszFragballName, &pvhRequestStatus)  );

		/* Ian TODO: inspect pvhRequestStatus */

		SG_VHASH_NULLFREE(pCtx, pvhRequestStatus);
		SG_VHASH_NULLFREE(pCtx, pvhFragballRequest);

		SG_ERR_CHECK(  SG_staging__slurp_fragball(pCtx, pStaging, (const char*)pszFragballName)  );
		SG_NULLFREE(pCtx, pszFragballName);

		SG_ERR_CHECK(  SG_staging__check_status(pCtx, pStaging, SG_FALSE, SG_FALSE, SG_FALSE, SG_TRUE, SG_TRUE, &pvhStagingStatus)  );

#if TRACE_PULL
		SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvhStagingStatus, "pull staging status")  );
#endif

		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhStagingStatus, SG_SYNC_STATUS_KEY__BLOBS, &need_blobs)  );
	}

	// was SG_ERR_CHECK_RETURN: must go through 'fail' to free pvhStagingStatus.
	SG_ERR_CHECK(  SG_context__msg__emit(pCtx, "done")  );

	/* fall through */
fail:
	SG_VHASH_NULLFREE(pCtx, pvhStagingStatus);
	SG_VHASH_NULLFREE(pCtx, pvhFragballRequest);
	SG_NULLFREE(pCtx, pszFragballName);
	SG_VHASH_NULLFREE(pCtx, pvhRequestStatus);
	SG_ERR_IGNORE(  SG_context__msg__emit(pCtx, "\n")  );
}
void SG_repo__install_implementation(
        SG_context* pCtx, 
        sg_repo__vtable* pvtable
        )
{
    // create the global registry on first install.
    if (!g_prb_repo_vtables)
        SG_ERR_CHECK_RETURN(  SG_RBTREE__ALLOC(pCtx, &g_prb_repo_vtables)  );

    // register the vtable keyed by its storage-implementation name.
    SG_ERR_CHECK_RETURN(  SG_rbtree__add__with_assoc(pCtx, g_prb_repo_vtables, pvtable->pszStorage, pvtable)  );
}
void SG_mrg_cset_entry_conflict__append_delete(SG_context * pCtx,
											   SG_mrg_cset_entry_conflict * pMrgCSetEntryConflict,
											   SG_mrg_cset * pMrgCSet_Leaf_k)
{
	SG_NULLARGCHECK_RETURN(pMrgCSetEntryConflict);
	SG_NULLARGCHECK_RETURN(pMrgCSet_Leaf_k);

	// lazily create the vector of deleting csets on first use.
	if (!pMrgCSetEntryConflict->pVec_MrgCSet_Deletes)
		SG_ERR_CHECK_RETURN(  SG_vector__alloc(pCtx, &pMrgCSetEntryConflict->pVec_MrgCSet_Deletes, 2)  );

	// record (but do not own) the leaf cset that deleted the entry.
	SG_ERR_CHECK_RETURN(  SG_vector__append(pCtx, pMrgCSetEntryConflict->pVec_MrgCSet_Deletes, (void *)pMrgCSet_Leaf_k, NULL)  );
}
void sg_wc_db__branch__create_table(SG_context * pCtx,
                                    sg_wc_db * pDb)
{
    // must be called inside an open transaction.
    SG_ERR_CHECK_RETURN(  sg_wc_db__tx__assert(pCtx, pDb)  );

    SG_ERR_CHECK_RETURN(  sg_sqlite__exec__va(pCtx, pDb->psql,
                          ("CREATE TABLE tbl_branch"
                           "  ("
                           "    id   INTEGER PRIMARY KEY,"
                           "    name VARCHAR NULL"
                           "  )"))  );
}
// Check a JS context out of the global pool, creating a fresh one when
// the pool has none available (or when pooling is bypassed).
void SG_jscontext__acquire(SG_context * pCtx, SG_jscontext ** ppJs)
{
	SG_ASSERT(pCtx!=NULL);
	SG_NULLARGCHECK_RETURN(ppJs);

	// ssjsMutable set => pooling is bypassed; always create a new context.
	if(gpJSContextPoolGlobalState->ssjsMutable)
	{
		_sg_jscontext__create(pCtx, ppJs);
		return;
	}

	SG_ERR_CHECK_RETURN(  SG_mutex__lock(pCtx, &gpJSContextPoolGlobalState->lock)  );

	if(gpJSContextPoolGlobalState->pFirstAvailableContext!=NULL)
	{
		// Pop the head of the free list while still holding the lock.
		SG_jscontext * pJs = gpJSContextPoolGlobalState->pFirstAvailableContext;

		gpJSContextPoolGlobalState->pFirstAvailableContext = pJs->pNextAvailableContext;
		pJs->pNextAvailableContext = NULL;

		++gpJSContextPoolGlobalState->numContextsCheckedOut;

		SG_ERR_CHECK_RETURN(  SG_mutex__unlock(pCtx, &gpJSContextPoolGlobalState->lock)  );

		// Re-bind the SpiderMonkey context to this thread and open a request.
		SG_httprequestprofiler__start(SG_HTTPREQUESTPROFILER_CATEGORY__JSREQUEST_TOGGLING);
		(void)JS_SetContextThread(pJs->cx);
		JS_BeginRequest(pJs->cx);
		pJs->isInARequest = SG_TRUE;
		SG_httprequestprofiler__stop();

		// each checkout runs with the current caller's error context.
		JS_SetContextPrivate(pJs->cx, pCtx);

		*ppJs = pJs;
	}
	else
	{
		// Nothing pooled: count the checkout up front, release the lock,
		// then create outside the critical section.
		++gpJSContextPoolGlobalState->numContextsCheckedOut;
		SG_ERR_CHECK_RETURN(  SG_mutex__unlock(pCtx, &gpJSContextPoolGlobalState->lock)  );

		_sg_jscontext__create(pCtx, ppJs);

		if(SG_context__has_err(pCtx) || *ppJs==NULL)
		{
			/* Use the version of the mutex routines that doesn't touch pCtx,
			because we're already in an error state. */
			SG_mutex__lock__bare(&gpJSContextPoolGlobalState->lock);
			--gpJSContextPoolGlobalState->numContextsCheckedOut;
			SG_mutex__unlock__bare(&gpJSContextPoolGlobalState->lock);
		}
	}
}
void sg_wc_db__tne__drop_named_table(SG_context * pCtx,
									 sg_wc_db * pDb,
									 const sg_wc_db__cset_row * pCSetRow)
{
	// must be called inside an open transaction.
	SG_ERR_CHECK_RETURN(  sg_wc_db__tx__assert(pCtx, pDb)  );

	// release any prepared statements that may still reference the table.
	SG_ERR_CHECK_RETURN(  sg_wc_db__tx__free_cached_statements(pCtx, pDb)  );

	// DROP INDEX is implicit when the referenced table is dropped.
	SG_ERR_CHECK_RETURN(  sg_sqlite__exec__va(pCtx, pDb->psql,
											  ("DROP TABLE IF EXISTS %s"),
											  pCSetRow->psz_tne_table_name)  );
}
// Adapter: rbtree keys are hex strings; decode the key to a uint64 and
// forward to the caller's uint64-keyed callback.
static void _foreach_cb_interlude(SG_context * pCtx,
								  const char * pszKey,
								  void * pVoidAssoc,
								  void * pVoidData)
{
	struct _cb_interlude_data * pData = (struct _cb_interlude_data *)pVoidData;
	SG_uint64 uiKey = 0ULL;

	SG_ERR_CHECK_RETURN(  SG_hex__parse_hex_uint64(pCtx, pszKey, SG_STRLEN(pszKey), &uiKey)  );
	SG_ERR_CHECK_RETURN(  (*pData->pfn_cb_ui64)(pCtx, uiKey, pVoidAssoc, pData->pVoidData_ui64)  );
}
/**
 * Install a progress callback for this request/response.
 * The caller retains ownership of pState and should free it after
 * SG_curl__perform.
 */
void SG_curl__set__progress_cb(SG_context* pCtx, SG_curl* pCurl, SG_curl_progress_callback* pcb, void* pState)
{
	_sg_curl* pMe = (_sg_curl*)pCurl;

	SG_NULLARGCHECK_RETURN(pCurl);
	SG_NULLARGCHECK_RETURN(pcb);

	pMe->pFnProgress = pcb;
	pMe->pProgressState = pState;

	// enable progress reporting; the shim receives the SG_curl wrapper
	// and dispatches to pFnProgress.
	SG_ERR_CHECK_RETURN(  SG_curl__setopt__int32(pCtx, pCurl, CURLOPT_NOPROGRESS, 0)  );
	SG_ERR_CHECK_RETURN(  _setopt__pv(pCtx, pCurl, CURLOPT_PROGRESSDATA, pCurl)  );
	SG_ERR_CHECK_RETURN(  _setopt__progress_cb(pCtx, pCurl, CURLOPT_PROGRESSFUNCTION, _progress_callback_shim)  );
}
/**
 * Generate a brand-new GID, insert it into tbl_gid, and return the
 * alias assigned to it.
 */
void sg_wc_db__gid__insert_new(SG_context * pCtx,
							   sg_wc_db * pDb,
							   SG_bool bIsTmp,
							   SG_uint64 * puiAliasGidNew)
{
	char bufGid[SG_GID_BUFFER_LENGTH+1];

	// make a fresh GID, record it, then fetch the alias the insert assigned.
	SG_ERR_CHECK_RETURN(  SG_gid__generate(pCtx, bufGid, sizeof(bufGid))  );
	SG_ERR_CHECK_RETURN(  _sg_wc_db__gid__insert(pCtx, pDb, bufGid, bIsTmp)  );
	SG_ERR_CHECK_RETURN(  sg_wc_db__gid__get_alias_from_gid(pCtx, pDb, bufGid, puiAliasGidNew)  );

	// keep a running count of temporary ids handed out.
	if (bIsTmp)
		pDb->nrTmpGids++;
}
Esempio n. 29
0
// Record one parent hid of a dagnode being added to the fragment,
// either as new end-fringe or by reclassifying an already-cached entry.
static void _add_one_parent(SG_context * pCtx, SG_dagfrag* pFrag, const char * szHid)
{
	_my_data * pDataCached = NULL;
	SG_bool bPresent = SG_FALSE;

	SG_ERR_CHECK_RETURN(  _cache__lookup(pCtx,pFrag,szHid,&pDataCached,&bPresent)  );
	if (!bPresent)
	{
		// dagnode is not present in the cache.  therefore, it's in the fringe

		SG_ERR_CHECK_RETURN(  _cache__add__fringe(pCtx, pFrag, szHid)  );
	}
	else
	{
		// dagnode already present in the cache. therefore, we have already visited it
		// before.  we can change our minds about the state of this dagnode if something
		// has changed (such as the fragment bounds being widened).

		switch (pDataCached->m_state)
		{
		default:
		//case SG_DFS_UNKNOWN:
			// any other state is a corrupted cache; fail hard.
			SG_ASSERT_RELEASE_RETURN2(  (0),
								(pCtx,"Invalid state [%d] in DAGFRAG Cache for [%s]",
								 pDataCached->m_state,szHid)  );

		case SG_DFS_START_MEMBER:
			// a dagnode has a parent that we are considering a START node.
			// this can happen when we were started from a non-leaf node and
			// then a subsequent call to __load is given a true leaf node or
			// a node deeper in the tree that has our original start node as
			// a parent.
			//
			// clear the start bit.  (we only want true fragment-terminal
			// nodes marked as start nodes.)

			pDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
			break;

		case SG_DFS_INTERIOR_MEMBER:
            /* nothing to do here */
			break;

		case SG_DFS_END_FRINGE:
            /* nothing to do here */
			break;
		}
	}
}
void SG_sync_remote__push_clone__commit_maybe_usermap(
	SG_context* pCtx,
	const char* pszCloneId,
	const char* pszClosetAdminId,
	SG_pathname** ppPathFragball,	/*< [in] Required. On success, this routine takes ownership of the pointer. It will free memory and delete the file. */
	char** ppszDescriptorName,		/*< [out] Optional. Caller should free. */
	SG_bool* pbAvailable,			/*< [out] Optional. */
	SG_closet__repo_status* pStatus,/*< [out] Optional. */
	char** ppszStatus				/*< [out] Optional. Caller should free. */
)
{
	// refuse unless this server accepts pushed clones.
	SG_ERR_CHECK_RETURN(  _remote_clone_allowed(pCtx)  );

	// delegate the commit (and possible usermap detour) to staging.
	SG_ERR_CHECK_RETURN(  SG_staging__clone__commit_maybe_usermap(pCtx, pszCloneId, pszClosetAdminId, ppPathFragball,
		ppszDescriptorName, pbAvailable, pStatus, ppszStatus)  );
}