/**
 * Wrapper around SG_utf8__from_utf32 that allows empty strings.
 * I'm not sure why that function doesn't allow them, but I didn't want to change
 * something so low-level right now, so I'm wrapping it instead.  I'll log something
 * to have it looked into later.
 */
static void _utf32_to_utf8(
    SG_context* pCtx,   //< [in] [out] Error and context info.
    SG_int32*   pUtf32, //< [in] UTF32 string to convert, may be empty but not NULL.
    char**      ppUtf8, //< [out] Converted UTF8 string.
    SG_uint32*  pLength //< [out] Length of converted UTF8 string, in bytes.
)
{
    char*     szUtf8  = NULL;
    SG_uint32 uLength = 0u;

    SG_NULLARGCHECK(pUtf32);
    SG_NULLARGCHECK(ppUtf8);

    if (pUtf32[0] == 0)
    {
        SG_ERR_CHECK(  SG_alloc1(pCtx, szUtf8)  );
        szUtf8[0] = 0;
        uLength = 1u;
    }
    else
    {
        SG_ERR_CHECK(  SG_utf8__from_utf32(pCtx, pUtf32, &szUtf8, &uLength)  );
    }

    *ppUtf8 = szUtf8;
    szUtf8 = NULL;
    if (pLength != NULL)
    {
        *pLength = uLength;
    }

fail:
    SG_NULLFREE(pCtx, szUtf8);
    return;
}
Example #2
File: sg_getopt.c  Project: avar/veracity
void SG_getopt__get_option_from_code2(SG_context* pCtx,
									 SG_int32 code,
									 const SG_getopt_option* option_table,
									 char* pszdesc_override,
									 const SG_getopt_option** ppOpt)
{
	SG_int32 i;

	for (i = 0; option_table[i].optch; i++)
		if (option_table[i].optch == code)
		{
			if (pszdesc_override)
			{
				SG_getopt_option *tmpopt;
				SG_ERR_CHECK(  SG_alloc1(pCtx, tmpopt)  );
				*tmpopt = option_table[i];
				tmpopt->pStringDescription = pszdesc_override;
				*ppOpt = tmpopt;
			}
			else
			{
				*ppOpt = &(option_table[i]);
			}
			break;
		}

fail:
	return;
}
void MyFn(clear__with_assoc)(SG_context * pCtx)
{
	static const SG_uint32 uSize = 100u;
	
	SG_vector* pVector = NULL;
	SG_uint32  uIndex  = 0u;
	SG_uint32  uOutput = 0u;

	VERIFY_ERR_CHECK(  SG_VECTOR__ALLOC(pCtx, &pVector, uSize)  );

	// add some allocated data to the vector
	for (uIndex = 0u; uIndex < uSize; ++uIndex)
	{
		SG_uint32* pValue = NULL;
		VERIFY_ERR_CHECK(  SG_alloc1(pCtx, pValue)  );
		*pValue = uIndex;
		VERIFY_ERR_CHECK(  SG_vector__append(pCtx, pVector, pValue, &uOutput)  );
		VERIFY_COND("Added item has unexpected index.", uOutput == uIndex);
	}

	// verify that the length is what we expect
	VERIFY_ERR_CHECK(  SG_vector__length(pCtx, pVector, &uOutput)  );
	VERIFY_COND("Vector's length doesn't match added item count.", uOutput == uSize);

	// clear the vector using our free callback
	VERIFY_ERR_CHECK(  SG_vector__clear__with_assoc(pCtx, pVector, MyFn(free_uint32))  );
	// if we get memory leaks, then the callback wasn't properly called to free the elements

	// verify that the vector is now empty
	VERIFY_ERR_CHECK(  SG_vector__length(pCtx, pVector, &uOutput)  );
	VERIFY_COND("Vector's length is non-zero after being cleared.", uOutput == 0u);

fail:
	SG_VECTOR_NULLFREE(pCtx, pVector);
}
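/**
 * The vector tests in this listing call a MyFn(free_uint32) callback that the
 * listing itself never shows.  A minimal sketch of what it presumably looks
 * like, assuming the usual (SG_context*, void*) free-callback signature taken
 * by SG_vector__clear__with_assoc and SG_vector__free__with_assoc: each
 * element was created with SG_alloc1, so handing it back via SG_NULLFREE is
 * all that's needed.
 */
static void MyFn(free_uint32)(SG_context* pCtx, void* pValue)
{
	SG_NULLFREE(pCtx, pValue);
}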
void SG_mrg_cset_entry_conflict__alloc(SG_context * pCtx,
									   SG_mrg_cset * pMrgCSet,
									   SG_mrg_cset_entry * pMrgCSetEntry_Ancestor,
									   SG_mrg_cset_entry * pMrgCSetEntry_Baseline,
									   SG_mrg_cset_entry_conflict ** ppMrgCSetEntryConflict)
{
	SG_mrg_cset_entry_conflict * pMrgCSetEntryConflict = NULL;

	SG_NULLARGCHECK_RETURN(pMrgCSet);
	SG_NULLARGCHECK_RETURN(pMrgCSetEntry_Ancestor);
	// pMrgCSetEntry_Baseline may or may not be null
	SG_NULLARGCHECK_RETURN(ppMrgCSetEntryConflict);

	// we allocate and return this.  we DO NOT automatically add it to
	// the conflict-list in the cset.

	SG_ERR_CHECK_RETURN(  SG_alloc1(pCtx,pMrgCSetEntryConflict)  );

	pMrgCSetEntryConflict->pMrgCSet = pMrgCSet;
	pMrgCSetEntryConflict->pMrgCSetEntry_Ancestor = pMrgCSetEntry_Ancestor;
	pMrgCSetEntryConflict->pMrgCSetEntry_Baseline = pMrgCSetEntry_Baseline;
	// defer alloc of vectors until we need them.
	// defer alloc of rbtrees until we need them.

	pMrgCSetEntryConflict->pMergeTool = NULL;

	*ppMrgCSetEntryConflict = pMrgCSetEntryConflict;
}
/**
 * Wrapper around SG_utf8__to_utf32 that allows empty strings.
 * I'm not sure why that function doesn't allow them, but I didn't want to change
 * something so low-level right now, so I'm wrapping it instead.  I'll log something
 * to have it looked into later.
 */
static void _utf8_to_utf32(
    SG_context* pCtx,    //< [in] [out] Error and context info.
    const char* szUtf8,  //< [in] UTF8 string to convert, may be empty but not NULL.
    SG_int32**  ppUtf32, //< [out] Converted UTF32 string.
    SG_uint32*  pLength  //< [out] Length of converted UTF32 string, in characters.
)
{
    SG_int32* pUtf32  = NULL;
    SG_uint32 uLength = 0u;

    SG_NULLARGCHECK(szUtf8);
    SG_NULLARGCHECK(ppUtf32);

    if (szUtf8[0] == 0)
    {
        SG_ERR_CHECK(  SG_alloc1(pCtx, pUtf32)  );
        pUtf32[0] = 0;
        uLength = 0u;
    }
    else
    {
        SG_ERR_CHECK(  SG_utf8__to_utf32__sz(pCtx, szUtf8, &pUtf32, &uLength)  );
    }

    *ppUtf32 = pUtf32;
    pUtf32 = NULL;
    if (pLength != NULL)
    {
        *pLength = uLength;
    }

fail:
    SG_NULLFREE(pCtx, pUtf32);
    return;
}
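/**
 * A usage sketch (not part of the original file) showing why the two
 * empty-string wrappers above are convenient: a round trip through
 * _utf8_to_utf32 and _utf32_to_utf8 succeeds even for "".  It assumes both
 * wrappers live in the same translation unit and that the caller owns the
 * returned buffers; per the wrappers, an empty input yields uChars == 0 and
 * a one-byte ("" plus terminator) UTF8 result.
 */
static void _example__empty_round_trip(SG_context* pCtx)
{
    SG_int32* pUtf32 = NULL;
    char*     szUtf8 = NULL;
    SG_uint32 uChars = 0u;
    SG_uint32 uBytes = 0u;

    SG_ERR_CHECK(  _utf8_to_utf32(pCtx, "", &pUtf32, &uChars)  );
    SG_ERR_CHECK(  _utf32_to_utf8(pCtx, pUtf32, &szUtf8, &uBytes)  );

    // fall through to common cleanup
fail:
    SG_NULLFREE(pCtx, pUtf32);
    SG_NULLFREE(pCtx, szUtf8);
}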
void sg_sync_client__http__open(
    SG_context* pCtx,
    SG_sync_client * pSyncClient)
{
    sg_client_http_instance_data* pMe = NULL;

    SG_NULLARGCHECK_RETURN(pSyncClient);

    SG_ERR_CHECK_RETURN(  SG_alloc1(pCtx, pMe)  );
    pSyncClient->p_vtable_instance_data = (sg_sync_client__vtable__instance_data *)pMe;
}
Example #7
void SG_dbrecord__alloc(SG_context* pCtx, SG_dbrecord** ppResult)
{
	SG_dbrecord * prec;
	SG_NULLARGCHECK_RETURN(ppResult);
	SG_ERR_CHECK_RETURN(  SG_alloc1(pCtx, prec)  );
	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &prec->pvh)  );
	*ppResult = prec;

	return;
fail:
	SG_NULLFREE(pCtx, prec);
}
Example #8
static void SG_localsettings__factory__get_nth__variant(
    SG_context* pCtx,
    SG_uint32 i,
    SG_variant** ppv
    )
{
	SG_variant* pv = NULL;

    if (SG_VARIANT_TYPE_SZ == g_factory_defaults[i].type)
    {
        SG_ERR_CHECK(  SG_alloc1(pCtx, pv)  );
        pv->type = g_factory_defaults[i].type;
        SG_ERR_CHECK(  SG_strdup(pCtx, g_factory_defaults[i].psz_val, (char**) &pv->v.val_sz)  );
    }
    else if (SG_VARIANT_TYPE_VARRAY == g_factory_defaults[i].type)
    {
        const char** pp_el = NULL;

        SG_ERR_CHECK(  SG_alloc1(pCtx, pv)  );
        pv->type = g_factory_defaults[i].type;
        SG_ERR_CHECK(  SG_varray__alloc(pCtx, &pv->v.val_varray)  );

        pp_el = g_factory_defaults[i].pasz_array;
        while (*pp_el)
        {
            SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pv->v.val_varray, *pp_el)  );
            pp_el++;
        }
    }
    else
    {
        SG_ERR_THROW(  SG_ERR_NOTIMPLEMENTED  );
    }

    *ppv = pv;
    pv = NULL;

fail:
    SG_VARIANT_NULLFREE(pCtx, pv);
}
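/*
 * The g_factory_defaults table read above is not included in this listing.
 * The struct and sample entries below are a reconstruction based only on the
 * fields the getter touches (type, psz_val, pasz_array); the field names,
 * setting names, and values are illustrative assumptions, not the real
 * Veracity defaults.
 */
struct _factory_default_example
{
    const char*  psz_path;    /* assumed: where the setting lives */
    SG_uint16    type;        /* SG_VARIANT_TYPE_SZ or SG_VARIANT_TYPE_VARRAY */
    const char*  psz_val;     /* used when type == SG_VARIANT_TYPE_SZ */
    const char** pasz_array;  /* NULL-terminated, used when type == SG_VARIANT_TYPE_VARRAY */
};

static const char* g_example_ignores[] = { "*.o", "*.obj", NULL };

static const struct _factory_default_example g_factory_defaults_example[] =
{
    { "log/level", SG_VARIANT_TYPE_SZ,     "normal", NULL },
    { "ignores",   SG_VARIANT_TYPE_VARRAY, NULL,     g_example_ignores },
};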
static void MyFn(copy_uint32)(SG_context* pCtx, void* pInput, void** pOutput)
{
	SG_uint32* pInputInt  = (SG_uint32*)pInput;
	SG_uint32* pOutputInt = NULL;

	VERIFY_ERR_CHECK(  SG_alloc1(pCtx, pOutputInt)  );
	*pOutputInt = *pInputInt;

	*pOutput = pOutputInt;

fail:
	return;
}
void sg_wc_db__tne_row__alloc(SG_context * pCtx, sg_wc_db__tne_row ** ppTneRow)
{
	sg_wc_db__tne_row * pTneRow = NULL;

	SG_ERR_CHECK(  SG_alloc1(pCtx, pTneRow)  );
	SG_ERR_CHECK(  sg_wc_db__state_structural__alloc(pCtx, &pTneRow->p_s)  );
	SG_ERR_CHECK(  sg_wc_db__state_dynamic__alloc(pCtx, &pTneRow->p_d)  );

	*ppTneRow = pTneRow;
	return;

fail:
	SG_WC_DB__TNE_ROW__NULLFREE(pCtx, pTneRow);
}
Example #11
/**
 * Creates a staging area and returns an initialized instance data structure.
 */
static void _pull_init(SG_context* pCtx, 
					   SG_client* pClient,
					   const char* pszPullIntoRepoDescriptorName,
					   sg_pull_instance_data** ppMe)
{
	char* pszThisRepoId = NULL;
	char* pszThisHashMethod = NULL;
	char* pszOtherRepoId = NULL;
	char* pszOtherHashMethod = NULL;

	sg_pull_instance_data* pMe = NULL;
	
	SG_repo* pPullIntoRepo = NULL;

	SG_NULLARGCHECK_RETURN(pszPullIntoRepoDescriptorName);
	SG_NULLARGCHECK_RETURN(ppMe);

	SG_ERR_CHECK(  SG_client__get_repo_info(pCtx, pClient, &pszOtherRepoId, NULL, &pszOtherHashMethod)  );

	SG_ERR_CHECK(  SG_repo__open_repo_instance(pCtx, pszPullIntoRepoDescriptorName, &pPullIntoRepo)  );

	/* TODO This will care about dagnums once we're using the user dag. */
	SG_ERR_CHECK(  SG_repo__get_repo_id(pCtx, pPullIntoRepo, &pszThisRepoId)  );
	if (strcmp(pszThisRepoId, pszOtherRepoId) != 0)
		SG_ERR_THROW(SG_ERR_REPO_ID_MISMATCH);

	/* TODO check admin id when appropriate */

	SG_ERR_CHECK(  SG_repo__get_hash_method(pCtx, pPullIntoRepo, &pszThisHashMethod)  );
	if (strcmp(pszThisHashMethod, pszOtherHashMethod) != 0)
		SG_ERR_THROW(SG_ERR_REPO_HASH_METHOD_MISMATCH);

	// alloc instance data, store pull id in it (which identifies the staging area)
	SG_ERR_CHECK(  SG_alloc1(pCtx, pMe)  );
	SG_ERR_CHECK(  SG_staging__create(pCtx, pszPullIntoRepoDescriptorName, &pMe->pszPullId, &pMe->pStaging)  );
	pMe->pPullIntoRepo = pPullIntoRepo;
	pPullIntoRepo = NULL;

	SG_RETURN_AND_NULL(pMe, ppMe);

	/* fall through */
fail:
	SG_NULLFREE(pCtx, pszThisRepoId);
	SG_NULLFREE(pCtx, pszThisHashMethod);
	SG_NULLFREE(pCtx, pszOtherRepoId);
	SG_NULLFREE(pCtx, pszOtherHashMethod);
	SG_REPO_NULLFREE(pCtx, pPullIntoRepo);
	_NULLFREE_INSTANCE_DATA(pCtx, pMe);
}
void MyFn(alloc__copy__deep)(SG_context * pCtx)
{
	static const SG_uint32 uSize = 100u;
	
	SG_vector* pVector  = NULL;
	SG_vector* pCopy    = NULL;
	SG_uint32  uIndex   = 0u;
	SG_uint32  uOutput1 = 0u;
	SG_uint32  uOutput2 = 0u;
	void*      pOutput1 = NULL;
	void*      pOutput2 = NULL;

	VERIFY_ERR_CHECK(  SG_VECTOR__ALLOC(pCtx, &pVector, uSize)  );

	// add some allocated data to the vector
	for (uIndex = 0u; uIndex < uSize; ++uIndex)
	{
		SG_uint32* pValue = NULL;
		VERIFY_ERR_CHECK(  SG_alloc1(pCtx, pValue)  );
		*pValue = uIndex;
		VERIFY_ERR_CHECK(  SG_vector__append(pCtx, pVector, pValue, &uOutput1)  );
		VERIFY_COND("Added item has unexpected index.", uOutput1 == uIndex);
	}

	// copy the vector
	VERIFY_ERR_CHECK(  SG_VECTOR__ALLOC__COPY(pCtx, pVector, MyFn(copy_uint32), MyFn(free_uint32), &pCopy)  );

	// verify that the copy matches the original
	VERIFY_ERR_CHECK(  SG_vector__length(pCtx, pVector, &uOutput1)  );
	VERIFY_ERR_CHECK(  SG_vector__length(pCtx, pCopy,   &uOutput2)  );
	VERIFY_COND("Copied vector's length doesn't match added item count.", uOutput1 == uSize);
	VERIFY_COND("Copied vector's length doesn't match original.", uOutput1 == uOutput2);
	for (uIndex = 0u; uIndex < uOutput1; ++uIndex)
	{
		VERIFY_ERR_CHECK(  SG_vector__get(pCtx, pVector, uIndex, &pOutput1)  );
		VERIFY_ERR_CHECK(  SG_vector__get(pCtx, pCopy,   uIndex, &pOutput2)  );
		VERIFYP_COND("Copied vector's pointer value matches original after deep copy.", pOutput1 != pOutput2, ("index(%d)", uIndex));
		uOutput1 = *((SG_uint32*)pOutput1);
		uOutput2 = *((SG_uint32*)pOutput2);
		VERIFYP_COND("Copied vector's pointed-to value doesn't match original after deep copy.", uOutput1 == uOutput2, ("index(%d)", uIndex));
	}

fail:
	SG_context__push_level(pCtx);
	SG_vector__free__with_assoc(pCtx, pVector, MyFn(free_uint32));
	SG_vector__free__with_assoc(pCtx, pCopy,   MyFn(free_uint32));
	SG_context__pop_level(pCtx);
}
void SG_mrg_cset_entry_collision__alloc(SG_context * pCtx,
										SG_mrg_cset_entry_collision ** ppMrgCSetEntryCollision)
{
	SG_mrg_cset_entry_collision * pMrgCSetEntryCollision = NULL;

	SG_NULLARGCHECK_RETURN(ppMrgCSetEntryCollision);

	SG_ERR_CHECK_RETURN(  SG_alloc1(pCtx,pMrgCSetEntryCollision)  );
	SG_ERR_CHECK(  SG_vector__alloc(pCtx,&pMrgCSetEntryCollision->pVec_MrgCSetEntry,2)  );

	*ppMrgCSetEntryCollision = pMrgCSetEntryCollision;
	return;

fail:
	SG_MRG_CSET_ENTRY_COLLISION_NULLFREE(pCtx,pMrgCSetEntryCollision);
}
Example #14
static void _sg_dbrecord__alloc_e__from_json(SG_context* pCtx, SG_dbrecord ** ppNew, const char * szString)
{
	SG_dbrecord * prec;

	SG_NULLARGCHECK_RETURN(ppNew);
	SG_NONEMPTYCHECK_RETURN(szString);

	SG_ERR_CHECK_RETURN(  SG_alloc1(pCtx, prec)  );
	SG_ERR_CHECK(  SG_VHASH__ALLOC__FROM_JSON(pCtx, &prec->pvh, szString)  );
	SG_ERR_CHECK(  _sg_dbrecord__validate_vhash(pCtx, prec)  );
	*ppNew = prec;

	return;
fail:
	SG_DBRECORD_NULLFREE(pCtx, prec);
}
void SG_mrg__alloc(SG_context * pCtx,
				   SG_wc_tx * pWcTx,
				   const SG_wc_merge_args * pMergeArgs,
				   SG_mrg ** ppMrg_New)
{
	SG_mrg * pMrg = NULL;

	SG_NULLARGCHECK_RETURN(ppMrg_New);

	SG_ERR_CHECK_RETURN(  SG_alloc1(pCtx,pMrg)  );
	pMrg->pWcTx = pWcTx;			// we do not own this
	pMrg->pMergeArgs = pMergeArgs;	// we do not own this

	// Defer allocating pMrg->prbCSets_Aux until needed

	*ppMrg_New = pMrg;
}
Example #16
void SG_server__alloc(
	SG_context* pCtx,
	SG_server** ppNew
	)
{
	SG_server* pThis = NULL;

	SG_ERR_CHECK(  SG_alloc1(pCtx, pThis)  );

	*ppNew = pThis;
	pThis = NULL;

	/* fallthru */

fail:
	SG_SERVER_NULLFREE(pCtx, pThis);
}
static void _tree__add_new_node(SG_context * pCtx, _tree_t * pTree, _node_t * pDisplayParent, const char * pszHid, _node_t ** ppNewNodeRef)
{
	// Adds a new node to the tree. The new node is considered 'pending', though
	// it is not actually added to the pending list at this point.
	_node_t * pNode = NULL;
	
	// Get the memory.
	if(pTree->pFreeList==NULL)
	{
		SG_ERR_CHECK(  SG_alloc1(pCtx, pNode)  );
		SG_ERR_CHECK(  _node_list__init(pCtx, &pNode->displayChildren, 2)  );
	}
	else
	{
		pNode = pTree->pFreeList;
		pTree->pFreeList = pNode->displayChildren.p[0];
		pNode->displayChildren.count = 0;
	}
	
	if(ppNewNodeRef!=NULL)
		*ppNewNodeRef = pNode;
	
	// Populate the node.
	pNode->pDisplayParent = pDisplayParent;
	SG_ERR_CHECK(  SG_repo__fetch_dagnodes__one(pCtx, pTree->pRepoRef, pTree->pDagnodeFetcher, pszHid, &pNode->pDagnode)  );
	SG_ERR_CHECK(  SG_dagnode__get_revno(pCtx, pNode->pDagnode, &pNode->revno)  );
	SG_ERR_CHECK(  SG_dagnode__get_id_ref(pCtx, pNode->pDagnode, &pNode->pszHidRef)  );
	pNode->isPending = SG_TRUE;
	
	// Add the node to its display parent.
	if(pDisplayParent!=NULL)
	{
		SG_ERR_CHECK(  _node_list__append(pCtx, &pDisplayParent->displayChildren, &pNode)  );
	}
	else
	{
		SG_ASSERT(pTree->pRoot==NULL);
		pTree->pRoot = pNode;
	}
	
	return;
fail:
	_node__nullfree(pCtx, &pNode);
}
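/*
 * The free-list trick above (handing retired nodes back out instead of
 * calling the allocator every time, with the "next free node" link stashed
 * in the first child-pointer slot) is a common pooling pattern.  A generic,
 * self-contained sketch of the same idea follows; it uses plain malloc/free
 * and a dedicated link field rather than the _tree_t/_node_t API, so every
 * name in it is illustrative.
 */
#include <stdlib.h>

typedef struct _pool_item _pool_item_t;
struct _pool_item
{
	int           value;
	_pool_item_t* pNextFree;  // only meaningful while the item sits on the free list
};

typedef struct
{
	_pool_item_t* pFreeList;
} _pool_t;

// Pop a recycled item from the free list if one is available, else malloc.
static _pool_item_t* _pool__acquire(_pool_t* pPool)
{
	_pool_item_t* pItem;
	if (pPool->pFreeList != NULL)
	{
		pItem = pPool->pFreeList;
		pPool->pFreeList = pItem->pNextFree;
	}
	else
	{
		pItem = (_pool_item_t*)malloc(sizeof(*pItem));
		if (pItem == NULL)
			return NULL;
	}
	pItem->value = 0;
	pItem->pNextFree = NULL;
	return pItem;
}

// Push an item back onto the free list instead of freeing it immediately.
static void _pool__release(_pool_t* pPool, _pool_item_t* pItem)
{
	pItem->pNextFree = pPool->pFreeList;
	pPool->pFreeList = pItem;
}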
void MyFn(remove__index)(SG_context* pCtx)
{
	static const SG_uint32 uSize = 20u;

	SG_vector* pVector = NULL;
	SG_uint32  uIndex  = 0u;
	SG_uint32  uCount  = uSize;

	// create some test data
	// uSize elements, each with an SG_uint32* whose value equals its index
	VERIFY_ERR_CHECK(  SG_VECTOR__ALLOC(pCtx, &pVector, uSize)  );
	for (uIndex = 0u; uIndex < uSize; ++uIndex)
	{
		SG_uint32* pValue = NULL;
		VERIFY_ERR_CHECK(  SG_alloc1(pCtx, pValue)  );
		*pValue = uIndex;
		VERIFY_ERR_CHECK(  SG_vector__append(pCtx, pVector, pValue, NULL)  );
	}
	VERIFY_ERR_CHECK(  MyFn(_verify_size)(pCtx, pVector, uCount)  );

	// remove the last index and verify
	VERIFY_ERR_CHECK(  SG_vector__remove__index(pCtx, pVector, uSize - 1u, MyFn(_free_simple_value))  );
	uCount -= 1u;
	VERIFY_ERR_CHECK(  MyFn(_verify_size)(pCtx, pVector, uCount)  );
	VERIFY_ERR_CHECK(  MyFn(_verify_offset_values)(pCtx, pVector, 0u, uCount, 0)  );

	// remove the first index and verify
	VERIFY_ERR_CHECK(  SG_vector__remove__index(pCtx, pVector, 0u, MyFn(_free_simple_value))  );
	uCount -= 1u;
	VERIFY_ERR_CHECK(  MyFn(_verify_size)(pCtx, pVector, uCount)  );
	VERIFY_ERR_CHECK(  MyFn(_verify_offset_values)(pCtx, pVector, 0u, uCount, 1)  );

	// remove a middle index and verify
	uIndex = uCount / 2u;
	VERIFY_ERR_CHECK(  SG_vector__remove__index(pCtx, pVector, uIndex, MyFn(_free_simple_value))  );
	uCount -= 1u;
	VERIFY_ERR_CHECK(  MyFn(_verify_size)(pCtx, pVector, uCount)  );
	VERIFY_ERR_CHECK(  MyFn(_verify_offset_values)(pCtx, pVector, 0u, uIndex, 1)  );
	VERIFY_ERR_CHECK(  MyFn(_verify_offset_values)(pCtx, pVector, uIndex, uCount, 2)  );

fail:
	SG_vector__free__with_assoc(pCtx, pVector, MyFn(_free_simple_value));
	return;
}
Example #19
/**
 * Find the appropriate external tool to let the user perform a TEXT MERGE
 * on a file.
 *
 * TODO 2010/07/13 For now, this is hard-coded to use DiffMerge.
 * TODO            Later we want to allow them to have multiple
 * TODO            tools configured and/or to use the file suffix
 * TODO            and so on.
 */
static void _resolve__external_tool__lookup(SG_context * pCtx,
											struct _resolve_data * pData,
											const char * pszGid,
											const SG_vhash * pvhIssue,
											SG_string * pStrRepoPath,
											_resolve__external_tool ** ppET)
{
	_resolve__external_tool * pET = NULL;
	SG_repo * pRepo;

	SG_UNUSED( pszGid );
	SG_UNUSED( pvhIssue );

	SG_ERR_CHECK(  SG_pendingtree__get_repo(pCtx, pData->pPendingTree, &pRepo)  );

	SG_ERR_CHECK(  SG_alloc1(pCtx, pET)  );

	// TODO 2010/07/13 Use localsettings to determine WHICH external tool we should use.
	// TODO            (This could be based upon suffixes and/or whatever.)
	// TODO            Then -- for THAT tool -- lookup the program executable path and
	// TODO            the argument list.
	// TODO            Substitute the given pathnames into the argument list.
	// TODO
	// TODO            For now, we just hard-code DiffMerge.

	SG_ERR_CHECK(  SG_strdup(pCtx, "DiffMerge", &pET->pszName)  );

	SG_localsettings__get__sz(pCtx, "merge/diffmerge/program", pRepo, &pET->pszExe, NULL);
	if (SG_context__has_err(pCtx) || (!pET->pszExe) || (!*pET->pszExe))
	{
		SG_context__err_reset(pCtx);
		SG_ERR_THROW2(  SG_ERR_NO_MERGE_TOOL_CONFIGURED,
						(pCtx, "'%s'  Use 'vv localsetting set merge/diffmerge/program' and retry -or- manually merge content and then use 'vv resolve --mark'.",
						 SG_string__sz(pStrRepoPath))  );
	}

	// TODO 2010/07/13 Get argvec.

	*ppET = pET;
	return;

fail:
	_RESOLVE__EXTERNAL_TOOL__NULLFREE(pCtx, pET);
}
void SG_jscore__mutex__lock(
	SG_context* pCtx,
	const char* pszName)
{
	SG_bool bExists = SG_FALSE;
	_namedMutex* pFreeThisNamedMutex = NULL;
	_namedMutex* pNamedMutex = NULL;

	SG_ASSERT(gpJSCoreGlobalState);

	/* We always acquire the rbtree mutex first, then the specific named mutex. A deadlock is impossible. */
	//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Waiting for JS mutex manager in LOCK.")  );
	SG_ERR_CHECK(  SG_mutex__lock(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
	SG_ERR_CHECK(  SG_rbtree__find(pCtx, gpJSCoreGlobalState->prbJSMutexes, pszName, &bExists, (void**)&pNamedMutex)  );
	if (!bExists)
	{
		SG_ERR_CHECK(  SG_alloc1(pCtx, pFreeThisNamedMutex)  );
		pNamedMutex = pFreeThisNamedMutex;
		pNamedMutex->count = 0;
		SG_ERR_CHECK(  SG_mutex__init(pCtx, &pNamedMutex->mutex)  );
		SG_ERR_CHECK(  SG_rbtree__add__with_assoc(pCtx, gpJSCoreGlobalState->prbJSMutexes, pszName, pNamedMutex)  );
		pFreeThisNamedMutex = NULL;
	}
	pNamedMutex->count++; // Cannot be touched unless you hold mutexJsNamed. We do here.
	if (pNamedMutex->count > 10)
		SG_ERR_CHECK(  SG_log__report_info(pCtx, "%u threads are waiting for named JS mutex: %s", pNamedMutex->count-1, pszName)  );

	/* We deliberately unlock the rbtree mutex before locking the named mutex.
	 * We want to hold the lock on the rbtree for as little time as possible. Any subsequent
	 * attempts to lock the same name will yield the correct named mutex and correctly block
	 * on it below, without blocking access to the name management rbtree. */
	SG_ERR_CHECK(  SG_mutex__unlock(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
	//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Released JS mutex manager in LOCK.")  );

	//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Waiting for named JS mutex: %s", pszName)  );
	SG_ERR_CHECK(  SG_mutex__lock(pCtx, &pNamedMutex->mutex)  );
	//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Acquired named JS mutex: %s", pszName)  );

	return;

fail:
	SG_ERR_IGNORE(  SG_mutex__unlock(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
	SG_NULLFREE(pCtx, pFreeThisNamedMutex);
}
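/*
 * A generic, self-contained sketch of the locking protocol described in the
 * comments above (plain POSIX threads and a linked list, not the SG_jscore /
 * SG_rbtree API): a single registry mutex protects the table of named
 * mutexes and is held only long enough to find or create an entry; the
 * per-name mutex is locked only after the registry mutex has been released,
 * so callers blocking on one name never block access to the registry.
 * Entries are never removed in this sketch, so the entry pointer stays valid
 * after the registry lock is dropped.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

typedef struct _named_mutex_example
{
	char                         name[64];
	pthread_mutex_t              mutex;
	struct _named_mutex_example* pNext;
} _named_mutex_example_t;

static pthread_mutex_t         g_example_registry_lock = PTHREAD_MUTEX_INITIALIZER;
static _named_mutex_example_t* g_example_registry      = NULL;

static int example_named_mutex_lock(const char* pszName)
{
	_named_mutex_example_t* pNamed;

	// Registry mutex first: find or create the entry for this name.
	pthread_mutex_lock(&g_example_registry_lock);
	for (pNamed = g_example_registry; pNamed != NULL; pNamed = pNamed->pNext)
		if (strcmp(pNamed->name, pszName) == 0)
			break;
	if (pNamed == NULL)
	{
		pNamed = (_named_mutex_example_t*)calloc(1, sizeof(*pNamed));
		if (pNamed == NULL)
		{
			pthread_mutex_unlock(&g_example_registry_lock);
			return -1;
		}
		strncpy(pNamed->name, pszName, sizeof(pNamed->name) - 1);
		pthread_mutex_init(&pNamed->mutex, NULL);
		pNamed->pNext = g_example_registry;
		g_example_registry = pNamed;
	}
	// Release the registry before blocking on the specific named mutex.
	pthread_mutex_unlock(&g_example_registry_lock);

	return pthread_mutex_lock(&pNamed->mutex);
}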
void sg_wc_liveview_item__alloc__clone_from_prescan(SG_context * pCtx,
													sg_wc_liveview_item ** ppLVI,
													SG_wc_tx * pWcTx,
													const sg_wc_prescan_row * pPrescanRow)
{
	sg_wc_liveview_item * pLVI = NULL;
	SG_bool bFoundIssue = SG_FALSE;

	SG_ERR_CHECK(  SG_alloc1(pCtx, pLVI)  );

	// caller needs to set the backptr if appropriate
	// if/when it adds this LVI to the LVD's vector.
	pLVI->pLiveViewDir = NULL;

	pLVI->uiAliasGid = pPrescanRow->uiAliasGid;

	SG_ERR_CHECK(  SG_STRING__ALLOC__COPY(pCtx,
										  &pLVI->pStringEntryname,
										  pPrescanRow->pStringEntryname)  );

	pLVI->pPrescanRow = pPrescanRow;	// borrow a reference rather than cloning the scanrow.

	// because a liveview_item must start as an
	// exact clone of a scanrow, there cannot be
	// any in-tx changes yet for it.
	pLVI->pPcRow_PC = NULL;

	pLVI->scan_flags_Live = pPrescanRow->scan_flags_Ref;
	pLVI->tneType = pPrescanRow->tneType;

	SG_ERR_CHECK(  sg_wc_db__issue__get_issue(pCtx, pWcTx->pDb,
											  pLVI->uiAliasGid,
											  &bFoundIssue,
											  &pLVI->statusFlags_x_xr_xu,
											  &pLVI->pvhIssue,
											  &pLVI->pvhSavedResolutions)  );

	*ppLVI = pLVI;
	return;

fail:
	SG_WC_LIVEVIEW_ITEM__NULLFREE(pCtx, pLVI);
}
void SG_varpool__alloc(SG_context* pCtx, SG_varpool** ppResult, SG_uint32 subpool_space)
{
	SG_varpool* pThis = NULL;

	SG_ERR_CHECK_RETURN(  SG_alloc1(pCtx, pThis)  );

	pThis->subpool_space = subpool_space;

	SG_ERR_CHECK(  sg_variantsubpool__alloc(pCtx, pThis->subpool_space, NULL, &pThis->pHead)  );

	pThis->count_subpools = 1;

	*ppResult = pThis;

	return;

fail:
	SG_ERR_IGNORE(  SG_varpool__free(pCtx, pThis)  );
}
void SG_jscontextpool__init(SG_context * pCtx, const char * szApplicationRoot)
{
	if(gpJSContextPoolGlobalState != NULL)
		return;

	SG_ERR_CHECK_RETURN(  SG_alloc1(pCtx, gpJSContextPoolGlobalState)  );

	SG_localsettings__get__collapsed_vhash(pCtx, "server", NULL, &gpJSContextPoolGlobalState->pServerConfig);
	if(SG_context__has_err(pCtx))
	{
		SG_log__report_error__current_error(pCtx);
		SG_context__err_reset(pCtx);
	}
	if(gpJSContextPoolGlobalState->pServerConfig==NULL)
	{
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &gpJSContextPoolGlobalState->pServerConfig)  );
	}

	SG_ERR_CHECK(  _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "enable_diagnostics", NULL)  );
	SG_ERR_CHECK(  _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "readonly", NULL)  );
	SG_ERR_CHECK(  _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "remote_ajax_libs", NULL)  );
	SG_ERR_CHECK(  _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "ssjs_mutable", &gpJSContextPoolGlobalState->ssjsMutable)  );

	if(szApplicationRoot==NULL)
		szApplicationRoot="";
	SG_ERR_CHECK(  SG_vhash__update__string__sz(pCtx, gpJSContextPoolGlobalState->pServerConfig, "application_root", szApplicationRoot)  );

	// Start up SpiderMonkey.
	SG_jscore__new_runtime(pCtx, SG_jsglue__context_callback, NULL, SG_FALSE, NULL);

	//If jscore is already initialized, just move on.
	if (SG_context__err_equals(pCtx, SG_ERR_ALREADY_INITIALIZED))
	{
		SG_context__err_reset(pCtx);
	}

	SG_ERR_CHECK(  SG_mutex__init(pCtx, &gpJSContextPoolGlobalState->lock)  );

	return;
fail:
	SG_VHASH_NULLFREE(pCtx, gpJSContextPoolGlobalState->pServerConfig);
	SG_NULLFREE(pCtx, gpJSContextPoolGlobalState);
}
void SG_jscore__new_runtime(
	SG_context * pCtx,
	JSContextCallback cb,
	JSFunctionSpec *shell_functions,
	SG_bool bSkipModules,
	JSRuntime **ppRt
	)
{
	char * szSsjsMutable = NULL;

	if(gpJSCoreGlobalState != NULL)
		SG_ERR_THROW_RETURN(SG_ERR_ALREADY_INITIALIZED);

	SG_ERR_CHECK(  SG_alloc1(pCtx, gpJSCoreGlobalState)  );

	// Store this for later.
	gpJSCoreGlobalState->cb = cb;
	gpJSCoreGlobalState->shell_functions = shell_functions;
	gpJSCoreGlobalState->bSkipModules = bSkipModules;

	if (! bSkipModules)
		SG_ERR_CHECK(  _sg_jscore_getpaths(pCtx)  );

	SG_ERR_CHECK(  SG_mutex__init(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
	SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &gpJSCoreGlobalState->prbJSMutexes)  );

	// Start up SpiderMonkey.
	JS_SetCStringsAreUTF8();
	gpJSCoreGlobalState->rt = JS_NewRuntime(64L * 1024L * 1024L); // TODO decide the right size here
	if(gpJSCoreGlobalState->rt==NULL)
		SG_ERR_THROW2(SG_ERR_JS, (pCtx, "Failed to allocate JS Runtime"));

	if (ppRt)
		*ppRt = gpJSCoreGlobalState->rt;

	return;
fail:
	SG_NULLFREE(pCtx, szSsjsMutable);

	SG_PATHNAME_NULLFREE(pCtx, gpJSCoreGlobalState->pPathToDispatchDotJS);
	SG_PATHNAME_NULLFREE(pCtx, gpJSCoreGlobalState->pPathToModules);
}
void SG_curl__alloc(SG_context* pCtx, SG_curl** ppCurl)
{
	_sg_curl* p = NULL;

	SG_ERR_CHECK(  SG_alloc1(pCtx, p)  );

	p->pCurl = curl_easy_init();
	if (!p->pCurl)
		SG_ERR_THROW_RETURN(SG_ERR_LIBCURL(CURLE_FAILED_INIT));

	SG_ERR_CHECK(  _set_curl_options(pCtx, p->pCurl)  );
	p->pCtx = pCtx;

	*ppCurl = (SG_curl*)p;

	return;

fail:
	SG_ERR_IGNORE(  SG_curl__free(pCtx, (SG_curl*)p)  );
}
void sg_variantsubpool__alloc(SG_context* pCtx, SG_uint32 space, sg_variantsubpool* pNext,
								  sg_variantsubpool ** ppNew)
{
	sg_variantsubpool* pThis = NULL;

	SG_ERR_CHECK_RETURN(  SG_alloc1(pCtx, pThis)  );

	pThis->space = space;
	SG_ERR_CHECK(  SG_alloc(pCtx, pThis->space,sizeof(SG_variant),&pThis->pVariants)  );

	pThis->pNext = pNext;
	pThis->count = 0;

	*ppNew = pThis;

	return;

fail:
	SG_ERR_IGNORE(  sg_variantsubpool__free(pCtx, pThis)  );
}
Example #27
void SG_treendx__open(
	SG_context* pCtx,
	SG_repo* pRepo,
	SG_uint32 iDagNum,
	SG_pathname* pPath,
	SG_bool bQueryOnly,
	SG_treendx** ppNew
	)
{
	SG_treendx* pTreeNdx = NULL;
    SG_bool b_exists = SG_FALSE;

	SG_ERR_CHECK(  SG_alloc1(pCtx, pTreeNdx)  );

    pTreeNdx->pRepo = pRepo;
    pTreeNdx->pPath_db = pPath;
    pPath = NULL;
    pTreeNdx->iDagNum = iDagNum;
    pTreeNdx->bQueryOnly = bQueryOnly;
	pTreeNdx->bInTransaction = SG_FALSE;

    SG_ERR_CHECK(  SG_fsobj__exists__pathname(pCtx, pTreeNdx->pPath_db, &b_exists, NULL, NULL)  );

    if (b_exists)
    {
        SG_ERR_CHECK(  sg_sqlite__open__pathname(pCtx, pTreeNdx->pPath_db, &pTreeNdx->psql)  );
    }
    else
    {
        SG_ERR_CHECK(  sg_sqlite__create__pathname(pCtx, pTreeNdx->pPath_db,&pTreeNdx->psql)  );

        SG_ERR_CHECK(  sg_treendx__create_db(pCtx, pTreeNdx)  );
    }

	*ppNew = pTreeNdx;

	return;
fail:
    SG_PATHNAME_NULLFREE(pCtx, pPath);
    SG_TREENDX_NULLFREE(pCtx, pTreeNdx);
}
Example #28
void SG_vector_i64__alloc(SG_context* pCtx, SG_vector_i64 ** ppVector, SG_uint32 suggestedInitialSize)
{
	SG_vector_i64 * pVector = NULL;

	SG_NULLARGCHECK_RETURN(ppVector);

	SG_ERR_CHECK_RETURN(  SG_alloc1(pCtx, pVector)  );

	pVector->m_uiSizeInUse = 0;
	pVector->m_uiAllocated = 0;
	pVector->m_uiChunkSize = MIN_CHUNK_SIZE;
	pVector->m_array = NULL;

	SG_ERR_CHECK(  _sg_vector_i64__grow(pCtx, pVector,suggestedInitialSize)  );

	*ppVector = pVector;

	return;

fail:
	SG_VECTOR_I64_NULLFREE(pCtx, pVector);
}
Example #29
void sg_client__c__open(SG_context* pCtx,
						SG_client * pClient,
						const SG_vhash* pvh_credentials)
{
	sg_client_c_instance_data* pMe = NULL;

	SG_NULLARGCHECK_RETURN(pClient);
	SG_ARGCHECK_RETURN(pvh_credentials || pvh_credentials == NULL_CREDENTIAL, pvh_credentials);

	SG_ERR_CHECK(  SG_alloc1(pCtx, pMe)  );
	SG_ERR_CHECK(  SG_server__alloc(pCtx, &pMe->pServer)  );

	pClient->p_vtable_instance_data = (sg_client__vtable__instance_data *)pMe;

	return;

fail:
	if (pMe)
	{
		SG_ERR_IGNORE(  SG_server__free(pCtx, pMe->pServer)  );
		SG_NULLFREE(pCtx, pMe);
	}
}
Example #30
void SG_pull__begin(
	SG_context* pCtx,
	const char* pszPullIntoRepoDescriptorName, 
	SG_client* pClient,
	SG_pull** ppPull)
{
	_sg_pull* pMyPull = NULL;

	SG_NULLARGCHECK_RETURN(pszPullIntoRepoDescriptorName);
	SG_NULLARGCHECK_RETURN(pClient);
	SG_NULLARGCHECK_RETURN(ppPull);

	SG_ERR_CHECK(  SG_alloc1(pCtx, pMyPull)  );
	SG_ERR_CHECK(  _pull_init(pCtx, pClient, pszPullIntoRepoDescriptorName, &pMyPull->pPullInstance)  );
	pMyPull->pClient = pClient;

	*ppPull = (SG_pull*)pMyPull;
	pMyPull = NULL;

	/* fall through */
fail:
	SG_ERR_IGNORE(  _sg_pull__nullfree(pCtx, &pMyPull)  );
}