void MyFn(test_W2771)(SG_context * pCtx)
{
	SG_vector_i64 * pVec = NULL;
	SG_int64 i64 = 0x123456789abcdefLL;
	SG_uint32 len;

	// allocate a vector with a hint of at least 1 cell.
	// (the minimum chunk size will override this, but we don't care.)
	VERIFY_ERR_CHECK(  SG_vector_i64__alloc(pCtx,&pVec,1)  );

	// verify size-in-use is 0.
	VERIFY_ERR_CHECK(  SG_vector_i64__length(pCtx,pVec,&len)  );
	VERIFY_COND("test1",(len==0));

	// do a hard-reserve with zero-fill of the vector.
	// (we pick a size larger than any pre-defined chunk size.)
	VERIFY_ERR_CHECK(  SG_vector_i64__reserve(pCtx, pVec, 1000)  );
	VERIFY_ERR_CHECK(  SG_vector_i64__get(pCtx, pVec, 999, &i64)  );
	VERIFY_COND("get[1000]", (i64 == 0));

	VERIFY_ERR_CHECK(  SG_vector_i64__length(pCtx,pVec,&len)  );
	VERIFY_COND("test1",(len==1000));

fail:
	SG_VECTOR_I64_NULLFREE(pCtx, pVec);
}
void MyFn(test1)(SG_context * pCtx)
{
	SG_vector_i64 * pVec = NULL;
	SG_uint32 k, ndx, len;

	VERIFY_ERR_CHECK(  SG_vector_i64__alloc(pCtx,&pVec,20)  );

	VERIFY_ERR_CHECK(  SG_vector_i64__length(pCtx,pVec,&len)  );
	VERIFY_COND("test1",(len==0));

	for (k=0; k<100; k++)
	{
		SG_int64 kValue = (SG_int64)k;
		VERIFY_ERR_CHECK(  SG_vector_i64__append(pCtx,pVec,kValue,&ndx)  );
		VERIFY_COND("append",(ndx==k));
	}
	for (k=0; k<100; k++)
	{
		SG_int64 value;
		VERIFY_ERR_CHECK(  SG_vector_i64__get(pCtx,pVec,k,&value)  );
		VERIFY_COND("get1",(value == ((SG_int64)k)));
	}

	for (k=0; k<100; k++)
	{
		SG_int64 kValue = (SG_int64)k+10000;
		VERIFY_ERR_CHECK(  SG_vector_i64__set(pCtx,pVec,k,kValue)  );
	}
	for (k=0; k<100; k++)
	{
		SG_int64 value;
		VERIFY_ERR_CHECK(  SG_vector_i64__get(pCtx,pVec,k,&value)  );
		VERIFY_COND("get2",(value == ((SG_int64)(k+10000))));
	}

	VERIFY_ERR_CHECK(  SG_vector_i64__length(pCtx,pVec,&len)  );
	VERIFY_COND("test1",(len==100));

	// clear the vector and verify the length drops back to zero.
	VERIFY_ERR_CHECK(  SG_vector_i64__clear(pCtx,pVec)  );

	VERIFY_ERR_CHECK(  SG_vector_i64__length(pCtx,pVec,&len)  );
	VERIFY_COND("test1",(len==0));

	// fall thru to common cleanup

fail:
	SG_VECTOR_I64_NULLFREE(pCtx, pVec);
}
/**
 * During the revert-all setup, we add certain deleted items to the
 * kill-list (so that we'll delete the tbl_pc row for them), effectively
 * marking them clean.  We don't insert them into the pMrgCSet directly;
 * we let merge discover that they are missing and decide what to do.
 * (An illustrative sketch of that setup step follows this function.)
 *
 * But if the merge-engine discovers the item and has different plans
 * for it, we cancel the predicted kill for it.
 */
void sg_wc_tx__merge__remove_from_kill_list(SG_context * pCtx,
											SG_mrg * pMrg,
											const SG_uint64 uiAliasGid)
{
	SG_uint32 k, count;

	if (!pMrg->pVecRevertAllKillList)
		return;

	SG_ERR_CHECK(  SG_vector_i64__length(pCtx, pMrg->pVecRevertAllKillList, &count)  );
	for (k=0; k<count; k++)
	{
		SG_uint64 uiAliasGid_k;

		SG_ERR_CHECK(  SG_vector_i64__get(pCtx, pMrg->pVecRevertAllKillList, k, (SG_int64 *)&uiAliasGid_k)  );
		if (uiAliasGid_k == uiAliasGid)
		{
#if TRACE_WC_MERGE
			SG_int_to_string_buffer buf;
			SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
									   "Cancel kill-list for [0x%s]\n",
									   SG_uint64_to_sz__hex(uiAliasGid_k, buf))  );
#endif
			SG_ERR_CHECK(  SG_vector_i64__set(pCtx, pMrg->pVecRevertAllKillList, k, SG_WC_DB__ALIAS_GID__UNDEFINED)  );
		}
	}

fail:
	return;
}
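/**
 * Illustrative sketch only (not part of the original source): roughly how a
 * deleted item might get onto the kill-list during the revert-all setup
 * described above.  The helper name and its call site are assumptions; the
 * SG_vector_i64 calls are the same ones exercised in the tests above.
 */
#if 0
static void _example__add_to_kill_list(SG_context * pCtx,
									   SG_mrg * pMrg,
									   SG_uint64 uiAliasGid)
{
	SG_uint32 ndx;

	// lazily create the kill-list the first time an item needs it.
	if (!pMrg->pVecRevertAllKillList)
		SG_ERR_CHECK(  SG_vector_i64__alloc(pCtx, &pMrg->pVecRevertAllKillList, 10)  );

	// remember this alias; queue_plan__kill_list will later delete its
	// tbl_pc row unless remove_from_kill_list cancels the entry first.
	SG_ERR_CHECK(  SG_vector_i64__append(pCtx, pMrg->pVecRevertAllKillList,
										 (SG_int64)uiAliasGid, &ndx)  );

fail:
	return;
}
#endif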
/**
 * If we are in a REVERT-ALL and it found some ADDED-SPECIAL + REMOVED
 * items, we need to delete the corresponding rows in the pc_L0 table
 * so that those items won't appear in subsequent STATUS commands after
 * the REVERT.
 */
void sg_wc_tx__merge__queue_plan__kill_list(SG_context * pCtx,
											SG_mrg * pMrg)
{
	SG_uint32 k, count;

	if (!pMrg->pVecRevertAllKillList)
		return;

	SG_ERR_CHECK(  SG_vector_i64__length(pCtx, pMrg->pVecRevertAllKillList, &count)  );
	for (k=0; k<count; k++)
	{
		SG_int64 uiAliasGid;

		SG_ERR_CHECK(  SG_vector_i64__get(pCtx, pMrg->pVecRevertAllKillList, k, (SG_int64 *)&uiAliasGid)  );
		if (uiAliasGid != SG_WC_DB__ALIAS_GID__UNDEFINED)
			SG_ERR_CHECK(  sg_wc_tx__queue__kill_pc_row(pCtx, pMrg->pWcTx, uiAliasGid)  );
	}

fail:
	return;
}
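/*
 * A simplified picture of how these pieces fit together (the ordering is
 * inferred from the comments above, not shown here): the revert-all setup
 * appends the alias of each ADDED-SPECIAL + REMOVED item to
 * pMrg->pVecRevertAllKillList; while the merge runs,
 * sg_wc_tx__merge__remove_from_kill_list() overwrites the slot of any item
 * the merge-engine handles itself with SG_WC_DB__ALIAS_GID__UNDEFINED;
 * finally, sg_wc_tx__merge__queue_plan__kill_list() queues a kill of the
 * pc_L0 row for every slot still holding a real alias.  Cancelled entries
 * are overwritten with the sentinel rather than removed, so the queue-plan
 * pass simply skips them.
 */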