/*
 * Queue cmockery expectations for exactly one elog() invocation:
 * the mocked elog_start() (any file/line/function) followed by the
 * mocked elog_finish() (any level/format string).
 */
static void expect_elog()
{
    expect_any(elog_start, filename);
    expect_any(elog_start, lineno);
    expect_any(elog_start, funcname);
    will_be_called(elog_start);
    expect_any(elog_finish, elevel);
    expect_any(elog_finish, fmt);
    will_be_called(elog_finish);
}
/*
 * Test that the ExecWorkFile struct is allocated in TopMemoryContext.
 */
void test__ExecWorkFile_Create__InTopMemContext(void **state)
{
    char *test_filename = "foo";

    will_return(WorkfileQueryspace_AddWorkfile, true);

    /* ExecWorkFile_Create is expected to create the backing BufFile */
    expect_value(BufFileCreateFile, fileName, test_filename);
    expect_value(BufFileCreateFile, delOnClose, true);
    expect_value(BufFileCreateFile, interXact, false);
    will_return(BufFileCreateFile, NULL);
    expect_value(BufFileSetWorkfile, buffile, NULL);
    will_be_called(BufFileSetWorkfile);

    /*
     * Create a new memory context, so that we can distinguish it from
     * TopMemoryContext.
     */
    CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext,
                                                 "mock test context",
                                                 ALLOCSET_DEFAULT_MINSIZE,
                                                 ALLOCSET_DEFAULT_INITSIZE,
                                                 ALLOCSET_DEFAULT_MAXSIZE);

    /*
     * ExecWorkFile_Create will call our mocked palloc0 function
     * execWorkfile__palloc0_mock and our mocked pstrdup function
     * execWorkfile_pstrdup_mock.  These functions will assert that the
     * allocation of the result happens in the TopMemoryContext.
     *
     * NOTE(review): ewf is intentionally unused; the assertions live in
     * the palloc0/pstrdup mocks, not here.
     */
    ExecWorkFile *ewf = ExecWorkFile_Create(test_filename, BUFFILE,
                                            true /* delOnClose */,
                                            0 /* compressType */);
}
/*
 * Basic enqueue tests, including compaction upon enqueuing into a
 * full queue.
 */
void test__ForwardFsyncRequest_enqueue(void **state)
{
    bool ret;
    int i;
    RelFileNode dummy = {1,1,1};

    init_request_queue();
    /* no checkpointer running, so there is no latch to set */
    ProcGlobal->checkpointerLatch = NULL;

    /* each ForwardFsyncRequest call takes CheckpointerCommLock exclusively */
    expect_value(LWLockAcquire, l, CheckpointerCommLock);
    expect_value(LWLockAcquire, mode, LW_EXCLUSIVE);
    will_return(LWLockAcquire, true);
    expect_value(LWLockRelease, l, CheckpointerCommLock);
    will_be_called(LWLockRelease);

    /* basic enqueue */
    ret = ForwardFsyncRequest(dummy, MAIN_FORKNUM, 1);
    assert_true(ret);
    assert_true(CheckpointerShmem->num_requests == 1);

    /* fill up the queue; distinct segno per entry so nothing is a duplicate */
    for (i=2; i<=MAX_BGW_REQUESTS; i++)
    {
        expect_value(LWLockAcquire, l, CheckpointerCommLock);
        expect_value(LWLockAcquire, mode, LW_EXCLUSIVE);
        will_return(LWLockAcquire, true);
        expect_value(LWLockRelease, l, CheckpointerCommLock);
        will_be_called(LWLockRelease);
        ret = ForwardFsyncRequest(dummy, MAIN_FORKNUM, i);
        assert_true(ret);
    }

    expect_value(LWLockAcquire, l, CheckpointerCommLock);
    expect_value(LWLockAcquire, mode, LW_EXCLUSIVE);
    will_return(LWLockAcquire, true);
    expect_value(LWLockRelease, l, CheckpointerCommLock);
    will_be_called(LWLockRelease);
#ifdef USE_ASSERT_CHECKING
    /* the compaction path asserts the lock is held */
    expect_value(LWLockHeldByMe, l, CheckpointerCommLock);
    will_return(LWLockHeldByMe, true);
#endif

    /*
     * This enqueue request should trigger compaction, but no
     * duplicates are in the queue.  So the queue should remain
     * full and the request should be rejected.
     */
    ret = ForwardFsyncRequest(dummy, MAIN_FORKNUM, 0);
    assert_false(ret);
    assert_true(CheckpointerShmem->num_requests == CheckpointerShmem->max_requests);

    free(CheckpointerShmem);
}
/*
 * Register the full set of cmockery expectations for a single
 * churl_headers_append(handle, key, value) invocation with the given
 * expected arguments.
 */
static void expect_headers_append(CHURL_HEADERS expected_handle,
                                  const char *expected_key,
                                  const char *expected_value)
{
    expect_value(churl_headers_append, headers, expected_handle);
    expect_string(churl_headers_append, key, expected_key);
    expect_string(churl_headers_append, value, expected_value);
    will_be_called(churl_headers_append);
}
/*
 * Add an expect clause on a churl_headers_append with given
 * key and value.
 */
void expect_churl_headers(const char *key, const char *value)
{
    /* the headers handle comes from the file-level input_data fixture */
    expect_value(churl_headers_append, headers, input_data->headers);
    expect_string(churl_headers_append, key, key);
    expect_string(churl_headers_append, value, value);
    will_be_called(churl_headers_append);
}
/*
 * Queue "accept anything" expectations for one mocked
 * ExceptionalCondition() call, i.e. one failed Assert.
 *
 * NOTE(review): identifiers beginning with an underscore followed by an
 * uppercase letter are reserved by the C standard; consider renaming.
 */
void _ExceptionalCondition()
{
    expect_any(ExceptionalCondition,conditionName);
    expect_any(ExceptionalCondition,errorType);
    expect_any(ExceptionalCondition,fileName);
    expect_any(ExceptionalCondition,lineNumber);
    will_be_called(ExceptionalCondition);
}
/*
 * A bug found in MPP-20426 was that we were overrunning into the next page
 * of DistributedLog.  The intention of the memset with zeros is to reset
 * the rest of the current page if we are in the middle of a page, so that
 * we won't see uncommitted data due to some recovery work.  However, we
 * were doing the wrong math that calculates the size of the rest of the
 * page as the size of the part preceding the current xid.  The worst
 * scenario was for the subtransaction shared memory, which follows
 * distributed log shared memory, to be overwritten.
 */
static void
MPP_20426(void **state, TransactionId nextXid)
{
    char pages[BLCKSZ * DtxLogStartupNumPage];
    char zeros[BLCKSZ];
    int bytes;

    /* Setup DistributedLogCtl with two page buffers backed by `pages` */
    DistributedLogCtl->shared = (SlruShared) malloc(sizeof(SlruSharedData));
    DistributedLogCtl->shared->page_buffer =
        (char **) malloc(DtxLogStartupNumPage * sizeof(char *));
    DistributedLogCtl->shared->page_dirty =
        (bool *) malloc(DtxLogStartupNumPage * sizeof(bool));
    DistributedLogCtl->shared->page_buffer[0] = &pages[0];
    DistributedLogCtl->shared->page_buffer[1] = &pages[BLCKSZ];
    /* fill with a sentinel so any overrun into the second page is visible */
    memset(pages, 0x7f, sizeof(pages));
    memset(zeros, 0, sizeof(zeros));

    expect_value(LWLockAcquire, lockid, DistributedLogControlLock);
    expect_value(LWLockAcquire, mode, LW_EXCLUSIVE);
    will_be_called(LWLockAcquire);

    /* This test is only for the case xid is not on the boundary. */
    expect_value(SimpleLruReadPage, ctl, DistributedLogCtl);
    expect_any(SimpleLruReadPage, pageno);
    expect_value(SimpleLruReadPage, xid, nextXid);
    will_return(SimpleLruReadPage, 0);

    expect_value(LWLockRelease, lockid, DistributedLogControlLock);
    will_be_called(LWLockRelease);

    /* Run the function. */
    DistributedLog_Startup(nextXid, nextXid);

    /* DistributedLog_Startup should not overwrite the subsequent block. */
    assert_true(pages[BLCKSZ] == 0x7f);

    /* Make sure the part following the xid is zeroed. */
    bytes = TransactionIdToEntry(nextXid) * sizeof(DistributedLogEntry);
    assert_memory_equal(&pages[bytes], zeros, BLCKSZ - bytes);

    free(DistributedLogCtl->shared->page_dirty);
    free(DistributedLogCtl->shared->page_buffer);
    free(DistributedLogCtl->shared);
}
/*
 * Tests ExecEagerFreeShareInputScan when plan->share_type = SHARE_MATERIAL.
 * Verifies that the tuplestore accessor and the tuplestore state are destroyed,
 * and that all the pointers are set to NULL.
 */
void test__ExecEagerFreeShareInputScan_SHARE_MATERIAL(void **state)
{
    ShareInputScanState *sisc = makeNode(ShareInputScanState);
    ShareInputScan *plan = makeNode(ShareInputScan);
    sisc->ss.ps.plan = plan;
    sisc->ts_markpos = NULL;
    /* FIXED_POINTER_VAL stands in for real tuplestore objects */
    sisc->ts_pos = FIXED_POINTER_VAL;
    sisc->ts_state = (GenericTupStore *) palloc0(sizeof(GenericTupStore));
    sisc->ts_state->matstore = FIXED_POINTER_VAL;
    sisc->freed = false;
    plan->share_type = SHARE_MATERIAL;

    /* the accessor must be destroyed */
    expect_value(ntuplestore_destroy_accessor, acc, FIXED_POINTER_VAL);
    will_be_called(ntuplestore_destroy_accessor);
    /* pretend this node is the reader side of a reader/writer tuplestore */
    expect_value(ntuplestore_is_readerwriter_reader, nts, FIXED_POINTER_VAL);
    will_return(ntuplestore_is_readerwriter_reader, true);
    /* ... in which case the tuplestore itself must also be destroyed */
    expect_value(ntuplestore_destroy, ts, FIXED_POINTER_VAL);
    will_be_called(ntuplestore_destroy);

    ShareNodeEntry *shareNodeEntry = makeNode(ShareNodeEntry);
    shareNodeEntry->refcount = SHARE_NODE_ENTRY_REFCOUNT;
    expect_any(ExecGetShareNodeEntry, estate);
    expect_any(ExecGetShareNodeEntry, shareidx);
    expect_value(ExecGetShareNodeEntry, fCreate, false);
    will_return(ExecGetShareNodeEntry, shareNodeEntry);

    ExecEagerFreeShareInputScan(sisc);

    /* all tuplestore pointers must have been reset */
    assert_int_equal(sisc->ts_markpos, NULL);
    assert_int_equal(sisc->ts_pos, NULL);
    assert_int_equal(sisc->ts_state, NULL);
    /* the shared-node refcount is released exactly once */
    assert_int_equal(shareNodeEntry->refcount, SHARE_NODE_ENTRY_REFCOUNT - 1);
    assert_true(sisc->freed);
    return;
}
/*
 * Queue expectations for one acquire/release pair on SyncRepLock,
 * taken in the given lock mode.
 */
static void expect_lwlock(LWLockMode expected_mode)
{
    expect_value(LWLockAcquire, l, SyncRepLock);
    expect_value(LWLockAcquire, mode, expected_mode);
    will_return(LWLockAcquire, true);

    expect_value(LWLockRelease, l, SyncRepLock);
    will_be_called(LWLockRelease);
}
/*
 * Tests that ExecEagerFree calls the new ExecEagerFreeShareInputScan
 * function when the input is a ShareInputScanState.
 */
void test__ExecEagerFree_ExecEagerFreeShareInputScan(void **state)
{
    ShareInputScanState *sisc = makeNode(ShareInputScanState);
    /* the dispatcher must forward exactly this node to the free routine */
    expect_value(ExecEagerFreeShareInputScan, node, sisc);
    will_be_called(ExecEagerFreeShareInputScan);
    ExecEagerFree(sisc);
}
/*
 * Queue expectations for one ereport() invocation: the mocked
 * errstart() with any arguments.
 */
static void expect_ereport()
{
    expect_any(errstart, elevel);
    expect_any(errstart, filename);
    expect_any(errstart, lineno);
    expect_any(errstart, funcname);
    expect_any(errstart, domain);
    will_be_called(errstart);
}
/*
 * Queue the expectations for one external_set_env_vars() call against
 * the gphd_uri fixture; the mock copies mock_extvar back through the
 * extvar out-parameter.  (gphd_uri and mock_extvar are presumably
 * file-level fixtures set up by the calling test.)
 */
void expect_external_vars()
{
    expect_any(external_set_env_vars, extvar);
    expect_string(external_set_env_vars, uri, gphd_uri->uri);
    expect_value(external_set_env_vars, csv, false);
    expect_value(external_set_env_vars, escape, NULL);
    expect_value(external_set_env_vars, quote, NULL);
    expect_value(external_set_env_vars, header, false);
    expect_value(external_set_env_vars, scancounter, 0);
    will_assign_memory(external_set_env_vars, extvar, mock_extvar, sizeof(extvar_t));
    will_be_called(external_set_env_vars);
}
void test_build_http_headers_empty_user_error(void **state) { /* setup mock data and expectations */ PxfInputData *input = (PxfInputData *) palloc0(sizeof(PxfInputData)); CHURL_HEADERS headers = (CHURL_HEADERS) palloc0(sizeof(CHURL_HEADERS)); GPHDUri *gphd_uri = (GPHDUri *) palloc0(sizeof(GPHDUri)); Relation rel = (Relation) palloc0(sizeof(RelationData)); ExtTableEntry ext_tbl; struct tupleDesc tuple; input->headers = headers; input->gphduri = gphd_uri; input->rel = NULL; gphd_uri->uri = "testuri"; expect_any(external_set_env_vars, extvar); expect_string(external_set_env_vars, uri, gphd_uri->uri); expect_value(external_set_env_vars, csv, false); expect_value(external_set_env_vars, escape, NULL); expect_value(external_set_env_vars, quote, NULL); expect_value(external_set_env_vars, header, false); expect_value(external_set_env_vars, scancounter, 0); struct extvar_t mock_extvar; mock_extvar.GP_USER = ""; snprintf(mock_extvar.GP_SEGMENT_ID, sizeof(mock_extvar.GP_SEGMENT_ID), "SegId"); snprintf(mock_extvar.GP_SEGMENT_COUNT, sizeof(mock_extvar.GP_SEGMENT_COUNT), "10"); snprintf(mock_extvar.GP_XID, sizeof(mock_extvar.GP_XID), "20"); will_assign_memory(external_set_env_vars, extvar, &mock_extvar, sizeof(extvar_t)); will_be_called(external_set_env_vars); MemoryContext old_context = CurrentMemoryContext; PG_TRY(); { build_http_headers(input); assert_false("Expected Exception"); } PG_CATCH(); { MemoryContextSwitchTo(old_context); ErrorData *edata = CopyErrorData(); assert_true(edata->elevel == ERROR); char *expected_message = pstrdup("User identity is unknown"); assert_string_equal(edata->message, expected_message); pfree(expected_message); } PG_END_TRY(); }
/*
 * Mocked object initializations required for dispatchPlan.
 */
void _init_cdbdisp_dispatchPlan(QueryDesc *queryDesc)
{
    /* minimal estate/plannedstmt so dispatchPlan has something to walk */
    queryDesc->estate = (struct EState *)palloc0(sizeof(struct EState));
    queryDesc->estate->es_sliceTable =
        (struct SliceTable *) palloc0(sizeof(struct SliceTable));
    queryDesc->operation = CMD_NOTHING;
    queryDesc->plannedstmt = (PlannedStmt *)palloc0(sizeof(PlannedStmt));

    will_be_called(clear_relsize_cache);
    /* dispatchPlan asks for the root slice; slice 0 suffices for the test */
    expect_any(RootSliceIndex, estate);
    will_return(RootSliceIndex,0);
}
/*
 * Make sure resetSessionForPrimaryGangLoss doesn't access catalog.
 */
static void test__resetSessionForPrimaryGangLoss(void **state)
{
    /* NOTE(review): dummyGlobal/dummyProc appear unused here -- confirm */
    PROC_HDR dummyGlobal;
    PGPROC dummyProc;

    will_be_called(RedZoneHandler_DetectRunawaySession);
    will_return(ProcCanSetMppSessionId, true);
    /* Assume we have created a temporary namespace. */
    will_return(TempNamespaceOidIsValid, true);
    will_return(ResetTempNamespace, 9999);
    OldTempNamespace = InvalidOid;

    resetSessionForPrimaryGangLoss();
    /* the dropped temp namespace oid must be remembered for later cleanup */
    assert_int_equal(OldTempNamespace, 9999);
}
/*
 * Verify that ReleaseTupleDesc on a refcounted tuple descriptor only
 * decrements the refcount and unregisters it from the resource owner,
 * without freeing the descriptor.
 */
void test__ReleaseTupleDesc__ref_count(void **state)
{
    TupleDesc td = CreateTemplateTupleDesc(2, true);
    td->tdrefcount = 3;

    /* ReleaseTupleDesc must forget this tupdesc with the resource owner */
    expect_any(ResourceOwnerForgetTupleDesc, owner);
    expect_value(ResourceOwnerForgetTupleDesc, tupdesc, td);
    will_be_called(ResourceOwnerForgetTupleDesc);

    /* should decrement refcount but not free */
    ReleaseTupleDesc(td);
    assert_int_equal(2, td->tdrefcount);

    pfree(td);
}
/*
 * Test for MPP-24515: a failed fork must be reported as -1 and must not
 * record a bogus pid in the subprocess table.
 */
void test__FileRep_StartChildProcess(void **state)
{
    pid_t pid;
    FileRepSubProc FileRepSubProcListLocal[FileRepProcessType__EnumerationCount];

    /* work on a local copy of the subprocess table */
    FileRepSubProcList = &FileRepSubProcListLocal;
    memcpy(FileRepSubProcList, FileRepSubProcListInitial,
           sizeof(FileRepSubProcListInitial));

    /* simulate fork failure */
    will_return(fork_process, -1);
    will_be_called(RedZoneHandler_DetectRunawaySession);

    pid = FileRep_StartChildProcess(FileRepProcessTypeMirrorConsumerAppendOnly1);

    assert_int_equal(pid, -1);
    assert_int_equal(
        FileRepSubProcList[FileRepProcessTypeMirrorConsumerAppendOnly1].pid,
        0);
}
/* Releases a SessionState entry for the specified sessionId */
static void ReleaseSessionState(int sessionId)
{
    /* We call shutdown twice */
    will_be_called_count(LWLockAcquire, 2);
    will_be_called_count(LWLockRelease, 2);
    expect_any_count(LWLockAcquire, lockid, 2);
    expect_any_count(LWLockAcquire, mode, 2);
    expect_any_count(LWLockRelease, lockid, 2);

    gp_session_id = sessionId;
    /* First find the previously allocated session state */
    SessionState *foundSessionState =
        AcquireSessionState(sessionId, gp_sessionstate_loglevel);
    assert_true(foundSessionState->sessionId == sessionId);
    /*
     * It was pre-allocated and we incremented the pinCount
     * for finding it
     */
    assert_true(foundSessionState->pinCount > 1);

    /* Satisfy assertion */
    sessionStateInited = true;
    EXPECT_EREPORT(gp_sessionstate_loglevel);
    /* Undo for our search pinCount */
    SessionState_Shutdown();
    /* The pinCount should not drop to 0 as we just undid our own increment */
    assert_true(foundSessionState->pinCount >= 1);

    MySessionState = foundSessionState;
    sessionStateInited = true;

    /*
     * If we are releasing this SessionState permanently, we need to ensure
     * that RunawayCleaner_RunawayCleanupDoneForSession() will be called
     */
    if (foundSessionState->pinCount == 1)
    {
        will_be_called(RunawayCleaner_RunawayCleanupDoneForSession);
    }
    EXPECT_EREPORT(gp_sessionstate_loglevel);
    /* Undo one more to truly undo previously acquired one */
    SessionState_Shutdown();
}
/* * aocs_begin_headerscan() * * Verify that we are setting correct storage attributes (no * compression, no checksum) for scanning an existing column in ALTER * TABLE ADD COLUMN case. */ void test__aocs_begin_headerscan(void **state) { AOCSHeaderScanDesc desc; RelationData reldata; FormData_pg_appendonly pgappendonly; pgappendonly.checksum = true; reldata.rd_appendonly = &pgappendonly; FormData_pg_class pgclass; reldata.rd_rel = &pgclass; StdRdOptions opt; opt.blocksize = 8192 * 5; StdRdOptions *opts[1]; opts[0] = &opt; strncpy(&pgclass.relname.data[0], "mock_relation", 13); expect_value(RelationGetAttributeOptions, rel, &reldata); will_return(RelationGetAttributeOptions, &opts); expect_any(AppendOnlyStorageRead_Init, storageRead); expect_any(AppendOnlyStorageRead_Init, memoryContext); expect_any(AppendOnlyStorageRead_Init, maxBufferLen); expect_any(AppendOnlyStorageRead_Init, relationName); expect_any(AppendOnlyStorageRead_Init, title); expect_any(AppendOnlyStorageRead_Init, storageAttributes); /* * AppendOnlyStorageRead_Init assigns storageRead->storageAttributes. * will_assign_*() functions mandate a paramter as an argument. Here we * want to set selective members of a parameter. I don't know how this * can be achieved using cmockery. This test will be meaningful only when * we are able to set storageAttributes member of desc.ao_read. */ will_be_called(AppendOnlyStorageRead_Init); desc = aocs_begin_headerscan(&reldata, 0); assert_false(desc->ao_read.storageAttributes.compress); assert_int_equal(desc->colno, 0); }
/*
 * Mocked object initializations required for dispatchPlan.
 */
void _init_cdbdisp_dispatchPlan(QueryDesc *queryDesc)
{
#ifdef USE_ASSERT_CHECKING
    /* two Asserts fire along this path in assert-enabled builds */
    _ExceptionalCondition( );
    _ExceptionalCondition( );
#endif
    /* minimal estate/plannedstmt so dispatchPlan has something to walk */
    queryDesc->estate = (struct EState *)palloc0(sizeof(struct EState));
    queryDesc->estate->es_sliceTable =
        (struct SliceTable *) palloc0(sizeof(struct SliceTable));
    queryDesc->operation = CMD_NOTHING;
    queryDesc->plannedstmt = (PlannedStmt *)palloc0(sizeof(PlannedStmt));

    will_be_called(clear_relsize_cache);
    /* dispatchPlan asks for the root slice; slice 0 suffices for the test */
    expect_any(RootSliceIndex, estate);
    will_return(RootSliceIndex,0);
}
/*
 * Checks if SessionState_Shutdown marks the session clean when the pinCount
 * drops to 0 (i.e., releasing the entry back to the freeList)
 */
void test__SessionState_Shutdown__MarksSessionCleanUponRelease(void **state)
{
    /* A single-entry array, so every acquisition must reuse that entry */
    CreateSessionStateArray(1);

    /* First acquisition is new; the next two must reuse the same entry */
    SessionState *first = AcquireSessionState(1, gp_sessionstate_loglevel);
    SessionState *reuseFirst = AcquireSessionState(1, gp_sessionstate_loglevel);
    SessionState *reuseAgain = AcquireSessionState(1, gp_sessionstate_loglevel);

    assert_true(reuseFirst == first && reuseAgain == first);
    assert_true(reuseFirst->pinCount == 3);

    /* pinCount is 3, so three releases are needed; do the first two here */
    ReleaseSessionState(1);
    assert_true(reuseFirst->pinCount == 2 &&
                AllSessionStateEntries->numSession == 1);
    ReleaseSessionState(1);
    assert_true(reuseFirst->pinCount == 1 &&
                AllSessionStateEntries->numSession == 1);

    will_be_called_count(LWLockAcquire, 1);
    will_be_called_count(LWLockRelease, 1);
    expect_any_count(LWLockAcquire, lockid, 1);
    expect_any_count(LWLockAcquire, mode, 1);
    expect_any_count(LWLockRelease, lockid, 1);

    /* Bypass assertion */
    MySessionState = first;
    sessionStateInited = true;

    /* final release must trigger the runaway-cleanup "done" notification */
    will_be_called(RunawayCleaner_RunawayCleanupDoneForSession);
    EXPECT_EREPORT(gp_sessionstate_loglevel);

    /* This will finally release the entry */
    SessionState_Shutdown();
    assert_true(AllSessionStateEntries->numSession == 0);

    DestroySessionStateArray();
}
/* * Checks if RunawayCleaner_RunawayCleanupDoneForProcess reactivates a process * if the deactivation process triggers cleanup for a pending runaway event */ void test__RunawayCleaner_RunawayCleanupDoneForProcess__UndoDeactivation(void **state) { InitFakeSessionState(2 /* activeProcessCount */, 2 /* cleanupCountdown */, RunawayStatus_PrimaryRunawaySession /* runawayStatus */, 2 /* pinCount */, 12345 /* vmem */); static fakeLatestRunawayVersion = 10; latestRunawayVersion = &fakeLatestRunawayVersion; /* * Set beginCleanupRunawayVersion to latestRunawayVersion and endCleanupRunawayVersion * to a smaller value to simulate an ongoing cleanup */ beginCleanupRunawayVersion = *latestRunawayVersion; endCleanupRunawayVersion = 1; /* Valid isRunawayDetector is necessary for Assert */ static uint32 fakeIsRunawayDetector = 1; isRunawayDetector = &fakeIsRunawayDetector; /* Make sure we became idle after a pending runaway event */ activationVersion = 1; deactivationVersion = *latestRunawayVersion + 1; /* Make sure the cleanup goes through */ vmemTrackerInited = true; isProcessActive = false; /* We must undo the idle state */ will_be_called(IdleTracker_ActivateProcess); RunawayCleaner_RunawayCleanupDoneForProcess(false /* ignoredCleanup */); /* The cleanupCountdown must be decremented as we cleaned up */ assert_true(MySessionState->cleanupCountdown == 1); /* We updated the endCleanupRunawayVersion to indicate that we finished cleanup */ assert_true(endCleanupRunawayVersion == beginCleanupRunawayVersion); }
/*
 * Checks if MemoryContextInit() calls MemoryAccounting_Reset()
 */
void test__MemoryContextInit__CallsMemoryAccountingReset(void **state)
{
    /* the mock itself is the assertion: it must be invoked exactly once */
    will_be_called(MemoryAccounting_Reset);
    MemoryContextInit();
}
/*
 * SUT: rest_request
 * call_rest throws an error while in HA mode
 * and the failover method finds an active IP so the second
 * call to call_rest does not throw an exception
 */
void test__rest_request__callRestThrowsHAFirstTime(void **state)
{
    GPHDUri *hadoop_uri = (GPHDUri*) palloc0(sizeof(GPHDUri));
    hadoop_uri->host = pstrdup("host1");
    hadoop_uri->port = pstrdup("port1");

    /* two-node HA configuration: the failover target is host2/port2 */
    NNHAConf *ha_nodes = (NNHAConf*) palloc0(sizeof(NNHAConf));
    hadoop_uri->ha_nodes = ha_nodes;
    ha_nodes->nodes = (char *[]){"host1", "host2"};
    ha_nodes->restports = (char *[]){"port1", "port2"};
    ha_nodes->numn = 2;

    ClientContext* client_context = (ClientContext*) palloc0(sizeof(ClientContext));
    char *restMsg = "empty message";

    /* first call_rest fails, raising FirstException */
    expect_any(call_rest, hadoop_uri);
    expect_any(call_rest, client_context);
    expect_any(call_rest, rest_msg);
    will_be_called_with_sideeffect(call_rest, &FirstException, NULL);

    /* the second call from ha_failover succeeds */
    expect_any(call_rest, hadoop_uri);
    expect_any(call_rest, client_context);
    expect_any(call_rest, rest_msg);
    will_be_called(call_rest);

    /* test */
    rest_request(hadoop_uri, client_context, restMsg);

    pfree(hadoop_uri);
    pfree(client_context);
}

/*
 * SUT: rest_request
 * call_rest throws an error while in HA mode
 * and the failover method finds an active IP so the second
 * call to call_rest is issued on the second IP. This call also throws
 * an exception - but this time the exception is not caught.
 */
void test__rest_request__callRestThrowsHASecondTime(void **state)
{
    GPHDUri *hadoop_uri = (GPHDUri*) palloc0(sizeof(GPHDUri));
    hadoop_uri->host = pstrdup("host1");
    hadoop_uri->port = pstrdup("port1");

    /* two-node HA configuration */
    NNHAConf *ha_nodes = (NNHAConf*) palloc0(sizeof(NNHAConf));
    hadoop_uri->ha_nodes = ha_nodes;
    ha_nodes->nodes = (char *[]){"host1", "host2"};
    ha_nodes->restports = (char *[]){"port1", "port2"};
    ha_nodes->numn = 2;

    ClientContext* client_context = (ClientContext*) palloc0(sizeof(ClientContext));
    char *restMsg = "empty message";

    /* first call_rest fails, raising FirstException */
    expect_any(call_rest, hadoop_uri);
    expect_any(call_rest, client_context);
    expect_any(call_rest, rest_msg);
    will_be_called_with_sideeffect(call_rest, &FirstException, NULL);

    /* the second call from ha_failover fails too, raising SecondException */
    expect_any(call_rest, hadoop_uri);
    expect_any(call_rest, client_context);
    expect_any(call_rest, rest_msg);
    will_be_called_with_sideeffect(call_rest, &SecondException, NULL);

    /* test */
    PG_TRY();
    {
        rest_request(hadoop_uri, client_context, restMsg);
    }
    PG_CATCH();
    {
        pfree(hadoop_uri->host);
        pfree(hadoop_uri->port);
        pfree(hadoop_uri);
        pfree(client_context);

        /*
         * NOTE(review): assigning the literal 1 to a MemoryContext pointer
         * looks like a hack to satisfy CopyErrorData() in this mocked
         * environment -- confirm this is intentional.
         */
        CurrentMemoryContext = 1;
        ErrorData *edata = CopyErrorData();

        /* Validate the type of expected error */
        assert_string_equal(edata->message, "second exception");
        /* the first exception was caught by rest_request() */
        return;
    }
    PG_END_TRY();

    /* reaching here means no exception propagated: fail the test */
    assert_true(false);
}

/*
 * SUT: rest_request
 * the first time call_rest is called we succeed, since the first IP is valid
 * No exceptions are thrown
 */
void test__rest_request__callRestHASuccessFromTheFirstCall(void **state)
{
    GPHDUri *hadoop_uri = (GPHDUri*) palloc0(sizeof(GPHDUri));
    hadoop_uri->host = pstrdup("host1");
    hadoop_uri->port = pstrdup("port1");

    /* two-node HA configuration */
    NNHAConf *ha_nodes = (NNHAConf*) palloc0(sizeof(NNHAConf));
    hadoop_uri->ha_nodes = ha_nodes;
    ha_nodes->nodes = (char *[]){"host1", "host2"};
    ha_nodes->restports = (char *[]){"port1", "port2"};
    ha_nodes->numn = 2;

    ClientContext* client_context = (ClientContext*) palloc0(sizeof(ClientContext));
    char *restMsg = "empty message";

    /* the single call_rest succeeds, so no failover occurs */
    expect_any(call_rest, hadoop_uri);
    expect_any(call_rest, client_context);
    expect_any(call_rest, rest_msg);
    will_be_called(call_rest);

    /* test */
    rest_request(hadoop_uri, client_context, restMsg);

    pfree(hadoop_uri->host);
    pfree(hadoop_uri->port);
    pfree(hadoop_uri);
    pfree(client_context);
}

/* Verify unit conversions of normalize_size() for B/KB/MB/GB/TB. */
void test__normalize_size(void **state)
{
    float4 result = normalize_size(10000000, "B");
    assert_int_equal(result, 10000000);

    /*
     * NOTE(review): the expected values below exceed 32-bit int range and
     * float4's 24-bit mantissa precision; presumably assert_int_equal's
     * argument type is wide enough and the rounding works out -- confirm.
     */
    result = normalize_size(10000000, "KB");
    assert_int_equal(result, 10240000000);

    result = normalize_size(500, "MB");
    assert_int_equal(result, 524288000);

    result = normalize_size(10, "GB");
    assert_int_equal(result, 10737418240);

    result = normalize_size(10000, "TB");
    assert_int_equal(result, 10995116277760000);
}

int main(int argc, char *argv[])
{
    cmockery_parse_arguments(argc, argv);

    const UnitTest tests[] = {
        unit_test(test__rest_request__callRestThrowsNoHA),
        unit_test(test__rest_request__callRestThrowsHAFirstTime),
        unit_test(test__rest_request__callRestThrowsHASecondTime),
        unit_test(test__rest_request__callRestHASuccessFromTheFirstCall),
        unit_test(test__normalize_size)
    };
    return run_tests(tests);
}
void test_build_http_headers(void **state) { /* setup mock data and expectations */ PxfInputData *input = (PxfInputData *) palloc0(sizeof(PxfInputData)); CHURL_HEADERS headers = (CHURL_HEADERS) palloc0(sizeof(CHURL_HEADERS)); GPHDUri *gphd_uri = (GPHDUri *) palloc0(sizeof(GPHDUri)); Relation rel = (Relation) palloc0(sizeof(RelationData)); ExtTableEntry ext_tbl; struct tupleDesc tuple; input->headers = headers; input->gphduri = gphd_uri; input->rel = rel; gphd_uri->host = "testhost"; gphd_uri->port = "101"; gphd_uri->data = "this is test data"; gphd_uri->uri = "testuri"; OptionData *option_data1 = (OptionData *) palloc0(sizeof(OptionData)); option_data1->key = "option1-key"; option_data1->value = "option1-value"; OptionData *option_data2 = (OptionData *) palloc0(sizeof(OptionData)); option_data2->key = "option2-key"; option_data2->value = "option2-value"; gphd_uri->options = list_make2(option_data1, option_data2); tuple.natts = 0; ext_tbl.fmtcode = 'c'; rel->rd_id = 56; rel->rd_att = &tuple; expect_value(GetExtTableEntry, relid, rel->rd_id); will_return(GetExtTableEntry, &ext_tbl); expect_headers_append(headers, "X-GP-FORMAT", TextFormatName); expect_headers_append(headers, "X-GP-ATTRS", "0"); expect_any(external_set_env_vars, extvar); expect_string(external_set_env_vars, uri, gphd_uri->uri); expect_value(external_set_env_vars, csv, false); expect_value(external_set_env_vars, escape, NULL); expect_value(external_set_env_vars, quote, NULL); expect_value(external_set_env_vars, header, false); expect_value(external_set_env_vars, scancounter, 0); struct extvar_t mock_extvar; mock_extvar.GP_USER = "******"; snprintf(mock_extvar.GP_SEGMENT_ID, sizeof(mock_extvar.GP_SEGMENT_ID), "SegId"); snprintf(mock_extvar.GP_SEGMENT_COUNT, sizeof(mock_extvar.GP_SEGMENT_COUNT), "10"); snprintf(mock_extvar.GP_XID, sizeof(mock_extvar.GP_XID), "20"); will_assign_memory(external_set_env_vars, extvar, &mock_extvar, sizeof(extvar_t)); will_be_called(external_set_env_vars); 
expect_headers_append(headers, "X-GP-USER", "user"); expect_headers_append(headers, "X-GP-SEGMENT-ID", "SegId"); expect_headers_append(headers, "X-GP-SEGMENT-COUNT", "10"); expect_headers_append(headers, "X-GP-XID", "20"); char alignment[3]; pg_ltoa(sizeof(char *), alignment); expect_headers_append(headers, "X-GP-ALIGNMENT", alignment); expect_headers_append(headers, "X-GP-URL-HOST", gphd_uri->host); expect_headers_append(headers, "X-GP-URL-PORT", gphd_uri->port); expect_headers_append(headers, "X-GP-DATA-DIR", gphd_uri->data); expect_string(normalize_key_name, key, "option1-key"); will_return(normalize_key_name, pstrdup("X-GP-OPTION1-KEY")); expect_headers_append(headers, "X-GP-OPTION1-KEY", "option1-value"); expect_string(normalize_key_name, key, "option2-key"); will_return(normalize_key_name, pstrdup("X-GP-OPTION2-KEY")); expect_headers_append(headers, "X-GP-OPTION2-KEY", "option2-value"); expect_headers_append(headers, "X-GP-URI", gphd_uri->uri); expect_headers_append(headers, "X-GP-HAS-FILTER", "0"); /* call function under test */ build_http_headers(input); /* no asserts as the function just calls to set headers */ /* cleanup */ pfree(rel); pfree(gphd_uri); pfree(headers); pfree(input); }
/*
 * A shared method to test proper deactivation during IdleTracker_Shutdown()
 * or a direct call to IdleTracker_DeactivateProcess() during regular
 * deactivation of an active process when a proper cleanup was not possible
 * (e.g., a transaction is in progress).
 *
 * testFunc: the deactivation entry point under test; expected to trip an
 * assertion (caught via PG_CATCH) when assert checking is enabled.
 */
static void
CheckForDeactivationWithoutCleanup(void (*testFunc)(void))
{
	static EventVersion fakeCurrentVersion = 10;
	CurrentVersion = &fakeCurrentVersion;

	InitFakeSessionState(1 /* activeProcessCount */,
			CLEANUP_COUNTDOWN_BEFORE_RUNAWAY /* cleanupCountdown */,
			RunawayStatus_NotRunaway /* runawayStatus */,
			1 /* pinCount */,
			0 /* vmem */);

	/* NOTE(review): oldVersion is captured but unused in this body */
	EventVersion oldVersion = *CurrentVersion;

	/* Ensure we have a pending runaway event */
	EventVersion fakeLatestRunawayVersion = *CurrentVersion - 1;
	latestRunawayVersion = &fakeLatestRunawayVersion;

	/* Neither activation nor deactivation has seen the current version yet */
	activationVersion = 0;
	deactivationVersion = 0;
	assert_true(*CurrentVersion != activationVersion);
	assert_true(*CurrentVersion != deactivationVersion);

	/*
	 * Set to true as we want to verify that it gets set to false
	 * once the testFunc() call returns
	 */
	isProcessActive = true;

	assert_true(MySessionState->activeProcessCount == 1);

	/*
	 * Deactivation must call RunawayCleaner_StartCleanup before finishing
	 * deactivation to check for cleanup requirement for any pending runaway
	 * event. The method is supposed to throw an exception, but for this test
	 * we are mocking the function without side effect. I.e., the function
	 * behaves as if a proper cleanup is not possible
	 */
	will_be_called(RunawayCleaner_StartCleanup);

#ifdef USE_ASSERT_CHECKING
	will_return(RunawayCleaner_IsCleanupInProgress, true);
	/*
	 * Expecting an exception as we are indicating an ongoing cleanup
	 * and yet returning to IdleTracker_DeactivateProcess
	 */
	EXPECT_EXCEPTION();
#endif

	PG_TRY();
	{
		testFunc();
#ifdef USE_ASSERT_CHECKING
		/* With assert checking on, testFunc() must not return normally */
		assert_false("Expected an assertion failure");
#endif
	}
	PG_CATCH();
	{
		/* Expected path under assert checking: the assertion failure lands here */
	}
	PG_END_TRY();

	/* Deactivation recorded the current version and deactivated the process */
	assert_true(activationVersion == 0);
	assert_true(deactivationVersion == *CurrentVersion);
	assert_true(isProcessActive == false);
	assert_true(MySessionState->activeProcessCount == 0);
}
/*
 * Checks CdbCheckDispatchResult is called when queryDesc
 * is not null (when shouldDispatch is true).
 * This test falls in PG_CATCH when SetupInterconnect
 * does not allocate queryDesc->estate->interconnect_context.
 * The test is successful if the error raised by SetupInterconnect is
 * re-thrown and caught here after CdbCheckDispatchResult (set up as an
 * expectation below) has been invoked during cleanup.
 */
void
test__ExecSetParamPlan__Check_Dispatch_Results(void **state)
{
	/* Set plan to explain. */
	SubPlanState *plan = makeNode(SubPlanState);
	plan->xprstate.expr = makeNode(SubPlanState);
	plan->planstate = makeNode(SubPlanState);
	plan->planstate->instrument = (Instrumentation *)palloc(sizeof(Instrumentation));
	plan->planstate->plan = makeNode(SubPlanState);
	EState *estate = CreateExecutorState();

	/* Assign mocked estate to plan. */
	((PlanState *)(plan->planstate))->state = estate;

	/* Re-use estate mocked object. Needed as input parameter for tested function */
	ExprContext *econtext = GetPerTupleExprContext(estate);

	/* Set QueryDescriptor input parameter for tested function */
	PlannedStmt *plannedstmt = (PlannedStmt *)palloc(sizeof(PlannedStmt));
	QueryDesc *queryDesc = (QueryDesc *)palloc(sizeof(QueryDesc));
	queryDesc->plannedstmt = plannedstmt;
	queryDesc->estate = (EState *)palloc(sizeof(EState));
	queryDesc->estate->es_sliceTable = (SliceTable *) palloc(sizeof(SliceTable));

	/* QueryDescriptor generated when shouldDispatch is true. */
	QueryDesc *internalQueryDesc = (QueryDesc *)palloc(sizeof(QueryDesc));
	internalQueryDesc->estate = (EState *)palloc(sizeof(EState));
	/* Added to force assertion on queryDesc->estate->interconnect_context; to fail */
	internalQueryDesc->estate->interconnect_context = NULL;
	internalQueryDesc->estate->es_sliceTable = (SliceTable *) palloc(sizeof(SliceTable));

	expect_any(CreateQueryDesc, plannedstmt);
	expect_any(CreateQueryDesc, sourceText);
	expect_any(CreateQueryDesc, snapshot);
	expect_any(CreateQueryDesc, crosscheck_snapshot);
	expect_any(CreateQueryDesc, dest);
	expect_any(CreateQueryDesc, params);
	expect_any(CreateQueryDesc, doInstrument);
	will_return(CreateQueryDesc, internalQueryDesc);

	/* Run as the dispatcher with a plan that requires parallel dispatch */
	Gp_role = GP_ROLE_DISPATCH;
	plan->planstate->plan->dispatch = DISPATCH_PARALLEL;

	will_be_called(isCurrentDtxTwoPhase);

	expect_any(cdbdisp_dispatchPlan, queryDesc);
	expect_any(cdbdisp_dispatchPlan, planRequiresTxn);
	expect_any(cdbdisp_dispatchPlan, cancelOnError);
	expect_any(cdbdisp_dispatchPlan, ds);
	will_be_called(cdbdisp_dispatchPlan);

	expect_any(SetupInterconnect, estate);
	/* Force SetupInterconnect to fail */
	will_be_called_with_sideeffect(SetupInterconnect, &_RETHROW, NULL);

	expect_any(cdbexplain_localExecStats, planstate);
	expect_any(cdbexplain_localExecStats, showstatctx);
	will_be_called(cdbexplain_localExecStats);

	/* The expectation under test: dispatch results must be checked on error */
	expect_any(CdbCheckDispatchResult, ds);
	expect_any(CdbCheckDispatchResult, waitMode);
	will_be_called(CdbCheckDispatchResult);

	expect_any(cdbexplain_recvExecStats, planstate);
	expect_any(cdbexplain_recvExecStats, dispatchResults);
	expect_any(cdbexplain_recvExecStats, sliceIndex);
	expect_any(cdbexplain_recvExecStats, showstatctx);
	will_be_called(cdbexplain_recvExecStats);

	will_be_called(TeardownSequenceServer);

	expect_any(TeardownInterconnect, transportStates);
	expect_any(TeardownInterconnect, mlStates);
	expect_any(TeardownInterconnect, forceEOS);
	will_be_called(TeardownInterconnect);

	/* Catch PG_RE_THROW(); after cleaning with CdbCheckDispatchResult */
	PG_TRY();
		ExecSetParamPlan(plan, econtext, queryDesc);
	PG_CATCH();
		assert_true(true);
	PG_END_TRY();
}
/* * Checks if RunawayCleaner_StartCleanup() starts the cleanup process if * all conditions are met (i.e., no commit is in progress and vmem tracker * is initialized) */ void test__RunawayCleaner_StartCleanup__StartsCleanupIfPossible(void **state) { InitFakeSessionState(2 /* activeProcessCount */, 2 /* cleanupCountdown */, RunawayStatus_PrimaryRunawaySession /* runawayStatus */, 2 /* pinCount */, 12345 /* vmem */); static fakeLatestRunawayVersion = 10; latestRunawayVersion = &fakeLatestRunawayVersion; *latestRunawayVersion = 10; /* * Set beginCleanupRunawayVersion to less than *latestRunawayVersion * to trigger a cleanup */ beginCleanupRunawayVersion = 1; endCleanupRunawayVersion = 1; isProcessActive = true; /* Make sure the cleanup goes through */ vmemTrackerInited = true; CritSectionCount = 0; InterruptHoldoffCount = 0; /* We need a valid gp_command_count to execute cleanup */ gp_command_count = 1; will_return(superuser, false); #ifdef FAULT_INJECTOR expect_value(FaultInjector_InjectFaultIfSet, identifier, RunawayCleanup); expect_value(FaultInjector_InjectFaultIfSet, ddlStatement, DDLNotSpecified); expect_value(FaultInjector_InjectFaultIfSet, databaseName, ""); expect_value(FaultInjector_InjectFaultIfSet, tableName, ""); will_be_called(FaultInjector_InjectFaultIfSet); #endif EXPECT_EREPORT(ERROR); PG_TRY(); { RunawayCleaner_StartCleanup(); assert_false("Cleanup didn't throw error"); } PG_CATCH(); { } PG_END_TRY(); assert_true(beginCleanupRunawayVersion == *latestRunawayVersion); /* We should not finish the cleanup as we errored out */ assert_true(endCleanupRunawayVersion == 1); /* cleanupCountdown shouldn't change as we haven't finished cleanup */ assert_true(MySessionState->cleanupCountdown == 2); /* * If we call RunawayCleaner_StartCleanup again for the same runaway event, * it should be a noop, therefore requiring no "will_be_called" setup */ RunawayCleaner_StartCleanup(); }
/* * Ensure that the column having the smallest on-disk segfile is * chosen for headerscan during ALTER TABLE ADD COLUMN operation. */ void test__column_to_scan(void **state) { List *drop_segno_list = NIL; RelationData reldata; AOCSFileSegInfo *segInfos[4]; int numcols = 3; int col; /* Empty segment, should be skipped over */ segInfos[0] = (AOCSFileSegInfo *) malloc(sizeof(AOCSFileSegInfo) + sizeof(AOCSVPInfoEntry)*numcols); segInfos[0]->segno = 3; segInfos[0]->state = AOSEG_STATE_DEFAULT; segInfos[0]->total_tupcount = 0; segInfos[0]->vpinfo.nEntry = 3; /* number of columns */ segInfos[0]->vpinfo.entry[0].eof = 200; segInfos[0]->vpinfo.entry[0].eof_uncompressed = 200; segInfos[0]->vpinfo.entry[1].eof = 100; segInfos[0]->vpinfo.entry[1].eof_uncompressed = 165; segInfos[0]->vpinfo.entry[2].eof = 50; segInfos[0]->vpinfo.entry[2].eof_uncompressed = 85; /* Valid segment, col=1 is the smallest */ segInfos[1] = (AOCSFileSegInfo *) malloc(sizeof(AOCSFileSegInfo) + sizeof(AOCSVPInfoEntry)*numcols); segInfos[1]->segno = 2; segInfos[1]->total_tupcount = 51; segInfos[1]->state = AOSEG_STATE_DEFAULT; segInfos[1]->vpinfo.nEntry = 3; /* number of columns */ segInfos[1]->vpinfo.entry[0].eof = 120; segInfos[1]->vpinfo.entry[0].eof_uncompressed = 200; segInfos[1]->vpinfo.entry[1].eof = 100; segInfos[1]->vpinfo.entry[1].eof_uncompressed = 100; segInfos[1]->vpinfo.entry[2].eof = 320; segInfos[1]->vpinfo.entry[2].eof_uncompressed = 400; /* AWATING_DROP segment, should be skipped over */ segInfos[2] = (AOCSFileSegInfo *) malloc(sizeof(AOCSFileSegInfo) + sizeof(AOCSVPInfoEntry)*numcols); segInfos[2]->segno = 3; segInfos[2]->state = AOSEG_STATE_AWAITING_DROP; segInfos[2]->total_tupcount = 15; segInfos[2]->vpinfo.nEntry = 3; /* number of columns */ segInfos[2]->vpinfo.entry[0].eof = 141; segInfos[2]->vpinfo.entry[0].eof_uncompressed = 200; segInfos[2]->vpinfo.entry[1].eof = 51; segInfos[2]->vpinfo.entry[1].eof_uncompressed = 65; segInfos[2]->vpinfo.entry[2].eof = 20; 
segInfos[2]->vpinfo.entry[2].eof_uncompressed = 80; /* Valid segment, col=0 is the smallest */ segInfos[3] = (AOCSFileSegInfo *) malloc(sizeof(AOCSFileSegInfo) + sizeof(AOCSVPInfoEntry)*numcols); segInfos[3]->segno = 1; segInfos[3]->state = AOSEG_STATE_USECURRENT; segInfos[3]->total_tupcount = 135; segInfos[3]->vpinfo.nEntry = 3; /* number of columns */ segInfos[3]->vpinfo.entry[0].eof = 60; segInfos[3]->vpinfo.entry[0].eof_uncompressed = 80; segInfos[3]->vpinfo.entry[1].eof = 500; segInfos[3]->vpinfo.entry[1].eof_uncompressed = 650; segInfos[3]->vpinfo.entry[2].eof = 100; segInfos[3]->vpinfo.entry[2].eof_uncompressed = 120; /* AOCSDrop should be called with segno 3 to drop */ drop_segno_list = lappend_int(drop_segno_list, 3); Gp_role = GP_ROLE_EXECUTE; expect_value(AOCSDrop, aorel, &reldata); expect_check(AOCSDrop, compaction_segno, check_segno_list, drop_segno_list); will_be_called(AOCSDrop); /* Column 1 (vpe index 1) has the smallest eof */ col = column_to_scan(segInfos, 4, numcols, &reldata); assert_int_equal(col, 1); }