/*
 * Checks if RedZoneHandler_ShmemInit() properly initializes the global variables
 * as the postmaster
 */
void
test__RedZoneHandler_ShmemInit__InitializesGlobalVarsWhenPostmaster(void **state)
{
	vmemTrackerInited = false;
	IsUnderPostmaster = false;

	/* Assign weird value to test the re-initialization */
	fakeIsRunawayDetector = 1234;
	isRunawayDetector = NULL;

	/*
	 * RedZoneHandler_ShmemInit() is called twice in this test, so queue two
	 * ShmemInitStruct() expectations.  Both report *foundPtr = false (i.e.,
	 * the postmaster is creating the struct) and hand back the address of
	 * our fake shared-memory variable.
	 */
	expect_any_count(ShmemInitStruct, name, 2);
	expect_any_count(ShmemInitStruct, size, 2);
	expect_any_count(ShmemInitStruct, foundPtr, 2);
	will_assign_value(ShmemInitStruct, foundPtr, false);
	will_assign_value(ShmemInitStruct, foundPtr, false);
	will_return_count(ShmemInitStruct, &fakeIsRunawayDetector, 2);

	/*
	 * When vmem limit is not activated or runaway_detector_activation_percent is
	 * set to 0, red zone should be very high (i.e., red-zone will be disabled).
	 * Note, it doesn't matter what runaway_detector_activation_percent is set for
	 * this test, as the VmemTracker_ConvertVmemMBToChunks is returning 0.
	 */
	will_return(VmemTracker_ConvertVmemMBToChunks, 0);
	expect_any(VmemTracker_ConvertVmemMBToChunks, mb);

	RedZoneHandler_ShmemInit();

	/*
	 * As the creating process, the detector flag must now point into (fake)
	 * shared memory and must have been reset from its weird 1234 value to 0.
	 */
	assert_true(isRunawayDetector == &fakeIsRunawayDetector);
	assert_true(redZoneChunks == INT32_MAX);
	assert_true(*isRunawayDetector == 0);

	/*
	 * When the activation percent is set to 100, we will not even attempt calculating
	 * the redZoneChunks and instead assign INT32_MAX directly. Note, we don't even
	 * call VmemTracker_ConvertVmemMBToChunks()
	 */
	runaway_detector_activation_percent = 100;
	redZoneChunks = 0;
	RedZoneHandler_ShmemInit();
	assert_true(redZoneChunks == INT32_MAX);
}
/*
 * Initializes the shared memory states of the vmem tracker. This
 * will also initialize the shared memory states of event version
 * provider, red zone handler and idle tracker.
 */
void
VmemTracker_ShmemInit()
{
	Assert(!vmemTrackerInited);

	/* Reset this process's local accounting before (re)attaching. */
	trackedVmemChunks = 0;
	maxVmemChunksTracked = 0;
	trackedBytes = 0;

	bool alreadyInShmem = false;

	/* Attach to (or, in the postmaster, create) the shared vmem counter. */
	segmentVmemChunks = (int32 *)
			ShmemInitStruct(SHMEM_AVAILABLE_VMEM,
							sizeof(int32),
							&alreadyInShmem);

	/* A child under the postmaster must find the struct already created. */
	Assert(alreadyInShmem || !IsUnderPostmaster);
	Assert(NULL != segmentVmemChunks);

	/* Only the creating process (postmaster) computes quotas and zeroes shmem. */
	if(!IsUnderPostmaster)
	{
		Assert(chunkSizeInBits == BITS_IN_MB);
		vmemChunksQuota = gp_vmem_protect_limit;

		/*
		 * If vmem is larger than 16GB (i.e., 16K MB), we make the chunks bigger
		 * so that the vmem limit in chunks unit is not larger than 16K.
		 */
		while(vmemChunksQuota > (16 * 1024))
		{
			chunkSizeInBits++;
			vmemChunksQuota >>= 1;
		}

		/*
		 * gp_vmem_limit_per_query is in kB. So, first convert it to MB, and then shift it
		 * to adjust for cases where we enlarged our chunk size
		 */
		maxChunksPerQuery = ceil(gp_vmem_limit_per_query / (1024.0 * (1 << (chunkSizeInBits - BITS_IN_MB))));

		/* Initialize the sub-systems */
		EventVersion_ShmemInit();
		RedZoneHandler_ShmemInit();
		IdleTracker_ShmemInit();

		*segmentVmemChunks = 0;
	}
	/*
	 * NOTE(review): this chunk appears truncated -- the function's closing
	 * brace (and any trailing statements, e.g. marking the tracker as
	 * initialized) is not visible in this view. Verify against the full file.
	 */
/*
 * Checks if RedZoneHandler_ShmemInit() properly initializes the global variables
 * when under postmaster
 */
void
test__RedZoneHandler_ShmemInit__InitializesUnderPostmaster(void **state)
{
	vmemTrackerInited = false;
	IsUnderPostmaster = true;

	/* Assign weird value to test the re-initialization */
	fakeIsRunawayDetector = 1234;
	isRunawayDetector = NULL;

	/*
	 * Exactly one ShmemInitStruct() call is expected here; *foundPtr = true
	 * simulates a child process attaching to a struct the postmaster already
	 * created, returning the address of our fake shared-memory variable.
	 */
	expect_any(ShmemInitStruct, name);
	expect_any(ShmemInitStruct, size);
	expect_any(ShmemInitStruct, foundPtr);
	will_assign_value(ShmemInitStruct, foundPtr, true);
	will_return(ShmemInitStruct, &fakeIsRunawayDetector);

	/* For testing that we don't change this value */
	redZoneChunks = 1234;

	RedZoneHandler_ShmemInit();

	/*
	 * Under the postmaster we only attach: the pointer is wired up, but
	 * neither redZoneChunks nor the shared detector flag is overwritten.
	 */
	assert_true(isRunawayDetector == &fakeIsRunawayDetector);
	assert_true(redZoneChunks == 1234);
	assert_true(*isRunawayDetector == 1234);
}
/*
 * Initializes the shared memory states of the vmem tracker. This
 * will also initialize the shared memory states of event version
 * provider, red zone handler and idle tracker.
 */
void
VmemTracker_ShmemInit()
{
	Assert(!vmemTrackerInited);

	/* Reset this process's local accounting before (re)attaching. */
	trackedVmemChunks = 0;
	maxVmemChunksTracked = 0;
	trackedBytes = 0;

	bool alreadyInShmem = false;

	/* Attach to (or, in the postmaster, create) the shared vmem counter. */
	segmentVmemChunks = (int32 *)
			ShmemInitStruct(SHMEM_AVAILABLE_VMEM,
							sizeof(int32),
							&alreadyInShmem);

	/* A child under the postmaster must find the struct already created. */
	Assert(alreadyInShmem || !IsUnderPostmaster);
	Assert(NULL != segmentVmemChunks);

	/* Second shared slot: the dynamic vmem quota (in chunks). */
	alreadyInShmem = false;
	segmentVmemQuotaChunks = (int32 *)
			ShmemInitStruct(SHMEM_DYNAMIC_VMEM_QUOTA,
							sizeof(int32),
							&alreadyInShmem);
	Assert(alreadyInShmem || !IsUnderPostmaster);
	Assert(NULL != segmentVmemQuotaChunks);

	/* Only the creating process (postmaster) computes quotas and seeds shmem. */
	if(!IsUnderPostmaster)
	{
		chunkSizeInBits = BITS_IN_MB;
		physicalMemQuotaInMB = VmemTracker_GetPhysicalMemQuotaInMB();
		physicalMemQuotaInChunks = physicalMemQuotaInMB;
		int32 vmemChunksQuota = gp_vmem_protect_limit;

		/*
		 * If vmem is larger than 16GB (i.e., 16K MB), we make the chunks bigger
		 * so that the vmem limit in chunks unit is not larger than 16K.
		 * Note: the loop condition here uses the physical-memory quota (unlike
		 * the variant keyed on vmemChunksQuota); both quotas shrink in lockstep.
		 */
		while(physicalMemQuotaInChunks > (16 * 1024))
		{
			chunkSizeInBits++;
			physicalMemQuotaInChunks >>= 1;
			vmemChunksQuota >>= 1;
		}

		/* There is at least one chunk if memory enforcement is enabled */
		if (gp_vmem_protect_limit > 0)
		{
			/* The right-shifts above could have rounded a small limit to 0. */
			vmemChunksQuota = Max(vmemChunksQuota, (int32)1);
		}

		/*
		 * gp_vmem_limit_per_query is in kB. So, first convert it to MB, and then shift it
		 * to adjust for cases where we enlarged our chunk size
		 */
		maxChunksPerQuery = ceil(gp_vmem_limit_per_query / (1024.0 * (1 << (chunkSizeInBits - BITS_IN_MB))));

		/* Initialize the sub-systems */
		EventVersion_ShmemInit();
		RedZoneHandler_ShmemInit();
		IdleTracker_ShmemInit();

		*segmentVmemChunks = 0;

		/* Initialize memory enforcement for dynamic resource manager */
		*segmentVmemQuotaChunks = vmemChunksQuota;
	}
	/*
	 * NOTE(review): this chunk appears truncated -- the function's closing
	 * brace (and any trailing statements, e.g. marking the tracker as
	 * initialized) is not visible in this view. Verify against the full file.
	 */