/*
 * Allocate elements from currentPool until the pool spills into a fresh puddle,
 * validating the alignment, zero-initialization and writability of every
 * element handed out from the initial puddle.
 *
 * @param portLib the port library (unused here; kept for test-harness signature parity)
 * @param inputData the pool creation parameters being verified
 * @param currentPool the pool under test
 * @return the number of elements placed into the initial puddle on success, or:
 *         -1 if pool_newElement() failed,
 *         -2 if an element was not aligned as requested,
 *         -3 if an element byte was not zeroed,
 *         -4 if the puddle filled before the expected element count was reached.
 */
static int32_t testPoolNewElement(OMRPortLibrary *portLib, PoolInputData *inputData, J9Pool *currentPool)
{
	uint32_t allocatedCount = 0;
	uint32_t minimumExpected = (0 == inputData->numberElements) ? 1 : inputData->numberElements;
	uint32_t requiredAlignment = (0 == inputData->elementAlignment) ? MIN_GRANULARITY : inputData->elementAlignment;
	J9PoolPuddleList *puddleList = J9POOL_PUDDLELIST(currentPool);
	J9PoolPuddle *firstPuddle = J9POOLPUDDLELIST_NEXTAVAILABLEPUDDLE(puddleList);
	J9PoolPuddle *activePuddle = firstPuddle;

	/*
	 * Keep allocating until the "next available" puddle changes. With no
	 * deletions, it becomes NULL once the puddle in use is completely full,
	 * which also terminates the loop.
	 */
	while (activePuddle == firstPuddle) {
		uint8_t *bytes = NULL;
		uint32_t offset = 0;
		void *freshElement = pool_newElement(currentPool);

		if (NULL == freshElement) {
			return -1;
		}
		/* Aligned elements are expected by some pool clients, e.g. AVL trees. */
		if (0 != (((uintptr_t)freshElement) % requiredAlignment)) {
			return -2;
		}
		if (inputData->poolFlags & POOL_NO_ZERO) {
			/* Pool did not zero the element, so zero it here; expect a crash if the element is too small. */
			memset(freshElement, 0, inputData->structSize);
		}
		/* Every byte must read back as zero and then accept a write. */
		bytes = (uint8_t *)freshElement;
		for (offset = 0; offset < inputData->structSize; offset++) {
			if (0 != bytes[offset]) {
				return -3;
			}
			bytes[offset] = (0 == offset) ? FIRST_BYTE_MARKER : BYTE_MARKER;
		}
		if (inputData->structSize > 1) {
			bytes[inputData->structSize - 1] = LAST_BYTE_MARKER;
		}
		allocatedCount++;
		activePuddle = J9POOLPUDDLELIST_NEXTAVAILABLEPUDDLE(puddleList);
	}

	if (allocatedCount < minimumExpected) {
		return -4;
	}
	return allocatedCount;
}
/**
 * Initialize a new lock object.
 * A lock must be initialized before it may be used.
 *
 * Optionally registers a J9ThreadMonitorTracing record (from the extensions'
 * tracing pool) carrying a "[this-pointer] name" label for the lock.
 *
 * @param env current environment (used for the port library and GC extensions)
 * @param options spin-count configuration applied when custom spinlocks are in use
 * @param name Lock name, or NULL for an unnamed lock
 * @return TRUE on success
 * @note Creates a store barrier.
 */
bool
MM_LightweightNonReentrantLock::initialize(MM_EnvironmentBase *env, ModronLnrlOptions *options, const char * name)
{
	OMRPORT_ACCESS_FROM_OMRPORT(env->getPortLibrary());

	/* initialize variables in case constructor was not called */
	_initialized = false;
	_tracing = NULL;
	_extensions = env->getExtensions();

	if (NULL != _extensions) {
		J9Pool* tracingPool = _extensions->_lightweightNonReentrantLockPool;
		if (NULL != tracingPool) {
			/* The tracing pool is shared; serialize allocation under its mutex. */
			omrthread_monitor_enter(_extensions->_lightweightNonReentrantLockPoolMutex);
			_tracing = (J9ThreadMonitorTracing *)pool_newElement(tracingPool);
			omrthread_monitor_exit(_extensions->_lightweightNonReentrantLockPoolMutex);
			if (NULL == _tracing) {
				goto error_no_memory;
			}
			_tracing->monitor_name = NULL;
			if (NULL != name) {
				/* First omrstr_printf call sizes the formatted "[this] name" label; +1 for the NUL. */
				uintptr_t length = omrstr_printf(NULL, 0, "[%p] %s", this, name) + 1;
				if (length > MAX_LWNR_LOCK_NAME_SIZE) {
					/* NOTE(review): bailing out here leaves the pool element allocated above
					 * in the tracing pool — looks like a leak on this path; confirm whether
					 * pool elements are reclaimed elsewhere (e.g. on pool_kill). */
					goto error_no_memory;
				}
				/* Name storage is the member buffer, not a heap allocation. */
				_tracing->monitor_name = _nameBuf;
				if (NULL == _tracing->monitor_name) {
					goto error_no_memory;
				}
				omrstr_printf(_tracing->monitor_name, length, "[%p] %s", this, name);
			}
		}
	}

#if defined(OMR_ENV_DATA64)
	/* On 64-bit, the lock object itself must be pointer-aligned; misalignment is fatal. */
	if(0 != (((uintptr_t)this) % sizeof(uintptr_t))) {
		omrtty_printf("GC FATAL: LWNRL misaligned.\n");
		abort();
	}
#endif

#if defined(J9MODRON_USE_CUSTOM_SPINLOCKS)
	/* omrgc_spinlock_init returns 0 on success, hence the inverted ternary. */
	_initialized = omrgc_spinlock_init(&_spinlock) ? false : true;
	_spinlock.spinCount1 = options->spinCount1;
	_spinlock.spinCount2 = options->spinCount2;
	_spinlock.spinCount3 = options->spinCount3;
#else /* J9MODRON_USE_CUSTOM_SPINLOCKS */
	/* MUTEX_INIT returns non-zero on success. */
	_initialized = MUTEX_INIT(_mutex) ? true : false;
#endif /* J9MODRON_USE_CUSTOM_SPINLOCKS */

	return _initialized;

error_no_memory:
	return false;
}
/**
 * Allocate and initialize a new MM_EnvironmentBase instance.
 *
 * Storage comes from the extensions' environment pool; the object is
 * constructed in place and then initialized. On initialization failure the
 * partially-built instance is killed and NULL is returned.
 *
 * @param extensions GC extensions providing the environments pool
 * @param omrVMThread the VM thread this environment is bound to
 * @return the new environment, or NULL on allocation/initialization failure
 */
MM_EnvironmentBase *
MM_EnvironmentBase::newInstance(MM_GCExtensionsBase *extensions, OMR_VMThread *omrVMThread)
{
	MM_EnvironmentBase *result = NULL;
	void *memory = pool_newElement(extensions->environments);

	if (NULL != memory) {
		/* Placement-new into the pool element, then run full initialization. */
		result = new(memory) MM_EnvironmentBase(omrVMThread);
		if (!result->initialize(extensions)) {
			result->kill();
			result = NULL;
		}
	}
	return result;
}
/**
 * Verify that several pools created over a shared puddle list all observe the
 * same element count and iterate over the same elements.
 *
 * @param portLib the port library used for pool allocation callbacks
 * @return 0 on success, or a negative error code:
 *         -1 pool or element allocation failed,
 *         -2 element count did not track pool creation,
 *         -3 element counts diverged between pools,
 *         -4/-5 iteration produced different elements across pools,
 *         -6 element count changed before teardown,
 *         -7 shared puddle list survived pool_kill of all pools.
 */
int32_t
testPoolPuddleListSharing(OMRPortLibrary *portLib)
{
	uint32_t index = 0;
	uintptr_t numElements = 0;
	void *element = NULL;
	J9Pool *pool[NUM_POOLS_TO_SHARE_PUDDLE_LIST];
	pool_state state[NUM_POOLS_TO_SHARE_PUDDLE_LIST];
	int32_t result = 0;

	memset(pool, 0, sizeof(pool));

	for (index = 0; index < NUM_POOLS_TO_SHARE_PUDDLE_LIST; index++) {
		pool[index] = pool_new(sizeof(U_64), 0, 0, 0, OMR_GET_CALLSITE(), OMRMEM_CATEGORY_VM, (omrmemAlloc_fptr_t) sharedPuddleListAlloc, (omrmemFree_fptr_t) sharedPuddleListFree, portLib);
		if (NULL == pool[index]) {
			result = -1;
			goto error;
		}
		/* Create an element every time we create a pool. Check the result
		 * explicitly (previously ignored) so an allocation failure reports -1
		 * instead of surfacing as a misleading count mismatch below. */
		if (NULL == pool_newElement(pool[index])) {
			result = -1;
			goto error;
		}
		/*
		 * Since the underlying puddle list is shared, the number of elements in the pool
		 * should be the same as the 1 + index of the pool we're on.
		 */
		numElements = pool_numElements(pool[index]);
		if (numElements != (1 + index)) {
			result = -2;
			goto error;
		}
	}

	/* Verify again that numElements is the same for all pools. */
	numElements = pool_numElements(pool[0]);
	for (index = 1; index < NUM_POOLS_TO_SHARE_PUDDLE_LIST; index++) {
		if (numElements != pool_numElements(pool[index])) {
			result = -3;
			goto error;
		}
	}

	/* Iterate over the elements in each pool, and verify that they match. */
	element = pool_startDo(pool[0], &state[0]);
	/* Check the first element. */
	for (index = 1; index < NUM_POOLS_TO_SHARE_PUDDLE_LIST; index++) {
		if (element != pool_startDo(pool[index], &state[index])) {
			result = -4;
			goto error;
		}
	}
	/* And all other elements. */
	while (element) {
		element = pool_nextDo(&state[0]);
		for (index = 1; index < NUM_POOLS_TO_SHARE_PUDDLE_LIST; index++) {
			if (element != pool_nextDo(&state[index])) {
				result = -5;
				goto error;
			}
		}
	}

	/* Now, kill the pools. */
	for (index = 0; index < NUM_POOLS_TO_SHARE_PUDDLE_LIST; index++) {
		if (numElements != pool_numElements(pool[index])) {
			result = -6;
			goto error;
		}
		pool_kill(pool[index]);
		pool[index] = NULL;
	}

	/* Killing every pool must also have released the shared puddle list. */
	if (sharedPuddleList != NULL) {
		result = -7;
		goto error;
	}

	return result;

error:
	for (index = 0; index < NUM_POOLS_TO_SHARE_PUDDLE_LIST; index++) {
		if (NULL != pool[index]) {
			pool_kill(pool[index]);
			pool[index] = NULL;
		}
	}
	return result;
}
/*
 * Register a listener function for the given (tagged) hook event.
 *
 * Under the common interface lock, walks the event's record list to find the
 * insertion position for this agentID (or reverse order when
 * J9HOOK_TAG_REVERSE_ORDER is set). An already-registered (function, userData)
 * pair just has its count bumped. Otherwise an invalidated record in a
 * position compatible with the agent ordering is recycled, or a new record is
 * allocated from the interface pool and linked in. A write barrier orders the
 * record-field stores before the store that makes the record visible
 * (validating its id / linking it into the list), so lock-free readers never
 * observe a partially-initialized record. Finally, a registration event is
 * dispatched outside the lock.
 *
 * Returns 0 on success, -1 if the event is disabled or allocation fails.
 */
static intptr_t
J9HookRegisterWithCallSitePrivate(struct J9HookInterface **hookInterface, uintptr_t taggedEventNum, J9HookFunction function, const char *callsite, void *userData, uintptr_t agentID)
{
	J9CommonHookInterface *commonInterface = (J9CommonHookInterface *)hookInterface;
	J9HookRegistrationEvent eventStruct;
	intptr_t rc = 0;
	/* Strip ordering/behavior tag bits to recover the raw event number. */
	uintptr_t eventNum = taggedEventNum & J9HOOK_EVENT_NUM_MASK;

	omrthread_monitor_enter(commonInterface->lock);

	if (HOOK_FLAGS(commonInterface, eventNum) & J9HOOK_FLAG_DISABLED) {
		rc = -1;
	} else {
		J9HookRecord *insertionPoint = NULL;
		J9HookRecord *emptyRecord = NULL;
		J9HookRecord *record = HOOK_RECORD(commonInterface, eventNum);

		/* at the end of the loop, insertionPoint will point to the last record which should be triggered before the one we're adding */
		while (record) {
			if ((taggedEventNum & J9HOOK_TAG_REVERSE_ORDER) ? record->agentID >= agentID : record->agentID <= agentID) {
				insertionPoint = record;
			}
			if (!HOOK_IS_VALID_ID(record->id)) {
				/* Invalid id marks a deregistered record; remember it as a recycling candidate if the ordering permits. */
				if ((taggedEventNum & J9HOOK_TAG_REVERSE_ORDER) ? record->agentID >= agentID : record->agentID <= agentID) {
					emptyRecord = record;
				}
			} else if ((record->function == function) && (record->userData == userData)) {
				/* this listener is already registered */
				++(record->count);
				omrthread_monitor_exit(commonInterface->lock);
				return 0;
			}
			record = record->next;
		}

		/*
		 * Re-use the empty record if it is in a legitimate position for the requested agent.
		 * (It is a legitimate position if all records before this position have the same or lower agent IDs (tested previously)
		 * and all records after this position have the same or higher IDs)
		 */
		if ((emptyRecord != NULL) && ((emptyRecord->next == NULL) || ((taggedEventNum & J9HOOK_TAG_REVERSE_ORDER) ? (emptyRecord->next->agentID <= agentID) : (emptyRecord->next->agentID >= agentID))) ) {
			emptyRecord->function = function;
			emptyRecord->callsite = callsite;
			emptyRecord->userData = userData;
			emptyRecord->count = 1;
			emptyRecord->agentID = agentID;
			/* Publish the fields above before re-validating the id, for lock-free readers. */
			VM_AtomicSupport::writeBarrier();
			emptyRecord->id = HOOK_VALID_ID(emptyRecord->id);
			HOOK_FLAGS(commonInterface, eventNum) |= J9HOOK_FLAG_HOOKED | J9HOOK_FLAG_RESERVED;
		} else {
			record = (J9HookRecord *)pool_newElement(commonInterface->pool);
			if (record == NULL) {
				rc = -1;
			} else {
				/* Link after insertionPoint, or at the head when no predecessor qualifies. */
				if (insertionPoint == NULL) {
					record->next = HOOK_RECORD(commonInterface, eventNum);
				} else {
					record->next = insertionPoint->next;
				}
				record->function = function;
				record->callsite = callsite;
				record->userData = userData;
				record->count = 1;
				record->id = HOOK_INITIAL_ID;
				record->agentID = agentID;
				/* Publish the fields above before making the record reachable from the list. */
				VM_AtomicSupport::writeBarrier();
				if (insertionPoint == NULL) {
					HOOK_RECORD(commonInterface, eventNum) = record;
				} else {
					insertionPoint->next = record;
				}
				HOOK_FLAGS(commonInterface, eventNum) |= J9HOOK_FLAG_HOOKED | J9HOOK_FLAG_RESERVED;
			}
		}
	}

	omrthread_monitor_exit(commonInterface->lock);

	/* report the registration event */
	eventStruct.eventNum = eventNum;
	eventStruct.function = function;
	eventStruct.userData = userData;
	eventStruct.isRegistration = 1;
	eventStruct.agentID = agentID;
	(*hookInterface)->J9HookDispatch(hookInterface, J9HOOK_REGISTRATION_EVENT, &eventStruct);

	return rc;
}