// Emit a NEW_AUX_SAMPLE record for a fresh allocation of `size` bytes and
// return the uid assigned to it (0 if nothing was written).  Record layout:
// raw-sample header, uid, object address, placeholder type slot, size.
// recordAllocationInfo() may later rewind to this record (via
// lastAllocSample) and rewrite it as a NEW_OBJECT_SAMPLE once the object's
// type/vtable is known.  `callback_ok` is forwarded to sampleSpaceCheck().
uint64 Sampler::recordAllocationSample(const void* item, uint64 size, bool callback_ok)
{
    AvmAssertMsg(sampling(), "How did we get here if sampling is disabled?");
    if(!samplingNow)
        return 0;
    if(!samplingAllAllocs)
        return 0;
    if(!sampleSpaceCheck(callback_ok))
        return 0;
    (void)item;
    // Remember where this record starts so recordAllocationInfo() can
    // rewrite it in place.
    lastAllocSample = currentSample;
    writeRawSample(NEW_AUX_SAMPLE);
    uint64 uid = allocId++;
    // Map the allocation address to its uid so the matching deallocation
    // sample can be correlated later (see recordDeallocationSample()).
    uids.add(item, (void*)uid);
    write(currentSample, uid);
    write(currentSample, item);
    // Type/vtable is not known yet; recordAllocationInfo() fills this slot.
    write(currentSample, (uintptr)0);
    write(currentSample, size);
    AvmAssertMsg((uintptr)currentSample % 4 == 0, "Alignment should have occurred at end of raw sample.\n");
    numSamples++;
    return uid;
}
// Attach type information to the most recent allocation sample for `obj`,
// rewriting the NEW_AUX_SAMPLE emitted by recordAllocationSample() into a
// NEW_OBJECT_SAMPLE in place.  Returns the uid of the (rewritten) sample.
uint64 Sampler::recordAllocationInfo(AvmPlusScriptableObject *obj, uintptr typeOrVTable)
{
    AvmAssertMsg(sampling(), "How did we get here if sampling is disabled?");
    if(!samplingNow)
        return 0;
    if( !samplingAllAllocs )
    {
        // Turn on momentarily to record the alloc for this object.
        samplingAllAllocs = true;
        recordAllocationSample(obj, 0);
        samplingAllAllocs = false;
    }
    // Decode the sample we are about to rewrite, then reset old_sample to
    // its start (readSample advances the cursor past the record).
    byte* old_sample = lastAllocSample;
    Sample s;
    readSample(old_sample, s);
    old_sample = lastAllocSample;
    if(typeOrVTable < 7 && core->codeContext() && core->codeContext()->domainEnv())
    {
        // Small values are presumably type tags rather than real vtable
        // pointers -- TODO confirm; OR in the toplevel so the reader can
        // recover the context the object was created in.
        typeOrVTable |= (uintptr)core->codeContext()->domainEnv()->toplevel();
    }
    AvmAssertMsg(s.sampleType == NEW_AUX_SAMPLE, "Sample stream corrupt - can only add info to an AUX sample.\n");
    AvmAssertMsg(s.ptr == (void*)obj, "Sample stream corrupt - last sample is not for same object.\n");
    // Temporarily rewind the write cursor to the start of the old record.
    byte* pos = currentSample;
    currentSample = old_sample;
    // Rewrite the sample as a NEW_OBJECT_SAMPLE
    writeRawSample(NEW_OBJECT_SAMPLE);
    write(currentSample, s.id);
    AvmAssertMsg( ptrSamples->get(obj)==0, "Missing dealloc sample - same memory alloc'ed twice.\n");
    // Remember where the object pointer lives in the stream so
    // recordDeallocationSample() can null it out when the object is freed.
    ptrSamples->add(obj, currentSample);
    write(currentSample, s.ptr);
    write(currentSample, typeOrVTable);
    write(currentSample, s.alloc_size);
    AvmAssertMsg((uintptr)currentSample % 4 == 0, "Alignment should have occurred at end of raw sample.\n");
    // Restore the write cursor to the end of the stream.
    currentSample = pos;
    return s.id;
}
// Record a single RAW_SAMPLE (stack trace only) for the current call stack.
// No-op when sampling is paused, there is no call stack to capture, or the
// sample buffer is out of space.
void Sampler::sample()
{
    AvmAssertMsg(sampling(), "How did we get here if sampling is disabled?");

    // Guard clauses: each precondition bails out independently.
    if (!samplingNow)
        return;
    if (!core->callStack)
        return;
    if (!sampleSpaceCheck())
        return;

    writeRawSample(RAW_SAMPLE);
    ++numSamples;
}
// Record that `item` was freed: write a DELETED_OBJECT_SAMPLE (if the
// allocation was known and there is room), null out the object pointer in
// the original new-object record so readers don't follow a dangling
// address, and drop the bookkeeping entries for `item`.
void Sampler::recordDeallocationSample(const void* item, uint64 size)
{
    AvmAssertMsg(sampling(), "How did we get here if sampling is disabled?");
    AvmAssert(item != 0);
    // recordDeallocationSample doesn't honor the samplingNow flag
    // this is to avoid dropping deleted object samples when sampling is paused.
    uint64 uid = (uint64)uids.get(item);
    // If we didn't find a UID then this wasn't memory that the sampler knew was allocated
    if(uid && sampleSpaceCheck(false))
    {
        // if( !uid )
        // uid = (uint64)-1;
        writeRawSample(DELETED_OBJECT_SAMPLE);
        write(currentSample, uid);
        write(currentSample, size);
        numSamples++;
        AvmAssertMsg((uintptr)currentSample % 4 == 0, "Alignment should have occurred at end of raw sample.\n");
    }
    // Nuke the ptr in the sample stream for the newobject sample
    if( samples )
    {
        byte* oldptr = 0;
        if( (oldptr = (byte*)ptrSamples->get(item)) != 0 )
        {
#ifdef _DEBUG
            // Sanity check: the recorded stream position must still point at
            // `item`; rewind so the write below lands on the same slot.
            void* oldval = 0;
            read(oldptr, oldval);
            AvmAssertMsg(oldval==item, "Sample stream corrupt, dealloc doesn't point to correct address");
            rewind(oldptr, sizeof(void*));
#endif
            write(oldptr, (void*)0);
            ptrSamples->remove(item);
        }
    }
    if(uid)
        uids.remove(item);
}
// Verify that every operand of `phiInstruction` lives in the basic block
// named by its corresponding incoming edge.  Asserts (with a formatted
// diagnostic) on the first mismatch; always returns true in builds where
// assertions are disabled, matching the original behavior.
//
// Fixes vs. previous version: the unused `parentBlock` local is gone, and
// the diagnostic is only formatted when the check actually fails instead of
// on every loop iteration.
bool ConsistencyChecker::phiOperandsExistsInPredecessorBlock(PhiInstruction* phiInstruction)
{
    int numberOfOperands = phiInstruction->numberOfOperands();
    for (int i = 0; i < numberOfOperands; i++)
    {
        BasicBlock* operandBlock = phiInstruction->getIncomingEdge(i);
        TessaInstruction* phiOperand = phiInstruction->getOperand(operandBlock);
        BasicBlock* phiOperandBlock = phiOperand->getInBasicBlock();
        if (operandBlock != phiOperandBlock)
        {
            // Build the message only on failure -- snprintf per iteration is
            // wasted work when the IR is consistent.
            char errorMessage[256];
            VMPI_snprintf(errorMessage, sizeof(errorMessage),
                "Phi %d has operand %d with incoming block %d, but operand does not exist in block\n\n",
                phiInstruction->getValueId(), phiOperand->getValueId(), operandBlock->getBasicBlockId());
            AvmAssertMsg(operandBlock == phiOperandBlock, errorMessage);
        }
    }
    return true;
}
// Timer-driven variant of sample(): on each tick, write as many RAW_SAMPLE
// records as full sampling periods have elapsed since the previous check,
// each with a back-dated timestamp, so the stream keeps a steady
// one-sample-per-period cadence even when timer ticks are delayed.
void Sampler::sample()
{
    AvmAssertMsg(sampling(), "How did we get here if sampling is disabled?");
    if(!samplingNow)
        return;
    uint64_t nowMicros = this->nowMicros();
    const uint64_t sampleFrequencyMicros = SAMPLE_FREQUENCY_MILLIS * 1000;
    if (takeSample)
    {
        if (core->callStack)
        {
            // We may want to write more than one sample. E.g. if 5.5 milliseconds have
            // passed, we'll write 5 samples.
            int sampleCount = 0;
            if (lastSampleCheckMicros != 0)
                sampleCount = (int) ((nowMicros - lastSampleCheckMicros) / sampleFrequencyMicros);
            // First tick ever (lastSampleCheckMicros == 0) or a short
            // interval still produces at least one sample.
            if (sampleCount <= 0)
                sampleCount = 1;
            for (int sampleNum = sampleCount-1; sampleNum >= 0; sampleNum--)
            {
                if (!sampleSpaceCheck())
                    break;
                // We artificially manufacture a different time for each sample.
                uint64_t sampleTimeMicros = nowMicros - (sampleNum * sampleFrequencyMicros);
                writeRawSample(RAW_SAMPLE, sampleTimeMicros);
                numSamples++;
            }
        }
    }
    // Even if the callstack was empty, don't take another sample until the next timer tick.
    takeSample = 0;
    // Don't just set lastSampleCheckMicros equal to nowMicros -- we want to keep the
    // sampling frequency as close to one per millisecond as we can.
    // (Advance by a whole number of periods so fractional remainders carry
    // over to the next tick.)
    uint64_t elapsed = nowMicros - lastSampleCheckMicros;
    lastSampleCheckMicros += (elapsed / sampleFrequencyMicros * sampleFrequencyMicros);
}
// Decode one sample record starting at `p` into `s`, advancing `p` past the
// record.  The layout mirrors what writeRawSample() and the record*Sample()
// writers emit: timestamp, type, optional stack trace, alignment padding,
// then type-specific payload.
void Sampler::readSample(byte *&p, Sample &s)
{
    VMPI_memset(&s, 0, sizeof(Sample));
    read(p, s.micros);
    read(p, s.sampleType);
    AvmAssertMsg(s.sampleType == RAW_SAMPLE || s.sampleType == NEW_OBJECT_SAMPLE || s.sampleType == DELETED_OBJECT_SAMPLE || s.sampleType == NEW_AUX_SAMPLE, "Sample stream corruption.\n");
    if(s.sampleType != DELETED_OBJECT_SAMPLE)
    {
        // Every sample except deletions carries a stack trace: a depth count
        // followed by depth * sizeof(StackTrace::Element) bytes of frames,
        // which we reference in place rather than copy.
        read(p, s.stack.depth);
        s.stack.trace = p;
#ifndef AVMPLUS_64BIT
        AvmAssert(sizeof(StackTrace::Element) == sizeof(MethodInfo *) + sizeof(Stringp) + sizeof(Stringp) + sizeof(int32_t));
#else
        // Extra int because of the structure padding
        AvmAssert(sizeof(StackTrace::Element) == sizeof(MethodInfo *) + sizeof(Stringp) + sizeof(Stringp) + sizeof(int32_t) + sizeof(int32_t));
#endif
        p += s.stack.depth * sizeof(StackTrace::Element);
    }
    // padding to keep 8 byte alignment
    align(p);
    if(s.sampleType != Sampler::RAW_SAMPLE)
    {
        read(p, s.id);
        if(s.sampleType == Sampler::NEW_OBJECT_SAMPLE || s.sampleType == Sampler::NEW_AUX_SAMPLE)
        {
            read(p, s.ptr);
            read(p, s.typeOrVTable);
            read(p, s.alloc_size);
        }
        else
        {
            // DELETED_OBJECT_SAMPLE: only the freed size follows the id.
            read(p, s.size);
        }
    }
}
// Return this object's property hashtable, initializing it lazily on first
// use.  Ordinary objects embed an InlineHashtable at the traits' hashtable
// offset; Dictionary objects instead store a HeapHashtable* there.
InlineHashtable* ScriptObject::getTable() const
{
    AvmAssert(vtable->traits->getHashtableOffset() != 0);
    // Anonymous union: view the bytes at the hashtable offset either as an
    // InlineHashtable (ordinary objects) or as a HeapHashtable* (dictionaries).
    union {
        uint8_t* p;
        InlineHashtable* iht;
        HeapHashtable** hht;
    };
    p = (uint8_t*)this + vtable->traits->getHashtableOffset();
    if(!vtable->traits->isDictionary())
    {
        // Lazily construct the inline table on first access; const_cast is
        // needed because initialization mutates the logically-const object.
        if (iht->needsInitialize())
            const_cast<ScriptObject*>(this)->initHashtable();
        return iht;
    }
    else
    {
        //DictionaryObjects store pointer to HeapHashtable at
        //the hashtable offset
        InlineHashtable *ihp = (*hht)->get_ht();
        AvmAssertMsg(ihp != NULL, "Illegal to call getTable before Dictionary init is called");
        return ihp;
    }
}