void ciMethodData::print_data_on(outputStream* st) {
  ResourceMark rm;
  ciProfileData* data;
  for (data = first_data(); is_valid(data); data = next_data(data)) {
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st);
  }
  st->print_cr("--- Extra data:");
  DataLayout* dp  = data_layout_at(data_size());
  DataLayout* end = data_layout_at(data_size() + extra_data_size());
  for (; dp < end; dp = methodDataOopDesc::next_extra(dp)) {
    if (dp->tag() == DataLayout::no_tag)  continue;
    if (dp->tag() == DataLayout::bit_data_tag) {
      data = new BitData(dp);
    } else {
      assert(dp->tag() == DataLayout::arg_info_data_tag, "must be BitData or ArgInfo");
      data = new ciArgInfoData(dp);
      dp = end; // ArgInfoData is at the end of extra data section.
    }
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st);
  }
}
/// \brief Checks if a type could have padding bytes.
bool ArgPromotion::isDenselyPacked(Type *type, const DataLayout &DL) {
  // There is no size information, so be conservative.
  if (!type->isSized())
    return false;

  // If the alloc size is not equal to the storage size, then there are padding
  // bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128.
  if (DL.getTypeSizeInBits(type) != DL.getTypeAllocSizeInBits(type))
    return false;

  if (!isa<CompositeType>(type))
    return true;

  // For homogeneous sequential types, check for padding within members.
  if (SequentialType *seqTy = dyn_cast<SequentialType>(type))
    return isa<PointerType>(seqTy) ||
           isDenselyPacked(seqTy->getElementType(), DL);

  // Check for padding within and between elements of a struct.
  StructType *StructTy = cast<StructType>(type);
  const StructLayout *Layout = DL.getStructLayout(StructTy);
  uint64_t StartPos = 0;
  for (unsigned i = 0, E = StructTy->getNumElements(); i < E; ++i) {
    Type *ElTy = StructTy->getElementType(i);
    if (!isDenselyPacked(ElTy, DL))
      return false;
    if (StartPos != Layout->getElementOffsetInBits(i))
      return false;
    StartPos += DL.getTypeAllocSizeInBits(ElTy);
  }

  return true;
}
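A minimal usage sketch, not part of the original pass: it builds a padded and a packed variant of { i8, i32 } and queries the helper above. The function name checkPacking and the assumption that isDenselyPacked is reachable here (e.g. as a public static member) are illustrative only.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

static void checkPacking(llvm::LLVMContext &Ctx, const llvm::DataLayout &DL) {
  llvm::Type *Fields[] = {llvm::Type::getInt8Ty(Ctx), llvm::Type::getInt32Ty(Ctx)};
  llvm::StructType *Padded = llvm::StructType::get(Ctx, Fields, /*isPacked=*/false);
  llvm::StructType *Packed = llvm::StructType::get(Ctx, Fields, /*isPacked=*/true);
  // { i8, i32 } carries 3 padding bytes before the i32 unless it is packed.
  bool DensePadded = ArgPromotion::isDenselyPacked(Padded, DL); // expected: false
  bool DensePacked = ArgPromotion::isDenselyPacked(Packed, DL); // expected: true
  (void)DensePadded;
  (void)DensePacked;
}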
// Translate a bci to its corresponding data, or NULL.
ciProfileData* ciMethodData::bci_to_data(int bci) {
  ciProfileData* data = data_before(bci);
  for ( ; is_valid(data); data = next_data(data)) {
    if (data->bci() == bci) {
      set_hint_di(dp_to_di(data->dp()));
      return data;
    } else if (data->bci() > bci) {
      break;
    }
  }
  // bci_to_extra_data(bci) ...
  DataLayout* dp  = data_layout_at(data_size());
  DataLayout* end = data_layout_at(data_size() + extra_data_size());
  for (; dp < end; dp = methodDataOopDesc::next_extra(dp)) {
    if (dp->tag() == DataLayout::no_tag) {
      _saw_free_extra_data = true;  // observed an empty slot (common case)
      return NULL;
    }
    if (dp->tag() == DataLayout::arg_info_data_tag) {
      break; // ArgInfoData is at the end of extra data section.
    }
    if (dp->bci() == bci) {
      assert(dp->tag() == DataLayout::bit_data_tag, "sane");
      return new ciBitData(dp);
    }
  }
  return NULL;
}
StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
  assert(!ST->isOpaque() && "Cannot get layout of opaque structs");
  StructAlignment = 0;
  StructSize = 0;
  NumElements = ST->getNumElements();

  // Loop over each of the elements, placing them in memory.
  for (unsigned i = 0, e = NumElements; i != e; ++i) {
    Type *Ty = ST->getElementType(i);
    unsigned TyAlign = ST->isPacked() ? 1 : DL.getABITypeAlignment(Ty);

    // Add padding if necessary to align the data element properly.
    if ((StructSize & (TyAlign-1)) != 0)
      StructSize = RoundUpToAlignment(StructSize, TyAlign);

    // Keep track of maximum alignment constraint.
    StructAlignment = std::max(TyAlign, StructAlignment);

    MemberOffsets[i] = StructSize;
    StructSize += DL.getTypeAllocSize(Ty); // Consume space for this data item
  }

  // Empty structures have alignment of 1 byte.
  if (StructAlignment == 0) StructAlignment = 1;

  // Add padding to the end of the struct so that it could be put in an array
  // and all array elements would be aligned correctly.
  if ((StructSize & (StructAlignment-1)) != 0)
    StructSize = RoundUpToAlignment(StructSize, StructAlignment);
}
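A small sketch of how the layout computed above is normally consumed through DataLayout::getStructLayout. The data-layout string and the helper name inspectStructLayout are assumptions for illustration; real code would take the DataLayout from the Module.

#include <cstdint>
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

static void inspectStructLayout(llvm::LLVMContext &Ctx) {
  // Assumed x86-64-like layout string.
  llvm::DataLayout DL("e-m:e-i64:64-f80:128-n8:16:32:64-S128");
  llvm::Type *Fields[] = {llvm::Type::getInt8Ty(Ctx), llvm::Type::getInt32Ty(Ctx)};
  llvm::StructType *STy = llvm::StructType::get(Ctx, Fields, /*isPacked=*/false);
  const llvm::StructLayout *SL = DL.getStructLayout(STy);
  // The i32 is padded out to offset 4 and the struct size rounds up to 8 bytes.
  uint64_t SecondFieldOffset = SL->getElementOffset(1); // 4
  uint64_t TotalSize         = SL->getSizeInBytes();    // 8
  (void)SecondFieldOffset;
  (void)TotalSize;
}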
bool MemorySafetyChecker::runOnModule(Module& m) {
  DataLayout dataLayout(&m);  // stack object: the original heap allocation was never freed
  Function* memorySafetyFunction = m.getFunction(Naming::MEMORY_SAFETY_FUNCTION);
  assert(memorySafetyFunction != NULL && "Couldn't find memory safety function");
  for (auto& F : m) {
    if (!Naming::isSmackName(F.getName())) {
      for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
        Value* pointer = NULL;
        if (LoadInst* li = dyn_cast<LoadInst>(&*I)) {
          pointer = li->getPointerOperand();
        } else if (StoreInst* si = dyn_cast<StoreInst>(&*I)) {
          pointer = si->getPointerOperand();
        }
        if (pointer) {
          // Find the exact type of the second argument to our memory safety function.
          Type* sizeType = memorySafetyFunction->getFunctionType()->getParamType(1);
          PointerType* pointerType = cast<PointerType>(pointer->getType());
          uint64_t storeSize = dataLayout.getTypeStoreSize(pointerType->getPointerElementType());
          Value* size = ConstantInt::get(sizeType, storeSize);
          Type *voidPtrTy = PointerType::getUnqual(IntegerType::getInt8Ty(F.getContext()));
          CastInst* castPointer = CastInst::Create(Instruction::BitCast, pointer, voidPtrTy, "", &*I);
          Value* args[] = {castPointer, size};
          CallInst::Create(memorySafetyFunction, ArrayRef<Value*>(args, 2), "", &*I);
        }
      }
    }
  }
  return true;
}
/**
 * Get size of allocated type.
 * @param I instruction.
 * @param M module.
 * @return size of allocated type.
 */
uint64_t getAllocatedSize(Instruction *I, Module *M) {
  // Stack object: the original heap-allocated DataLayout leaked on the
  // early return for unsized types.
  DataLayout DL(M);
  Type *Ty;
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
    Ty = AI->getAllocatedType();
  } else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Ty = SI->getOperand(0)->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Ty = LI->getType();
  } else {
    return 0;
  }
  if (!Ty->isSized())
    return 0;
  return DL.getTypeAllocSize(Ty);
}
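A hedged usage sketch for the helper above: summing the sizes of all allocas in a function. The wrapper name totalAllocaBytes is an assumption for illustration.

#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

static uint64_t totalAllocaBytes(llvm::Function &F, llvm::Module &M) {
  uint64_t Total = 0;
  // Count only allocas; other instruction kinds would report their own sizes.
  for (llvm::Instruction &I : llvm::instructions(F))
    if (llvm::isa<llvm::AllocaInst>(&I))
      Total += getAllocatedSize(&I, &M); // helper defined above
  return Total;
}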
/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT,
                                              const TargetLibraryInfo *TLI) {
  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.
  Type *VTy = V->getType();
  Type *Ty = VTy->getPointerElementType();

  // Require ABI alignment for loads without alignment specification
  if (Align == 0)
    Align = DL.getABITypeAlignment(Ty);

  if (Ty->isSized()) {
    APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
    const Value *BV = V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

    if (Offset.isNonNegative())
      if (isDereferenceableFromAttribute(BV, Offset, Ty, DL, CtxI, DT, TLI) &&
          isAligned(BV, Offset, Align, DL))
        return true;
  }

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI,
                                              Visited);
}
static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound,
                                  const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}
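A brief sketch of how such a helper is typically consumed: comparing the constant byte offsets of two GEPs and bailing out on any variable index. The wrapper name constantOffsetDelta and the choice of starting index are assumptions for illustration.

static bool constantOffsetDelta(const llvm::GEPOperator *A,
                                const llvm::GEPOperator *B,
                                const llvm::DataLayout &DL, int64_t &Delta) {
  bool VariableIdx = false;
  // Start at operand index 1, i.e. the first GEP index after the pointer.
  int64_t OffA = GetOffsetFromIndex(A, 1, VariableIdx, DL);
  int64_t OffB = GetOffsetFromIndex(B, 1, VariableIdx, DL);
  if (VariableIdx)
    return false; // a non-constant index makes the offsets unknown
  Delta = OffA - OffB;
  return true;
}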
static void getNameWithPrefixx(raw_ostream &OS, const Twine &GVName,
                               Mangler::ManglerPrefixTy PrefixTy,
                               const DataLayout &DL, char Prefix) {
  SmallString<256> TmpData;
  StringRef Name = GVName.toStringRef(TmpData);
  assert(!Name.empty() && "getNameWithPrefix requires non-empty name");

  // No need to do anything special if the global has the special "do not
  // mangle" flag in the name.
  if (Name[0] == '\1') {
    OS << Name.substr(1);
    return;
  }

  if (PrefixTy == Mangler::Private)
    OS << DL.getPrivateGlobalPrefix();
  else if (PrefixTy == Mangler::LinkerPrivate)
    OS << DL.getLinkerPrivateGlobalPrefix();

  if (Prefix != '\0')
    OS << Prefix;

  // If this is a simple string that doesn't need escaping, just append it.
  OS << Name;
}
bool GEPOperator::accumulateConstantOffset(const DataLayout &DL,
                                           APInt &Offset) const {
  assert(Offset.getBitWidth() ==
             DL.getPointerBaseSizeInBits(getPointerAddressSpace()) &&
         "The offset must have exactly as many bits as our pointer.");

  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      return false;
    if (OpC->isZero())
      continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset += APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
      continue;
    }

    // For array or vector indices, scale the index by the size of the type.
    APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
    Offset += Index * APInt(Offset.getBitWidth(),
                            DL.getTypeAllocSize(GTI.getIndexedType()));
  }
  return true;
}
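A hedged caller-side sketch: the APInt accumulator is sized to the pointer width for the GEP's address space before calling accumulateConstantOffset, and a false return signals a variable index. The wrapper name tryGetConstantGEPOffset is an assumption, as is the premise that the pointer-width query used here matches the width the assertion above expects.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Operator.h"

static bool tryGetConstantGEPOffset(const llvm::GEPOperator *GEP,
                                    const llvm::DataLayout &DL,
                                    int64_t &ByteOffset) {
  // Size the accumulator to the pointer width of the GEP's address space.
  llvm::APInt Offset(DL.getPointerSizeInBits(GEP->getPointerAddressSpace()), 0);
  if (!GEP->accumulateConstantOffset(DL, Offset))
    return false; // some index was not a compile-time constant
  ByteOffset = Offset.getSExtValue();
  return true;
}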
void methodDataOopDesc::print_data_on(outputStream* st) {
  ResourceMark rm;
  ProfileData* data = first_data();
  for ( ; is_valid(data); data = next_data(data)) {
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st);
  }
  st->print_cr("--- Extra data:");
  DataLayout* dp  = extra_data_base();
  DataLayout* end = extra_data_limit();
  for (; dp < end; dp = next_extra(dp)) {
    // No need for "OrderAccess::load_acquire" ops,
    // since the data structure is monotonic.
    if (dp->tag() == DataLayout::no_tag)  continue;
    if (dp->tag() == DataLayout::bit_data_tag) {
      data = new BitData(dp);
    } else {
      assert(dp->tag() == DataLayout::arg_info_data_tag, "must be BitData or ArgInfo");
      data = new ArgInfoData(dp);
      dp = end; // ArgInfoData is at the end of extra data section.
    }
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st);
  }
}
void ciMethodData::dump_replay_data_extra_data_helper(outputStream* out,
                                                      int round, int& count) {
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  for (;dp < end; dp = MethodData::next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      return;
    case DataLayout::bit_data_tag:
      break;
    case DataLayout::speculative_trap_data_tag: {
      ciSpeculativeTrapData* data = new ciSpeculativeTrapData(dp);
      ciMethod* m = data->method();
      if (m != NULL) {
        if (round == 0) {
          count++;
        } else {
          out->print(" %d ", (int)(dp_to_di(((address)dp) + in_bytes(ciSpeculativeTrapData::method_offset())) / sizeof(intptr_t)));
          m->dump_name_as_ascii(out);
        }
      }
      break;
    }
    default:
      fatal(err_msg("bad tag = %d", dp->tag()));
    }
  }
}
ciProfileData* ciMethodData::bci_to_extra_data(int bci, ciMethod* m,
                                               bool& two_free_slots) {
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();
  two_free_slots = false;
  for (;dp < end; dp = MethodData::next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::no_tag:
      _saw_free_extra_data = true;  // observed an empty slot (common case)
      two_free_slots = (MethodData::next_extra(dp)->tag() == DataLayout::no_tag);
      return NULL;
    case DataLayout::arg_info_data_tag:
      return NULL; // ArgInfoData is at the end of extra data section.
    case DataLayout::bit_data_tag:
      if (m == NULL && dp->bci() == bci) {
        return new ciBitData(dp);
      }
      break;
    case DataLayout::speculative_trap_data_tag: {
      ciSpeculativeTrapData* data = new ciSpeculativeTrapData(dp);
      // data->method() might be null if the MDO is snapshotted
      // concurrently with a trap
      if (m != NULL && data->method() == m && dp->bci() == bci) {
        return data;
      }
      break;
    }
    default:
      fatal(err_msg("bad tag = %d", dp->tag()));
    }
  }
  return NULL;
}
static KernelArg::Metadata getRuntimeMDForKernelArg(const DataLayout &DL,
    Type *T, KernelArg::Kind Kind, StringRef BaseTypeName = "",
    StringRef TypeName = "", StringRef ArgName = "", StringRef TypeQual = "",
    StringRef AccQual = "") {

  KernelArg::Metadata Arg;

  // Set ArgSize and ArgAlign.
  Arg.Size = DL.getTypeAllocSize(T);
  Arg.Align = DL.getABITypeAlignment(T);
  if (auto PT = dyn_cast<PointerType>(T)) {
    auto ET = PT->getElementType();
    if (PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && ET->isSized())
      Arg.PointeeAlign = DL.getABITypeAlignment(ET);
  }

  // Set ArgTypeName.
  Arg.TypeName = TypeName;

  // Set ArgName.
  Arg.Name = ArgName;

  // Set ArgIsVolatile, ArgIsRestrict, ArgIsConst and ArgIsPipe.
  SmallVector<StringRef, 1> SplitQ;
  TypeQual.split(SplitQ, " ", -1, false /* Drop empty entry */);

  for (StringRef KeyName : SplitQ) {
    auto *P = StringSwitch<uint8_t *>(KeyName)
      .Case("volatile", &Arg.IsVolatile)
      .Case("restrict", &Arg.IsRestrict)
      .Case("const",    &Arg.IsConst)
      .Case("pipe",     &Arg.IsPipe)
      .Default(nullptr);
    if (P)
      *P = 1;
  }

  // Set ArgKind.
  Arg.Kind = Kind;

  // Set ArgValueType.
  Arg.ValueType = getRuntimeMDValueType(T, BaseTypeName);

  // Set ArgAccQual.
  if (!AccQual.empty()) {
    Arg.AccQual = StringSwitch<KernelArg::AccessQualifer>(AccQual)
      .Case("read_only",  KernelArg::ReadOnly)
      .Case("write_only", KernelArg::WriteOnly)
      .Case("read_write", KernelArg::ReadWrite)
      .Default(KernelArg::AccNone);
  }

  // Set ArgAddrQual.
  if (auto *PT = dyn_cast<PointerType>(T)) {
    Arg.AddrQual = getRuntimeAddrSpace(static_cast<AMDGPUAS::AddressSpaces>(
        PT->getAddressSpace()));
  }

  return Arg;
}
LayoutedBlob::LayoutedBlob(const DataLayout & layout, Blob && blob)
    : m_layout(layout)
    , m_data(std::move(blob))
{
    // Query m_data here: `blob` has already been moved from, so its size is
    // unspecified at this point.
    AssertM(m_data.size() % layout.stride() == 0,
            "Layout can't be matched to Blob size");

    m_count = m_data.size() / layout.stride();
}
bool EfficiencySanitizer::instrumentLoadOrStore(Instruction *I,
                                                const DataLayout &DL) {
  IRBuilder<> IRB(I);
  bool IsStore;
  Value *Addr;
  unsigned Alignment;
  if (LoadInst *Load = dyn_cast<LoadInst>(I)) {
    IsStore = false;
    Alignment = Load->getAlignment();
    Addr = Load->getPointerOperand();
  } else if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
    IsStore = true;
    Alignment = Store->getAlignment();
    Addr = Store->getPointerOperand();
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    IsStore = true;
    Alignment = 0;
    Addr = RMW->getPointerOperand();
  } else if (AtomicCmpXchgInst *Xchg = dyn_cast<AtomicCmpXchgInst>(I)) {
    IsStore = true;
    Alignment = 0;
    Addr = Xchg->getPointerOperand();
  } else
    llvm_unreachable("Unsupported mem access type");

  Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
  const uint32_t TypeSizeBytes = DL.getTypeStoreSizeInBits(OrigTy) / 8;
  Value *OnAccessFunc = nullptr;

  // Convert 0 to the default alignment.
  if (Alignment == 0)
    Alignment = DL.getPrefTypeAlignment(OrigTy);

  if (IsStore)
    NumInstrumentedStores++;
  else
    NumInstrumentedLoads++;
  int Idx = getMemoryAccessFuncIndex(Addr, DL);
  if (Idx < 0) {
    OnAccessFunc = IsStore ? EsanUnalignedStoreN : EsanUnalignedLoadN;
    IRB.CreateCall(OnAccessFunc,
                   {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    ConstantInt::get(IntptrTy, TypeSizeBytes)});
  } else {
    if (ClInstrumentFastpath &&
        instrumentFastpath(I, DL, IsStore, Addr, Alignment)) {
      NumFastpaths++;
      return true;
    }
    if (Alignment == 0 || (Alignment % TypeSizeBytes) == 0)
      OnAccessFunc = IsStore ? EsanAlignedStore[Idx] : EsanAlignedLoad[Idx];
    else
      OnAccessFunc = IsStore ? EsanUnalignedStore[Idx] : EsanUnalignedLoad[Idx];
    IRB.CreateCall(OnAccessFunc,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  }
  return true;
}
ArgInfoData *methodDataOopDesc::arg_info() {
  DataLayout* dp  = extra_data_base();
  DataLayout* end = extra_data_limit();
  for (; dp < end; dp = next_extra(dp)) {
    if (dp->tag() == DataLayout::arg_info_data_tag)
      return new ArgInfoData(dp);
  }
  return NULL;
}
//
// Function: Wants8ByteAlignment ()
//
// Description:
//  Determine if an object of the specified type should be allocated on an
//  8-byte boundary.  This may either be required by the target platform or may
//  merely improve performance by aligning data the way the processor wants it.
//
// Inputs:
//  Ty   - The type of the object for which alignment should be tested.
//  Offs - The offset of the type within a derived type (e.g., a structure).
//         We will try to align a structure on an 8 byte boundary if one of its
//         elements can/needs to be.
//  TD   - A reference to the DataLayout pass.
//
// Return value:
//  true  - This type should be allocated on an 8-byte boundary.
//  false - This type does not need to be allocated on an 8-byte boundary.
//
// Notes:
//  FIXME: This is a complete hack for X86 right now.
//  FIXME: This code needs to be updated for x86-64.
//  FIXME: This code needs to handle LLVM first-class structures and vectors.
//
static bool Wants8ByteAlignment(Type *Ty, unsigned Offs, const DataLayout &TD) {
  //
  // If the user has requested this optimization to be turned off, don't bother
  // doing it.
  //
  if (DisableAlignOpt)
    return true;

  //
  // If this type is at an align-able offset within its larger data structure,
  // see if we should 8 byte align it.
  //
  if ((Offs & 7) == 0) {
    //
    // Doubles always want to be 8-byte aligned regardless of what DataLayout
    // claims.
    //
    if (Ty->isDoubleTy()) return true;

    //
    // If we are on a 64-bit system, we want to align 8-byte integers and
    // pointers.
    //
    if (TD.getPrefTypeAlignment(Ty) == 8)
      return true;
  }

  //
  // If this is a first-class data type, but it is located at an offset within
  // a structure that cannot be 8-byte aligned, then we cannot ever guarantee
  // to 8-byte align it.  Therefore, do not try to force it to 8-byte
  // alignment.
  //
  if (Ty->isFirstClassType())
    return false;

  //
  // If this is a structure or array type, check if any of its elements at
  // 8-byte alignment desire to have 8-byte alignment.  If so, then the entire
  // object wants 8-byte alignment.
  //
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TD.getStructLayout(STy);
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      if (Wants8ByteAlignment(STy->getElementType(i),
                              Offs+SL->getElementOffset(i), TD))
        return true;
    }
  } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
    return Wants8ByteAlignment(STy->getElementType(), Offs, TD);
  } else {
    errs() << *Ty << "\n";
    assert(0 && "Unknown type!");
  }

  return false;
}
ciArgInfoData *ciMethodData::arg_info() const {
  // Should be last, have to skip all traps.
  DataLayout* dp  = data_layout_at(data_size());
  DataLayout* end = data_layout_at(data_size() + extra_data_size());
  for (; dp < end; dp = methodDataOopDesc::next_extra(dp)) {
    if (dp->tag() == DataLayout::arg_info_data_tag)
      return new ciArgInfoData(dp);
  }
  return NULL;
}
ciArgInfoData *ciMethodData::arg_info() const {
  // Should be last, have to skip all traps.
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();
  for (; dp < end; dp = MethodData::next_extra(dp)) {
    if (dp->tag() == DataLayout::arg_info_data_tag)
      return new ciArgInfoData(dp);
  }
  return NULL;
}
/// Return true if the specified constant can be handled by the code generator.
/// We don't want to generate something like:
///   void *X = &X/42;
/// because the code generator doesn't have a relocation that can handle that.
///
/// This function should be called if C was not found (but just got inserted)
/// in SimpleConstants to avoid having to rescan the same constants all the
/// time.
static bool
isSimpleEnoughValueToCommitHelper(Constant *C,
                                  SmallPtrSetImpl<Constant *> &SimpleConstants,
                                  const DataLayout &DL) {
  // Simple global addresses are supported, do not allow dllimport or
  // thread-local globals.
  if (auto *GV = dyn_cast<GlobalValue>(C))
    return !GV->hasDLLImportStorageClass() && !GV->isThreadLocal();

  // Simple integer, undef, constant aggregate zero, etc are all supported.
  if (C->getNumOperands() == 0 || isa<BlockAddress>(C))
    return true;

  // Aggregate values are safe if all their elements are.
  if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) ||
      isa<ConstantVector>(C)) {
    for (Value *Op : C->operands())
      if (!isSimpleEnoughValueToCommit(cast<Constant>(Op), SimpleConstants, DL))
        return false;
    return true;
  }

  // We don't know exactly what relocations are allowed in constant expressions,
  // so we allow &global+constantoffset, which is safe and uniformly supported
  // across targets.
  ConstantExpr *CE = cast<ConstantExpr>(C);
  switch (CE->getOpcode()) {
  case Instruction::BitCast:
    // Bitcast is fine if the casted value is fine.
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);

  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
    // int <=> ptr is fine if the int type is the same size as the
    // pointer type.
    if (DL.getTypeSizeInBits(CE->getType()) !=
        DL.getTypeSizeInBits(CE->getOperand(0)->getType()))
      return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);

  // GEP is fine if it is simple + constant offset.
  case Instruction::GetElementPtr:
    for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
      if (!isa<ConstantInt>(CE->getOperand(i)))
        return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);

  case Instruction::Add:
    // We allow simple+cst.
    if (!isa<ConstantInt>(CE->getOperand(1)))
      return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
  }
  return false;
}
// Translate a bci to its corresponding extra data, or NULL.
ProfileData* methodDataOopDesc::bci_to_extra_data(int bci, bool create_if_missing) {
  DataLayout* dp    = extra_data_base();
  DataLayout* end   = extra_data_limit();
  DataLayout* avail = NULL;
  for (; dp < end; dp = next_extra(dp)) {
    // No need for "OrderAccess::load_acquire" ops,
    // since the data structure is monotonic.
    if (dp->tag() == DataLayout::no_tag)  break;
    if (dp->tag() == DataLayout::arg_info_data_tag) {
      dp = end; // ArgInfoData is at the end of extra data section.
      break;
    }
    if (dp->bci() == bci) {
      assert(dp->tag() == DataLayout::bit_data_tag, "sane");
      return new BitData(dp);
    }
  }
  if (create_if_missing && dp < end) {
    // Allocate this one.  There is no mutual exclusion,
    // so two threads could allocate different BCIs to the
    // same data layout.  This means these extra data
    // records, like most other MDO contents, must not be
    // trusted too much.
    DataLayout temp;
    temp.initialize(DataLayout::bit_data_tag, bci, 0);
    dp->release_set_header(temp.header());
    assert(dp->tag() == DataLayout::bit_data_tag, "sane");
    //NO: assert(dp->bci() == bci, "no concurrent allocation");
    return new BitData(dp);
  }
  return NULL;
}
static unsigned findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
                                    const LoadInst *LI) {
  unsigned StoreAlign = SI->getAlignment();
  if (!StoreAlign)
    StoreAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
  unsigned LoadAlign = LI->getAlignment();
  if (!LoadAlign)
    LoadAlign = DL.getABITypeAlignment(LI->getType());

  return std::min(StoreAlign, LoadAlign);
}
// Initialize the methodDataOop corresponding to a given method.
void methodDataOopDesc::initialize(methodHandle method) {
  ResourceMark rm;

  // Set the method back-pointer.
  _method = method();
  set_creation_mileage(mileage_of(method()));

  // Initialize flags and trap history.
  _nof_decompiles = 0;
  _nof_overflow_recompiles = 0;
  _nof_overflow_traps = 0;
  assert(sizeof(_trap_hist) % sizeof(HeapWord) == 0, "align");
  Copy::zero_to_words((HeapWord*) &_trap_hist,
                      sizeof(_trap_hist) / sizeof(HeapWord));

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  BytecodeStream stream(method);
  Bytecodes::Code c;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    if (size_in_bytes == 0)  empty_bc_count += 1;
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);
  int arg_size = method->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);
  object_size += extra_size + DataLayout::compute_size_in_bytes(arg_size+1);

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  set_object_is_parsable(object_size);
}
static unsigned getTypeSize(const DataLayout &TD, Type *type) {
  if (type->isFunctionTy()) /* it is not sized, weird */
    return TD.getPointerSize();

  if (!type->isSized())
    return 100; /* FIXME */

  if (StructType *ST = dyn_cast<StructType>(type))
    return TD.getStructLayout(ST)->getSizeInBytes();

  return TD.getTypeAllocSize(type);
}
void GcInfo::getGcPointers(StructType *StructTy, const DataLayout &DataLayout,
                           SmallVector<uint32_t, 4> &Pointers) {
  assert(StructTy->isSized());
  const uint32_t PointerSize = DataLayout.getPointerSize();
  const uint32_t TypeSize = DataLayout.getTypeStoreSize(StructTy);

  const StructLayout *MainStructLayout = DataLayout.getStructLayout(StructTy);

  // Walk through the type in pointer-sized jumps.
  for (uint32_t GcOffset = 0; GcOffset < TypeSize; GcOffset += PointerSize) {
    const uint32_t FieldIndex =
        MainStructLayout->getElementContainingOffset(GcOffset);
    Type *FieldTy = StructTy->getStructElementType(FieldIndex);

    // If the field is a value class we need to dive in
    // to its fields and so on, until we reach a primitive type.
    if (FieldTy->isStructTy()) {
      // Prepare to loop through the nesting.
      const StructLayout *OuterStructLayout = MainStructLayout;
      uint32_t OuterOffset = GcOffset;
      uint32_t OuterIndex = FieldIndex;

      while (FieldTy->isStructTy()) {
        // Offset of the Inner class within the outer class
        const uint32_t InnerBaseOffset =
            OuterStructLayout->getElementOffset(OuterIndex);
        // Inner class should start at or before the outer offset
        assert(InnerBaseOffset <= OuterOffset);
        // Determine target offset relative to this inner class.
        const uint32_t InnerOffset = OuterOffset - InnerBaseOffset;
        // Get the inner class layout
        StructType *InnerStructTy = cast<StructType>(FieldTy);
        const StructLayout *InnerStructLayout =
            DataLayout.getStructLayout(InnerStructTy);
        // Find the field at that target offset.
        const uint32_t InnerIndex =
            InnerStructLayout->getElementContainingOffset(InnerOffset);
        // Update for next iteration.
        FieldTy = InnerStructTy->getStructElementType(InnerIndex);
        OuterStructLayout = InnerStructLayout;
        OuterOffset = InnerOffset;
        OuterIndex = InnerIndex;
      }
    }

    if (GcInfo::isGcPointer(FieldTy)) {
      Pointers.push_back(GcOffset);
    }
  }
}
void MethodData::initialize() {
  No_Safepoint_Verifier no_safepoint;  // init function atomic wrt GC
  ResourceMark rm;

  init();
  set_creation_mileage(mileage_of(method()));

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  _data[0] = 0;  // apparently not set below.
  BytecodeStream stream(method());
  Bytecodes::Code c;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    if (is_empty_data(size_in_bytes, c)) empty_bc_count++;
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  object_size += extra_size;

  Copy::zero_to_bytes((HeapWord*) extra_data_base(), extra_size);

#ifndef GRAALVM
  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);

  int arg_size = method()->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

  object_size += DataLayout::compute_size_in_bytes(arg_size+1);
#endif

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  set_size(object_size);
}
uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
                                               bool &CanBeNull) const {
  assert(getType()->isPointerTy() && "must be pointer");

  uint64_t DerefBytes = 0;
  CanBeNull = false;
  if (const Argument *A = dyn_cast<Argument>(this)) {
    DerefBytes = A->getDereferenceableBytes();
    if (DerefBytes == 0 && (A->hasByValAttr() || A->hasStructRetAttr())) {
      Type *PT = cast<PointerType>(A->getType())->getElementType();
      if (PT->isSized())
        DerefBytes = DL.getTypeStoreSize(PT);
    }
    if (DerefBytes == 0) {
      DerefBytes = A->getDereferenceableOrNullBytes();
      CanBeNull = true;
    }
  } else if (auto CS = ImmutableCallSite(this)) {
    DerefBytes = CS.getDereferenceableBytes(AttributeList::ReturnIndex);
    if (DerefBytes == 0) {
      DerefBytes = CS.getDereferenceableOrNullBytes(AttributeList::ReturnIndex);
      CanBeNull = true;
    }
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      DerefBytes = CI->getLimitedValue();
    }
    if (DerefBytes == 0) {
      if (MDNode *MD =
              LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
        DerefBytes = CI->getLimitedValue();
      }
      CanBeNull = true;
    }
  } else if (auto *AI = dyn_cast<AllocaInst>(this)) {
    if (!AI->isArrayAllocation()) {
      DerefBytes = DL.getTypeStoreSize(AI->getAllocatedType());
      CanBeNull = false;
    }
  } else if (auto *GV = dyn_cast<GlobalVariable>(this)) {
    if (GV->getValueType()->isSized() && !GV->hasExternalWeakLinkage()) {
      // TODO: Don't outright reject hasExternalWeakLinkage but set the
      // CanBeNull flag.
      DerefBytes = DL.getTypeStoreSize(GV->getValueType());
      CanBeNull = false;
    }
  }
  return DerefBytes;
}
unsigned Value::getPointerAlignment(const DataLayout &DL) const {
  assert(getType()->isPointerTy() && "must be pointer");

  unsigned Align = 0;
  if (auto *GO = dyn_cast<GlobalObject>(this)) {
    // Don't make any assumptions about function pointer alignment. Some
    // targets use the LSBs to store additional information.
    if (isa<Function>(GO))
      return 0;
    Align = GO->getAlignment();
    if (Align == 0) {
      if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
        Type *ObjectType = GVar->getValueType();
        if (ObjectType->isSized()) {
          // If the object is defined in the current Module, we'll be giving
          // it the preferred alignment. Otherwise, we have to assume that it
          // may only have the minimum ABI alignment.
          if (GVar->isStrongDefinitionForLinker())
            Align = DL.getPreferredAlignment(GVar);
          else
            Align = DL.getABITypeAlignment(ObjectType);
        }
      }
    }
  } else if (const Argument *A = dyn_cast<Argument>(this)) {
    Align = A->getParamAlignment();

    if (!Align && A->hasStructRetAttr()) {
      // An sret parameter has at least the ABI alignment of the return type.
      Type *EltTy = cast<PointerType>(A->getType())->getElementType();
      if (EltTy->isSized())
        Align = DL.getABITypeAlignment(EltTy);
    }
  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(this)) {
    Align = AI->getAlignment();
    if (Align == 0) {
      Type *AllocatedType = AI->getAllocatedType();
      if (AllocatedType->isSized())
        Align = DL.getPrefTypeAlignment(AllocatedType);
    }
  } else if (auto CS = ImmutableCallSite(this)) {
    Align = CS.getAttributes().getRetAlignment();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      Align = CI->getLimitedValue();
    }
  }
  return Align;
}