// Returns the HexagonSubtarget matching F's per-function "target-cpu" /
// "target-features" attributes, creating and caching it on first use so
// each unique CPU+FS combination gets exactly one subtarget instance.
const HexagonSubtarget * HexagonTargetMachine::getSubtargetImpl(
    const Function &F) const {
  AttributeSet FnAttrs = F.getAttributes();
  Attribute CPUAttr =
      FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-cpu");
  Attribute FSAttr =
      FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-features");

  // A kind of Attribute::None marks "attribute absent"; fall back to the
  // module-level TargetCPU/TargetFS strings in that case.
  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  // Cache key is the concatenated CPU+FS string.
  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<HexagonSubtarget>(TargetTriple, CPU, FS, *this);
  }
  return I.get();
}
// removeAttribute() currently does not work on Attribute::Alignment // (it fails with an assertion error), so we have to take a more // convoluted route to removing this attribute by recreating the // AttributeSet. AttributeSet RemoveAttrs(LLVMContext &Context, AttributeSet Attrs) { SmallVector<AttributeSet, 8> AttrList; for (unsigned Slot = 0; Slot < Attrs.getNumSlots(); ++Slot) { unsigned Index = Attrs.getSlotIndex(Slot); AttrBuilder AB; for (AttributeSet::iterator Attr = Attrs.begin(Slot), E = Attrs.end(Slot); Attr != E; ++Attr) { if (Attr->isEnumAttribute() && Attr->getKindAsEnum() != Attribute::ByVal && Attr->getKindAsEnum() != Attribute::StructRet) { AB.addAttribute(*Attr); } // IR semantics require that ByVal implies NoAlias. However, IR // semantics do not require StructRet to imply NoAlias. For // example, a global variable address can be passed as a // StructRet argument, although Clang does not do so and Clang // explicitly adds NoAlias to StructRet arguments. if (Attr->isEnumAttribute() && Attr->getKindAsEnum() == Attribute::ByVal) { AB.addAttribute(Attribute::get(Context, Attribute::NoAlias)); } } AttrList.push_back(AttributeSet::get(Context, Index, AB)); } return AttributeSet::get(Context, AttrList); }
// Compact unwind frames are emitted only for MachO targets, and not when
// the target lowering supports swifterror and the function's attribute
// list mentions SwiftError anywhere.
static bool produceCompactUnwindFrame(MachineFunction &MF) {
  const AArch64Subtarget &ST = MF.getSubtarget<AArch64Subtarget>();
  if (!ST.isTargetMachO())
    return false;
  AttributeSet FnAttrs = MF.getFunction()->getAttributes();
  bool UsesSwiftError = ST.getTargetLowering()->supportSwiftError() &&
                        FnAttrs.hasAttrSomewhere(Attribute::SwiftError);
  return !UsesSwiftError;
}
/// Infer nonnull attributes for the arguments at the specified callsite. static bool processCallSite(CallSite CS, LazyValueInfo *LVI) { SmallVector<unsigned, 4> Indices; unsigned ArgNo = 0; for (Value *V : CS.args()) { PointerType *Type = dyn_cast<PointerType>(V->getType()); // Try to mark pointer typed parameters as non-null. We skip the // relatively expensive analysis for constants which are obviously either // null or non-null to start with. if (Type && !CS.paramHasAttr(ArgNo + 1, Attribute::NonNull) && !isa<Constant>(V) && LVI->getPredicateAt(ICmpInst::ICMP_EQ, V, ConstantPointerNull::get(Type), CS.getInstruction()) == LazyValueInfo::False) Indices.push_back(ArgNo + 1); ArgNo++; } assert(ArgNo == CS.arg_size() && "sanity check"); if (Indices.empty()) return false; AttributeSet AS = CS.getAttributes(); LLVMContext &Ctx = CS.getInstruction()->getContext(); AS = AS.addAttribute(Ctx, Indices, Attribute::get(Ctx, Attribute::NonNull)); CS.setAttributes(AS); return true; }
// Translates a decoded ARM call node into IR: computes the PC-relative
// target, declares a void() prototype for it (tagged with a string
// "Address" attribute holding the resolved target), and records the node.
// Currently always returns NULL; the actual CallInst emission is disabled.
Value* ARMIREmitter::visitCALL(const SDNode *N) {
  // The branch target must come in as a constant offset operand.
  const ConstantSDNode *DestNode = dyn_cast<ConstantSDNode>(N->getOperand(0));
  if (!DestNode) {
    printError("visitCALL: Not a constant integer for call!");
    return NULL;
  }

  int64_t DestInt = DestNode->getSExtValue();
  int64_t PC = Dec->getDisassembler()->getDebugOffset(N->getDebugLoc());
  // NOTE(review): original comment said "ARM defaults to 4; should be 8" —
  // presumably pipeline-visible PC is instruction address + 8 in ARM mode;
  // confirm this is not wrong for Thumb.
  unsigned InstrSize = 8;
  int64_t Tgt = PC + InstrSize + DestInt;
  // TODO: Look up address in symbol table.
  std::string FName = Dec->getDisassembler()->getFunctionName(Tgt);

  Module *Mod = IRB->GetInsertBlock()->getParent()->getParent();
  FunctionType *FT = FunctionType::get(
      Type::getPrimitiveType(Mod->getContext(), Type::VoidTyID), false);

  // Record the resolved target address on the declaration as a string
  // attribute so later passes can recover it.
  Twine TgtAddr(Tgt);
  AttributeSet AS;
  AS = AS.addAttribute(Mod->getContext(), AttributeSet::FunctionIndex,
                       "Address", TgtAddr.str());
  // getOrInsertFunction is called for its side effect of declaring the
  // callee in the module; the returned prototype is currently unused.
  Value* Proto = Mod->getOrInsertFunction(FName, FT, AS);
  // CallInst* Call = IRB->CreateCall(dyn_cast<Value>(Proto));
  // TODO: Technically visitCall sets the LR to IP+8. We should return that.
  VisitMap[N] = NULL;
  return NULL;
}
/// \brief If the inlined function had a higher stack protection level than the /// calling function, then bump up the caller's stack protection level. static void AdjustCallerSSPLevel(Function *Caller, Function *Callee) { // If upgrading the SSP attribute, clear out the old SSP Attributes first. // Having multiple SSP attributes doesn't actually hurt, but it adds useless // clutter to the IR. AttrBuilder B; B.addAttribute(Attribute::StackProtect) .addAttribute(Attribute::StackProtectStrong); AttributeSet OldSSPAttr = AttributeSet::get(Caller->getContext(), AttributeSet::FunctionIndex, B); AttributeSet CallerAttr = Caller->getAttributes(), CalleeAttr = Callee->getAttributes(); if (CalleeAttr.hasAttribute(AttributeSet::FunctionIndex, Attribute::StackProtectReq)) { Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr); Caller->addFnAttr(Attribute::StackProtectReq); } else if (CalleeAttr.hasAttribute(AttributeSet::FunctionIndex, Attribute::StackProtectStrong) && !CallerAttr.hasAttribute(AttributeSet::FunctionIndex, Attribute::StackProtectReq)) { Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr); Caller->addFnAttr(Attribute::StackProtectStrong); } else if (CalleeAttr.hasAttribute(AttributeSet::FunctionIndex, Attribute::StackProtect) && !CallerAttr.hasAttribute(AttributeSet::FunctionIndex, Attribute::StackProtectReq) && !CallerAttr.hasAttribute(AttributeSet::FunctionIndex, Attribute::StackProtectStrong)) Caller->addFnAttr(Attribute::StackProtect); }
// // Returns of float, double and complex need to be handled with a helper // function. // static bool fixupFPReturnAndCall (Function &F, Module *M, const MipsSubtarget &Subtarget) { bool Modified = false; LLVMContext &C = M->getContext(); Type *MyVoid = Type::getVoidTy(C); for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) { Instruction &Inst = *I; if (const ReturnInst *RI = dyn_cast<ReturnInst>(I)) { Value *RVal = RI->getReturnValue(); if (!RVal) continue; // // If there is a return value and it needs a helper function, // figure out which one and add a call before the actual // return to this helper. The purpose of the helper is to move // floating point values from their soft float return mapping to // where they would have been mapped to in floating point registers. // Type *T = RVal->getType(); FPReturnVariant RV = whichFPReturnVariant(T); if (RV == NoFPRet) continue; static const char* Helper[NoFPRet] = {"__mips16_ret_sf", "__mips16_ret_df", "__mips16_ret_sc", "__mips16_ret_dc"}; const char *Name = Helper[RV]; AttributeSet A; Value *Params[] = {RVal}; Modified = true; // // These helper functions have a different calling ABI so // this __Mips16RetHelper indicates that so that later // during call setup, the proper call lowering to the helper // functions will take place. 
// A = A.addAttribute(C, AttributeSet::FunctionIndex, "__Mips16RetHelper"); A = A.addAttribute(C, AttributeSet::FunctionIndex, Attribute::ReadNone); A = A.addAttribute(C, AttributeSet::FunctionIndex, Attribute::NoInline); Value *F = (M->getOrInsertFunction(Name, A, MyVoid, T, NULL)); CallInst::Create(F, Params, "", &Inst ); } else if (const CallInst *CI = dyn_cast<CallInst>(I)) { // pic mode calls are handled by already defined // helper functions if (Subtarget.getRelocationModel() != Reloc::PIC_ ) { Function *F_ = CI->getCalledFunction(); if (F_ && !isIntrinsicInline(F_) && needsFPHelperFromSig(*F_)) { assureFPCallStub(*F_, M, Subtarget); Modified=true; } } } } return Modified; }
// C-API shim: strips from attribute slot `index` of function Fn every
// attribute encoded in the raw bitmask Val.
extern "C" void LLVMRemoveFunctionAttributes(LLVMValueRef Fn, unsigned index,
                                             uint64_t Val) {
  Function *Func = unwrap<Function>(Fn);
  LLVMContext &Ctx = Func->getContext();
  AttrBuilder Mask(Val);
  Func->setAttributes(Func->getAttributes().removeAttributes(
      Ctx, index, AttributeSet::get(Ctx, index, Mask)));
}
int main(int argc, char **argv){ // Load the bitcode cl::ParseCommandLineOptions(argc, argv, "helper_call_modifier\n"); SMDiagnostic Err; LLVMContext &Context = getGlobalContext(); Module *Mod = ParseIRFile(InputFile, Err, Context); if (!Mod) { Err.print(argv[0], errs()); exit(1); } /* * This iterates through the list of functions, copies/renames, and deletes * the original function. This is how we have to do it with the while loop * because of how the LLVM function list is implemented. */ Module::iterator i = Mod->begin(); while (i != Mod->end()){ Function *f = i; i++; Module *m = f->getParent(); assert(m); if (!f->isDeclaration()){ // internal functions only ValueToValueMapTy VMap; Function *newFunc = CloneFunction(f, VMap, false); std::string origName = f->getName(); std::string newName = origName.append("_llvm"); newFunc->setName(newName); /* * XXX: We need to remove stack smash protection from helper * functions that are to be compiled with the JIT. There is a bug * in LLVM 3.0 that causes the JIT to generate stack protection code * that causes the program to segfault. More information available * here: http://llvm.org/bugs/show_bug.cgi?id=11089 */ const AttributeSet AS = newFunc->getAttributes(); newFunc->setAttributes(AS.removeAttribute(newFunc->getContext(), AttributeSet::FunctionIndex, Attribute::StackProtectReq)); // push to the front so the iterator doesn't see them again m->getFunctionList().push_front(newFunc); f->replaceAllUsesWith(newFunc); f->eraseFromParent(); } } // Verify the new bitcode and write it out, printing errors if necessary std::string errstring; verifyModule(*Mod, PrintMessageAction, &errstring); raw_fd_ostream *fstream = new raw_fd_ostream(OutputFile.c_str(), errstring); WriteBitcodeToFile(Mod, *fstream); printf("%s", errstring.c_str()); fstream->close(); return 0; }
void ValueEnumerator::EnumerateAttributes(const AttributeSet &PAL) { if (PAL.isEmpty()) return; // null is always 0. // Do a lookup. unsigned &Entry = AttributeMap[PAL.getRawPointer()]; if (Entry == 0) { // Never saw this before, add it. Attribute.push_back(PAL); Entry = Attribute.size(); } }
// C-API shim: removes from Fn's function-index attribute slot every
// attribute encoded in the raw bitmask PA.
void LLVMRemoveFunctionAttr2(LLVMValueRef Fn, uint64_t PA) {
  Function *Func = unwrap<Function>(Fn);
  LLVMContext &Ctx = Func->getContext();
  AttrBuilder Mask(PA);
  AttributeSet ToRemove =
      AttributeSet::get(Ctx, AttributeSet::FunctionIndex, Mask);
  Func->setAttributes(Func->getAttributes().removeAttributes(
      Ctx, AttributeSet::FunctionIndex, ToRemove));
}
// Renders every attribute in Attrs as text, each preceded by one space,
// e.g. " noinline readnone".
static std::string getAttributesAsString(AttributeSet Attrs) {
  std::string Result;
  const unsigned NumSlots = Attrs.getNumSlots();
  for (unsigned Slot = 0; Slot != NumSlots; ++Slot) {
    AttributeSet::iterator It = Attrs.begin(Slot);
    AttributeSet::iterator End = Attrs.end(Slot);
    for (; It != End; ++It) {
      Result += " ";
      Result += It->getAsString();
    }
  }
  return Result;
}
// Rust C-API shim: maps RustAttr to the corresponding LLVM attribute kind
// and removes it from attribute slot Index of function Fn.
extern "C" void LLVMRustRemoveFunctionAttributes(LLVMValueRef Fn,
                                                 unsigned Index,
                                                 LLVMRustAttribute RustAttr) {
  Function *F = unwrap<Function>(Fn);
  LLVMContext &Ctx = F->getContext();
  AttrBuilder ToDrop(Attribute::get(Ctx, fromRust(RustAttr)));
  F->setAttributes(F->getAttributes().removeAttributes(
      Ctx, Index, AttributeSet::get(Ctx, Index, ToDrop)));
}
// // remove the use-soft-float attribute // static void removeUseSoftFloat(Function &F) { AttributeSet A; DEBUG(errs() << "removing -use-soft-float\n"); A = A.addAttribute(F.getContext(), AttributeSet::FunctionIndex, "use-soft-float", "false"); F.removeAttributes(AttributeSet::FunctionIndex, A); if (F.hasFnAttribute("use-soft-float")) { DEBUG(errs() << "still has -use-soft-float\n"); } F.addAttributes(AttributeSet::FunctionIndex, A); }
// Rust C-API shim: removes the single attribute `attr` from attribute slot
// `index` of function `Fn`.
extern "C" void LLVMRustRemoveFunctionAttributes(LLVMValueRef Fn,
                                                 unsigned index,
                                                 LLVMAttributeRef attr) {
  Function *F = unwrap<Function>(Fn);
  LLVMContext &Ctx = F->getContext();
  AttrBuilder ToDrop(unwrap(attr));
  F->setAttributes(F->getAttributes().removeAttributes(
      Ctx, index, AttributeSet::get(Ctx, index, ToDrop)));
}
// Interns *pCopy in pParent: when an equal attribute is already recorded,
// the copy is deleted and pBase aliases the existing one; otherwise the
// copy is recorded in pParent and becomes the base.
static inline void ReplaceOrRecord(AttributeSet& pParent,
                                   const Attribute*& pBase,
                                   Attribute*& pCopy)
{
  Attribute* existing = pParent.exists(*pCopy);
  if (existing != NULL) {
    // Found an equal attribute; discard the redundant copy.
    delete pCopy;
    pBase = existing;
  }
  else {
    // Not present yet; pParent takes ownership of the copy.
    pParent.record(*pCopy);
    pBase = pCopy;
  }
}
// C-API shim: removes the string attribute `Name` from attribute slot
// `index` of function `fn`.
extern "C" void LLVMRemoveFunctionAttrString(LLVMValueRef fn, unsigned index,
                                             const char *Name) {
  Function *f = unwrap<Function>(fn);
  LLVMContext &C = f->getContext();
  AttrBuilder B;
  B.addAttribute(Name);
  f->setAttributes(f->getAttributes().removeAttributes(
      C, index, AttributeSet::get(C, index, B)));
}
// Unserialise a tree: read its model name and rotation angle from the
// attribute set, fetch the named model via the tree cache, apply the angle
// and hand it back as generic scenery.
ISceneryPtr load_tree(const AttributeSet& attrs)
{
  string name;
  float angle;
  attrs.get("name", name);
  attrs.get("angle", angle);

  shared_ptr<Tree> tree = load_tree_fromCache(name);
  tree->set_angle(angle);

  return ISceneryPtr(tree);
}
// Constructs the evaluator and pre-populates the VRX map with the sign/zero
// extension type of each register-passed formal parameter (see the long
// comment below for the caveats).
HexagonEvaluator::HexagonEvaluator(const HexagonRegisterInfo &tri,
                                   MachineRegisterInfo &mri,
                                   const HexagonInstrInfo &tii,
                                   MachineFunction &mf)
    : MachineEvaluator(tri, mri), MF(mf), MFI(*mf.getFrameInfo()), TII(tii) {
  // Populate the VRX map (VR to extension-type).
  // Go over all the formal parameters of the function. If a given parameter
  // P is sign- or zero-extended, locate the virtual register holding that
  // parameter and create an entry in the VRX map indicating the type of ex-
  // tension (and the source type).
  // This is a bit complicated to do accurately, since the memory layout in-
  // formation is necessary to precisely determine whether an aggregate para-
  // meter will be passed in a register or in memory. What is given in MRI
  // is the association between the physical register that is live-in (i.e.
  // holds an argument), and the virtual register that this value will be
  // copied into. This, by itself, is not sufficient to map back the virtual
  // register to a formal parameter from Function (since consecutive live-ins
  // from MRI may not correspond to consecutive formal parameters from Func-
  // tion). To avoid the complications with in-memory arguments, only consi-
  // der the initial sequence of formal parameters that are known to be
  // passed via registers.
  unsigned AttrIdx = 0;
  unsigned InVirtReg, InPhysReg = 0;
  const Function &F = *MF.getFunction();
  // Hoisted out of the loop: the attribute set is invariant per function
  // (the original re-fetched it on every iteration).
  AttributeSet Attrs = F.getAttributes();
  typedef Function::const_arg_iterator arg_iterator;
  for (arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    AttrIdx++;
    const Argument &Arg = *I;
    Type *ATy = Arg.getType();
    unsigned Width = 0;
    if (ATy->isIntegerTy())
      Width = ATy->getIntegerBitWidth();
    else if (ATy->isPointerTy())
      Width = 32;
    // If pointer size is not set through target data, it will default to
    // Module::AnyPointerSize.
    if (Width == 0 || Width > 64)
      break;
    InPhysReg = getNextPhysReg(InPhysReg, Width);
    if (!InPhysReg)
      break; // Out of argument registers: remaining params are in memory.
    InVirtReg = getVirtRegFor(InPhysReg);
    if (!InVirtReg)
      continue;
    if (Attrs.hasAttribute(AttrIdx, Attribute::SExt))
      VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::SExt, Width)));
    else if (Attrs.hasAttribute(AttrIdx, Attribute::ZExt))
      VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::ZExt, Width)));
  }
}
// Initialises per-function AMDGPU state: the shader type defaults to
// COMPUTE (zero LDS) and is overridden by the function's shader-type
// string attribute when present.
AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF)
    : MachineFunctionInfo() {
  ShaderType = ShaderType::COMPUTE;
  LDSSize = 0;
  Attribute A = MF.getFunction()->getAttributes().getAttribute(
      AttributeSet::FunctionIndex, ShaderTypeAttribute);
  if (!A.isStringAttribute())
    return;
  StringRef Str = A.getValueAsString();
  if (Str.getAsInteger(0, ShaderType))
    llvm_unreachable("Can't parse shader type!");
}
//==============================================================================
// ElementType::validateMissingAttributes
//
// Test if all required attributes have been specified and add attributes
// that have a default value
//
//==============================================================================
void ElementType::validateMissingAttributes(AttributeSet& attSet,
                                            bool bValidate,
                                            ParserImpl& parser) const {
  AttributeTypeMap::const_iterator iter;
  // Walk every attribute type declared for this element in the DTD.
  for (iter = m_attributeTypeMap.begin(); iter != m_attributeTypeMap.end();
       ++iter) {
    const AutoPtr<AttributeType>& rpAttrType = (*iter).second;
    if (rpAttrType->getDefaultType() == AttributeType::REQUIRED) {
      // A REQUIRED attribute that was not supplied is a validity error
      // (reported only when validating).
      if (bValidate &&
          !attSet.getAttribute(rpAttrType->getName().getRawName())) {
        const String& errMsg = MessageFormatter::Format(
            System::GetSysMessage(sXML, EXML_ATTRREQUIRED,
                "required attribute '{0}' has not been supplied for element '{1}'"),
            rpAttrType->getName().getRawName(),
            getName().getRawName());
        parser.errorDetected(Parser::Error, errMsg, EXML_ATTRREQUIRED);
      }
    }
    else if (rpAttrType->getDefaultType() != AttributeType::IMPLIED) {
      // XML 1.0 says that attributes with default value
      // that are not present should be created
      if (!attSet.getAttribute(rpAttrType->getName().getRawName())) {
        AutoPtr<Attribute> rpAttr =
            new Attribute(rpAttrType->getName(),
                          rpAttrType->getDefaultValue(),
                          rpAttrType->getTypeAsString());
        attSet.addAttribute(rpAttr.get());
        //
        // If we have had to add a defaulted attribute, and if the attribute
        // definition is external, and the document claims to be standalone
        // then we have a validity constraint error
        //
        if (bValidate && parser.isStandaloneDocument() &&
            rpAttrType->isExternallyDeclared()) {
          const String& errMsg = MessageFormatter::Format(
              System::GetSysMessage(sXML, EXML_ATTRDEFAULTNOTSA,
                  "externally declared attribute '{0}' for element '{1}' has a default value of '{2}' which must be specified in a standalone document"),
              rpAttrType->getName().getRawName(),
              getName().getRawName(),
              rpAttrType->getDefaultValue());
          parser.errorDetected(Parser::Error, errMsg, EXML_ATTRDEFAULTNOTSA);
        }
      }
    }
  }
}
// Re-derives the subtarget state from the function's "target-cpu" and
// "target-features" attributes.  State is only reset when the function
// actually carries a non-empty feature string.
void ARMSubtarget::resetSubtargetFeatures(const MachineFunction *MF) {
  AttributeSet FnAttrs = MF->getFunction()->getAttributes();
  Attribute CPUAttr =
      FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-cpu");
  Attribute FSAttr =
      FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-features");
  // A kind of Attribute::None marks "attribute absent"; fall back to "".
  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ?CPUAttr.getValueAsString()
                        : "";
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString()
                       : "";
  if (!FS.empty()) {
    // Start from a clean environment, then re-apply CPU/FS.
    initializeEnvironment();
    resetSubtargetFeatures(CPU, FS);
  }
}
// Selects the active subtarget (Mips16, NoMips16, or the default) for the
// function in MF based on its "mips16"/"nomips16" attributes; at most one
// of the two attributes may be present.
void MipsTargetMachine::resetSubtarget(MachineFunction *MF) {
  DEBUG(dbgs() << "resetSubtarget\n");
  AttributeSet FnAttrs = MF->getFunction()->getAttributes();
  bool Mips16Attr =
      FnAttrs.hasAttribute(AttributeSet::FunctionIndex, "mips16");
  bool NoMips16Attr =
      FnAttrs.hasAttribute(AttributeSet::FunctionIndex, "nomips16");
  assert(!(Mips16Attr && NoMips16Attr) &&
         "mips16 and nomips16 specified on the same function");
  if (Mips16Attr)
    Subtarget = &Mips16Subtarget;
  else if (NoMips16Attr)
    Subtarget = &NoMips16Subtarget;
  else
    Subtarget = &DefaultSubtarget;
}
// Builds the attribute set for the generated eval function: a single
// "target-features" string listing every enabled CPU feature, each prefixed
// with '+', sorted, and comma-joined.  Returns an empty set when no
// features are enabled.
AttributeSet DecisionTreeCompiler::collectEvalFunctionAttribs() {
  std::vector<std::string> enabled;
  for (const StringMapEntry<bool> &entry : CpuFeatures)
    if (entry.getValue())
      enabled.emplace_back("+" + entry.getKey().str());

  AttributeSet attrs;
  if (enabled.empty())
    return attrs;

  // Deterministic ordering of the feature string.
  std::sort(enabled.begin(), enabled.end());
  return attrs.addAttribute(Ctx, AttributeSet::FunctionIndex,
                            "target-features",
                            join(enabled.begin(), enabled.end(), ","));
}
// Builds a cylinder scene object from its serialized attributes (name,
// radius, height, collision flag, capped flag), then derives its corner
// geometry and creates the graphics-side cylinder.
Cylinder::Cylinder(const AttributeSet& attributes):capped(false)
{
  for (unsigned int idx = 0; idx < attributes.size(); ++idx)
  {
    std::string key(attributes[idx].attribute);
    if (key == "name")
      name = attributes[idx].value;
    else if (key == "radius")
      radius = ParserUtilities::toDouble(attributes[idx].value);
    else if (key == "height")
      height = ParserUtilities::toDouble(attributes[idx].value);
    else if (key == "canCollide")
      collideBit = ParserUtilities::toBool(attributes[idx].value);
    else if (key == "capped")
      capped = ParserUtilities::toBool(attributes[idx].value);
  }
  calculateCorners();
  graphicsHandle = graphicsManager->createNewCylinder(radius, height, capped);
}
/// Get the EVTs and ArgFlags collections that represent the legalized return /// type of the given function. This does not require a DAG or a return value, /// and is suitable for use before any DAGs for the function are constructed. /// TODO: Move this out of TargetLowering.cpp. void llvm::GetReturnInfo(Type* ReturnType, AttributeSet attr, SmallVectorImpl<ISD::OutputArg> &Outs, const TargetLowering &TLI) { SmallVector<EVT, 4> ValueVTs; ComputeValueVTs(TLI, ReturnType, ValueVTs); unsigned NumValues = ValueVTs.size(); if (NumValues == 0) return; for (unsigned j = 0, f = NumValues; j != f; ++j) { EVT VT = ValueVTs[j]; ISD::NodeType ExtendKind = ISD::ANY_EXTEND; if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt)) ExtendKind = ISD::SIGN_EXTEND; else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt)) ExtendKind = ISD::ZERO_EXTEND; // FIXME: C calling convention requires the return type to be promoted to // at least 32-bit. But this is not necessary for non-C calling // conventions. The frontend should mark functions whose return values // require promoting with signext or zeroext attributes. if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) { MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32); if (VT.bitsLT(MinVT)) VT = MinVT; } unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT); MVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT); // 'inreg' on function refers to return value ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy(); if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::InReg)) Flags.setInReg(); // Propagate extension type if any if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt)) Flags.setSExt(); else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt)) Flags.setZExt(); for (unsigned i = 0; i < NumParts; ++i) Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true, 0, 0)); } }
void Surface::initTexture(const AttributeSet& attributes, GLTexture& texture, int line, int column, ErrorManager* errorManager) { for(unsigned int i=0; i< attributes.size(); i++) { if (attributes[i].attribute == "file") texture.setTexName(attributes[i].value); else if(attributes[i].attribute == "mode") { if(attributes[i].value == "GL_MODULATE") texture.setMode(GL_MODULATE); else if(attributes[i].value == "GL_REPLACE") texture.setMode(GL_REPLACE); else if(attributes[i].value == "GL_ADD") texture.setMode(GL_ADD); else if(attributes[i].value == "GL_DECAL") texture.setMode(GL_DECAL); } else if (attributes[i].attribute == "wrapS") { GLenum mode = (GLenum)ParserUtilities::toTextureWrapMode(attributes[i].value); if(mode == GL_NONE) errorManager->addError("Unknown Texture Mode", "Unknown Texture Mode \"" + attributes[i].value + "\". Try( GL_CLAMP | GL_CLAMP_TO_EDGE | GL_CLAMP_TO_BORDER | GL_MIRRORED_REPEAT | GL_REPEAT)", line, column); else texture.setWrapS(mode); } else if (attributes[i].attribute == "wrapT") { GLenum mode = (GLenum)ParserUtilities::toTextureWrapMode(attributes[i].value); if(mode == GL_NONE) errorManager->addError("Unknown Texture Mode", "Unknown Texture Mode \"" + attributes[i].value + "\". Try( GL_CLAMP | GL_CLAMP_TO_EDGE | GL_CLAMP_TO_BORDER | GL_MIRRORED_REPEAT | GL_REPEAT)", line, column); else texture.setWrapT(mode); } else if (attributes[i].attribute == "wrapR") { GLenum mode = (GLenum)ParserUtilities::toTextureWrapMode(attributes[i].value); if(mode == GL_NONE) errorManager->addError("Unknown Texture Mode", "Unknown Texture Mode \"" + attributes[i].value + "\". 
Try( GL_CLAMP | GL_CLAMP_TO_EDGE | GL_CLAMP_TO_BORDER | GL_MIRRORED_REPEAT | GL_REPEAT)", line, column); else texture.setWrapR(mode); } else if (attributes[i].attribute == "priority") diffuseTexture.setPriority((GLclampf)ParserUtilities::toDouble(attributes[i].value)); else { errorManager->addError("Unknown Texture Parameter", "The provided texture parameter \"" + name + "\" is unknown", line, column); } } }
// Constructs a force-sensitive-resistor sensor with zeroed reading and
// feedback counters, resets its joint feedback state, and takes its name
// from the attribute list.
FSRSensor::FSRSensor(const AttributeSet &attributes)
    : value(0), numFeedbacks(0)
{
  resetJointFeedback();
  for (unsigned int idx = 0; idx < attributes.size(); ++idx)
  {
    if (attributes[idx].attribute == "name")
      name = attributes[idx].value;
  }
}
// Registers PAL and each of its per-slot attribute groups in the
// enumeration tables, assigning 1-based indices on first sighting; the
// empty set is always index 0.
void ValueEnumerator::EnumerateAttributes(AttributeSet PAL) {
  if (PAL.isEmpty())
    return; // null is always 0.

  // Whole-set lookup.
  unsigned &SetEntry = AttributeMap[PAL];
  if (SetEntry == 0) {
    Attribute.push_back(PAL);
    SetEntry = Attribute.size();
  }

  // Per-slot attribute-group lookups.
  for (unsigned Slot = 0, NumSlots = PAL.getNumSlots(); Slot != NumSlots;
       ++Slot) {
    AttributeSet Group = PAL.getSlotAttributes(Slot);
    unsigned &GroupEntry = AttributeGroupMap[Group];
    if (GroupEntry == 0) {
      AttributeGroups.push_back(Group);
      GroupEntry = AttributeGroups.size();
    }
  }
}
//FIXME: This logic for reseting the subtarget along with // the helper classes can probably be simplified but there are a lot of // cases so we will defer rewriting this to later. // void MipsSubtarget::resetSubtarget(MachineFunction *MF) { bool ChangeToMips16 = false, ChangeToNoMips16 = false; DEBUG(dbgs() << "resetSubtargetFeatures" << "\n"); AttributeSet FnAttrs = MF->getFunction()->getAttributes(); ChangeToMips16 = FnAttrs.hasAttribute(AttributeSet::FunctionIndex, "mips16"); ChangeToNoMips16 = FnAttrs.hasAttribute(AttributeSet::FunctionIndex, "nomips16"); assert (!(ChangeToMips16 & ChangeToNoMips16) && "mips16 and nomips16 specified on the same function"); if (ChangeToMips16) { if (PreviousInMips16Mode) return; OverrideMode = Mips16Override; PreviousInMips16Mode = true; TM->setHelperClassesMips16(); return; } else if (ChangeToNoMips16) { if (!PreviousInMips16Mode) return; OverrideMode = NoMips16Override; PreviousInMips16Mode = false; TM->setHelperClassesMipsSE(); return; } else { if (OverrideMode == NoOverride) return; OverrideMode = NoOverride; DEBUG(dbgs() << "back to default" << "\n"); if (inMips16Mode() && !PreviousInMips16Mode) { TM->setHelperClassesMips16(); PreviousInMips16Mode = true; } else if (!inMips16Mode() && PreviousInMips16Mode) { TM->setHelperClassesMipsSE(); PreviousInMips16Mode = false; } return; } }