// Builds the LLVM parameter-attribute list for 'func' from the ABI rewrite
// info stored in f->fty and applies it to the function.
//
// Attribute index convention (old LLVM API): 0 = return value, 1..n = params.
// Implicit arguments (sret, this, nest, _arguments, _argptr) each consume one
// index before the explicit parameters.
//
// Fix vs. original: the final loop used `int i` compared against the `size_t`
// count `n` (signed/unsigned mismatch, and `idx + i` mixed int with size_t);
// index variables are now consistently unsigned.
static void set_param_attrs(TypeFunction* f, llvm::Function* func, FuncDeclaration* fdecl)
{
    LLSmallVector<llvm::AttributeWithIndex, 9> attrs;
    llvm::AttributeWithIndex PAWI;

    unsigned idx = 0;

    // handle implicit args: for each one present, record its attributes (if
    // any) and advance the attribute index
    #define ADD_PA(X) \
    if (f->fty.X) { \
        if (f->fty.X->attrs) { \
            PAWI.Index = idx; \
            PAWI.Attrs = f->fty.X->attrs; \
            attrs.push_back(PAWI); \
        } \
        idx++; \
    }

    ADD_PA(ret)
    ADD_PA(arg_sret)
    ADD_PA(arg_this)
    ADD_PA(arg_nest)
    ADD_PA(arg_arguments)
    ADD_PA(arg_argptr)

    #undef ADD_PA

    // collect the attributes of the explicit parameters, in declaration order
    size_t n = Parameter::dim(f->parameters);
    LLSmallVector<unsigned, 8> attrptr(n, 0);
    for (size_t k = 0; k < n; ++k)
    {
        Parameter* fnarg = Parameter::getNth(f->parameters, k);
        assert(fnarg);
        attrptr[k] = f->fty.args[k]->attrs;
    }

    // some ABIs pass explicit parameters in reverse order; mirror that here so
    // attributes line up with the actual IR parameter positions
    if (f->fty.reverseParams)
    {
        std::reverse(attrptr.begin(), attrptr.end());
    }

    // append the explicit parameter attributes after the implicit ones
    for (size_t i = 0; i < n; i++)
    {
        if (attrptr[i])
        {
            PAWI.Index = idx + i;
            PAWI.Attrs = attrptr[i];
            attrs.push_back(PAWI);
        }
    }

    llvm::AttrListPtr attrlist = llvm::AttrListPtr::get(attrs.begin(), attrs.end());
    func->setAttributes(attrlist);
}
void DtoAARemove(Loc& loc, DValue* aa, DValue* key) { // D1: // call: // extern(C) void _aaDel(AA aa, TypeInfo keyti, void* pkey) // D2: // call: // extern(C) void _aaDelX(AA aa, TypeInfo keyti, void* pkey) // first get the runtime function #if DMDV2 llvm::Function* func = LLVM_D_GetRuntimeFunction(gIR->module, "_aaDelX"); #else llvm::Function* func = LLVM_D_GetRuntimeFunction(gIR->module, "_aaDel"); #endif const llvm::FunctionType* funcTy = func->getFunctionType(); if (Logger::enabled()) Logger::cout() << "_aaDel = " << *func << '\n'; // aa param LLValue* aaval = aa->getRVal(); if (Logger::enabled()) { Logger::cout() << "aaval: " << *aaval << '\n'; Logger::cout() << "totype: " << *funcTy->getParamType(0) << '\n'; } aaval = DtoBitCast(aaval, funcTy->getParamType(0)); // keyti param #if DMDV2 LLValue* keyti = to_keyti(aa); #else LLValue* keyti = to_keyti(key); #endif keyti = DtoBitCast(keyti, funcTy->getParamType(1)); // pkey param LLValue* pkey = makeLValue(loc, key); pkey = DtoBitCast(pkey, funcTy->getParamType(2)); // build arg vector LLSmallVector<LLValue*, 3> args; args.push_back(aaval); args.push_back(keyti); args.push_back(pkey); // call runtime gIR->CreateCallOrInvoke(func, args.begin(), args.end()); }
// Emits the body of an exception-handling landing pad into 'inBB' using the
// old (pre-3.0) LLVM EH scheme: fetch the in-flight exception with
// llvm.eh.exception, register the catch/finally actions via llvm.eh.selector,
// then emit the finally bodies and a typeid-compare chain that branches to the
// matching catch block; if nothing matches, resume unwinding.
void IRLandingPad::constructLandingPad(llvm::BasicBlock* inBB)
{
    // save and rewrite scope (restored at the end of this function)
    IRScope savedscope = gIR->scope();
    gIR->scope() = IRScope(inBB,savedscope.end);

    // eh_ptr = llvm.eh.exception();
    llvm::Function* eh_exception_fn = GET_INTRINSIC_DECL(eh_exception);
    LLValue* eh_ptr = gIR->ir->CreateCall(eh_exception_fn);

    // build selector arguments
    LLSmallVector<LLValue*, 6> selectorargs;

    // put in classinfos in the right order
    // NOTE: each catch's ClassInfo is inserted at the FRONT, so the deque's
    // later entries end up earlier in the selector argument list
    bool hasFinally = false;
    bool hasCatch = false;
    std::deque<IRLandingPadInfo>::iterator it = infos.begin(), end = infos.end();
    for(; it != end; ++it)
    {
        if(it->finallyBody)
            hasFinally = true;
        else
        {
            hasCatch = true;
            assert(it->catchType);
            assert(it->catchType->ir.irStruct);
            selectorargs.insert(selectorargs.begin(), it->catchType->ir.irStruct->getClassInfoSymbol());
        }
    }
    // if there's a finally, the eh table has to have a 0 action
    if(hasFinally)
        selectorargs.push_back(DtoConstUint(0));
    // personality fn: passed to llvm.eh.selector as an i8*
    llvm::Function* personality_fn = LLVM_D_GetRuntimeFunction(gIR->module, "_d_eh_personality");
    LLValue* personality_fn_arg = gIR->ir->CreateBitCast(personality_fn, getPtrToType(LLType::getInt8Ty(gIR->context())));
    selectorargs.insert(selectorargs.begin(), personality_fn_arg);
    // eh storage target: the exception pointer is the selector's first argument
    selectorargs.insert(selectorargs.begin(), eh_ptr);

    // if there is a catch and some catch allocated storage, store exception object
    if(hasCatch && catch_var)
    {
        const LLType* objectTy = DtoType(ClassDeclaration::object->type);
        gIR->ir->CreateStore(gIR->ir->CreateBitCast(eh_ptr, objectTy), catch_var);
    }

    // eh_sel = llvm.eh.selector(eh_ptr, cast(byte*)&_d_eh_personality, <selectorargs>);
    llvm::Function* eh_selector_fn = GET_INTRINSIC_DECL(eh_selector);
    LLValue* eh_sel = gIR->ir->CreateCall(eh_selector_fn, selectorargs.begin(), selectorargs.end());

    // emit finallys and 'if' chain to catch the exception
    llvm::Function* eh_typeid_for_fn = GET_INTRINSIC_DECL(eh_typeid_for);

    // NOTE: these locals deliberately shadow the members of the same name:
    // they snapshot the current pad state, which is mutated below while
    // emitting finally bodies, and restored after the loop
    std::deque<IRLandingPadInfo> infos = this->infos;
    std::stack<size_t> nInfos = this->nInfos;
    std::deque<IRLandingPadInfo>::reverse_iterator rit, rend = infos.rend();
    // walk the infos innermost-first (reverse of push order)
    for(rit = infos.rbegin(); rit != rend; ++rit)
    {
        // if it's a finally, emit its code
        // (the member state is truncated first so the finally body is emitted
        // against the enclosing pad's info, not its own)
        if(rit->finallyBody)
        {
            size_t n = this->nInfos.top();
            this->infos.resize(n);
            this->nInfos.pop();
            rit->finallyBody->toIR(gIR);
        }
        // otherwise it's a catch and we'll add a if-statement
        else
        {
            llvm::BasicBlock *next = llvm::BasicBlock::Create(gIR->context(), "eh.next", gIR->topfunc(), gIR->scopeend());
            LLValue *classInfo = DtoBitCast(rit->catchType->ir.irStruct->getClassInfoSymbol(), getPtrToType(DtoType(Type::tint8)));
            // llvm.eh.typeid.for gives the selector value this catch matches;
            // branch to the catch target on equality, else fall through
            LLValue *eh_id = gIR->ir->CreateCall(eh_typeid_for_fn, classInfo);
            gIR->ir->CreateCondBr(gIR->ir->CreateICmpEQ(eh_sel, eh_id), rit->target, next);
            gIR->scope() = IRScope(next, gIR->scopeend());
        }
    }

    // restore landing pad infos (undoes the truncation done for finallys)
    this->infos = infos;
    this->nInfos = nInfos;

    // no catch matched and all finallys executed - resume unwind
    llvm::Function* unwind_resume_fn = LLVM_D_GetRuntimeFunction(gIR->module, "_d_eh_resume_unwind");
    gIR->ir->CreateCall(unwind_resume_fn, eh_ptr);
    gIR->ir->CreateUnreachable();

    gIR->scope() = savedscope;
}
/// Appends the fields of aggregate `ad` to the LLVM struct type being built.
///
/// Overlapping (union-style) fields are resolved by priority: one pass per
/// priority level, highest first, keeps at most one "contributor" field per
/// overlapping byte range. Fields that lose but have the same offset and the
/// same LLVM type as the winner are recorded as aliases so they can share the
/// winner's GEP index. Explicit zero-padding fields are inserted between
/// contributors, and builder state (m_offset, m_fieldIndex, m_defaultTypes,
/// m_varGEPIndices) is advanced accordingly.
void AggrTypeBuilder::addAggregate(
    AggregateDeclaration *ad, const AggrTypeBuilder::VarInitMap *explicitInits,
    AggrTypeBuilder::Aliases aliases) {
  const size_t n = ad->fields.dim;
  if (n == 0)
    return;

  // prioritize overlapping fields (one priority per field, parallel to
  // ad->fields; semantics of the value come from prioritize())
  LLSmallVector<FieldPriority, 16> priorities;
  priorities.reserve(n);
  for (auto f : ad->fields) {
    priorities.push_back(prioritize(f, explicitInits));
    IF_LOG Logger::println("Field priority for %s: %d", f->toChars(),
                           priorities.back());
  }

  // mirror the ad->fields array but only fill in contributors
  LLSmallVector<VarDeclaration *, 16> data(n, nullptr);

  // list of pairs: alias => actual field (same offset, same LL type)
  LLSmallVector<std::pair<VarDeclaration *, VarDeclaration *>, 16> aliasPairs;

  // one pass per priority in descending order
  const auto minMaxPriority =
      std::minmax_element(priorities.begin(), priorities.end());
  for (int p = *minMaxPriority.second; p >= *minMaxPriority.first; p--) {
    // iterate over fields of that priority, in declaration order
    for (size_t index = 0; index < n; ++index) {
      if (priorities[index] != p)
        continue;

      VarDeclaration *field = ad->fields[index];
      const size_t f_begin = field->offset;
      const size_t f_end = f_begin + field->type->size();

      // skip empty fields
      if (f_begin == f_end)
        continue;

      // check for overlapping existing fields (only needed for fields the
      // frontend marked as overlapped, i.e. union members)
      bool overlaps = false;
      if (field->overlapped) {
        for (const auto vd : data) {
          if (!vd)
            continue;

          const size_t v_begin = vd->offset;
          const size_t v_end = v_begin + vd->type->size();
          // half-open interval intersection test
          if (v_begin < f_end && v_end > f_begin) {
            // same start offset and identical LLVM memory type: the loser can
            // simply reuse the winner's GEP index later
            if (aliases == Aliases::AddToVarGEPIndices && v_begin == f_begin &&
                DtoMemType(vd->type) == DtoMemType(field->type)) {
              aliasPairs.push_back(std::make_pair(field, vd));
            }
            overlaps = true;
            break;
          }
        }
      }

      if (!overlaps)
        data[index] = field;
    }
  }

  // Now we can build a list of LLVM types for the actual LL fields.
  // Make sure to zero out any padding and set the GEP indices for the directly
  // indexable variables.

  // first we sort the list by offset (nullptr gaps included; the comparator
  // must therefore tolerate null entries)
  std::sort(data.begin(), data.end(), var_offset_sort_cb);

  for (const auto vd : data) {
    if (!vd)
      continue;

    // contributors were chosen to be non-overlapping, so offsets are monotonic
    assert(vd->offset >= m_offset && "Variable overlaps previous field.");

    // Add an explicit field for any padding so we can zero it, as per TDPL
    // §7.1.1.
    if (m_offset < vd->offset) {
      m_fieldIndex += add_zeros(m_defaultTypes, m_offset, vd->offset);
      m_offset = vd->offset;
    }

    // add default type
    m_defaultTypes.push_back(DtoMemType(vd->type));

    // advance offset to right past this field
    m_offset += getMemberSize(vd->type);

    // set the field index
    m_varGEPIndices[vd] = m_fieldIndex;

    // let any aliases reuse this field/GEP index
    for (const auto &pair : aliasPairs) {
      if (pair.second == vd)
        m_varGEPIndices[pair.first] = m_fieldIndex;
    }

    ++m_fieldIndex;
  }
}