/// Return the struct value represented by v without the padding fields.
/// Unions will be expanded, with a value for each member.
/// Note: v must be a pointer to a struct, but the return value will be a
/// first-class struct value.
LLValue* DtoUnpaddedStruct(Type* dty, LLValue* v) {
    assert(dty->ty == Tstruct);
    TypeStruct* sty = (TypeStruct*) dty;
    Array& fields = sty->sym->fields;

    LLValue* newval = llvm::UndefValue::get(DtoUnpaddedStructType(dty));

    for (unsigned i = 0; i < fields.dim; i++) {
        VarDeclaration* vd = (VarDeclaration*) fields.data[i];
        LLValue* fieldptr = DtoIndexStruct(v, sty->sym, vd);
        LLValue* fieldval;
        if (vd->type->ty == Tstruct) {
            // Nested structs are the only members that can contain padding
            fieldval = DtoUnpaddedStruct(vd->type, fieldptr);
        } else {
            fieldval = DtoLoad(fieldptr);
        }
        newval = DtoInsertValue(newval, fieldval, i);
    }
    return newval;
}
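// Illustrative sketch, not LDC code: the doc comment above talks about
// stripping padding fields from a struct value. The snippet below uses the
// plain LLVM C++ API to show the layout difference between a naturally
// padded struct and a member-only (packed) layout for { i8, i32 }. The data
// layout string, function name and use of a packed struct are assumptions
// made for this example; they are not how DtoUnpaddedStructType is
// implemented.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>

void showPaddingExample() {
    llvm::LLVMContext ctx;
    // A typical 64-bit data layout; a real compiler takes this from the target.
    llvm::DataLayout dl("e-m:e-i64:64-n8:16:32:64-S128");

    llvm::Type* i8 = llvm::Type::getInt8Ty(ctx);
    llvm::Type* i32 = llvm::Type::getInt32Ty(ctx);

    // Natural layout: 3 padding bytes follow the i8 so the i32 is aligned.
    llvm::StructType* padded = llvm::StructType::get(ctx, {i8, i32});
    // Member-only layout: just the field values, no padding bytes.
    llvm::StructType* packed = llvm::StructType::get(ctx, {i8, i32}, /*isPacked=*/true);

    uint64_t paddedSize = dl.getTypeAllocSize(padded); // 8
    uint64_t packedSize = dl.getTypeAllocSize(packed); // 5
    llvm::outs() << "padded: " << paddedSize << ", packed: " << packedSize << '\n';
}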
void IRLandingPadInfo::toIR() {
    if (!catchstmt)
        return;

    gIR->scope() = IRScope(target, target);
    DtoDwarfBlockStart(catchstmt->loc);

    // assign storage to catch var
    if(catchstmt->var) {
        // use the same storage for all exceptions that are not accessed in
        // nested functions
        if(!catchstmt->var->nestedrefs.dim) {
            assert(!catchstmt->var->ir.irLocal);
            catchstmt->var->ir.irLocal = new IrLocal(catchstmt->var);
            LLValue* catch_var = gIR->func()->gen->landingPadInfo.getExceptionStorage();
            catchstmt->var->ir.irLocal->value = gIR->ir->CreateBitCast(catch_var,
                getPtrToType(DtoType(catchstmt->var->type)));
        }

        // this will alloca if we haven't already and take care of nested refs
        DtoDeclarationExp(catchstmt->var);

        // the exception will only be stored in catch_var. copy it over if necessary
        if(catchstmt->var->ir.irLocal->value != gIR->func()->gen->landingPadInfo.getExceptionStorage()) {
            LLValue* exc = gIR->ir->CreateBitCast(
                DtoLoad(gIR->func()->gen->landingPadInfo.getExceptionStorage()),
                DtoType(catchstmt->var->type));
            DtoStore(exc, catchstmt->var->ir.irLocal->value);
        }
    }

    // emit handler, if there is one
    // handler is zero for instance for 'catch { debug foo(); }'
    if(catchstmt->handler)
        catchstmt->handler->toIR(gIR);

    if (!gIR->scopereturned())
        gIR->ir->CreateBr(end);

    DtoDwarfBlockEnd();
}
LLValue* DtoVirtualFunctionPointer(DValue* inst, FuncDeclaration* fdecl, char* name) {
    // sanity checks
    assert(fdecl->isVirtual());
    assert(!fdecl->isFinal());
    assert(fdecl->vtblIndex > 0); // 0 is always ClassInfo/Interface*
    assert(inst->getType()->toBasetype()->ty == Tclass);

    // get instance
    LLValue* vthis = inst->getRVal();
    if (Logger::enabled())
        Logger::cout() << "vthis: " << *vthis << '\n';

    LLValue* funcval = vthis;
    // get the vtbl for objects
    funcval = DtoGEPi(funcval, 0, 0, "tmp");
    // load vtbl ptr
    funcval = DtoLoad(funcval);
    // index vtbl
    std::string vtblname = name;
    vtblname.append("@vtbl");
    funcval = DtoGEPi(funcval, 0, fdecl->vtblIndex, vtblname.c_str());
    // load funcptr
    funcval = DtoAlignedLoad(funcval);

    if (Logger::enabled())
        Logger::cout() << "funcval: " << *funcval << '\n';

    // cast to final funcptr type
    funcval = DtoBitCast(funcval, getPtrToType(DtoType(fdecl->type)));

    // postpone naming until after casting to get the name in call instructions
    funcval->setName(name);

    if (Logger::enabled())
        Logger::cout() << "funcval casted: " << *funcval << '\n';

    return funcval;
}
void DtoFinalizeScopeClass(Loc &loc, LLValue *inst, bool hasDtor) {
  if (!isOptimizationEnabled() || hasDtor) {
    DtoFinalizeClass(loc, inst);
    return;
  }

  // no dtors => only finalize (via druntime call) if monitor is set,
  // see https://github.com/ldc-developers/ldc/issues/2515
  llvm::BasicBlock *ifbb = gIR->insertBB("if");
  llvm::BasicBlock *endbb = gIR->insertBBAfter(ifbb, "endif");

  const auto monitor = DtoLoad(DtoGEPi(inst, 0, 1), ".monitor");
  const auto hasMonitor =
      gIR->ir->CreateICmp(llvm::CmpInst::ICMP_NE, monitor,
                          getNullValue(monitor->getType()), ".hasMonitor");
  llvm::BranchInst::Create(ifbb, endbb, hasMonitor, gIR->scopebb());

  gIR->scope() = IRScope(ifbb);
  DtoFinalizeClass(loc, inst);
  gIR->ir->CreateBr(endbb);

  gIR->scope() = IRScope(endbb);
}
DRValue *DLValue::getRVal() {
  if (DtoIsInMemoryOnly(type)) {
    llvm_unreachable("getRVal() for memory-only type");
    return nullptr;
  }

  LLValue *rval = DtoLoad(val);

  if (type->toBasetype()->ty == Tbool) {
    assert(rval->getType() == llvm::Type::getInt8Ty(gIR->context()));
    if (isOptimizationEnabled()) {
      // attach range metadata for i8 being loaded: [0, 2)
      llvm::MDBuilder mdBuilder(gIR->context());
      llvm::cast<llvm::LoadInst>(rval)->setMetadata(
          llvm::LLVMContext::MD_range,
          mdBuilder.createRange(llvm::APInt(8, 0), llvm::APInt(8, 2)));
    }

    // truncate to i1
    rval = gIR->ir->CreateTrunc(rval, llvm::Type::getInt1Ty(gIR->context()));
  }

  return new DImValue(type, rval);
}
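// Standalone sketch (not LDC code) of the range-metadata idiom used above,
// written against the plain LLVM C++ API. It assumes a reasonably recent
// LLVM where IRBuilder::CreateLoad takes an explicit element type; the
// function and variable names are invented for this example.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"

llvm::Value *loadBoolByteAsI1(llvm::IRBuilder<> &builder, llvm::Value *addr) {
  llvm::LLVMContext &ctx = builder.getContext();

  // Load the i8 storage slot of the bool.
  llvm::LoadInst *load = builder.CreateLoad(llvm::Type::getInt8Ty(ctx), addr);

  // Tell the optimizer the loaded byte is in the half-open range [0, 2),
  // i.e. it can only ever be 0 or 1.
  llvm::MDBuilder mdBuilder(ctx);
  load->setMetadata(llvm::LLVMContext::MD_range,
                    mdBuilder.createRange(llvm::APInt(8, 0), llvm::APInt(8, 2)));

  // Truncate to i1 for use as a proper boolean value.
  return builder.CreateTrunc(load, llvm::Type::getInt1Ty(ctx));
}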
DValue* DtoNestedVariable(Loc& loc, Type* astype, VarDeclaration* vd, bool byref) { IF_LOG Logger::println("DtoNestedVariable for %s @ %s", vd->toChars(), loc.toChars()); LOG_SCOPE; //////////////////////////////////// // Locate context value Dsymbol* vdparent = vd->toParent2(); assert(vdparent); IrFunction* irfunc = gIR->func(); // Check whether we can access the needed frame FuncDeclaration *fd = irfunc->decl; while (fd != vdparent) { if (fd->isStatic()) { error(loc, "function %s cannot access frame of function %s", irfunc->decl->toPrettyChars(), vdparent->toPrettyChars()); return new DVarValue(astype, vd, llvm::UndefValue::get(getPtrToType(DtoType(astype)))); } fd = getParentFunc(fd, false); assert(fd); } // is the nested variable in this scope? if (vdparent == irfunc->decl) { LLValue* val = vd->ir.getIrValue(); return new DVarValue(astype, vd, val); } LLValue *dwarfValue = 0; std::vector<LLValue*> dwarfAddr; // get the nested context LLValue* ctx = 0; if (irfunc->nestedVar) { // If this function has its own nested context struct, always load it. ctx = irfunc->nestedVar; dwarfValue = ctx; } else if (irfunc->decl->isMember2()) { // If this is a member function of a nested class without its own // context, load the vthis member. AggregateDeclaration* cd = irfunc->decl->isMember2(); LLValue* val = irfunc->thisArg; if (cd->isClassDeclaration()) val = DtoLoad(val); ctx = DtoLoad(DtoGEPi(val, 0, cd->vthis->ir.irField->index, ".vthis")); } else { // Otherwise, this is a simple nested function, load from the context // argument. ctx = DtoLoad(irfunc->nestArg); dwarfValue = irfunc->nestArg; if (global.params.symdebug) gIR->DBuilder.OpDeref(dwarfAddr); } assert(ctx); DtoCreateNestedContextType(vdparent->isFuncDeclaration()); assert(vd->ir.irLocal); //////////////////////////////////// // Extract variable from nested context LLValue* val = DtoBitCast(ctx, LLPointerType::getUnqual(irfunc->frameType)); IF_LOG { Logger::cout() << "Context: " << *val << '\n'; Logger::cout() << "of type: " << *irfunc->frameType << '\n'; } unsigned vardepth = vd->ir.irLocal->nestedDepth; unsigned funcdepth = irfunc->depth; IF_LOG { Logger::cout() << "Variable: " << vd->toChars() << '\n'; Logger::cout() << "Variable depth: " << vardepth << '\n'; Logger::cout() << "Function: " << irfunc->decl->toChars() << '\n'; Logger::cout() << "Function depth: " << funcdepth << '\n'; } if (vardepth == funcdepth) { // This is not always handled above because functions without // variables accessed by nested functions don't create new frames. IF_LOG Logger::println("Same depth"); } else { // Load frame pointer and index that... 
if (dwarfValue && global.params.symdebug) { gIR->DBuilder.OpOffset(dwarfAddr, val, vd->ir.irLocal->nestedDepth); gIR->DBuilder.OpDeref(dwarfAddr); } IF_LOG Logger::println("Lower depth"); val = DtoGEPi(val, 0, vd->ir.irLocal->nestedDepth); IF_LOG Logger::cout() << "Frame index: " << *val << '\n'; val = DtoAlignedLoad(val, (std::string(".frame.") + vdparent->toChars()).c_str()); IF_LOG Logger::cout() << "Frame: " << *val << '\n'; } int idx = vd->ir.irLocal->nestedIndex; assert(idx != -1 && "Nested context not yet resolved for variable."); if (dwarfValue && global.params.symdebug) gIR->DBuilder.OpOffset(dwarfAddr, val, idx); val = DtoGEPi(val, 0, idx, vd->toChars()); IF_LOG { Logger::cout() << "Addr: " << *val << '\n'; Logger::cout() << "of type: " << *val->getType() << '\n'; } if (byref || (vd->isParameter() && vd->ir.irParam->arg->byref)) { val = DtoAlignedLoad(val); //dwarfOpDeref(dwarfAddr); IF_LOG { Logger::cout() << "Was byref, now: " << *val << '\n'; Logger::cout() << "of type: " << *val->getType() << '\n'; } }
DValue* DtoAAIndex(Loc& loc, Type* type, DValue* aa, DValue* key, bool lvalue) {
    // D1:
    // call:
    // extern(C) void* _aaGet(AA* aa, TypeInfo keyti, size_t valuesize, void* pkey)
    // or
    // extern(C) void* _aaIn(AA* aa, TypeInfo keyti, void* pkey)

    // D2:
    // call:
    // extern(C) void* _aaGetX(AA* aa, TypeInfo keyti, size_t valuesize, void* pkey)
    // or
    // extern(C) void* _aaInX(AA* aa, TypeInfo keyti, void* pkey)

    // first get the runtime function
#if DMDV2
    llvm::Function* func = LLVM_D_GetRuntimeFunction(gIR->module, lvalue?"_aaGetX":"_aaInX");
#else
    llvm::Function* func = LLVM_D_GetRuntimeFunction(gIR->module, lvalue?"_aaGet":"_aaIn");
#endif
    LLFunctionType* funcTy = func->getFunctionType();

    // aa param
    LLValue* aaval = lvalue ? aa->getLVal() : aa->getRVal();
    aaval = DtoBitCast(aaval, funcTy->getParamType(0));

    // keyti param
#if DMDV2
    LLValue* keyti = to_keyti(aa);
#else
    LLValue* keyti = to_keyti(key);
#endif
    keyti = DtoBitCast(keyti, funcTy->getParamType(1));

    // pkey param
    LLValue* pkey = makeLValue(loc, key);
    pkey = DtoBitCast(pkey, funcTy->getParamType(lvalue ? 3 : 2));

    // call runtime
    LLValue* ret;
    if (lvalue) {
        // valuesize param
        LLValue* valsize = DtoConstSize_t(getTypePaddedSize(DtoType(type)));

        ret = gIR->CreateCallOrInvoke4(func, aaval, keyti, valsize, pkey, "aa.index").getInstruction();
    } else {
        ret = gIR->CreateCallOrInvoke3(func, aaval, keyti, pkey, "aa.index").getInstruction();
    }

    // cast return value
    LLType* targettype = getPtrToType(DtoType(type));
    if (ret->getType() != targettype)
        ret = DtoBitCast(ret, targettype);

    // Only check bounds for rvalues ('aa[key]').
    // Lvalue use ('aa[key] = value') auto-adds an element.
    if (!lvalue && global.params.useArrayBounds) {
        llvm::BasicBlock* oldend = gIR->scopeend();
        llvm::BasicBlock* failbb = llvm::BasicBlock::Create(gIR->context(), "aaboundscheckfail", gIR->topfunc(), oldend);
        llvm::BasicBlock* okbb = llvm::BasicBlock::Create(gIR->context(), "aaboundsok", gIR->topfunc(), oldend);

        LLValue* nullaa = LLConstant::getNullValue(ret->getType());
        LLValue* cond = gIR->ir->CreateICmpNE(nullaa, ret, "aaboundscheck");

        gIR->ir->CreateCondBr(cond, okbb, failbb);

        // set up failbb to call the array bounds error runtime function
        gIR->scope() = IRScope(failbb, okbb);

        std::vector<LLValue*> args;

#if DMDV2
        // module param
        LLValue *moduleInfoSymbol = gIR->func()->decl->getModule()->moduleInfoSymbol();
        LLType *moduleInfoType = DtoType(Module::moduleinfo->type);
        args.push_back(DtoBitCast(moduleInfoSymbol, getPtrToType(moduleInfoType)));
#else
        // file param
        IrModule* irmod = getIrModule(NULL);
        args.push_back(DtoLoad(irmod->fileName));
#endif

        // line param
        LLConstant* c = DtoConstUint(loc.linnum);
        args.push_back(c);

        // call
        llvm::Function* errorfn = LLVM_D_GetRuntimeFunction(gIR->module, "_d_array_bounds");
        gIR->CreateCallOrInvoke(errorfn, args);

        // the function does not return
        gIR->ir->CreateUnreachable();

        // if ok, proceed in okbb
        gIR->scope() = IRScope(okbb, oldend);
    }
    return new DVarValue(type, ret);
}
LLValue *DSpecialRefValue::getLVal() { return DtoLoad(val); }
LLValue *get(Type *dty, LLValue *v) override { return DtoLoad(v, ".ImplicitByvalRewrite_getResult"); }
LLValue* DtoNestedContext(Loc loc, Dsymbol* sym) { Logger::println("DtoNestedContext for %s", sym->toPrettyChars()); LOG_SCOPE; IrFunction* irfunc = gIR->func(); bool fromParent = true; LLValue* val; // if this func has its own vars that are accessed by nested funcs // use its own context if (irfunc->nestedVar) { val = irfunc->nestedVar; fromParent = false; } // otherwise, it may have gotten a context from the caller else if (irfunc->nestArg) val = DtoLoad(irfunc->nestArg); // or just have a this argument else if (irfunc->thisArg) { AggregateDeclaration* ad = irfunc->decl->isMember2(); val = ad->isClassDeclaration() ? DtoLoad(irfunc->thisArg) : irfunc->thisArg; if (!ad->vthis) { // This is just a plain 'outer' reference of a class nested in a // function (but without any variables in the nested context). return val; } val = DtoLoad(DtoGEPi(val, 0, ad->vthis->ir.irField->index, ".vthis")); } else { // Use null instead of e.g. LLVM's undef to not break bitwise // comparison for instances of nested struct types which don't have any // nested references. return llvm::ConstantPointerNull::get(getVoidPtrType()); } struct FuncDeclaration* fd = 0; if (AggregateDeclaration *ad = sym->isAggregateDeclaration()) // If sym is a nested struct or a nested class, pass the frame // of the function where sym is declared. fd = ad->toParent()->isFuncDeclaration(); else if (FuncDeclaration* symfd = sym->isFuncDeclaration()) { // Make sure we've had a chance to analyze nested context usage DtoCreateNestedContextType(symfd); // if this is for a function that doesn't access variables from // enclosing scopes, it doesn't matter what we pass. // Tell LLVM about it by passing an 'undef'. if (symfd && symfd->ir.irFunc->depth == -1) return llvm::UndefValue::get(getVoidPtrType()); // If sym is a nested function, and it's parent context is different than the // one we got, adjust it. fd = getParentFunc(symfd, true); } if (fd) { Logger::println("For nested function, parent is %s", fd->toChars()); FuncDeclaration* ctxfd = irfunc->decl; Logger::println("Current function is %s", ctxfd->toChars()); if (fromParent) { ctxfd = getParentFunc(ctxfd, true); assert(ctxfd && "Context from outer function, but no outer function?"); } Logger::println("Context is from %s", ctxfd->toChars()); unsigned neededDepth = fd->ir.irFunc->depth; unsigned ctxDepth = ctxfd->ir.irFunc->depth; Logger::cout() << "Needed depth: " << neededDepth << '\n'; Logger::cout() << "Context depth: " << ctxDepth << '\n'; if (neededDepth >= ctxDepth) { // assert(neededDepth <= ctxDepth + 1 && "How are we going more than one nesting level up?"); // fd needs the same context as we do, so all is well Logger::println("Calling sibling function or directly nested function"); } else { val = DtoBitCast(val, LLPointerType::getUnqual(ctxfd->ir.irFunc->frameType)); val = DtoGEPi(val, 0, neededDepth); val = DtoAlignedLoad(val, (std::string(".frame.") + fd->toChars()).c_str()); } } Logger::cout() << "result = " << *val << '\n'; Logger::cout() << "of type " << *val->getType() << '\n'; return val; }
void AsmBlockStatement_toIR(AsmBlockStatement *stmt, IRState* p) { IF_LOG Logger::println("AsmBlockStatement::toIR(): %s", stmt->loc.toChars()); LOG_SCOPE; // disable inlining by default if (!p->func()->decl->allowInlining) p->func()->setNeverInline(); // create asm block structure assert(!p->asmBlock); IRAsmBlock* asmblock = new IRAsmBlock(stmt); assert(asmblock); p->asmBlock = asmblock; // do asm statements for (unsigned i=0; i < stmt->statements->dim; i++) { Statement* s = static_cast<Statement*>(stmt->statements->data[i]); if (s) { Statement_toIR(s, p); } } // build forwarder for in-asm branches to external labels // this additional asm code sets the __llvm_jump_target variable // to a unique value that will identify the jump target in // a post-asm switch // maps each goto destination to its special value std::map<LabelDsymbol*, int> gotoToVal; // location of the special value determining the goto label // will be set if post-asm dispatcher block is needed llvm::AllocaInst* jump_target = 0; { FuncDeclaration* fd = gIR->func()->decl; const char* fdmangle = mangle(fd); // we use a simple static counter to make sure the new end labels are unique static size_t uniqueLabelsId = 0; std::ostringstream asmGotoEndLabel; printLabelName(asmGotoEndLabel, fdmangle, "_llvm_asm_end"); asmGotoEndLabel << uniqueLabelsId++; // initialize the setter statement we're going to build IRAsmStmt* outSetterStmt = new IRAsmStmt; std::string asmGotoEnd = "\n\tjmp "+asmGotoEndLabel.str()+"\n"; std::ostringstream code; code << asmGotoEnd; int n_goto = 1; size_t n = asmblock->s.size(); for(size_t i=0; i<n; ++i) { IRAsmStmt* a = asmblock->s[i]; // skip non-branch statements if(!a->isBranchToLabel) continue; // if internal, no special handling is necessary, skip std::vector<Identifier*>::const_iterator it, end; end = asmblock->internalLabels.end(); bool skip = false; for(it = asmblock->internalLabels.begin(); it != end; ++it) if((*it)->equals(a->isBranchToLabel->ident)) skip = true; if(skip) continue; // if we already set things up for this branch target, skip if(gotoToVal.find(a->isBranchToLabel) != gotoToVal.end()) continue; // record that the jump needs to be handled in the post-asm dispatcher gotoToVal[a->isBranchToLabel] = n_goto; // provide an in-asm target for the branch and set value IF_LOG Logger::println("statement '%s' references outer label '%s': creating forwarder", a->code.c_str(), a->isBranchToLabel->ident->string); printLabelName(code, fdmangle, a->isBranchToLabel->ident->string); code << ":\n\t"; code << "movl $<<in" << n_goto << ">>, $<<out0>>\n"; //FIXME: Store the value -> label mapping somewhere, so it can be referenced later outSetterStmt->in.push_back(DtoConstUint(n_goto)); outSetterStmt->in_c += "i,"; code << asmGotoEnd; ++n_goto; } if(code.str() != asmGotoEnd) { // finalize code outSetterStmt->code = code.str(); outSetterStmt->code += asmGotoEndLabel.str()+":\n"; // create storage for and initialize the temporary jump_target = DtoAlloca(Type::tint32, "__llvm_jump_target"); gIR->ir->CreateStore(DtoConstUint(0), jump_target); // setup variable for output from asm outSetterStmt->out_c = "=*m,"; outSetterStmt->out.push_back(jump_target); asmblock->s.push_back(outSetterStmt); } else delete outSetterStmt; } // build a fall-off-end-properly asm statement FuncDeclaration* thisfunc = p->func()->decl; bool useabiret = false; p->asmBlock->asmBlock->abiret = NULL; if (thisfunc->fbody->endsWithAsm() == stmt && thisfunc->type->nextOf()->ty != Tvoid) { // there can't be goto forwarders in this case 
assert(gotoToVal.empty()); emitABIReturnAsmStmt(asmblock, stmt->loc, thisfunc); useabiret = true; } // build asm block std::vector<LLValue*> outargs; std::vector<LLValue*> inargs; std::vector<LLType*> outtypes; std::vector<LLType*> intypes; std::string out_c; std::string in_c; std::string clobbers; std::string code; size_t asmIdx = asmblock->retn; Logger::println("do outputs"); size_t n = asmblock->s.size(); for (size_t i=0; i<n; ++i) { IRAsmStmt* a = asmblock->s[i]; assert(a); size_t onn = a->out.size(); for (size_t j=0; j<onn; ++j) { outargs.push_back(a->out[j]); outtypes.push_back(a->out[j]->getType()); } if (!a->out_c.empty()) { out_c += a->out_c; } remap_outargs(a->code, onn+a->in.size(), asmIdx); asmIdx += onn; } Logger::println("do inputs"); for (size_t i=0; i<n; ++i) { IRAsmStmt* a = asmblock->s[i]; assert(a); size_t inn = a->in.size(); for (size_t j=0; j<inn; ++j) { inargs.push_back(a->in[j]); intypes.push_back(a->in[j]->getType()); } if (!a->in_c.empty()) { in_c += a->in_c; } remap_inargs(a->code, inn+a->out.size(), asmIdx); asmIdx += inn; if (!code.empty()) code += "\n\t"; code += a->code; } asmblock->s.clear(); // append inputs out_c += in_c; // append clobbers typedef std::set<std::string>::iterator clobs_it; for (clobs_it i=asmblock->clobs.begin(); i!=asmblock->clobs.end(); ++i) { out_c += *i; } // remove excessive comma if (!out_c.empty()) out_c.resize(out_c.size()-1); IF_LOG { Logger::println("code = \"%s\"", code.c_str()); Logger::println("constraints = \"%s\"", out_c.c_str()); } // build return types LLType* retty; if (asmblock->retn) retty = asmblock->retty; else retty = llvm::Type::getVoidTy(gIR->context()); // build argument types std::vector<LLType*> types; types.insert(types.end(), outtypes.begin(), outtypes.end()); types.insert(types.end(), intypes.begin(), intypes.end()); llvm::FunctionType* fty = llvm::FunctionType::get(retty, types, false); IF_LOG Logger::cout() << "function type = " << *fty << '\n'; std::vector<LLValue*> args; args.insert(args.end(), outargs.begin(), outargs.end()); args.insert(args.end(), inargs.begin(), inargs.end()); IF_LOG { Logger::cout() << "Arguments:" << '\n'; Logger::indent(); for (std::vector<LLValue*>::iterator b = args.begin(), i = b, e = args.end(); i != e; ++i) { Stream cout = Logger::cout(); cout << '$' << (i - b) << " ==> " << **i; if (!llvm::isa<llvm::Instruction>(*i) && !llvm::isa<LLGlobalValue>(*i)) cout << '\n'; } Logger::undent(); } llvm::InlineAsm* ia = llvm::InlineAsm::get(fty, code, out_c, true); llvm::CallInst* call = p->ir->CreateCall(ia, args, retty == LLType::getVoidTy(gIR->context()) ? 
"" : "asm"); IF_LOG Logger::cout() << "Complete asm statement: " << *call << '\n'; // capture abi return value if (useabiret) { IRAsmBlock* block = p->asmBlock; if (block->retfixup) block->asmBlock->abiret = (*block->retfixup)(p->ir, call); else if (p->asmBlock->retemu) block->asmBlock->abiret = DtoLoad(block->asmBlock->abiret); else block->asmBlock->abiret = call; } p->asmBlock = NULL; // if asm contained external branches, emit goto forwarder code if(!gotoToVal.empty()) { assert(jump_target); // make new blocks llvm::BasicBlock* oldend = gIR->scopeend(); llvm::BasicBlock* bb = llvm::BasicBlock::Create(gIR->context(), "afterasmgotoforwarder", p->topfunc(), oldend); llvm::LoadInst* val = p->ir->CreateLoad(jump_target, "__llvm_jump_target_value"); llvm::SwitchInst* sw = p->ir->CreateSwitch(val, bb, gotoToVal.size()); // add all cases std::map<LabelDsymbol*, int>::iterator it, end = gotoToVal.end(); for(it = gotoToVal.begin(); it != end; ++it) { llvm::BasicBlock* casebb = llvm::BasicBlock::Create(gIR->context(), "case", p->topfunc(), bb); sw->addCase(LLConstantInt::get(llvm::IntegerType::get(gIR->context(), 32), it->second), casebb); p->scope() = IRScope(casebb,bb); DtoGoto(stmt->loc, it->first, stmt->enclosingFinally); } p->scope() = IRScope(bb,oldend); } }
void TryCatchScope::emitCatchBodies(IRState &irs, llvm::Value *ehPtrSlot) { assert(catchBlocks.empty()); auto &PGO = irs.funcGen().pgo; const auto entryCount = PGO.setCurrentStmt(stmt); struct CBPrototype { Type *t; llvm::BasicBlock *catchBB; uint64_t catchCount; uint64_t uncaughtCount; }; llvm::SmallVector<CBPrototype, 8> cbPrototypes; cbPrototypes.reserve(stmt->catches->dim); for (auto c : *stmt->catches) { auto catchBB = irs.insertBBBefore(endbb, llvm::Twine("catch.") + c->type->toChars()); irs.scope() = IRScope(catchBB); irs.DBuilder.EmitBlockStart(c->loc); PGO.emitCounterIncrement(c); bool isCPPclass = false; if (auto lp = c->langPlugin()) // CALYPSO lp->codegen()->toBeginCatch(irs, c); else { const auto cd = c->type->toBasetype()->isClassHandle(); isCPPclass = cd->isCPPclass(); const auto enterCatchFn = getRuntimeFunction( Loc(), irs.module, isCPPclass ? "__cxa_begin_catch" : "_d_eh_enter_catch"); const auto ptr = DtoLoad(ehPtrSlot); const auto throwableObj = irs.ir->CreateCall(enterCatchFn, ptr); // For catches that use the Throwable object, create storage for it. // We will set it in the code that branches from the landing pads // (there might be more than one) to catchBB. if (c->var) { // This will alloca if we haven't already and take care of nested refs // if there are any. DtoDeclarationExp(c->var); // Copy the exception reference over from the _d_eh_enter_catch return // value. DtoStore(DtoBitCast(throwableObj, DtoType(c->var->type)), getIrLocal(c->var)->value); } } // Emit handler, if there is one. The handler is zero, for instance, // when building 'catch { debug foo(); }' in non-debug mode. if (isCPPclass) { // from DMD: /* C++ catches need to end with call to __cxa_end_catch(). * Create: * try { handler } finally { __cxa_end_catch(); } * Note that this is worst case code because it always sets up an * exception handler. At some point should try to do better. */ FuncDeclaration *fdend = FuncDeclaration::genCfunc(nullptr, Type::tvoid, "__cxa_end_catch"); Expression *efunc = VarExp::create(Loc(), fdend); Expression *ecall = CallExp::create(Loc(), efunc); ecall->type = Type::tvoid; Statement *call = ExpStatement::create(Loc(), ecall); Statement *stmt = c->handler ? TryFinallyStatement::create(Loc(), c->handler, call) : call; Statement_toIR(stmt, &irs); } else { if (c->handler) Statement_toIR(c->handler, &irs); } if (!irs.scopereturned()) { // CALYPSO FIXME: _cxa_end_catch won't be called if it has already returned if (auto lp = c->langPlugin()) lp->codegen()->toEndCatch(irs, c); irs.ir->CreateBr(endbb); } irs.DBuilder.EmitBlockEnd(); // PGO information, currently unused auto catchCount = PGO.getRegionCount(c); // uncaughtCount is handled in a separate pass below cbPrototypes.push_back({c->type->toBasetype(), catchBB, catchCount, 0}); // CALYPSO } // Total number of uncaught exceptions is equal to the execution count at // the start of the try block minus the one after the continuation. // uncaughtCount keeps track of the exception type mismatch count while // iterating through the catch block prototypes in reversed order. auto uncaughtCount = entryCount - PGO.getRegionCount(stmt); for (auto it = cbPrototypes.rbegin(), end = cbPrototypes.rend(); it != end; ++it) { it->uncaughtCount = uncaughtCount; // Add this catch block's match count to the uncaughtCount, because these // failed to match the remaining (lexically preceding) catch blocks. 
uncaughtCount += it->catchCount; } catchBlocks.reserve(stmt->catches->dim); auto c_it = stmt->catches->begin(); // CALYPSO for (const auto &p : cbPrototypes) { auto branchWeights = PGO.createProfileWeights(p.catchCount, p.uncaughtCount); LLGlobalVariable *ci; if (auto lp = (*c_it)->langPlugin()) // CALYPSO ci = lp->codegen()->toCatchScopeType(irs, p.t); else { ClassDeclaration *cd = p.t->isClassHandle(); DtoResolveClass(cd); if (cd->isCPPclass()) { const char *name = Target::cppTypeInfoMangle(cd); auto cpp_ti = getOrCreateGlobal( cd->loc, irs.module, getVoidPtrType(), /*isConstant=*/true, LLGlobalValue::ExternalLinkage, /*init=*/nullptr, name); // Wrap std::type_info pointers inside a __cpp_type_info_ptr class instance so that // the personality routine may differentiate C++ catch clauses from D ones. OutBuffer mangleBuf; mangleBuf.writestring("_D"); mangleToBuffer(cd, &mangleBuf); mangleBuf.printf("%d%s", 18, "_cpp_type_info_ptr"); const auto wrapperMangle = getIRMangledVarName(mangleBuf.peekString(), LINKd); RTTIBuilder b(ClassDeclaration::cpp_type_info_ptr); b.push(cpp_ti); auto wrapperType = llvm::cast<llvm::StructType>( static_cast<IrTypeClass*>(ClassDeclaration::cpp_type_info_ptr->type->ctype)->getMemoryLLType()); auto wrapperInit = b.get_constant(wrapperType); ci = getOrCreateGlobal( cd->loc, irs.module, wrapperType, /*isConstant=*/true, LLGlobalValue::LinkOnceODRLinkage, wrapperInit, wrapperMangle); } else { ci = getIrAggr(cd)->getClassInfoSymbol(); } } catchBlocks.push_back({ci, p.catchBB, branchWeights}); c_it++; } }
DLValue *DSpecialRefValue::getLVal() { return new DLValue(type, DtoLoad(val)); }
DRValue *DSpecialRefValue::getRVal() { return DLValue(type, DtoLoad(val)).getRVal(); }
void TryCatchScope::emitCatchBodies(IRState &irs, llvm::Value *ehPtrSlot) {
  assert(catchBlocks.empty());

  auto &PGO = irs.funcGen().pgo;
  const auto entryCount = PGO.setCurrentStmt(stmt);

  struct CBPrototype {
    ClassDeclaration *cd;
    llvm::BasicBlock *catchBB;
    uint64_t catchCount;
    uint64_t uncaughtCount;
  };
  llvm::SmallVector<CBPrototype, 8> cbPrototypes;
  cbPrototypes.reserve(stmt->catches->dim);

  for (auto c : *stmt->catches) {
    auto catchBB =
        irs.insertBBBefore(endbb, llvm::Twine("catch.") + c->type->toChars());
    irs.scope() = IRScope(catchBB);
    irs.DBuilder.EmitBlockStart(c->loc);
    PGO.emitCounterIncrement(c);

    const auto enterCatchFn =
        getRuntimeFunction(Loc(), irs.module, "_d_eh_enter_catch");
    auto ptr = DtoLoad(ehPtrSlot);
    auto throwableObj = irs.ir->CreateCall(enterCatchFn, ptr);

    // For catches that use the Throwable object, create storage for it.
    // We will set it in the code that branches from the landing pads
    // (there might be more than one) to catchBB.
    if (c->var) {
      // This will alloca if we haven't already and take care of nested refs
      // if there are any.
      DtoDeclarationExp(c->var);

      // Copy the exception reference over from the _d_eh_enter_catch return
      // value.
      DtoStore(DtoBitCast(throwableObj, DtoType(c->var->type)),
               getIrLocal(c->var)->value);
    }

    // Emit handler, if there is one. The handler is zero, for instance,
    // when building 'catch { debug foo(); }' in non-debug mode.
    if (c->handler)
      Statement_toIR(c->handler, &irs);

    if (!irs.scopereturned())
      irs.ir->CreateBr(endbb);

    irs.DBuilder.EmitBlockEnd();

    // PGO information, currently unused
    auto catchCount = PGO.getRegionCount(c);
    // uncaughtCount is handled in a separate pass below

    auto cd = c->type->toBasetype()->isClassHandle();
    cbPrototypes.push_back({cd, catchBB, catchCount, 0});
  }

  // Total number of uncaught exceptions is equal to the execution count at
  // the start of the try block minus the one after the continuation.
  // uncaughtCount keeps track of the exception type mismatch count while
  // iterating through the catch block prototypes in reversed order.
  auto uncaughtCount = entryCount - PGO.getRegionCount(stmt);
  for (auto it = cbPrototypes.rbegin(), end = cbPrototypes.rend(); it != end;
       ++it) {
    it->uncaughtCount = uncaughtCount;
    // Add this catch block's match count to the uncaughtCount, because these
    // failed to match the remaining (lexically preceding) catch blocks.
    uncaughtCount += it->catchCount;
  }

  catchBlocks.reserve(stmt->catches->dim);

  for (const auto &p : cbPrototypes) {
    auto branchWeights =
        PGO.createProfileWeights(p.catchCount, p.uncaughtCount);
    DtoResolveClass(p.cd);
    auto ci = getIrAggr(p.cd)->getClassInfoSymbol();
    catchBlocks.push_back({ci, p.catchBB, branchWeights});
  }
}
void DtoCreateNestedContext(FuncDeclaration* fd) { Logger::println("DtoCreateNestedContext for %s", fd->toChars()); LOG_SCOPE DtoCreateNestedContextType(fd); // construct nested variables array if (!fd->nestedVars.empty()) { IrFunction* irfunction = fd->ir.irFunc; unsigned depth = irfunction->depth; LLStructType *frameType = irfunction->frameType; // Create frame for current function and append to frames list // FIXME: alignment ? LLValue* frame = 0; if (fd->needsClosure()) frame = DtoGcMalloc(frameType, ".frame"); else frame = DtoRawAlloca(frameType, 0, ".frame"); // copy parent frames into beginning if (depth != 0) { LLValue* src = irfunction->nestArg; if (!src) { assert(irfunction->thisArg); assert(fd->isMember2()); LLValue* thisval = DtoLoad(irfunction->thisArg); AggregateDeclaration* cd = fd->isMember2(); assert(cd); assert(cd->vthis); Logger::println("Indexing to 'this'"); if (cd->isStructDeclaration()) src = DtoExtractValue(thisval, cd->vthis->ir.irField->index, ".vthis"); else src = DtoLoad(DtoGEPi(thisval, 0, cd->vthis->ir.irField->index, ".vthis")); } else { src = DtoLoad(src); } if (depth > 1) { src = DtoBitCast(src, getVoidPtrType()); LLValue* dst = DtoBitCast(frame, getVoidPtrType()); DtoMemCpy(dst, src, DtoConstSize_t((depth-1) * PTRSIZE), getABITypeAlign(getVoidPtrType())); } // Copy nestArg into framelist; the outer frame is not in the list of pointers src = DtoBitCast(src, frameType->getContainedType(depth-1)); LLValue* gep = DtoGEPi(frame, 0, depth-1); DtoAlignedStore(src, gep); } // store context in IrFunction irfunction->nestedVar = frame; // go through all nested vars and assign addresses where possible. for (std::set<VarDeclaration*>::iterator i=fd->nestedVars.begin(); i!=fd->nestedVars.end(); ++i) { VarDeclaration* vd = *i; LLValue* gep = DtoGEPi(frame, 0, vd->ir.irLocal->nestedIndex, vd->toChars()); if (vd->isParameter()) { Logger::println("nested param: %s", vd->toChars()); LOG_SCOPE IrParameter* parm = vd->ir.irParam; if (parm->arg->byref) { storeVariable(vd, gep); } else { Logger::println("Copying to nested frame"); // The parameter value is an alloca'd stack slot. // Copy to the nesting frame and leave the alloca for // the optimizers to clean up. DtoStore(DtoLoad(parm->value), gep); gep->takeName(parm->value); parm->value = gep; } } else { Logger::println("nested var: %s", vd->toChars()); assert(!vd->ir.irLocal->value); vd->ir.irLocal->value = gep; } if (global.params.symdebug) { LLSmallVector<LLValue*, 2> addr; dwarfOpOffset(addr, frameType, vd->ir.irLocal->nestedIndex); DtoDwarfLocalVariable(frame, vd, addr); } } } }
static void addExplicitArguments(std::vector<LLValue *> &args, AttrSet &attrs, IrFuncTy &irFty, LLFunctionType *callableTy, const std::vector<DValue *> &argvals, int numFormalParams) { // Number of arguments added to the LLVM type that are implicit on the // frontend side of things (this, context pointers, etc.) const size_t implicitLLArgCount = args.size(); // Number of formal arguments in the LLVM type (i.e. excluding varargs). const size_t formalLLArgCount = irFty.args.size(); // The number of explicit arguments in the D call expression (including // varargs), not all of which necessarily generate a LLVM argument. const size_t explicitDArgCount = argvals.size(); // construct and initialize an IrFuncTyArg object for each vararg std::vector<IrFuncTyArg *> optionalIrArgs; for (size_t i = numFormalParams; i < explicitDArgCount; i++) { Type *argType = argvals[i]->getType(); bool passByVal = gABI->passByVal(argType); AttrBuilder initialAttrs; if (passByVal) { initialAttrs.add(LLAttribute::ByVal); } else { initialAttrs.add(DtoShouldExtend(argType)); } optionalIrArgs.push_back(new IrFuncTyArg(argType, passByVal, initialAttrs)); optionalIrArgs.back()->parametersIdx = i; } // let the ABI rewrite the IrFuncTyArg objects gABI->rewriteVarargs(irFty, optionalIrArgs); const size_t explicitLLArgCount = formalLLArgCount + optionalIrArgs.size(); args.resize(implicitLLArgCount + explicitLLArgCount, static_cast<llvm::Value *>(nullptr)); // Iterate the explicit arguments from left to right in the D source, // which is the reverse of the LLVM order if irFty.reverseParams is true. for (size_t i = 0; i < explicitLLArgCount; ++i) { const bool isVararg = (i >= irFty.args.size()); IrFuncTyArg *irArg = nullptr; if (isVararg) { irArg = optionalIrArgs[i - numFormalParams]; } else { irArg = irFty.args[i]; } DValue *const argval = argvals[irArg->parametersIdx]; Type *const argType = argval->getType(); llvm::Value *llVal = nullptr; if (isVararg) { llVal = irFty.putParam(*irArg, argval); } else { llVal = irFty.putParam(i, argval); } const size_t llArgIdx = implicitLLArgCount + (irFty.reverseParams ? explicitLLArgCount - i - 1 : i); llvm::Type *const callableArgType = (isVararg ? nullptr : callableTy->getParamType(llArgIdx)); // Hack around LDC assuming structs and static arrays are in memory: // If the function wants a struct, and the argument value is a // pointer to a struct, load from it before passing it in. if (isaPointer(llVal) && DtoIsPassedByRef(argType) && ((!isVararg && !isaPointer(callableArgType)) || (isVararg && !irArg->byref && !irArg->isByVal()))) { Logger::println("Loading struct type for function argument"); llVal = DtoLoad(llVal); } // parameter type mismatch, this is hard to get rid of if (!isVararg && llVal->getType() != callableArgType) { IF_LOG { Logger::cout() << "arg: " << *llVal << '\n'; Logger::cout() << "expects: " << *callableArgType << '\n'; } if (isaStruct(llVal)) { llVal = DtoAggrPaint(llVal, callableArgType); } else { llVal = DtoBitCast(llVal, callableArgType); } } args[llArgIdx] = llVal; // +1 as index 0 contains the function attributes. attrs.add(llArgIdx + 1, irArg->attrs); if (isVararg) { delete irArg; } }
void DtoCreateNestedContext(FuncDeclaration* fd) { Logger::println("DtoCreateNestedContext for %s", fd->toChars()); LOG_SCOPE DtoCreateNestedContextType(fd); if (nestedCtx == NCArray) { // construct nested variables array if (!fd->nestedVars.empty()) { Logger::println("has nested frame"); // start with adding all enclosing parent frames until a static parent is reached int nparelems = 0; if (!fd->isStatic()) { Dsymbol* par = fd->toParent2(); while (par) { if (FuncDeclaration* parfd = par->isFuncDeclaration()) { nparelems += parfd->nestedVars.size(); // stop at first static if (parfd->isStatic()) break; } else if (par->isClassDeclaration()) { // nothing needed } else { break; } par = par->toParent2(); } } int nelems = fd->nestedVars.size() + nparelems; // make array type for nested vars LLType* nestedVarsTy = LLArrayType::get(getVoidPtrType(), nelems); // alloca it // FIXME align ? LLValue* nestedVars = DtoRawAlloca(nestedVarsTy, 0, ".nested_vars"); IrFunction* irfunction = fd->ir.irFunc; // copy parent frame into beginning if (nparelems) { LLValue* src = irfunction->nestArg; if (!src) { assert(irfunction->thisArg); assert(fd->isMember2()); LLValue* thisval = DtoLoad(irfunction->thisArg); ClassDeclaration* cd = fd->isMember2()->isClassDeclaration(); assert(cd); assert(cd->vthis); src = DtoLoad(DtoGEPi(thisval, 0,cd->vthis->ir.irField->index, ".vthis")); } else { src = DtoLoad(src); } DtoMemCpy(nestedVars, src, DtoConstSize_t(nparelems*PTRSIZE), getABITypeAlign(getVoidPtrType())); } // store in IrFunction irfunction->nestedVar = nestedVars; // go through all nested vars and assign indices int idx = nparelems; for (std::set<VarDeclaration*>::iterator i=fd->nestedVars.begin(); i!=fd->nestedVars.end(); ++i) { VarDeclaration* vd = *i; if (!vd->ir.irLocal) vd->ir.irLocal = new IrLocal(vd); if (vd->isParameter()) { Logger::println("nested param: %s", vd->toChars()); LLValue* gep = DtoGEPi(nestedVars, 0, idx); LLValue* val = DtoBitCast(vd->ir.irLocal->value, getVoidPtrType()); DtoAlignedStore(val, gep); } else { Logger::println("nested var: %s", vd->toChars()); } vd->ir.irLocal->nestedIndex = idx++; } } } else if (nestedCtx == NCHybrid) { // construct nested variables array if (!fd->nestedVars.empty()) { IrFunction* irfunction = fd->ir.irFunc; unsigned depth = irfunction->depth; LLStructType *frameType = irfunction->frameType; // Create frame for current function and append to frames list // FIXME: alignment ? 
LLValue* frame = 0; #if DMDV2 if (fd->needsClosure()) frame = DtoGcMalloc(frameType, ".frame"); else #endif frame = DtoRawAlloca(frameType, 0, ".frame"); // copy parent frames into beginning if (depth != 0) { LLValue* src = irfunction->nestArg; if (!src) { assert(irfunction->thisArg); assert(fd->isMember2()); LLValue* thisval = DtoLoad(irfunction->thisArg); #if DMDV2 AggregateDeclaration* cd = fd->isMember2(); #else ClassDeclaration* cd = fd->isMember2()->isClassDeclaration(); #endif assert(cd); assert(cd->vthis); Logger::println("Indexing to 'this'"); #if DMDV2 if (cd->isStructDeclaration()) src = DtoExtractValue(thisval, cd->vthis->ir.irField->index, ".vthis"); else #endif src = DtoLoad(DtoGEPi(thisval, 0, cd->vthis->ir.irField->index, ".vthis")); } else { src = DtoLoad(src); } if (depth > 1) { src = DtoBitCast(src, getVoidPtrType()); LLValue* dst = DtoBitCast(frame, getVoidPtrType()); DtoMemCpy(dst, src, DtoConstSize_t((depth-1) * PTRSIZE), getABITypeAlign(getVoidPtrType())); } // Copy nestArg into framelist; the outer frame is not in the list of pointers src = DtoBitCast(src, frameType->getContainedType(depth-1)); LLValue* gep = DtoGEPi(frame, 0, depth-1); DtoAlignedStore(src, gep); } // store context in IrFunction irfunction->nestedVar = frame; // go through all nested vars and assign addresses where possible. for (std::set<VarDeclaration*>::iterator i=fd->nestedVars.begin(); i!=fd->nestedVars.end(); ++i) { VarDeclaration* vd = *i; LLValue* gep = DtoGEPi(frame, 0, vd->ir.irLocal->nestedIndex, vd->toChars()); if (vd->isParameter()) { Logger::println("nested param: %s", vd->toChars()); LOG_SCOPE LLValue* value = vd->ir.irLocal->value; if (llvm::isa<llvm::AllocaInst>(llvm::GetUnderlyingObject(value))) { Logger::println("Copying to nested frame"); // The parameter value is an alloca'd stack slot. // Copy to the nesting frame and leave the alloca for // the optimizers to clean up. assert(!vd->ir.irLocal->byref); DtoStore(DtoLoad(value), gep); gep->takeName(value); vd->ir.irLocal->value = gep; } else { Logger::println("Adding pointer to nested frame"); // The parameter value is something else, such as a // passed-in pointer (for 'ref' or 'out' parameters) or // a pointer arg with byval attribute. // Store the address into the frame. assert(vd->ir.irLocal->byref); storeVariable(vd, gep); } } else if (vd->isRef() || vd->isOut()) { // This slot is initialized in DtoNestedInit, to handle things like byref foreach variables // which move around in memory. assert(vd->ir.irLocal->byref); } else { Logger::println("nested var: %s", vd->toChars()); if (vd->ir.irLocal->value) Logger::cout() << "Pre-existing value: " << *vd->ir.irLocal->value << '\n'; assert(!vd->ir.irLocal->value); vd->ir.irLocal->value = gep; assert(!vd->ir.irLocal->byref); } if (global.params.symdebug) { LLSmallVector<LLValue*, 2> addr; dwarfOpOffset(addr, frameType, vd->ir.irLocal->nestedIndex); DtoDwarfLocalVariable(frame, vd, addr); } } } else if (FuncDeclaration* parFunc = getParentFunc(fd, true)) { // Propagate context arg properties if the context arg is passed on unmodified. DtoDeclareFunction(parFunc); fd->ir.irFunc->frameType = parFunc->ir.irFunc->frameType; fd->ir.irFunc->depth = parFunc->ir.irFunc->depth; } } else { assert(0 && "Not implemented yet"); } }
DValue *DtoNestedVariable(Loc &loc, Type *astype, VarDeclaration *vd, bool byref) { IF_LOG Logger::println("DtoNestedVariable for %s @ %s", vd->toChars(), loc.toChars()); LOG_SCOPE; //////////////////////////////////// // Locate context value Dsymbol *vdparent = vd->toParent2(); assert(vdparent); IrFunction *irfunc = gIR->func(); // Check whether we can access the needed frame FuncDeclaration *fd = irfunc->decl; while (fd && fd != vdparent) { fd = getParentFunc(fd); } if (!fd) { error(loc, "function `%s` cannot access frame of function `%s`", irfunc->decl->toPrettyChars(), vdparent->toPrettyChars()); return new DLValue(astype, llvm::UndefValue::get(DtoPtrToType(astype))); } // is the nested variable in this scope? if (vdparent == irfunc->decl) { return makeVarDValue(astype, vd); } // get the nested context LLValue *ctx = nullptr; bool skipDIDeclaration = false; auto currentCtx = gIR->funcGen().nestedVar; if (currentCtx) { Logger::println("Using own nested context of current function"); ctx = currentCtx; } else if (irfunc->decl->isMember2()) { Logger::println( "Current function is member of nested class, loading vthis"); AggregateDeclaration *cd = irfunc->decl->isMember2(); LLValue *val = irfunc->thisArg; if (cd->isClassDeclaration()) { val = DtoLoad(val); } ctx = DtoLoad(DtoGEPi(val, 0, getVthisIdx(cd), ".vthis")); skipDIDeclaration = true; } else { Logger::println("Regular nested function, loading context arg"); ctx = DtoLoad(irfunc->nestArg); } assert(ctx); IF_LOG { Logger::cout() << "Context: " << *ctx << '\n'; } DtoCreateNestedContextType(vdparent->isFuncDeclaration()); assert(isIrLocalCreated(vd)); //////////////////////////////////// // Extract variable from nested context const auto frameType = LLPointerType::getUnqual(irfunc->frameType); IF_LOG { Logger::cout() << "casting to: " << *irfunc->frameType << '\n'; } LLValue *val = DtoBitCast(ctx, frameType); IrLocal *const irLocal = getIrLocal(vd); const auto vardepth = irLocal->nestedDepth; const auto funcdepth = irfunc->depth; IF_LOG { Logger::cout() << "Variable: " << vd->toChars() << '\n'; Logger::cout() << "Variable depth: " << vardepth << '\n'; Logger::cout() << "Function: " << irfunc->decl->toChars() << '\n'; Logger::cout() << "Function depth: " << funcdepth << '\n'; } if (vardepth == funcdepth) { // This is not always handled above because functions without // variables accessed by nested functions don't create new frames. IF_LOG Logger::println("Same depth"); } else { // Load frame pointer and index that... IF_LOG Logger::println("Lower depth"); val = DtoGEPi(val, 0, vardepth); IF_LOG Logger::cout() << "Frame index: " << *val << '\n'; val = DtoAlignedLoad( val, (std::string(".frame.") + vdparent->toChars()).c_str()); IF_LOG Logger::cout() << "Frame: " << *val << '\n'; } const auto idx = irLocal->nestedIndex; assert(idx != -1 && "Nested context not yet resolved for variable."); LLSmallVector<int64_t, 2> dwarfAddrOps; LLValue *gep = DtoGEPi(val, 0, idx, vd->toChars()); val = gep; IF_LOG { Logger::cout() << "Addr: " << *val << '\n'; Logger::cout() << "of type: " << *val->getType() << '\n'; } const bool isRefOrOut = vd->isRef() || vd->isOut(); if (isSpecialRefVar(vd)) { // Handled appropriately by makeVarDValue() and EmitLocalVariable(), pass // storage of pointer (reference lvalue). } else if (byref || isRefOrOut) { val = DtoAlignedLoad(val); // ref/out variables get a reference-debuginfo-type in EmitLocalVariable(); // pass the GEP as reference lvalue in that case. 
if (!isRefOrOut) gIR->DBuilder.OpDeref(dwarfAddrOps); IF_LOG { Logger::cout() << "Was byref, now: " << *irLocal->value << '\n'; Logger::cout() << "of type: " << *irLocal->value->getType() << '\n'; } }
LLValue* get(Type* dty, DValue* v) {
    LLValue* pointer = v->getRVal();
    return DtoLoad(pointer, ".ImplicitByvalRewrite_getResult");
}
LLValue *prepareVaArg(LLValue *pAp) override {
  // pass a void* pointer to the actual __va_list struct to LLVM's va_arg
  // intrinsic
  return DtoLoad(pAp);
}
DValue* DtoNestedVariable(Loc loc, Type* astype, VarDeclaration* vd, bool byref) { Logger::println("DtoNestedVariable for %s @ %s", vd->toChars(), loc.toChars()); LOG_SCOPE; //////////////////////////////////// // Locate context value Dsymbol* vdparent = vd->toParent2(); assert(vdparent); IrFunction* irfunc = gIR->func(); // Check whether we can access the needed frame FuncDeclaration *fd = irfunc->decl; while (fd != vdparent) { if (fd->isStatic()) { error(loc, "function %s cannot access frame of function %s", irfunc->decl->toPrettyChars(), vdparent->toPrettyChars()); return new DVarValue(astype, vd, llvm::UndefValue::get(getPtrToType(DtoType(astype)))); } fd = getParentFunc(fd, false); assert(fd); } // is the nested variable in this scope? if (vdparent == irfunc->decl) { LLValue* val = vd->ir.getIrValue(); return new DVarValue(astype, vd, val); } LLValue *dwarfValue = 0; std::vector<LLValue*> dwarfAddr; LLType *int64Ty = LLType::getInt64Ty(gIR->context()); // get the nested context LLValue* ctx = 0; if (irfunc->decl->isMember2()) { #if DMDV2 AggregateDeclaration* cd = irfunc->decl->isMember2(); LLValue* val = irfunc->thisArg; if (cd->isClassDeclaration()) val = DtoLoad(val); #else ClassDeclaration* cd = irfunc->decl->isMember2()->isClassDeclaration(); LLValue* val = DtoLoad(irfunc->thisArg); #endif ctx = DtoLoad(DtoGEPi(val, 0,cd->vthis->ir.irField->index, ".vthis")); } else if (irfunc->nestedVar) { ctx = irfunc->nestedVar; dwarfValue = ctx; } else { ctx = DtoLoad(irfunc->nestArg); dwarfValue = irfunc->nestArg; if (global.params.symdebug) dwarfOpDeref(dwarfAddr); } assert(ctx); DtoCreateNestedContextType(vdparent->isFuncDeclaration()); assert(vd->ir.irLocal); //////////////////////////////////// // Extract variable from nested context if (nestedCtx == NCArray) { LLValue* val = DtoBitCast(ctx, getPtrToType(getVoidPtrType())); val = DtoGEPi1(val, vd->ir.irLocal->nestedIndex); val = DtoAlignedLoad(val); assert(vd->ir.irLocal->value); val = DtoBitCast(val, vd->ir.irLocal->value->getType(), vd->toChars()); return new DVarValue(astype, vd, val); } else if (nestedCtx == NCHybrid) { LLValue* val = DtoBitCast(ctx, LLPointerType::getUnqual(irfunc->frameType)); Logger::cout() << "Context: " << *val << '\n'; Logger::cout() << "of type: " << *val->getType() << '\n'; unsigned vardepth = vd->ir.irLocal->nestedDepth; unsigned funcdepth = irfunc->depth; Logger::cout() << "Variable: " << vd->toChars() << '\n'; Logger::cout() << "Variable depth: " << vardepth << '\n'; Logger::cout() << "Function: " << irfunc->decl->toChars() << '\n'; Logger::cout() << "Function depth: " << funcdepth << '\n'; if (vardepth == funcdepth) { // This is not always handled above because functions without // variables accessed by nested functions don't create new frames. Logger::println("Same depth"); } else { // Load frame pointer and index that... 
if (dwarfValue && global.params.symdebug) { dwarfOpOffset(dwarfAddr, val, vd->ir.irLocal->nestedDepth); dwarfOpDeref(dwarfAddr); } Logger::println("Lower depth"); val = DtoGEPi(val, 0, vd->ir.irLocal->nestedDepth); Logger::cout() << "Frame index: " << *val << '\n'; val = DtoAlignedLoad(val, (std::string(".frame.") + vdparent->toChars()).c_str()); Logger::cout() << "Frame: " << *val << '\n'; } if (dwarfValue && global.params.symdebug) dwarfOpOffset(dwarfAddr, val, vd->ir.irLocal->nestedIndex); val = DtoGEPi(val, 0, vd->ir.irLocal->nestedIndex, vd->toChars()); Logger::cout() << "Addr: " << *val << '\n'; Logger::cout() << "of type: " << *val->getType() << '\n'; if (vd->ir.irLocal->byref || byref) { val = DtoAlignedLoad(val); //dwarfOpDeref(dwarfAddr); Logger::cout() << "Was byref, now: " << *val << '\n'; Logger::cout() << "of type: " << *val->getType() << '\n'; } if (dwarfValue && global.params.symdebug) DtoDwarfLocalVariable(dwarfValue, vd, dwarfAddr); return new DVarValue(astype, vd, val); } else { assert(0 && "Not implemented yet"); } }