void vaCopy(DLValue *dest, DValue *src) override {
  // Analogous to va_start: first allocate a fresh __va_list struct on the
  // stack and make `dest` (a char* lvalue) point at it.
  LLValue *newValist = DtoRawAlloca(getValistType(), 0, "__va_list_mem");
  LLValue *destSlot =
      DtoBitCast(DtoLVal(dest), getPtrToType(newValist->getType()));
  DtoStore(newValist, destSlot);
  // Then fill the new struct with a bitcopy of the source struct;
  // `src` is a char* pointer to that source struct.
  DtoMemCpy(newValist, DtoRVal(src));
}
LLValue* prepareVaStart(LLValue* pAp) {
    // The user code only declared a char* (ap) on the stack before calling
    // va_start, so the actual __va_list struct must be allocated here, and
    // ap is redirected to point at it.
    LLValue* actualValist = DtoRawAlloca(getValistType(), 0, "__va_list_mem");
    LLValue* asVoidPtr = DtoBitCast(actualValist, getVoidPtrType());
    DtoStore(asVoidPtr, pAp); // ap = (void*)__va_list_mem
    // Hand LLVM's va_start intrinsic a void* pointer to the actual struct.
    return asVoidPtr;
}
LLValue *prepareVaStart(DLValue *ap) override {
  // `ap` is merely the char* slot the user created before invoking va_start;
  // allocate the actual __va_list struct and point `ap` at it.
  LLValue *valist = DtoRawAlloca(getValistType(), 0, "__va_list_mem");
  LLValue *apSlot = DtoBitCast(DtoLVal(ap), getPtrToType(valist->getType()));
  DtoStore(valist, apSlot);
  // LLVM's va_start intrinsic expects an i8* to the actual struct.
  return DtoBitCast(valist, getVoidPtrType());
}
// Get struct from ABI-mangled representation LLValue* get(Type* dty, DValue* v) { LLValue* lval; if (v->isLVal()) { lval = v->getLVal(); } else { // No memory location, create one. LLValue* rval = v->getRVal(); lval = DtoRawAlloca(rval->getType(), 0); DtoStore(rval, lval); } LLType* pTy = getPtrToType(DtoType(dty)); return DtoLoad(DtoBitCast(lval, pTy), "get-result"); }
// Turn a struct into an ABI-mangled representation LLValue* put(Type* dty, DValue* v) { LLValue* lval; if (v->isLVal()) { lval = v->getLVal(); } else { // No memory location, create one. LLValue* rval = v->getRVal(); lval = DtoRawAlloca(rval->getType(), 0); DtoStore(rval, lval); } LLType* abiTy = getAbiType(dty); assert(abiTy && "Why are we rewriting a non-rewritten type?"); LLType* pTy = getPtrToType(abiTy); return DtoLoad(DtoBitCast(lval, pTy), "put-result"); }
/// Lowers a `new` expression of class type `tc`: allocates memory (on the
/// stack for scope classes, via a user-supplied allocator, or through the
/// druntime GC helper), copies in the default initializer, wires up the
/// outer-class/nested context, and invokes the constructor if one is used.
/// Returns the instance pointer wrapped in a DValue.
DValue* DtoNewClass(Loc& loc, TypeClass* tc, NewExp* newexp)
{
    // resolve type
    DtoResolveClass(tc->sym);

    // allocate
    LLValue* mem;
    if (newexp->onstack)
    {
        // scope class: allocate the instance data directly on the stack
        // FIXME align scope class to its largest member
        mem = DtoRawAlloca(DtoType(tc)->getContainedType(0), 0, ".newclass_alloca");
    }
    // custom allocator
    else if (newexp->allocator)
    {
        DtoResolveFunction(newexp->allocator);
        DFuncValue dfn(newexp->allocator, getIrFunc(newexp->allocator)->func);
        DValue* res = DtoCallFunction(newexp->loc, NULL, &dfn, newexp->newargs);
        mem = DtoBitCast(res->getRVal(), DtoType(tc), ".newclass_custom");
    }
    // default allocator
    else
    {
        // GC allocation via druntime's _d_newclass; the ClassInfo argument
        // describes the instance to allocate.
        llvm::Function* fn = LLVM_D_GetRuntimeFunction(loc, gIR->module, "_d_newclass");
        LLConstant* ci = DtoBitCast(getIrAggr(tc->sym)->getClassInfoSymbol(), DtoType(Type::typeinfoclass->type));
        mem = gIR->CreateCallOrInvoke(fn, ci, ".newclass_gc_alloc").getInstruction();
        mem = DtoBitCast(mem, DtoType(tc), ".newclass_gc");
    }

    // init: copy the class's static initializer into the fresh memory
    DtoInitClass(tc, mem);

    // init inner-class outer reference
    if (newexp->thisexp)
    {
        Logger::println("Resolving outer class");
        LOG_SCOPE;
        DValue* thisval = toElem(newexp->thisexp);
        unsigned idx = getFieldGEPIndex(tc->sym, tc->sym->vthis);
        LLValue* src = thisval->getRVal();
        LLValue* dst = DtoGEPi(mem, 0, idx);
        IF_LOG Logger::cout() << "dst: " << *dst << "\nsrc: " << *src << '\n';
        DtoStore(src, DtoBitCast(dst, getPtrToType(src->getType())));
    }
    // set the context for nested classes
    else if (tc->sym->isNested() && tc->sym->vthis)
    {
        DtoResolveNestedContext(loc, tc->sym, mem);
    }

    // call constructor
    if (newexp->member)
    {
        Logger::println("Calling constructor");
        assert(newexp->arguments != NULL);
        DtoResolveFunction(newexp->member);
        DFuncValue dfn(newexp->member, getIrFunc(newexp->member)->func, mem);
        // The constructor call result is returned as the expression value.
        return DtoCallFunction(newexp->loc, tc, &dfn, newexp->arguments);
    }

    // return default constructed class
    return new DImValue(tc, mem);
}
llvm::AllocaInst *IrFunction::getOrCreateEhPtrSlot() {
  // Lazily allocate the stack slot that holds the landing pad's
  // exception-object pointer; reuse it on subsequent calls.
  if (ehPtrSlot == nullptr)
    ehPtrSlot = DtoRawAlloca(getVoidPtrType(), 0, "eh.ptr");
  return ehPtrSlot;
}
/// Emits the landing pad for the current catch/cleanup configuration:
/// extracts the exception pointer and selector into stack slots, then builds
/// the chain of type tests (interleaved with pending cleanups) that dispatches
/// to the matching catch body or resumes unwinding.
llvm::BasicBlock *ScopeStack::emitLandingPad() {
  // save and rewrite scope; restored at the end so callers are unaffected
  IRScope savedIRScope = irs->scope();
  llvm::BasicBlock *beginBB =
      llvm::BasicBlock::Create(irs->context(), "landingPad", irs->topfunc());
  irs->scope() = IRScope(beginBB);

  llvm::LandingPadInst *landingPad = createLandingPadInst(irs);

  // Stash away the exception object pointer and selector value into their
  // stack slots.
  llvm::Value *ehPtr = DtoExtractValue(landingPad, 0);
  irs->ir->CreateStore(ehPtr, irs->func()->getOrCreateEhPtrSlot());

  llvm::Value *ehSelector = DtoExtractValue(landingPad, 1);
  if (!irs->func()->ehSelectorSlot) {
    // Lazily create the selector slot on first use.
    irs->func()->ehSelectorSlot =
        DtoRawAlloca(ehSelector->getType(), 0, "eh.selector");
  }
  irs->ir->CreateStore(ehSelector, irs->func()->ehSelectorSlot);

  // Add landingpad clauses, emit finallys and 'if' chain to catch the
  // exception. Catch scopes are visited innermost-first (reverse order).
  CleanupCursor lastCleanup = currentCleanupScope();
  for (auto it = catchScopes.rbegin(), end = catchScopes.rend(); it != end;
       ++it) {
    // Insert any cleanups in between the last catch we ran (i.e. tested for
    // and found that the type does not match) and this one.
    assert(lastCleanup >= it->cleanupScope);
    if (lastCleanup > it->cleanupScope) {
      landingPad->setCleanup(true);
      llvm::BasicBlock *afterCleanupBB = llvm::BasicBlock::Create(
          irs->context(), beginBB->getName() + llvm::Twine(".after.cleanup"),
          irs->topfunc());
      runCleanups(lastCleanup, it->cleanupScope, afterCleanupBB);
      irs->scope() = IRScope(afterCleanupBB);
      lastCleanup = it->cleanupScope;
    }

    // Add the ClassInfo reference to the landingpad instruction so it is
    // emitted to the EH tables.
    landingPad->addClause(it->classInfoPtr);

    llvm::BasicBlock *mismatchBB = llvm::BasicBlock::Create(
        irs->context(), beginBB->getName() + llvm::Twine(".mismatch"),
        irs->topfunc());

    // "Call" llvm.eh.typeid.for, which gives us the eh selector value to
    // compare the landing pad selector value with.
    llvm::Value *ehTypeId =
        irs->ir->CreateCall(GET_INTRINSIC_DECL(eh_typeid_for),
                            DtoBitCast(it->classInfoPtr, getVoidPtrType()));

    // Compare the selector value from the unwinder against the expected
    // one and branch accordingly.
    irs->ir->CreateCondBr(
        irs->ir->CreateICmpEQ(irs->ir->CreateLoad(irs->func()->ehSelectorSlot),
                              ehTypeId),
        it->bodyBlock, mismatchBB);
    irs->scope() = IRScope(mismatchBB);
  }

  // No catch matched. Execute all finallys and resume unwinding.
  if (lastCleanup > 0) {
    landingPad->setCleanup(true);
    runCleanups(lastCleanup, 0, irs->func()->getOrCreateResumeUnwindBlock());
  } else if (!catchScopes.empty()) {
    // Directly convert the last mismatch branch into a branch to the
    // unwind resume block.
    irs->scopebb()->replaceAllUsesWith(
        irs->func()->getOrCreateResumeUnwindBlock());
    irs->scopebb()->eraseFromParent();
  } else {
    irs->ir->CreateBr(irs->func()->getOrCreateResumeUnwindBlock());
  }

  irs->scope() = savedIRScope;
  return beginBB;
}
llvm::AllocaInst *TryCatchFinallyScopes::getOrCreateEhPtrSlot() {
  // Create the exception-pointer stack slot on demand and cache it.
  if (ehPtrSlot == nullptr) {
    ehPtrSlot = DtoRawAlloca(getVoidPtrType(), 0, "eh.ptr");
  }
  return ehPtrSlot;
}
/// Emits the landing pad for the current try-catch/cleanup configuration:
/// spills the exception pointer and selector to stack slots, then builds the
/// chain of catch-type tests (interleaved with pending cleanups) that either
/// dispatches to a catch body or resumes unwinding.
llvm::BasicBlock *TryCatchFinallyScopes::emitLandingPad() {
#if LDC_LLVM_VER >= 308
  if (useMSVCEH()) {
    // MSVC EH takes a separate code path (funclet-based pads).
    assert(currentCleanupScope() > 0);
    return emitLandingPadMSVC(currentCleanupScope() - 1);
  }
#endif

  // save and rewrite scope; restored before returning
  IRScope savedIRScope = irs.scope();

  // insert landing pads at the end of the function, in emission order,
  // to improve human-readability of the IR
  llvm::BasicBlock *beginBB = irs.insertBBBefore(nullptr, "landingPad");
  irs.scope() = IRScope(beginBB);

  llvm::LandingPadInst *landingPad = createLandingPadInst(irs);

  // Stash away the exception object pointer and selector value into their
  // stack slots.
  llvm::Value *ehPtr = DtoExtractValue(landingPad, 0);
  irs.ir->CreateStore(ehPtr, getOrCreateEhPtrSlot());

  llvm::Value *ehSelector = DtoExtractValue(landingPad, 1);
  if (!ehSelectorSlot)
    // Lazily create the selector slot on first use.
    ehSelectorSlot = DtoRawAlloca(ehSelector->getType(), 0, "eh.selector");
  irs.ir->CreateStore(ehSelector, ehSelectorSlot);

  // Add landingpad clauses, emit finallys and 'if' chain to catch the
  // exception. Try-catch scopes are visited innermost-first (reverse order).
  CleanupCursor lastCleanup = currentCleanupScope();
  for (auto it = tryCatchScopes.rbegin(), end = tryCatchScopes.rend();
       it != end; ++it) {
    const auto &tryCatchScope = *it;

    // Insert any cleanups in between the previous (inner-more) try-catch scope
    // and this one.
    const auto newCleanup = tryCatchScope.getCleanupScope();
    assert(lastCleanup >= newCleanup);
    if (lastCleanup > newCleanup) {
      landingPad->setCleanup(true);
      llvm::BasicBlock *afterCleanupBB =
          irs.insertBB(beginBB->getName() + llvm::Twine(".after.cleanup"));
      runCleanups(lastCleanup, newCleanup, afterCleanupBB);
      irs.scope() = IRScope(afterCleanupBB);
      lastCleanup = newCleanup;
    }

    for (const auto &cb : tryCatchScope.getCatchBlocks()) {
      // Add the ClassInfo reference to the landingpad instruction so it is
      // emitted to the EH tables.
      landingPad->addClause(cb.classInfoPtr);

      llvm::BasicBlock *mismatchBB =
          irs.insertBB(beginBB->getName() + llvm::Twine(".mismatch"));

      // "Call" llvm.eh.typeid.for, which gives us the eh selector value to
      // compare the landing pad selector value with.
      llvm::Value *ehTypeId =
          irs.ir->CreateCall(GET_INTRINSIC_DECL(eh_typeid_for),
                             DtoBitCast(cb.classInfoPtr, getVoidPtrType()));

      // Compare the selector value from the unwinder against the expected
      // one and branch accordingly.
      irs.ir->CreateCondBr(
          irs.ir->CreateICmpEQ(irs.ir->CreateLoad(ehSelectorSlot), ehTypeId),
          cb.bodyBB, mismatchBB, cb.branchWeights);
      irs.scope() = IRScope(mismatchBB);
    }
  }

  // No catch matched. Execute all finallys and resume unwinding.
  auto resumeUnwindBlock = getOrCreateResumeUnwindBlock();
  if (lastCleanup > 0) {
    landingPad->setCleanup(true);
    runCleanups(lastCleanup, 0, resumeUnwindBlock);
  } else if (!tryCatchScopes.empty()) {
    // Directly convert the last mismatch branch into a branch to the
    // unwind resume block.
    irs.scopebb()->replaceAllUsesWith(resumeUnwindBlock);
    irs.scopebb()->eraseFromParent();
  } else {
    irs.ir->CreateBr(resumeUnwindBlock);
  }

  irs.scope() = savedIRScope;
  return beginBB;
}
/// Sets up the implicit ABI return for an inline asm block at the end of a
/// function: configures the output constraint string(s) mapping the target's
/// return register(s) onto the function's LLVM return type, and prepends the
/// resulting statement to the asm block. Only x86 and x86_64 are handled.
void emitABIReturnAsmStmt(IRAsmBlock* asmblock, Loc loc, FuncDeclaration* fdecl)
{
    Logger::println("emitABIReturnAsmStmt(%s)", fdecl->mangle());
    LOG_SCOPE;

    IRAsmStmt* as = new IRAsmStmt;

    LLType* llretTy = DtoType(fdecl->type->nextOf());
    asmblock->retty = llretTy;
    asmblock->retn = 1;

    // FIXME: This should probably be handled by the TargetABI somehow.
    //        It should be able to do this for a greater variety of types.

    // x86
    if (global.params.targetTriple.getArch() == llvm::Triple::x86)
    {
        LINK l = fdecl->linkage;
        assert((l == LINKd || l == LINKc || l == LINKwindows) && "invalid linkage for asm implicit return");

        Type* rt = fdecl->type->nextOf()->toBasetype();
        if (rt->isintegral() || rt->ty == Tpointer || rt->ty == Tclass || rt->ty == Taarray)
        {
            if (rt->size() == 8) {
                // 64-bit integral: returned in edx:eax ("=A")
                as->out_c = "=A,";
            } else {
                as->out_c = "={ax},";
            }
        }
        else if (rt->isfloating())
        {
            if (rt->iscomplex()) {
                if (fdecl->linkage == LINKd) {
                    // extern(D) always returns on the FPU stack
                    as->out_c = "={st},={st(1)},";
                    asmblock->retn = 2;
                } else if (rt->ty == Tcomplex32) {
                    // extern(C) cfloat is return as i64
                    as->out_c = "=A,";
                    asmblock->retty = LLType::getInt64Ty(gIR->context());
                } else {
                    // cdouble and creal extern(C) are returned in pointer
                    // don't add anything!
                    asmblock->retty = LLType::getVoidTy(gIR->context());
                    asmblock->retn = 0;
                    return;
                }
            } else {
                as->out_c = "={st},";
            }
        }
        else if (rt->ty == Tarray || rt->ty == Tdelegate)
        {
            // two-word return: pointer/length resp. context/funcptr in ax:dx
            as->out_c = "={ax},={dx},";
            asmblock->retn = 2;
        #if 0
            // this is to show how to allocate a temporary for the return value
            // in case the appropriate multi register constraint isn't supported.
            // this way abi return from inline asm can still be emulated.
            // note that "$<<out0>>" etc in the asm will translate to the correct
            // numbered output when the asm block in finalized

            // generate asm
            as->out_c = "=*m,=*m,";
            LLValue* tmp = DtoRawAlloca(llretTy, 0, ".tmp_asm_ret");
            as->out.push_back( tmp );
            as->out.push_back( DtoGEPi(tmp, 0,1) );
            as->code = "movd %eax, $<<out0>>" "\n\t" "mov %edx, $<<out1>>";

            // fix asmblock
            asmblock->retn = 0;
            asmblock->retemu = true;
            asmblock->asmBlock->abiret = tmp;

            // add "ret" stmt at the end of the block
            asmblock->s.push_back(as);

            // done, we don't want anything pushed in the front of the block
            return;
        #endif
        }
        else
        {
            error(loc, "unimplemented return type '%s' for implicit abi return", rt->toChars());
            fatal();
        }
    }

    // x86_64
    else if (global.params.targetTriple.getArch() == llvm::Triple::x86_64)
    {
        LINK l = fdecl->linkage;
        /* TODO: Check if this works with extern(Windows), completely untested.
         *       In particular, returning cdouble may not work with
         *       extern(Windows) since according to X86CallingConv.td it
         *       doesn't allow XMM1 to be used.
         * (So is extern(C), but that should be fine as the calling convention
         * is identical to that of extern(D))
         */
        assert((l == LINKd || l == LINKc || l == LINKwindows) && "invalid linkage for asm implicit return");

        Type* rt = fdecl->type->nextOf()->toBasetype();
        if (rt->isintegral() || rt->ty == Tpointer || rt->ty == Tclass || rt->ty == Taarray)
        {
            as->out_c = "={ax},";
        }
        else if (rt->isfloating())
        {
            if (rt == Type::tcomplex80) {
                // On x87 stack, re=st, im=st(1)
                as->out_c = "={st},={st(1)},";
                asmblock->retn = 2;
            } else if (rt == Type::tfloat80 || rt == Type::timaginary80) {
                // On x87 stack
                as->out_c = "={st},";
            } else if (l != LINKd && rt == Type::tcomplex32) {
                // LLVM and GCC disagree on how to return {float, float}.
                // For compatibility, use the GCC/LLVM-GCC way for extern(C/Windows)
                // extern(C) cfloat -> %xmm0 (extract two floats)
                as->out_c = "={xmm0},";
                asmblock->retty = LLType::getDoubleTy(gIR->context());
            } else if (rt->iscomplex()) {
                // cdouble and extern(D) cfloat -> re=%xmm0, im=%xmm1
                as->out_c = "={xmm0},={xmm1},";
                asmblock->retn = 2;
            } else {
                // Plain float/double/ifloat/idouble
                as->out_c = "={xmm0},";
            }
        }
        else if (rt->ty == Tarray || rt->ty == Tdelegate)
        {
            as->out_c = "={ax},={dx},";
            asmblock->retn = 2;
        }
        else
        {
            error(loc, "unimplemented return type '%s' for implicit abi return", rt->toChars());
            fatal();
        }
    }

    // unsupported
    else
    {
        error(loc, "this target (%s) does not implement inline asm falling off the end of the function",
              global.params.targetTriple.str().c_str());
        fatal();
    }

    // return values always go in the front
    asmblock->s.push_front(as);
}
/// Lowers a `new` expression of class type `tc`: allocates memory (on the
/// stack for scope classes, via a custom allocator, or through druntime),
/// default-initializes the instance unless the runtime allocator already did,
/// wires up the outer/nested context, and invokes the constructor if used.
DValue *DtoNewClass(Loc &loc, TypeClass *tc, NewExp *newexp) {
  // resolve type
  DtoResolveClass(tc->sym);

  // allocate
  LLValue *mem;
  bool doInit = true;
  if (newexp->onstack) {
    // scope class: stack-allocate the instance data with the class's own
    // alignment (0 = default when STRUCTALIGN_DEFAULT)
    unsigned alignment = tc->sym->alignsize;
    if (alignment == STRUCTALIGN_DEFAULT)
      alignment = 0;
    mem = DtoRawAlloca(DtoType(tc)->getContainedType(0), alignment,
                       ".newclass_alloca");
  }
  // custom allocator
  else if (newexp->allocator) {
    DtoResolveFunction(newexp->allocator);
    DFuncValue dfn(newexp->allocator, DtoCallee(newexp->allocator));
    DValue *res = DtoCallFunction(newexp->loc, nullptr, &dfn, newexp->newargs);
    mem = DtoBitCast(DtoRVal(res), DtoType(tc), ".newclass_custom");
  }
  // default allocator
  else {
    // When -ehnogc is active and this `new` is part of a `throw`, allocate
    // via _d_newThrowable instead of _d_allocclass; in that case the
    // explicit DtoInitClass below is skipped (doInit = false).
    const bool useEHAlloc = global.params.ehnogc && newexp->thrownew;
    llvm::Function *fn = getRuntimeFunction(
        loc, gIR->module, useEHAlloc ? "_d_newThrowable" : "_d_allocclass");
    LLConstant *ci = DtoBitCast(getIrAggr(tc->sym)->getClassInfoSymbol(),
                                DtoType(getClassInfoType()));
    mem = gIR->CreateCallOrInvoke(fn, ci,
                                  useEHAlloc ? ".newthrowable_alloc"
                                             : ".newclass_gc_alloc")
              .getInstruction();
    mem = DtoBitCast(mem, DtoType(tc),
                     useEHAlloc ? ".newthrowable" : ".newclass_gc");
    doInit = !useEHAlloc;
  }

  // init: copy the class's static initializer into the fresh memory
  if (doInit)
    DtoInitClass(tc, mem);

  // init inner-class outer reference
  if (newexp->thisexp) {
    Logger::println("Resolving outer class");
    LOG_SCOPE;
    unsigned idx = getFieldGEPIndex(tc->sym, tc->sym->vthis);
    LLValue *src = DtoRVal(newexp->thisexp);
    LLValue *dst = DtoGEPi(mem, 0, idx);
    IF_LOG Logger::cout() << "dst: " << *dst << "\nsrc: " << *src << '\n';
    DtoStore(src, DtoBitCast(dst, getPtrToType(src->getType())));
  }
  // set the context for nested classes
  else if (tc->sym->isNested() && tc->sym->vthis) {
    DtoResolveNestedContext(loc, tc->sym, mem);
  }

  // call constructor
  if (newexp->member) {
    // evaluate argprefix
    if (newexp->argprefix) {
      toElemDtor(newexp->argprefix);
    }

    Logger::println("Calling constructor");
    assert(newexp->arguments != NULL);
    DtoResolveFunction(newexp->member);
    DFuncValue dfn(newexp->member, DtoCallee(newexp->member), mem);
    // ignore ctor return value (C++ ctors on Posix may not return `this`)
    DtoCallFunction(newexp->loc, tc, &dfn, newexp->arguments);
    return new DImValue(tc, mem);
  }

  assert(newexp->argprefix == NULL);

  // return default constructed class
  return new DImValue(tc, mem);
}
/// Creates the nested-variable frame ("context") for fd: allocates the frame
/// (GC-allocated if the frame escapes as a closure, else on the stack), copies
/// the chain of parent frame pointers into its head, and rewrites captured
/// parameters/locals to live inside the frame.
void DtoCreateNestedContext(FuncDeclaration* fd) {
    Logger::println("DtoCreateNestedContext for %s", fd->toChars());
    LOG_SCOPE

    DtoCreateNestedContextType(fd);

    // construct nested variables array
    if (!fd->nestedVars.empty())
    {
        IrFunction* irfunction = fd->ir.irFunc;
        unsigned depth = irfunction->depth;
        LLStructType *frameType = irfunction->frameType;
        // Create frame for current function and append to frames list
        // FIXME: alignment ?
        LLValue* frame = 0;
        if (fd->needsClosure())
            // the frame outlives the function invocation -> GC allocation
            frame = DtoGcMalloc(frameType, ".frame");
        else
            frame = DtoRawAlloca(frameType, 0, ".frame");

        // copy parent frames into beginning
        if (depth != 0)
        {
            LLValue* src = irfunction->nestArg;
            if (!src)
            {
                // No explicit context argument: derive the parent context
                // from the aggregate's 'this' / vthis field instead.
                assert(irfunction->thisArg);
                assert(fd->isMember2());
                LLValue* thisval = DtoLoad(irfunction->thisArg);
                AggregateDeclaration* cd = fd->isMember2();
                assert(cd);
                assert(cd->vthis);
                Logger::println("Indexing to 'this'");
                if (cd->isStructDeclaration())
                    src = DtoExtractValue(thisval, cd->vthis->ir.irField->index, ".vthis");
                else
                    src = DtoLoad(DtoGEPi(thisval, 0, cd->vthis->ir.irField->index, ".vthis"));
            }
            else
            {
                src = DtoLoad(src);
            }
            if (depth > 1)
            {
                // Bulk-copy the (depth-1) ancestor frame pointers from the
                // parent's frame list into the head of the new frame.
                src = DtoBitCast(src, getVoidPtrType());
                LLValue* dst = DtoBitCast(frame, getVoidPtrType());
                DtoMemCpy(dst, src, DtoConstSize_t((depth-1) * PTRSIZE),
                    getABITypeAlign(getVoidPtrType()));
            }
            // Copy nestArg into framelist; the outer frame is not in the list of pointers
            src = DtoBitCast(src, frameType->getContainedType(depth-1));
            LLValue* gep = DtoGEPi(frame, 0, depth-1);
            DtoAlignedStore(src, gep);
        }

        // store context in IrFunction
        irfunction->nestedVar = frame;

        // go through all nested vars and assign addresses where possible.
        for (std::set<VarDeclaration*>::iterator i=fd->nestedVars.begin(); i!=fd->nestedVars.end(); ++i)
        {
            VarDeclaration* vd = *i;

            LLValue* gep = DtoGEPi(frame, 0, vd->ir.irLocal->nestedIndex, vd->toChars());
            if (vd->isParameter())
            {
                Logger::println("nested param: %s", vd->toChars());
                LOG_SCOPE
                IrParameter* parm = vd->ir.irParam;

                if (parm->arg->byref)
                {
                    // byref parameter: store its address into the frame slot.
                    storeVariable(vd, gep);
                }
                else
                {
                    Logger::println("Copying to nested frame");
                    // The parameter value is an alloca'd stack slot.
                    // Copy to the nesting frame and leave the alloca for
                    // the optimizers to clean up.
                    DtoStore(DtoLoad(parm->value), gep);
                    gep->takeName(parm->value);
                    parm->value = gep;
                }
            }
            else
            {
                Logger::println("nested var: %s", vd->toChars());
                assert(!vd->ir.irLocal->value);
                // Locals simply live at their slot inside the frame.
                vd->ir.irLocal->value = gep;
            }

            if (global.params.symdebug)
            {
                // Describe the variable's frame-relative location to DWARF.
                LLSmallVector<LLValue*, 2> addr;
                dwarfOpOffset(addr, frameType, vd->ir.irLocal->nestedIndex);
                DtoDwarfLocalVariable(frame, vd, addr);
            }
        }
    }
}
/// Creates the nested-variable context for fd. Two strategies exist:
/// NCArray builds a flat array of void* (one entry per captured variable,
/// with parent entries copied in front), while NCHybrid builds a typed frame
/// struct whose head holds the chain of parent frame pointers.
void DtoCreateNestedContext(FuncDeclaration* fd) {
    Logger::println("DtoCreateNestedContext for %s", fd->toChars());
    LOG_SCOPE

    DtoCreateNestedContextType(fd);

    if (nestedCtx == NCArray) {
        // construct nested variables array
        if (!fd->nestedVars.empty())
        {
            Logger::println("has nested frame");
            // start with adding all enclosing parent frames until a static parent is reached
            int nparelems = 0;
            if (!fd->isStatic())
            {
                Dsymbol* par = fd->toParent2();
                while (par)
                {
                    if (FuncDeclaration* parfd = par->isFuncDeclaration())
                    {
                        nparelems += parfd->nestedVars.size();
                        // stop at first static
                        if (parfd->isStatic())
                            break;
                    }
                    else if (par->isClassDeclaration())
                    {
                        // nothing needed
                    }
                    else
                    {
                        break;
                    }

                    par = par->toParent2();
                }
            }
            int nelems = fd->nestedVars.size() + nparelems;

            // make array type for nested vars
            LLType* nestedVarsTy = LLArrayType::get(getVoidPtrType(), nelems);

            // alloca it
            // FIXME align ?
            LLValue* nestedVars = DtoRawAlloca(nestedVarsTy, 0, ".nested_vars");

            IrFunction* irfunction = fd->ir.irFunc;

            // copy parent frame into beginning
            if (nparelems)
            {
                LLValue* src = irfunction->nestArg;
                if (!src)
                {
                    // No context argument: index through 'this' instead.
                    assert(irfunction->thisArg);
                    assert(fd->isMember2());
                    LLValue* thisval = DtoLoad(irfunction->thisArg);
                    ClassDeclaration* cd = fd->isMember2()->isClassDeclaration();
                    assert(cd);
                    assert(cd->vthis);
                    src = DtoLoad(DtoGEPi(thisval, 0,cd->vthis->ir.irField->index, ".vthis"));
                } else {
                    src = DtoLoad(src);
                }
                DtoMemCpy(nestedVars, src, DtoConstSize_t(nparelems*PTRSIZE),
                    getABITypeAlign(getVoidPtrType()));
            }

            // store in IrFunction
            irfunction->nestedVar = nestedVars;

            // go through all nested vars and assign indices
            int idx = nparelems;
            for (std::set<VarDeclaration*>::iterator i=fd->nestedVars.begin(); i!=fd->nestedVars.end(); ++i)
            {
                VarDeclaration* vd = *i;
                if (!vd->ir.irLocal)
                    vd->ir.irLocal = new IrLocal(vd);

                if (vd->isParameter())
                {
                    // parameters already have storage; store their address
                    // into the array slot
                    Logger::println("nested param: %s", vd->toChars());
                    LLValue* gep = DtoGEPi(nestedVars, 0, idx);
                    LLValue* val = DtoBitCast(vd->ir.irLocal->value, getVoidPtrType());
                    DtoAlignedStore(val, gep);
                }
                else
                {
                    Logger::println("nested var: %s", vd->toChars());
                }

                vd->ir.irLocal->nestedIndex = idx++;
            }
        }
    }
    else if (nestedCtx == NCHybrid) {
        // construct nested variables array
        if (!fd->nestedVars.empty())
        {
            IrFunction* irfunction = fd->ir.irFunc;
            unsigned depth = irfunction->depth;
            LLStructType *frameType = irfunction->frameType;
            // Create frame for current function and append to frames list
            // FIXME: alignment ?
            LLValue* frame = 0;
#if DMDV2
            if (fd->needsClosure())
                // frame escapes (closure) -> must be GC-allocated
                frame = DtoGcMalloc(frameType, ".frame");
            else
#endif
            frame = DtoRawAlloca(frameType, 0, ".frame");


            // copy parent frames into beginning
            if (depth != 0)
            {
                LLValue* src = irfunction->nestArg;
                if (!src)
                {
                    // No context argument: derive the parent context from
                    // the enclosing aggregate's vthis field.
                    assert(irfunction->thisArg);
                    assert(fd->isMember2());
                    LLValue* thisval = DtoLoad(irfunction->thisArg);
#if DMDV2
                    AggregateDeclaration* cd = fd->isMember2();
#else
                    ClassDeclaration* cd = fd->isMember2()->isClassDeclaration();
#endif
                    assert(cd);
                    assert(cd->vthis);
                    Logger::println("Indexing to 'this'");
#if DMDV2
                    if (cd->isStructDeclaration())
                        src = DtoExtractValue(thisval, cd->vthis->ir.irField->index, ".vthis");
                    else
#endif
                    src = DtoLoad(DtoGEPi(thisval, 0, cd->vthis->ir.irField->index, ".vthis"));
                } else {
                    src = DtoLoad(src);
                }
                if (depth > 1) {
                    // Bulk-copy the ancestor frame pointers into the head
                    // of the new frame.
                    src = DtoBitCast(src, getVoidPtrType());
                    LLValue* dst = DtoBitCast(frame, getVoidPtrType());
                    DtoMemCpy(dst, src, DtoConstSize_t((depth-1) * PTRSIZE),
                        getABITypeAlign(getVoidPtrType()));
                }
                // Copy nestArg into framelist; the outer frame is not in the list of pointers
                src = DtoBitCast(src, frameType->getContainedType(depth-1));
                LLValue* gep = DtoGEPi(frame, 0, depth-1);
                DtoAlignedStore(src, gep);
            }

            // store context in IrFunction
            irfunction->nestedVar = frame;

            // go through all nested vars and assign addresses where possible.
            for (std::set<VarDeclaration*>::iterator i=fd->nestedVars.begin(); i!=fd->nestedVars.end(); ++i)
            {
                VarDeclaration* vd = *i;

                LLValue* gep = DtoGEPi(frame, 0, vd->ir.irLocal->nestedIndex, vd->toChars());
                if (vd->isParameter())
                {
                    Logger::println("nested param: %s", vd->toChars());
                    LOG_SCOPE
                    LLValue* value = vd->ir.irLocal->value;
                    if (llvm::isa<llvm::AllocaInst>(llvm::GetUnderlyingObject(value)))
                    {
                        Logger::println("Copying to nested frame");
                        // The parameter value is an alloca'd stack slot.
                        // Copy to the nesting frame and leave the alloca for
                        // the optimizers to clean up.
                        assert(!vd->ir.irLocal->byref);
                        DtoStore(DtoLoad(value), gep);
                        gep->takeName(value);
                        vd->ir.irLocal->value = gep;
                    }
                    else
                    {
                        Logger::println("Adding pointer to nested frame");
                        // The parameter value is something else, such as a
                        // passed-in pointer (for 'ref' or 'out' parameters) or
                        // a pointer arg with byval attribute.
                        // Store the address into the frame.
                        assert(vd->ir.irLocal->byref);
                        storeVariable(vd, gep);
                    }
                }
                else if (vd->isRef() || vd->isOut())
                {
                    // This slot is initialized in DtoNestedInit, to handle things like byref foreach variables
                    // which move around in memory.
                    assert(vd->ir.irLocal->byref);
                }
                else
                {
                    Logger::println("nested var: %s", vd->toChars());
                    if (vd->ir.irLocal->value)
                        Logger::cout() << "Pre-existing value: " << *vd->ir.irLocal->value << '\n';
                    assert(!vd->ir.irLocal->value);
                    vd->ir.irLocal->value = gep;
                    assert(!vd->ir.irLocal->byref);
                }

                if (global.params.symdebug)
                {
                    // Describe the frame-relative location to DWARF.
                    LLSmallVector<LLValue*, 2> addr;
                    dwarfOpOffset(addr, frameType, vd->ir.irLocal->nestedIndex);
                    DtoDwarfLocalVariable(frame, vd, addr);
                }
            }
        }
        else if (FuncDeclaration* parFunc = getParentFunc(fd, true))
        {
            // Propagate context arg properties if the context arg is passed on unmodified.
            DtoDeclareFunction(parFunc);
            fd->ir.irFunc->frameType = parFunc->ir.irFunc->frameType;
            fd->ir.irFunc->depth = parFunc->ir.irFunc->depth;
        }
    }
    else {
        assert(0 && "Not implemented yet");
    }
}
/// Emits the LLVM IR body for fd: runs all the skip/error checks, registers
/// static ctors/dtors/unittests, sets up the entry block, alloca point,
/// 'this'/nested-context/parameter storage and D-style varargs, generates the
/// body statements, and finalizes the function (implicit return, removal of
/// the helper alloca point and 'endentry' block).
void DtoDefineFunction(FuncDeclaration* fd)
{
    IF_LOG Logger::println("DtoDefineFunction(%s): %s", fd->toPrettyChars(), fd->loc.toChars());
    LOG_SCOPE;

    // already emitted?
    if (fd->ir.isDefined()) return;

    if ((fd->type && fd->type->ty == Terror) ||
        (fd->type && fd->type->ty == Tfunction && static_cast<TypeFunction *>(fd->type)->next == NULL) ||
        (fd->type && fd->type->ty == Tfunction && static_cast<TypeFunction *>(fd->type)->next->ty == Terror))
    {
        IF_LOG Logger::println("Ignoring; has error type, no return type or returns error type");
        fd->ir.setDefined();
        return;
    }

    if (fd->semanticRun == PASSsemanticdone)
    {
        /* What happened is this function failed semantic3() with errors,
         * but the errors were gagged.
         * Try to reproduce those errors, and then fail.
         */
        error(fd->loc, "errors compiling function %s", fd->toPrettyChars());
        fd->ir.setDefined();
        return;
    }

    DtoResolveFunction(fd);

    if (fd->isUnitTestDeclaration() && !global.params.useUnitTests)
    {
        IF_LOG Logger::println("No code generation for unit test declaration %s", fd->toChars());
        fd->ir.setDefined();
        return;
    }

    // Skip array ops implemented in druntime
    if (fd->isArrayOp && isDruntimeArrayOp(fd))
    {
        IF_LOG Logger::println("No code generation for array op %s implemented in druntime", fd->toChars());
        fd->ir.setDefined();
        return;
    }

    // Check whether the frontend knows that the function is already defined
    // in some other module (see DMD's FuncDeclaration::toObjFile).
    // Walk up through nested parents, as their instantiation status decides.
    for (FuncDeclaration *f = fd; f; )
    {
        if (!f->isInstantiated() && f->inNonRoot())
        {
            IF_LOG Logger::println("Skipping '%s'.", fd->toPrettyChars());
            // TODO: Emit as available_externally for inlining purposes instead
            // (see #673).
            fd->ir.setDefined();
            return;
        }
        if (f->isNested())
            f = f->toParent2()->isFuncDeclaration();
        else
            break;
    }

    DtoDeclareFunction(fd);
    assert(fd->ir.isDeclared());

    // DtoResolveFunction might also set the defined flag for functions we
    // should not touch.
    if (fd->ir.isDefined()) return;
    fd->ir.setDefined();

    // We cannot emit nested functions with parents that have not gone through
    // semantic analysis. This can happen as DMD leaks some template instances
    // from constraints into the module member list. DMD gets away with being
    // sloppy as functions in template contraints obviously never need to access
    // data from the template function itself, but it would still mess up our
    // nested context creation code.
    FuncDeclaration* parent = fd;
    while ((parent = getParentFunc(parent, true)))
    {
        if (parent->semanticRun != PASSsemantic3done || parent->semantic3Errors)
        {
            IF_LOG Logger::println("Ignoring nested function with unanalyzed parent.");
            return;
        }
    }

    assert(fd->semanticRun == PASSsemantic3done);
    assert(fd->ident != Id::empty);

    // Register module ctors/dtors/unittests with the codegen driver.
    // Note: dtors are push_front'ed so they run in reverse order.
    if (fd->isUnitTestDeclaration()) {
        gIR->unitTests.push_back(fd);
    } else if (fd->isSharedStaticCtorDeclaration()) {
        gIR->sharedCtors.push_back(fd);
    } else if (StaticDtorDeclaration *dtorDecl = fd->isSharedStaticDtorDeclaration()) {
        gIR->sharedDtors.push_front(fd);
        if (dtorDecl->vgate)
            gIR->sharedGates.push_front(dtorDecl->vgate);
    } else if (fd->isStaticCtorDeclaration()) {
        gIR->ctors.push_back(fd);
    } else if (StaticDtorDeclaration *dtorDecl = fd->isStaticDtorDeclaration()) {
        gIR->dtors.push_front(fd);
        if (dtorDecl->vgate)
            gIR->gates.push_front(dtorDecl->vgate);
    }

    // if this function is naked, we take over right away! no standard processing!
    if (fd->naked)
    {
        DtoDefineNakedFunction(fd);
        return;
    }

    IrFunction *irFunc = getIrFunc(fd);
    IrFuncTy &irFty = irFunc->irFty;

    // debug info
    irFunc->diSubprogram = gIR->DBuilder.EmitSubProgram(fd);

    Type* t = fd->type->toBasetype();
    TypeFunction* f = static_cast<TypeFunction*>(t);
    // assert(f->ctype);

    llvm::Function* func = irFunc->func;

    // is there a body?
    if (fd->fbody == NULL)
        return;

    IF_LOG Logger::println("Doing function body for: %s", fd->toChars());
    gIR->functions.push_back(irFunc);

    if (fd->isMain())
        gIR->emitMain = true;

    func->setLinkage(lowerFuncLinkage(fd));

    // On x86_64, always set 'uwtable' for System V ABI compatibility.
    // TODO: Find a better place for this.
    // TODO: Is this required for Win64 as well?
    if (global.params.targetTriple.getArch() == llvm::Triple::x86_64)
    {
        func->addFnAttr(LDC_ATTRIBUTE(UWTable));
    }
#if LDC_LLVM_VER >= 303
    if (opts::sanitize != opts::None) {
        // Set the required sanitizer attribute.
        if (opts::sanitize == opts::AddressSanitizer) {
            func->addFnAttr(LDC_ATTRIBUTE(SanitizeAddress));
        }

        if (opts::sanitize == opts::MemorySanitizer) {
            func->addFnAttr(LDC_ATTRIBUTE(SanitizeMemory));
        }

        if (opts::sanitize == opts::ThreadSanitizer) {
            func->addFnAttr(LDC_ATTRIBUTE(SanitizeThread));
        }
    }
#endif

    llvm::BasicBlock* beginbb = llvm::BasicBlock::Create(gIR->context(), "", func);
    llvm::BasicBlock* endbb = llvm::BasicBlock::Create(gIR->context(), "endentry", func);

    //assert(gIR->scopes.empty());
    gIR->scopes.push_back(IRScope(beginbb, endbb));

    // create alloca point
    // this gets erased when the function is complete, so alignment etc does not matter at all
    llvm::Instruction* allocaPoint = new llvm::AllocaInst(LLType::getInt32Ty(gIR->context()), "alloca point", beginbb);
    irFunc->allocapoint = allocaPoint;

    // debug info - after all allocas, but before any llvm.dbg.declare etc
    gIR->DBuilder.EmitFuncStart(fd);

    // this hack makes sure the frame pointer elimination optimization is disabled.
    // this this eliminates a bunch of inline asm related issues.
    if (fd->hasReturnExp & 8) // has inline asm
    {
        // emit a call to llvm_eh_unwind_init
        LLFunction* hack = GET_INTRINSIC_DECL(eh_unwind_init);
        gIR->ir->CreateCall(hack, "");
    }

    // give the 'this' argument storage and debug info
    if (irFty.arg_this)
    {
        LLValue* thisvar = irFunc->thisArg;
        assert(thisvar);

        LLValue* thismem = thisvar;
        if (!irFty.arg_this->byref)
        {
            // spill the register 'this' value to a stack slot
            thismem = DtoRawAlloca(thisvar->getType(), 0, "this"); // FIXME: align?
            DtoStore(thisvar, thismem);
            irFunc->thisArg = thismem;
        }

        assert(getIrParameter(fd->vthis)->value == thisvar);
        getIrParameter(fd->vthis)->value = thismem;

        gIR->DBuilder.EmitLocalVariable(thismem, fd->vthis);
    }

    // give the 'nestArg' storage
    if (irFty.arg_nest)
    {
        LLValue *nestArg = irFunc->nestArg;
        LLValue *val = DtoRawAlloca(nestArg->getType(), 0, "nestedFrame");
        DtoStore(nestArg, val);
        irFunc->nestArg = val;
    }

    // give arguments storage
    // and debug info
    if (fd->parameters)
    {
        size_t n = irFty.args.size();
        assert(n == fd->parameters->dim);
        for (size_t i=0; i < n; ++i)
        {
            Dsymbol* argsym = static_cast<Dsymbol*>(fd->parameters->data[i]);
            VarDeclaration* vd = argsym->isVarDeclaration();
            assert(vd);

            IrParameter* irparam = getIrParameter(vd);
            assert(irparam);

            bool refout = vd->storage_class & (STCref | STCout);
            bool lazy = vd->storage_class & STClazy;
            if (!refout && (!irparam->arg->byref || lazy))
            {
                // alloca a stack slot for this first class value arg
                LLValue* mem = DtoAlloca(irparam->arg->type, vd->ident->toChars());

                // let the abi transform the argument back first
                DImValue arg_dval(vd->type, irparam->value);
                irFty.getParam(vd->type, i, &arg_dval, mem);

                // set the arg var value to the alloca
                irparam->value = mem;
            }

            if (global.params.symdebug && !(isaArgument(irparam->value) && isaArgument(irparam->value)->hasByValAttr()) && !refout)
                gIR->DBuilder.EmitLocalVariable(irparam->value, vd);
        }
    }

    FuncGen fg;
    irFunc->gen = &fg;

    DtoCreateNestedContext(fd);

    if (fd->vresult && !
        fd->vresult->nestedrefs.dim // FIXME: not sure here :/
    )
    {
        DtoVarDeclaration(fd->vresult);
    }

    // D varargs: prepare _argptr and _arguments
    if (f->linkage == LINKd && f->varargs == 1)
    {
        // allocate _argptr (of type core.stdc.stdarg.va_list)
        LLValue* argptrmem = DtoAlloca(Type::tvalist, "_argptr_mem");
        irFunc->_argptr = argptrmem;

        // initialize _argptr with a call to the va_start intrinsic
        LLValue* vaStartArg = gABI->prepareVaStart(argptrmem);
        llvm::CallInst::Create(GET_INTRINSIC_DECL(vastart), vaStartArg, "", gIR->scopebb());

        // copy _arguments to a memory location
        LLType* argumentsType = irFunc->_arguments->getType();
        LLValue* argumentsmem = DtoRawAlloca(argumentsType, 0, "_arguments_mem");
        new llvm::StoreInst(irFunc->_arguments, argumentsmem, gIR->scopebb());
        irFunc->_arguments = argumentsmem;
    }

    // output function body
    codegenFunction(fd->fbody, gIR);
    irFunc->gen = 0;

    llvm::BasicBlock* bb = gIR->scopebb();
    if (pred_begin(bb) == pred_end(bb) && bb != &bb->getParent()->getEntryBlock()) {
        // This block is trivially unreachable, so just delete it.
        // (This is a common case because it happens when 'return'
        // is the last statement in a function)
        bb->eraseFromParent();
    } else if (!gIR->scopereturned()) {
        // llvm requires all basic blocks to end with a TerminatorInst but DMD does not put a return statement
        // in automatically, so we do it here.

        // pass the previous block into this block
        gIR->DBuilder.EmitFuncEnd(fd);
        if (func->getReturnType() == LLType::getVoidTy(gIR->context())) {
            llvm::ReturnInst::Create(gIR->context(), gIR->scopebb());
        }
        else if (!fd->isMain()) {
            // non-void, non-main: use the asm block's abi return if the body
            // ends with asm, otherwise return undef
            AsmBlockStatement* asmb = fd->fbody->endsWithAsm();
            if (asmb) {
                assert(asmb->abiret);
                llvm::ReturnInst::Create(gIR->context(), asmb->abiret, bb);
            }
            else {
                llvm::ReturnInst::Create(gIR->context(), llvm::UndefValue::get(func->getReturnType()), bb);
            }
        }
        else
            // main falling off the end returns 0
            llvm::ReturnInst::Create(gIR->context(), LLConstant::getNullValue(func->getReturnType()), bb);
    }

    // erase alloca point
    if (allocaPoint->getParent())
        allocaPoint->eraseFromParent();
    allocaPoint = 0;
    gIR->func()->allocapoint = 0;

    gIR->scopes.pop_back();

    // get rid of the endentry block, it's never used
    assert(!func->getBasicBlockList().empty());
    func->getBasicBlockList().pop_back();

    gIR->functions.pop_back();
}