/// In-place initialization of a class instance at `dst`:
/// writes the vtable pointer, nulls the monitor field (D classes only), and
/// copies the remaining instance data from the class's static initializer.
void DtoInitClass(TypeClass* tc, LLValue* dst) {
  // Make sure the class's IR symbols (vtable/init) exist before using them.
  DtoResolveClass(tc->sym);

  // Set vtable field. Doing this separately might be optimized better.
  LLValue* tmp = DtoGEPi(dst, 0, 0, "vtbl");
  LLValue* val = DtoBitCast(getIrAggr(tc->sym)->getVtblSymbol(),
                            tmp->getType()->getContainedType(0));
  DtoStore(val, tmp);

  // For D classes, set the monitor field to null.
  // (C++ class layout has no monitor slot.)
  const bool isCPPclass = tc->sym->isCPPclass() ? true : false;
  if (!isCPPclass) {
    tmp = DtoGEPi(dst, 0, 1, "monitor");
    val = LLConstant::getNullValue(tmp->getType()->getContainedType(0));
    DtoStore(val, tmp);
  }

  // Copy the rest from the static initializer, if any.
  // The header already initialized above is 1 pointer (vtable) for C++
  // classes, 2 pointers (vtable + monitor) for D classes.
  unsigned const firstDataIdx = isCPPclass ? 1 : 2;
  uint64_t const dataBytes =
      tc->sym->structsize - Target::ptrsize * firstDataIdx;
  if (dataBytes == 0)
    return;

  LLValue* dstarr = DtoGEPi(dst, 0, firstDataIdx);

  // init symbols might not have valid types
  LLValue* initsym = getIrAggr(tc->sym)->getInitSymbol();
  initsym = DtoBitCast(initsym, DtoType(tc));
  LLValue* srcarr = DtoGEPi(initsym, 0, firstDataIdx);

  DtoMemCpy(dstarr, srcarr, DtoConstSize_t(dataBytes));
}
/// Older DtoInitClass variant: in-place initialization of a class instance
/// at `dst` (vtable, monitor, then a memcpy from the static initializer).
void DtoInitClass(TypeClass* tc, LLValue* dst)
{
  // Make sure the class's IR has been generated before touching its symbols.
  tc->sym->codegen(Type::sir);

  // Instance data size beyond the 2-pointer header (vtable + monitor).
  uint64_t n = tc->sym->structsize - PTRSIZE * 2;

  // set vtable field separately, this might give better optimization
  LLValue* tmp = DtoGEPi(dst,0,0,"vtbl");
  LLValue* val = DtoBitCast(tc->sym->ir.irStruct->getVtblSymbol(), tmp->getType()->getContainedType(0));
  DtoStore(val, tmp);

  // monitor always defaults to zero
  tmp = DtoGEPi(dst,0,1,"monitor");
  val = LLConstant::getNullValue(tmp->getType()->getContainedType(0));
  DtoStore(val, tmp);

  // done? (no data beyond the header)
  if (n == 0)
    return;

  // copy the rest from the static initializer
  LLValue* dstarr = DtoGEPi(dst,0,2,"tmp");

  // init symbols might not have valid types
  LLValue* initsym = tc->sym->ir.irStruct->getInitSymbol();
  initsym = DtoBitCast(initsym, DtoType(tc));
  LLValue* srcarr = DtoGEPi(initsym,0,2,"tmp");

  DtoMemCpy(dstarr, srcarr, DtoConstSize_t(n));
}
/// Builds the IR for one `catch` clause: creates a "catch" basic block
/// (inserted before `end`), wires up storage for the catch variable, emits
/// the handler body, and records the caught class type for the EH tables.
IRLandingPadInfo::IRLandingPadInfo(Catch* catchstmt, llvm::BasicBlock* end)
: finallyBody(NULL)
{
  target = llvm::BasicBlock::Create(gIR->context(), "catch", gIR->topfunc(), end);

  gIR->scope() = IRScope(target,end);
  DtoDwarfBlockStart(catchstmt->loc);

  // assign storage to catch var
  if(catchstmt->var)
  {
    // use the same storage for all exceptions that are not accessed in
    // nested functions
#if DMDV2
    if(!catchstmt->var->nestedrefs.dim) {
#else
    if(!catchstmt->var->nestedref) {
#endif
      assert(!catchstmt->var->ir.irLocal);
      catchstmt->var->ir.irLocal = new IrLocal(catchstmt->var);
      LLValue* catch_var = gIR->func()->gen->landingPadInfo.getExceptionStorage();
      catchstmt->var->ir.irLocal->value = gIR->ir->CreateBitCast(catch_var, getPtrToType(DtoType(catchstmt->var->type)));
    }

    // this will alloca if we haven't already and take care of nested refs
    DtoDeclarationExp(catchstmt->var);

    // the exception will only be stored in catch_var. copy it over if necessary
    if(catchstmt->var->ir.irLocal->value != gIR->func()->gen->landingPadInfo.getExceptionStorage())
    {
      LLValue* exc = gIR->ir->CreateBitCast(DtoLoad(gIR->func()->gen->landingPadInfo.getExceptionStorage()), DtoType(catchstmt->var->type));
      DtoStore(exc, catchstmt->var->ir.irLocal->value);
    }
  }

  // emit handler, if there is one
  // handler is zero for instance for 'catch { debug foo(); }'
  if(catchstmt->handler)
    catchstmt->handler->toIR(gIR);

  if (!gIR->scopereturned())
    gIR->ir->CreateBr(end);

  assert(catchstmt->type);
  catchType = catchstmt->type->toBasetype()->isClassHandle();
  assert(catchType);
  // Make sure the caught class's IR exists; the EH machinery needs its symbol.
  catchType->codegen(Type::sir);

  DtoDwarfBlockEnd();
}

/// A landing-pad entry representing a `finally` clause: no catch block,
/// no catch type, only the statement to run during unwinding.
IRLandingPadInfo::IRLandingPadInfo(Statement* finallystmt)
: target(NULL), finallyBody(finallystmt), catchType(NULL)
{
}

/// Queues a catch clause entry at the front of `unpushed_infos`
/// (consumed later when the landing pad is actually pushed).
void IRLandingPad::addCatch(Catch* catchstmt, llvm::BasicBlock* end)
{
  unpushed_infos.push_front(IRLandingPadInfo(catchstmt, end));
}
/// Implements va_copy: clones the source __va_list struct onto the stack and
/// makes the destination char* point at the clone.
void vaCopy(LLValue* pDest, LLValue* src) {
  // Analog to va_start, we need to allocate a new __va_list struct on the
  // stack. Load the source struct by value first...
  LLValue* srcValist =
      DtoLoad(DtoBitCast(src, getValistType()->getPointerTo()));
  // ...dump the loaded value into a fresh stack copy...
  LLValue* copy = DtoAllocaDump(srcValist, 0, "__va_list_mem");
  // ...and finally set the passed 'dest' char* pointer to the copy's address.
  LLValue* destSlot = DtoBitCast(pDest, getPtrToType(getVoidPtrType()));
  DtoStore(DtoBitCast(copy, getVoidPtrType()), destSlot);
}
/// Prepares a va_start call: the user only declared a char* (`ap`), so the
/// actual __va_list struct is allocated here and `ap` is pointed at it.
LLValue *prepareVaStart(DLValue *ap) override {
  // Allocate the real __va_list struct on the stack.
  LLValue *storage = DtoRawAlloca(getValistType(), 0, "__va_list_mem");
  // Make `ap` reference the freshly allocated struct.
  LLValue *apSlot =
      DtoBitCast(DtoLVal(ap), getPtrToType(storage->getType()));
  DtoStore(storage, apSlot);
  // LLVM's va_start intrinsic wants an i8* to the actual struct.
  return DtoBitCast(storage, getVoidPtrType());
}
/// Implements va_copy for the char*-indirection ABI: allocates a new
/// __va_list struct, points `dest` at it, then bit-copies the source struct.
void vaCopy(DLValue *dest, DValue *src) override {
  // Analog to va_start: allocate a fresh __va_list struct on the stack.
  LLValue *storage = DtoRawAlloca(getValistType(), 0, "__va_list_mem");
  // Point the destination char* at the new struct.
  LLValue *destSlot =
      DtoBitCast(DtoLVal(dest), getPtrToType(storage->getType()));
  DtoStore(storage, destSlot);
  // `src` is a char* pointer to the source struct; copy it over wholesale.
  DtoMemCpy(storage, DtoRVal(src));
}
/// Older prepareVaStart: allocates the actual __va_list struct (the user only
/// declared a char* `ap`), stores its address into `ap`, and returns the
/// void* pointer for LLVM's va_start intrinsic.
LLValue* prepareVaStart(LLValue* pAp)
{
  // Allocate the real __va_list struct on the stack.
  LLValue* storage = DtoRawAlloca(getValistType(), 0, "__va_list_mem");
  // View it as void* for both the store and the intrinsic argument.
  LLValue* asVoidPtr = DtoBitCast(storage, getVoidPtrType());
  DtoStore(asVoidPtr, pAp); // ap = (void*)__va_list_mem
  return asVoidPtr;
}
// Get struct from ABI-mangled representation LLValue* get(Type* dty, DValue* v) { LLValue* lval; if (v->isLVal()) { lval = v->getLVal(); } else { // No memory location, create one. LLValue* rval = v->getRVal(); lval = DtoRawAlloca(rval->getType(), 0); DtoStore(rval, lval); } LLType* pTy = getPtrToType(DtoType(dty)); return DtoLoad(DtoBitCast(lval, pTy), "get-result"); }
/// Undo the transformation performed by DtoUnpaddedStruct, writing to lval. void DtoPaddedStruct(Type* dty, LLValue* v, LLValue* lval) { assert(dty->ty == Tstruct); TypeStruct* sty = static_cast<TypeStruct*>(dty); VarDeclarations& fields = sty->sym->fields; for (unsigned i = 0; i < fields.dim; i++) { LLValue* fieldptr = DtoIndexAggregate(lval, sty->sym, fields[i]); LLValue* fieldval = DtoExtractValue(v, i); if (fields[i]->type->ty == Tstruct) { // Nested structs are the only members that can contain padding DtoPaddedStruct(fields[i]->type, fieldval, fieldptr); } else { DtoStore(fieldval, fieldptr); } } }
/// Stores the function's nested context pointer into the `vthis` field of the
/// given class instance (skipped when the context is undef).
// NOTE(review): the stray #endif below belongs to a preprocessor conditional
// that opens before this excerpt — presumably an alternate signature variant.
void DtoResolveNestedContext(Loc loc, ClassDeclaration *decl, LLValue *value)
#endif
{
  Logger::println("Resolving nested context");
  LOG_SCOPE;

  // get context
  LLValue* nest = DtoNestedContext(loc, decl);

  // store into right location
  // (an undef context pointer means there is nothing meaningful to store)
  if (!llvm::dyn_cast<llvm::UndefValue>(nest))
  {
    size_t idx = decl->vthis->ir.irField->index;
    LLValue* gep = DtoGEPi(value,0,idx,".vthis");
    DtoStore(DtoBitCast(nest, gep->getType()->getContainedType(0)), gep);
  }
}
/// Undo the transformation performed by DtoUnpaddedStruct, writing to lval. void DtoPaddedStruct(Type* dty, LLValue* v, LLValue* lval) { assert(dty->ty == Tstruct); TypeStruct* sty = (TypeStruct*) dty; Array& fields = sty->sym->fields; for (unsigned i = 0; i < fields.dim; i++) { VarDeclaration* vd = (VarDeclaration*) fields.data[i]; LLValue* fieldptr = DtoIndexStruct(lval, sty->sym, vd); LLValue* fieldval = DtoExtractValue(v, i); if (vd->type->ty == Tstruct) { // Nested structs are the only members that can contain padding DtoPaddedStruct(vd->type, fieldval, fieldptr); } else { DtoStore(fieldval, fieldptr); } } }
// Turn a struct into an ABI-mangled representation LLValue* put(Type* dty, DValue* v) { LLValue* lval; if (v->isLVal()) { lval = v->getLVal(); } else { // No memory location, create one. LLValue* rval = v->getRVal(); lval = DtoRawAlloca(rval->getType(), 0); DtoStore(rval, lval); } LLType* abiTy = getAbiType(dty); assert(abiTy && "Why are we rewriting a non-rewritten type?"); LLType* pTy = getPtrToType(abiTy); return DtoLoad(DtoBitCast(lval, pTy), "put-result"); }
/// Emits the IR for this catch clause into its `target` block: sets up the
/// catch variable's storage, copies the in-flight exception into it, emits
/// the handler body, and branches to `end` if the handler did not return.
/// No-op for finally entries (catchstmt == NULL).
void IRLandingPadInfo::toIR()
{
  if (!catchstmt)
    return;

  gIR->scope() = IRScope(target, target);
  DtoDwarfBlockStart(catchstmt->loc);

  // assign storage to catch var
  if(catchstmt->var)
  {
    // use the same storage for all exceptions that are not accessed in
    // nested functions
    if(!catchstmt->var->nestedrefs.dim) {
      assert(!catchstmt->var->ir.irLocal);
      catchstmt->var->ir.irLocal = new IrLocal(catchstmt->var);
      LLValue* catch_var = gIR->func()->gen->landingPadInfo.getExceptionStorage();
      catchstmt->var->ir.irLocal->value = gIR->ir->CreateBitCast(catch_var, getPtrToType(DtoType(catchstmt->var->type)));
    }

    // this will alloca if we haven't already and take care of nested refs
    DtoDeclarationExp(catchstmt->var);

    // the exception will only be stored in catch_var. copy it over if necessary
    if(catchstmt->var->ir.irLocal->value != gIR->func()->gen->landingPadInfo.getExceptionStorage())
    {
      LLValue* exc = gIR->ir->CreateBitCast(DtoLoad(gIR->func()->gen->landingPadInfo.getExceptionStorage()), DtoType(catchstmt->var->type));
      DtoStore(exc, catchstmt->var->ir.irLocal->value);
    }
  }

  // emit handler, if there is one
  // handler is zero for instance for 'catch { debug foo(); }'
  if(catchstmt->handler)
    catchstmt->handler->toIR(gIR);

  if (!gIR->scopereturned())
    gIR->ir->CreateBr(end);

  DtoDwarfBlockEnd();
}
/// Stores the function's nested context pointer into the `vthis` field of the
/// given aggregate instance (skipped when the context is undef).
void DtoResolveNestedContext(Loc loc, AggregateDeclaration *decl, LLValue *value)
{
  Logger::println("Resolving nested context");
  LOG_SCOPE;

  // Fetch the context pointer for this declaration.
  LLValue* ctx = DtoNestedContext(loc, decl);

  // An undef context pointer means there is nothing to store.
  if (llvm::dyn_cast<llvm::UndefValue>(ctx))
    return;

  // Need to make sure the declaration has already been resolved, because
  // when multiple source files are specified on the command line, the
  // frontend sometimes adds "nested" (i.e. a template in module B
  // instantiated from module A with a type from module A instantiates
  // another template from module B) into the wrong module, messing up
  // our codegen order.
  DtoResolveDsymbol(decl);

  size_t fieldIndex = decl->vthis->ir.irField->index;
  LLValue* vthisPtr = DtoGEPi(value, 0, fieldIndex, ".vthis");
  DtoStore(DtoBitCast(ctx, vthisPtr->getType()->getContainedType(0)), vthisPtr);
}
/// Writes the real and imaginary parts into a complex value at `c`
/// (field 0 = real, field 1 = imaginary).
void DtoComplexSet(LLValue* c, LLValue* re, LLValue* im)
{
  LLValue* rePtr = DtoGEPi(c, 0, 0, "tmp");
  DtoStore(re, rePtr);
  LLValue* imPtr = DtoGEPi(c, 0, 1, "tmp");
  DtoStore(im, imPtr);
}
/// Builds the runtime storage holding this function's nested (closed-over)
/// variables, using the strategy selected by `nestedCtx`:
/// NCArray — a flat array of void* slots; NCHybrid — a typed frame struct
/// whose leading slots point at the enclosing functions' frames.
void DtoCreateNestedContext(FuncDeclaration* fd) {
  Logger::println("DtoCreateNestedContext for %s", fd->toChars());
  LOG_SCOPE

  DtoCreateNestedContextType(fd);

  if (nestedCtx == NCArray) {
    // construct nested variables array
    if (!fd->nestedVars.empty())
    {
      Logger::println("has nested frame");
      // start with adding all enclosing parent frames until a static parent is reached
      int nparelems = 0;
      if (!fd->isStatic())
      {
        Dsymbol* par = fd->toParent2();
        while (par)
        {
          if (FuncDeclaration* parfd = par->isFuncDeclaration())
          {
            nparelems += parfd->nestedVars.size();
            // stop at first static
            if (parfd->isStatic())
              break;
          }
          else if (par->isClassDeclaration())
          {
            // nothing needed
          }
          else
          {
            break;
          }
          par = par->toParent2();
        }
      }
      int nelems = fd->nestedVars.size() + nparelems;

      // make array type for nested vars
      LLType* nestedVarsTy = LLArrayType::get(getVoidPtrType(), nelems);

      // alloca it
      // FIXME align ?
      LLValue* nestedVars = DtoRawAlloca(nestedVarsTy, 0, ".nested_vars");

      IrFunction* irfunction = fd->ir.irFunc;

      // copy parent frame into beginning
      if (nparelems)
      {
        LLValue* src = irfunction->nestArg;
        if (!src)
        {
          // No explicit context argument: the context must come through
          // the 'this' pointer of the enclosing class.
          assert(irfunction->thisArg);
          assert(fd->isMember2());
          LLValue* thisval = DtoLoad(irfunction->thisArg);
          ClassDeclaration* cd = fd->isMember2()->isClassDeclaration();
          assert(cd);
          assert(cd->vthis);
          src = DtoLoad(DtoGEPi(thisval, 0,cd->vthis->ir.irField->index, ".vthis"));
        } else {
          src = DtoLoad(src);
        }
        DtoMemCpy(nestedVars, src, DtoConstSize_t(nparelems*PTRSIZE), getABITypeAlign(getVoidPtrType()));
      }

      // store in IrFunction
      irfunction->nestedVar = nestedVars;

      // go through all nested vars and assign indices
      int idx = nparelems;
      for (std::set<VarDeclaration*>::iterator i=fd->nestedVars.begin(); i!=fd->nestedVars.end(); ++i)
      {
        VarDeclaration* vd = *i;
        if (!vd->ir.irLocal)
          vd->ir.irLocal = new IrLocal(vd);

        if (vd->isParameter())
        {
          // Parameters already have storage; record its address in the array.
          Logger::println("nested param: %s", vd->toChars());
          LLValue* gep = DtoGEPi(nestedVars, 0, idx);
          LLValue* val = DtoBitCast(vd->ir.irLocal->value, getVoidPtrType());
          DtoAlignedStore(val, gep);
        }
        else
        {
          Logger::println("nested var: %s", vd->toChars());
        }

        vd->ir.irLocal->nestedIndex = idx++;
      }
    }
  }
  else if (nestedCtx == NCHybrid) {
    // construct nested variables array
    if (!fd->nestedVars.empty())
    {
      IrFunction* irfunction = fd->ir.irFunc;
      unsigned depth = irfunction->depth;
      LLStructType *frameType = irfunction->frameType;
      // Create frame for current function and append to frames list
      // FIXME: alignment ?
      LLValue* frame = 0;
#if DMDV2
      // Closures must outlive the function, so allocate them on the GC heap.
      if (fd->needsClosure())
        frame = DtoGcMalloc(frameType, ".frame");
      else
#endif
      frame = DtoRawAlloca(frameType, 0, ".frame");

      // copy parent frames into beginning
      if (depth != 0)
      {
        LLValue* src = irfunction->nestArg;
        if (!src)
        {
          // No explicit context argument: reach the frame through 'this'.
          assert(irfunction->thisArg);
          assert(fd->isMember2());
          LLValue* thisval = DtoLoad(irfunction->thisArg);
#if DMDV2
          AggregateDeclaration* cd = fd->isMember2();
#else
          ClassDeclaration* cd = fd->isMember2()->isClassDeclaration();
#endif
          assert(cd);
          assert(cd->vthis);
          Logger::println("Indexing to 'this'");
#if DMDV2
          if (cd->isStructDeclaration())
            src = DtoExtractValue(thisval, cd->vthis->ir.irField->index, ".vthis");
          else
#endif
          src = DtoLoad(DtoGEPi(thisval, 0, cd->vthis->ir.irField->index, ".vthis"));
        } else {
          src = DtoLoad(src);
        }
        if (depth > 1) {
          src = DtoBitCast(src, getVoidPtrType());
          LLValue* dst = DtoBitCast(frame, getVoidPtrType());
          DtoMemCpy(dst, src, DtoConstSize_t((depth-1) * PTRSIZE), getABITypeAlign(getVoidPtrType()));
        }
        // Copy nestArg into framelist; the outer frame is not in the list of pointers
        src = DtoBitCast(src, frameType->getContainedType(depth-1));
        LLValue* gep = DtoGEPi(frame, 0, depth-1);
        DtoAlignedStore(src, gep);
      }

      // store context in IrFunction
      irfunction->nestedVar = frame;

      // go through all nested vars and assign addresses where possible.
      for (std::set<VarDeclaration*>::iterator i=fd->nestedVars.begin(); i!=fd->nestedVars.end(); ++i)
      {
        VarDeclaration* vd = *i;

        LLValue* gep = DtoGEPi(frame, 0, vd->ir.irLocal->nestedIndex, vd->toChars());
        if (vd->isParameter())
        {
          Logger::println("nested param: %s", vd->toChars());
          LOG_SCOPE
          LLValue* value = vd->ir.irLocal->value;
          if (llvm::isa<llvm::AllocaInst>(llvm::GetUnderlyingObject(value)))
          {
            Logger::println("Copying to nested frame");
            // The parameter value is an alloca'd stack slot.
            // Copy to the nesting frame and leave the alloca for
            // the optimizers to clean up.
            assert(!vd->ir.irLocal->byref);
            DtoStore(DtoLoad(value), gep);
            gep->takeName(value);
            vd->ir.irLocal->value = gep;
          }
          else
          {
            Logger::println("Adding pointer to nested frame");
            // The parameter value is something else, such as a
            // passed-in pointer (for 'ref' or 'out' parameters) or
            // a pointer arg with byval attribute.
            // Store the address into the frame.
            assert(vd->ir.irLocal->byref);
            storeVariable(vd, gep);
          }
        }
        else if (vd->isRef() || vd->isOut())
        {
          // This slot is initialized in DtoNestedInit, to handle things like byref foreach variables
          // which move around in memory.
          assert(vd->ir.irLocal->byref);
        }
        else
        {
          Logger::println("nested var: %s", vd->toChars());
          if (vd->ir.irLocal->value)
            Logger::cout() << "Pre-existing value: " << *vd->ir.irLocal->value << '\n';
          assert(!vd->ir.irLocal->value);
          vd->ir.irLocal->value = gep;
          assert(!vd->ir.irLocal->byref);
        }

        if (global.params.symdebug) {
          LLSmallVector<LLValue*, 2> addr;
          dwarfOpOffset(addr, frameType, vd->ir.irLocal->nestedIndex);
          DtoDwarfLocalVariable(frame, vd, addr);
        }
      }
    }
    else if (FuncDeclaration* parFunc = getParentFunc(fd, true)) {
      // Propagate context arg properties if the context arg is passed on unmodified.
      DtoDeclareFunction(parFunc);
      fd->ir.irFunc->frameType = parFunc->ir.irFunc->frameType;
      fd->ir.irFunc->depth = parFunc->ir.irFunc->depth;
    }
  }
  else {
    assert(0 && "Not implemented yet");
  }
}
/// Builds the typed frame struct holding this function's nested
/// (closed-over) variables, copies/links the enclosing functions' frames
/// into its leading slots, and redirects each nested variable to its slot.
void DtoCreateNestedContext(FuncDeclaration* fd) {
  Logger::println("DtoCreateNestedContext for %s", fd->toChars());
  LOG_SCOPE

  DtoCreateNestedContextType(fd);

  // construct nested variables array
  if (!fd->nestedVars.empty())
  {
    IrFunction* irfunction = fd->ir.irFunc;
    unsigned depth = irfunction->depth;
    LLStructType *frameType = irfunction->frameType;
    // Create frame for current function and append to frames list
    // FIXME: alignment ?
    LLValue* frame = 0;
    // Closures must outlive the function, so allocate them on the GC heap.
    if (fd->needsClosure())
      frame = DtoGcMalloc(frameType, ".frame");
    else
      frame = DtoRawAlloca(frameType, 0, ".frame");

    // copy parent frames into beginning
    if (depth != 0)
    {
      LLValue* src = irfunction->nestArg;
      if (!src)
      {
        // No explicit context argument: the context must come through
        // the 'this' pointer of the enclosing aggregate.
        assert(irfunction->thisArg);
        assert(fd->isMember2());
        LLValue* thisval = DtoLoad(irfunction->thisArg);
        AggregateDeclaration* cd = fd->isMember2();
        assert(cd);
        assert(cd->vthis);
        Logger::println("Indexing to 'this'");
        if (cd->isStructDeclaration())
          src = DtoExtractValue(thisval, cd->vthis->ir.irField->index, ".vthis");
        else
          src = DtoLoad(DtoGEPi(thisval, 0, cd->vthis->ir.irField->index, ".vthis"));
      } else {
        src = DtoLoad(src);
      }
      if (depth > 1) {
        src = DtoBitCast(src, getVoidPtrType());
        LLValue* dst = DtoBitCast(frame, getVoidPtrType());
        DtoMemCpy(dst, src, DtoConstSize_t((depth-1) * PTRSIZE), getABITypeAlign(getVoidPtrType()));
      }
      // Copy nestArg into framelist; the outer frame is not in the list of pointers
      src = DtoBitCast(src, frameType->getContainedType(depth-1));
      LLValue* gep = DtoGEPi(frame, 0, depth-1);
      DtoAlignedStore(src, gep);
    }

    // store context in IrFunction
    irfunction->nestedVar = frame;

    // go through all nested vars and assign addresses where possible.
    for (std::set<VarDeclaration*>::iterator i=fd->nestedVars.begin(); i!=fd->nestedVars.end(); ++i)
    {
      VarDeclaration* vd = *i;

      LLValue* gep = DtoGEPi(frame, 0, vd->ir.irLocal->nestedIndex, vd->toChars());
      if (vd->isParameter())
      {
        Logger::println("nested param: %s", vd->toChars());
        LOG_SCOPE
        IrParameter* parm = vd->ir.irParam;

        if (parm->arg->byref)
        {
          // byref parameters keep living elsewhere; store their address.
          storeVariable(vd, gep);
        }
        else
        {
          Logger::println("Copying to nested frame");
          // The parameter value is an alloca'd stack slot.
          // Copy to the nesting frame and leave the alloca for
          // the optimizers to clean up.
          DtoStore(DtoLoad(parm->value), gep);
          gep->takeName(parm->value);
          parm->value = gep;
        }
      }
      else
      {
        // Plain nested variable: its storage *is* the frame slot.
        Logger::println("nested var: %s", vd->toChars());
        assert(!vd->ir.irLocal->value);
        vd->ir.irLocal->value = gep;
      }

      if (global.params.symdebug) {
        LLSmallVector<LLValue*, 2> addr;
        dwarfOpOffset(addr, frameType, vd->ir.irLocal->nestedIndex);
        DtoDwarfLocalVariable(frame, vd, addr);
      }
    }
  }
}
// Get struct from ABI-mangled representation, and store in the provided location. void getL(Type* dty, DValue* v, llvm::Value* lval) { LLValue* rval = v->getRVal(); LLType* pTy = getPtrToType(rval->getType()); DtoStore(rval, DtoBitCast(lval, pTy)); }
/// Lowers a `new`-expression for a class: allocates the instance (on the
/// stack, via a custom allocator, or via the runtime), initializes it,
/// wires up outer/nested context, and invokes the constructor if any.
DValue *DtoNewClass(Loc &loc, TypeClass *tc, NewExp *newexp) {
  // resolve type
  DtoResolveClass(tc->sym);

  // allocate
  LLValue *mem;
  bool doInit = true;
  // scope classes are allocated on the stack
  if (newexp->onstack) {
    unsigned alignment = tc->sym->alignsize;
    if (alignment == STRUCTALIGN_DEFAULT)
      alignment = 0;
    mem = DtoRawAlloca(DtoType(tc)->getContainedType(0), alignment, ".newclass_alloca");
  }
  // custom allocator
  else if (newexp->allocator) {
    DtoResolveFunction(newexp->allocator);
    DFuncValue dfn(newexp->allocator, DtoCallee(newexp->allocator));
    DValue *res = DtoCallFunction(newexp->loc, nullptr, &dfn, newexp->newargs);
    mem = DtoBitCast(DtoRVal(res), DtoType(tc), ".newclass_custom");
  }
  // default allocator
  else {
    // With -betterC-style nogc EH, Throwables in `throw new ...` use a
    // dedicated runtime allocator that also initializes the object.
    const bool useEHAlloc = global.params.ehnogc && newexp->thrownew;
    llvm::Function *fn = getRuntimeFunction(
        loc, gIR->module, useEHAlloc ? "_d_newThrowable" : "_d_allocclass");
    LLConstant *ci = DtoBitCast(getIrAggr(tc->sym)->getClassInfoSymbol(),
                                DtoType(getClassInfoType()));
    mem = gIR->CreateCallOrInvoke(fn, ci,
                                  useEHAlloc ? ".newthrowable_alloc"
                                             : ".newclass_gc_alloc")
              .getInstruction();
    mem = DtoBitCast(mem, DtoType(tc),
                     useEHAlloc ? ".newthrowable" : ".newclass_gc");
    doInit = !useEHAlloc;
  }

  // init
  if (doInit)
    DtoInitClass(tc, mem);

  // init inner-class outer reference
  if (newexp->thisexp) {
    Logger::println("Resolving outer class");
    LOG_SCOPE;
    unsigned idx = getFieldGEPIndex(tc->sym, tc->sym->vthis);
    LLValue *src = DtoRVal(newexp->thisexp);
    LLValue *dst = DtoGEPi(mem, 0, idx);
    IF_LOG Logger::cout() << "dst: " << *dst << "\nsrc: " << *src << '\n';
    DtoStore(src, DtoBitCast(dst, getPtrToType(src->getType())));
  }
  // set the context for nested classes
  else if (tc->sym->isNested() && tc->sym->vthis) {
    DtoResolveNestedContext(loc, tc->sym, mem);
  }

  // call constructor
  if (newexp->member) {
    // evaluate argprefix
    if (newexp->argprefix) {
      toElemDtor(newexp->argprefix);
    }

    Logger::println("Calling constructor");
    assert(newexp->arguments != NULL);
    DtoResolveFunction(newexp->member);
    DFuncValue dfn(newexp->member, DtoCallee(newexp->member), mem);
    // ignore ctor return value (C++ ctors on Posix may not return `this`)
    DtoCallFunction(newexp->loc, tc, &dfn, newexp->arguments);
    return new DImValue(tc, mem);
  }

  assert(newexp->argprefix == NULL);

  // return default constructed class
  return new DImValue(tc, mem);
}
/// Emits one basic block per catch clause of this try/catch statement,
/// including the _d_eh_enter_catch call, catch-variable setup, handler body,
/// and PGO-derived branch weights used later when matching exception types.
void TryCatchScope::emitCatchBodies(IRState &irs, llvm::Value *ehPtrSlot) {
  assert(catchBlocks.empty());

  auto &PGO = irs.funcGen().pgo;
  const auto entryCount = PGO.setCurrentStmt(stmt);

  // Per-clause data gathered in the first pass; uncaughtCount is filled
  // in by the reversed second pass below.
  struct CBPrototype {
    ClassDeclaration *cd;
    llvm::BasicBlock *catchBB;
    uint64_t catchCount;
    uint64_t uncaughtCount;
  };
  llvm::SmallVector<CBPrototype, 8> cbPrototypes;
  cbPrototypes.reserve(stmt->catches->dim);

  for (auto c : *stmt->catches) {
    auto catchBB =
        irs.insertBBBefore(endbb, llvm::Twine("catch.") + c->type->toChars());

    irs.scope() = IRScope(catchBB);
    irs.DBuilder.EmitBlockStart(c->loc);
    PGO.emitCounterIncrement(c);

    const auto enterCatchFn =
        getRuntimeFunction(Loc(), irs.module, "_d_eh_enter_catch");
    auto ptr = DtoLoad(ehPtrSlot);
    auto throwableObj = irs.ir->CreateCall(enterCatchFn, ptr);

    // For catches that use the Throwable object, create storage for it.
    // We will set it in the code that branches from the landing pads
    // (there might be more than one) to catchBB.
    if (c->var) {
      // This will alloca if we haven't already and take care of nested refs
      // if there are any.
      DtoDeclarationExp(c->var);

      // Copy the exception reference over from the _d_eh_enter_catch return
      // value.
      DtoStore(DtoBitCast(throwableObj, DtoType(c->var->type)),
               getIrLocal(c->var)->value);
    }

    // Emit handler, if there is one. The handler is zero, for instance,
    // when building 'catch { debug foo(); }' in non-debug mode.
    if (c->handler)
      Statement_toIR(c->handler, &irs);

    if (!irs.scopereturned())
      irs.ir->CreateBr(endbb);

    irs.DBuilder.EmitBlockEnd();

    // PGO information, currently unused
    auto catchCount = PGO.getRegionCount(c);
    // uncaughtCount is handled in a separate pass below

    auto cd = c->type->toBasetype()->isClassHandle();
    cbPrototypes.push_back({cd, catchBB, catchCount, 0});
  }

  // Total number of uncaught exceptions is equal to the execution count at
  // the start of the try block minus the one after the continuation.
  // uncaughtCount keeps track of the exception type mismatch count while
  // iterating through the catch block prototypes in reversed order.
  auto uncaughtCount = entryCount - PGO.getRegionCount(stmt);
  for (auto it = cbPrototypes.rbegin(), end = cbPrototypes.rend(); it != end;
       ++it) {
    it->uncaughtCount = uncaughtCount;
    // Add this catch block's match count to the uncaughtCount, because these
    // failed to match the remaining (lexically preceding) catch blocks.
    uncaughtCount += it->catchCount;
  }

  catchBlocks.reserve(stmt->catches->dim);

  for (const auto &p : cbPrototypes) {
    auto branchWeights =
        PGO.createProfileWeights(p.catchCount, p.uncaughtCount);
    DtoResolveClass(p.cd);
    auto ci = getIrAggr(p.cd)->getClassInfoSymbol();
    catchBlocks.push_back({ci, p.catchBB, branchWeights});
  }
}
/// Lowers a `__asm(code, constraints, args...)` template call to an LLVM
/// inline-asm call. The first two arguments must be char[] string literals
/// (the asm template and the constraint string); the rest become operands.
DValue * DtoInlineAsmExpr(Loc loc, FuncDeclaration * fd, Expressions * arguments)
{
  Logger::println("DtoInlineAsmExpr @ %s", loc.toChars());
  LOG_SCOPE;

  TemplateInstance* ti = fd->toParent()->isTemplateInstance();
  assert(ti && "invalid inline __asm expr");

  assert(arguments->dim >= 2 && "invalid __asm call");

  // get code param
  Expression* e = static_cast<Expression*>(arguments->data[0]);
  Logger::println("code exp: %s", e->toChars());
  StringExp* se = static_cast<StringExp*>(e);
  // sz == 1 means a byte-sized (char) string literal.
  if (e->op != TOKstring || se->sz != 1)
  {
    e->error("__asm code argument is not a char[] string literal");
    fatal();
  }
  std::string code(static_cast<char*>(se->string), se->len);

  // get constraints param
  e = static_cast<Expression*>(arguments->data[1]);
  Logger::println("constraint exp: %s", e->toChars());
  se = static_cast<StringExp*>(e);
  if (e->op != TOKstring || se->sz != 1)
  {
    e->error("__asm constraints argument is not a char[] string literal");
    fatal();
  }
  std::string constraints(static_cast<char*>(se->string), se->len);

  // build runtime arguments
  size_t n = arguments->dim;

  LLSmallVector<llvm::Value*, 8> args;
  args.reserve(n-2);
  std::vector<LLType*> argtypes;
  argtypes.reserve(n-2);

  for (size_t i = 2; i < n; i++)
  {
    e = static_cast<Expression*>(arguments->data[i]);
    args.push_back(e->toElem(gIR)->getRVal());
    argtypes.push_back(args.back()->getType());
  }

  // build asm function type
  Type* type = fd->type->nextOf()->toBasetype();
  LLType* ret_type = DtoType(type);
  llvm::FunctionType* FT = llvm::FunctionType::get(ret_type, argtypes, false);

  // build asm call
  // Conservatively mark the asm as having side effects so it is never
  // optimized away.
  bool sideeffect = true;
  llvm::InlineAsm* ia = llvm::InlineAsm::get(FT, code, constraints, sideeffect);

  llvm::Value* rv = gIR->ir->CreateCall(ia, args, "");

  // work around missing tuple support for users of the return value
  if (type->ty == Tstruct)
  {
    // make a copy
    llvm::Value* mem = DtoAlloca(type, ".__asm_tuple_ret");

    TypeStruct* ts = static_cast<TypeStruct*>(type);
    // NOTE(review): this inner `n` shadows the outer argument count above.
    size_t n = ts->sym->fields.dim;
    for (size_t i = 0; i < n; i++)
    {
      llvm::Value* v = gIR->ir->CreateExtractValue(rv, i, "");
      llvm::Value* gep = DtoGEPi(mem, 0, i);
      DtoStore(v, gep);
    }

    return new DVarValue(fd->type->nextOf(), mem);
  }

  // return call as im value
  return new DImValue(fd->type->nextOf(), rv);
}
/// CALYPSO-extended variant: emits one basic block per catch clause,
/// supporting D catches, C++ catches (via __cxa_begin_catch/__cxa_end_catch)
/// and foreign-language catches delegated to a language plugin.
void TryCatchScope::emitCatchBodies(IRState &irs, llvm::Value *ehPtrSlot) {
  assert(catchBlocks.empty());

  auto &PGO = irs.funcGen().pgo;
  const auto entryCount = PGO.setCurrentStmt(stmt);

  // Per-clause data gathered in the first pass; uncaughtCount is filled
  // in by the reversed second pass below.
  struct CBPrototype {
    Type *t;
    llvm::BasicBlock *catchBB;
    uint64_t catchCount;
    uint64_t uncaughtCount;
  };
  llvm::SmallVector<CBPrototype, 8> cbPrototypes;
  cbPrototypes.reserve(stmt->catches->dim);

  for (auto c : *stmt->catches) {
    auto catchBB =
        irs.insertBBBefore(endbb, llvm::Twine("catch.") + c->type->toChars());

    irs.scope() = IRScope(catchBB);
    irs.DBuilder.EmitBlockStart(c->loc);
    PGO.emitCounterIncrement(c);

    bool isCPPclass = false;

    if (auto lp = c->langPlugin()) // CALYPSO
      lp->codegen()->toBeginCatch(irs, c);
    else
    {
      const auto cd = c->type->toBasetype()->isClassHandle();
      isCPPclass = cd->isCPPclass();

      // C++ exceptions enter through the C++ runtime; D ones through druntime.
      const auto enterCatchFn = getRuntimeFunction(
          Loc(), irs.module,
          isCPPclass ? "__cxa_begin_catch" : "_d_eh_enter_catch");
      const auto ptr = DtoLoad(ehPtrSlot);
      const auto throwableObj = irs.ir->CreateCall(enterCatchFn, ptr);

      // For catches that use the Throwable object, create storage for it.
      // We will set it in the code that branches from the landing pads
      // (there might be more than one) to catchBB.
      if (c->var) {
        // This will alloca if we haven't already and take care of nested refs
        // if there are any.
        DtoDeclarationExp(c->var);

        // Copy the exception reference over from the _d_eh_enter_catch return
        // value.
        DtoStore(DtoBitCast(throwableObj, DtoType(c->var->type)),
                 getIrLocal(c->var)->value);
      }
    }

    // Emit handler, if there is one. The handler is zero, for instance,
    // when building 'catch { debug foo(); }' in non-debug mode.
    if (isCPPclass) {
      // from DMD:

      /* C++ catches need to end with call to __cxa_end_catch().
       * Create:
       *   try { handler } finally { __cxa_end_catch(); }
       * Note that this is worst case code because it always sets up an
       * exception handler. At some point should try to do better.
       */
      FuncDeclaration *fdend =
          FuncDeclaration::genCfunc(nullptr, Type::tvoid, "__cxa_end_catch");
      Expression *efunc = VarExp::create(Loc(), fdend);
      Expression *ecall = CallExp::create(Loc(), efunc);
      ecall->type = Type::tvoid;
      Statement *call = ExpStatement::create(Loc(), ecall);
      Statement *stmt =
          c->handler ? TryFinallyStatement::create(Loc(), c->handler, call)
                     : call;
      Statement_toIR(stmt, &irs);
    } else {
      if (c->handler)
        Statement_toIR(c->handler, &irs);
    }

    if (!irs.scopereturned())
    {
      // CALYPSO FIXME: _cxa_end_catch won't be called if it has already returned
      if (auto lp = c->langPlugin())
        lp->codegen()->toEndCatch(irs, c);
      irs.ir->CreateBr(endbb);
    }

    irs.DBuilder.EmitBlockEnd();

    // PGO information, currently unused
    auto catchCount = PGO.getRegionCount(c);
    // uncaughtCount is handled in a separate pass below

    cbPrototypes.push_back({c->type->toBasetype(), catchBB, catchCount, 0}); // CALYPSO
  }

  // Total number of uncaught exceptions is equal to the execution count at
  // the start of the try block minus the one after the continuation.
  // uncaughtCount keeps track of the exception type mismatch count while
  // iterating through the catch block prototypes in reversed order.
  auto uncaughtCount = entryCount - PGO.getRegionCount(stmt);
  for (auto it = cbPrototypes.rbegin(), end = cbPrototypes.rend(); it != end;
       ++it) {
    it->uncaughtCount = uncaughtCount;
    // Add this catch block's match count to the uncaughtCount, because these
    // failed to match the remaining (lexically preceding) catch blocks.
    uncaughtCount += it->catchCount;
  }

  catchBlocks.reserve(stmt->catches->dim);

  auto c_it = stmt->catches->begin(); // CALYPSO
  for (const auto &p : cbPrototypes) {
    auto branchWeights =
        PGO.createProfileWeights(p.catchCount, p.uncaughtCount);

    LLGlobalVariable *ci;
    if (auto lp = (*c_it)->langPlugin()) // CALYPSO
      ci = lp->codegen()->toCatchScopeType(irs, p.t);
    else
    {
      ClassDeclaration *cd = p.t->isClassHandle();
      DtoResolveClass(cd);

      if (cd->isCPPclass()) {
        const char *name = Target::cppTypeInfoMangle(cd);
        auto cpp_ti = getOrCreateGlobal(
            cd->loc, irs.module, getVoidPtrType(), /*isConstant=*/true,
            LLGlobalValue::ExternalLinkage, /*init=*/nullptr, name);

        // Wrap std::type_info pointers inside a __cpp_type_info_ptr class
        // instance so that the personality routine may differentiate C++
        // catch clauses from D ones.
        OutBuffer mangleBuf;
        mangleBuf.writestring("_D");
        mangleToBuffer(cd, &mangleBuf);
        mangleBuf.printf("%d%s", 18, "_cpp_type_info_ptr");
        const auto wrapperMangle =
            getIRMangledVarName(mangleBuf.peekString(), LINKd);

        RTTIBuilder b(ClassDeclaration::cpp_type_info_ptr);
        b.push(cpp_ti);

        auto wrapperType = llvm::cast<llvm::StructType>(
            static_cast<IrTypeClass*>(
                ClassDeclaration::cpp_type_info_ptr->type->ctype)
                ->getMemoryLLType());
        auto wrapperInit = b.get_constant(wrapperType);

        ci = getOrCreateGlobal(
            cd->loc, irs.module, wrapperType, /*isConstant=*/true,
            LLGlobalValue::LinkOnceODRLinkage, wrapperInit, wrapperMangle);
      } else {
        ci = getIrAggr(cd)->getClassInfoSymbol();
      }
    }

    catchBlocks.push_back({ci, p.catchBB, branchWeights});
    c_it++;
  }
}
/// Recovers the struct from its ABI-mangled representation and stores it
/// into the caller-provided location `lval`.
void ABIRewrite::getL(Type* dty, DValue* v, llvm::Value* lval)
{
  LLValue* recovered = get(dty, v);
  // The destination must already have the recovered value's type.
  assert(recovered->getType() == lval->getType()->getContainedType(0));
  DtoStore(recovered, lval);
}
/// Older `new`-expression lowering for classes: allocate (stack, custom
/// allocator, or _d_newclass), initialize, wire up outer/nested context,
/// then run the constructor if any.
DValue* DtoNewClass(Loc& loc, TypeClass* tc, NewExp* newexp)
{
  // resolve type
  DtoResolveClass(tc->sym);

  // allocate
  LLValue* mem;
  if (newexp->onstack)
  {
    // FIXME align scope class to its largest member
    mem = DtoRawAlloca(DtoType(tc)->getContainedType(0), 0, ".newclass_alloca");
  }
  // custom allocator
  else if (newexp->allocator)
  {
    DtoResolveFunction(newexp->allocator);
    DFuncValue dfn(newexp->allocator, getIrFunc(newexp->allocator)->func);
    DValue* res = DtoCallFunction(newexp->loc, NULL, &dfn, newexp->newargs);
    mem = DtoBitCast(res->getRVal(), DtoType(tc), ".newclass_custom");
  }
  // default allocator
  else
  {
    llvm::Function* fn = LLVM_D_GetRuntimeFunction(loc, gIR->module, "_d_newclass");
    LLConstant* ci = DtoBitCast(getIrAggr(tc->sym)->getClassInfoSymbol(), DtoType(Type::typeinfoclass->type));
    mem = gIR->CreateCallOrInvoke(fn, ci, ".newclass_gc_alloc").getInstruction();
    mem = DtoBitCast(mem, DtoType(tc), ".newclass_gc");
  }

  // init
  DtoInitClass(tc, mem);

  // init inner-class outer reference
  if (newexp->thisexp)
  {
    Logger::println("Resolving outer class");
    LOG_SCOPE;
    DValue* thisval = toElem(newexp->thisexp);
    unsigned idx = getFieldGEPIndex(tc->sym, tc->sym->vthis);
    LLValue* src = thisval->getRVal();
    LLValue* dst = DtoGEPi(mem, 0, idx);
    IF_LOG Logger::cout() << "dst: " << *dst << "\nsrc: " << *src << '\n';
    DtoStore(src, DtoBitCast(dst, getPtrToType(src->getType())));
  }
  // set the context for nested classes
  else if (tc->sym->isNested() && tc->sym->vthis)
  {
    DtoResolveNestedContext(loc, tc->sym, mem);
  }

  // call constructor
  if (newexp->member)
  {
    Logger::println("Calling constructor");
    assert(newexp->arguments != NULL);
    DtoResolveFunction(newexp->member);
    DFuncValue dfn(newexp->member, getIrFunc(newexp->member)->func, mem);
    return DtoCallFunction(newexp->loc, tc, &dfn, newexp->arguments);
  }

  // return default constructed class
  return new DImValue(tc, mem);
}
/// Generates the LLVM IR definition (body) for a function declaration.
///
/// First runs a series of eligibility checks (error types, gagged semantic
/// failures, disabled unit tests, druntime-implemented array ops, template
/// instances defined in other modules, unanalyzed nested parents), then
/// registers the function with the module-level ctor/dtor/unittest lists,
/// and finally emits the entry block, storage for `this`/nested-context/
/// parameters, D varargs setup, and the function body itself.
void DtoDefineFunction(FuncDeclaration* fd)
{
    IF_LOG Logger::println("DtoDefineFunction(%s): %s", fd->toPrettyChars(), fd->loc.toChars());
    LOG_SCOPE;

    // Nothing to do if a definition was already emitted.
    if (fd->ir.isDefined()) return;

    if ((fd->type && fd->type->ty == Terror) ||
        (fd->type && fd->type->ty == Tfunction && static_cast<TypeFunction *>(fd->type)->next == NULL) ||
        (fd->type && fd->type->ty == Tfunction && static_cast<TypeFunction *>(fd->type)->next->ty == Terror))
    {
        IF_LOG Logger::println("Ignoring; has error type, no return type or returns error type");
        fd->ir.setDefined();
        return;
    }

    if (fd->semanticRun == PASSsemanticdone)
    {
        /* What happened is this function failed semantic3() with errors,
         * but the errors were gagged.
         * Try to reproduce those errors, and then fail.
         */
        error(fd->loc, "errors compiling function %s", fd->toPrettyChars());
        fd->ir.setDefined();
        return;
    }

    DtoResolveFunction(fd);

    if (fd->isUnitTestDeclaration() && !global.params.useUnitTests)
    {
        IF_LOG Logger::println("No code generation for unit test declaration %s", fd->toChars());
        fd->ir.setDefined();
        return;
    }

    // Skip array ops implemented in druntime
    if (fd->isArrayOp && isDruntimeArrayOp(fd))
    {
        IF_LOG Logger::println("No code generation for array op %s implemented in druntime", fd->toChars());
        fd->ir.setDefined();
        return;
    }

    // Check whether the frontend knows that the function is already defined
    // in some other module (see DMD's FuncDeclaration::toObjFile).
    // Walk up the chain of enclosing functions, since a nested function is
    // only a root symbol if all of its parents are.
    for (FuncDeclaration *f = fd; f; )
    {
        if (!f->isInstantiated() && f->inNonRoot())
        {
            IF_LOG Logger::println("Skipping '%s'.", fd->toPrettyChars());
            // TODO: Emit as available_externally for inlining purposes instead
            // (see #673).
            fd->ir.setDefined();
            return;
        }
        if (f->isNested())
            f = f->toParent2()->isFuncDeclaration();
        else
            break;
    }

    DtoDeclareFunction(fd);
    assert(fd->ir.isDeclared());

    // DtoResolveFunction might also set the defined flag for functions we
    // should not touch.
    if (fd->ir.isDefined()) return;
    fd->ir.setDefined();

    // We cannot emit nested functions with parents that have not gone through
    // semantic analysis. This can happen as DMD leaks some template instances
    // from constraints into the module member list. DMD gets away with being
    // sloppy as functions in template contraints obviously never need to access
    // data from the template function itself, but it would still mess up our
    // nested context creation code.
    FuncDeclaration* parent = fd;
    while ((parent = getParentFunc(parent, true)))
    {
        if (parent->semanticRun != PASSsemantic3done || parent->semantic3Errors)
        {
            IF_LOG Logger::println("Ignoring nested function with unanalyzed parent.");
            return;
        }
    }

    assert(fd->semanticRun == PASSsemantic3done);
    assert(fd->ident != Id::empty);

    // Register the function with the appropriate module-level list so the
    // module ctor/dtor/unittest glue calls it at the right time. Note that
    // dtors are pushed to the FRONT so they run in reverse ctor order.
    if (fd->isUnitTestDeclaration()) {
        gIR->unitTests.push_back(fd);
    } else if (fd->isSharedStaticCtorDeclaration()) {
        gIR->sharedCtors.push_back(fd);
    } else if (StaticDtorDeclaration *dtorDecl = fd->isSharedStaticDtorDeclaration()) {
        gIR->sharedDtors.push_front(fd);
        if (dtorDecl->vgate)
            gIR->sharedGates.push_front(dtorDecl->vgate);
    } else if (fd->isStaticCtorDeclaration()) {
        gIR->ctors.push_back(fd);
    } else if (StaticDtorDeclaration *dtorDecl = fd->isStaticDtorDeclaration()) {
        gIR->dtors.push_front(fd);
        if (dtorDecl->vgate)
            gIR->gates.push_front(dtorDecl->vgate);
    }

    // if this function is naked, we take over right away! no standard processing!
    if (fd->naked)
    {
        DtoDefineNakedFunction(fd);
        return;
    }

    IrFunction *irFunc = getIrFunc(fd);
    IrFuncTy &irFty = irFunc->irFty;

    // debug info
    irFunc->diSubprogram = gIR->DBuilder.EmitSubProgram(fd);

    Type* t = fd->type->toBasetype();
    TypeFunction* f = static_cast<TypeFunction*>(t);
    // assert(f->ctype);

    llvm::Function* func = irFunc->func;

    // is there a body? (body-less declarations are finished at this point)
    if (fd->fbody == NULL)
        return;

    IF_LOG Logger::println("Doing function body for: %s", fd->toChars());
    gIR->functions.push_back(irFunc);

    if (fd->isMain())
        gIR->emitMain = true;

    func->setLinkage(lowerFuncLinkage(fd));

    // On x86_64, always set 'uwtable' for System V ABI compatibility.
    // TODO: Find a better place for this.
    // TODO: Is this required for Win64 as well?
    if (global.params.targetTriple.getArch() == llvm::Triple::x86_64)
    {
        func->addFnAttr(LDC_ATTRIBUTE(UWTable));
    }
#if LDC_LLVM_VER >= 303
    if (opts::sanitize != opts::None) {
        // Set the required sanitizer attribute.
        if (opts::sanitize == opts::AddressSanitizer) {
            func->addFnAttr(LDC_ATTRIBUTE(SanitizeAddress));
        }
        if (opts::sanitize == opts::MemorySanitizer) {
            func->addFnAttr(LDC_ATTRIBUTE(SanitizeMemory));
        }
        if (opts::sanitize == opts::ThreadSanitizer) {
            func->addFnAttr(LDC_ATTRIBUTE(SanitizeThread));
        }
    }
#endif

    // entry block plus a placeholder end block; the end block keeps the
    // IRScope invariant happy and is removed again below.
    llvm::BasicBlock* beginbb = llvm::BasicBlock::Create(gIR->context(), "", func);
    llvm::BasicBlock* endbb = llvm::BasicBlock::Create(gIR->context(), "endentry", func);

    //assert(gIR->scopes.empty());
    gIR->scopes.push_back(IRScope(beginbb, endbb));

    // create alloca point
    // this gets erased when the function is complete, so alignment etc does not matter at all
    llvm::Instruction* allocaPoint = new llvm::AllocaInst(LLType::getInt32Ty(gIR->context()), "alloca point", beginbb);
    irFunc->allocapoint = allocaPoint;

    // debug info - after all allocas, but before any llvm.dbg.declare etc
    gIR->DBuilder.EmitFuncStart(fd);

    // this hack makes sure the frame pointer elimination optimization is disabled.
    // this this eliminates a bunch of inline asm related issues.
    if (fd->hasReturnExp & 8) // has inline asm
    {
        // emit a call to llvm_eh_unwind_init
        LLFunction* hack = GET_INTRINSIC_DECL(eh_unwind_init);
        gIR->ir->CreateCall(hack, "");
    }

    // give the 'this' argument storage and debug info
    if (irFty.arg_this)
    {
        LLValue* thisvar = irFunc->thisArg;
        assert(thisvar);

        LLValue* thismem = thisvar;
        if (!irFty.arg_this->byref)
        {
            // spill the register argument to a stack slot so it has an address
            thismem = DtoRawAlloca(thisvar->getType(), 0, "this"); // FIXME: align?
            DtoStore(thisvar, thismem);
            irFunc->thisArg = thismem;
        }

        assert(getIrParameter(fd->vthis)->value == thisvar);
        getIrParameter(fd->vthis)->value = thismem;

        gIR->DBuilder.EmitLocalVariable(thismem, fd->vthis);
    }

    // give the 'nestArg' (enclosing-frame pointer) storage
    if (irFty.arg_nest)
    {
        LLValue *nestArg = irFunc->nestArg;
        LLValue *val = DtoRawAlloca(nestArg->getType(), 0, "nestedFrame");
        DtoStore(nestArg, val);
        irFunc->nestArg = val;
    }

    // give arguments storage
    // and debug info
    if (fd->parameters)
    {
        size_t n = irFty.args.size();
        assert(n == fd->parameters->dim);
        for (size_t i=0; i < n; ++i)
        {
            Dsymbol* argsym = static_cast<Dsymbol*>(fd->parameters->data[i]);
            VarDeclaration* vd = argsym->isVarDeclaration();
            assert(vd);

            IrParameter* irparam = getIrParameter(vd);
            assert(irparam);

            // ref/out params already have an address; lazy params are
            // delegates passed by value and need a slot like other values.
            bool refout = vd->storage_class & (STCref | STCout);
            bool lazy = vd->storage_class & STClazy;
            if (!refout && (!irparam->arg->byref || lazy))
            {
                // alloca a stack slot for this first class value arg
                LLValue* mem = DtoAlloca(irparam->arg->type, vd->ident->toChars());

                // let the abi transform the argument back first
                DImValue arg_dval(vd->type, irparam->value);
                irFty.getParam(vd->type, i, &arg_dval, mem);

                // set the arg var value to the alloca
                irparam->value = mem;
            }

            // byval arguments already have proper debug info via the argument itself
            if (global.params.symdebug && !(isaArgument(irparam->value) && isaArgument(irparam->value)->hasByValAttr()) && !refout)
                gIR->DBuilder.EmitLocalVariable(irparam->value, vd);
        }
    }

    FuncGen fg;
    irFunc->gen = &fg;

    DtoCreateNestedContext(fd);

    if (fd->vresult && !
        fd->vresult->nestedrefs.dim // FIXME: not sure here :/
    )
    {
        DtoVarDeclaration(fd->vresult);
    }

    // D varargs: prepare _argptr and _arguments
    if (f->linkage == LINKd && f->varargs == 1)
    {
        // allocate _argptr (of type core.stdc.stdarg.va_list)
        LLValue* argptrmem = DtoAlloca(Type::tvalist, "_argptr_mem");
        irFunc->_argptr = argptrmem;

        // initialize _argptr with a call to the va_start intrinsic
        LLValue* vaStartArg = gABI->prepareVaStart(argptrmem);
        llvm::CallInst::Create(GET_INTRINSIC_DECL(vastart), vaStartArg, "", gIR->scopebb());

        // copy _arguments to a memory location
        LLType* argumentsType = irFunc->_arguments->getType();
        LLValue* argumentsmem = DtoRawAlloca(argumentsType, 0, "_arguments_mem");
        new llvm::StoreInst(irFunc->_arguments, argumentsmem, gIR->scopebb());
        irFunc->_arguments = argumentsmem;
    }

    // output function body
    codegenFunction(fd->fbody, gIR);
    irFunc->gen = 0;

    llvm::BasicBlock* bb = gIR->scopebb();
    if (pred_begin(bb) == pred_end(bb) && bb != &bb->getParent()->getEntryBlock()) {
        // This block is trivially unreachable, so just delete it.
        // (This is a common case because it happens when 'return'
        // is the last statement in a function)
        bb->eraseFromParent();
    } else if (!gIR->scopereturned()) {
        // llvm requires all basic blocks to end with a TerminatorInst but DMD does not put a return statement
        // in automatically, so we do it here.

        // pass the previous block into this block
        gIR->DBuilder.EmitFuncEnd(fd);
        if (func->getReturnType() == LLType::getVoidTy(gIR->context())) {
            llvm::ReturnInst::Create(gIR->context(), gIR->scopebb());
        }
        else if (!fd->isMain()) {
            // a trailing asm block is assumed to leave the return value in
            // the ABI return slot
            AsmBlockStatement* asmb = fd->fbody->endsWithAsm();
            if (asmb) {
                assert(asmb->abiret);
                llvm::ReturnInst::Create(gIR->context(), asmb->abiret, bb);
            }
            else {
                llvm::ReturnInst::Create(gIR->context(), llvm::UndefValue::get(func->getReturnType()), bb);
            }
        }
        else
            // main() implicitly returns 0
            llvm::ReturnInst::Create(gIR->context(), LLConstant::getNullValue(func->getReturnType()), bb);
    }

    // erase alloca point
    if (allocaPoint->getParent())
        allocaPoint->eraseFromParent();
    allocaPoint = 0;
    gIR->func()->allocapoint = 0;

    gIR->scopes.pop_back();

    // get rid of the endentry block, it's never used
    assert(!func->getBasicBlockList().empty());
    func->getBasicBlockList().pop_back();

    gIR->functions.pop_back();
}