//------------------------------------------------------------------------
// LowerCast: Lower GT_CAST(srcType, DstType) nodes.
//
// Arguments:
//    tree - GT_CAST node to be lowered
//
// Return Value:
//    None.
//
// Notes:
//    Casts from float/double to a smaller int type are transformed as follows:
//    GT_CAST(float/double, byte)     =   GT_CAST(GT_CAST(float/double, int32), byte)
//    GT_CAST(float/double, sbyte)    =   GT_CAST(GT_CAST(float/double, int32), sbyte)
//    GT_CAST(float/double, int16)    =   GT_CAST(GT_CAST(float/double, int32), int16)
//    GT_CAST(float/double, uint16)   =   GT_CAST(GT_CAST(float/double, int32), uint16)
//
//    Note that for the overflow conversions we still depend on helper calls and
//    don't expect to see them here.
//    i) GT_CAST(float/double, int type with overflow detection)
//
void Lowering::LowerCast(GenTree* tree)
{
    assert(tree->OperGet() == GT_CAST);

    JITDUMP("LowerCast for: ");
    DISPNODE(tree);
    JITDUMP("\n");

    GenTree*  op1     = tree->gtOp.gtOp1;
    var_types dstType = tree->CastToType();
    var_types srcType = genActualType(op1->TypeGet());
    var_types tmpType = TYP_UNDEF;

    if (varTypeIsFloating(srcType))
    {
        noway_assert(!tree->gtOverflow());
        assert(!varTypeIsSmall(dstType)); // fgMorphCast creates intermediate casts when converting from float to small
                                          // int.
    }

    assert(!varTypeIsSmall(srcType));

    if (tmpType != TYP_UNDEF)
    {
        GenTree* tmp = comp->gtNewCastNode(tmpType, op1, tree->IsUnsigned(), tmpType);
        tmp->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));

        tree->gtFlags &= ~GTF_UNSIGNED;
        tree->gtOp.gtOp1 = tmp;
        BlockRange().InsertAfter(op1, tmp);
    }

    // Now determine if we have operands that should be contained.
    ContainCheckCast(tree->AsCast());
}
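// The following is a minimal standalone sketch (not part of the JIT) of the cast
// decomposition the Notes above describe: a float/double to small-int conversion is
// performed as float/double -> int32 first, and the int32 result is then narrowed to
// the small type. The function name is hypothetical and out-of-range values are ignored;
// it only illustrates the shape of the nested GT_CAST trees.

#include <cstdint>

static int8_t CastDoubleToSByte(double d)
{
    int32_t asInt32 = static_cast<int32_t>(d); // inner cast:  GT_CAST(double, int32)
    return static_cast<int8_t>(asInt32);       // outer cast:  GT_CAST(int32, sbyte)
}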
//------------------------------------------------------------------------
// ContainCheckStoreLoc: determine whether the source of a STORE_LCL* should be contained.
//
// Arguments:
//    storeLoc - the STORE_LCL* node
//
void Lowering::ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc)
{
    assert(storeLoc->OperIsLocalStore());
    GenTree* op1 = storeLoc->gtGetOp1();

#ifdef FEATURE_SIMD
    if (varTypeIsSIMD(storeLoc))
    {
        if (op1->IsIntegralConst(0))
        {
            // For an InitBlk we want op1 to be contained
            MakeSrcContained(storeLoc, op1);
        }
        return;
    }
#endif // FEATURE_SIMD

    // If the source is a containable immediate, make it contained, unless it is
    // an int-size or larger store of zero to memory, because we can generate smaller code
    // by zeroing a register and then storing it.
    if (IsContainableImmed(storeLoc, op1) && (!op1->IsIntegralConst(0) || varTypeIsSmall(storeLoc)))
    {
        MakeSrcContained(storeLoc, op1);
    }
#ifdef _TARGET_ARM_
    else if (op1->OperGet() == GT_LONG)
    {
        MakeSrcContained(storeLoc, op1);
    }
#endif // _TARGET_ARM_
}
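// A small sketch of the immediate-containment decision above, pulled out as a standalone
// predicate for clarity. The boolean parameters are hypothetical stand-ins for the GenTree
// queries (IsContainableImmed, IsIntegralConst(0), varTypeIsSmall); this is illustration
// only, not the JIT's API.

static bool ShouldContainStoreImmediate(bool isContainableImmed, bool isZero, bool storeIsSmallType)
{
    // A zero stored with int size or larger is better materialized in a register first
    // (for example via xor reg, reg on x86), so it is left uncontained; small-typed
    // stores of zero and all non-zero containable immediates stay contained.
    return isContainableImmed && (!isZero || storeIsSmallType);
}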
//------------------------------------------------------------------------
// LowerCast: Lower GT_CAST(srcType, DstType) nodes.
//
// Arguments:
//    tree - GT_CAST node to be lowered
//
// Return Value:
//    None.
//
// Notes:
//    Casts from float/double to a smaller int type are transformed as follows:
//    GT_CAST(float/double, byte)     =   GT_CAST(GT_CAST(float/double, int32), byte)
//    GT_CAST(float/double, sbyte)    =   GT_CAST(GT_CAST(float/double, int32), sbyte)
//    GT_CAST(float/double, int16)    =   GT_CAST(GT_CAST(float/double, int32), int16)
//    GT_CAST(float/double, uint16)   =   GT_CAST(GT_CAST(float/double, int32), uint16)
//
//    Note that for the overflow conversions we still depend on helper calls and
//    don't expect to see them here.
//    i) GT_CAST(float/double, int type with overflow detection)
//
void Lowering::LowerCast(GenTree* tree)
{
    assert(tree->OperGet() == GT_CAST);

    JITDUMP("LowerCast for: ");
    DISPNODE(tree);
    JITDUMP("\n");

    GenTreePtr op1     = tree->gtOp.gtOp1;
    var_types  dstType = tree->CastToType();
    var_types  srcType = genActualType(op1->TypeGet());
    var_types  tmpType = TYP_UNDEF;

    if (varTypeIsFloating(srcType))
    {
        noway_assert(!tree->gtOverflow());
    }

    assert(!varTypeIsSmall(srcType));

    // Case where src is a floating point type and dst is a small type.
    if (varTypeIsFloating(srcType) && varTypeIsSmall(dstType))
    {
        NYI_ARM("Lowering for cast from float to small type"); // Not tested yet.
        tmpType = TYP_INT;
    }

    if (tmpType != TYP_UNDEF)
    {
        GenTreePtr tmp = comp->gtNewCastNode(tmpType, op1, tmpType);
        tmp->gtFlags |= (tree->gtFlags & (GTF_UNSIGNED | GTF_OVERFLOW | GTF_EXCEPT));

        tree->gtFlags &= ~GTF_UNSIGNED;
        tree->gtOp.gtOp1 = tmp;
        BlockRange().InsertAfter(op1, tmp);
    }

    // Now determine if we have operands that should be contained.
    ContainCheckCast(tree->AsCast());
}
/*****************************************************************************
 * gsReplaceShadowParams (tree-walk callback)
 * Replace each use of a vulnerable parameter with its shadow copy.
 */
Compiler::fgWalkResult Compiler::gsReplaceShadowParams(GenTreePtr* pTree, fgWalkData* data)
{
    Compiler*  comp = data->compiler;
    GenTreePtr tree = *pTree;
    GenTreePtr asg  = NULL;

    if (tree->gtOper == GT_ASG)
    {
        asg  = tree;             // "asg" is the assignment tree.
        tree = tree->gtOp.gtOp1; // "tree" is the local var tree at the left-hand side of the assignment.
    }

    if (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_LCL_FLD)
    {
        UINT paramNum = tree->gtLclVarCommon.gtLclNum;

        if (!ShadowParamVarInfo::mayNeedShadowCopy(&comp->lvaTable[paramNum]) ||
            comp->gsShadowVarInfo[paramNum].shadowCopy == NO_SHADOW_COPY)
        {
            return WALK_CONTINUE;
        }

        tree->gtLclVarCommon.SetLclNum(comp->gsShadowVarInfo[paramNum].shadowCopy);

        // In gsParamsToShadows(), we create a shadow var of TYP_INT for every small type param.
        // Make sure we update the type of the local var tree as well.
        if (varTypeIsSmall(comp->lvaTable[paramNum].TypeGet()))
        {
            tree->gtType = TYP_INT;
            if (asg)
            {
                // If this is an assignment tree, propagate the type to it as well.
                asg->gtType = TYP_INT;
            }
        }
    }

    return WALK_CONTINUE;
}
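// A minimal sketch (hypothetical types, not the JIT's GenTree) of the invariant the callback
// above maintains: when a use of a small-typed param is redirected to its TYP_INT shadow,
// both the local node and any enclosing assignment are retyped to int so the tree stays
// type-consistent. kTypInt and the struct below are illustrative stand-ins only.

struct Node
{
    unsigned lclNum; // which local this node refers to
    int      type;   // stand-in for var_types
};

static const int kTypInt = 4; // hypothetical stand-in for TYP_INT

static void RedirectToShadow(Node& lclUse, Node* enclosingAsg, unsigned shadowLclNum, bool paramWasSmallType)
{
    lclUse.lclNum = shadowLclNum; // point the use at the shadow local
    if (paramWasSmallType)
    {
        lclUse.type = kTypInt; // shadow locals for small-typed params are int-sized
        if (enclosingAsg != nullptr)
        {
            enclosingAsg->type = kTypInt; // keep the assignment's type in sync with its destination
        }
    }
}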
/*****************************************************************************
 * gsParamsToShadows
 * Copy each vulnerable param ptr or buffer to a local shadow copy and replace
 * uses of the param by the shadow copy.
 */
void Compiler::gsParamsToShadows()
{
    // Cache the old count, since we'll add new variables and
    // gsShadowVarInfo will not grow to accommodate the new ones.
    UINT lvaOldCount = lvaCount;

    // Create a shadow copy for each param candidate.
    for (UINT lclNum = 0; lclNum < lvaOldCount; lclNum++)
    {
        LclVarDsc* varDsc                  = &lvaTable[lclNum];
        gsShadowVarInfo[lclNum].shadowCopy = NO_SHADOW_COPY;

        // Only care about params whose values are on the stack.
        if (!ShadowParamVarInfo::mayNeedShadowCopy(varDsc))
        {
            continue;
        }

        if (!varDsc->lvIsPtr && !varDsc->lvIsUnsafeBuffer)
        {
            continue;
        }

        int shadowVar = lvaGrabTemp(false DEBUGARG("shadowVar"));

        // Copy some info.
        var_types type = varTypeIsSmall(varDsc->TypeGet()) ? TYP_INT : varDsc->TypeGet();

        lvaTable[shadowVar].lvType            = type;
        lvaTable[shadowVar].lvAddrExposed     = varDsc->lvAddrExposed;
        lvaTable[shadowVar].lvDoNotEnregister = varDsc->lvDoNotEnregister;
#ifdef DEBUG
        lvaTable[shadowVar].lvVMNeedsStackAddr = varDsc->lvVMNeedsStackAddr;
        lvaTable[shadowVar].lvLiveInOutOfHndlr = varDsc->lvLiveInOutOfHndlr;
        lvaTable[shadowVar].lvLclFieldExpr     = varDsc->lvLclFieldExpr;
        lvaTable[shadowVar].lvLiveAcrossUCall  = varDsc->lvLiveAcrossUCall;
#endif
        lvaTable[shadowVar].lvVerTypeInfo    = varDsc->lvVerTypeInfo;
        lvaTable[shadowVar].lvGcLayout       = varDsc->lvGcLayout;
        lvaTable[shadowVar].lvIsUnsafeBuffer = varDsc->lvIsUnsafeBuffer;
        lvaTable[shadowVar].lvIsPtr          = varDsc->lvIsPtr;

#ifdef DEBUG
        if (verbose)
        {
            printf("Var V%02u is shadow param candidate. Shadow copy is V%02u.\n", lclNum, shadowVar);
        }
#endif

        gsShadowVarInfo[lclNum].shadowCopy = shadowVar;
    }

    // Replace param uses with the shadow copy.
    fgWalkAllTreesPre(gsReplaceShadowParams, (void*)this);

    // Now insert code to copy the params to their shadow copy.
    for (UINT lclNum = 0; lclNum < lvaOldCount; lclNum++)
    {
        LclVarDsc* varDsc = &lvaTable[lclNum];

        unsigned shadowVar = gsShadowVarInfo[lclNum].shadowCopy;
        if (shadowVar == NO_SHADOW_COPY)
        {
            continue;
        }

        var_types type = lvaTable[shadowVar].TypeGet();

        GenTreePtr src = gtNewLclvNode(lclNum, varDsc->TypeGet());
        GenTreePtr dst = gtNewLclvNode(shadowVar, type);

        src->gtFlags |= GTF_DONT_CSE;
        dst->gtFlags |= GTF_DONT_CSE;

        GenTreePtr opAssign = NULL;
        if (type == TYP_STRUCT)
        {
            CORINFO_CLASS_HANDLE clsHnd = varDsc->lvVerTypeInfo.GetClassHandle();

            // We don't need an unsafe value cls check here since we are copying the params and
            // this flag would have been set on the original param before reaching here.
            lvaSetStruct(shadowVar, clsHnd, false);

            src = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
            dst = gtNewOperNode(GT_ADDR, TYP_BYREF, dst);

            opAssign = gtNewCpObjNode(dst, src, clsHnd, false);
#if FEATURE_MULTIREG_ARGS_OR_RET
            lvaTable[shadowVar].lvIsMultiRegArgOrRet = lvaTable[lclNum].lvIsMultiRegArgOrRet;
#endif // FEATURE_MULTIREG_ARGS_OR_RET
        }
        else
        {
            opAssign = gtNewAssignNode(dst, src);
        }
        fgEnsureFirstBBisScratch();
        (void)fgInsertStmtAtBeg(fgFirstBB, fgMorphTree(opAssign));
    }

    // If the method has a "jmp CalleeMethod", then we need to copy the shadow params back to the
    // original params before the jmp to CalleeMethod.
    if (compJmpOpUsed)
    {
        // There could be more than one basic block ending with a "jmp"-type tail call.
        // We would have to insert assignments in all such blocks, just before the GT_JMP statement.
        for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
        {
            if (block->bbJumpKind != BBJ_RETURN)
            {
                continue;
            }

            if ((block->bbFlags & BBF_HAS_JMP) == 0)
            {
                continue;
            }

            for (UINT lclNum = 0; lclNum < info.compArgsCount; lclNum++)
            {
                LclVarDsc* varDsc = &lvaTable[lclNum];

                unsigned shadowVar = gsShadowVarInfo[lclNum].shadowCopy;
                if (shadowVar == NO_SHADOW_COPY)
                {
                    continue;
                }

                GenTreePtr src = gtNewLclvNode(shadowVar, lvaTable[shadowVar].TypeGet());
                GenTreePtr dst = gtNewLclvNode(lclNum, varDsc->TypeGet());

                src->gtFlags |= GTF_DONT_CSE;
                dst->gtFlags |= GTF_DONT_CSE;

                GenTreePtr opAssign = nullptr;
                if (varDsc->TypeGet() == TYP_STRUCT)
                {
                    CORINFO_CLASS_HANDLE clsHnd = varDsc->lvVerTypeInfo.GetClassHandle();

                    src = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
                    dst = gtNewOperNode(GT_ADDR, TYP_BYREF, dst);

                    opAssign = gtNewCpObjNode(dst, src, clsHnd, false);
                }
                else
                {
                    opAssign = gtNewAssignNode(dst, src);
                }

                (void)fgInsertStmtNearEnd(block, fgMorphTree(opAssign));
            }
        }
    }
}
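// Source-level sketch (illustration only, not JIT IR) of what gsParamsToShadows arranges for a
// vulnerable pointer/buffer parameter: the incoming value is copied into a shadow local at the
// start of the first block, every use of the parameter in the body is redirected to the shadow,
// and for a "jmp"-style tail call the shadow is copied back into the original parameter slot
// first. The method and variable names below are hypothetical.

void VulnerableMethod(char* userBuffer)
{
    char* shadowOfUserBuffer = userBuffer; // copy inserted at the start of the first block

    // ... every use of userBuffer in the body is rewritten to use shadowOfUserBuffer ...
    shadowOfUserBuffer[0] = 0;

    // If the method ended in a GT_JMP tail call, the JIT would also emit
    //     userBuffer = shadowOfUserBuffer;
    // just before the jmp, so the callee sees the up-to-date value.
}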