//------------------------------------------------------------------------ // DecomposeCast: Decompose GT_CAST. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeCast(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_CAST); GenTree* tree = use.Def(); GenTree* loResult = nullptr; GenTree* hiResult = nullptr; assert(tree->gtPrev == tree->gtGetOp1()); NYI_IF(tree->gtOverflow(), "TYP_LONG cast with overflow"); switch (tree->AsCast()->CastFromType()) { case TYP_INT: if (tree->gtFlags & GTF_UNSIGNED) { loResult = tree->gtGetOp1(); Range().Remove(tree); hiResult = new (m_compiler, GT_CNS_INT) GenTreeIntCon(TYP_INT, 0); Range().InsertAfter(loResult, hiResult); } else { NYI("Lowering of signed cast TYP_INT->TYP_LONG"); } break; default: NYI("Unimplemented type for Lowering of cast to TYP_LONG"); break; } return FinalizeDecomposition(use, loResult, hiResult); }
// computeRelFFfromAbsFF: Convert the free-foot (FF) pose from absolute world
// coordinates into coordinates relative to the support foot (SF).
//
// Arguments:
//    sf_abs_x/y/yaw - absolute pose of the support foot
//    ff_abs_x/y/yaw - absolute pose of the free foot
//    sf_foot        - which foot is the support foot ('L' = left; anything
//                     else is treated as the other foot)
//
// Return Value:
//    The free-foot pose expressed in the support-foot frame (roll/pitch 0).
//
// NOTE(review): the body begins with NYI(), so the code below is presumably
// unreachable until the implementation is finished -- confirm before use.
ros::Geometry ContactTransition::computeRelFFfromAbsFF(
		double sf_abs_x, double sf_abs_y, double sf_abs_yaw,
		double ff_abs_x, double ff_abs_y, double ff_abs_yaw,
		char sf_foot){
	NYI();
	double ff_rel_x, ff_rel_y, ff_rel_yaw;
	ROS_INFO("sf_abs %f %f %f", sf_abs_x, sf_abs_y, sf_abs_yaw);
	ROS_INFO("ff_abs %f %f %f", ff_abs_x, ff_abs_y, ff_abs_yaw);

	// Offset of FF relative to SF, still in world axes.
	double rx = ff_abs_x - sf_abs_x;
	double ry = ff_abs_y - sf_abs_y;
	double t = sf_abs_yaw;

	// NOTE(review): both branches compute ff_rel_y from cos(t)*rx, which does
	// not match a standard 2D frame rotation (y' would normally be
	// -sin(t)*rx + cos(t)*ry). This looks like a transcription error in the
	// mirrored left/right rotation -- verify against the intended foot-frame
	// convention before enabling this code.
	if(sf_foot == 'L'){
		ff_rel_x = cos(t)*rx - sin(t)*ry;
		ff_rel_y = cos(t)*rx + sin(t)*ry;
		ff_rel_yaw = ff_abs_yaw + sf_abs_yaw;
	}else{
		ff_rel_x = cos(t)*rx + sin(t)*ry;
		ff_rel_y = cos(t)*rx - sin(t)*ry;
		ff_rel_yaw = ff_abs_yaw - sf_abs_yaw;
	}
	// Yaw normalization to [-pi, pi] is currently disabled.
	//while(ff_abs_yaw>M_PI) ff_abs_yaw-=2*M_PI;
	//while(ff_abs_yaw<-M_PI) ff_abs_yaw+=2*M_PI;

	ros::Geometry ff_rel;
	ff_rel.setX(ff_rel_x);
	ff_rel.setY(ff_rel_y);
	ff_rel.setRPYRadian(0,0,ff_rel_yaw);
	return ff_rel;
}
//------------------------------------------------------------------------ // DoAnalysis: Walk over basic blocks of the method and detect all local // variables that can be allocated on the stack. // // Assumptions: // Must be run after the dominators have been computed (we need this // information to detect loops). void ObjectAllocator::DoAnalysis() { assert(m_IsObjectStackAllocationEnabled); assert(comp->fgDomsComputed); // TODO-ObjectStackAllocation NYI("DoAnalysis"); }
// doSomeMoreTests: Additional test scaffolding.
//
// NOTE(review): PASS(), FAIL() and NYI() are invoked unconditionally in
// sequence; the macro definitions are not visible from this file, so it is
// unclear whether this is intentional (e.g. exercising the reporting macros)
// or leftover placeholder code -- confirm intent.
void something_test::doSomeMoreTests()
{
    PASS();
    FAIL();
    NYI();
}
//------------------------------------------------------------------------ // MorphAllocObjNodeIntoStackAlloc: Morph a GT_ALLOCOBJ node into stack // allocation. // Arguments: // allocObj - GT_ALLOCOBJ that will be replaced by helper call. // block - a basic block where allocObj is // stmt - a statement where allocObj is // // Return Value: // Address of tree doing stack allocation (can be the same as allocObj). // // Notes: // Must update parents flags after this. // This function can insert additional statements before stmt. GenTree* ObjectAllocator::MorphAllocObjNodeIntoStackAlloc(GenTreeAllocObj* allocObj, BasicBlock* block, GenTreeStmt* stmt) { assert(allocObj != nullptr); assert(m_AnalysisDone); // TODO-StackAllocation NYI("MorphAllocObjIntoStackAlloc"); return allocObj; }
//------------------------------------------------------------------------ // DecomposeCast: Decompose GT_CAST. // // Arguments: // ppTree - the tree to decompose // data - tree walk context // // Return Value: // None. // void DecomposeLongs::DecomposeCast(GenTree** ppTree, Compiler::fgWalkData* data) { assert(ppTree != nullptr); assert(*ppTree != nullptr); assert(data != nullptr); assert((*ppTree)->OperGet() == GT_CAST); assert(m_compiler->compCurStmt != nullptr); GenTree* tree = *ppTree; GenTree* loResult = nullptr; GenTree* hiResult = nullptr; GenTreeStmt* curStmt = m_compiler->compCurStmt->AsStmt(); assert(tree->gtPrev == tree->gtGetOp1()); NYI_IF(tree->gtOverflow(), "TYP_LONG cast with overflow"); switch (tree->AsCast()->CastFromType()) { case TYP_INT: if (tree->gtFlags & GTF_UNSIGNED) { loResult = tree->gtGetOp1(); hiResult = new (m_compiler, GT_CNS_INT) GenTreeIntCon(TYP_INT, 0); m_compiler->fgSnipNode(curStmt, tree); } else { NYI("Lowering of signed cast TYP_INT->TYP_LONG"); } break; default: NYI("Unimplemented type for Lowering of cast to TYP_LONG"); break; } FinalizeDecomposition(ppTree, data, loResult, hiResult); }
//------------------------------------------------------------------------
// DecomposeNode: Decompose long-type trees into lower and upper halves.
//
// Arguments:
//    *ppTree - A node that may or may not require decomposition.
//    data    - The tree-walk data that provides the context.
//
// Return Value:
//    None. It the tree at *ppTree is of TYP_LONG, it will generally be replaced.
//
void DecomposeLongs::DecomposeNode(GenTree** ppTree, Compiler::fgWalkData* data)
{
    GenTree* tree = *ppTree;

    // Handle the case where we are implicitly using the lower half of a long lclVar.
    if ((tree->TypeGet() == TYP_INT) && tree->OperIsLocal())
    {
        LclVarDsc* varDsc = m_compiler->lvaTable + tree->AsLclVarCommon()->gtLclNum;
        if (varTypeIsLong(varDsc) && varDsc->lvPromoted)
        {
#ifdef DEBUG
            if (m_compiler->verbose)
            {
                printf("Changing implicit reference to lo half of long lclVar to an explicit reference of its promoted half:\n");
                m_compiler->gtDispTree(tree);
            }
#endif // DEBUG
            // Redirect the local to its promoted lo-half field, keeping ref
            // counts in sync across the renumbering.
            m_compiler->lvaDecRefCnts(tree);
            unsigned loVarNum = varDsc->lvFieldLclStart;
            tree->AsLclVarCommon()->SetLclNum(loVarNum);
            m_compiler->lvaIncRefCnts(tree);
            return;
        }
    }

    // Only TYP_LONG nodes require decomposition.
    if (tree->TypeGet() != TYP_LONG)
    {
        return;
    }

#ifdef DEBUG
    if (m_compiler->verbose)
    {
        printf("Decomposing TYP_LONG tree. BEFORE:\n");
        m_compiler->gtDispTree(tree);
    }
#endif // DEBUG

    // Dispatch to the per-operator decomposition routine; operators that are
    // not yet supported abort with NYI.
    switch (tree->OperGet())
    {
        case GT_PHI:
        case GT_PHI_ARG:
            break;

        case GT_LCL_VAR:
            DecomposeLclVar(ppTree, data);
            break;

        case GT_LCL_FLD:
            DecomposeLclFld(ppTree, data);
            break;

        case GT_STORE_LCL_VAR:
            DecomposeStoreLclVar(ppTree, data);
            break;

        case GT_CAST:
            DecomposeCast(ppTree, data);
            break;

        case GT_CNS_LNG:
            DecomposeCnsLng(ppTree, data);
            break;

        case GT_CALL:
            DecomposeCall(ppTree, data);
            break;

        case GT_RETURN:
            // Long returns are expected to already wrap their operand in GT_LONG.
            assert(tree->gtOp.gtOp1->OperGet() == GT_LONG);
            break;

        case GT_STOREIND:
            DecomposeStoreInd(ppTree, data);
            break;

        case GT_STORE_LCL_FLD:
            assert(tree->gtOp.gtOp1->OperGet() == GT_LONG);
            NYI("st.lclFld of of TYP_LONG");
            break;

        case GT_IND:
            DecomposeInd(ppTree, data);
            break;

        case GT_NOT:
            DecomposeNot(ppTree, data);
            break;

        case GT_NEG:
            DecomposeNeg(ppTree, data);
            break;

        // Binary operators. Those that require different computation for upper and lower half are
        // handled by the use of GetHiOper().
        case GT_ADD:
        case GT_SUB:
        case GT_OR:
        case GT_XOR:
        case GT_AND:
            DecomposeArith(ppTree, data);
            break;

        case GT_MUL:
            NYI("Arithmetic binary operators on TYP_LONG - GT_MUL");
            break;

        case GT_DIV:
            NYI("Arithmetic binary operators on TYP_LONG - GT_DIV");
            break;

        case GT_MOD:
            NYI("Arithmetic binary operators on TYP_LONG - GT_MOD");
            break;

        case GT_UDIV:
            NYI("Arithmetic binary operators on TYP_LONG - GT_UDIV");
            break;

        case GT_UMOD:
            NYI("Arithmetic binary operators on TYP_LONG - GT_UMOD");
            break;

        case GT_LSH:
        case GT_RSH:
        case GT_RSZ:
            NYI("Arithmetic binary operators on TYP_LONG - SHIFT");
            break;

        case GT_ROL:
        case GT_ROR:
            NYI("Arithmetic binary operators on TYP_LONG - ROTATE");
            break;

        case GT_MULHI:
            NYI("Arithmetic binary operators on TYP_LONG - MULHI");
            break;

        case GT_LOCKADD:
        case GT_XADD:
        case GT_XCHG:
        case GT_CMPXCHG:
            NYI("Interlocked operations on TYP_LONG");
            break;

        default:
        {
            JITDUMP("Illegal TYP_LONG node %s in Decomposition.", GenTree::NodeName(tree->OperGet()));
            noway_assert(!"Illegal TYP_LONG node in Decomposition.");
            break;
        }
    }

#ifdef DEBUG
    if (m_compiler->verbose)
    {
        printf(" AFTER:\n");
        m_compiler->gtDispTree(*ppTree);
    }
#endif
}
/*
 * ind_ovs_match_to_cfr: Translate an OpenFlow match structure into the
 * canonical flow representation (CFR).
 *
 * 'fields' receives the match values and 'masks' the corresponding wildcard
 * masks; both are fully zeroed first. At the end every field byte is ANDed
 * with its mask byte to normalize the entry so that wildcarded bits are zero.
 */
void
ind_ovs_match_to_cfr(const of_match_t *match,
                     struct ind_ovs_cfr *fields, struct ind_ovs_cfr *masks)
{
    /* TODO support OF 1.1+ match fields */

    memset(fields, 0, sizeof(*fields));
    memset(masks, 0, sizeof(*masks));

    /* input port */
    fields->in_port = match->fields.in_port;
    masks->in_port = match->masks.in_port;

    /* 128-bit BSN in_ports bitmap split into four 32-bit words (hi to lo) */
    masks->in_ports[0] = match->masks.bsn_in_ports_128.hi >> 32;
    masks->in_ports[1] = match->masks.bsn_in_ports_128.hi;
    masks->in_ports[2] = match->masks.bsn_in_ports_128.lo >> 32;
    masks->in_ports[3] = match->masks.bsn_in_ports_128.lo;

    /* ether addrs */
    memcpy(fields->dl_dst, &match->fields.eth_dst, OF_MAC_ADDR_BYTES);
    memcpy(fields->dl_src, &match->fields.eth_src, OF_MAC_ADDR_BYTES);
    memcpy(masks->dl_src, &match->masks.eth_src, OF_MAC_ADDR_BYTES);
    memcpy(masks->dl_dst, &match->masks.eth_dst, OF_MAC_ADDR_BYTES);

    /* ether type */
    fields->dl_type = htons(match->fields.eth_type);
    masks->dl_type = htons(match->masks.eth_type);

    /* vlan & pcp are combined, with CFI bit indicating tagged */
    if (match->version == OF_VERSION_1_0) {
        if (match->masks.vlan_vid == 0) {
            /* wildcarded */
            fields->dl_vlan = 0;
            masks->dl_vlan = 0;
        } else if (match->fields.vlan_vid == (uint16_t)-1) {
            /* untagged */
            fields->dl_vlan = 0;
            masks->dl_vlan = 0xffff;
        } else {
            /* tagged */
            fields->dl_vlan = htons(VLAN_CFI_BIT | VLAN_TCI(match->fields.vlan_vid, match->fields.vlan_pcp));
            masks->dl_vlan = htons(VLAN_CFI_BIT | VLAN_TCI(match->masks.vlan_vid, match->masks.vlan_pcp));
        }
    } else if (match->version == OF_VERSION_1_1) {
        NYI(0);
    } else {
        /* CFI bit indicating 'present' is included in the VID match field */
        fields->dl_vlan = htons(VLAN_TCI_WITH_CFI(match->fields.vlan_vid, match->fields.vlan_pcp));
        masks->dl_vlan = htons(VLAN_TCI_WITH_CFI(match->masks.vlan_vid, match->masks.vlan_pcp));
    }

    if (match->version < OF_VERSION_1_2) {
        /* pre-1.2: L3/L4 fields are copied unconditionally */
        fields->nw_proto = match->fields.ip_proto;
        masks->nw_proto = match->masks.ip_proto;

        /* keep only the DSCP bits of the TOS byte */
        fields->nw_tos = match->fields.ip_dscp & 0xFC;
        masks->nw_tos = match->masks.ip_dscp & 0xFC;

        fields->nw_src = htonl(match->fields.ipv4_src);
        fields->nw_dst = htonl(match->fields.ipv4_dst);
        masks->nw_src = htonl(match->masks.ipv4_src);
        masks->nw_dst = htonl(match->masks.ipv4_dst);

        fields->tp_src = htons(match->fields.tcp_src);
        fields->tp_dst = htons(match->fields.tcp_dst);
        masks->tp_src = htons(match->masks.tcp_src);
        masks->tp_dst = htons(match->masks.tcp_dst);
    } else {
        /* subsequent fields are type dependent */
        if (match->fields.eth_type == ETH_P_IP
            || match->fields.eth_type == ETH_P_IPV6) {
            fields->nw_proto = match->fields.ip_proto;
            masks->nw_proto = match->masks.ip_proto;

            /* TOS byte reassembled from the split DSCP/ECN match fields */
            fields->nw_tos = ((match->fields.ip_dscp & 0x3f) << 2) | (match->fields.ip_ecn & 0x3);
            masks->nw_tos = ((match->masks.ip_dscp & 0x3f) << 2) | (match->masks.ip_ecn & 0x3);

            if (match->fields.eth_type == ETH_P_IP) {
                fields->nw_src = htonl(match->fields.ipv4_src);
                fields->nw_dst = htonl(match->fields.ipv4_dst);
                masks->nw_src = htonl(match->masks.ipv4_src);
                masks->nw_dst = htonl(match->masks.ipv4_dst);
            } else if (match->fields.eth_type == ETH_P_IPV6) {
                memcpy(&fields->ipv6_src, &match->fields.ipv6_src, OF_IPV6_BYTES);
                memcpy(&fields->ipv6_dst, &match->fields.ipv6_dst, OF_IPV6_BYTES);
                memcpy(&masks->ipv6_src, &match->masks.ipv6_src, OF_IPV6_BYTES);
                memcpy(&masks->ipv6_dst, &match->masks.ipv6_dst, OF_IPV6_BYTES);
            }

            /* L4 ports double as ICMP type/code slots */
            if (match->fields.ip_proto == IPPROTO_TCP) {
                fields->tp_src = htons(match->fields.tcp_src);
                fields->tp_dst = htons(match->fields.tcp_dst);
                masks->tp_src = htons(match->masks.tcp_src);
                masks->tp_dst = htons(match->masks.tcp_dst);
            } else if (match->fields.ip_proto == IPPROTO_UDP) {
                fields->tp_src = htons(match->fields.udp_src);
                fields->tp_dst = htons(match->fields.udp_dst);
                masks->tp_src = htons(match->masks.udp_src);
                masks->tp_dst = htons(match->masks.udp_dst);
            } else if (match->fields.ip_proto == IPPROTO_ICMP) {
                fields->tp_src = htons(match->fields.icmpv4_type);
                fields->tp_dst = htons(match->fields.icmpv4_code);
                masks->tp_src = htons(match->masks.icmpv4_type);
                masks->tp_dst = htons(match->masks.icmpv4_code);
            } else if (match->fields.ip_proto == IPPROTO_ICMPV6) {
                fields->tp_src = htons(match->fields.icmpv6_type);
                fields->tp_dst = htons(match->fields.icmpv6_code);
                masks->tp_src = htons(match->masks.icmpv6_type);
                masks->tp_dst = htons(match->masks.icmpv6_code);
            }
        } else if (match->fields.eth_type == ETH_P_ARP) {
            /* ARP opcode and addresses reuse the IPv4 field slots */
            fields->nw_proto = match->fields.arp_op & 0xff;
            masks->nw_proto = match->masks.arp_op & 0xff;
            fields->nw_src = htonl(match->fields.arp_spa);
            fields->nw_dst = htonl(match->fields.arp_tpa);
            masks->nw_src = htonl(match->masks.arp_spa);
            masks->nw_dst = htonl(match->masks.arp_tpa);
        }
    }

    /* Metadata */
    fields->lag_id = match->fields.bsn_lag_id;
    masks->lag_id = match->masks.bsn_lag_id;
    fields->vrf = match->fields.bsn_vrf;
    masks->vrf = match->masks.bsn_vrf;
    fields->l3_interface_class_id = match->fields.bsn_l3_interface_class_id;
    masks->l3_interface_class_id = match->masks.bsn_l3_interface_class_id;
    fields->l3_src_class_id = match->fields.bsn_l3_src_class_id;
    masks->l3_src_class_id = match->masks.bsn_l3_src_class_id;
    fields->l3_dst_class_id = match->fields.bsn_l3_dst_class_id;
    masks->l3_dst_class_id = match->masks.bsn_l3_dst_class_id;
    fields->global_vrf_allowed = match->fields.bsn_global_vrf_allowed & 1;
    masks->global_vrf_allowed = match->masks.bsn_global_vrf_allowed & 1;
    fields->pad = 0;
    masks->pad = 0;

    /* normalize the flow entry */
    int i;
    char *f = (char *)fields;
    char *m = (char *)masks;
    for (i = 0; i < sizeof (struct ind_ovs_cfr); i++) {
        f[i] &= m[i];
    }
}
//------------------------------------------------------------------------ // DecomposeCast: Decompose GT_CAST. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeCast(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_CAST); GenTree* cast = use.Def()->AsCast(); GenTree* loResult = nullptr; GenTree* hiResult = nullptr; var_types srcType = cast->CastFromType(); var_types dstType = cast->CastToType(); if ((cast->gtFlags & GTF_UNSIGNED) != 0) { srcType = genUnsignedType(srcType); } if (varTypeIsLong(srcType)) { if (cast->gtOverflow() && (varTypeIsUnsigned(srcType) != varTypeIsUnsigned(dstType))) { GenTree* srcOp = cast->gtGetOp1(); noway_assert(srcOp->OperGet() == GT_LONG); GenTree* loSrcOp = srcOp->gtGetOp1(); GenTree* hiSrcOp = srcOp->gtGetOp2(); // // When casting between long types an overflow check is needed only if the types // have different signedness. In both cases (long->ulong and ulong->long) we only // need to check if the high part is negative or not. Use the existing cast node // to perform a int->uint cast of the high part to take advantage of the overflow // check provided by codegen. // loResult = loSrcOp; hiResult = cast; hiResult->gtType = TYP_INT; hiResult->AsCast()->gtCastType = TYP_UINT; hiResult->gtFlags &= ~GTF_UNSIGNED; hiResult->gtOp.gtOp1 = hiSrcOp; Range().Remove(cast); Range().Remove(srcOp); Range().InsertAfter(hiSrcOp, hiResult); } else { NYI("Unimplemented long->long no-op cast decomposition"); } } else if (varTypeIsIntegralOrI(srcType)) { if (cast->gtOverflow() && !varTypeIsUnsigned(srcType) && varTypeIsUnsigned(dstType)) { // // An overflow check is needed only when casting from a signed type to ulong. // Change the cast type to uint to take advantage of the overflow check provided // by codegen and then zero extend the resulting uint to ulong. 
// loResult = cast; loResult->AsCast()->gtCastType = TYP_UINT; loResult->gtType = TYP_INT; hiResult = m_compiler->gtNewZeroConNode(TYP_INT); Range().InsertAfter(loResult, hiResult); } else { if (varTypeIsUnsigned(srcType)) { loResult = cast->gtGetOp1(); hiResult = m_compiler->gtNewZeroConNode(TYP_INT); Range().Remove(cast); Range().InsertAfter(loResult, hiResult); } else { LIR::Use src(Range(), &(cast->gtOp.gtOp1), cast); unsigned lclNum = src.ReplaceWithLclVar(m_compiler, m_blockWeight); loResult = src.Def(); GenTree* loCopy = m_compiler->gtNewLclvNode(lclNum, TYP_INT); GenTree* shiftBy = m_compiler->gtNewIconNode(31, TYP_INT); hiResult = m_compiler->gtNewOperNode(GT_RSH, TYP_INT, loCopy, shiftBy); Range().Remove(cast); Range().InsertAfter(loResult, loCopy, shiftBy, hiResult); m_compiler->lvaIncRefCnts(loCopy); } } } else { NYI("Unimplemented cast decomposition"); } return FinalizeDecomposition(use, loResult, hiResult); }
//------------------------------------------------------------------------
// DecomposeNode: Decompose long-type trees into lower and upper halves.
//
// Arguments:
//    tree - the node that may or may not require decomposition.
//
// Return Value:
//    The next node to process.
//
GenTree* DecomposeLongs::DecomposeNode(GenTree* tree)
{
    // Handle the case where we are implicitly using the lower half of a long lclVar.
    if ((tree->TypeGet() == TYP_INT) && tree->OperIsLocal())
    {
        LclVarDsc* varDsc = m_compiler->lvaTable + tree->AsLclVarCommon()->gtLclNum;
        if (varTypeIsLong(varDsc) && varDsc->lvPromoted)
        {
#ifdef DEBUG
            if (m_compiler->verbose)
            {
                printf("Changing implicit reference to lo half of long lclVar to an explicit reference of its promoted "
                       "half:\n");
                m_compiler->gtDispTreeRange(Range(), tree);
            }
#endif // DEBUG
            // Redirect the local to its promoted lo-half field, keeping ref
            // counts in sync across the renumbering.
            m_compiler->lvaDecRefCnts(tree);
            unsigned loVarNum = varDsc->lvFieldLclStart;
            tree->AsLclVarCommon()->SetLclNum(loVarNum);
            m_compiler->lvaIncRefCnts(tree);
            return tree->gtNext;
        }
    }

    // Only TYP_LONG nodes require decomposition.
    if (tree->TypeGet() != TYP_LONG)
    {
        return tree->gtNext;
    }

#ifdef DEBUG
    if (m_compiler->verbose)
    {
        printf("Decomposing TYP_LONG tree. BEFORE:\n");
        m_compiler->gtDispTreeRange(Range(), tree);
    }
#endif // DEBUG

    // Find the use of this def; if it is unused, fabricate a dummy use so the
    // decomposition routines can still replace the def.
    LIR::Use use;
    if (!Range().TryGetUse(tree, &use))
    {
        use = LIR::Use::GetDummyUse(Range(), tree);
    }

    GenTree* nextNode = nullptr;
    switch (tree->OperGet())
    {
        case GT_LCL_VAR:
            nextNode = DecomposeLclVar(use);
            break;

        case GT_LCL_FLD:
            nextNode = DecomposeLclFld(use);
            break;

        case GT_STORE_LCL_VAR:
            nextNode = DecomposeStoreLclVar(use);
            break;

        case GT_CAST:
            nextNode = DecomposeCast(use);
            break;

        case GT_CNS_LNG:
            nextNode = DecomposeCnsLng(use);
            break;

        case GT_CALL:
            nextNode = DecomposeCall(use);
            break;

        case GT_RETURN:
            // Long returns are expected to already wrap their operand in GT_LONG.
            assert(tree->gtOp.gtOp1->OperGet() == GT_LONG);
            break;

        case GT_STOREIND:
            nextNode = DecomposeStoreInd(use);
            break;

        case GT_STORE_LCL_FLD:
            assert(tree->gtOp.gtOp1->OperGet() == GT_LONG);
            NYI("st.lclFld of of TYP_LONG");
            break;

        case GT_IND:
            nextNode = DecomposeInd(use);
            break;

        case GT_NOT:
            nextNode = DecomposeNot(use);
            break;

        case GT_NEG:
            nextNode = DecomposeNeg(use);
            break;

        // Binary operators. Those that require different computation for upper and lower half are
        // handled by the use of GetHiOper().
        case GT_ADD:
        case GT_SUB:
        case GT_OR:
        case GT_XOR:
        case GT_AND:
            nextNode = DecomposeArith(use);
            break;

        case GT_MUL:
            nextNode = DecomposeMul(use);
            break;

        case GT_DIV:
            NYI("Arithmetic binary operators on TYP_LONG - GT_DIV");
            break;

        case GT_MOD:
            NYI("Arithmetic binary operators on TYP_LONG - GT_MOD");
            break;

        case GT_UDIV:
            NYI("Arithmetic binary operators on TYP_LONG - GT_UDIV");
            break;

        case GT_UMOD:
            NYI("Arithmetic binary operators on TYP_LONG - GT_UMOD");
            break;

        case GT_LSH:
        case GT_RSH:
        case GT_RSZ:
            nextNode = DecomposeShift(use);
            break;

        case GT_ROL:
        case GT_ROR:
            NYI("Arithmetic binary operators on TYP_LONG - ROTATE");
            break;

        case GT_MULHI:
            NYI("Arithmetic binary operators on TYP_LONG - MULHI");
            break;

        case GT_LOCKADD:
        case GT_XADD:
        case GT_XCHG:
        case GT_CMPXCHG:
            NYI("Interlocked operations on TYP_LONG");
            break;

        default:
        {
            JITDUMP("Illegal TYP_LONG node %s in Decomposition.", GenTree::NodeName(tree->OperGet()));
            noway_assert(!"Illegal TYP_LONG node in Decomposition.");
            break;
        }
    }

#ifdef DEBUG
    if (m_compiler->verbose)
    {
        // NOTE: st_lcl_var doesn't dump properly afterwards.
        printf("Decomposing TYP_LONG tree. AFTER:\n");
        m_compiler->gtDispTreeRange(Range(), use.Def());
    }
#endif

    return nextNode;
}
//------------------------------------------------------------------------ // genBMI2Intrinsic: Generates the code for a BMI2 hardware intrinsic node // // Arguments: // node - The hardware intrinsic node // void CodeGen::genBMI2Intrinsic(GenTreeHWIntrinsic* node) { NYI("Implement BMI2 intrinsic code generation"); }
//------------------------------------------------------------------------
// genFloatSimple: Generate code for a simple (GTK_SMPOP) floating-point tree.
//
// Arguments:
//    tree - the floating-point node to generate code for
//    pref - register preference; if NULL, a default all-float preference is used
//
void CodeGen::genFloatSimple(GenTree *tree, RegSet::RegisterPreference *pref)
{
    assert(tree->OperKind() & GTK_SMPOP);
    var_types type = tree->TypeGet();

    RegSet::RegisterPreference defaultPref(RBM_ALLFLOAT, RBM_NONE);
    if (pref == NULL)
    {
        pref = &defaultPref;
    }

    switch (tree->OperGet())
    {
        // Assignment
        case GT_ASG:
        {
            genFloatAssign(tree);
            break;
        }

        // Arithmetic binops
        case GT_ADD:
        case GT_SUB:
        case GT_MUL:
        case GT_DIV:
        {
            genFloatArith(tree, pref);
            break;
        }

        case GT_NEG:
        {
            GenTreePtr op1 = tree->gtOp.gtOp1;

            // get the tree into a register
            genCodeForTreeFloat(op1, pref);

            // change the sign
            regNumber reg = regSet.PickRegFloat(type, pref);
            genMarkTreeInReg(tree, reg);
            inst_RV_RV(ins_MathOp(tree->OperGet(), type), reg, op1->gtRegNum, type);

            // mark register that holds tree
            genCodeForTreeFloat_DONE(tree, reg);
            return;
        }

        case GT_IND:
        {
            regMaskTP addrReg;

            // Make sure the address value is 'addressable'
            addrReg = genMakeAddressable(tree, 0, RegSet::FREE_REG);

            // Load the value onto the FP stack
            regNumber reg = regSet.PickRegFloat(type, pref);
            genLoadFloat(tree, reg);

            genDoneAddressable(tree, addrReg, RegSet::FREE_REG);

            genCodeForTreeFloat_DONE(tree, reg);
            break;
        }

        case GT_CAST:
        {
            genCodeForTreeCastFloat(tree, pref);
            break;
        }

        // Asg-Arithmetic ops
        case GT_ASG_ADD:
        case GT_ASG_SUB:
        case GT_ASG_MUL:
        case GT_ASG_DIV:
        {
            genFloatAsgArith(tree);
            break;
        }

        case GT_INTRINSIC:
            genFloatMath(tree, pref);
            break;

        case GT_RETURN:
        {
            GenTreePtr op1 = tree->gtOp.gtOp1;
            assert(op1);

            // Steer the result into the ABI float return register(s).
            pref->best = (type==TYP_DOUBLE) ? RBM_DOUBLERET : RBM_FLOATRET;

            // Compute the result
            genCodeForTreeFloat(op1, pref);

            inst_RV_TT(ins_FloatConv(tree->TypeGet(), op1->TypeGet()), REG_FLOATRET, op1);
            // Varargs callees also mirror the FP result into the integer
            // return register(s).
            if (compiler->info.compIsVarArgs)
            {
                if (tree->TypeGet() == TYP_FLOAT)
                {
                    inst_RV_RV(INS_vmov_f2i, REG_INTRET, REG_FLOATRET, TYP_FLOAT, EA_4BYTE);
                }
                else
                {
                    assert(tree->TypeGet() == TYP_DOUBLE);
                    inst_RV_RV_RV(INS_vmov_d2i, REG_INTRET, REG_NEXT(REG_INTRET), REG_FLOATRET, EA_8BYTE);
                }
            }
            break;
        }

        case GT_ARGPLACE:
            break;

        case GT_COMMA:
        {
            GenTreePtr op1 = tree->gtOp.gtOp1;
            GenTreePtr op2 = tree->gtGetOp2();

            if (tree->gtFlags & GTF_REVERSE_OPS)
            {
                // Evaluate the value operand first; keep its register marked as
                // used while the side-effect operand is evaluated.
                genCodeForTreeFloat(op2, pref);

                regSet.SetUsedRegFloat(op2, true);
                genEvalSideEffects(op1);
                regSet.SetUsedRegFloat(op2, false);
            }
            else
            {
                genEvalSideEffects(op1);
                genCodeForTreeFloat(op2, pref);
            }

            genCodeForTreeFloat_DONE(tree, op2->gtRegNum);
            break;
        }

        case GT_CKFINITE:
            genFloatCheckFinite(tree, pref);
            break;

        default:
            NYI("Unhandled register FP codegen");
    }
}
// Confused: Placeholder handler for a state the caller cannot classify.
// Aborts via NYI() -- this path is not yet implemented.
void Confused( void )
{
    /* don't know what's happening */
    NYI();
}
//------------------------------------------------------------------------ // genPCLMULQDQIntrinsic: Generates the code for a PCLMULQDQ hardware intrinsic node // // Arguments: // node - The hardware intrinsic node // void CodeGen::genPCLMULQDQIntrinsic(GenTreeHWIntrinsic* node) { NYI("Implement PCLMULQDQ intrinsic code generation"); }
//------------------------------------------------------------------------ // genFMAIntrinsic: Generates the code for an FMA hardware intrinsic node // // Arguments: // node - The hardware intrinsic node // void CodeGen::genFMAIntrinsic(GenTreeHWIntrinsic* node) { NYI("Implement FMA intrinsic code generation"); }
//------------------------------------------------------------------------
// TreeNodeInfoInit: Initialize per-node lowering info for the given tree.
//
// Arguments:
//    stmt - the tree to process
//
// Notes:
//    Not yet implemented for this (ARM) target; aborts with NYI.
void Lowering::TreeNodeInfoInit(GenTree* stmt)
{
    // Fixed typo in the diagnostic message ("TreeNodInfoInit" -> "TreeNodeInfoInit")
    // so the NYI output names the actual function.
    NYI("ARM TreeNodeInfoInit");
}
//------------------------------------------------------------------------ // genAESIntrinsic: Generates the code for an AES hardware intrinsic node // // Arguments: // node - The hardware intrinsic node // void CodeGen::genAESIntrinsic(GenTreeHWIntrinsic* node) { NYI("Implement AES intrinsic code generation"); }