//------------------------------expand_unlock_node----------------------
void PhaseMacroExpand::expand_unlock_node(UnlockNode *lock) {
    if( !InlineFastPathLocking ) return;

    // If inlining the fast-path locking code, do it now.  Inserts a
    // diamond control-flow, with the call on the slow-path.  Fencing is
    // included in both the FastLock test (on success) and the
    // slow-path call.  Memory Phi's are inserted at the diamond merge to
    // prevent hoisting mem ops into the fast-path side (where they might
    // bypass the FastUnlock because it does not carry memory edges).
    Node *ctl = lock->in(TypeFunc::Control);
    Node *obj = lock->in(TypeFunc::Parms+0);
    Node *flock = new (C, 2) FastUnlockNode( ctl, obj );
    RegionNode *region = new (C, 3) RegionNode(3);
    transform_later(region);
    Node *slow_path = opt_iff(region, flock);

    // Make the merge point
    PhiNode *memphi = new (C, 3) PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
    transform_later(memphi);
    Node *mem = lock->in(TypeFunc::Memory);
    memphi->init_req(2, mem);   // Plug in the fast-path

    Node *lock_ctl = lock->proj_out(TypeFunc::Control);
    Node *lock_mem = lock->proj_out(TypeFunc::Memory);
    _igvn.hash_delete(lock_ctl);
    _igvn.hash_delete(lock_mem);
    _igvn.subsume_node_keep_old(lock_ctl, region);
    _igvn.subsume_node_keep_old(lock_mem, memphi);
    // Plug in the slow-path
    region->init_req(1, lock_ctl);
    memphi->init_req(1, lock_mem);
    lock->set_req(TypeFunc::Control, slow_path);
}
Example #2
		void JSEdgeRemovalPass::padExitBlocks(RegionInfo& ri, Region* target) {
			for(Region::element_iterator i = target->element_begin(), e = target->element_end(); i != e; ++i) {
				RegionNode* rn = (RegionNode*)(*i);
				if(rn->isSubRegion()) {
					Region* subRegion = rn->getNodeAs<Region>();
					padExitBlocks(ri, subRegion);
				}
			}
			BasicBlock* exit = target->getExit();
			if(exit != NULL) {
				//we need to split the block into two parts to give padding space
				//Keep the phi nodes and landing pad instructions in the original
				//block and push all instructions into the new block below. That
				//way we have convergence yet space to move elements up onto a
				//given path if it turns out that the given instruction can't 

				//32 is a good number
				unsigned count = 0;
				SmallVector<BasicBlock*,32> elements;
				for(pred_iterator PI = pred_begin(exit), E = pred_end(exit); PI != E; ++PI) {
					BasicBlock* bb = *PI;
					if(target->contains(bb)) {
						elements.push_back(bb);
					}
					count++;
				}
				if(elements.size() > 0 && count != elements.size()) { 
					BasicBlock* update = SplitBlockPredecessors(exit, elements, "Pad", this);
					target->replaceExit(update);
				}
				//ri.splitBlock(update, exit);
			}

		}
Example #3
/// Take one node from the order vector and wire it up
void StructurizeCFG::wireFlow(bool ExitUseAllowed,
                              BasicBlock *LoopEnd) {
  RegionNode *Node = Order.pop_back_val();
  Visited.insert(Node->getEntry());

  if (isPredictableTrue(Node)) {
    // Just a linear flow
    if (PrevNode) {
      changeExit(PrevNode, Node->getEntry(), true);
    }
    PrevNode = Node;

  } else {
    // Insert extra prefix node (or reuse last one)
    BasicBlock *Flow = needPrefix(false);

    // Insert extra postfix node (or use exit instead)
    BasicBlock *Entry = Node->getEntry();
    BasicBlock *Next = needPostfix(Flow, ExitUseAllowed);

    // let it point to entry and next block
    Conditions.push_back(BranchInst::Create(Entry, Next, BoolUndef, Flow));
    addPhiValues(Flow, Entry);
    DT->changeImmediateDominator(Entry, Flow);

    PrevNode = Node;
    while (!Order.empty() && !Visited.count(LoopEnd) &&
           dominatesPredicates(Entry, Order.back())) {
      handleLoops(false, LoopEnd);
    }

    changeExit(PrevNode, Next, false);
    setPrevNode(Next);
  }
}
Example #4
  std::string getEdgeAttributes(RegionNode *srcNode,
                                GraphTraits<RegionInfo *>::ChildIteratorType CI,
                                RegionInfo *G) {
    RegionNode *destNode = *CI;

    if (srcNode->isSubRegion() || destNode->isSubRegion())
      return "";

    // In case of a backedge, do not use it to define the layout of the nodes.
    BasicBlock *srcBB = srcNode->getNodeAs<BasicBlock>();
    BasicBlock *destBB = destNode->getNodeAs<BasicBlock>();

    Region *R = G->getRegionFor(destBB);

    while (R && R->getParent())
      if (R->getParent()->getEntry() == destBB)
        R = R->getParent();
      else
        break;

    if (R && R->getEntry() == destBB && R->contains(srcBB))
      return "constraint=false";

    return "";
  }
Example #5
std::size_t global_reg_t::getDataSize() const {
   RegionNode *n = key->getRegionNode( id );
   std::size_t dataSize = 1;

   for ( int dimIdx = key->getNumDimensions() - 1; dimIdx >= 0; dimIdx -= 1 ) {
      std::size_t accessedLength = n->getValue();
      n = n->getParent();
      n = n->getParent();
      dataSize *= accessedLength;
   }
   return dataSize; 
}
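// A minimal stand-alone sketch of the same arithmetic (not the Nanos++ API):
// the RegionNode chain stores two links per dimension -- accessed length,
// then lower bound (see fillDimensionData() below) -- so the two getParent()
// calls above skip over the lower-bound node, and the data size is simply the
// product of the per-dimension accessed lengths.  dim_desc is a hypothetical
// stand-in for nanos_region_dimension_internal_t.
#include <cstddef>
#include <vector>

struct dim_desc {
   std::size_t size;            // full size of the dimension
   std::size_t lower_bound;     // first accessed element
   std::size_t accessed_length; // number of accessed elements
};

std::size_t element_count( std::vector< dim_desc > const &dims ) {
   std::size_t n = 1;
   for ( std::size_t d = 0; d < dims.size(); d += 1 ) {
      n *= dims[ d ].accessed_length;
   }
   return n;
}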
Example #6
void global_reg_t::fillDimensionData( nanos_region_dimension_internal_t *region) const {
   RegionNode *n = key->getRegionNode( id );
   std::vector< std::size_t > const &sizes = key->getDimensionSizes();
   for ( int dimIdx = key->getNumDimensions() - 1; dimIdx >= 0; dimIdx -= 1 ) {
      std::size_t accessedLength = n->getValue();
      n = n->getParent();
      std::size_t lowerBound = n->getValue();
      n = n->getParent();
      region[ dimIdx ].accessed_length = accessedLength;
      region[ dimIdx ].lower_bound = lowerBound;
      region[ dimIdx ].size = sizes[ dimIdx ];
   }
}
Example #7
reg_t global_reg_t::getSlabRegionId( std::size_t slabSize ) const {
   RegionNode *n = key->getRegionNode( id );
   std::vector< std::size_t > const &sizes = key->getDimensionSizes();
   std::size_t acc_size = 1;
   nanos_region_dimension_internal_t fitDimensions[ key->getNumDimensions() ];
   if ( slabSize < this->getBreadth() ) {
      fatal("Can not allocate slab for this region. Not supported yet. slabSize "<< slabSize << " breadth " << this->getBreadth());
   } else if ( this->getBreadth() < slabSize && id == 1 ) {
      return id;
   } else {

      unsigned int lower_bounds[key->getNumDimensions()];
      for ( int idx = key->getNumDimensions() - 1; idx >= 0; idx -= 1 ) {
         //std::size_t accessedLength = n->getValue();
         n = n->getParent();
         std::size_t lowerBound = n->getValue();
         n = n->getParent();
         lower_bounds[idx] = lowerBound;
      }

      bool keep_expanding = true;
      for ( unsigned int idx = 0; idx < key->getNumDimensions(); idx += 1 ) {
         fitDimensions[ idx ].size = sizes[ idx ];
         //std::cerr << "This dimension size " << sizes[ idx ] << std::endl;
         if ( keep_expanding ) {
            acc_size *= sizes[ idx ];
         //std::cerr << "This dimension acc_size " << acc_size << " slab size " << slabSize << std::endl;
            if ( slabSize == acc_size || ( slabSize > acc_size && ( ( slabSize % acc_size ) == 0 ) ) ) {
               fitDimensions[ idx ].lower_bound = 0;
               fitDimensions[ idx ].accessed_length = sizes[ idx ];
            } else if ( slabSize < acc_size && ( acc_size % slabSize ) == 0 ) {
               std::size_t slab_elems = slabSize / ( acc_size / sizes[ idx ] );
               //std::cerr << "slab_elems is " << slab_elems << " lb: " <<  lower_bounds[idx] << std::endl;
               fitDimensions[ idx ].accessed_length = slab_elems;
               fitDimensions[ idx ].lower_bound = ( lower_bounds[idx] / slab_elems ) * slab_elems;
               keep_expanding = false;
            } else {
               fatal("invalid slabSize: " << slabSize << " reg size is " << this->getBreadth() );
            }
         } else {
            fitDimensions[ idx ].accessed_length = 1;
            fitDimensions[ idx ].lower_bound = 1;
         }
      }
      (void ) fitDimensions;
   }
   return key->obtainRegionId( fitDimensions );
}
Example #8
	void expand(std::vector<RegionNode*> &regions, ProbabilityDensityFunction<RegionNode> &pdf, std::vector<Edge*> &tree, KDTree &kdtree, const State &goal) {
		RegionNode *randomNode = pdf.sample();
		Edge *randomEdge = randomNode->getRandomEdge(zeroToOne);

		for(unsigned int i = 0; i < howManySamplePoints; ++i) {
			State sample = (zeroToOne(GlobalRandomGenerator) < goalBias) ? goal : agent.getRandomStateNearState(randomEdge->end, samplePointRadius);

#ifdef WITHGRAPHICS
samples.push_back(sample);
#endif

			samplesGenerated++;
			unsigned int cellId = discretization.getCellId(sample);
			if(zeroToOne(GlobalRandomGenerator) < regions[cellId]->getWeight()) {
				if(workspace.safeState(agent, sample)) {
					samplesAdded++;
					Edge newEdge = agent.steer(randomEdge->end, sample, std::numeric_limits<double>::infinity());
					edgesGenerated++;
					newEdge.updateParent(randomEdge);
					if(newEdge.gCost() >= gBound) {
						continue;
					}

					if(workspace.safeEdge(agent, newEdge, collisionCheckDT)) {
						edgesAdded++;
						Edge *newEdgePointer = pool.construct(newEdge);

#ifdef WITHGRAPHICS
treeEdges.push_back(newEdgePointer);
#endif

						cellId = discretization.getCellId(newEdgePointer->end);
						regions[cellId]->addEdge(newEdgePointer);

						tree.push_back(newEdgePointer);
						kdtree.insertPoint(newEdgePointer);

						if(regions[cellId]->inPDF) {
							pdf.update(regions[cellId]->pdfId, regions[cellId]->getWeight());
						} else {
							typename ProbabilityDensityFunction<RegionNode>::Element *pdfElem = pdf.add(regions[cellId], regions[cellId]->getWeight());
							discretization.setInPDF(cellId);
							regions[cellId]->setInPDF(true, pdfElem->getId());
						}
					}
				}
			}
		}
	}
Example #9
/// \brief Helper to call buildExtractionBlockSet with a RegionNode.
static SetVector<BasicBlock *> buildExtractionBlockSet(const RegionNode &RN) {
  if (!RN.isSubRegion())
    // Just a single BasicBlock.
    return buildExtractionBlockSet(RN.getNodeAs<BasicBlock>());

  const Region &R = *RN.getNodeAs<Region>();

  return buildExtractionBlockSet(R.block_begin(), R.block_end());
}
Example #10
void StructurizeCFG::handleLoops(bool ExitUseAllowed,
                                 BasicBlock *LoopEnd) {
  RegionNode *Node = Order.back();
  BasicBlock *LoopStart = Node->getEntry();

  if (!Loops.count(LoopStart)) {
    wireFlow(ExitUseAllowed, LoopEnd);
    return;
  }

  if (!isPredictableTrue(Node))
    LoopStart = needPrefix(true);

  LoopEnd = Loops[Node->getEntry()];
  wireFlow(false, LoopEnd);
  while (!Visited.count(LoopEnd)) {
    handleLoops(false, LoopEnd);
  }

  // If the start of the loop is the entry block, we can't branch to it so
  // insert a new dummy entry block.
  Function *LoopFunc = LoopStart->getParent();
  if (LoopStart == &LoopFunc->getEntryBlock()) {
    LoopStart->setName("entry.orig");

    BasicBlock *NewEntry =
      BasicBlock::Create(LoopStart->getContext(),
                         "entry",
                         LoopFunc,
                         LoopStart);
    BranchInst::Create(LoopStart, NewEntry);
    DT->setNewRoot(NewEntry);
  }

  // Create an extra loop end node
  LoopEnd = needPrefix(false);
  BasicBlock *Next = needPostfix(LoopEnd, ExitUseAllowed);
  LoopConds.push_back(BranchInst::Create(Next, LoopStart,
                                         BoolUndef, LoopEnd));
  addPhiValues(LoopEnd, LoopStart);
  setPrevNode(Next);
}
Example #11
uint64_t global_reg_t::getFirstAddress( uint64_t baseAddress ) const {
   RegionNode *n = key->getRegionNode( id );
   uint64_t offset = 0;
   std::vector< std::size_t > const &sizes = key->getDimensionSizes();
   uint64_t acumSizes = 1;

   for ( unsigned int dimIdx = 0; dimIdx < key->getNumDimensions() - 1; dimIdx += 1 ) {
      acumSizes *= sizes[ dimIdx ];
   }
   
   for ( int dimIdx = key->getNumDimensions() - 1; dimIdx >= 0; dimIdx -= 1 ) {
      //std::size_t accessedLength = n->getValue();
      n = n->getParent();
      std::size_t lowerBound = n->getValue();
      n = n->getParent();
      offset += acumSizes * lowerBound;
      if ( dimIdx >= 1 ) acumSizes = acumSizes / sizes[ dimIdx - 1 ];
   }
   return baseAddress + offset; 
}
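// The loop above is a plain row-major linearization: each lower bound is
// scaled by the product of the sizes of all faster-varying dimensions (the
// acumSizes stride), with dimension 0 being the contiguous one.  A minimal
// stand-alone sketch of the same formula, assuming hypothetical lower-bound
// and size vectors ordered from fastest- to slowest-varying (not the
// Nanos++ API): offset = sum_d lower[d] * prod_{k < d} size[k].
#include <cstddef>
#include <vector>

std::size_t linear_offset( std::vector< std::size_t > const &lower,
                           std::vector< std::size_t > const &size ) {
   std::size_t stride = 1;
   std::size_t offset = 0;
   for ( std::size_t d = 0; d < lower.size(); d += 1 ) {
      offset += lower[ d ] * stride;   // stride == size[0] * ... * size[d-1]
      stride *= size[ d ];
   }
   return offset;
}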
Example #12
std::size_t global_reg_t::getBreadth() const {
   RegionNode *n = key->getRegionNode( id );
   std::size_t offset = 0;
   std::size_t lastOffset = 0;
   std::vector< std::size_t > const &sizes = key->getDimensionSizes();
   uint64_t acumSizes = 1;

   for ( unsigned int dimIdx = 0; dimIdx < key->getNumDimensions() - 1; dimIdx += 1 ) {
      acumSizes *= sizes[ dimIdx ];
   }
   
   for ( int dimIdx = key->getNumDimensions() - 1; dimIdx >= 0; dimIdx -= 1 ) {
      std::size_t accessedLength = n->getValue();
      n = n->getParent();
      std::size_t lowerBound = n->getValue();
      n = n->getParent();
      offset += acumSizes * lowerBound;
      lastOffset += acumSizes * ( lowerBound + accessedLength - 1 );
      if ( dimIdx >= 1 ) acumSizes = acumSizes / sizes[ dimIdx - 1 ];
   }
   return ( lastOffset - offset ) + 1;
}
Example #13
//------------------------------insert_goto_at---------------------------------
// Inserts a goto & corresponding basic block between
// block[block_no] and its succ_no'th successor block
void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
  // get block with block_no
  assert(block_no < _num_blocks, "illegal block number");
  Block* in  = _blocks[block_no];
  // get successor block succ_no
  assert(succ_no < in->_num_succs, "illegal successor number");
  Block* out = in->_succs[succ_no];
  // get ProjNode corresponding to the succ_no'th successor of the in block
  ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
  // create region for basic block
  RegionNode* region = new (C, 2) RegionNode(2);
  region->init_req(1, proj);
  // setup corresponding basic block
  Block* block = new (_bbs._arena) Block(_bbs._arena, region);
  _bbs.map(region->_idx, block);
  C->regalloc()->set_bad(region->_idx);
  // add a goto node
  Node* gto = _goto->clone(); // get a new goto node
  gto->set_req(0, region);
  // add it to the basic block
  block->_nodes.push(gto);
  _bbs.map(gto->_idx, block);
  C->regalloc()->set_bad(gto->_idx);
  // hook up successor block
  block->_succs.map(block->_num_succs++, out);
  // remap successor's predecessors if necessary
  for (uint i = 1; i < out->num_preds(); i++) {
    if (out->pred(i) == proj) out->head()->set_req(i, gto);
  }
  // remap predecessor's successor to new block
  in->_succs.map(succ_no, block);
  // add new basic block to basic block list
  _blocks.insert(block_no + 1, block);
  _num_blocks++;
  // Fixup block freq
  block->_freq = out->_freq;
}
Example #14
reg_t global_reg_t::getFitRegionId() const {
   RegionNode *n = key->getRegionNode( id );
   bool keep_fitting = true;
   nanos_region_dimension_internal_t fitDimensions[ key->getNumDimensions() ];
   std::vector< std::size_t > const &sizes = key->getDimensionSizes();

   for ( int idx = key->getNumDimensions() - 1; idx >= 0; idx -= 1 ) {
      std::size_t accessedLength = n->getValue();
      n = n->getParent();
      std::size_t lowerBound = n->getValue();
      n = n->getParent();
      fitDimensions[ idx ].size = sizes[ idx ];
      if ( keep_fitting ) {
         fitDimensions[ idx ].accessed_length = accessedLength;
         fitDimensions[ idx ].lower_bound = lowerBound;
         if ( accessedLength != 1 )
            keep_fitting = false;
      } else {
         fitDimensions[ idx ].lower_bound = 0;
         fitDimensions[ idx ].accessed_length = sizes[ idx ];
      }
   }
   return key->obtainRegionId( fitDimensions );
}
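// A rough reading of getFitRegionId() above, reusing the hypothetical
// dim_desc descriptor from the sketch after getDataSize(): walking from the
// slowest- to the fastest-varying dimension, the accessed bounds are kept
// while the accessed length is 1; the outermost dimension with a wider access
// keeps its range, and every faster dimension is widened to its full size, so
// the resulting descriptors cover a contiguous block of memory.
void widen_to_contiguous( std::vector< dim_desc > &dims ) {
   // dims[ dims.size() - 1 ] is the slowest-varying dimension, dims[ 0 ] the contiguous one.
   bool keep_fitting = true;
   for ( std::size_t i = dims.size(); i-- > 0; ) {
      if ( keep_fitting ) {
         // Keep the accessed bounds as they are; stop fitting after the first
         // dimension accessed with a length other than 1.
         if ( dims[ i ].accessed_length != 1 ) keep_fitting = false;
      } else {
         dims[ i ].lower_bound = 0;
         dims[ i ].accessed_length = dims[ i ].size;
      }
   }
}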
Example #15
//--- expand_allocate ---------------------------------------------------------
void PhaseMacroExpand::expand_allocate(AllocateNode *A) {
    // See if we are forced to go slow-path.
    if( A->_entry_point == (address)SharedRuntime::_new )
        return;      // Always go slow-path - required for finalizers, etc

    Node *A_ctl = A->proj_out(TypeFunc::Control);
    Node *A_mem = A->proj_out(TypeFunc::Memory);
    Node *A_oop = A->proj_out(TypeFunc::Parms+0);

    // Inject a fast-path / slow-path diamond.  Fast-path is still milli-code,
    // which returns an oop or null - and does not block, nor GC, nor kill
    // registers.  If we have an allocation failure the fast-path leaves the
    // regs in-place for a slow-path call which DOES block, GC, etc.
    Node *ctl = A->in(TypeFunc::Control);
    Node *kid = A->in(AllocateNode::KID);
    Node *siz = A->in(AllocateNode::AllocSize);
    Node *xtr = A->in(AllocateNode::ExtraSlow);
    Node *len = A->is_AllocateArray() ? A->in(AllocateNode::ALength) : C->top();

    if ( !A_oop && ( !A->is_AllocateArray() || ( _igvn.type(A->in(AllocateNode::ALength))->higher_equal(TypeInt::POS) ) ) ) {
        tty->print_cr("Dead allocation should be removed earlier");
        Unimplemented();
    }

    // Convert the array element count to a Long value,
    // and fold in the EKID value for oop-arrays.
    const TypeOopPtr *toop = A->_tf->range()->field_at(TypeFunc::Parms+0)->is_ptr()->cast_to_ptr_type(TypePtr::BotPTR)->is_oopptr();
    if( len != C->top() ) {
        assert0( A->is_AllocateArray() );
        Node *lenl = transform_later(new (C, 2) ConvI2LNode(len));
        Node *ekid = A->in(AllocateNode::EKID);
        if( ekid->bottom_type() != TypeInt::ZERO ) {
            // Have an EKID?  Smash the array length and EKID together.
            Node *ekidl  = transform_later(new (C,2) CastP2XNode(0,ekid));
            Node *ekidshf= transform_later(new (C,3) LShiftLNode(ekidl,_igvn.intcon(32)));
            Node *combo  = transform_later(new (C,3)     OrLNode(lenl,ekidshf));
            len=combo;
        } else {
            len=lenl;
        }
        // Crunch arguments for matching.  The old ExtraSlow argument is used to
        // make more gating control flow in this function, but is not an argument
        // to the runtime call.  Neither is the EKID argument: the slow-path will
        // compute its own EKID.
        A->set_req(AllocateNode::ExtraSlow,len);
        A->set_req(AllocateNode::ALength,C->top());
        A->set_req(AllocateNode::EKID,C->top());
    } else {
        A->set_req(AllocateNode::ExtraSlow,_igvn.zerocon(T_INT));
        assert0( !A->is_AllocateArray() );
    }

    // Extra slow-path test required?
    RegionNode *region2 = NULL;
    if( xtr->bottom_type() != TypeInt::ZERO ) { // Commonly, no extra test required
        // Extra slow-path tests can be required for fast-pathing reflection
        // allocation (and cloning).  If the new object requires e.g. finalization
        // or further class linking/loading.
        Node *cmp = transform_later(new (C, 3) CmpINode(xtr, _igvn.zerocon(T_INT)));
        Node *bol = transform_later(new (C, 2) BoolNode(cmp, BoolTest::eq));
        Node *iff = new (C, 2) IfNode( ctl, bol, PROB_LIKELY_MAG(5), COUNT_UNKNOWN );
        region2 = new (C, 3) RegionNode(3);
        transform_later(region2);
        ctl = opt_iff(region2,iff);
    }

    FastAllocNode *fal = new (C, 4) FastAllocNode(kid,siz,toop,len);
    fal->set_req(0,ctl);
    transform_later(fal);
    Node *mem = new (C,1) SCMemProjNode(fal);
    transform_later(mem);

    Node *cmp = transform_later(new (C, 3) CmpPNode(fal, _igvn.zerocon(T_OBJECT)));
    Node *bol = transform_later(new (C, 2) BoolNode(cmp, BoolTest::eq));
    Node *iff = new (C, 2) IfNode(ctl, bol, PROB_UNLIKELY_MAG(5), COUNT_UNKNOWN);
    RegionNode *region = new (C, 3) RegionNode(3);
    transform_later(region);
    Node *slow_path = opt_iff(region,iff);

    // Make the merge point
    PhiNode *phimem = new (C, 3) PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
    transform_later(phimem);
    phimem->init_req(2,mem);    // Plug in the fast-path

    PhiNode *phioop = NULL;
    if (A_oop) {
        phioop = new (C, 3) PhiNode(region,toop);
        transform_later(phioop);
        phioop->init_req(2,fal);    // Plug in the fast-path
    }

    _igvn.hash_delete(A_ctl);
    _igvn.hash_delete(A_mem);
    if (A_oop) _igvn.hash_delete (A_oop);
    _igvn.subsume_node_keep_old(A_ctl,region);
    _igvn.subsume_node_keep_old(A_mem,phimem);
    if (A_oop) _igvn.subsume_node_keep_old(A_oop,phioop);

    // Plug in the slow-path
    region->init_req(1,A_ctl);
    phimem->init_req(1,A_mem);
    if (A_oop) phioop->init_req(1,A_oop);
    if( xtr->bottom_type() != TypeInt::ZERO ) { // Commonly, no extra test required
        region2->init_req(1,slow_path);
        slow_path = region2;
    }
    A->set_req(TypeFunc::Control,slow_path);
    // The slow-path call now directly calls into the runtime.
    A->_entry_point = (address)SharedRuntime::_new;
}
Example #16
JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  Node* slow_ctl = _intrinsic->generate_predicate(kit.sync_jvms());
  if (kit.failing())
    return NULL;  // might happen because of NodeCountInliningCutoff

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  if (slow_ctl != NULL) {
    PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser);
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Predicate is always false.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Generate intrinsic code:
  JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser);
  if (new_jvms == NULL) {
    // Intrinsic failed, so use slow code or make a direct call.
    if (slow_map == NULL) {
      CallGenerator* cg = CallGenerator::for_direct_call(method());
      new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
    } else {
      kit.set_jvms(slow_jvms);
      return kit.transfer_exceptions_into_jvms();
    }
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Intrinsic method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new (kit.C) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}
Example #17
JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser);
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser);
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new (kit.C) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}
Example #18
JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //        uncommon_Trap
  //    if (predicate(0))
  //        do_intrinsic(0)
  //    else
  //    if (predicate(1))
  //        do_intrinsic(1)
  //    ...
  //    else
  //        do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new (kit.C) RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new (kit.C) RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}
Example #19
JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Compile* C = kit.C;
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = C->log();
  if (log != NULL) {
    log->elem("predicted_dynamic_call bci='%d'", jvms->bci());
  }

  const TypeOopPtr* predicted_mh_ptr = TypeOopPtr::make_from_constant(_predicted_method_handle, true);
  Node* predicted_mh = kit.makecon(predicted_mh_ptr);

  Node* bol = NULL;
  int bc = jvms->method()->java_code_at_bci(jvms->bci());
  if (bc != Bytecodes::_invokedynamic) {
    // This is the selectAlternative idiom for guardWithTest or
    // similar idioms.
    Node* receiver = kit.argument(0);

    // Check if the MethodHandle is the expected one
    Node* cmp = gvn.transform(new (C, 3) CmpPNode(receiver, predicted_mh));
    bol = gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq) );
  } else {
    // Get the constant pool cache from the caller class.
    ciMethod* caller_method = jvms->method();
    ciBytecodeStream str(caller_method);
    str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
    ciCPCache* cpcache = str.get_cpcache();

    // Get the offset of the CallSite from the constant pool cache
    // pointer.
    int index = str.get_method_index();
    size_t call_site_offset = cpcache->get_f1_offset(index);

    // Load the CallSite object from the constant pool cache.
    const TypeOopPtr* cpcache_type   = TypeOopPtr::make_from_constant(cpcache);  // returns TypeAryPtr of type T_OBJECT
    const TypeOopPtr* call_site_type = TypeOopPtr::make_from_klass(C->env()->CallSite_klass());
    Node* cpcache_adr   = kit.makecon(cpcache_type);
    Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, call_site_offset);
    // The oops in the constant pool cache are not compressed; load them as raw pointers.
    Node* call_site     = kit.make_load(kit.control(), call_site_adr, call_site_type, T_ADDRESS, Compile::AliasIdxRaw);

    // Load the target MethodHandle from the CallSite object.
    const TypeOopPtr* target_type = TypeOopPtr::make_from_klass(C->env()->MethodHandle_klass());
    Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
    Node* target_mh  = kit.make_load(kit.control(), target_adr, target_type, T_OBJECT);

    // Check if the MethodHandle is still the same.
    Node* cmp = gvn.transform(new (C, 3) CmpPNode(target_mh, predicted_mh));
    bol = gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq) );
  }
  IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN);
  kit.set_control( gvn.transform(new (C, 1) IfTrueNode (iff)));
  Node* slow_ctl = gvn.transform(new (C, 1) IfFalseNode(iff));

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new (C, 3) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}
Example #20
//------------------------------build_cfg--------------------------------------
// Build a proper looking CFG.  Make every block begin with either a StartNode
// or a RegionNode.  Make every block end with either a Goto, If or Return.
// The RootNode both starts and ends its own block.  Do this with a recursive
// backwards walk over the control edges.
uint PhaseCFG::build_cfg() {
  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);

  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(a, C->unique() >> 1);
  nstack.push(_root, 0);
  uint sum = 0;                 // Counter for blocks

  while (nstack.is_nonempty()) {
    // node and in's index from stack's top
    // 'np' is _root (see above) or RegionNode, StartNode: we push on stack
    // only nodes which point to the start of basic block (see below).
    Node *np = nstack.node();
    // idx > 0, except for the first node (_root) pushed on stack 
    // at the beginning when idx == 0.
    // We will use the condition (idx == 0) later to end the build.
    uint idx = nstack.index();
    Node *proj = np->in(idx);
    const Node *x = proj->is_block_proj();
    // Does the block end with a proper block-ending Node?  One of Return,
    // If or Goto? (This check should be done for visited nodes also).
    if (x == NULL) {                    // Does not end right...
      Node *g = _goto->clone(); // Force it to end in a Goto
      g->set_req(0, proj);
      np->set_req(idx, g);
      x = proj = g;
    }
    if (!visited.test_set(x->_idx)) { // Visit this block once
      // Skip any control-pinned middle'in stuff
      Node *p = proj;
      do {
        proj = p;                   // Update pointer to last Control
        p = p->in(0);               // Move control forward
      } while( !p->is_block_proj() &&
               !p->is_block_start() );
      // Make the block begin with one of Region or StartNode.
      if( !p->is_block_start() ) {
        RegionNode *r = new (C, 2) RegionNode( 2 );
        r->init_req(1, p);         // Insert RegionNode in the way
        proj->set_req(0, r);        // Insert RegionNode in the way
        p = r;
      }
      // 'p' now points to the start of this basic block

      // Put self in array of basic blocks
      Block *bb = new (_bbs._arena) Block(_bbs._arena,p);
      _bbs.map(p->_idx,bb);
      _bbs.map(x->_idx,bb);
      if( x != p )                  // Only for root is x == p
        bb->_nodes.push((Node*)x);

      // Now handle predecessors
      ++sum;                        // Count 1 for self block
      uint cnt = bb->num_preds();
      for (int i = (cnt - 1); i > 0; i-- ) { // For all predecessors
        Node *prevproj = p->in(i);  // Get prior input
        assert( !prevproj->is_Con(), "dead input not removed" );
        // Check to see if p->in(i) is a "control-dependent" CFG edge - 
        // i.e., it splits at the source (via an IF or SWITCH) and merges
        // at the destination (via a many-input Region).
        // This breaks critical edges.  The RegionNode to start the block
        // will be added when <p,i> is pulled off the node stack
        if ( cnt > 2 ) {             // Merging many things?
          assert( prevproj== bb->pred(i),"");
          if(prevproj->is_block_proj() != prevproj) { // Control-dependent edge?
            // Force a block on the control-dependent edge
            Node *g = _goto->clone();       // Force it to end in a Goto
            g->set_req(0,prevproj);
            p->set_req(i,g);
          }
        }
        nstack.push(p, i);  // 'p' is RegionNode or StartNode
      }
    } else { // Post-processing visited nodes
      nstack.pop();                 // remove node from stack
      // Check if it is the first node pushed on stack at the beginning.
      if (idx == 0) break;          // end of the build
      // Find predecessor basic block
      Block *pb = _bbs[x->_idx];
      // Insert into nodes array, if not already there
      if( !_bbs.lookup(proj->_idx) ) {
        assert( x != proj, "" );
        // Map basic block of projection
        _bbs.map(proj->_idx,pb);
        pb->_nodes.push(proj);
      }
      // Insert self as a child of my predecessor block
      pb->_succs.map(pb->_num_succs++, _bbs[np->_idx]);
      assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
              "too many control users, not a CFG?" );
    }
  }
  // Return number of basic blocks for all children and self
  return sum;
}