// Performs FPU stack allocation for every LIR operation of one basic block.
// The simulated stack state is propagated to the successors exactly once:
// either at the first branch that targets a block, or — when the block
// ends without such a branch — after the instruction loop.
void FpuStackAllocator::allocate_block(BlockBegin* block) {
  bool merge_done = false;
  LIR_OpList* ops = block->lir()->instructions_list();

  set_lir(block->lir());
  set_pos(0);

  // Note: ops->length() may change during loop
  while (pos() < ops->length()) {
    LIR_Op* op = ops->at(pos());
    _debug_information_computed = false;

#ifndef PRODUCT
    if (TraceFPUStack) {
      op->print();
    }
    check_invalid_lir_op(op);
#endif

    LIR_OpBranch* branch = op->as_OpBranch();
    LIR_Op1*      op1    = op->as_Op1();
    LIR_Op2*      op2    = op->as_Op2();
    LIR_OpCall*   opCall = op->as_OpCall();

    if (branch != NULL && branch->block() != NULL) {
      if (!merge_done) {
        // propagate stack at first branch to a successor
        merge_done = true;
        bool required_merge = merge_fpu_stack_with_successors(block);

        assert(!required_merge || branch->cond() == lir_cond_always, "splitting of critical edges should prevent FPU stack mismatches at cond branches");
      }
    } else if (op1 != NULL) {
      handle_op1(op1);
    } else if (op2 != NULL) {
      handle_op2(op2);
    } else if (opCall != NULL) {
      handle_opCall(opCall);
    }

    compute_debug_information(op);

    set_pos(pos() + 1);
  }

  // Propagate stack when block does not end with branch
  if (!merge_done) {
    merge_fpu_stack_with_successors(block);
  }
}
// Visits all LIR operations of a single block for oop-map generation,
// processing debug info and (volatile) moves, then merges the resulting
// state into each successor. The OSR entry path of the base block is
// deliberately not traversed.
void LIR_OopMapGenerator::iterate_one(BlockBegin* block) {
#ifndef PRODUCT
  if (TraceLIROopMaps) {
    tty->print_cr("Iterating through block %d", block->block_id());
  }
#endif

  set_block(block);
  block->set(BlockBegin::lir_oop_map_gen_reachable_flag);

  if (!is_caching_change_block(block)) {
    LIR_OpVisitState visit_state;
    LIR_OpList* ops = block->lir()->instructions_list();
    int num_ops = ops->length();

    for (int op_idx = 0; op_idx < num_ops; op_idx++) {
      LIR_Op* cur = ops->at(op_idx);
      LIR_Code code = cur->code();

      visit_state.visit(cur);
      for (int info_idx = 0; info_idx < visit_state.info_count(); info_idx++) {
        process_info(visit_state.info_at(info_idx));
      }

      if (code == lir_volatile_move || code == lir_move) {
        process_move(cur);
      }
    }
  }

  // Process successors
  if (block->end() == _base) {
    // Do not traverse OSR entry point of the base
    merge_state(_base->std_entry());
  } else {
    for (int sux_idx = 0; sux_idx < block->end()->number_of_sux(); sux_idx++) {
      merge_state(block->end()->sux_at(sux_idx));
    }
  }

  set_block(NULL);
}
// Processes the entry code of an exception handler so that the simulated
// FPU stack is cleared before the handler is entered. Only runs when the
// stack is non-empty at the throwing site; the allocator's lir/pos/stack
// state is saved on entry and restored on exit.
void FpuStackAllocator::allocate_exception_handler(XHandler* xhandler) {
  if (sim()->is_empty()) {
    // Nothing on the FPU stack -> no entry code processing necessary.
    return;
  }

  LIR_List* saved_lir  = lir();
  int       saved_pos  = pos();
  intArray* saved_state = sim()->write_state();

#ifndef PRODUCT
  if (TraceFPUStack) {
    tty->cr();
    tty->print_cr("------- begin of exception handler -------");
  }
#endif

  if (xhandler->entry_code() == NULL) {
    // need entry code to clear FPU stack
    LIR_List* entry_code = new LIR_List(_compilation);
    entry_code->jump(xhandler->entry_block());
    xhandler->set_entry_code(entry_code);
  }

  LIR_OpList* entry_ops = xhandler->entry_code()->instructions_list();
  set_lir(xhandler->entry_code());
  set_pos(0);

  // Note: entry_ops->length() may change during loop
  while (pos() < entry_ops->length()) {
    LIR_Op* op = entry_ops->at(pos());

#ifndef PRODUCT
    if (TraceFPUStack) {
      op->print();
    }
    check_invalid_lir_op(op);
#endif

    switch (op->code()) {
      case lir_move:
        assert(op->as_Op1() != NULL, "must be LIR_Op1");
        assert(pos() != entry_ops->length() - 1, "must not be last operation");
        handle_op1((LIR_Op1*)op);
        break;

      case lir_branch:
        assert(op->as_OpBranch()->cond() == lir_cond_always, "must be unconditional branch");
        assert(pos() == entry_ops->length() - 1, "must be last operation");
        // remove all remaining dead registers from FPU stack
        clear_fpu_stack(LIR_OprFact::illegalOpr);
        break;

      default:
        // other operations not allowed in exception entry code
        ShouldNotReachHere();
    }

    set_pos(pos() + 1);
  }

#ifndef PRODUCT
  if (TraceFPUStack) {
    tty->cr();
    tty->print_cr("------- end of exception handler -------");
  }
#endif

  set_lir(saved_lir);
  set_pos(saved_pos);
  sim()->read_state(saved_state);
}