// Emit the write-barrier update after an object-field store.
// Steals the cached address register (the address is normally dead at
// this point), then either calls the ARMv7 write-barrier handler or
// emits the generic oop_write_barrier sequence.
void HeapAddress::write_barrier_epilog() {
  GUARANTEE(has_address_register(), "write barrier must have an address register");
  GUARANTEE(base_offset() == 0 || address_register_includes_base_offset(),
            "write_barrier_epilog() must follow address_2_for(0)");
  // This is almost always the last thing we do with an address, so it
  // is okay to steal its temporary register. This saves us one or two
  // instructions in many cases.
  Assembler::Register dst = address_register();
  clear_address_register();
#if ENABLE_ARM_V7
  if (UseHandlers) {
    if (RegisterAllocator::references(dst) > 1) {
      // The handler index encodes the register holding the address
      // (write_barrier_handler_r0 + reg number); when dst is still
      // referenced elsewhere, copy it to a fresh register first.
      Assembler::Register tmp = RegisterAllocator::allocate();
      code_generator()->mov(tmp, dst);
      code_generator()->hbl(CodeGenerator::write_barrier_handler_r0 + (int)tmp);
      RegisterAllocator::dereference(tmp);
    } else {
      code_generator()->hbl(CodeGenerator::write_barrier_handler_r0 + (int)dst);
    }
  } else
#endif
  {
    // Generic path: oop_write_barrier needs three scratch registers.
    // r12 is used directly as the third scratch without going through
    // the allocator, so it is not dereferenced below.
    Assembler::Register tmp1 = RegisterAllocator::allocate();
    Assembler::Register tmp2 = RegisterAllocator::allocate();
    Assembler::Register tmp3 = Assembler::r12;
    code_generator()->oop_write_barrier(dst, tmp1, tmp2, tmp3, false);
    RegisterAllocator::dereference(tmp1);
    RegisterAllocator::dereference(tmp2);
  }
  RegisterAllocator::dereference(dst);
}
// Write-barrier update for the x86-style backend (shrl/bts): record
// the stored-to address by setting the corresponding bit in the bit
// vector rooted at _bitvector_base.
void HeapAddress::write_barrier_epilog() {
  GUARANTEE(stack_type() == T_OBJECT,
            "write barrier should not be updated for non-object stores");
  GUARANTEE(has_address_register(), "cannot update write barrier without proper register");
  // update the bit vector: word index = address >> LogBytesPerWord,
  // then set that bit (bts takes the bit index relative to the base)
  code_generator()->shrl(address_register(), LogBytesPerWord);
  code_generator()->bts(BinaryAssembler::Address((int) _bitvector_base), address_register());
  // dereference the allocated register and clear the cache
  RegisterAllocator::dereference(address_register());
  clear_address_register();
}
// Make this location conform to the corresponding location in another
// frame: perform the subset of the required merge actions permitted by
// allowed_actions (cache loads, memory stores, register moves).
// Returns the required actions that were NOT permitted, so the caller
// can deal with them separately.
RawLocation::Actions RawLocation::do_conform_to(int my_index, RawLocation* other, int other_index, RawLocation::Actions allowed_actions) {
  // make sure object registers and locations have object values
  if (other->stack_type() == T_OBJECT && stack_type() != T_OBJECT) {
    // Conformance code makes it so that this is no longer necessary.
    SHOULD_NOT_REACH_HERE();
#if NOT_CURRENTLY_USED
    // if we clear an cached object location there's no need to clear
    // any registers, since the register cache (due to type conflicts) is
    // guaranteed never to be used again
    if (other.is_flushed() || other.is_cached()) {
      code_generator()->clear_object_location(index());
    } else {
      GUARANTEE(other.is_changed(), "only case left");
      Value other_value(other.type());
      other.read_value(other_value);
      Oop::Raw null_obj;
      code_generator()->move(other_value, &null_obj);
    }
    return;
#endif
  }

  // compute the merge action
  const Actions required_actions = merge_actions(other);
  const Actions actions = required_actions & allowed_actions;

  // handle loads/stores from/to locations
  if (actions & LOC_LOAD) {
    other->update_cache(other_index);
  }
  if (actions & LOC_STORE) {
    write_changes(my_index);
  }

  // handle register stores
  if (actions & REG_STORE && other->in_register()) {
    // declare & read values for both source and destination
    const Value this_value (this, my_index );
    const Value other_value(other, other_index);

    // do the register store — skipped when source and destination
    // already occupy identical registers
    if (!other->is_register_identical_to(my_index, this, other_index)) {
      code_generator()->move(other_value, this_value);
    }
  }
  // report back whatever was needed but not allowed
  return required_actions & ~allowed_actions;
}
// Materialize the effective address into the address register when the
// offset from fixed_register() is a compile-time constant.  Variable
// (array-index) addressing must be handled by an overriding subclass.
void MemoryAddress::fill_in_address_register() {
  // In all cases except for variable array indices, we are looking at
  // a fixed offset into the object.
  jint fixed_offset;
  if (has_fixed_offset(fixed_offset)) {
    // address_register = fixed_register + (fixed_offset + base_offset)
    code_generator()->mov(address_register(), fixed_offset + base_offset());
    code_generator()->add(address_register(), fixed_register(), address_register());
    set_address_register_includes_base_offset();
  } else {
    // This is a virtual method, and in this case, we better be calling
    // an overriding definition.
    SHOULD_NOT_REACH_HERE();
  }
}
// Compute the element address for an array access.  Immediate indices
// fold into a fixed offset and are handled by the base class; register
// indices are scaled by index_shift() and added to the array base.
void IndexedAddress::fill_in_address_register() {
  if (index()->is_immediate()) {
    MemoryAddress::fill_in_address_register();
  } else {
    if (index_shift() != 0) {
      // address = index << shift; address = base + address
      code_generator()->lsl_imm5(address_register(), index()->lo_register(), index_shift());
      code_generator()->add(address_register(), fixed_register(), address_register());
    } else {
      // byte-sized elements: no scaling needed
      code_generator()->add(address_register(), fixed_register(), index()->lo_register());
    }
  }
}
// Base register used to address this location: locals are addressed
// off the frame pointer when a stack frame exists; everything else
// (including all locations in frame-less methods) is addressed off
// the Java stack pointer.
Assembler::Register LocationAddress::fixed_register() {
  const bool use_frame_pointer =
      !code_generator()->omit_stack_frame() && is_local();
  return use_frame_pointer ? Assembler::fp : Assembler::jsp;
}
// Compute this location's constant offset from its base register.
// Always succeeds (returns true): once the frame layout is known every
// local/stack slot sits at a fixed offset from fp or jsp.
bool LocationAddress::has_fixed_offset(jint& fixed_offset) {
  int base_offset;
  int actual_index;
  CodeGenerator* gen = code_generator();

  if (gen->omit_stack_frame()) {
    // Everything is accessed using jsp
    actual_index = frame()->stack_pointer() - index();
    fixed_offset = JavaFrame::arg_offset_from_sp(actual_index);
  } else {
    // With a stack frame: locals are accessed using fp
    //                     stacks are accessed using jsp
    if (is_local()) {
      // The offset from the fp that would have it point at the end of the
      // locals block
      base_offset = JavaFrame::end_of_locals_offset();
      actual_index = gen->root_method()->max_locals() - 1 - index();
    } else {
      if (Assembler::jsp == Assembler::sp) {
        // We need to make sure that we don't put something beyond
        // the current end of stack
        gen->ensure_sufficient_stack_for(index(), type());
      }
      base_offset = 0;
      actual_index = gen->frame()->stack_pointer() - index();
    }
    fixed_offset = base_offset + JavaFrame::arg_offset_from_sp(actual_index);
  }
  return true;
}
// Produce a (base register, immediate offset) pair for a pre-indexed
// access at address_offset relative to this address.  Prefers a direct
// fixed-register + immediate form; otherwise uses (and, when the
// access is at offset 0, folds base_offset into) the cached address
// register.
void MemoryAddress::prepare_preindexed_address(jint address_offset, Assembler::Register& reg, jint& offset) {
  offset = 0;
  if (!has_address_register()) {
    // Try to do direct access
    jint fixed_offset;
    if (has_fixed_offset(fixed_offset)) {
      offset = fixed_offset + base_offset() + address_offset;
      reg = fixed_register();
      return;
    }
    create_and_initialize_address_register();
  }
  GUARANTEE(has_address_register(), "We must have address register by now");

  reg = address_register();
  int xbase_offset = // base_offset or 0
      address_register_includes_base_offset() ? 0 : base_offset();
  if (address_offset == 0 && xbase_offset != 0) {
    // Update the address_register so that it includes the base_offset
    set_address_register_includes_base_offset();
    code_generator()->add(address_register(), address_register(), xbase_offset);
    offset = 0;
  } else {
    offset = (address_offset + xbase_offset);
  }
}
void HeapAddress::write_barrier_epilog() { GUARANTEE(has_address_register(), "write barrier must have an address register"); // allocate the necessary temporary registers Assembler::Register tmp0; GUARANTEE(base_offset() == 0 || address_register_includes_base_offset(), "write_barrier_epilog() must follow address_2_for(0)"); // This is almost always the last thing we do with an address, so it // is okay to steal its temporary register. This saves us one or two // instructions in many cases. tmp0 = address_register(); clear_address_register(); Assembler::Register tmp1 = RegisterAllocator::allocate(); Assembler::Register tmp2 = RegisterAllocator::allocate(); Assembler::Register tmp3 = RegisterAllocator::allocate(); // update the write barrier code_generator()->oop_write_barrier(tmp0, tmp1, tmp2, tmp3, false); // dereference the allocated registers RegisterAllocator::dereference(tmp0); RegisterAllocator::dereference(tmp1); RegisterAllocator::dereference(tmp2); RegisterAllocator::dereference(tmp3); }
/* Driver: with a filename argument, parse ILOC directly from the file
 * (optionally reading the peephole window size and pass count from
 * argv[2]/argv[3]); with no arguments, parse source from stdin, dump
 * the AST graph to "ast_graph.dot", and generate ILOC from the AST.
 * The resulting program is then optimized and printed. */
int main (int argc, char **argv)
{
  int result;
  long window=1;   /* optimizer window size (default 1) */
  long count=1;    /* optimizer pass count (default 1) */
  if (argc>1) {
    const char *filename = argv[1];
    /* window/count are only honored when both extra args are present */
    if (argc==4) {
      window = strtol(argv[2],NULL,10);
      count = strtol(argv[3],NULL,10);
    }
    /* NOTE(review): presumably iloc_parser fills program_iloc as a
     * side effect — confirm, since it is not assigned here. */
    result = iloc_parser(filename);
  }
  else {
    iks_init();
    gv_init("ast_graph.dot");
    result = yyparse();
    gv_close();
    code_generator(&ast);
    iks_ast_node_value_t *program = ast->item;
    program_iloc = program->code;
  }
  optim_main(program_iloc,window,count);
  iloc_print(program_iloc);
  return result;
}
// Reset the allocator for a new compilation: pick the register-cycling
// table that matches the frame layout, then point every allocate/spill
// cursor at r0 (VFP builds start float cursors at s0 instead).
void RegisterAllocator::initialize() {
  Assembler::Register next;
  if (code_generator()->omit_stack_frame()) {
    _next_register_table = (Assembler::Register*)next_register_table_noframe;
  } else {
    _next_register_table = (Assembler::Register*)next_register_table_frame;
  }
  next = Assembler::r0;  // IMPL_NOTE: this needs fine tuning.
  _next_allocate = next;
  _next_byte_allocate = Assembler::no_reg;
  _next_spill = next;
  _next_byte_spill = Assembler::no_reg;
#if ENABLE_ARM_VFP
  _next_float_allocate = Assembler::s0;
  _next_float_spill = Assembler::s0;
#else
  // no VFP: floats share the integer register cursors
  _next_float_allocate = next;
  _next_float_spill = next;
#endif
  initialize_register_references();
}
// Prepare for a write barrier: allocate a register and load it with
// the effective address of the field that is about to be stored into.
// Must be paired with write_barrier_epilog() after the store.
void HeapAddress::write_barrier_prolog() {
  GUARANTEE(stack_type() == T_OBJECT, "write barrier should not be updated for non-object stores");

  // allocate an address register for the write barrier implementation
  set_address_register(RegisterAllocator::allocate());

  // compute the effective address and store it in the address register
  BinaryAssembler::Address address = compute_address_for(lo_offset());
  code_generator()->leal(address_register(), address);
}
/* Smoke test: build a Huffman tree from the letter frequencies of
 * "senselessness", display it, and print the tree height and the code
 * word generated for the letter 's'. */
void huff_test(void) {
  huff_list* list =
      algorithm(insertion_sort(huff_list_builder(occurrences("senselessness"))));

  huff_show(list->huff);
  printf("%u letters encoded\n", huff_height(list->huff));
  printf("%s\n", code_generator('s', list->huff));

  /* release the single remaining tree and its list node */
  free(list->huff);
  free(list);
}
// Compute the element address for an array access.  Immediate indices
// fold into a fixed offset (delegated to the base class); register
// indices are added to the array base with an inline left shift that
// scales by the element size.
void IndexedAddress::fill_in_address_register() {
  if (!index()->is_immediate()) {
    code_generator()->add(address_register(), fixed_register(),
                          Assembler::imm_shift(index()->lo_register(),
                                               Assembler::lsl,
                                               index_shift()));
    return;
  }
  MemoryAddress::fill_in_address_register();
}
// Return the offset of this location from its base register: locals
// are addressed relative to bp (plus a two-word frame skip), stack
// slots relative to the stack pointer after ensuring enough stack.
jint LocationAddress::compute_base_offset() {
  // compute the base offset for this location address
  if (is_local()) {
    const int max_locals = Compiler::root()->method()->max_locals();
    // bp + 8 acts like a stack pointer for the locals
    return 2 * sizeof(int) + JavaFrame::arg_offset_from_sp(max_locals - index() - 1);
  } else {
    code_generator()->ensure_sufficient_stack_for(index(), type());
    return JavaFrame::arg_offset_from_sp(frame()->stack_pointer() - index());
  }
}
void RawLocation::update_cache(int index) { // this forces an update of the cache from memory // even if the location has been changed if (!is_flushed()) { // get the value for this location Value value(this, index); // read the value from memory if (value.in_register()) { code_generator()->load_from_location(value, index); } } }
void RawLocation::write_changes(int index) { // write any changes to memory if (is_changed()) { // get the value for this location Value value(this, index); // mark the location as being cached (do this before store_local to avoid // recursion when handling x86 floats). mark_as_cached(); // write the value into memory code_generator()->store_to_location(value, index); } }
// Compute this location's constant offset from its base register and
// record that register in _fixed_register.  Always returns true.  On
// ARMv7, locals in simple fixed-size frames are addressed off jsp when
// the offset fits a single 16-bit instruction.
bool LocationAddress::has_fixed_offset(jint& fixed_offset) {
  int base_offset;
  int actual_index;
  CodeGenerator* const gen = code_generator();

#if ENABLE_ARM_V7
  if (is_local()) {
    // +--+--+--+---------+--+--+--+
    // |L0|L1|L2|frame    |E0|E1|E2|
    // +--+--+--+---------+--+--+--+
    //           ^- frame()->stack_pointer()
    //
    // Note: in the example above, frame()->stack_pointer() == 5.
    //
    int offset_from_jsp = (gen->frame()->stack_pointer() - index()) * BytesPerStackElement + JavaFrame::frame_desc_size();
    if (!ENABLE_INCREASING_JAVA_STACK && ENABLE_FULL_STACK &&
        !gen->method()->access_flags().is_synchronized() &&
        !gen->method()->access_flags().has_monitor_bytecodes() &&
        offset_from_jsp < 0xff) {
      // We will try to use jsp (r9) to access the local if possible, so that
      // it can be done with one 16-bit instruction. We don't do it for
      // synchronization methods, which may have a variable frame size.
      fixed_offset = offset_from_jsp;
      _fixed_register = Assembler::jsp;
      return true;
    }
  }
#endif

  if (is_local()) {
    // The offset from the fp that would have it point at the end of the
    // locals block
    base_offset = JavaFrame::end_of_locals_offset();
    actual_index = gen->method()->max_locals() - 1 - index();
    _fixed_register = Assembler::fp;
  } else {
    // We need to make sure that we don't put something beyond
    // the current end of stack
    gen->ensure_sufficient_stack_for(index(), type());
    base_offset = 0;
    actual_index = gen->frame()->stack_pointer() - index();
    _fixed_register = Assembler::jsp;
  }
  fixed_offset = base_offset + JavaFrame::arg_offset_from_sp(actual_index);
  return true;
}
// Test driver: set up a minimal compilation context — a dummy method,
// a fresh compiled-method buffer, and a code generator — then run the
// VSF merge test cases read from config_file.
void VSFMergeTest::run(OsFile_Handle config_file) {
  SETUP_ERROR_CHECKER_ARG;
  AccessFlags access_flags;
  access_flags.set_flags(0);
  // Dummy method with enough local slots for every test location.
  Method method = Universe::new_method(0, access_flags JVM_CHECK);
  method.set_max_locals(MAX_TEST_LOCATION_COUNT);

  Compiler test_compiler(&method, -1);
  GUARANTEE(Compiler::current() != NULL, "Sanity");
  Compiler * const compiler = Compiler::current();
  CompiledMethod compiled_method =
      Universe::new_compiled_method(MAX_VSF_MERGE_CODE_SIZE JVM_NO_CHECK);
  compiler->set_current_compiled_method(&compiled_method);

  CodeGenerator code_generator(compiler);
  execute_test_cases(config_file JVM_NO_CHECK_AT_BOTTOM);
}
// Quick flush: when only the locations above the real stack pointer
// live in registers (everything at or below it is already flushed),
// store them with pre-indexed STRs that update jsp as a side effect,
// instead of doing a full frame flush.  Returns false whenever any
// precondition for this fast path fails.
bool VirtualStackFrame::flush_quick() {
  if (ENABLE_JAVA_STACK_TAGS) {
    // Tagged stacks can't use this simple store sequence.
    return false;
  }
  int current_stack_ptr = stack_pointer();
  int virtual_stack_ptr = virtual_stack_pointer();

  if (current_stack_ptr >= virtual_stack_ptr) {
    // Not possible to use writeback addressing modes
    return false;
  }

  // This happens quite often, especially if we are making a series of
  // calls like:
  //
  //     iload_0
  //     invokestatic void foo(int);
  //     iload_0
  //     iload_1
  //     invokestatic void bar(int, int);
  //
  // During the second call, the top two VSF locations are in register
  // and must be flushed. The other VSF locations are already flushed
  // because of the first call. So (assuming we have downward-growing
  // full stack) we can use two pre-decrement STRs to store the top two
  // locations.
  //
  // IMPL_NOTE: consider using STM to save code footprint. However, doing
  // so might be slower than using individual STRs on Xscale

  // First pass: verify every location meets the quick-flush
  // preconditions before emitting any code.
  {
    AllocationDisabler allocation_not_allowed;
    RawLocation *raw_location = raw_location_at(0);
    RawLocation *end = raw_location_end(raw_location);
    int index = 0;

    while (raw_location < end) {
      BasicType type = raw_location->type();
      if (is_two_word(type)) {
        return false;  // uncommon. Just bail out.
      }
#if ENABLE_ARM_VFP
      if (type == T_FLOAT) {
        return false;  // TEMP: fsts supports pre-indexing as well
      }
#endif
      bool changed = raw_location->is_changed();
      if (index <= current_stack_ptr) {
        // already-flushed region: must have no pending changes
        if (changed) {
          return false;
        }
      } else {
        // region above the real stack pointer: must be a changed
        // value currently held in a register
        if (!changed) {
          return false;
        }
        if (!raw_location->in_register()) {
          return false;
        }
      }
      index ++;
      raw_location++;
    }
  }

  // Is this necessary?
  code_generator()->write_literals_if_desperate();

  // Second pass: emit one pre-indexed str per in-register location
  // (jsp is advanced by the addressing mode) and mark everything
  // as flushed.
  {
    AllocationDisabler allocation_not_allowed;
    const Assembler::Register jsp = Assembler::jsp;
    const int offset = BytesPerStackElement * JavaStackDirection;
    RawLocation *raw_location = raw_location_at(0);
    RawLocation *end = raw_location_end(raw_location);
    int index = 0;

    while (raw_location < end) {
      if (index <= current_stack_ptr) {
        raw_location->mark_as_flushed();
      } else {
        Assembler::Register reg = raw_location->get_register();
        Assembler::Address2 addr;
        addr = Assembler::imm_index(jsp, offset, Assembler::pre_indexed);
        code_generator()->str(reg, addr);
        raw_location->mark_as_flushed();
      }
      index ++;
      raw_location++;
    }
  }
  set_real_stack_pointer(virtual_stack_ptr);
  return true;
}
/**
 * Generate code for a list of instructions.
 *
 * Builds a CodeGenerator configured for the requested style and lets
 * it visit each instruction in the list.
 */
void GenerateCode(Instruction_list& instructions, CodeStyle style)
{
    CodeGenerator generator(style);
    Accept(instructions, generator);
}
/**
 * Generate code for a list of nodes.
 *
 * Builds a CodeGenerator configured for the requested style and lets
 * it visit each node in the list.
 */
void GenerateCode(Node_list& nodes, CodeStyle style)
{
    CodeGenerator generator(style);
    Accept(nodes, generator);
}
// True when the slot at index() is declared as a local variable of the
// method being compiled (as opposed to an expression-stack slot).
inline bool LocationAddress::is_local( void ) const {
  const int slot = index();
  return code_generator()->method()->is_local(slot);
}
/* Entry point: build the constant bitmask tables, then run the code
 * generator.  The verification pass is currently disabled.
 * NOTE(review): `void main` is nonstandard for hosted C — fine only if
 * this targets a freestanding environment; otherwise use `int main`. */
void main(void) {
  const_bitmask_init();
  code_generator();
  //const_bitmask_verify();
}