// Construct a Scope widget with its default appearance and extent:
// no user data, an 8x13 red foreground GC, and a 32768x256 canvas.
Scope::Scope() {
    data = 0;                         // no client data attached yet
    stfgnd(getgc("8x13", "red"));     // foreground graphics context (font "8x13", red)
    stw(32768);                       // width  -- presumably pixels; TODO confirm units
    sth(256);                         // height
}
// Store the (known non-null) heap oop held in d to memory at s1 + offs.
// With compressed oops the value is narrowed before the store; if a tmp
// register is supplied the narrow form lands there and d is preserved,
// otherwise d itself is clobbered by the encoding.
inline void MacroAssembler::store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1, Register tmp) {
  if (!UseCompressedOops) {
    // Full-width store, no encoding needed.
    std(d, offs, s1);
    return;
  }
  Register scratch = (tmp != noreg) ? tmp : d;   // destination of the narrow oop
  Register narrow = encode_heap_oop_not_null(scratch, d);
  stw(narrow, offs, s1);
}
/*
 * Pack one character constant into the instruction stream.
 *
 * Characters are packed into words from the high end: when the current
 * word has no room left (Cp == 0) a fresh zero word is emitted via
 * stw(0) and Cp reset to WORDSIZE; each character then takes the next
 * BYTESIZE bits of M[P-1] (+= works as OR because the slot is zero).
 *
 * Fix: the parameter was declared K&R-style with implicit int
 * ("stc(c)"), which is invalid in C99 and later; it now has an
 * explicit type.  Behavior is unchanged.
 */
static void stc(int c) {
  if (Cp == 0) {            /* current word full: start a new one */
    stw(0);
    Cp = WORDSIZE;
  }
  Cp -= BYTESIZE;
  M[P - 1] += c << Cp;      /* deposit character in the next byte slot */
}
// Drain every pending finalizer at VM shutdown.  If the worker thread
// is already dead there must be no queued work left, otherwise halting
// would silently drop finalizers (that is a VM bug).  While work
// remains: stop the world, promote all still-live registrations to the
// finalize queue, pick the first item, then wake the worker and wait
// for it under supervisor_lock_.
void FinalizerHandler::finish(STATE, GCToken gct) {
  if(!self_) {
    if(process_list_ || !lists_->empty() || !live_list_->empty()) {
      rubinius::bug("FinalizerHandler worker thread dead during halt");
    } else {
      return;   // no worker and nothing queued: nothing to do
    }
  }

  finishing_ = true;

  while(true) {
    {
      // Pause all mutators while we inspect and mutate the lists.
      StopTheWorld stw(state, gct, 0);
      if(!process_list_) {
        if(live_list_->empty() && lists_->empty()) break;

        // Everything is garbage when halting so keep adding live objects to
        // finalize queue until done.
        if(!live_list_->empty()) {
          for(FinalizeObjects::iterator i = live_list_->begin();
              i != live_list_->end();
              ++i) {
            i->queued();
          }
          queue_objects();
        }

        first_process_item();
        if(!process_list_) break;
      }
    }

    worker_signal();   // wake the worker to run the queued finalizers

    {
      utilities::thread::Mutex::LockGuard lg(supervisor_lock_);

      state->vm()->set_call_frame(0);
      GCIndependent indy(state);   // allow GC to proceed while we block
      if(process_list_) supervisor_wait();
    }
  }

  if(!lists_->empty() || !live_list_->empty() || process_list_ != NULL)
    rubinius::bug("FinalizerHandler exiting with pending finalizers");

  stop_thread(state);
}
// Construct a vertical slider in its idle default state: no repeat
// timer running, thumb at the top of a 100-unit document with a
// 50-unit window, no callback, and a 20-unit-wide widget.
Vslider::Vslider() {
    rept = 0;        // autorepeat not active
    rate = 0;
    delay = 0;
    wsize = 50;      // visible-window size
    pos = 0;         // thumb position (top)
    dsize = 100;     // document size
    fn = 0;          // no change callback installed
    tevent = 0;
    start = -1;      // no drag in progress
    min = 8;         // minimum thumb size -- presumably pixels; TODO confirm
    stw(20);         // widget width
}
// Emit the stub that copies a callee's result from its expression
// stack (STATE(_stack)) into the caller's locals area at Rlocals,
// for result type 'type'.  Rlocals is decremented past the slots the
// result occupies.  Returns the stub's entry address.
address CppInterpreterGenerator::generate_stack_to_stack_converter(
    BasicType type) {
  const Register stack = r3;

  address start = __ pc();
  switch (type) {
  case T_VOID:
    break;                                     // no result to copy

  case T_BOOLEAN:
  case T_CHAR:
  case T_BYTE:
  case T_SHORT:
  case T_INT:
  case T_FLOAT:
    // Single 32-bit slot.
    __ load (stack, STATE(_stack));
    __ lwz (r0, Address(stack, wordSize));
    __ stw (r0, Address(Rlocals, 0));
    __ subi (Rlocals, Rlocals, wordSize);
    break;

  case T_LONG:
  case T_DOUBLE:
    // Two stack slots (second word only needed on 32-bit).
    __ load (stack, STATE(_stack));
    __ load (r0, Address(stack, wordSize));
    __ store (r0, Address(Rlocals, -wordSize));
#ifdef PPC32
    __ load (r0, Address(stack, wordSize * 2));
    __ store (r0, Address(Rlocals, 0));
#endif
    __ subi (Rlocals, Rlocals, wordSize * 2);
    break;

  case T_OBJECT:
    __ load (stack, STATE(_stack));
    __ load (r0, Address(stack, wordSize));
    __ verify_oop (r0);
    __ store (r0, Address(Rlocals, 0));
    __ subi (Rlocals, Rlocals, wordSize);
    break;

  default:
    ShouldNotReachHere();
  }
  __ blr ();

  return start;
}
// Emit the stub that copies a callee's result from the return
// registers (r3 -- and r4 on 32-bit -- or f1) into the caller's
// locals area at Rlocals, for result type 'type'.  Rlocals is
// decremented past the slots consumed.  Returns the stub's address.
address CppInterpreterGenerator::generate_tosca_to_stack_converter(
    BasicType type) {
  address start = __ pc();
  switch (type) {
  case T_VOID:
    break;                                     // no result to copy

  case T_BOOLEAN:
  case T_CHAR:
  case T_BYTE:
  case T_SHORT:
  case T_INT:
    __ stw (r3, Address(Rlocals, 0));
    __ subi (Rlocals, Rlocals, wordSize);
    break;

  case T_LONG:
    __ store (r3, Address(Rlocals, -wordSize));
#ifdef PPC32
    __ store (r4, Address(Rlocals, 0));       // second word on 32-bit
#endif
    __ subi (Rlocals, Rlocals, wordSize * 2);
    break;

  case T_FLOAT:
    __ stfs (f1, Address(Rlocals, 0));
    __ subi (Rlocals, Rlocals, wordSize);
    break;

  case T_DOUBLE:
    __ stfd (f1, Address(Rlocals, -wordSize));
    __ subi (Rlocals, Rlocals, wordSize * 2);
    break;

  case T_OBJECT:
    __ verify_oop (r3);
    __ store (r3, Address(Rlocals, 0));
    __ subi (Rlocals, Rlocals, wordSize);
    break;

  default:
    ShouldNotReachHere();
  }
  __ blr ();

  return start;
}
// Write a freshly allocated object's header: mark word, klass, and --
// for arrays (len valid) -- the length field, or the klass gap for
// plain objects when class pointers are compressed.
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len, t1, t2);
  if (UseBiasedLocking && !len->is_valid()) {
    // Biased locking: the mark word comes from the klass' prototype header.
    ld(t1, in_bytes(Klass::prototype_header_offset()), klass);
  } else {
    load_const_optimized(t1, (intx)markOopDesc::prototype());
  }
  std(t1, oopDesc::mark_offset_in_bytes(), obj);
  store_klass(obj, klass);
  if (len->is_valid()) {
    stw(len, arrayOopDesc::length_offset_in_bytes(), obj);
  } else if (UseCompressedClassPointers) {
    // Otherwise length is in the class gap.
    store_klass_gap(obj);
  }
}
/*
 * Emit the machine code for a JIT call trampoline: it calls
 * 'target_addr' with 'cu' as the single argument, then tail-jumps to
 * the address that call returns in r3 (the compiled method).
 *
 * NOTE(review): the LR is saved with stw(0, 0, 1) immediately after
 * stwu wrote the back chain at 0(r1) -- it looks like the back-chain
 * word is deliberately reused as the LR save slot for this leaf-style
 * trampoline frame; confirm against the project's PPC ABI conventions.
 */
void emit_trampoline(struct compilation_unit *cu, void *target_addr, struct jit_trampoline *t)
{
	struct buffer *b = t->objcode;

	jit_text_lock();

	b->buf = jit_text_ptr();

	/* Allocate memory on the stack */
	emit(b, stwu(1, -16, 1));

	/* Save LR on stack */
	emit(b, mflr(0));
	emit(b, stw(0, 0, 1));

	/* Pass pointer to 'struct compilation_unit' as first argument */
	emit(b, lis(3, ptr_high(cu)));
	emit(b, ori(3, 3, ptr_low(cu)));

	/* Then call 'target_addr' */
	emit(b, lis(0, ptr_high(target_addr)));
	emit(b, ori(0, 0, ptr_low(target_addr)));
	emit(b, mtctr(0));
	emit(b, bctrl());

	/* Restore LR from stack */
	emit(b, lwz(0, 0, 1));
	emit(b, mtlr(0));

	/* Free memory on stack */
	emit(b, addi(1, 1, 16));

	/* Finally jump to the compiled method */
	emit(b, mtctr(3));
	emit(b, bctr());

	jit_text_reserve(buffer_offset(b));
	jit_text_unlock();
}
// SPARC variant: write a freshly allocated object's header -- mark
// word, klass (narrowed first when UseCompressedKlassPointers), and
// either the array length or the klass gap.
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len, t1, t2);
  if (UseBiasedLocking && !len->is_valid()) {
    // Biased locking: the mark word comes from the klass' prototype header.
    ld_ptr(klass, in_bytes(Klass::prototype_header_offset()), t1);
  } else {
    set((intx)markOopDesc::prototype(), t1);
  }
  st_ptr(t1, obj, oopDesc::mark_offset_in_bytes());
  if (UseCompressedKlassPointers) {
    // Save klass
    mov(klass, t1);
    encode_klass_not_null(t1);
    stw(t1, obj, oopDesc::klass_offset_in_bytes());   // narrow klass: 32-bit store
  } else {
    st_ptr(klass, obj, oopDesc::klass_offset_in_bytes());
  }
  if (len->is_valid()) {
    st(len, obj, arrayOopDesc::length_offset_in_bytes());
  } else if (UseCompressedKlassPointers) {
    // otherwise length is in the class gap
    store_klass_gap(G0, obj);
  }
}
// Emit code that builds a new interpreter activation: computes the
// frame size (locals + BytecodeInterpreter + optional monitor),
// pushes the frame, and fills in the BytecodeInterpreter state object
// whose address ends up in Rstate.  'native' selects the layout for
// native methods (no Java expression stack, no bcp).
void CppInterpreterGenerator::generate_compute_interpreter_state(bool native) {
  StackFrame frame;
  const Address stack_words_addr(
    Rmethod, methodOopDesc::max_stack_offset());
  const Address access_flags_addr(
    Rmethod, methodOopDesc::access_flags_offset());
  Label not_synchronized_1, not_synchronized_2, not_synchronized_3;
  Label not_static, init_monitor;
  const int monitor_size = frame::interpreter_frame_monitor_size() * wordSize;

  // Calculate the access flags conditions
  const Register access_flags = r3;

  __ lwz (access_flags, access_flags_addr);
  __ andi_ (r0, access_flags, JVM_ACC_SYNCHRONIZED);
  __ compare (CRsync, r0, JVM_ACC_SYNCHRONIZED);    // CRsync <- is synchronized?
  __ andi_ (r0, access_flags, JVM_ACC_STATIC);
  __ compare (CRstatic, r0, JVM_ACC_STATIC);        // CRstatic <- is static?

  const int basic_frame_size =
    frame.unaligned_size() + sizeof(BytecodeInterpreter) + slop_factor;

  // Calculate the frame size
  const Register stack_size = r3;
  const Register frame_size = r4;
  const Register padding = r5;

  if (native) {
    __ load (frame_size, basic_frame_size);
  } else {
    // Add room for the method's max expression stack.
    __ lhz (stack_size, stack_words_addr);
    __ shift_left (stack_size, stack_size, LogBytesPerWord);
    __ addi (frame_size, stack_size, basic_frame_size);
  }
  __ bne (CRsync, not_synchronized_1);
  __ addi (frame_size, frame_size, monitor_size);   // room for one monitor
  __ bind (not_synchronized_1);
  __ calc_padding_for_alignment (padding, frame_size, StackAlignmentInBytes);
  __ add (frame_size, frame_size, padding);

  // Save the link register and create the new frame
  __ mflr (r0);
  __ store (r0, Address(r1, StackFrame::lr_save_offset * wordSize));
  __ neg (r0, frame_size);
  __ store_update_indexed (r1, r1, r0);

  // Calculate everything's addresses
  const Register stack_limit = r6;
  const Register stack = r7;
  const Register stack_base = Rmonitor;
  const Register monitor_base = r8;

  __ addi (stack_limit, r1, frame.start_of_locals() + slop_factor - wordSize);
  __ add (stack_limit, stack_limit, padding);
  if (native)
    __ mr (stack, stack_limit);
  else
    __ add (stack, stack_limit, stack_size);
  __ addi (stack_base, stack, wordSize);
  __ mr (monitor_base, stack_base);
  __ bne (CRsync, not_synchronized_2);
  __ addi (monitor_base, monitor_base, monitor_size);
  __ bind (not_synchronized_2);
  // Preserve the previous state pointer, then point Rstate at the new one.
  __ mr (r0, Rstate);
  __ mr (Rstate, monitor_base);

  // Initialise the interpreter state object
  __ store (Rlocals, STATE(_locals));
  __ store (Rmethod, STATE(_method));
  __ store (Rstate, STATE(_self_link));
  __ store (r0, STATE(_prev_link));
  __ store (stack_limit, STATE(_stack_limit));
  __ store (stack, STATE(_stack));
  __ store (stack_base, STATE(_stack_base));
  __ store (monitor_base, STATE(_monitor_base));
  __ store (Rthread, STATE(_thread));

#ifdef ASSERT
  {
    // Verify Rthread really is the current thread's TLS value.
    Label ok;
    __ load (r3, ThreadLocalStorage::thread_index());
    __ call (CAST_FROM_FN_PTR(address, pthread_getspecific));
    __ compare (Rthread, r3);
    __ beq (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif

  if (!native) {
    // bcp starts at the method's first bytecode.
    __ load (r3, Address(Rmethod, methodOopDesc::const_offset()));
    __ addi (r3, r3, in_bytes(constMethodOopDesc::codes_offset()));
    __ store (r3, STATE(_bcp));
  }
  __ load (r3, Address(Rmethod, methodOopDesc::constants_offset()));
  __ load (r3, Address(r3, constantPoolOopDesc::cache_offset_in_bytes()));
  __ store (r3, STATE(_constants));
  __ load (r3, BytecodeInterpreter::method_entry);
  __ stw (r3, STATE(_msg));
  // Zero the remaining fields (and bcp for native methods).
  __ load (r3, 0);
  if (native)
    __ store (r3, STATE(_bcp));
  __ store (r3, STATE(_oop_temp));
  __ store (r3, STATE(_mdx));
  __ store (r3, STATE(_result._to_call._callee));

  // Initialise the monitor if synchronized
  __ bne (CRsync, not_synchronized_3);
  __ bne (CRstatic, not_static);
  __ get_mirror_handle (r3);        // static: lock the class mirror
  __ b (init_monitor);
  __ bind (not_static);
  __ load (r3, Address(Rlocals, 0));  // instance: lock the receiver
  __ bind (init_monitor);
  __ store (r3, Address(Rmonitor, BasicObjectLock::obj_offset_in_bytes()));
  __ bind (not_synchronized_3);
}
// Clear a memory word: store zero (G0) at the address s1 + s2.
inline void MacroAssembler::clr( Register s1, Register s2) {
  stw( G0, s1, s2 );
}
/*
 * Read INTCODE assembly from the input stream and assemble it into the
 * memory array M[], resolving numeric labels (0..500) through Labv.
 * Dispatches on the current character Ch: digits define a label,
 * L/S/A/J/T/F/K/X select a function code, C/D/G emit data, and Z ends
 * a section (reporting any labels still unset).
 */
static void assemble() {
  int v[501];           /* label table storage for this section */
  int f = 0;            /* current function code */
  int i;

  Labv = v;

clear:
  for (i = 0; i <= 500; i++) Labv[i] = 0;
  Cp = 0;

next:
  rch();

sw:
  switch (Ch) {
  default:
    if (Ch == EOF) return;
    printf("\nBAD CH %c AT P = %d\n", Ch, P);
    goto next;

  case '0': case '1': case '2': case '3': case '4':
  case '5': case '6': case '7': case '8': case '9':
    setlab(rdn());      /* numeric label definition */
    Cp = 0;
    goto sw;

  case '$': case ' ': case '\n':
    goto next;          /* ignorable characters */

  /* function codes */
  case 'L': f = 0; break;
  case 'S': f = 1; break;
  case 'A': f = 2; break;
  case 'J': f = 3; break;
  case 'T': f = 4; break;
  case 'F': f = 5; break;
  case 'K': f = 6; break;
  case 'X': f = 7; break;

  case 'C':             /* character constant: pack into current word */
    rch();
    stc(rdn());
    goto sw;

  case 'D':             /* data word, possibly a label reference */
    rch();
    if (Ch == 'L') {
      rch();
      stw(0);           /* placeholder, fixed up when label is set */
      labref(rdn(), P - 1);
    } else
      stw(rdn());
    goto sw;

  case 'G':             /* global cell initialised to a label */
    rch();
    A = rdn() + G;
    if (Ch == 'L')
      rch();
    else
      printf("\nBAD CODE AT P = %d\n", P);
    M[A] = 0;
    labref(rdn(), A);
    goto sw;

  case 'Z':             /* end of section: report unset labels, reset */
    for (i = 0; i <= 500; i++)
      if (Labv[i] > 0) printf("L%d UNSET\n", i);
    goto clear;
  }

  /* Assemble one instruction: function code plus modifier bits. */
  W = f << FSHIFT;
  rch();
  if (Ch == 'I') { W = W + IBIT; rch(); }
  if (Ch == 'P') { W = W + PBIT; rch(); }
  if (Ch == 'G') { W = W + GBIT; rch(); }
  if (Ch == 'L') {      /* label operand: extra word, fixed up later */
    rch();
    stw(W + DBIT);
    stw(0);
    labref(rdn(), P - 1);
  } else {
    int a = rdn();
    if ((a & ABITS) == a)   /* operand fits in the address field */
      stw(W + a);
    else {                  /* too big: spill to a following word */
      stw(W + DBIT);
      stw(a);
    }
  }
  goto sw;
}
// Plain "st" delegates to the word store stw (register + register address).
inline void MacroAssembler::st(Register d, Register s1, Register s2) {
  stw(d, s1, s2);
}
// Emit the frame-manager entry for normal (non-native) interpreted
// methods.  Builds the interpreter state, calls the C++ bytecode
// interpreter, and dispatches on the message it leaves in STATE(_msg):
// call_method, return_from_method, more_monitors or throwing_exception.
// The generated code is shared between synchronized and plain methods
// (the emitted code tests CRsync itself), so the stub is built once and
// cached in normal_entry.
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  assert_different_registers(Rmethod, Rlocals, Rthread, Rstate, Rmonitor);

  Label re_dispatch;
  Label call_interpreter;
  Label call_method;
  Label call_non_interpreted_method;
  Label return_with_exception;
  Label return_from_method;
  Label resume_interpreter;
  Label return_to_initial_caller;
  Label more_monitors;
  Label throwing_exception;

  // We use the same code for synchronized and not
  if (normal_entry)
    return normal_entry;

  address start = __ pc();

  // There are two ways in which we can arrive at this entry.
  // There is the special case where a normal interpreted method
  // calls another normal interpreted method, and there is the
  // general case of when we enter from somewhere else: from
  // call_stub, from C1 or C2, or from a fast accessor which
  // deferred.  In the special case we're already in frame manager
  // code: we arrive at re_dispatch with Rstate containing the
  // previous interpreter state.  In the general case we arrive
  // at start with no previous interpreter state so we set Rstate
  // to NULL to indicate this.
  __ bind (fast_accessor_slow_entry_path);
  __ load (Rstate, 0);
  __ bind (re_dispatch);

  // Adjust the caller's stack frame to accomodate any additional
  // local variables we have contiguously with our parameters.
  generate_adjust_callers_stack();

  // Allocate and initialize our stack frame.
  generate_compute_interpreter_state(false);

  // Call the interpreter ==============================================
  __ bind (call_interpreter);

  // We can setup the frame anchor with everything we want at
  // this point as we are thread_in_Java and no safepoints can
  // occur until we go to vm mode.  We do have to clear flags
  // on return from vm but that is it
  __ set_last_Java_frame ();

  // Call interpreter
  address interpreter = JvmtiExport::can_post_interpreter_events() ?
    CAST_FROM_FN_PTR(address, BytecodeInterpreter::runWithChecks) :
    CAST_FROM_FN_PTR(address, BytecodeInterpreter::run);

  __ mr (r3, Rstate);
  __ call (interpreter);
  __ fixup_after_potential_safepoint ();

  // Clear the frame anchor
  __ reset_last_Java_frame ();

  // Examine the message from the interpreter to decide what to do
  __ lwz (r4, STATE(_msg));
  __ compare (r4, BytecodeInterpreter::call_method);
  __ beq (call_method);
  __ compare (r4, BytecodeInterpreter::return_from_method);
  __ beq (return_from_method);
  __ compare (r4, BytecodeInterpreter::more_monitors);
  __ beq (more_monitors);
  __ compare (r4, BytecodeInterpreter::throwing_exception);
  __ beq (throwing_exception);

  // Unknown message: print and die.
  __ load (r3, (intptr_t) "error: bad message from interpreter: %d\n");
  __ call (CAST_FROM_FN_PTR(address, printf));
  __ should_not_reach_here (__FILE__, __LINE__);

  // Handle a call_method message ======================================
  __ bind (call_method);
  __ load (Rmethod, STATE(_result._to_call._callee));
  __ verify_oop(Rmethod);

  // Point Rlocals past the parameters on the expression stack.
  __ load (Rlocals, STATE(_stack));
  __ lhz (r0, Address(Rmethod, methodOopDesc::size_of_parameters_offset()));
  __ shift_left (r0, r0, LogBytesPerWord);
  __ add (Rlocals, Rlocals, r0);

  // Does the callee re-enter this very stub?
  __ load (r0, STATE(_result._to_call._callee_entry_point));
  __ load (r3, (intptr_t) start);
  __ compare (r0, r3);
  __ bne (call_non_interpreted_method);

  // Interpreted methods are intercepted and re-dispatched -----------
  __ load (r0, CAST_FROM_FN_PTR(intptr_t, RecursiveInterpreterActivation));
  __ mtlr (r0);
  __ b (re_dispatch);

  // Non-interpreted methods are dispatched normally -----------------
  __ bind (call_non_interpreted_method);
  __ mtctr (r0);
  __ bctrl ();

  // Restore Rstate
  __ load (Rstate, Address(r1, StackFrame::back_chain_offset * wordSize));
  __ subi (Rstate, Rstate, sizeof(BytecodeInterpreter));

  // Check for pending exceptions
  __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
  __ compare (r0, 0);
  __ bne (return_with_exception);

  // Convert the result and resume
  generate_convert_result(CppInterpreter::_tosca_to_stack);
  __ b (resume_interpreter);

  // Handle a return_from_method message ===============================
  __ bind (return_from_method);
  __ load (r0, STATE(_prev_link));
  __ compare (r0, 0);
  __ beq (return_to_initial_caller);

  // "Return" from a re-dispatch -------------------------------------
  generate_convert_result(CppInterpreter::_stack_to_stack);
  generate_unwind_interpreter_state();

  // Resume the interpreter
  __ bind (resume_interpreter);
  __ store (Rlocals, STATE(_stack));
  __ load (Rlocals, STATE(_locals));
  __ load (Rmethod, STATE(_method));
  __ verify_oop(Rmethod);
  __ load (r0, BytecodeInterpreter::method_resume);
  __ stw (r0, STATE(_msg));
  __ b (call_interpreter);

  // Return to the initial caller (call_stub etc) --------------------
  __ bind (return_to_initial_caller);
  generate_convert_result(CppInterpreter::_stack_to_native_abi);
  generate_unwind_interpreter_state();
  __ blr ();

  // Handle a more_monitors message ====================================
  __ bind (more_monitors);
  generate_more_monitors();

  __ load (r0, BytecodeInterpreter::got_monitors);
  __ stw (r0, STATE(_msg));
  __ b (call_interpreter);

  // Handle a throwing_exception message ===============================
  __ bind (throwing_exception);

  // Check we actually have an exception
#ifdef ASSERT
  {
    Label ok;
    __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
    __ compare (r0, 0);
    __ bne (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif

  // Return to wherever
  generate_unwind_interpreter_state();
  __ bind (return_with_exception);
  __ compare (Rstate, 0);
  __ bne (resume_interpreter);
  __ blr ();

  normal_entry = start;
  return start;
}
// stw with a register-or-constant index: delegate to the Address form.
inline void MacroAssembler::stw(Register d, Register s1, RegisterOrConstant s2) {
  stw(d, Address(s1, s2));
}
// Store word d at Address a plus an extra displacement.  An indexed
// address (base + index register) cannot absorb a displacement, so
// offset must be zero in that case; otherwise offset is folded into
// the address' displacement.
inline void MacroAssembler::stw(Register d, const Address& a, int offset) {
  if (!a.has_index()) {
    stw(d, a.base(), a.disp() + offset);
    return;
  }
  // Indexed form: no room for an immediate displacement.
  assert(offset == 0, "");
  stw(d, a.base(), a.index());
}
// ByteSize is only a class when ASSERT is defined, otherwise it's an int. inline void MacroAssembler::st(Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
// Store word with an immediate displacement: delegate to stw.
inline void MacroAssembler::st(Register d, Register s1, int simm13a) {
  stw(d, s1, simm13a);
}
// Walk the whole heap for objects matching the query built from 'arg'.
// For each match either append it to 'callable' (aux mode, when
// callable is an Array) or invoke callable#call with it.  Returns the
// match count as an Integer, Fixnum(0) when the query is invalid, or
// NULL when a Ruby callback raised.
Object* System::vm_find_object(STATE, GCToken gct, Array* arg,
                               Object* callable,
                               CallFrame* calling_environment) {
  // Keep GC from running while the walker's heap snapshot is live.
  ObjectMemory::GCInhibit inhibitor(state->memory());

  // Support an aux mode, where callable is an array and we just append
  // objects to it rather than #call it.
  Array* ary = try_as<Array>(callable);
  if(!ary) ary = nil<Array>();

  Array* args = Array::create(state, 1);

  int total = 0;

  QueryCondition* condition = create_condition(state, arg);
  if(!condition) return Fixnum::from(0);

  Object* ret = cNil;

  // Special case for looking for an immediate
  if(Object* obj = condition->immediate()) {
    if(Symbol* sym = try_as<Symbol>(obj)) {
      // Check whether this is actually a valid symbol, not
      // some random non existing symbol.
      if(!state->shared().symbols.lookup_string(state, sym)) {
        delete condition;
        std::ostringstream msg;
        msg << "Invalid symbol 0x" << std::hex
            << reinterpret_cast<uintptr_t>(sym);
        Exception::range_error(state, msg.str().c_str());
        return 0;
      }
    }

    if(!ary->nil_p()) {
      ary->append(state, obj);
    } else {
      args->set(state, 0, obj);
      ret = callable->send(state, calling_environment, G(sym_call),
                           args, cNil, false);
    }

    delete condition;
    if(!ret) return 0;
    return Fixnum::from(1);
  }

  OnStack<2> os(state, ary, args);

  state->set_call_frame(calling_environment);
  ObjectWalker walker(state->memory());
  GCData gc_data(state->vm());

  {
    StopTheWorld stw(state, gct, calling_environment);

    // Seed it with the root objects.
    walker.seed(gc_data);
  }

  Object* obj = walker.next();

  while(obj) {
    if(condition->perform(state, obj)) {
      total++;

      if(!ary->nil_p()) {
        ary->append(state, obj);
      } else {
        // We call back into Ruby land here, so that might trigger a GC
        // This ensures we mark all the locations of the current search
        // queue for the walker, so we update these object references
        // properly.
        Object** stack_buf = walker.stack_buf();
        size_t stack_size = walker.stack_size();

        // NOTE(review): variable-length array of pointers -- relies on
        // a compiler extension, not standard C++.
        Object** variable_buffer[stack_size];
        for(size_t i = 0; i < stack_size; ++i) {
          variable_buffer[i] = &stack_buf[i];
        }

        VariableRootBuffer vrb(state->vm()->current_root_buffers(),
                               variable_buffer, stack_size);

        args->set(state, 0, obj);
        ret = callable->send(state, calling_environment, G(sym_call),
                             args, cNil, false);
        if(!ret) break;
      }
    }

    obj = walker.next();
  }

  delete condition;

  if(!ret) return 0;

  return Integer::from(state, total);
}
// Emit the entry stub for native (JNI) methods: build the interpreter
// state, resolve the signature handler and native function, marshal
// arguments, transition the thread Java -> native -> Java around the
// call (with a safepoint check on the way back), stash an oop result
// safely in the frame, then run the result handler and return.  The
// generated code handles both synchronized and plain methods (it tests
// CRsync itself), so the stub is built once and cached in native_entry.
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  const Register handler = r14;
  const Register function = r15;

  assert_different_registers(Rmethod, Rlocals, Rthread, Rstate, Rmonitor,
                             handler, function);

  // We use the same code for synchronized and not
  if (native_entry)
    return native_entry;

  address start = __ pc();

  // Allocate and initialize our stack frame.
  __ load (Rstate, 0);
  generate_compute_interpreter_state(true);

  // Make sure method is native and not abstract
#ifdef ASSERT
  {
    Label ok;
    __ lwz (r0, Address(Rmethod, methodOopDesc::access_flags_offset()));
    __ andi_ (r0, r0, JVM_ACC_NATIVE | JVM_ACC_ABSTRACT);
    __ compare (r0, JVM_ACC_NATIVE);
    __ beq (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif

  // Lock if necessary
  Label not_synchronized_1;

  __ bne (CRsync, not_synchronized_1);
  __ lock_object (Rmonitor);
  __ bind (not_synchronized_1);

  // Get signature handler
  const Address signature_handler_addr(
    Rmethod, methodOopDesc::signature_handler_offset());

  Label return_to_caller, got_signature_handler;

  __ load (handler, signature_handler_addr);
  __ compare (handler, 0);
  __ bne (got_signature_handler);
  // Not yet resolved: ask the runtime to prepare the native call.
  __ call_VM (noreg,
              CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::prepare_native_call),
              Rmethod, CALL_VM_NO_EXCEPTION_CHECKS);
  __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
  __ compare (r0, 0);
  __ bne (return_to_caller);
  __ load (handler, signature_handler_addr);
  __ bind (got_signature_handler);

  // Get the native function entry point
  const Address native_function_addr(
    Rmethod, methodOopDesc::native_function_offset());

  Label got_function;

  __ load (function, native_function_addr);
#ifdef ASSERT
  {
    // InterpreterRuntime::prepare_native_call() sets the mirror
    // handle and native function address first and the signature
    // handler last, so function should always be set here.
    Label ok;
    __ compare (function, 0);
    __ bne (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif

  // Call signature handler
  __ mtctr (handler);
  __ bctrl ();
  __ mr (handler, r0);   // handler now holds the result handler address

  // Pass JNIEnv
  __ la (r3, Address(Rthread, JavaThread::jni_environment_offset()));

  // Pass mirror handle if static
  const Address oop_temp_addr = STATE(_oop_temp);

  Label not_static;

  __ bne (CRstatic, not_static);
  __ get_mirror_handle (r4);
  __ store (r4, oop_temp_addr);
  __ la (r4, oop_temp_addr);
  __ bind (not_static);

  // Set up the Java frame anchor
  __ set_last_Java_frame ();

  // Change the thread state to native
  const Address thread_state_addr(Rthread, JavaThread::thread_state_offset());
#ifdef ASSERT
  {
    Label ok;
    __ lwz (r0, thread_state_addr);
    __ compare (r0, _thread_in_Java);
    __ beq (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif
  __ load (r0, _thread_in_native);
  __ stw (r0, thread_state_addr);

  // Make the call
  __ call (function);
  __ fixup_after_potential_safepoint ();

  // The result will be in r3 (and maybe r4 on 32-bit) or f1.
  // Wherever it is, we need to store it before calling anything
  const Register r3_save = r16;
#ifdef PPC32
  const Register r4_save = r17;
#endif
  const FloatRegister f1_save = f14;

  __ mr (r3_save, r3);
#ifdef PPC32
  __ mr (r4_save, r4);
#endif
  __ fmr (f1_save, f1);

  // Switch thread to "native transition" state before reading the
  // synchronization state.  This additional state is necessary
  // because reading and testing the synchronization state is not
  // atomic with respect to garbage collection.
  __ load (r0, _thread_in_native_trans);
  __ stw (r0, thread_state_addr);

  // Ensure the new state is visible to the VM thread.
  if(os::is_MP()) {
    if (UseMembar)
      __ sync ();
    else
      __ serialize_memory (r3, r4);
  }

  // Check for safepoint operation in progress and/or pending
  // suspend requests.  We use a leaf call in order to leave
  // the last_Java_frame setup undisturbed.
  Label block, no_block;

  __ load (r3, (intptr_t) SafepointSynchronize::address_of_state());
  __ lwz (r0, Address(r3, 0));
  __ compare (r0, SafepointSynchronize::_not_synchronized);
  __ bne (block);
  __ lwz (r0, Address(Rthread, JavaThread::suspend_flags_offset()));
  __ compare (r0, 0);
  __ beq (no_block);
  __ bind (block);
  __ call_VM_leaf (
       CAST_FROM_FN_PTR(address,
                        JavaThread::check_special_condition_for_native_trans));
  __ fixup_after_potential_safepoint ();
  __ bind (no_block);

  // Change the thread state
  __ load (r0, _thread_in_Java);
  __ stw (r0, thread_state_addr);

  // Reset the frame anchor
  __ reset_last_Java_frame ();

  // If the result was an OOP then unbox it and store it in the frame
  // (where it will be safe from garbage collection) before we release
  // the handle it might be protected by
  Label non_oop, store_oop;

  __ load (r0, (intptr_t) AbstractInterpreter::result_handler(T_OBJECT));
  __ compare (r0, handler);
  __ bne (non_oop);
  __ compare (r3_save, 0);
  __ beq (store_oop);      // NULL handle: store the NULL itself
  __ load (r3_save, Address(r3_save, 0));
  __ bind (store_oop);
  __ store (r3_save, STATE(_oop_temp));
  __ bind (non_oop);

  // Reset handle block
  __ load (r3, Address(Rthread, JavaThread::active_handles_offset()));
  __ load (r0, 0);
  __ stw (r0, Address(r3, JNIHandleBlock::top_offset_in_bytes()));

  // If there is an exception we skip the result handler and return.
  // Note that this also skips unlocking which seems totally wrong,
  // but apparently this is what the asm interpreter does so we do
  // too.
  __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
  __ compare (r0, 0);
  __ bne (return_to_caller);

  // Unlock if necessary
  Label not_synchronized_2;

  __ bne (CRsync, not_synchronized_2);
  __ unlock_object (Rmonitor);
  __ bind (not_synchronized_2);

  // Restore saved result and call the result handler
  __ mr (r3, r3_save);
#ifdef PPC32
  __ mr (r4, r4_save);
#endif
  __ fmr (f1, f1_save);
  __ mtctr (handler);
  __ bctrl ();

  // Unwind the current activation and return
  __ bind (return_to_caller);
  generate_unwind_interpreter_state();
  __ blr ();

  native_entry = start;
  return start;
}
// Clear a memory word: store zero (G0) at s1 + simm13a.
inline void MacroAssembler::clr( Register s1, int simm13a) {
  stw( G0, s1, simm13a);
}
// Create an itable (interface-dispatch) stub for the given itable
// index.  The stub checks the receiver implements the interface, loads
// the target Method* from the itable, and jumps to its compiled entry;
// on failure it branches to the "handle wrong method" runtime stub so
// an IncompatibleClassChangeError can be raised with full context.
// Returns NULL if the code cache has no room.
//
// Fix: removed the unused local 'address start_pc;' -- it was declared
// but never assigned or read anywhere in this function.
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // PPC port: use fixed size.
  const int code_length = VtableStub::pd_code_size_limit(false);
  VtableStub* s = new (code_length) VtableStub(false, itable_index);

  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    // Bump the megamorphic-call counter.
    int offs = __ load_const_optimized(R11_scratch1,
                                       SharedRuntime::nof_megamorphic_calls_addr(),
                                       R12_scratch2, true);
    __ lwz(R12_scratch2, offs, R11_scratch1);
    __ addi(R12_scratch2, R12_scratch2, 1);
    __ stw(R12_scratch2, offs, R11_scratch1);
  }
#endif

  assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1");

  // Entry arguments:
  //  R19_method: Interface
  //  R3_ARG1:    Receiver
  Label L_no_such_interface;
  const Register rcvr_klass = R11_scratch1,
                 interface  = R12_scratch2,
                 tmp1       = R21_tmp1,
                 tmp2       = R22_tmp2;

  address npe_addr = __ pc(); // npe = null pointer exception
  __ load_klass_with_trap_null_check(rcvr_klass, R3_ARG1);

  // Receiver subtype check against REFC.
  __ ld(interface, CompiledICHolder::holder_klass_offset(), R19_method);
  __ lookup_interface_method(rcvr_klass, interface, noreg,
                             R0, tmp1, tmp2,
                             L_no_such_interface, /*return_method=*/ false);

  // Get Method* and entrypoint for compiler
  __ ld(interface, CompiledICHolder::holder_metadata_offset(), R19_method);
  __ lookup_interface_method(rcvr_klass, interface, itable_index,
                             R19_method, tmp1, tmp2,
                             L_no_such_interface, /*return_method=*/ true);

#ifndef PRODUCT
  if (DebugVtables) {
    Label ok;
    __ cmpd(CCR0, R19_method, 0);
    __ bne(CCR0, ok);
    __ stop("method is null", 103);
    __ bind(ok);
  }
#endif

  // If the vtable entry is null, the method is abstract.
  address ame_addr = __ pc(); // ame = abstract method error

  // Must do an explicit check if implicit checks are disabled.
  assert(!MacroAssembler::needs_explicit_null_check(in_bytes(Method::from_compiled_offset())), "sanity");
  if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
    if (TrapBasedNullChecks) {
      __ trap_null_check(R19_method);
    } else {
      __ cmpdi(CCR0, R19_method, 0);
      __ beq(CCR0, L_no_such_interface);
    }
  }
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);
  __ bctr();

  // Handle IncompatibleClassChangeError in itable stubs.
  // More detailed error message.
  // We force resolving of the call site by jumping to the "handle
  // wrong method" stub, and so let the interpreter runtime do all the
  // dirty work.
  __ bind(L_no_such_interface);
  __ load_const_optimized(R11_scratch1, SharedRuntime::get_handle_wrong_method_stub(), R12_scratch2);
  __ mtctr(R11_scratch1);
  __ bctr();

  masm->flush();
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
// Used by compiler only; may use only caller saved, non-argument
// registers.
// Create a vtable-dispatch stub for the given vtable index: load the
// receiver's klass (with implicit null check), fetch the Method* from
// its vtable slot, and jump to the method's compiled entry.  Returns
// NULL if the code cache has no room.
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // PPC port: use fixed size.
  const int code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new (code_length) VtableStub(true, vtable_index);

  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    // Bump the megamorphic-call counter.
    int offs = __ load_const_optimized(R11_scratch1,
                                       SharedRuntime::nof_megamorphic_calls_addr(),
                                       R12_scratch2, true);
    __ lwz(R12_scratch2, offs, R11_scratch1);
    __ addi(R12_scratch2, R12_scratch2, 1);
    __ stw(R12_scratch2, offs, R11_scratch1);
  }
#endif

  assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1");

  // Get receiver klass.
  const Register rcvr_klass = R11_scratch1;

  // We might implicit NULL fault here.
  address npe_addr = __ pc(); // npe = null pointer exception
  __ load_klass_with_trap_null_check(rcvr_klass, R3);

  // Set method (in case of interpreted method), and destination address.
  int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // Check offset vs vtable length.
    const Register vtable_len = R12_scratch2;
    __ lwz(vtable_len, InstanceKlass::vtable_length_offset()*wordSize, rcvr_klass);
    __ cmpwi(CCR0, vtable_len, vtable_index*vtableEntry::size());
    __ bge(CCR0, L);
    __ li(R12_scratch2, vtable_index);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), R3_ARG1, R12_scratch2, false);
    __ bind(L);
  }
#endif

  int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();

  __ ld(R19_method, v_off, rcvr_klass);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ cmpdi(CCR0, R19_method, 0);
    __ bne(CCR0, L);
    __ stop("Vtable entry is ZERO", 102);
    __ bind(L);
  }
#endif

  // If the vtable entry is null, the method is abstract.
  address ame_addr = __ pc(); // ame = abstract method error
  __ load_with_trap_null_check(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);
  __ bctr();

  masm->flush();
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
// Generates the call stub: the trampoline the VM uses to invoke a Java
// method from C++ (JavaCalls).  It builds an interpreter-style frame,
// saves the non-volatile registers, copies the outgoing parameters into
// place, calls the method's entry point, then stores the result through
// the supplied result pointer and restores the saved state.
// 'return_address' receives the PC immediately after the call so that
// call_stub frames can be identified during stack walking.
address generate_call_stub(address& return_address) {
  assert (!TaggedStackInterpreter, "not supported");

  StubCodeMark mark(this, "StubRoutines", "call_stub");
  address start = __ enter();

  // Incoming arguments (C calling convention).
  const Register call_wrapper    = r3;
  const Register result          = r4;
  const Register result_type     = r5;
  const Register method          = r6;
  const Register entry_point     = r7;
  const Register parameters      = r8;
  const Register parameter_words = r9;
  const Register thread          = r10;

#ifdef ASSERT
  // Make sure we have no pending exceptions
  {
    StackFrame frame;
    Label label;
    __ load (r0, Address(thread, Thread::pending_exception_offset()));
    __ compare (r0, 0);
    __ beq (label);
    __ prolog (frame);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ epilog (frame);
    __ blr ();
    __ bind (label);
  }
#endif // ASSERT

  // Calculate the frame size
  StackFrame frame;
  for (int i = 0; i < StackFrame::max_crfs; i++)
    frame.get_cr_field();
  for (int i = 0; i < StackFrame::max_gprs; i++)
    frame.get_register();
  StubRoutines::set_call_stub_base_size(frame.unaligned_size() + 3*wordSize);
  // the 3 extra words are for call_wrapper, result and result_type

  // parameter_words is reused in-place as a byte count.
  const Register parameter_bytes = parameter_words;
  __ shift_left (parameter_bytes, parameter_words, LogBytesPerWord);

  const Register frame_size = r11;
  const Register padding    = r12;
  __ addi (frame_size, parameter_bytes, StubRoutines::call_stub_base_size());
  __ calc_padding_for_alignment (padding, frame_size, StackAlignmentInBytes);
  __ add (frame_size, frame_size, padding);

  // Save the link register and create the new frame
  __ mflr (r0);
  __ store (r0, Address(r1, StackFrame::lr_save_offset * wordSize));
  __ neg (r0, frame_size);
  __ store_update_indexed (r1, r1, r0);
#ifdef PPC64
  // On PPC64 the condition register is saved in the frame header;
  // on PPC32 it is saved among the locals below.
  __ mfcr (r0);
  __ store (r0, Address(r1, StackFrame::cr_save_offset * wordSize));
#endif // PPC64

  // Calculate the address of the interpreter's local variables
  const Register locals = frame_size;
  __ addi (locals, r1, frame.start_of_locals() - wordSize);
  __ add (locals, locals, padding);
  __ add (locals, locals, parameter_bytes);

  // Store the call wrapper address and the result stuff
  const int initial_offset = 1;
  int offset = initial_offset;
  __ store (call_wrapper, Address(locals, offset++ * wordSize));
  __ store (result, Address(locals, offset++ * wordSize));
  __ store (result_type, Address(locals, offset++ * wordSize));

  // Store the registers (non-volatile GPRs r14..r31).
#ifdef PPC32
  __ mfcr (r0);
  __ store (r0, Address(locals, offset++ * wordSize));
#endif // PPC32
  for (int i = 14; i < 32; i++) {
    __ store (as_Register(i), Address(locals, offset++ * wordSize));
  }
  const int final_offset = offset;

  // Store the location of call_wrapper
  frame::set_call_wrapper_offset((final_offset - initial_offset) * wordSize);

#ifdef ASSERT
  // Check that we wrote all the way to the end of the frame.
  // The frame may have been resized when we return from the
  // interpreter, so the start of the frame may have moved
  // but the end will be where we left it and we rely on this
  // to find our stuff.
  {
    StackFrame frame;
    Label label;
    __ load (r3, Address(r1, 0));
    __ subi (r3, r3, final_offset * wordSize);
    __ compare (r3, locals);
    __ beq (label);
    __ prolog (frame);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ epilog (frame);
    __ blr ();
    __ bind (label);
  }
#endif // ASSERT

  // Pass parameters if any: copy them word by word into the locals
  // area (dst walks downward, matching interpreter local layout).
  {
    Label loop, done;
    __ compare (parameter_bytes, 0);
    __ ble (done);
    const Register src = parameters;
    const Register dst = padding;
    __ mr (dst, locals);
    __ shift_right (r0, parameter_bytes, LogBytesPerWord);
    __ mtctr (r0);
    __ bind (loop);
    __ load (r0, Address(src, 0));
    __ store (r0, Address(dst, 0));
    __ addi (src, src, wordSize);
    __ subi (dst, dst, wordSize);
    __ bdnz (loop);
    __ bind (done);
  }

  // Make the call
  __ mr (Rmethod, method);
  __ mr (Rlocals, locals);
  __ mr (Rthread, thread);
  __ mtctr (entry_point);
  __ bctrl();

  // This is used to identify call_stub stack frames
  return_address = __ pc();

  // Figure out where our stuff is stored (recompute 'locals' from the
  // back chain; the frame may have been resized by the callee).
  __ load (locals, Address(r1, 0));
  __ subi (locals, locals, final_offset * wordSize);

#ifdef ASSERT
  // Rlocals should contain the address we just calculated.
  {
    StackFrame frame;
    Label label;
    __ compare (Rlocals, locals);
    __ beq (label);
    __ prolog (frame);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ epilog (frame);
    __ blr ();
    __ bind (label);
  }
#endif // ASSERT

  // Is an exception being thrown?  If so, skip storing the result.
  Label exit;
  __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
  __ compare (r0, 0);
  __ bne (exit);

  // Store result depending on type
  const Register result_addr = r6;
  Label is_int, is_long, is_object;
  offset = initial_offset + 1; // skip call_wrapper
  __ load (result_addr, Address(locals, offset++ * wordSize));
  __ load (result_type, Address(locals, offset++ * wordSize));

  __ compare (result_type, T_INT);
  __ beq (is_int);
  __ compare (result_type, T_LONG);
  __ beq (is_long);
  __ compare (result_type, T_OBJECT);
  __ beq (is_object);
  __ should_not_reach_here (__FILE__, __LINE__);

  __ bind (is_int);
  __ stw (r3, Address(result_addr, 0));
  __ b (exit);

  __ bind (is_long);
#ifdef PPC32
  // 64-bit result occupies r3:r4 on 32-bit PPC.
  __ store (r4, Address(result_addr, wordSize));
#endif
  __ store (r3, Address(result_addr, 0));
  __ b (exit);

  __ bind (is_object);
  __ store (r3, Address(result_addr, 0));
  //__ b (exit);

  // Restore the registers.  Restore order mirrors the save order above;
  // the assert below checks the two walks agree.
  __ bind (exit);
#ifdef PPC32
  __ load (r0, Address(locals, offset++ * wordSize));
  __ mtcr (r0);
#endif // PPC32
  for (int i = 14; i < 32; i++) {
    __ load (as_Register(i), Address(locals, offset++ * wordSize));
  }
#ifdef PPC64
  __ load (r0, Address(r1, StackFrame::cr_save_offset * wordSize));
  __ mtcr (r0);
#endif // PPC64
  assert (offset == final_offset, "save and restore must match");

  // Unwind and return
  __ load (r1, Address(r1, StackFrame::back_chain_offset * wordSize));
  __ load (r0, Address(r1, StackFrame::lr_save_offset * wordSize));
  __ mtlr (r0);
  __ blr ();

  return start;
}
/** * \param[in] argc argument count * \param[in] argv argument array * \return 0 on success, 1 on error * * \attention In daemon mode, it finishes immediately. */ int main(int argc, char** argv) { AppArgs::Init(); if (!( AppArgs::AddOption("about", '?', AAT_NO_VALUE, false) && AppArgs::AddOption("help", 'h', AAT_NO_VALUE, false) && AppArgs::AddOption("foreground", 'n', AAT_NO_VALUE, false) && AppArgs::AddOption("kill", 'k', AAT_NO_VALUE, false) && AppArgs::AddOption("config", 'f', AAT_MANDATORY_VALUE, false) && AppArgs::AddOption("version", 'V', AAT_NO_VALUE, false))) { fprintf(stderr, "error while initializing application"); return 1; } AppArgs::Parse(argc, argv); if (AppArgs::ExistsOption("help")) { fprintf(stderr, "%s\n", INCROND_HELP); return 0; } if (AppArgs::ExistsOption("about")) { fprintf(stderr, "%s\n", INCROND_DESCRIPTION); return 0; } if (AppArgs::ExistsOption("version")) { fprintf(stderr, "%s\n", INCROND_VERSION); return 0; } IncronCfg::Init(); std::string cfg; if (!AppArgs::GetOption("config", cfg)) cfg = INCRON_CONFIG; IncronCfg::Load(cfg); std::string lckdir; IncronCfg::GetValue("lockfile_dir", lckdir); std::string lckfile; IncronCfg::GetValue("lockfile_name", lckfile); AppInstance app(lckfile, lckdir); if (AppArgs::ExistsOption("kill")) { fprintf(stderr, "attempting to terminate a running instance of incrond...\n"); if (app.Terminate()) { fprintf(stderr, "the instance notified, going down\n"); return 0; } else { fprintf(stderr, "error - incrond probably not running\n"); return 1; } } if (AppArgs::ExistsOption("foreground")) g_daemon = false; openlog(INCROND_NAME, INCRON_LOG_OPTS, INCRON_LOG_FACIL); syslog(LOG_NOTICE, "starting service (version %s, built on %s %s)", INCRON_VERSION, __DATE__, __TIME__); AppArgs::Destroy(); int ret = 0; std::string sysBase; std::string userBase; if (!IncronCfg::GetValue("system_table_dir", sysBase)) throw InotifyException("configuration is corrupted", EINVAL); if (access(sysBase.c_str(), R_OK) != 0) { syslog(LOG_CRIT, 
"cannot read directory for system tables (%s): (%i) %s", sysBase.c_str(), errno, strerror(errno)); if (!g_daemon) fprintf(stderr, "cannot read directory for system tables (%s): (%i) %s", sysBase.c_str(), errno, strerror(errno)); ret = 1; goto error; } if (!IncronCfg::GetValue("user_table_dir", userBase)) throw InotifyException("configuration is corrupted", EINVAL); if (access(userBase.c_str(), R_OK) != 0) { syslog(LOG_CRIT, "cannot read directory for user tables (%s): (%i) %s", userBase.c_str(), errno, strerror(errno)); if (!g_daemon) fprintf(stderr, "cannot read directory for user tables (%s): (%i) %s", userBase.c_str(), errno, strerror(errno)); ret = 1; goto error; } try { if (g_daemon) if (daemon(0, 0) == -1) { syslog(LOG_CRIT, "daemonizing failed: (%i) %s", errno, strerror(errno)); fprintf(stderr, "daemonizing failed: (%i) %s\n", errno, strerror(errno)); ret = 1; goto error; } try { if (!app.Lock()) { syslog(LOG_CRIT, "another instance of incrond already running"); if (!g_daemon) fprintf(stderr, "another instance of incrond already running\n"); ret = 1; goto error; } } catch (AppInstException e) { syslog(LOG_CRIT, "instance lookup failed: (%i) %s", e.GetErrorNumber(), strerror(e.GetErrorNumber())); if (!g_daemon) fprintf(stderr, "instance lookup failed: (%i) %s\n", e.GetErrorNumber(), strerror(e.GetErrorNumber())); ret = 1; goto error; } prepare_pipe(); Inotify in; in.SetNonBlock(true); in.SetCloseOnExec(true); uint32_t wm = IN_CREATE | IN_CLOSE_WRITE | IN_DELETE | IN_MOVE | IN_DELETE_SELF | IN_UNMOUNT; InotifyWatch stw(sysBase, wm); in.Add(stw); InotifyWatch utw(userBase, wm); in.Add(utw); EventDispatcher ed(g_cldPipe[0], &in, &stw, &utw); try { load_tables(&ed); } catch (InotifyException e) { int err = e.GetErrorNumber(); syslog(LOG_CRIT, "%s: (%i) %s", e.GetMessage().c_str(), err, strerror(err)); ret = 1; goto error; } ed.Rebuild(); // not too efficient, but simple signal(SIGTERM, on_signal); signal(SIGINT, on_signal); signal(SIGCHLD, on_signal); 
syslog(LOG_NOTICE, "ready to process filesystem events"); while (!g_fFinish) { int res = poll(ed.GetPollData(), ed.GetSize(), -1); if (res > 0) { if (ed.ProcessEvents()) UserTable::FinishDone(); } else if (res < 0) { switch (errno) { case EINTR: // syscall interrupted - continue polling break; case EAGAIN: // not enough resources - wait a moment and try again syslog(LOG_WARNING, "polling failed due to resource shortage, retrying later..."); sleep(POLL_EAGAIN_WAIT); break; default: throw InotifyException("polling failed", errno, NULL); } } } free_tables(&ed); if (g_cldPipe[0] != -1) close(g_cldPipe[0]); if (g_cldPipe[1] != -1) close(g_cldPipe[1]); } catch (InotifyException e) { int err = e.GetErrorNumber(); syslog(LOG_CRIT, "*** unhandled exception occurred ***"); syslog(LOG_CRIT, " %s", e.GetMessage().c_str()); syslog(LOG_CRIT, " error: (%i) %s", err, strerror(err)); ret = 1; } error: syslog(LOG_NOTICE, "stopping service"); closelog(); return ret; }