address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  // rbx: method
  // r14: pointer to locals
  // c_rarg3: first stack arg - wordSize
  __ mov(c_rarg3, rsp);
  // adjust rsp
  __ subptr(rsp, 4 * wordSize);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             rbx, r14, c_rarg3);

  // rax: result handler

  // Stack layout:
  // rsp: 3 integer or float args (if static first is unused)
  //      1 float/double identifiers
  //        return address
  //        stack args
  //        garbage
  //        expression stack bottom
  //        bcp (NULL)
  //        ...

  // Do FP first so we can use c_rarg3 as temp
  __ movl(c_rarg3, Address(rsp, 3 * wordSize)); // float/double identifiers

  for (int i = 0; i < Argument::n_int_register_parameters_c - 1; i++) {
    XMMRegister floatreg = as_XMMRegister(i+1);
    Label isfloatordouble, isdouble, next;

    __ testl(c_rarg3, 1 << (i*2));      // Float or Double?
    __ jcc(Assembler::notZero, isfloatordouble);

    // Do Int register here
    switch ( i ) {
      case 0:
        __ movl(rscratch1, Address(rbx, Method::access_flags_offset()));
        __ testl(rscratch1, JVM_ACC_STATIC);
        __ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
        break;
      case 1:
        __ movptr(c_rarg2, Address(rsp, wordSize));
        break;
      case 2:
        __ movptr(c_rarg3, Address(rsp, 2 * wordSize));
        break;
      default:
        break;
    }

    __ jmp (next);

    __ bind(isfloatordouble);
    __ testl(c_rarg3, 1 << ((i*2)+1));     // Double?
    __ jcc(Assembler::notZero, isdouble);

// Do Float Here
    __ movflt(floatreg, Address(rsp, i * wordSize));
    __ jmp(next);

// Do Double here
    __ bind(isdouble);
    __ movdbl(floatreg, Address(rsp, i * wordSize));

    __ bind(next);
  }


  // restore rsp
  __ addptr(rsp, 4 * wordSize);

  __ ret(0);

  return entry;
}
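As a point of reference, here is a minimal stand-alone sketch (a hypothetical helper, not part of the interpreter) of the two-bit identifier word tested in the loop above: bit 2*i marks argument slot i+1 as floating-point, and bit 2*i+1 marks it as a double rather than a float.

// Hypothetical illustration of the float/double identifier word used above.
enum ArgKind { ARG_INT, ARG_FLOAT, ARG_DOUBLE };

static ArgKind classify_arg(unsigned fp_identifiers, int i) {
  if ((fp_identifiers & (1u << (2 * i))) == 0) return ARG_INT;   // goes in c_rarg(i+1)
  return (fp_identifiers & (1u << (2 * i + 1))) ? ARG_DOUBLE     // movdbl into xmm(i+1)
                                                : ARG_FLOAT;     // movflt into xmm(i+1)
}
// e.g. classify_arg(0x4, 1) == ARG_FLOAT, classify_arg(0xC, 1) == ARG_DOUBLE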
address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  // rbx: method
  // r14: pointer to locals
  // c_rarg3: first stack arg - wordSize
  __ mov(c_rarg3, rsp);
  // adjust rsp
  __ subptr(rsp, 14 * wordSize);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             rbx, r14, c_rarg3);

  // rax: result handler

  // Stack layout:
  // rsp: 5 integer args (if static first is unused)
  //      1 float/double identifiers
  //      8 double args
  //        return address
  //        stack args
  //        garbage
  //        expression stack bottom
  //        bcp (NULL)
  //        ...

  // Do FP first so we can use c_rarg3 as temp
  __ movl(c_rarg3, Address(rsp, 5 * wordSize)); // float/double identifiers

  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
    const XMMRegister r = as_XMMRegister(i);

    Label d, done;

    __ testl(c_rarg3, 1 << i);
    __ jcc(Assembler::notZero, d);
    __ movflt(r, Address(rsp, (6 + i) * wordSize));
    __ jmp(done);
    __ bind(d);
    __ movdbl(r, Address(rsp, (6 + i) * wordSize));
    __ bind(done);
  }

  // Now handle integrals.  Only do c_rarg1 if not static.
  __ movl(c_rarg3, Address(rbx, Method::access_flags_offset()));
  __ testl(c_rarg3, JVM_ACC_STATIC);
  __ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));

  __ movptr(c_rarg2, Address(rsp, wordSize));
  __ movptr(c_rarg3, Address(rsp, 2 * wordSize));
  __ movptr(c_rarg4, Address(rsp, 3 * wordSize));
  __ movptr(c_rarg5, Address(rsp, 4 * wordSize));

  // restore rsp
  __ addptr(rsp, 14 * wordSize);

  __ ret(0);

  return entry;
}
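This variant packs only one bit per XMM register into the identifier word (set = double, clear = float); a minimal sketch of that test, under the same assumptions as above:

// Hypothetical illustration: bit i of the identifier word selects movdbl vs movflt for xmm(i).
static bool is_double_arg(unsigned fp_identifiers, int i) {
  return (fp_identifiers & (1u << i)) != 0;
}
// e.g. is_double_arg(0x05, 0) == true, is_double_arg(0x05, 1) == false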
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {

  // rbx: Method*
  // rcx: scratch
  // r13: sender sp

  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry

  address entry_point = __ pc();

  // These don't need a safepoint check because they aren't virtually
  // callable. We won't enter these intrinsics from compiled code.
  // If in the future we added an intrinsic which was virtually callable
  // we'd have to worry about how to safepoint so that this code is used.

  // mathematical functions inlined by compiler
  // (interpreter must provide identical implementation
  // in order to avoid monotonicity bugs when switching
  // from interpreter to compiler in the middle of some
  // computation)
  //
  // stack: [ ret adr ] <-- rsp
  //        [ lo(arg) ]
  //        [ hi(arg) ]
  //


  if (kind == Interpreter::java_lang_math_sqrt) {
    __ sqrtsd(xmm0, Address(rsp, wordSize));
  } else if (kind == Interpreter::java_lang_math_exp) {
    __ movdbl(xmm0, Address(rsp, wordSize));
    if (StubRoutines::dexp() != NULL) {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dexp())));
    } else {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dexp)));
    }
  } else if (kind == Interpreter::java_lang_math_log) {
    __ movdbl(xmm0, Address(rsp, wordSize));
    if (StubRoutines::dlog() != NULL) {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog())));
    } else {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dlog)));
    }
  } else if (kind == Interpreter::java_lang_math_log10) {
    __ movdbl(xmm0, Address(rsp, wordSize));
    if (StubRoutines::dlog10() != NULL) {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog10())));
    } else {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10)));
    }
  } else if (kind == Interpreter::java_lang_math_sin) {
    __ movdbl(xmm0, Address(rsp, wordSize));
    if (StubRoutines::dsin() != NULL) {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dsin())));
    } else {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dsin)));
    }
  } else if (kind == Interpreter::java_lang_math_cos) {
    __ movdbl(xmm0, Address(rsp, wordSize));
    if (StubRoutines::dcos() != NULL) {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcos())));
    } else {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dcos)));
    }
  } else if (kind == Interpreter::java_lang_math_pow) {
    __ movdbl(xmm1, Address(rsp, wordSize));
    __ movdbl(xmm0, Address(rsp, 3 * wordSize));
    if (StubRoutines::dpow() != NULL) {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dpow())));
    } else {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dpow)));
    }
  } else if (kind == Interpreter::java_lang_math_tan) {
    __ movdbl(xmm0, Address(rsp, wordSize));
    if (StubRoutines::dtan() != NULL) {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dtan())));
    } else {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtan)));
    }
  } else {
    __ fld_d(Address(rsp, wordSize));
    switch (kind) {
      case Interpreter::java_lang_math_abs:
          __ fabs();
          break;
      default                              :
          ShouldNotReachHere();
    }

    // return double result in xmm0 for interpreter and compilers.
    __ subptr(rsp, 2*wordSize);
    // Round to 64bit precision
    __ fstp_d(Address(rsp, 0));
    __ movdbl(xmm0, Address(rsp, 0));
    __ addptr(rsp, 2*wordSize);
  }


  __ pop(rax);
  __ mov(rsp, r13);
  __ jmp(rax);

  return entry_point;
}
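Each intrinsic above prefers an optimized stub when one was generated and otherwise falls back to the shared-runtime C implementation; roughly, as a hedged sketch with hypothetical names:

// Sketch of the stub-or-fallback dispatch policy used for each Math intrinsic above.
typedef double (*unary_math_fn)(double);

static double call_unary_math(double x,
                              unary_math_fn stub_fn,     // e.g. StubRoutines::dexp(), may be NULL
                              unary_math_fn shared_fn) { // e.g. SharedRuntime::dexp
  return (stub_fn != NULL) ? stub_fn(x) : shared_fn(x);
}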
Example #4
void decode(){

	//////////the instructions//////////
	int ADD = 1;
	int AND = 5;
	int JMP  = 12;
	int JSR  = 4;
	int LD   = 2;
	int LDR  = 6;
	int LEA  = 14;
	int NOT  = 9;
	int RET  = 12;
	int ST   = 3;
	int STR  = 7;
	int TRAP = 15;

    
    short instruction = MEMORY[IR];
    unsigned int opcode = (instruction & 0xFFFF) >> 12;  // opcode is the top 4 bits of the 16-bit word
    
    if (opcode == ADD ) 
        add(instruction);
    
    
    else if ( opcode == AND ) 
        and(instruction);
    
    
    else if( opcode == 0 )  //BR opcode == 0000
        br(instruction);
    
    
    else if ( opcode == JMP ) {
    	
    	if ( (instruction & 0x1C0) == 0x1C0 )  // bits 8..6 (base register) == R7 means RET
    		ret(instruction);
    	else
    		jmp(instruction);
    }
    
    else if ( opcode == JSR ) 
    	jsr(instruction);
    
    else if ( opcode == LD ) 
    	ld(instruction);
    
    else if ( opcode == LDR )
    	ldr(instruction);
    
    else if ( opcode == LEA ) 
    	lea(instruction);
    
    else if ( opcode == NOT )	
        not(instruction);
    
    else if ( opcode == RET )
    	ret(instruction);
    
    else if ( opcode == ST )
    	st(instruction);
    
    else if ( opcode == STR )
    	str(instruction);
    
    
    else if ( opcode == TRAP )
   		trap(instruction);
    
    
    else{ //we have an invalid instruction
        
        fprintf(stderr, "Error!! Invalid Instruction: 0x%x\n", instruction);
        exit(1);
    }
    
    
}
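A small stand-alone check of the opcode extraction used above (a hypothetical test, assuming 16-bit LC-3 instruction words):

#include <assert.h>

// The opcode lives in the top 4 bits of the 16-bit word; 0x1C0 (448) selects
// bits 8..6, the base-register field that distinguishes RET (R7) from plain JMP.
static void opcode_extraction_check(void) {
    short instruction = (short)0xC1C0;                   /* JMP with base register R7, i.e. RET */
    unsigned int opcode = (instruction & 0xFFFF) >> 12;
    assert(opcode == 12);
    assert((instruction & 0x1C0) == 0x1C0);
}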
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  const int amd64_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), amd64_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // Free registers (non-args) are rax, rbx

  // get receiver klass
  address npe_addr = __ pc();
  __ load_klass(rax, j_rarg0);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ cmpl(Address(rax, InstanceKlass::vtable_length_offset() * wordSize),
            vtable_index * vtableEntry::size());
    __ jcc(Assembler::greater, L);
    __ movl(rbx, vtable_index);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, rbx);
    __ bind(L);
  }
#endif // PRODUCT

  // load Method* and target address
  const Register method = rbx;

  __ lookup_virtual_method(rax, vtable_index, method);

  if (DebugVtables) {
    Label L;
    __ cmpptr(method, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notZero, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }
  // rax: receiver klass
  // rbx: Method*
  // rcx: receiver
  address ame_addr = __ pc();
  __ jmp( Address(rbx, Method::from_compiled_offset()));

  __ flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at "PTR_FORMAT"[%d] left over: %d",
                  vtable_index, s->entry_point(),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
  assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
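At run time the stub emitted above amounts to a klass load, a vtable index, and an indirect jump; a hedged C++ sketch with hypothetical types (the real stub is hand-emitted machine code with no frame):

// Hypothetical model of the dispatch performed by the generated vtable stub.
struct FakeMethod { void* from_compiled_entry; };
struct FakeKlass  { FakeMethod* vtable[32]; };
struct FakeOop    { FakeKlass* klass; };

static void* vtable_dispatch(FakeOop* receiver, int vtable_index) {
  FakeKlass*  k = receiver->klass;                 // load_klass; npe_addr (faults on null receiver)
  FakeMethod* m = k->vtable[vtable_index];         // lookup_virtual_method
  return m->from_compiled_entry;                   // ame_addr jumps through this entry
}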
Example #6
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
{
#if USE(CTI_REPATCH_PIC)
    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);

    // Check eax is an object of the right Structure.
    JmpSrc failureCases1 = checkStructure(X86::eax, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
    __ cmpl_im(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    JmpSrc failureCases2 = __ jne();

    // Checks out okay! - getDirectOffset
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);

    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCases1, slowCaseBegin);
    X86Assembler::link(code, failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = code;

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
#else
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);

    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc failureCases1 = __ jne();
    JmpSrc failureCases2 = checkStructure(X86::eax, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
    __ cmpl_im(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    JmpSrc failureCases3 = __ jne();

    // Checks out okay! - getDirectOffset
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);

    __ ret();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));

    stubInfo->stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
#endif
}
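Both paths above emit the same fast path: guard the receiver's Structure, guard the prototype's Structure, then load the cached slot directly from the prototype's property storage. A hedged C++ sketch with hypothetical types (the real code is emitted x86 that jumps back into the hot path rather than returning):

// Hypothetical model of the guarded load performed by the generated stub.
struct ProtoGetStub {
  void*  expected_structure;        // receiver Structure guard (failureCases1)
  void*  expected_proto_structure;  // prototype Structure guard (failureCases2)
  void** proto_storage;             // prototype's property storage
  size_t cached_offset;             // slot of the cached property
};

static void* cached_proto_get(void* receiver_structure, void* proto_structure,
                              const ProtoGetStub& s, bool& take_slow_case) {
  take_slow_case = receiver_structure != s.expected_structure ||
                   proto_structure    != s.expected_proto_structure;
  return take_slow_case ? 0 : s.proto_storage[s.cached_offset];   // getDirectOffset
}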
void GPUDrawScanlineCodeGenerator::Generate()
{
	push(esi);
	push(edi);

	Init();

	align(16);

L("loop");

	// GSVector4i test = m_test[7 + (steps & (steps >> 31))];

	mov(edx, ecx);
	sar(edx, 31);
	and(edx, ecx);
	shl(edx, 4);

	movdqa(xmm7, ptr[edx + (size_t)&m_test[7]]);

	// movdqu(xmm1, ptr[edi]);

	movq(xmm1, qword[edi]);
	movhps(xmm1, qword[edi + 8]);

	// ecx = steps
	// esi = tex (tme)
	// edi = fb
	// xmm1 = fd
	// xmm2 = s
	// xmm3 = t
	// xmm4 = r
	// xmm5 = g
	// xmm6 = b
	// xmm7 = test

	TestMask();

	SampleTexture();

	// xmm1 = fd
	// xmm3 = a
	// xmm4 = r
	// xmm5 = g
	// xmm6 = b
	// xmm7 = test
	// xmm0, xmm2 = free

	ColorTFX();

	AlphaBlend();

	Dither();

	WriteFrame();

L("step");

	// if(steps <= 0) break;

	test(ecx, ecx);
	jle("exit", T_NEAR);

	Step();

	jmp("loop", T_NEAR);

L("exit");

	pop(edi);
	pop(esi);

	ret(8);
}
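The `steps & (steps >> 31)` expression at the top of the loop is a branch-free min(steps, 0): it selects one of the partial-coverage masks in m_test for the final, short iteration. A small stand-alone check, assuming 32-bit ints with arithmetic right shift:

// steps >> 31 is 0 for non-negative values and all-ones for negative ones,
// so the AND leaves negative values unchanged and clamps the rest to zero.
static int clamp_to_zero_or_negative(int steps)
{
	return steps & (steps >> 31);
}
// clamp_to_zero_or_negative(5) == 0, clamp_to_zero_or_negative(-3) == -3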
Example #8
address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {

  // rbx: methodOop
  // rcx: scratch
  // r13: sender sp

  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry

  address entry_point = __ pc();

  // These don't need a safepoint check because they aren't virtually
  // callable. We won't enter these intrinsics from compiled code.
  // If in the future we added an intrinsic which was virtually callable
  // we'd have to worry about how to safepoint so that this code is used.

  // mathematical functions inlined by compiler
  // (interpreter must provide identical implementation
  // in order to avoid monotonicity bugs when switching
  // from interpreter to compiler in the middle of some
  // computation)
  //
  // stack: [ ret adr ] <-- rsp
  //        [ lo(arg) ]
  //        [ hi(arg) ]
  //

  // Note: For JDK 1.2 StrictMath doesn't exist and Math.sin/cos/sqrt are
  //       native methods. Interpreter::method_kind(...) does a check for
  //       native methods first before checking for intrinsic methods and
  //       thus will never select this entry point. Make sure it is not
  //       called accidentally since the SharedRuntime entry points will
  //       not work for JDK 1.2.
  //
  // We no longer need to check for JDK 1.2 since it's EOL'ed.
  // The following check existed in pre 1.6 implementation,
  //    if (Universe::is_jdk12x_version()) {
  //      __ should_not_reach_here();
  //    }
  // Universe::is_jdk12x_version() always returns false since
  // the JDK version is not yet determined when this method is called.
  // This method is called during interpreter_init() whereas
  // JDK version is only determined when universe2_init() is called.

  // Note: For JDK 1.3 StrictMath exists and Math.sin/cos/sqrt are
  //       java methods.  Interpreter::method_kind(...) will select
  //       this entry point for the corresponding methods in JDK 1.3.
  // get argument

  if (kind == Interpreter::java_lang_math_sqrt) {
    __ sqrtsd(xmm0, Address(rsp, wordSize));
  } else {
    __ fld_d(Address(rsp, wordSize));
    switch (kind) {
      case Interpreter::java_lang_math_sin :
          __ trigfunc('s');
          break;
      case Interpreter::java_lang_math_cos :
          __ trigfunc('c');
          break;
      case Interpreter::java_lang_math_tan :
          __ trigfunc('t');
          break;
      case Interpreter::java_lang_math_abs:
          __ fabs();
          break;
      case Interpreter::java_lang_math_log:
          __ flog();
          break;
      case Interpreter::java_lang_math_log10:
          __ flog10();
          break;
      default                              :
          ShouldNotReachHere();
    }

    // return double result in xmm0 for interpreter and compilers.
    __ subptr(rsp, 2*wordSize);
    // Round to 64bit precision
    __ fstp_d(Address(rsp, 0));
    __ movdbl(xmm0, Address(rsp, 0));
    __ addptr(rsp, 2*wordSize);
  }


  __ pop(rax);
  __ mov(rsp, r13);
  __ jmp(rax);

  return entry_point;
}
Example #9
CodeBlock* GlobalEntry::codeGen(){
	CodeBlock* main_block = new CodeBlock();

	//--------------Init stack and base pointers--------------
	ICode set_stack(ICode::ICodeType::MOVI,new Value(0,Type::TypeTag::UINT),&MemoryMgr::stackPointerRegister());
	ICode set_base(ICode::ICodeType::MOVI,new Value(0,Type::TypeTag::UINT),&MemoryMgr::basePointerRegister());

	main_block->append(set_stack);
	main_block->append(set_base);

	//----------------------Declarations----------------------
	//Loops over all declarations
	SymTab* st = this->symTab();

	//Separates FunctionEntries from the other types
	vector<CodeBlock*> function_blocks;
	vector<CodeBlock*> other_blocks;
	for(SymTab::iterator i = st->begin(); i != st->end(); ++i){
		CodeBlock* cb = (*i)->codeGen();
		if((*i)->kind() == SymTabEntry::Kind::FUNCTION_KIND){
			function_blocks.push_back(cb);
		} else {
			other_blocks.push_back(cb);
		}
	}
	for(unsigned int x = 0; x < other_blocks.size();++x){
		main_block->append(other_blocks[x]);
	}
	ICode jmp_start(ICode::ICodeType::JMP,parse_label_);
	main_block->append(jmp_start);
	for(unsigned int x = 0; x < function_blocks.size();++x){
		main_block->append(function_blocks[x]);
	}

	//---------------------Event Matching---------------------

	// Reads in character
	CodeBlock* input_block = new CodeBlock();
	ICode read_in(ICode::ICodeType::IN,&in_reg_);
	
	//If negative, then reached end of file
	ICode* check_neg = new ICode(ICode::ICodeType::GT,new Value(0,Type::TypeTag::INT),&in_reg_);
	ICode end_jmp(ICode::ICodeType::JMPC,check_neg,end_label_);

	input_block->append(read_in);
	input_block->append(end_jmp);

	input_block->setStartLabel(parse_label_);

	CodeBlock* rule_block;
	// Named rules
	for(unsigned int i = 0; i < rule_names_.size(); ++i){
		rule_block = new CodeBlock();
		// Moves the string constant to the register
		ICode mov_string(ICode::ICodeType::MOVI,new Value((int)(rule_names_[i].at(0)),Type::TypeTag::INT),&comp_reg_);
		// Comparison
		ICode* compare = new ICode(ICode::ICodeType::EQ,&comp_reg_,&in_reg_);
		//Conditionally jumps
		ICode jmpc(ICode::ICodeType::JMPC,compare,rule_labels_[i]);

		rule_block->append(mov_string);
		rule_block->append(jmpc);
		rule_block->setEndLabel(rule_return_labels_[i]);

		input_block->append(rule_block);
	}

	// "Any" rules
	for(unsigned int i = 0; i < any_labels_.size(); ++i){
		rule_block = new CodeBlock();

		//Jumps to any rule body
		ICode jmp(ICode::ICodeType::JMP,any_labels_[i]);

		rule_block->append(jmp);
		rule_block->setEndLabel(any_return_labels_[i]);

		input_block->append(rule_block);
	}

	input_block->append(jmp_start);

	main_block->append(input_block);

	//------------------------Rules------------------------

	//Loops over all Rules
	for(unsigned int x = 0; x < rules_.size();++x){
		main_block->append(rules_[x]->codeGen());
	}

	//---------------------End program---------------------

	//Do the end jump here
	CodeBlock* end_block = new CodeBlock();
	ICode print_end(ICode::ICodeType::PRTS,new Value("PROGRAM END\\n"));
	end_block->setStartLabel(end_label_);
	end_block->append(print_end);

	main_block->append(end_block);

	return main_block;
}
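For orientation, the overall shape of the code this routine emits is roughly the following (a sketch reconstructed from the appends above, not actual generator output):

//   MOVI 0 -> SP ; MOVI 0 -> BP            // init stack and base pointers
//   <non-function declarations>
//   JMP parse_label_
//   <function bodies>
// parse_label_:
//   IN  in_reg_                            // read one character
//   JMPC (in_reg_ negative) end_label_     // end of input
//   for each named rule i:
//     MOVI first-char-of-rule -> comp_reg_
//     JMPC (comp_reg_ == in_reg_) rule_labels_[i]
//   for each "any" rule i: JMP any_labels_[i]
//   JMP parse_label_
//   <rule bodies>
// end_label_:
//   PRTS "PROGRAM END\n"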
Example #10
File: asmgen.cpp  Project: swizl/fakescript
void asmgen::variant_jmp(int jmppos)
{
	jmp(jmppos);
}
Example #11
File: encode_op.c  Project: occho/cpu
int encode_op(char *opcode, char *op_data)
{
	int rd,rs,rt,imm,funct,shaft,target;
	char tmp[256];
	const char *fi = "%s %d";
	const char *fg = "%s %%g%d";
	const char *ff = "%s %%f%d";
	const char *fl = "%s %s";
	const char *fgi = "%s %%g%d, %d";
	const char *fgl = "%s %%g%d, %s";
	const char *fgg = "%s %%g%d, %%g%d";
	const char *fggl = "%s %%g%d, %%g%d, %s";
	const char *fggi = "%s %%g%d, %%g%d, %d";
	const char *fggg = "%s %%g%d, %%g%d, %%g%d";
	const char *fff = "%s %%f%d, %%f%d";
	const char *fgf = "%s %%g%d, %%f%d";
	const char *ffg = "%s %%f%d, %%g%d";
	const char *fffl = "%s %%f%d, %%f%d, %s";
	const char *ffff = "%s %%f%d, %%f%d, %%f%d";
	const char *ffgi = "%s %%f%d, %%g%d, %d";
	const char *ffgg = "%s %%f%d, %%g%d, %%g%d";
	char lname[256];

	shaft = funct = target = 0;

	if(strcmp(opcode, "mvhi") == 0){
		if(sscanf(op_data, fgi, tmp, &rs, &imm) == 3)
		    return mvhi(rs,0,imm);
	}
	if(strcmp(opcode, "mvlo") == 0){
		if(sscanf(op_data, fgi, tmp, &rs, &imm) == 3)
		    return mvlo(rs,0,imm);
	}
	if(strcmp(opcode, "add") == 0){
		if(sscanf(op_data, fggg, tmp, &rd, &rs,&rt) == 4)
		    return add(rs,rt,rd,0);
	}
	if(strcmp(opcode, "nor") == 0){
		if(sscanf(op_data, fggg, tmp, &rd, &rs,&rt) == 4)
		    return nor(rs,rt,rd,0);
	}
	if(strcmp(opcode, "sub") == 0){
		if(sscanf(op_data, fggg, tmp, &rd, &rs,&rt) == 4)
		    return sub(rs,rt,rd,0);
	}
	if(strcmp(opcode, "mul") == 0){
		if(sscanf(op_data, fggg, tmp, &rd, &rs,&rt) == 4)
		    return mul(rs,rt,rd,0);
	}
	if(strcmp(opcode, "addi") == 0){
		if(sscanf(op_data, fggi, tmp, &rt, &rs, &imm) == 4)
		    return addi(rs,rt,imm);
	}
	if(strcmp(opcode, "subi") == 0){
		if(sscanf(op_data, fggi, tmp, &rt, &rs, &imm) == 4)
		    return subi(rs,rt,imm);
	}
	if(strcmp(opcode, "muli") == 0){
		if(sscanf(op_data, fggi, tmp, &rt, &rs, &imm) == 4)
		    return muli(rs,rt,imm);
	}
	if(strcmp(opcode, "input") == 0){
		if(sscanf(op_data, fg, tmp, &rd) == 2)
		    return input(0,0,rd,0);
	}
	if(strcmp(opcode, "inputw") == 0){
		if(sscanf(op_data, fg, tmp, &rd) == 2)
		    return inputw(0,0,rd,0);
	}
	if(strcmp(opcode, "inputf") == 0){
		if(sscanf(op_data, ff, tmp, &rd) == 2)
		    return inputf(0,0,rd,0);
	}
	if(strcmp(opcode, "output") == 0){
		if(sscanf(op_data, fg, tmp, &rs) == 2)
		    return output(rs,0,0,0);
	}
	if(strcmp(opcode, "outputw") == 0){
		if(sscanf(op_data, fg, tmp, &rs) == 2)
		    return outputw(rs,0,0,0);
	}
	if(strcmp(opcode, "outputf") == 0){
		if(sscanf(op_data, ff, tmp, &rs) == 2)
		    return outputf(rs,0,0,0);
	}
	if(strcmp(opcode, "and") == 0){
		if(sscanf(op_data, fggg, tmp, &rd, &rs,&rt) == 4)
		    return _and(rs,rt,rd,0);
	}
	if(strcmp(opcode, "or") == 0){
		if(sscanf(op_data, fggg, tmp, &rd, &rs,&rt) == 4)
		    return _or(rs,rt,rd,0);
	}
	if(strcmp(opcode, "sll") == 0){
		if(sscanf(op_data, fggg, tmp, &rd, &rs,&rt) == 4)
		    return sll(rs,rt,rd,0);
	}
	if(strcmp(opcode, "srl") == 0){
		if(sscanf(op_data, fggg, tmp, &rd, &rs,&rt) == 4)
		    return srl(rs,rt,rd,0);
	}
	if(strcmp(opcode, "slli") == 0){
		if(sscanf(op_data, fggi, tmp, &rt, &rs, &imm) == 4)
		    return slli(rs,rt,imm);
	}
	if(strcmp(opcode, "srli") == 0){
		if(sscanf(op_data, fggi, tmp, &rt, &rs, &imm) == 4)
		    return srli(rs,rt,imm);
	}
	if(strcmp(opcode, "b") == 0){
		if(sscanf(op_data, fg, tmp, &rs) == 2)
		    return b(rs,0,0,0);
	}
	if(strcmp(opcode, "jmp") == 0){
		if(sscanf(op_data, fl, tmp, lname) == 2) {
			strcpy(label_name[label_cnt],lname);
		    return jmp(label_cnt++);
		}
	}
	if(strcmp(opcode, "jeq") == 0){
		if(sscanf(op_data, fggl, tmp, &rs, &rt, lname) == 4) {
			strcpy(label_name[label_cnt],lname);
		    return jeq(rs,rt,label_cnt++);
		}
	}
	if(strcmp(opcode, "jne") == 0){
		if(sscanf(op_data, fggl, tmp, &rs, &rt, lname) == 4) {
			strcpy(label_name[label_cnt],lname);
		    return jne(rs,rt,label_cnt++);
		}
	}
	if(strcmp(opcode, "jlt") == 0){
		if(sscanf(op_data, fggl, tmp, &rs, &rt, lname) == 4) {
			strcpy(label_name[label_cnt],lname);
		    return jlt(rs,rt,label_cnt++);
		}
	}
	if(strcmp(opcode, "jle") == 0){
		if(sscanf(op_data, fggl, tmp, &rs, &rt, lname) == 4) {
			strcpy(label_name[label_cnt],lname);
		    return jle(rs,rt,label_cnt++);
		}
	}
	if(strcmp(opcode, "call") == 0){
		if(sscanf(op_data, fl, tmp, lname) == 2)  {
			strcpy(label_name[label_cnt],lname);
		    return call(label_cnt++);
		}
	}
	if(strcmp(opcode, "callR") == 0){
		if(sscanf(op_data, fg, tmp, &rs) == 2)
		    return callr(rs,0,0,0);
	}
	if(strcmp(opcode, "return") == 0){
		    return _return(0);
	}
	if(strcmp(opcode, "ld") == 0){
		if(sscanf(op_data, fggg, tmp, &rd, &rs,&rt) == 4)
		    return ld(rs,rt,rd,0);
	}
	if(strcmp(opcode, "ldi") == 0){
		if(sscanf(op_data, fggi, tmp, &rt, &rs, &imm) == 4)
		    return ldi(rs,rt,imm);
	}
	if(strcmp(opcode, "ldlr") == 0){
		if(sscanf(op_data, fgi, tmp, &rs, &imm) == 3)
		    return ldlr(rs,0,imm);
	}
	if(strcmp(opcode, "fld") == 0){
		if(sscanf(op_data, ffgg, tmp, &rd, &rs,&rt) == 4)
		    return fld(rs,rt,rd,0);
	}
	if(strcmp(opcode, "st") == 0){
		if(sscanf(op_data, fggg, tmp, &rd, &rs,&rt) == 4)
		    return st(rs,rt,rd,0);
	}
	if(strcmp(opcode, "sti") == 0){
		if(sscanf(op_data, fggi, tmp, &rt, &rs, &imm) == 4)
		    return sti(rs,rt,imm);
	}
	if(strcmp(opcode, "stlr") == 0){
		if(sscanf(op_data, fgi, tmp, &rs, &imm) == 3)
		    return stlr(rs,0,imm);
	}
	if(strcmp(opcode, "fst") == 0){
		if(sscanf(op_data, ffgg, tmp, &rd, &rs,&rt) == 4)
		    return fst(rs,rt,rd,0);
	}
	if(strcmp(opcode, "fadd") == 0){
		if(sscanf(op_data, ffff, tmp, &rd, &rs, &rt) == 4)
		    return fadd(rs,rt,rd,0);
	}
	if(strcmp(opcode, "fsub") == 0){
		if(sscanf(op_data, ffff, tmp, &rd, &rs, &rt) == 4)
		    return fsub(rs,rt,rd,0);
	}
	if(strcmp(opcode, "fmul") == 0){
		if(sscanf(op_data, ffff, tmp, &rd, &rs, &rt) == 4)
		    return fmul(rs,rt,rd,0);
	}
	if(strcmp(opcode, "fdiv") == 0){
		if(sscanf(op_data, ffff, tmp, &rd, &rs, &rt) == 4)
		    return fdiv(rs,rt,rd,0);
	}
	if(strcmp(opcode, "fsqrt") == 0){
		if(sscanf(op_data, fff, tmp, &rd, &rs) == 3)
		    return fsqrt(rs,0,rd,0);
	}
	if(strcmp(opcode, "fabs") == 0){
		if(sscanf(op_data, fff, tmp, &rd, &rs) == 3)
		    return _fabs(rs,0,rd,0);
	}
	if(strcmp(opcode, "fmov") == 0){
		if(sscanf(op_data, fff, tmp, &rd, &rs) == 3)
		    return fmov(rs,0,rd,0);
	}
	if(strcmp(opcode, "fneg") == 0){
		if(sscanf(op_data, fff, tmp, &rd, &rs) == 3)
		    return fneg(rs,0,rd,0);
	}
	if(strcmp(opcode, "fldi") == 0){
		if(sscanf(op_data, ffgi, tmp, &rt, &rs, &imm) == 4)
		    return fldi(rs,rt,imm);
	}
	if(strcmp(opcode, "fsti") == 0){
		if(sscanf(op_data, ffgi, tmp, &rt, &rs, &imm) == 4)
		    return fsti(rs,rt,imm);
	}
	if(strcmp(opcode, "fjeq") == 0){
		if(sscanf(op_data, fffl, tmp, &rs, &rt, lname) == 4) {
			strcpy(label_name[label_cnt],lname);
		    return fjeq(rs,rt,label_cnt++);
		}
	}
	if(strcmp(opcode, "fjlt") == 0){
		if(sscanf(op_data, fffl, tmp, &rs, &rt, lname) == 4) {
			strcpy(label_name[label_cnt],lname);
		    return fjlt(rs,rt,label_cnt++);
		}
	}
	if(strcmp(opcode, "halt") == 0){
		    return halt(0,0,0,0);
	}
	if(strcmp(opcode, "setL") == 0){
		if(sscanf(op_data, fgl, tmp, &rd, lname) == 3) {
			strcpy(label_name[label_cnt],lname);
		    return setl(0,rd,label_cnt++);
		}
	}
	if(strcmp(opcode, "padd") == 0){
		if(sscanf(op_data, fgi, tmp, &rt, &imm) == 3) {
		    return padd(0,rt,imm);
		}
	}
	if(strcmp(opcode, "link") == 0){
		if(sscanf(op_data, fi, tmp, &imm) == 2) {
		    return link(0,0,imm);
		}
	}
	if(strcmp(opcode, "movlr") == 0){
		return movlr(0,0,0,0);
	}
	if(strcmp(opcode, "btmplr") == 0){
		return btmplr(0,0,0,0);
	}
	/*
	if(strcmp(opcode, "padd") == 0){
		if(sscanf(op_data, fgg, tmp, &rd, &rt) == 3) {
		    return padd(0,rt,d,0);
		}
	}
	*/

	return -1;
}
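As a concrete example of how these formats drive the parse (hypothetical input line; in a scanf format, `%%` matches a literal '%'):

#include <stdio.h>
#include <assert.h>

// The fggg pattern "%s %%g%d, %%g%d, %%g%d" pulls the mnemonic plus three
// general-register numbers out of a line such as "add %g3, %g1, %g2".
static void fggg_parse_check(void)
{
	char tmp[256];
	int rd, rs, rt;
	int n = sscanf("add %g3, %g1, %g2", "%s %%g%d, %%g%d, %%g%d",
	               tmp, &rd, &rs, &rt);
	assert(n == 4 && rd == 3 && rs == 1 && rt == 2);   /* then add(rs, rt, rd, 0) */
}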
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Note well: pd_code_size_limit is the absolute minimum we can get away with.  If you
  //            add code here, bump the code stub size returned by pd_code_size_limit!
  const int i486_code_length = VtableStub::pd_code_size_limit(false);
  VtableStub* s = new(i486_code_length) VtableStub(false, itable_index);
  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), i486_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

  // Entry arguments:
  //  rax,: Interface
  //  rcx: Receiver

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif /* PRODUCT */
  // get receiver (need to skip return address on top of stack)

  assert(VtableStub::receiver_location() == rcx->as_VMReg(), "receiver expected in rcx");

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ movptr(rsi, Address(rcx, oopDesc::klass_offset_in_bytes()));

  // Most registers are in use; we'll use rax, rbx, rsi, rdi
  // (If we need to make rsi, rdi callee-save, do a push/pop here.)
  const Register method = rbx;
  Label throw_icce;

  // Get methodOop and entrypoint for compiler
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             rsi, rax, itable_index,
                             // outputs: method, scan temp. reg
                             method, rdi,
                             throw_icce);

  // method (rbx): methodOop
  // rcx: receiver

#ifdef ASSERT
  if (DebugVtables) {
      Label L1;
      __ cmpptr(method, (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L1);
      __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::notZero, L1);
      __ stop("methodOop is null");
      __ bind(L1);
    }
#endif // ASSERT

  address ame_addr = __ pc();
  __ jmp(Address(method, methodOopDesc::from_compiled_offset()));

  __ bind(throw_icce);
  __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("itable #%d at "PTR_FORMAT"[%d] left over: %d",
                  itable_index, s->entry_point(),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
  assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
Example #13
OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  OopMapSet* oop_maps = NULL;
  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // stub code & info for the different stubs
  switch (id) {
    case forward_exception_id:
      {
        // we're handling an exception in the context of a compiled
        // frame.  The registers have been saved in the standard
        // places.  Perform an exception lookup in the caller and
        // dispatch to the handler if found.  Otherwise unwind and
        // dispatch to the callers exception handler.

        oop_maps = new OopMapSet();
        OopMap* oop_map = generate_oop_map(sasm, true);

        // transfer the pending exception to the exception_oop
        __ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception);
        __ ld_ptr(Oexception, 0, G0);
        __ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset()));
        __ add(I7, frame::pc_return_offset, Oissuing_pc);

        generate_handle_exception(sasm, oop_maps, oop_map);
        __ should_not_reach_here();
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register G5_klass = G5; // Incoming
        Register O0_obj   = O0; // Outgoing

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register G1_obj_size = G1;
          Register G3_t1 = G3;
          Register G4_t2 = G4;
          assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);

          // Push a frame since we may do dtrace notification for the
          // allocation which requires calling out and we don't want
          // to stomp the real return address.
          __ save_frame(0);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1);
            __ cmp(G3_t1, instanceKlass::fully_initialized);
            __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
            __ delayed()->nop();
          }
#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
          __ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size);
          __ cmp(G1_obj_size, 0);  // make sure it's an instance (LH > 0)
          __ br(Assembler::lessEqual, false, Assembler::pn, not_ok);
          __ delayed()->nop();
          __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
          __ br(Assembler::zero, false, Assembler::pn, ok);
          __ delayed()->nop();
          __ bind(not_ok);
          __ stop("assert(can be fast path allocated)");
          __ should_not_reach_here();
          __ bind(ok);
          }
#endif // ASSERT
          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass

          __ bind(retry_tlab);

          // get the instance size
          __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
          __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);
          __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
          __ verify_oop(O0_obj);
          __ mov(O0, I0);
          __ ret();
          __ delayed()->restore();

          __ bind(try_eden);
          // get the instance size
          __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
          __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
          __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
          __ verify_oop(O0_obj);
          __ mov(O0, I0);
          __ ret();
          __ delayed()->restore();

          __ bind(slow_path);

          // pop this frame so generate_stub_call can push it's own
          __ restore();
        }

        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_instance), G5_klass);
        // I0->O0: new instance
      }

      break;

#ifdef TIERED
    case counter_overflow_id:
        // G4 contains bci
      oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4);
      break;
#endif // TIERED

    case new_type_array_id:
    case new_object_array_id:
      {
        Register G5_klass = G5; // Incoming
        Register G4_length = G4; // Incoming
        Register O0_obj   = O0; // Outgoing

        Address klass_lh(G5_klass, ((klassOopDesc::header_size() * HeapWordSize)
                                    + Klass::layout_helper_offset_in_bytes()));
        assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
        assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
        // Use this offset to pick out an individual byte of the layout_helper:
        const int klass_lh_header_size_offset = ((BytesPerInt - 1)  // 3 - 2 selects byte {0,1,0,0}
                                                 - Klass::_lh_header_size_shift / BitsPerByte);

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register G3_t1 = G3;
          __ ld(klass_lh, G3_t1);
          __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmp(G3_t1, tag);
          __ brx(Assembler::equal, false, Assembler::pt, ok);
          __ delayed()->nop();
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register G1_arr_size = G1;
          Register G3_t1 = G3;
          Register O1_t2 = O1;
          assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2);

          // check that array length is small enough for fast path
          __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
          __ cmp(G4_length, G3_t1);
          __ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path);
          __ delayed()->nop();

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass

          __ bind(retry_tlab);

          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ ld(klass_lh, G3_t1);
          __ sll(G4_length, G3_t1, G1_arr_size);
          __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
          __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
          __ add(G1_arr_size, G3_t1, G1_arr_size);
          __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);  // align up
          __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);

          __ tlab_allocate(O0_obj, G1_arr_size, 0, G3_t1, slow_path);  // preserves G1_arr_size

          __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
          __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
          __ sub(G1_arr_size, G3_t1, O1_t2);  // body length
          __ add(O0_obj, G3_t1, G3_t1);       // body start
          __ initialize_body(G3_t1, O1_t2);
          __ verify_oop(O0_obj);
          __ retl();
          __ delayed()->nop();

          __ bind(try_eden);
          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ ld(klass_lh, G3_t1);
          __ sll(G4_length, G3_t1, G1_arr_size);
          __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
          __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
          __ add(G1_arr_size, G3_t1, G1_arr_size);
          __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);
          __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);

          __ eden_allocate(O0_obj, G1_arr_size, 0, G3_t1, O1_t2, slow_path);  // preserves G1_arr_size

          __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
          __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
          __ sub(G1_arr_size, G3_t1, O1_t2);  // body length
          __ add(O0_obj, G3_t1, G3_t1);       // body start
          __ initialize_body(G3_t1, O1_t2);
          __ verify_oop(O0_obj);
          __ retl();
          __ delayed()->nop();

          __ bind(slow_path);
        }

        if (id == new_type_array_id) {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length);
        } else {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_object_array), G5_klass, G4_length);
        }
        // I0 -> O0: new array
      }
      break;

    case new_multi_array_id:
      { // O0: klass
        // O1: rank
        // O2: address of 1st dimension
        __ set_info("new_multi_array", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_multi_array), I0, I1, I2);
        // I0 -> O0: new multi array
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = O1;
        __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), t);
        __ ld(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), t);
        __ set(JVM_ACC_HAS_FINALIZER, G3);
        __ andcc(G3, t, G0);
        __ br(Assembler::notZero, false, Assembler::pt, register_finalizer);
        __ delayed()->nop();

        // do a leaf return
        __ retl();
        __ delayed()->nop();

        __ bind(register_finalizer);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), I0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        // make a frame and preserve the caller's caller-save registers

        oop_maps = new OopMapSet();
        OopMap* oop_map = save_live_registers(sasm);
        __ mov(Oexception->after_save(),  Oexception);
        __ mov(Oissuing_pc->after_save(), Oissuing_pc);
        generate_handle_exception(sasm, oop_maps, oop_map);
      }
      break;

    case unwind_exception_id:
      {
        // O0: exception
        // I7: address of call to this method

        __ set_info("unwind_exception", dont_gc_arguments);
        __ mov(Oexception, Oexception->after_save());
        __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());

        __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());
        __ verify_not_null_oop(Oexception->after_save());

        // Restore SP from L7 if the exception PC is a MethodHandle call site.
        __ mov(O0, G5);  // Save the target address.
        __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
        __ tst(L0);  // Condition codes are preserved over the restore.
        __ restore();

        __ jmp(G5, 0);
        __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);  // Restore SP if required.
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), false);
      }
      break;

    case throw_class_cast_exception_id:
      {
        // G4: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
        // Arguments :
        //
        //      ret  : G3
        //      sub  : G3, argument, destroyed
        //      super: G1, argument, not changed
        //      raddr: O7, blown by call
        Label miss;

        __ save_frame(0);               // Blow no registers!

        __ check_klass_subtype_slow_path(G3, G1, L0, L1, L2, L4, NULL, &miss);

        __ mov(1, G3);
        __ ret();                       // Result in G3 is 'true'
        __ delayed()->restore();        // free copy or add can go here

        __ bind(miss);
        __ mov(0, G3);
        __ ret();                       // Result in G3 is 'false'
        __ delayed()->restore();        // free copy or add can go here
      }
      break;

    case monitorenter_nofpu_id:
    case monitorenter_id:
      { // G4: object
        // G5: lock address
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), G4, G5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      { // G4: lock address
        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), G4);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();

      }
      break;

    case access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case jvmti_exception_throw_id:
      { // Oexception : exception
        __ set_info("jvmti_exception_throw", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), I0);
      }
      break;

    case dtrace_object_alloc_id:
      { // O0: object
        __ set_info("dtrace_object_alloc", dont_gc_arguments);
        // we can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm);

        __ save_thread(L7_thread_cache);
        __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
                relocInfo::runtime_call_type);
        __ delayed()->mov(I0, O0);
        __ restore_thread(L7_thread_cache);

        restore_live_registers(sasm);
        __ ret();
        __ delayed()->restore();
      }
      break;

#ifndef SERIALGC
    case g1_pre_barrier_slow_id:
      { // G4: previous value of memory
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);

        Register pre_val = G4;
        Register tmp  = G1_scratch;
        Register tmp2 = G3_scratch;

        Label refill, restart;
        bool with_frame = false; // I don't know if we can do with-frame.
        int satb_q_index_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   PtrQueue::byte_offset_of_index());
        int satb_q_buf_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   PtrQueue::byte_offset_of_buf());
        __ bind(restart);
        __ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);

        __ br_on_reg_cond(Assembler::rc_z, /*annul*/false,
                          Assembler::pn, tmp, refill);

        // If the branch is taken, no harm in executing this in the delay slot.
        __ delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
        __ sub(tmp, oopSize, tmp);

        __ st_ptr(pre_val, tmp2, tmp);  // [_buf + index] := <address_of_card>
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);

        __ bind(refill);
        __ save_frame(0);

        __ mov(pre_val, L0);
        __ mov(tmp,     L1);
        __ mov(tmp2,    L2);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         SATBMarkQueueSet::handle_zero_index_for_thread),
                                         G2_thread);

        __ mov(L0, pre_val);
        __ mov(L1, tmp);
        __ mov(L2, tmp2);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;

    case g1_post_barrier_slow_id:
      {
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        Register addr = G4;
        Register cardtable = G5;
        Register tmp  = G1_scratch;
        Register tmp2 = G3_scratch;
        jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;

        Label not_already_dirty, restart, refill;

#ifdef _LP64
        __ srlx(addr, CardTableModRefBS::card_shift, addr);
#else
        __ srl(addr, CardTableModRefBS::card_shift, addr);
#endif

        AddressLiteral rs(byte_map_base);
        __ set(rs, cardtable);         // cardtable := <card table base>
        __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]

        __ br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
                          tmp, not_already_dirty);
        // Get cardtable + tmp into a reg by itself -- useful in the take-the-branch
        // case, harmless if not.
        __ delayed()->add(addr, cardtable, tmp2);

        // We didn't take the branch, so we're already dirty: return.
        // Use return-from-leaf
        __ retl();
        __ delayed()->nop();

        // Not dirty.
        __ bind(not_already_dirty);
        // First, dirty it.
        __ stb(G0, tmp2, 0);  // [cardPtr] := 0  (i.e., dirty).

        Register tmp3 = cardtable;
        Register tmp4 = tmp;

        // these registers are now dead
        addr = cardtable = tmp = noreg;

        int dirty_card_q_index_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   PtrQueue::byte_offset_of_index());
        int dirty_card_q_buf_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   PtrQueue::byte_offset_of_buf());
        __ bind(restart);
        __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);

        __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn,
                          tmp3, refill);
        // If the branch is taken, no harm in executing this in the delay slot.
        __ delayed()->ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
        __ sub(tmp3, oopSize, tmp3);

        __ st_ptr(tmp2, tmp4, tmp3);  // [_buf + index] := <address_of_card>
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);

        __ bind(refill);
        __ save_frame(0);

        __ mov(tmp2, L0);
        __ mov(tmp3, L1);
        __ mov(tmp4, L2);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         DirtyCardQueueSet::handle_zero_index_for_thread),
                                         G2_thread);

        __ mov(L0, tmp2);
        __ mov(L1, tmp3);
        __ mov(L2, tmp4);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;
#endif // !SERIALGC

    default:
      { __ set_info("unimplemented entry", dont_gc_arguments);
        __ save_frame(0);
        __ set((int)id, O1);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), O1);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}
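
For orientation, the post-barrier fast path generated above reduces to a few lines of C: shift the store address right by the card shift to index the card table, and dirty the card (write 0) only if its byte is not already zero. A minimal sketch under those assumptions; the helper name and types are illustrative, not part of the HotSpot sources:

// Illustrative sketch of the generated fast path above; not HotSpot code.
#include <cstdint>

typedef signed char card_byte_t;   // stand-in for jbyte

inline void post_barrier_fast_path(uintptr_t store_addr,
                                   volatile card_byte_t* byte_map_base,
                                   int card_shift) {
  volatile card_byte_t* card = byte_map_base + (store_addr >> card_shift);
  if (*card != 0) {   // not already dirty (the branch taken in the stub)
    *card = 0;        // mark dirty; the slow path then enqueues the card address
  }
}
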
Example #14
void CompactingPermGenGen::generate_vtable_methods(void** vtbl_list,
                                                   void** vtable,
                                                   char** md_top,
                                                   char* md_end,
                                                   char** mc_top,
                                                   char* mc_end) {

  intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
  *(intptr_t *)(*md_top) = vtable_bytes;
  *md_top += sizeof(intptr_t);
  void** dummy_vtable = (void**)*md_top;
  *vtable = dummy_vtable;
  *md_top += vtable_bytes;

  // Get ready to generate dummy methods.

  CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
  MacroAssembler* masm = new MacroAssembler(&cb);

  Label common_code;
  for (int i = 0; i < vtbl_list_size; ++i) {
    for (int j = 0; j < num_virtuals; ++j) {
      dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();

      // Load eax with a value indicating vtable/offset pair.
      // -- bits[ 7..0]  (8 bits) which virtual method in table?
      // -- bits[12..8]  (5 bits) which virtual method table?
      // -- must fit in 13-bit instruction immediate field.
      __ movl(rax, (i << 8) + j);
      __ jmp(common_code);
    }
  }

  __ bind(common_code);

  // Expecting to be called with "thiscall" conventions -- the arguments
  // are on the stack and the "this" pointer is in c_rarg0. In addition, rax
  // was set (above) to the offset of the method in the table.

  __ push(c_rarg1);                     // save & free register
  __ push(c_rarg0);                     // save "this"
  __ mov(c_rarg0, rax);
  __ shrptr(c_rarg0, 8);                // isolate vtable identifier.
  __ shlptr(c_rarg0, LogBytesPerWord);
  __ lea(c_rarg1, ExternalAddress((address)vtbl_list)); // ptr to correct vtable list.
  __ addptr(c_rarg1, c_rarg0);          // ptr to list entry.
  __ movptr(c_rarg1, Address(c_rarg1, 0));      // get correct vtable address.
  __ pop(c_rarg0);                      // restore "this"
  __ movptr(Address(c_rarg0, 0), c_rarg1);      // update vtable pointer.

  __ andptr(rax, 0x00ff);                       // isolate vtable method index
  __ shlptr(rax, LogBytesPerWord);
  __ addptr(rax, c_rarg1);              // address of real method pointer.
  __ pop(c_rarg1);                      // restore register.
  __ movptr(rax, Address(rax, 0));      // get real method pointer.
  __ jmp(rax);                          // jump to the real method.

  __ flush();

  *mc_top = (char*)__ pc();
}
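
The rax value loaded by each dummy method is just a packed (vtable, slot) pair, which the common code unpacks with shifts and masks. The same decoding in plain C++, as a sketch (the struct and helper names are illustrative only):

// Illustrative decoding of the (i << 8) + j token built above; not part of the VM sources.
#include <cstdint>

struct VtableToken {
  uint32_t vtable_index;   // bits[12..8]: which vtable in vtbl_list
  uint32_t method_index;   // bits[ 7..0]: which slot within that vtable
};

inline VtableToken decode_vtable_token(uint32_t token) {
  VtableToken t;
  t.vtable_index = (token >> 8) & 0x1f;
  t.method_index = token & 0xff;
  return t;
}
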
Example #15
    address generate_getPsrInfo() {
        // Flags to test CPU type.
        const uint32_t HS_EFL_AC           = 0x40000;
        const uint32_t HS_EFL_ID           = 0x200000;
        // Values for when we don't have a CPUID instruction.
        const int      CPU_FAMILY_SHIFT = 8;
        const uint32_t CPU_FAMILY_386   = (3 << CPU_FAMILY_SHIFT);
        const uint32_t CPU_FAMILY_486   = (4 << CPU_FAMILY_SHIFT);

        Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
        Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done;

        StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
#   define __ _masm->

        address start = __ pc();

        //
        // void getPsrInfo(VM_Version::CpuidInfo* cpuid_info);
        //
        // LP64: rcx and rdx are first and second argument registers on windows

        __ push(rbp);
#ifdef _LP64
        __ mov(rbp, c_rarg0); // cpuid_info address
#else
        __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
#endif
        __ push(rbx);
        __ push(rsi);
        __ pushf();          // preserve rbx, and flags
        __ pop(rax);
        __ push(rax);
        __ mov(rcx, rax);
        //
        // if we are unable to change the AC flag, we have a 386
        //
        __ xorl(rax, HS_EFL_AC);
        __ push(rax);
        __ popf();
        __ pushf();
        __ pop(rax);
        __ cmpptr(rax, rcx);
        __ jccb(Assembler::notEqual, detect_486);

        __ movl(rax, CPU_FAMILY_386);
        __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
        __ jmp(done);

        //
        // If we are unable to change the ID flag, we have a 486 which does
        // not support the "cpuid" instruction.
        //
        __ bind(detect_486);
        __ mov(rax, rcx);
        __ xorl(rax, HS_EFL_ID);
        __ push(rax);
        __ popf();
        __ pushf();
        __ pop(rax);
        __ cmpptr(rcx, rax);
        __ jccb(Assembler::notEqual, detect_586);

        __ bind(cpu486);
        __ movl(rax, CPU_FAMILY_486);
        __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
        __ jmp(done);

        //
        // At this point, we have a chip which supports the "cpuid" instruction
        //
        __ bind(detect_586);
        __ xorl(rax, rax);
        __ cpuid();
        __ orl(rax, rax);
        __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                            // value of at least 1, we give up and
                                            // assume a 486
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        __ cmpl(rax, 0xa);                  // Is cpuid(0xB) supported?
        __ jccb(Assembler::belowEqual, std_cpuid4);

        //
        // cpuid(0xB) Processor Topology
        //
        __ movl(rax, 0xb);
        __ xorl(rcx, rcx);   // Threads level
        __ cpuid();

        __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        __ movl(rax, 0xb);
        __ movl(rcx, 1);     // Cores level
        __ cpuid();
        __ push(rax);
        __ andl(rax, 0x1f);  // Determine if valid topology level
        __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
        __ andl(rax, 0xffff);
        __ pop(rax);
        __ jccb(Assembler::equal, std_cpuid4);

        __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        __ movl(rax, 0xb);
        __ movl(rcx, 2);     // Packages level
        __ cpuid();
        __ push(rax);
        __ andl(rax, 0x1f);  // Determine if valid topology level
        __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
        __ andl(rax, 0xffff);
        __ pop(rax);
        __ jccb(Assembler::equal, std_cpuid4);

        __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        //
        // cpuid(0x4) Deterministic cache params
        //
        __ bind(std_cpuid4);
        __ movl(rax, 4);
        __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
        __ jccb(Assembler::greater, std_cpuid1);

        __ xorl(rcx, rcx);   // L1 cache
        __ cpuid();
        __ push(rax);
        __ andl(rax, 0x1f);  // Determine if valid cache parameters used
        __ orl(rax, rax);    // eax[4:0] == 0 indicates invalid cache
        __ pop(rax);
        __ jccb(Assembler::equal, std_cpuid1);

        __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        //
        // Standard cpuid(0x1)
        //
        __ bind(std_cpuid1);
        __ movl(rax, 1);
        __ cpuid();
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        //
        // Check if OS has enabled XGETBV instruction to access XCR0
        // (OSXSAVE feature flag) and CPU supports AVX
        //
        __ andl(rcx, 0x18000000);
        __ cmpl(rcx, 0x18000000);
        __ jccb(Assembler::notEqual, sef_cpuid);

        //
        // XCR0, XFEATURE_ENABLED_MASK register
        //
        __ xorl(rcx, rcx);   // zero for XCR0 register
        __ xgetbv();
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rdx);

        //
        // cpuid(0x7) Structured Extended Features
        //
        __ bind(sef_cpuid);
        __ movl(rax, 7);
        __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
        __ jccb(Assembler::greater, ext_cpuid);

        __ xorl(rcx, rcx);
        __ cpuid();
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);

        //
        // Extended cpuid(0x80000000)
        //
        __ bind(ext_cpuid);
        __ movl(rax, 0x80000000);
        __ cpuid();
        __ cmpl(rax, 0x80000000);     // Is cpuid(0x80000001) supported?
        __ jcc(Assembler::belowEqual, done);
        __ cmpl(rax, 0x80000004);     // Is cpuid(0x80000005) supported?
        __ jccb(Assembler::belowEqual, ext_cpuid1);
        __ cmpl(rax, 0x80000006);     // Is cpuid(0x80000007) supported?
        __ jccb(Assembler::belowEqual, ext_cpuid5);
        __ cmpl(rax, 0x80000007);     // Is cpuid(0x80000008) supported?
        __ jccb(Assembler::belowEqual, ext_cpuid7);
        //
        // Extended cpuid(0x80000008)
        //
        __ movl(rax, 0x80000008);
        __ cpuid();
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        //
        // Extended cpuid(0x80000007)
        //
        __ bind(ext_cpuid7);
        __ movl(rax, 0x80000007);
        __ cpuid();
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        //
        // Extended cpuid(0x80000005)
        //
        __ bind(ext_cpuid5);
        __ movl(rax, 0x80000005);
        __ cpuid();
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        //
        // Extended cpuid(0x80000001)
        //
        __ bind(ext_cpuid1);
        __ movl(rax, 0x80000001);
        __ cpuid();
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        //
        // return
        //
        __ bind(done);
        __ popf();
        __ pop(rsi);
        __ pop(rbx);
        __ pop(rbp);
        __ ret(0);

#   undef __

        return start;
    };
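
Note that for pre-CPUID chips the stub above stores a synthetic family word (CPU_FAMILY_386 or CPU_FAMILY_486) directly into the std_cpuid1 slot, so callers can read the family out of EAX bits 11..8 either way. A minimal sketch of that decoding, with an illustrative helper name (the shift matches CPU_FAMILY_SHIFT above):

// Illustrative only: recovering the base family from the stored std_cpuid1 EAX word.
#include <cstdint>

inline uint32_t cpu_family_from_std_cpuid1(uint32_t eax) {
  const int CPU_FAMILY_SHIFT = 8;           // same shift used by the stub
  return (eax >> CPU_FAMILY_SHIFT) & 0xf;   // base family field of CPUID leaf 1
}
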
void ret ()
{
	jmp (SpecReg[5]); // ????

}
Example #17
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, void* returnAddress)
{
    Vector<JmpSrc, 16> failureCases;
    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    failureCases.append(__ jne());
    __ cmpl_im(reinterpret_cast<uint32_t>(oldStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
    failureCases.append(__ jne());
    Vector<JmpSrc> successCases;

    //  ecx = baseObject
    __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
    // proto(ecx) = baseObject->structure()->prototype()
    __ cmpl_im(ObjectType, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type), X86::ecx);
    failureCases.append(__ jne());
    __ movl_mr(FIELD_OFFSET(Structure, m_prototype), X86::ecx, X86::ecx);
    
    // ecx = baseObject->m_structure
    for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
        // null check the prototype
        __ cmpl_ir(asInteger(jsNull()), X86::ecx);
        successCases.append(__ je());

        // Check the structure id
        __ cmpl_im(reinterpret_cast<uint32_t>(it->get()), FIELD_OFFSET(JSCell, m_structure), X86::ecx);
        failureCases.append(__ jne());
        
        __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::ecx, X86::ecx);
        __ cmpl_im(ObjectType, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type), X86::ecx);
        failureCases.append(__ jne());
        __ movl_mr(FIELD_OFFSET(Structure, m_prototype), X86::ecx, X86::ecx);
    }

    failureCases.append(__ jne());
    for (unsigned i = 0; i < successCases.size(); ++i)
        __ link(successCases[i], __ label());

    JmpSrc callTarget;

    // emit a call only if storage realloc is needed
    if (transitionWillNeedStorageRealloc(oldStructure, newStructure)) {
        __ push_r(X86::edx);
        __ push_i32(newStructure->propertyStorageCapacity());
        __ push_i32(oldStructure->propertyStorageCapacity());
        __ push_r(X86::eax);
        callTarget = __ call();
        __ addl_ir(3 * sizeof(void*), X86::esp);
        __ pop_r(X86::edx);
    }

    // Assumes m_refCount can be decremented easily, refcount decrement is safe as 
    // codeblock should ensure oldStructure->m_refCount > 0
    __ subl_im(1, reinterpret_cast<void*>(oldStructure));
    __ addl_im(1, reinterpret_cast<void*>(newStructure));
    __ movl_i32m(reinterpret_cast<uint32_t>(newStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);

    // write the value
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);

    __ ret();
    
    JmpSrc failureJump;
    if (failureCases.size()) {
        for (unsigned i = 0; i < failureCases.size(); ++i)
            __ link(failureCases[i], __ label());
        restoreArgumentReferenceForTrampoline();
        failureJump = __ jmp();
    }

    void* code = __ executableCopy(m_codeBlock->executablePool());

    if (failureCases.size())
        X86Assembler::link(code, failureJump, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));

    if (transitionWillNeedStorageRealloc(oldStructure, newStructure))
        X86Assembler::link(code, callTarget, reinterpret_cast<void*>(resizePropertyStorage));
    
    stubInfo->stubRoutine = code;
    
    ctiRepatchCallByReturnAddress(returnAddress, code);
}
Example #18
void JIT::compileOpCallSlowCase(Instruction* instruction, unsigned i, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    __ link(iter->from, __ label());

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Fast check for JS function.
    __ testl_i32r(JSImmediate::TagMask, X86::ecx);
    JmpSrc callLinkFailNotObject = __ jne();
    __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), X86::ecx);
    JmpSrc callLinkFailNotJSFunction = __ jne();

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(i, Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx, i);
    }

    __ movl_i32r(argCount, X86::edx);

    // Speculatively roll the callframe, assuming argCount will match the arity.
    __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
    __ addl_i32r(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);

    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation =
        emitNakedCall(i, m_interpreter->m_ctiVirtualCallPreLink);

    JmpSrc storeResultForFirstRun = __ jmp();

    // This is the address for the cold path *after* the first run (which tries to link the call).
    m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = __ label();

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Check for JSFunctions.
    __ testl_i32r(JSImmediate::TagMask, X86::ecx);
    JmpSrc isNotObject = __ jne();
    __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), X86::ecx);
    JmpSrc isJSFunction = __ je();

    // This handles host functions
    JmpDst notJSFunctionlabel = __ label();
    __ link(isNotObject, notJSFunctionlabel);
    __ link(callLinkFailNotObject, notJSFunctionlabel);
    __ link(callLinkFailNotJSFunction, notJSFunctionlabel);
    emitCTICall(i, ((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
    JmpSrc wasNotJSFunction = __ jmp();

    // Next, handle JSFunctions...
    __ link(isJSFunction, __ label());

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(i, Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx, i);
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
    __ addl_i32r(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_i32r(argCount, X86::edx);

    emitNakedCall(i, m_interpreter->m_ctiVirtualCall);

    // Put the return value in dst. In the interpreter, op_ret does this.
    JmpDst storeResult = __ label();
    __ link(wasNotJSFunction, storeResult);
    __ link(storeResultForFirstRun, storeResult);
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
Example #19
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
{
#if USE(CTI_REPATCH_PIC)
    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));

    ASSERT(count);
    
    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    JmpSrc baseObjectCheck = checkStructure(X86::eax, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_im(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = code;

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
#else
    ASSERT(count);
    
    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    bucketsOfFail.append(__ jne());
    bucketsOfFail.append(checkStructure(X86::eax, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_im(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    __ ret();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));

    stubInfo->stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
#endif
}
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Note well: pd_code_size_limit is the absolute minimum we can get
  // away with.  If you add code here, bump the code stub size
  // returned by pd_code_size_limit!
  const int amd64_code_length = VtableStub::pd_code_size_limit(false);
  VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), amd64_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif

  // Entry arguments:
  //  rax: Interface
  //  j_rarg0: Receiver

  // Free registers (non-args) are rax (interface), rbx

  // get receiver (need to skip return address on top of stack)

  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();

  // Most registers are in use; we'll use rax, rbx, r10, r11
  // (various calling sequences use r[cd]x, r[sd]i, r[89]; stay away from them)
  __ load_klass(r10, j_rarg0);

  // If we take a trap while this arg is on the stack we will not
  // be able to walk the stack properly. This is not an issue except
  // when there are mistakes in this assembly code that could generate
  // a spurious fault. Ask me how I know...

  const Register method = rbx;
  Label throw_icce;

  // Get Method* and entrypoint for compiler
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             r10, rax, itable_index,
                             // outputs: method, scan temp. reg
                             method, r11,
                             throw_icce);

  // method (rbx): Method*
  // j_rarg0: receiver

#ifdef ASSERT
  if (DebugVtables) {
    Label L2;
    __ cmpptr(method, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L2);
    __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notZero, L2);
    __ stop("compiler entrypoint is null");
    __ bind(L2);
  }
#endif // ASSERT

  // rbx: Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ jmp(Address(method, Method::from_compiled_offset()));

  __ bind(throw_icce);
  __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));

  __ flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("itable #%d at "PTR_FORMAT"[%d] left over: %d",
                  itable_index, s->entry_point(),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
  assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
Example #21
/** 
 * @brief emulate instruction 
 * @return returns false if something goes wrong (e.g. illegal instruction)
 *
 * Current limitations:
 * 
 * - Illegal instructions are not implemented
 * - Excess cycles due to page boundary crossing are not calculated
 * - Some known architectural bugs are not emulated
 */
bool Cpu::emulate()
{
  /* fetch instruction */
  uint8_t insn = fetch_op();
  bool retval = true;
  /* emulate instruction */
  switch(insn)
  {
  /* BRK */
  case 0x0: brk(); break;
  /* ORA (nn,X) */
  case 0x1: ora(load_byte(addr_indx()),6); break;
  /* ORA nn */
  case 0x5: ora(load_byte(addr_zero()),3); break;
  /* ASL nn */
  case 0x6: asl_mem(addr_zero(),5); break;
  /* PHP */
  case 0x8: php(); break;
  /* ORA #nn */
  case 0x9: ora(fetch_op(),2); break;
  /* ASL A */
  case 0xA: asl_a(); break;
  /* ORA nnnn */
  case 0xD: ora(load_byte(addr_abs()),4); break;
  /* ASL nnnn */
  case 0xE: asl_mem(addr_abs(),6); break; 
  /* BPL nn */
  case 0x10: bpl(); break;
  /* ORA (nn),Y */
  case 0x11: ora(load_byte(addr_indy()),5); break;
  /* ORA nn,X */
  case 0x15: ora(load_byte(addr_zerox()),4); break;
  /* ASL nn,X */
  case 0x16: asl_mem(addr_zerox(),6); break;
  /* CLC */
  case 0x18: clc(); break;
  /* ORA nnnn,Y */
  case 0x19: ora(load_byte(addr_absy()),4); break;
  /* ORA nnnn,X */
  case 0x1D: ora(load_byte(addr_absx()),4); break;
  /* ASL nnnn,X */
  case 0x1E: asl_mem(addr_absx(),7); break;
  /* JSR */
  case 0x20: jsr(); break;
  /* AND (nn,X) */
  case 0x21: _and(load_byte(addr_indx()),6); break;
  /* BIT nn */
  case 0x24: bit(addr_zero(),3); break;
  /* AND nn */
  case 0x25: _and(load_byte(addr_zero()),3); break;
  /* ROL nn */
  case 0x26: rol_mem(addr_zero(),5); break;
  /* PLP */
  case 0x28: plp(); break;
  /* AND #nn */
  case 0x29: _and(fetch_op(),2); break;
  /* ROL A */
  case 0x2A: rol_a(); break;
  /* BIT nnnn */
  case 0x2C: bit(addr_abs(),4); break;
  /* AND nnnn */
  case 0x2D: _and(load_byte(addr_abs()),4); break;
  /* ROL nnnn */
  case 0x2E: rol_mem(addr_abs(),6); break;
  /* BMI nn */
  case 0x30: bmi(); break;
  /* AND (nn),Y */
  case 0x31: _and(load_byte(addr_indy()),5); break;               
  /* AND nn,X */
  case 0x35: _and(load_byte(addr_zerox()),4); break;
  /* ROL nn,X */
  case 0x36: rol_mem(addr_zerox(),6); break;
  /* SEC */
  case 0x38: sec(); break;
  /* AND nnnn,Y */
  case 0x39: _and(load_byte(addr_absy()),4); break;
  /* AND nnnn,X */
  case 0x3D: _and(load_byte(addr_absx()),4); break;
  /* ROL nnnn,X */
  case 0x3E: rol_mem(addr_absx(),7); break;
  /* RTI */
  case 0x40: rti(); break;
  /* EOR (nn,X) */
  case 0x41: eor(load_byte(addr_indx()),6); break;
  /* EOR nn */
  case 0x45: eor(load_byte(addr_zero()),3); break;
  /* LSR nn */
  case 0x46: lsr_mem(addr_zero(),5); break;
  /* PHA */
  case 0x48: pha(); break;
  /* EOR #nn */
  case 0x49: eor(fetch_op(),2); break;
  /* BVC */
  case 0x50: bvc(); break;
  /* JMP nnnn */
  case 0x4C: jmp(); break;
  /* EOR nnnn */
  case 0x4D: eor(load_byte(addr_abs()),4); break;
  /* LSR A */
  case 0x4A: lsr_a(); break;
  /* LSR nnnn */
  case 0x4E: lsr_mem(addr_abs(),6); break;
  /* EOR (nn),Y */
  case 0x51: eor(load_byte(addr_indy()),5); break;
  /* EOR nn,X */
  case 0x55: eor(load_byte(addr_zerox()),4); break;
  /* LSR nn,X */
  case 0x56: lsr_mem(addr_zerox(),6); break;
  /* CLI */
  case 0x58: cli(); break;
  /* EOR nnnn,Y */
  case 0x59: eor(load_byte(addr_absy()),4); break;
  /* EOR nnnn,X */
  case 0x5D: eor(load_byte(addr_absx()),4); break;
  /* LSR nnnn,X */
  case 0x5E: lsr_mem(addr_absx(),7); break;
  /* RTS */
  case 0x60: rts(); break;
  /* ADC (nn,X) */
  case 0x61: adc(load_byte(addr_indx()),6); break;
  /* ADC nn */
  case 0x65: adc(load_byte(addr_zero()),3); break;
  /* ROR nn */
  case 0x66: ror_mem(addr_zero(),5); break;
  /* PLA */
  case 0x68: pla(); break;
  /* ADC #nn */
  case 0x69: adc(fetch_op(),2); break;
  /* ROR A */
  case 0x6A: ror_a(); break;
  /* JMP (nnnn) */
  case 0x6C: jmp_ind(); break;
  /* ADC nnnn */
  case 0x6D: adc(load_byte(addr_abs()),4); break;
  /* ROR nnnn */
  case 0x6E: ror_mem(addr_abs(),6); break;
  /* BVS */
  case 0x70: bvs(); break;
  /* ADC (nn),Y */
  case 0x71: adc(load_byte(addr_indy()),5); break;
  /* ADC nn,X */
  case 0x75: adc(load_byte(addr_zerox()),4); break;
  /* ROR nn,X */
  case 0x76: ror_mem(addr_zerox(),6); break;
  /* SEI */
  case 0x78: sei(); break;
  /* ADC nnnn,Y */
  case 0x79: adc(load_byte(addr_absy()),4); break;
  /* ADC nnnn,X */
  case 0x7D: adc(load_byte(addr_absx()),4); break;
  /* ROR nnnn,X */
  case 0x7E: ror_mem(addr_absx(),7); break;
  /* STA (nn,X) */
  case 0x81: sta(addr_indx(),6); break;
  /* STY nn */
  case 0x84: sty(addr_zero(),3); break;
  /* STA nn */
  case 0x85: sta(addr_zero(),3); break;
  /* STX nn */
  case 0x86: stx(addr_zero(),3); break;
  /* DEY */
  case 0x88: dey(); break;
  /* TXA */
  case 0x8A: txa(); break;
  /* STY nnnn */
  case 0x8C: sty(addr_abs(),4); break;
  /* STA nnnn */
  case 0x8D: sta(addr_abs(),4); break;
  /* STX nnnn */
  case 0x8E: stx(addr_abs(),4); break;
  /* BCC nn */
  case 0x90: bcc(); break;
  /* STA (nn),Y */
  case 0x91: sta(addr_indy(),6); break;
  /* STY nn,X */
  case 0x94: sty(addr_zerox(),4); break;
  /* STA nn,X */
  case 0x95: sta(addr_zerox(),4); break;
  /* STX nn,Y */
  case 0x96: stx(addr_zeroy(),4); break;
  /* TYA */
  case 0x98: tya(); break;
  /* STA nnnn,Y */
  case 0x99: sta(addr_absy(),5); break;
  /* TXS */
  case 0x9A: txs(); break;
  /* STA nnnn,X */
  case 0x9D: sta(addr_absx(),5); break;
  /* LDY #nn */
  case 0xA0: ldy(fetch_op(),2); break; 
  /* LDA (nn,X) */
  case 0xA1: lda(load_byte(addr_indx()),6); break;
  /* LDX #nn */
  case 0xA2: ldx(fetch_op(),2); break;
  /* LDY nn */
  case 0xA4: ldy(load_byte(addr_zero()),3); break;
  /* LDA nn */
  case 0xA5: lda(load_byte(addr_zero()),3); break;
  /* LDX nn */
  case 0xA6: ldx(load_byte(addr_zero()),3); break;
  /* TAY */
  case 0xA8: tay(); break;
  /* LDA #nn */
  case 0xA9: lda(fetch_op(),2); break;
  /* TAX */
  case 0xAA: tax(); break;
  /* LDY nnnn */
  case 0xAC: ldy(load_byte(addr_abs()),4); break;
  /* LDA nnnn */
  case 0xAD: lda(load_byte(addr_abs()),4); break;
  /* LDX nnnn */
  case 0xAE: ldx(load_byte(addr_abs()),4); break;
  /* BCS nn */
  case 0xB0: bcs(); break;
  /* LDA (nn),Y */
  case 0xB1: lda(load_byte(addr_indy()),5); break;
  /* LDY nn,X */
  case 0xB4: ldy(load_byte(addr_zerox()),4); break;
  /* LDA nn,X */
  case 0xB5: lda(load_byte(addr_zerox()),4); break;
  /* LDX nn,Y */
  case 0xB6: ldx(load_byte(addr_zeroy()),4); break;
  /* CLV */
  case 0xB8: clv(); break;
  /* LDA nnnn,Y */
  case 0xB9: lda(load_byte(addr_absy()),4); break;
  /* TSX */
  case 0xBA: tsx(); break;
  /* LDY nnnn,X */
  case 0xBC: ldy(load_byte(addr_absx()),4); break;
  /* LDA nnnn,X */
  case 0xBD: lda(load_byte(addr_absx()),4); break;
  /* LDX nnnn,Y */
  case 0xBE: ldx(load_byte(addr_absy()),4); break;
  /* CPY #nn */
  case 0xC0: cpy(fetch_op(),2); break;
  /* CMP (nn,X) */
  case 0xC1: cmp(load_byte(addr_indx()),6); break;
  /* CPY nn */
  case 0xC4: cpy(load_byte(addr_zero()),3); break;
  /* CMP nn */
  case 0xC5: cmp(load_byte(addr_zero()),3); break;
  /* DEC nn */
  case 0xC6: dec(addr_zero(),5); break;
  /* INY */
  case 0xC8: iny(); break;
  /* CMP #nn */
  case 0xC9: cmp(fetch_op(),2); break;
  /* DEX */
  case 0xCA: dex(); break;
  /* CPY nnnn */
  case 0xCC: cpy(load_byte(addr_abs()),4); break;
  /* CMP nnnn */
  case 0xCD: cmp(load_byte(addr_abs()),4); break;
  /* DEC nnnn */
  case 0xCE: dec(addr_abs(),6); break;
  /* BNE nn */
  case 0xD0: bne(); break;
  /* CMP (nn),Y */
  case 0xD1: cmp(load_byte(addr_indy()),5); break;
  /* CMP nn,X */
  case 0xD5: cmp(load_byte(addr_zerox()),4); break;
  /* DEC nn,X */
  case 0xD6: dec(addr_zerox(),6); break;
  /* CLD */
  case 0xD8: cld(); break;
  /* CMP nnnn,Y */
  case 0xD9: cmp(load_byte(addr_absy()),4); break;
  /* CMP nnnn,X */
  case 0xDD: cmp(load_byte(addr_absx()),4); break;
  /* DEC nnnn,X */
  case 0xDE: dec(addr_absx(),7); break;
  /* CPX #nn */
  case 0xE0: cpx(fetch_op(),2); break;
  /* SBC (nn,X) */
  case 0xE1: sbc(load_byte(addr_indx()),6); break;
  /* CPX nn */
  case 0xE4: cpx(load_byte(addr_zero()),3); break;
  /* SBC nn */
  case 0xE5: sbc(load_byte(addr_zero()),3); break;
  /* INC nn */
  case 0xE6: inc(addr_zero(),5); break;
  /* INX */
  case 0xE8: inx(); break;
  /* SBC #nn */
  case 0xE9: sbc(fetch_op(),2); break;
  /* NOP */
  case 0xEA: nop(); break;
  /* CPX nnnn */
  case 0xEC: cpx(load_byte(addr_abs()),4); break;
  /* SBC nnnn */
  case 0xED: sbc(load_byte(addr_abs()),4); break;
  /* INC nnnn */
  case 0xEE: inc(addr_abs(),6); break;
  /* BEQ nn */
  case 0xF0: beq(); break;
  /* SBC (nn),Y */
  case 0xF1: sbc(load_byte(addr_indy()),5); break;
  /* SBC nn,X */
  case 0xF5: sbc(load_byte(addr_zerox()),4); break;
  /* INC nn,X */
  case 0xF6: inc(addr_zerox(),6); break;
  /* SED */
  case 0xF8: sed(); break;
  /* SBC nnnn,Y */
  case 0xF9: sbc(load_byte(addr_absy()),4); break;
  /* SBC nnnn,X */
  case 0xFD: sbc(load_byte(addr_absx()),4); break;
  /* INC nnnn,X */
  case 0xFE: inc(addr_absx(),7); break;
  /* Unknown or illegal instruction */
  default:
    D("Unknown instruction: %X at %04x\n", insn,pc());
    retval = false;
  }
  return retval;
}
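
Given the return convention above (false on an unknown opcode), driving the core is just a loop that calls emulate() until it fails or an external stop condition fires. A minimal usage sketch; how the Cpu instance is constructed, reset and wired to memory is emulator-specific and assumed here:

// Minimal driver sketch; setup and shutdown details are assumed, not shown.
void run_until_illegal(Cpu& cpu) {
  while (cpu.emulate()) {
    // each iteration fetches, decodes and executes one instruction
  }
  // emulate() returned false: an unknown or illegal opcode was hit
}
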
Example #22
// used by compiler only; may use only caller saved registers eax, ebx, ecx.
// edx holds first int arg, esi, edi, ebp are callee-save & must be preserved.
// Leave receiver in ecx; required behavior when +OptoArgsInRegisters
// is modified to put first oop in ecx.
//
// NOTE: If this code is used by the C1, the receiver_location is always 0.
VtableStub* VtableStubs::create_vtable_stub(int vtable_index, int receiver_location) {
  const int i486_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index, receiver_location);
  ResourceMark rm;
  MacroAssembler* masm = new MacroAssembler(new CodeBuffer(s->entry_point(), i486_code_length));

#ifndef PRODUCT
#ifdef COMPILER2
  if (CountCompiledCalls) __ incl(Address((int)OptoRuntime::nof_megamorphic_calls_addr(), relocInfo::none));
#endif
#endif

  // get receiver (need to skip return address on top of stack)
#ifdef COMPILER1
  assert(receiver_location == 0, "receiver is always in ecx - no location info needed");
#else
  if( receiver_location < SharedInfo::stack0 ) {
    assert(receiver_location == ECX_num, "receiver expected in ecx");
  } else {
    __ movl(ecx, Address(esp, SharedInfo::reg2stack(OptoReg::Name(receiver_location)) * wordSize+wordSize/*skip return address*/));
  }
#endif
  // get receiver klass
  address npe_addr = __ pc();
  __ movl(eax, Address(ecx, oopDesc::klass_offset_in_bytes()));
  // compute entry offset (in words)
  int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
#ifndef PRODUCT
  if (DebugVtables) { 
    Label L;
    // check offset vs vtable length
    __ cmpl(Address(eax, instanceKlass::vtable_length_offset()*wordSize), vtable_index*vtableEntry::size());
    __ jcc(Assembler::greater, L);
    __ movl(ebx, vtable_index);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), ecx, ebx);
    __ bind(L);
  }
#endif // PRODUCT
  // load methodOop and target address
#ifdef COMPILER1
  __ movl(ebx, Address(eax, entry_offset*wordSize + vtableEntry::method_offset_in_bytes()));
  address ame_addr = __ pc();
  __ movl(edx, Address(ebx, methodOopDesc::from_compiled_code_entry_point_offset()));
  if (DebugVtables) {
    Label L;
    __ testl(edx, edx);
    __ jcc(Assembler::notZero, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }
  // eax: receiver klass
  // ebx: methodOop
  // ecx: receiver
  // edx: entry point
  __ jmp(edx);
#else
  __ movl(eax, Address(eax, entry_offset*wordSize + vtableEntry::method_offset_in_bytes()));
  address ame_addr = __ pc();
  __ movl(ebx, Address(eax, methodOopDesc::from_compiled_code_entry_point_offset()));  

  if (DebugVtables) {
    Label L;
    __ testl(ebx, ebx);
    __ jcc(Assembler::notZero, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  } 


  // jump to target (either compiled code or c2iadapter)
  // eax: methodOop (in case we call c2iadapter)
  __ jmp(ebx);
#endif // COMPILER1

  masm->flush();
  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
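
The address math behind entry_offset above: the vtable begins at a fixed word offset inside the klass, each entry is vtableEntry::size() words, and the method field sits at a small byte offset within its entry. A small sketch of that computation with illustrative parameter names:

// Illustrative sketch of the slot address computed above; not HotSpot code.
#include <cstdint>

inline intptr_t vtable_method_byte_offset(int vtable_start_words,
                                          int vtable_index,
                                          int entry_size_words,
                                          int word_size,
                                          int method_offset_in_bytes) {
  int entry_offset_words = vtable_start_words + vtable_index * entry_size_words;
  return (intptr_t)entry_offset_words * word_size + method_offset_in_bytes;
}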