Exemplo n.º 1
0
// Test driver: run each test function and propagate the first non-zero
// (failure) exit code; return 0 when every test passes.
int main(int argc, char* argv[])
{
  (void)argc;  // unused
  (void)argv;  // unused
  int retval = 0;
  // Run test X() and return early on failure. Wrapped in do { } while(0)
  // so the macro expands to a single statement and stays safe inside an
  // unbraced if/else at the call site (the original two-statement form
  // would silently split there).
  #define maybe_return(X) do { retval = (X)(); if(retval != 0) { return retval; } } while(0)

  printf("nfa_test\n");
  maybe_return(nfa_test);

  return 0;
}
Exemplo n.º 2
0
  // Translate the bytecode of +vmm+ into native x86 code.
  //
  // Walks the opcode stream once, emitting a hand-written instruction
  // sequence for the simple opcodes and falling through to call_op for
  // everything else. Two shared exit points are emitted at the end:
  // +fin+ (store the ip, then clear the stack and return) and +real_fin+
  // (clear the stack and return only).
  //
  // NOTE(review): relies on members a (assembler), s (stack emitter),
  // ops (operation emitter), comments_ and virtual2native declared
  // elsewhere in the class — not visible in this chunk; confirm there.
  void JITCompiler::compile(STATE, VMMethod* vmm) {
    // Used for fixups: points at an immediate operand (emitted by
    // maybe_return) that must be patched with the next opcode's native pc.
    uintptr_t* last_imm = NULL;

    // A label pointing to the code for each virtual ip
    std::vector<AssemblerX86::NearJumpLocation> labels(vmm->total);

    // The location of the instructions that save ip into the current
    // CallStack then clear the stack and return
    AssemblerX86::NearJumpLocation fin;

    // The location of just the instructions that clear the stack and return
    AssemblerX86::NearJumpLocation real_fin;

    comments_[a.pc()] = "prologue";

    ops.prologue();
    cache_stack();

    // Pull native_ip out of the method_context and jump to it if
    // it's not 0.
    //
    // NOTE we don't pull the stack pointer out into ebx by default,
    // which means that any code that is jumped to has to assume it
    // needs to pull it out manually. This is currently not a problem
    // because our jump destinations are always right after calls
    // out to implementations and thus have uncached ebx.
    AssemblerX86::NearJumpLocation normal_start;

    comments_[a.pc()] = "method reentry";

    ops.load_native_ip(eax);
    a.cmp(eax, 0);
    a.jump_if_equal(normal_start);
    // native_ip was non-zero: resume execution at the saved address.
    a.jump(eax);

    a.set_label(normal_start);

    // Main loop: one pass over the opcode stream; +i+ advances by each
    // instruction's width at the bottom of the loop body.
    for(size_t i = 0; i < vmm->total;) {
      opcode op = vmm->opcodes[i];
      // op = instructions::reverse_superop(op);
      size_t width = InstructionSequence::instruction_width(op);

      // Set the label location
      a.set_label(labels[i]);

      comments_[a.pc()] = InstructionSequence::get_instruction_name(op);

      // If an immediate was registered to be updated (by a previous
      // maybe_return), patch it now with this opcode's native pc and
      // record a relocation for it.
      // TODO a.pc() is bigger than a uint32_t on 64bit
      if(last_imm) {

        *last_imm = (uintptr_t)a.pc();
        Relocation* rel = new Relocation(Relocation::LocalAbsolute,
            last_imm, a.pc(), 0);
        a.add_relocation(last_imm, rel);

        last_imm = NULL;
        // Because this is now a jump destination, reset the register
        // usage since we don't know the state of things when we're
        // jumped here.
        ops.reset_usage();
      } else if(labels[i].flags() & cFlagUnwoundTo) {
        // Update our table of virtual ip to native ip
        virtual2native[i] = reinterpret_cast<void*>(a.pc());

        labels[i].flags() |= cRecordV2N;
        // This is a jump destination for exceptions, reset
        // things and register it.
        ops.reset_usage();
      }

      switch(op) {
      case InstructionSequence::insn_noop:
        break;
      case InstructionSequence::insn_goto:
        a.jump(labels[vmm->opcodes[i + 1]]);
        break;
      case InstructionSequence::insn_goto_if_false:
        s.load_nth(eax, 0);
        s.pop();
        ops.jump_if_false(eax, labels[vmm->opcodes[i + 1]]);
        break;
      case InstructionSequence::insn_goto_if_true:
        s.load_nth(eax, 0);
        s.pop();
        ops.jump_if_true(eax, labels[vmm->opcodes[i + 1]]);
        break;
      case InstructionSequence::insn_goto_if_defined:
        s.load_nth(eax, 0);
        s.pop();
        a.cmp(eax, (uintptr_t)Qundef);
        a.jump_if_not_equal(labels[vmm->opcodes[i + 1]]);
        break;
      case InstructionSequence::insn_setup_unwind:
        // Mark the unwind target so it gets registered as an exception
        // jump destination (see the cFlagUnwoundTo check above), then
        // emit the instruction itself via the generic path.
        labels[vmm->opcodes[i + 1]].flags() |= cFlagUnwoundTo;
        goto call_op;
      case InstructionSequence::insn_pop:
        s.pop();
        break;
      case InstructionSequence::insn_dup_top:
        s.load_nth(eax, 0);
        s.push(eax);
        break;
      case InstructionSequence::insn_rotate:
        if(vmm->opcodes[i + 1] != 2) goto call_op;
        // Fall through and use swap if it's just 2
      case InstructionSequence::insn_swap_stack:
        s.load_nth(eax, 0);
        s.load_nth(ecx, 1);
        a.mov(s.position(1), eax);
        s.set_top(ecx);
        break;
      case InstructionSequence::insn_push_true:
        s.push((uintptr_t)Qtrue);
        break;
      case InstructionSequence::insn_push_false:
        s.push((uintptr_t)Qfalse);
        break;
      case InstructionSequence::insn_push_nil:
        s.push((uintptr_t)Qnil);
        break;
      case InstructionSequence::insn_meta_push_0:
        s.push((uintptr_t)Fixnum::from(0));
        break;
      case InstructionSequence::insn_meta_push_1:
        s.push((uintptr_t)Fixnum::from(1));
        break;
      case InstructionSequence::insn_meta_push_2:
        s.push((uintptr_t)Fixnum::from(2));
        break;
      case InstructionSequence::insn_meta_push_neg_1:
        s.push((uintptr_t)Fixnum::from(-1));
        break;
      case InstructionSequence::insn_push_int:
        s.push((uintptr_t)Fixnum::from(vmm->opcodes[i + 1]));
        break;
      case InstructionSequence::insn_push_self:
        ops.load_self(eax);
        s.push(eax);
        break;
      case InstructionSequence::insn_ret:
        // Leave the return value in eax and take the short exit that
        // does not store the ip.
        s.load_nth(eax, 0);
        a.jump(real_fin);
        break;
      // Now, for a bit more complicated ones...

      case InstructionSequence::insn_push_local:
        ops.get_local(eax, vmm->opcodes[i + 1]);
        s.push(eax);
        break;

      case InstructionSequence::insn_set_local:
        s.load_nth(edx, 0);
        ops.set_local(edx, vmm->opcodes[i + 1]);
        break;

      case InstructionSequence::insn_push_literal:
        ops.get_literal(eax, vmm->opcodes[i + 1]);
        s.push(eax);
        break;

      case InstructionSequence::insn_meta_send_op_minus:
      case InstructionSequence::insn_meta_send_op_plus: {
        AssemblerX86::NearJumpLocation done;
        emit_fast_math(done, op == InstructionSequence::insn_meta_send_op_plus);
        // The slow path may return; register the next opcode's pc as the
        // reentry point (patched via last_imm on the next iteration).
        maybe_return(i+width, &last_imm, fin);

        // This is a phi point, where the fast path and slow path merge.
        // We have to be sure that the stack cache settings are in sync
        // for both paths taken at this point. To be sure of that, we
        // always run cache_stack() after calling slow_path_plus.
        a.set_label(done);
        // Remove one from the stack
        s.pop();
        // Put the result on the stack
        s.set_top(eax);
        break;
      }

      case InstructionSequence::insn_meta_send_op_equal: {
        AssemblerX86::NearJumpLocation done;
        emit_fast_equal(done, op == InstructionSequence::insn_meta_send_op_equal);
        maybe_return(i+width, &last_imm, fin);

        a.set_label(done);

        // Put the result on the stack
        s.set_top(eax);

        break;
      }

      case InstructionSequence::insn_meta_send_op_lt:
      case InstructionSequence::insn_meta_send_op_gt: {
        AssemblerX86::NearJumpLocation done;
        emit_fast_compare(done, op == InstructionSequence::insn_meta_send_op_lt);
        maybe_return(i+width, &last_imm, fin);

        a.set_label(done);
        // Put the result on the stack
        s.set_top(eax);
        break;
      }

      case InstructionSequence::insn_push_const_fast: {
        AssemblerX86::NearJumpLocation slow_path;
        AssemblerX86::NearJumpLocation done;

        // Fast path: the cached association literal is non-nil; push its
        // value field directly.
        ops.get_literal(eax, vmm->opcodes[i + 2]);
        a.cmp(eax, reinterpret_cast<uintptr_t>(Qnil));
        a.jump_if_equal(slow_path);
        a.mov(eax, a.address(eax, 0)); // FIELD_OFFSET(rubinius::LookupTableAssociation, value_)));
        // TODO this doesn't support autoload!
        s.push(eax);

        a.jump(done);

        a.set_label(slow_path);
        uncache_stack();
// Disabled: slow-path call out to the instruction's C++ implementation.
#if 0
        const instructions::Implementation* impl = instructions::implementation(op);
        ops.call_operation(impl->address, impl->name,
            vmm->opcodes[i + 1],
            vmm->opcodes[i + 2]);
        maybe_return(i, &last_imm, fin);
#endif

        a.set_label(done);
        break;
      }
        // for any instruction we don't handle with a special code sequence,
        // just call the regular function for it.
      default: {
call_op:
        uncache_stack();
// Disabled: generic call out to the instruction's C++ implementation,
// dispatching on the instruction's argument count.
#if 0
        const instructions::Implementation* impl = instructions::implementation(op);
        switch(width) {
        case 1:
          ops.call_operation(impl->address, impl->name);
          break;
        case 2:
          ops.call_operation(impl->address, impl->name,
              vmm->opcodes[i + 1]);
          break;
        case 3:
          ops.call_operation(impl->address, impl->name,
              vmm->opcodes[i + 1],
              vmm->opcodes[i + 2]);
          break;
        default:
          std::cout << "Invalid width '" << width << "' for instruction '" <<
            op << "'\n";
          abort();
        }
#endif
        cache_stack();

// Disabled: emit the method-exit handling appropriate to the
// instruction's status (may-return vs. terminate).
#if 0
        instructions::Status status = instructions::check_status(op);
        if(status == instructions::MightReturn) {
          maybe_return(i + width, &last_imm, fin);
        } else if(status == instructions::Terminate) {
          a.jump(real_fin);
        }
#endif
        break;
      }
      }

      i += width;
    }

    comments_[a.pc()] = "epilogue";

    // fin: common exit that records the ip before returning.
    a.set_label(fin);

    // We could be jumping here from anywhere, assume nothing.
    ops.reset_usage();
    ops.store_ip(ecx, edx);
    uncache_stack();

    // real_fin: exit without storing the ip (used by insn_ret above).
    a.set_label(real_fin);
    ops.epilogue();
  }