Example #1
  void
bx_cpu_c::RETnear32_Iw(BxInstruction_t *i)
{
  Bit16u imm16;
  Bit32u temp_ESP;
  Bit32u return_EIP;

#if BX_DEBUGGER
  bx_cpu. show_flag |= Flag_ret;
#endif

  if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
    temp_ESP = ESP;
  else
    temp_ESP = SP;

  imm16 = i->Iw;

  invalidate_prefetch_q();


    if (protected_mode()) {
      if ( !can_pop(4) ) {
        BX_PANIC(("retnear_iw: can't pop EIP"));
        /* ??? #SS(0) -or #GP(0) */
        }

      access_linear(bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.base + temp_ESP + 0,
        4, CPL==3, BX_READ, &return_EIP);

      /* already inside the protected_mode() branch; the re-check was redundant */
      if (return_EIP > bx_cpu. sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
        BX_DEBUG(("retnear_iw: EIP > limit"));
        exception(BX_GP_EXCEPTION, 0, 0);
        }

      /* Pentium book says imm16 is number of words ??? */
      if ( !can_pop(4 + imm16) ) {
        BX_PANIC(("retnear_iw: can't release bytes from stack"));
        /* #GP(0) -or #SS(0) ??? */
        }

      bx_cpu. eip = return_EIP;
      if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
        ESP += 4 + imm16; /* ??? should it be 2*imm16 ? */
      else
        SP  += 4 + imm16;
      }
    else {
      pop_32(&return_EIP);
      bx_cpu. eip = return_EIP;
      if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
        ESP += imm16; /* ??? should it be 2*imm16 ? */
      else
        SP  += imm16;
      }

  BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_RET, bx_cpu. eip);
}
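
The "number of words ???" doubt in the comments above is settled by the Intel manuals: the imm16 of a near RET imm16 is a byte count released after EIP is popped, so the 4 + imm16 adjustment is right and 2*imm16 would be wrong. A minimal standalone model of the semantics (not Bochs code; the buffer and values are illustrative):

/* Standalone model of near RET imm16 on a 32-bit stack (not Bochs code;
 * buffer and values are illustrative).  Per the Intel manuals imm16 is a
 * byte count released after EIP is popped, so ESP advances by 4 + imm16. */
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
    uint8_t stack[64] = {0};
    uint32_t esp = 32;                      /* toy stack pointer */
    uint32_t ret_eip = 0x00401000;
    uint16_t imm16 = 8;                     /* e.g. two dword arguments */

    std::memcpy(&stack[esp], &ret_eip, 4);  /* caller pushed return EIP */

    uint32_t eip;
    std::memcpy(&eip, &stack[esp], 4);      /* pop return EIP */
    esp += 4 + imm16;                       /* release imm16 *bytes* of args */

    std::printf("EIP=%08x ESP=%u\n", (unsigned) eip, (unsigned) esp);
    return 0;
}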
Example #2
  void
bx_cpu_c::JMP_Ed(BxInstruction_t *i)
{
  Bit32u new_EIP;
  Bit32u op1_32;

    /* op1_32 is a register or memory reference */
    if (i->mod == 0xc0) {
      op1_32 = BX_READ_32BIT_REG(i->rm);
      }
    else {
      /* pointer, segment address pair */
      read_virtual_dword(i->seg, i->rm_addr, &op1_32);
      }

    invalidate_prefetch_q();
    new_EIP = op1_32;

#if BX_CPU_LEVEL >= 2
  if (protected_mode()) {
    if (new_EIP > bx_cpu. sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
      BX_PANIC(("jmp_ev: IP out of CS limits!"));
      exception(BX_GP_EXCEPTION, 0, 0);
      }
    }
#endif

  bx_cpu. eip = new_EIP;

  BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_JMP, new_EIP);
}
Example #3
  void
bx_cpu_c::JMP_Ap(BxInstruction_t *i)
{
  Bit32u disp32;
  Bit16u cs_raw;

  invalidate_prefetch_q();

  if (i->os_32) {
    disp32 = i->Id;
    }
  else {
    disp32 = i->Iw;
    }
  cs_raw = i->Iw2;

#if BX_CPU_LEVEL >= 2
  if (protected_mode()) {
    bx_cpu. jump_protected(i, cs_raw, disp32);
    goto done;
    }
#endif

  load_seg_reg(&bx_cpu. sregs[BX_SEG_REG_CS], cs_raw);
  bx_cpu. eip = disp32;

done:
  BX_INSTR_FAR_BRANCH(BX_INSTR_IS_JMP,
                      bx_cpu. sregs[BX_SEG_REG_CS].selector.value, bx_cpu. eip);
  return;
}
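
The far-immediate forms (JMP_Ap here and CALL32_Ap below) carry a ptr16:32 immediate: a little-endian 32-bit offset followed by a 16-bit selector, which is why the handler reads i->Id for the offset and i->Iw2 for the selector. A hedged standalone decode, assuming a little-endian host (the bytes are illustrative, not from Bochs):

/* Hypothetical decode of a JMP ptr16:32 immediate (opcode EA), assuming a
 * little-endian host.  The 6-byte far-pointer immediate is the 32-bit
 * offset followed by the 16-bit selector.  Not Bochs code. */
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
    /* EA 78 56 34 12 08 00  =>  jmp 0x0008:0x12345678 */
    const uint8_t insn[] = { 0xEA, 0x78, 0x56, 0x34, 0x12, 0x08, 0x00 };

    uint32_t disp32;
    uint16_t cs_raw;
    std::memcpy(&disp32, &insn[1], 4);   /* corresponds to i->Id  */
    std::memcpy(&cs_raw, &insn[5], 2);   /* corresponds to i->Iw2 */

    std::printf("jmp %04x:%08x\n", (unsigned) cs_raw, (unsigned) disp32);
    return 0;
}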
Example #4
  void
bx_cpu_c::CALL32_Ap(BxInstruction_t *i)
{
  Bit16u cs_raw;
  Bit32u disp32;

#if BX_DEBUGGER
  bx_cpu. show_flag |= Flag_call;
#endif

  disp32 = i->Id;
  cs_raw = i->Iw2;
  invalidate_prefetch_q();

  if (protected_mode()) {
    bx_cpu. call_protected(i, cs_raw, disp32);
    goto done;
    }
  push_32(bx_cpu. sregs[BX_SEG_REG_CS].selector.value);
  push_32(bx_cpu. eip);
  bx_cpu. eip = disp32;
  load_seg_reg(&bx_cpu. sregs[BX_SEG_REG_CS], cs_raw);

done:
  BX_INSTR_FAR_BRANCH(BX_INSTR_IS_CALL,
                      bx_cpu. sregs[BX_SEG_REG_CS].selector.value, bx_cpu. eip);
  return;
}
Example #5
  void
bx_cpu_c::RETfar32(BxInstruction_t *i)
{
  Bit32u eip, ecs_raw;

#if BX_DEBUGGER
  bx_cpu. show_flag |= Flag_ret;
#endif

  invalidate_prefetch_q();

#if BX_CPU_LEVEL >= 2
  if ( protected_mode() ) {
    bx_cpu. return_protected(i, 0);
    goto done;
    }
#endif


    pop_32(&eip);
    pop_32(&ecs_raw); /* 32bit pop, MSW discarded */
    bx_cpu. eip = eip;
    load_seg_reg(&bx_cpu. sregs[BX_SEG_REG_CS], (Bit16u) ecs_raw);

done:
  BX_INSTR_FAR_BRANCH(BX_INSTR_IS_RET,
                      bx_cpu. sregs[BX_SEG_REG_CS].selector.value, bx_cpu. eip);
  return;
}
Example #6
  void
bx_cpu_c::CALL_Ad(BxInstruction_t *i)
{
  Bit32u new_EIP;
  Bit32s disp32;

#if BX_DEBUGGER
  bx_cpu. show_flag |= Flag_call;
#endif

  disp32 = i->Id;
  invalidate_prefetch_q();

  new_EIP = EIP + disp32;

  if ( protected_mode() ) {
    if ( new_EIP > bx_cpu. sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled ) {
      BX_PANIC(("call_av: offset outside of CS limits"));
      exception(BX_GP_EXCEPTION, 0, 0);
      }
    }

  /* push 32 bit EA of next instruction */
  push_32(bx_cpu. eip);
  bx_cpu. eip = new_EIP;

  BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_CALL, bx_cpu. eip);
}
Example #7
  void
bx_cpu_c::pop_16(Bit16u *value16_ptr)
{
  Bit32u temp_ESP;

#if BX_CPU_LEVEL >= 3
  if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    temp_ESP = ESP;
  else
#endif
    temp_ESP = SP;

#if BX_CPU_LEVEL >= 2
  if (protected_mode()) {
    if ( !can_pop(2) ) {
      BX_INFO(("pop_16(): can't pop from stack"));
      exception(BX_SS_EXCEPTION, 0, 0);
      return;
      }
    }
#endif


  /* access within limits */
  read_virtual_word(BX_SEG_REG_SS, temp_ESP, value16_ptr);

  if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    ESP += 2;
  else
    SP += 2;
}
Example #8
  void
bx_cpu_c::pop_32(Bit32u *value32_ptr)
{
  Bit32u temp_ESP;

  /* 32-bit stack mode uses SS:ESP; 16-bit stack mode uses SS:SP */
  if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    temp_ESP = ESP;
  else
    temp_ESP = SP;

  if (protected_mode()) {
    if ( !can_pop(4) ) {
      BX_PANIC(("pop_32(): can't pop from stack"));
      exception(BX_SS_EXCEPTION, 0, 0);
      return;
      }
    }

  /* access within limits */
  read_virtual_dword(BX_SEG_REG_SS, temp_ESP, value32_ptr);

  if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    ESP += 4;
  else
    SP += 4;
}
Example #9
  void
bx_cpu_c::JMP32_Ep(BxInstruction_t *i)
{
  Bit16u cs_raw;
  Bit32u op1_32;

    /* op1_32 is a register or memory reference */
    if (i->mod == 0xc0) {
      /* far indirect must specify a memory address */
      BX_PANIC(("JMP_Ep(): op1 is a register"));
      }

    /* pointer, segment address pair */
    read_virtual_dword(i->seg, i->rm_addr, &op1_32);
    read_virtual_word(i->seg, i->rm_addr+4, &cs_raw);
    invalidate_prefetch_q();

    if ( protected_mode() ) {
      bx_cpu. jump_protected(i, cs_raw, op1_32);
      goto done;
      }

    bx_cpu. eip = op1_32;
    load_seg_reg(&bx_cpu. sregs[BX_SEG_REG_CS], cs_raw);

done:
  BX_INSTR_FAR_BRANCH(BX_INSTR_IS_JMP,
                      bx_cpu. sregs[BX_SEG_REG_CS].selector.value, bx_cpu. eip);
  return;
}
Example #10
  /* push 32 bit operand size */
  void
bx_cpu_c::push_32(Bit32u value32)
{
  /* must use StackAddrSize, and either ESP or SP accordingly */
  if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b) { /* StackAddrSize = 32 */
    /* 32bit stack size: pushes use SS:ESP  */
    if (protected_mode()) {
      if (!can_push(&bx_cpu. sregs[BX_SEG_REG_SS].cache, ESP, 4)) {
        BX_PANIC(("push_32(): push outside stack limits"));
        /* #SS(0) */
        }
      }
    else { /* real mode */
      if ((ESP>=1) && (ESP<=3)) {
        BX_PANIC(("push_32: ESP=%08x", (unsigned) ESP));
        }
      }

    write_virtual_dword(BX_SEG_REG_SS, ESP-4, &value32);
    ESP -= 4;
    /* will return after error anyway */
    return;
    }
  else { /* 16bit stack size: pushes use SS:SP  */
    if (protected_mode()) {
      if (!can_push(&bx_cpu. sregs[BX_SEG_REG_SS].cache, SP, 4)) {
        BX_PANIC(("push_32(): push outside stack limits"));
        /* #SS(0) */
        }
      }
    else { /* real mode */
      if ((SP>=1) && (SP<=3)) {
        BX_PANIC(("push_32: SP=%08x", (unsigned) SP));
        }
      }

    write_virtual_dword(BX_SEG_REG_SS, (Bit16u) (SP-4), &value32);
    SP -= 4;
    /* will return after error anyway */
    return;
    }
}
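
On the 16-bit-stack path above only SP participates in addressing, which is why push_32 casts (SP - 4) to Bit16u before writing: both the effective address and SP wrap modulo 64K rather than borrowing into the upper half of ESP. A tiny sketch of that wrap (not Bochs code):

/* Tiny sketch of the 16-bit-stack wrap handled above (not Bochs code):
 * with d_b == 0 only SP participates in addressing, so the effective
 * address and SP both wrap modulo 64K. */
#include <cstdint>
#include <cstdio>

int main() {
    uint16_t sp = 2;                     /* nearly empty 16-bit stack */
    uint16_t ea = (uint16_t)(sp - 4);    /* effective address wraps to 0xFFFE */
    sp -= 4;                             /* SP wraps the same way */
    std::printf("dword written at SS:%04x, new SP=%04x\n",
                (unsigned) ea, (unsigned) sp);
    return 0;
}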
Example #11
  void
bx_cpu_c::JCC_Jd(BxInstruction_t *i)
{
  Boolean condition = 0;

  switch (i->b1 & 0x0f) {
    case 0x00: /* JO */ condition = get_OF(); break;
    case 0x01: /* JNO */ condition = !get_OF(); break;
    case 0x02: /* JB */ condition = get_CF(); break;
    case 0x03: /* JNB */ condition = !get_CF(); break;
    case 0x04: /* JZ */ condition = get_ZF(); break;
    case 0x05: /* JNZ */ condition = !get_ZF(); break;
    case 0x06: /* JBE */ condition = get_CF() || get_ZF(); break;
    case 0x07: /* JNBE */ condition = !get_CF() && !get_ZF(); break;
    case 0x08: /* JS */ condition = get_SF(); break;
    case 0x09: /* JNS */ condition = !get_SF(); break;
    case 0x0A: /* JP */ condition = get_PF(); break;
    case 0x0B: /* JNP */ condition = !get_PF(); break;
    case 0x0C: /* JL */ condition = get_SF() != get_OF(); break;
    case 0x0D: /* JNL */ condition = get_SF() == get_OF(); break;
    case 0x0E: /* JLE */ condition = get_ZF() || (get_SF() != get_OF());
      break;
    case 0x0F: /* JNLE */ condition = (get_SF() == get_OF()) &&
                            !get_ZF();
      break;
    }

  if (condition) {
    Bit32u new_EIP;

    new_EIP = EIP + (Bit32s) i->Id;
#if BX_CPU_LEVEL >= 2
    if (protected_mode()) {
      if ( new_EIP > bx_cpu. sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled ) {
        BX_PANIC(("jo_routine: offset outside of CS limits"));
        exception(BX_GP_EXCEPTION, 0, 0);
        }
      }
#endif
    EIP = new_EIP;
    BX_INSTR_CNEAR_BRANCH_TAKEN(new_EIP);
    revalidate_prefetch_q();
    }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN();
    }
#endif
}
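
The switch above follows a regular pattern: the low nibble of the opcode picks one of eight predicates, and each odd nibble negates the even one before it (JNBE is !(CF||ZF), JNLE is !(ZF||(SF!=OF)), and so on). A standalone restatement of the table (a sketch, not Bochs code; the names are illustrative):

/* Standalone restatement of the Jcc condition table (a sketch, not Bochs
 * code): even/odd opcode nibbles share a predicate, with odd = negated. */
#include <cstdio>

struct Flags { bool of, cf, zf, sf, pf; };

static bool jcc_taken(unsigned nibble, const Flags& f) {
    bool c = false;
    switch (nibble >> 1) {
        case 0: c = f.of; break;                      /* JO  / JNO  */
        case 1: c = f.cf; break;                      /* JB  / JNB  */
        case 2: c = f.zf; break;                      /* JZ  / JNZ  */
        case 3: c = f.cf || f.zf; break;              /* JBE / JNBE */
        case 4: c = f.sf; break;                      /* JS  / JNS  */
        case 5: c = f.pf; break;                      /* JP  / JNP  */
        case 6: c = f.sf != f.of; break;              /* JL  / JNL  */
        case 7: c = f.zf || (f.sf != f.of); break;    /* JLE / JNLE */
    }
    return (nibble & 1) ? !c : c;                     /* odd = negation */
}

int main() {
    Flags f = { false, false, true, false, false };   /* only ZF set */
    std::printf("JZ taken: %d, JNZ taken: %d\n",
                (int) jcc_taken(0x4, f), (int) jcc_taken(0x5, f));
    return 0;
}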
Example #12
  void
bx_cpu_c::push_16(Bit16u value16)
{
  Bit32u temp_ESP;


#if BX_CPU_LEVEL >= 2
  if (protected_mode()) {
#if BX_CPU_LEVEL >= 3
    if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
      temp_ESP = ESP;
    else
#endif
      temp_ESP = SP;
    if (!can_push(&bx_cpu. sregs[BX_SEG_REG_SS].cache, temp_ESP, 2)) {
      BX_PANIC(("push_16(): can't push on stack"));
      exception(BX_SS_EXCEPTION, 0, 0);
      return;
      }

    /* access within limits */
    write_virtual_word(BX_SEG_REG_SS, temp_ESP - 2, &value16);
    if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
      ESP -= 2;
    else
      SP -= 2;
    return;
    }
  else
#endif
    { /* real mode */
    if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b) {
      if (ESP == 1)
        BX_PANIC(("CPU shutting down due to lack of stack space, ESP==1"));
      ESP -= 2;
      temp_ESP = ESP;
      }
    else {
      if (SP == 1)
        BX_PANIC(("CPU shutting down due to lack of stack space, SP==1"));
      SP -= 2;
      temp_ESP = SP;
      }

    write_virtual_word(BX_SEG_REG_SS, temp_ESP, &value16);
    return;
    }
}
Example #13
  void
bx_cpu_c::RETnear32(BxInstruction_t *i)
{
  Bit32u temp_ESP;
  Bit32u return_EIP;

#if BX_DEBUGGER
  bx_cpu. show_flag |= Flag_ret;
#endif

  invalidate_prefetch_q();

  if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
    temp_ESP = ESP;
  else
    temp_ESP = SP;


    if (protected_mode()) {
      if ( !can_pop(4) ) {
        BX_PANIC(("retnear: can't pop EIP"));
        /* ??? #SS(0) -or #GP(0) */
        }

      access_linear(bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.base + temp_ESP + 0,
        4, CPL==3, BX_READ, &return_EIP);

      if ( return_EIP > bx_cpu. sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled ) {
        BX_PANIC(("retnear: EIP > limit"));
        //exception(BX_GP_EXCEPTION, 0, 0);
        }
      bx_cpu. eip = return_EIP;
      if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
        ESP += 4;
      else
        SP  += 4;
      }
    else {
      pop_32(&return_EIP);
      bx_cpu. eip = return_EIP;
      }

  BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_RET, bx_cpu. eip);
}
Example #14
  void
bx_cpu_c::PUSHAD16(BxInstruction_t *i)
{
#if BX_CPU_LEVEL < 2
  BX_PANIC(("PUSHAD: not supported on an 8086"));
#else
  Bit32u temp_ESP;
  Bit16u sp;

  if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    temp_ESP = ESP;
  else
    temp_ESP = SP;


#if BX_CPU_LEVEL >= 2
    if (protected_mode()) {
      if ( !can_push(&bx_cpu. sregs[BX_SEG_REG_SS].cache, temp_ESP, 16) ) {
        BX_PANIC(("PUSHA(): stack doesn't have enough room!"));
        exception(BX_SS_EXCEPTION, 0, 0);
        return;
        }
      }
    else
#endif
      {
      if (temp_ESP < 16)
        BX_PANIC(("pushad: eSP < 16"));
      }

    sp = SP;

    /* ??? optimize this by using virtual write, all checks passed */
    push_16(AX);
    push_16(CX);
    push_16(DX);
    push_16(BX);
    push_16(sp);
    push_16(BP);
    push_16(SI);
    push_16(DI);
#endif
}
Example #15
  void
bx_cpu_c::CALL_Ed(BxInstruction_t *i)
{
  Bit32u temp_ESP;
  Bit32u op1_32;

#if BX_DEBUGGER
  bx_cpu. show_flag |= Flag_call;
#endif

  if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    temp_ESP = ESP;
  else
    temp_ESP = SP;


    /* op1_32 is a register or memory reference */
    if (i->mod == 0xc0) {
      op1_32 = BX_READ_32BIT_REG(i->rm);
      }
    else {
      read_virtual_dword(i->seg, i->rm_addr, &op1_32);
      }
    invalidate_prefetch_q();

    if (protected_mode()) {
      if (op1_32 > bx_cpu. sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
        BX_DEBUG(("call_ev: EIP out of CS limits! at %s:%d"));
        exception(BX_GP_EXCEPTION, 0, 0);
        }
      if ( !can_push(&bx_cpu. sregs[BX_SEG_REG_SS].cache, temp_ESP, 4) ) {
        BX_PANIC(("call_ev: can't push EIP"));
        }
      }

    push_32(bx_cpu. eip);

    bx_cpu. eip = op1_32;

  BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_CALL, bx_cpu. eip);
}
Example #16
  void
bx_cpu_c::JMP_Jd(BxInstruction_t *i)
{
  Bit32u new_EIP;

    invalidate_prefetch_q();

    new_EIP = EIP + (Bit32s) i->Id;

#if BX_CPU_LEVEL >= 2
  if (protected_mode()) {
    if ( new_EIP > bx_cpu. sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled ) {
      BX_PANIC(("jmp_jv: offset outside of CS limits"));
      exception(BX_GP_EXCEPTION, 0, 0);
      }
    }
#endif

  bx_cpu. eip = new_EIP;
  BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_JMP, new_EIP);
}
Example #17
  void
bx_cpu_c::RETfar32_Iw(BxInstruction_t *i)
{
  Bit32u eip, ecs_raw;
  Bit16u imm16; /* unsigned byte count, matching RETnear32_Iw above */

#if BX_DEBUGGER
  bx_cpu. show_flag |= Flag_ret;
#endif
  /* ??? is imm16 a count of bytes or of words, depending on operand size? */

  imm16 = i->Iw;

  invalidate_prefetch_q();

#if BX_CPU_LEVEL >= 2
  if (protected_mode()) {
    bx_cpu. return_protected(i, imm16);
    goto done;
    }
#endif


    pop_32(&eip);
    pop_32(&ecs_raw);
    bx_cpu. eip = eip;
    load_seg_reg(&bx_cpu. sregs[BX_SEG_REG_CS], (Bit16u) ecs_raw);
    if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
      ESP += imm16;
    else
      SP  += imm16;

done:
  BX_INSTR_FAR_BRANCH(BX_INSTR_IS_RET,
                      bx_cpu. sregs[BX_SEG_REG_CS].selector.value, bx_cpu. eip);
  return;
}
Example #18
  void
bx_cpu_c::POPAD16(BxInstruction_t *i)
{
#if BX_CPU_LEVEL < 2
  BX_PANIC(("POPAD not supported on an 8086"));
#else /* 286+ */

    Bit16u di, si, bp, tmp, bx, dx, cx, ax;

    if (protected_mode()) {
      if ( !can_pop(16) ) {
        BX_PANIC(("pop_a: not enough bytes on stack"));
        exception(BX_SS_EXCEPTION, 0, 0);
        return;
        }
      }

    /* ??? optimize this */
    pop_16(&di);
    pop_16(&si);
    pop_16(&bp);
    pop_16(&tmp); /* value for SP discarded */
    pop_16(&bx);
    pop_16(&dx);
    pop_16(&cx);
    pop_16(&ax);

    DI = di;
    SI = si;
    BP = bp;
    BX = bx;
    DX = dx;
    CX = cx;
    AX = ax;
#endif
}
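
Note how the pair cooperates: PUSHAD16 pushes the value SP held before the instruction (the sp snapshot), and POPAD16 pops that slot into tmp and throws it away, so the stack pointer is recovered purely by the eight pops. A toy word-indexed model of the round trip (not Bochs code):

/* Toy word-indexed model of the PUSHA/POPA pairing (not Bochs code):
 * PUSHA stores the pre-push SP, POPA pops that slot and discards it,
 * mirroring the pop_16(&tmp) above. */
#include <cstdint>
#include <cstdio>

int main() {
    uint16_t mem[64] = {0};
    uint16_t sp = 32;                              /* word-indexed toy SP */

    uint16_t regs[8] = { 1, 2, 3, 0, 0, 5, 6, 7 }; /* AX CX DX BX SP BP SI DI */
    regs[4] = sp;                                  /* PUSHA pushes the original SP */
    for (int r = 0; r < 8; r++) mem[--sp] = regs[r];

    uint16_t out[8];
    for (int r = 7; r >= 0; r--) out[r] = mem[sp++];
    /* out[4] (the stored SP) is discarded; SP is back purely via the pops */
    std::printf("SP restored: %u, AX=%u DI=%u\n",
                (unsigned) sp, (unsigned) out[0], (unsigned) out[7]);
    return 0;
}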
Example #19
  void
bx_cpu_c::CALL32_Ep(BxInstruction_t *i)
{
  Bit16u cs_raw;
  Bit32u op1_32;

#if BX_DEBUGGER
  bx_cpu. show_flag |= Flag_call;
#endif

    /* op1_32 is a register or memory reference */
    if (i->mod == 0xc0) {
      BX_PANIC(("CALL_Ep: op1 is a register"));
      }

    /* pointer, segment address pair */
    read_virtual_dword(i->seg, i->rm_addr, &op1_32);
    read_virtual_word(i->seg, i->rm_addr+4, &cs_raw);
    invalidate_prefetch_q();

    if ( protected_mode() ) {
      bx_cpu. call_protected(i, cs_raw, op1_32);
      goto done;
      }

    push_32(bx_cpu. sregs[BX_SEG_REG_CS].selector.value);
    push_32(bx_cpu. eip);

    bx_cpu. eip = op1_32;
    load_seg_reg(&bx_cpu. sregs[BX_SEG_REG_CS], cs_raw);

done:
  BX_INSTR_FAR_BRANCH(BX_INSTR_IS_CALL,
                      bx_cpu. sregs[BX_SEG_REG_CS].selector.value, bx_cpu. eip);
  return;
}
Example #20
  void
BX_CPU_C::read_virtual_checks(bx_segment_reg_t *seg, bx_address offset,
                              unsigned length)
{
  Bit32u upper_limit;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // do canonical checks
    if (!IsCanonical(offset)) {
      BX_ERROR(("read_virtual_checks(): canonical Failure 0x%08x:%08x", GET32H(offset), GET32L(offset)));
      exception(int_number(seg), 0, 0);
    }
    seg->cache.valid |= SegAccessROK;
    return;
  }
#endif
  if (protected_mode()) {
    if (seg->cache.valid==0) {
      BX_DEBUG(("read_virtual_checks(): segment descriptor not valid"));
      exception(int_number(seg), 0, 0);
    }

    if (seg->cache.p == 0) { /* not present */
      BX_ERROR(("read_virtual_checks(): segment not present"));
      exception(int_number(seg), 0, 0);
    }

    switch (seg->cache.type) {
      case 0: case 1: /* read only */
      case 2: case 3: /* read/write */
      case 10: case 11: /* execute/read */
      case 14: case 15: /* execute/read-only, conforming */
        if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
            || (length-1 > seg->cache.u.segment.limit_scaled))
        {
          BX_ERROR(("read_virtual_checks(): read beyond limit"));
          exception(int_number(seg), 0, 0);
        }
        if (seg->cache.u.segment.limit_scaled >= 7) {
          // Mark cache as being OK type for succeeding reads. See notes for
          // write checks; similar code.
          seg->cache.valid |= SegAccessROK;
        }
        break;

      case 4: case 5: /* read only, expand down */
      case 6: case 7: /* read/write, expand down */
        if (seg->cache.u.segment.d_b)
          upper_limit = 0xffffffff;
        else
          upper_limit = 0x0000ffff;
        if ((offset <= seg->cache.u.segment.limit_scaled) ||
             (offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
        {
          BX_ERROR(("read_virtual_checks(): read beyond limit"));
          exception(int_number(seg), 0, 0);
        }
        break;

      case 8: case 9: /* execute only */
      case 12: case 13: /* execute only, conforming */
        /* can't read or write an execute-only segment */
        BX_ERROR(("read_virtual_checks(): execute only"));
        exception(int_number(seg), 0, 0);
    }
    return;
  }
  else { /* real mode */
    if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
        || (length-1 > seg->cache.u.segment.limit_scaled))
    {
      BX_DEBUG(("read_virtual_checks(): read beyond limit (real mode)"));
      exception(int_number(seg), 0, 0);
    }
    if (seg->cache.u.segment.limit_scaled >= 7) {
      // Mark cache as being OK type for succeeding reads. See notes for
      // write checks; similar code.
      seg->cache.valid |= SegAccessROK;
    }
  }
}
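
The expand-down cases above (descriptor types 4-7) invert the usual meaning of the limit: valid offsets lie strictly above limit_scaled, up to 0xFFFF or 0xFFFFFFFF depending on the d_b bit. A standalone check mirroring that logic (a sketch, not Bochs code):

/* Standalone check mirroring the expand-down cases (types 4-7) above; a
 * sketch, not Bochs code.  For expand-down segments the valid range is
 * (limit_scaled, upper_limit], so offsets at or below the limit fault. */
#include <cstdint>
#include <cstdio>

static bool ed_read_ok(uint32_t offset, unsigned length,
                       uint32_t limit_scaled, bool big) {
    uint32_t upper = big ? 0xffffffffu : 0x0000ffffu;
    if (offset <= limit_scaled) return false;           /* below the hole   */
    if (offset > upper) return false;                   /* above the top    */
    if ((upper - offset) < (length - 1)) return false;  /* runs off the top */
    return true;
}

int main() {
    /* 16-bit expand-down segment, limit 0x0FFF: valid offsets 0x1000..0xFFFF */
    std::printf("%d %d %d\n",
                (int) ed_read_ok(0x0FFF, 2, 0x0FFF, false),  /* 0: at the limit  */
                (int) ed_read_ok(0x1000, 2, 0x0FFF, false),  /* 1: just above it */
                (int) ed_read_ok(0xFFFF, 2, 0x0FFF, false)); /* 0: past the top  */
    return 0;
}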
Example #21
  void
BX_CPU_C::write_virtual_checks(bx_segment_reg_t *seg, bx_address offset,
                               unsigned length)
{
  Bit32u upper_limit;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // do canonical checks
    if (!IsCanonical(offset)) {
      BX_ERROR(("write_virtual_checks(): canonical Failure 0x%08x:%08x", GET32H(offset), GET32L(offset)));
      exception(int_number(seg), 0, 0);
    }
    seg->cache.valid |= SegAccessWOK;
    return;
  }
#endif
  if (protected_mode()) {
    if (seg->cache.valid==0) {
      BX_DEBUG(("write_virtual_checks(): segment descriptor not valid"));
      exception(int_number(seg), 0, 0);
    }

    if (seg->cache.p == 0) { /* not present */
      BX_ERROR(("write_virtual_checks(): segment not present"));
      exception(int_number(seg), 0, 0);
    }

    switch (seg->cache.type) {
      case 0: case 1:   // read only
      case 4: case 5:   // read only, expand down
      case 8: case 9:   // execute only
      case 10: case 11: // execute/read
      case 12: case 13: // execute only, conforming
      case 14: case 15: // execute/read-only, conforming
        BX_ERROR(("write_virtual_checks(): no write access to seg"));
        exception(int_number(seg), 0, 0);

      case 2: case 3: /* read/write */
        if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
            || (length-1 > seg->cache.u.segment.limit_scaled))
        {
          BX_ERROR(("write_virtual_checks(): write beyond limit, r/w"));
          exception(int_number(seg), 0, 0);
        }
        if (seg->cache.u.segment.limit_scaled >= 7) {
          // Mark the cache as OK for succeeding writes.  The limit check
          // still needs to be done, but it is simpler.  We could probably
          // also optimize that out with a flag for the case when the limit
          // is the maximum 32-bit value.  The limit should accommodate at
          // least a dword, since we subtract from it in the simple limit
          // check in other functions, and we don't want the value to roll
          // over.  Only normal segments (not expand-down) are handled this way.
          seg->cache.valid |= SegAccessWOK;
        }
        break;

      case 6: case 7: /* read/write, expand down */
        if (seg->cache.u.segment.d_b)
          upper_limit = 0xffffffff;
        else
          upper_limit = 0x0000ffff;
        if ((offset <= seg->cache.u.segment.limit_scaled) ||
             (offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
        {
          BX_ERROR(("write_virtual_checks(): write beyond limit, r/w ED"));
          exception(int_number(seg), 0, 0);
        }
        break;
    }

    return;
  }
  else { /* real mode */
    if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
          || (length-1 > seg->cache.u.segment.limit_scaled))
    {
      BX_DEBUG(("write_virtual_checks(): write beyond limit (real mode)"));
      exception(int_number(seg), 0, 0);
    }
    if (seg->cache.u.segment.limit_scaled >= 7) {
      // Mark cache as being OK type for succeeding writes. See notes above.
      seg->cache.valid |= SegAccessWOK;
    }
  }
}
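
Both routines only set SegAccessROK/SegAccessWOK when limit_scaled >= 7, because the fast path in the actual access functions compares the offset against limit_scaled - length + 1 and a smaller limit would let that subtraction wrap. A sketch of such a caller (the caller and constants are assumptions for illustration, not copies of the Bochs access functions):

/* Sketch of a caller exploiting the cached SegAccessWOK flag; this caller
 * and the constants are assumptions for illustration, not a copy of the
 * Bochs access functions.  The limit_scaled >= 7 gate above keeps the
 * fast-path subtraction below from wrapping for any length up to 8. */
#include <cstdint>
#include <cstdio>

enum { SegAccessWOK = 2 };                    /* illustrative value */

struct SegCache { unsigned valid; uint32_t limit_scaled; };

static bool write_fast_path_ok(const SegCache& c, uint32_t offset,
                               unsigned length) {
    /* on failure the caller would fall back to write_virtual_checks() */
    return (c.valid & SegAccessWOK) && offset <= (c.limit_scaled - length + 1);
}

int main() {
    SegCache c = { SegAccessWOK, 0xFFFF };
    std::printf("%d %d\n",
                (int) write_fast_path_ok(c, 0xFFFC, 4),  /* 1: fits under limit */
                (int) write_fast_path_ok(c, 0xFFFD, 4)); /* 0: slow path        */
    return 0;
}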