Example #1
#include <stdio.h>

/* add2 .. rsb1 are assumed to be defined elsewhere (e.g. as assembly stubs). */
extern int add2(int, int), add3(int, int, int), bic(int, int),
           mvn(int, int), rsb3(int, int, int), rsb1(int);

int main(void)
{
	printf("add2(%d, %d)     = %d\n", 3, 1,    add2(3, 1));
	printf("add3(%d, %d, %d) = %d\n", 1, 3, 1, add3(1, 3, 1));
	printf("bic(%d, %d)      = %d\n", 3, 1,    bic(3, 1));
	printf("mvn(%d, %d)      = %d\n", 3, 1,    mvn(3, 1));
	printf("rsb3(%d, %d, %d) = %d\n", 3, 1, 1, rsb3(3, 1, 1));
	printf("rsb1(%d)         = %d\n", 3,       rsb1(3));
	return 0;
}
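
The routines exercised by main() are presumably assembly stubs named after the ARM data-processing instructions they wrap. As a hedged reference (hypothetical *_ref helpers; ARM defines BIC as Rn AND NOT Op2, MVN as NOT Op2, and RSB as Op2 - Rn):

static int bic_ref(int rn, int op2) { return rn & ~op2; } /* bic(3, 1) == 2   */
static int mvn_ref(int op2)         { return ~op2; }      /* bitwise NOT      */
static int rsb_ref(int rn, int op2) { return op2 - rn; }  /* reverse subtract */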
void InterpreterStubs::generate_current_thread_to_primordial() {
  Segment seg(this, code_segment, "Current thread to primordial");
  bind_global("current_thread_to_primordial");
  
  comment("Set up global pointer, as we can be called from C code");
  ldr_gp_base(gp);
  
  bind_global("current_thread_to_primordial_fast");

  // We're never going to return to this thread, so it doesn't matter if
  // it doesn't look like a stopped Java thread anymore.
  get_primordial_sp(sp);
  comment("restore permanent registers (including return address)");
  ldr(lr, imm_index(sp, BytesPerWord, post_indexed));
  ldmfd(sp, range(r3, r11), writeback);
  jmpx(lr);

  if (GenerateDebugAssembly) {
    bind_local("interpreter_bkpt");
    get_gp_bytecode_counter(tmp3);
    add(tmp3, tmp3, imm(1));
    set_gp_bytecode_counter(tmp3);
    mov(pc, reg(tmp0));
  }

#if ENABLE_XSCALE_WMMX_TIMER_TICK && !ENABLE_TIMER_THREAD
  // set timer_tick from WMMX wCASF register
  comment("wmmx_set_timer_tick to set timer_tick from WMMX register");
  bind_global("wmmx_set_timer_tick");
  // tmrc(r2, wCASF);
  define_long(0xEE132110);
  mvn(r3, imm(4));
  andr(r2, r2, reg(r3));  // clear bit 2 of wCASF
  // tmcr(wCASF, r2);
  define_long(0xEE032110);
  jmpx(lr);
  // clear timer_tick from WMMX wCASF register
  comment("wmmx_set_timer_tick to clear timer_tick from WMMX register");
  bind_global("wmmx_clear_timer_tick");
  // wcmpgtub(wR0, wR0, wR0);
  define_long(0xEE100060);
  jmpx(lr);
#endif // ENABLE_XSCALE_WMMX_TIMER_TICK && !ENABLE_TIMER_THREAD

}
Example #3
void MacroAssembler::atomic_cas_bool(Register oldval, Register newval, Register base, int offset, Register tmpreg) {
  if (VM_Version::supports_ldrex()) {
    Register tmp_reg;
    if (tmpreg == noreg) {
      push(LR);
      tmp_reg = LR;
    } else {
      tmp_reg = tmpreg;
    }
    assert_different_registers(tmp_reg, oldval, newval, base);
    Label loop;
    bind(loop);
    ldrex(tmp_reg, Address(base, offset));
    subs(tmp_reg, tmp_reg, oldval);                    // zero iff memory held oldval
    strex(tmp_reg, newval, Address(base, offset), eq); // tmp_reg = 0 on success, 1 on failure
    cmp(tmp_reg, 1, eq);
    b(loop, eq);                                       // retry if the store-exclusive failed
    cmp(tmp_reg, 0);                                   // eq for the caller iff the swap happened
    if (tmpreg == noreg) {
      pop(tmp_reg);
    }
  } else if (VM_Version::supports_kuser_cmpxchg32()) {
    // On armv5 platforms we must use the Linux kernel helper
    // function for atomic cas operations since ldrex/strex is
    // not supported.
    //
    // This is a special routine at a fixed address 0xffff0fc0 with
    // these arguments and results (a C-level sketch of calling it
    // follows this function):
    //
    // input:
    //  r0 = oldval, r1 = newval, r2 = ptr, lr = return address
    // output:
    //  r0 == 0, carry set on success
    //  r0 != 0, carry clear on failure
    //
    // r3, ip and flags are clobbered
    //

    Label loop;

    push(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(LR));

    Register tmp_reg = LR; // ignore the argument

    assert_different_registers(tmp_reg, oldval, newval, base);

    // Shuffle oldval/newval/base into R0/R1/R2 for the kernel call,
    // taking care not to clobber a value that is still needed
    if (oldval != R0) {
      if (newval == R0) {
        mov(tmp_reg, newval);
        newval = tmp_reg;
      }
      if (base == R0) {
        mov(tmp_reg, base);
        base = tmp_reg;
      }
      mov(R0, oldval);
    }
    if (newval != R1) {
      if (base == R1) {
        if (newval == R2) {
          mov(tmp_reg, base);
          base = tmp_reg;
        }
        else {
          mov(R2, base);
          base = R2;
        }
      }
      mov(R1, newval);
    }
    if (base != R2) {
      mov(R2, base);
    }

    if (offset != 0) {
      add(R2, R2, offset);
    }

    mvn(R3, 0xf000);             // R3 = ~0xf000 = 0xffff0fff
    mov(LR, PC);
    sub(PC, R3, 0x3f);           // call the kernel helper at 0xffff0fc0
    cmp(R0, 0);                  // helper returned 0 on success; give the caller eq/ne

    pop(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(LR));
  } else {
    // Should never run on a platform so old that it does not have kernel helper
    stop("Atomic cmpxchg32 unsupported on this platform");
  }
}
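
For reference, the Linux kernel documents a C-level calling convention for this helper; below is a minimal sketch of invoking __kernel_cmpxchg directly (typedef and fixed address as given in the kernel's kernel_user_helpers documentation; cas_bool_ref is a hypothetical name):

#include <stdbool.h>

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

static bool cas_bool_ref(volatile int *ptr, int oldval, int newval) {
  /* The helper returns zero (with carry set) when the swap succeeded. */
  return __kernel_cmpxchg(oldval, newval, ptr) == 0;
}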
Example #4
void MacroAssembler::atomic_cas64(Register memval_lo, Register memval_hi, Register result, Register oldval_lo, Register oldval_hi, Register newval_lo, Register newval_hi, Register base, int offset) {
  if (VM_Version::supports_ldrexd()) {
    Label loop;
    assert_different_registers(memval_lo, memval_hi, result, oldval_lo,
                               oldval_hi, newval_lo, newval_hi, base);
    assert(memval_hi == memval_lo + 1 && memval_lo < R9, "cmpxchg_long: illegal registers");
    assert(oldval_hi == oldval_lo + 1 && oldval_lo < R9, "cmpxchg_long: illegal registers");
    assert(newval_hi == newval_lo + 1 && newval_lo < R9, "cmpxchg_long: illegal registers");
    assert(result != R10, "cmpxchg_long: illegal registers");
    assert(base != R10, "cmpxchg_long: illegal registers");

    mov(result, 0);
    bind(loop);
    ldrexd(memval_lo, Address(base, offset));             // loads the memval_lo:memval_hi pair
    cmp(memval_lo, oldval_lo);
    cmp(memval_hi, oldval_hi, eq);
    strexd(result, newval_lo, Address(base, offset), eq); // result = 0 on success
    rsbs(result, result, 1, eq);                          // result = 1 on success, 0 (eq) on a lost reservation
    b(loop, eq);                                          // retry if the store-exclusive failed
  } else if (VM_Version::supports_kuser_cmpxchg64()) {
    // On armv5 platforms we must use the Linux kernel helper
    // function for atomic cas64 operations since ldrexd/strexd is
    // not supported.
    //
    // This is a special routine at a fixed address 0xffff0f60
    // (a C-level sketch of calling it follows this function)
    //
    // input:
    //  r0 = (long long *)oldval, r1 = (long long *)newval,
    //  r2 = ptr, lr = return address
    // output:
    //  r0 == 0, carry set on success
    //  r0 != 0, carry clear on failure
    //
    // r3 and flags are clobbered
    //
    Label done;
    Label loop;

    if (result != R12) {
      push(R12);
    }
    push(RegisterSet(R10) | RegisterSet(LR));
    mov(R10, SP);         // Save SP

    bic(SP, SP, StackAlignmentInBytes - 1);  // align stack
    push(RegisterSet(oldval_lo, oldval_hi));
    push(RegisterSet(newval_lo, newval_hi));

    if ((offset != 0) || (base != R12)) {
      add(R12, base, offset);
    }
    push(RegisterSet(R0, R3));
    bind(loop);
    ldrd(memval_lo, Address(R12));    // current value in memory
    ldrd(oldval_lo, Address(SP, 24)); // reload the saved oldval pair
    cmp(memval_lo, oldval_lo);
    cmp(memval_hi, oldval_hi, eq);
    pop(RegisterSet(R0, R3), ne);
    mov(result, 0, ne);
    b(done, ne);
    // Setup for kernel call
    mov(R2, R12);
    add(R0, SP, 24);            // R0 == &oldval_lo
    add(R1, SP, 16);            // R1 == &newval_lo
    mvn(R3, 0xf000);            // call kernel helper at 0xffff0f60
    mov(LR, PC);
    sub(PC, R3, 0x9f);
    b(loop, cc);                 // if Carry clear then oldval != current
                                 // try again. Otherwise, return oldval
    // Here on success
    pop(RegisterSet(R0, R3));
    mov(result, 1);
    ldrd(memval_lo, Address(SP, 8));  // memory held oldval; return it as memval
    bind(done);
    pop(RegisterSet(newval_lo, newval_hi));
    pop(RegisterSet(oldval_lo, oldval_hi));
    mov(SP, R10);                 // restore SP
    pop(RegisterSet(R10) | RegisterSet(LR));
    if (result != R12) {
      pop(R12);
    }
  } else {
    stop("Atomic cmpxchg64 unsupported on this platform");
  }
}
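
The 64-bit helper has an analogous documented interface. Below is a hedged sketch of the loop the generated code performs, expressed against __kernel_cmpxchg64 (typedef and fixed address per the kernel's kernel_user_helpers documentation; cas64_ref is a hypothetical name mirroring atomic_cas64's contract):

#include <stdint.h>

typedef int (__kernel_cmpxchg64_t)(const int64_t *oldval,
                                   const int64_t *newval,
                                   volatile int64_t *ptr);
#define __kernel_cmpxchg64 (*(__kernel_cmpxchg64_t *)0xffff0f60)

static int cas64_ref(volatile int64_t *ptr, int64_t oldval, int64_t newval,
                     int64_t *memval) {
  for (;;) {
    int64_t cur = *ptr;
    if (cur != oldval) {
      *memval = cur;          /* compare failed: report the observed value */
      return 0;
    }
    if (__kernel_cmpxchg64(&oldval, &newval, ptr) == 0) {
      *memval = oldval;       /* swap happened: memory held oldval */
      return 1;
    }
    /* helper failed (contention): re-check and retry, as the stub does */
  }
}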
Example #5
void MacroAssembler::atomic_cas(Register temp1, Register temp2, Register oldval, Register newval, Register base, int offset) {
  if (temp1 != R0) {
    // try to read the previous value directly in R0
    if (temp2 == R0) {
      // R0 declared free
      temp2 = temp1;
      temp1 = R0;
    } else if ((oldval != R0) && (newval != R0) && (base != R0)) {
      // free, and scratched on return
      temp1 = R0;
    }
  }
  if (VM_Version::supports_ldrex()) {
    Label loop;
    assert_different_registers(temp1, temp2, oldval, newval, base);

    bind(loop);
    ldrex(temp1, Address(base, offset));
    cmp(temp1, oldval);
    strex(temp2, newval, Address(base, offset), eq); // temp2 = 0 on success, 1 on failure
    cmp(temp2, 1, eq);
    b(loop, eq);                                     // retry if the store-exclusive failed
    if (temp1 != R0) {
      mov(R0, temp1);
    }
  } else if (VM_Version::supports_kuser_cmpxchg32()) {
    // On armv5 platforms we must use the Linux kernel helper
    // function for atomic cas operations since ldrex/strex is
    // not supported.
    //
    // This is a special routine at a fixed address 0xffff0fc0
    // (see the C-level sketch after atomic_cas_bool above)
    //
    // input:
    //  r0 = oldval, r1 = newval, r2 = ptr, lr = return address
    // output:
    //  r0 == 0, carry set on success
    //  r0 != 0, carry clear on failure
    //
    // r3, ip and flags are clobbered
    //
    Label done;
    Label loop;

    push(RegisterSet(R1, R4) | RegisterSet(R12) | RegisterSet(LR));

    if (oldval != R0 || newval != R1 || base != R2) {
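      // Route (oldval, newval, base) into (R0, R1, R2) via the stack so
      // that overlapping source and destination registers cannot clobber
      // one another.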
      push(oldval);
      push(newval);
      push(base);
      pop(R2);
      pop(R1);
      pop(R0);
    }

    if (offset != 0) {
      add(R2, R2, offset);
    }

    mov(R4, R0);                 // preserve oldval; the helper call clobbers R0
    bind(loop);
    ldr(R0, Address(R2));
    cmp(R0, R4);
    b(done, ne);
    mvn(R12, 0xf000);            // R12 = ~0xf000 = 0xffff0fff
    mov(LR, PC);
    sub(PC, R12, 0x3f);          // call the kernel helper at 0xffff0fc0
    b(loop, cc);                 // carry clear: the helper failed, retry
    mov(R0, R4);                 // success: memory held oldval, return it
    bind(done);

    pop(RegisterSet(R1, R4) | RegisterSet(R12) | RegisterSet(LR));
  } else {
    // Should never run on a platform so old that it does not have kernel helper
    stop("Atomic cmpxchg32 unsupported on this platform");
  }
}
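
atomic_cas is the value-returning form of compare-and-swap: it leaves in R0 the value previously found in memory, which equals oldval exactly when the swap took place. A one-line sketch of that contract (not of the emitted code) using a GCC builtin:

static int atomic_cas_ref(volatile int *ptr, int oldval, int newval) {
  /* Returns the prior contents of *ptr; equal to oldval iff it swapped. */
  return __sync_val_compare_and_swap(ptr, oldval, newval);
}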
Example #6
void Macros::arith_imm(Opcode opcode, Register rd, Register rn, int imm32,
                       const LiteralAccessor* la, CCMode s, Condition cond) {
  GUARANTEE(rd <= r15 && rn <= r15, "Invalid register used");
  
  Address1 result;
  if (is_rotated_imm(imm32, result)) {
    // Simplest case.  We can handle the immediate directly
    arith(opcode, rd, rn, result, s, cond);
    return;
  }
  int alt_opcode_type = OpcodeInfo::table[opcode].alt_opcode_type;
  int alt_imm32 = alt_opcode_type == alt_NEG ? -imm32 : ~imm32;
  Opcode alt_opcode = (Opcode)OpcodeInfo::table[opcode].alt_opcode;

  if (alt_opcode_type != alt_NONE && is_rotated_imm(alt_imm32, result)) {
    // We can handle the negated/complemented immediate
    arith(alt_opcode, rd, rn, result, s, cond);
    return;
  }

  // Is imm32, or some rotated or shifted form of it, already available?
  if (la != NULL && la->has_literal(imm32, result)) {
    arith(opcode, rd, rn, result, s, cond);
    return;
  }
  if (alt_opcode_type != alt_NONE && 
      la != NULL && la->has_literal(alt_imm32, result)) {
    arith(alt_opcode, rd, rn, result, s, cond);
    return;
  }

  // Let's see if we can split either imm32 or alt_imm32 into two pieces,
  // each of which can be represented as an immediate.
  if ((rd != pc) && (s == no_CC || opcode <= _eor || opcode >= _orr)) {

    // Don't even try if we're setting the pc, or if this is an "arithmetic"
    // rather than a logical operation.  (For arithmetic operations, the C
    // and V bits have meanings that cannot be reproduced by splitting.)
    if (OpcodeInfo::table[opcode].twoword_allowed) {
      if (arith2_imm(opcode, rd, rn, imm32, s, cond)) {
        return;
      }
    }
    if (alt_opcode_type != alt_NONE &&
        OpcodeInfo::table[alt_opcode].twoword_allowed) {
      // Can we break up alt_imm32 into two pieces, each an immediate?
      if (arith2_imm(alt_opcode, rd, rn, alt_imm32, s, cond)) {
        return;
      }
    }
  }

  if (opcode == _eor && imm32 == -1) {
    // This is the only chance we have of optimizing ~X
    mvn(rd, reg(rn), s, cond);
    return;
  }

  if (opcode == _mov) {
    ldr_big_integer(rd, imm32, cond);
    return;
  }

  // We include the (opcode != _mov) test below, even though it isn't
  // necessary, since on the XScale we may want to get rid of the preceding
  // clause.
  if (opcode != _mov) {
    Register tmp = (la != NULL) ? la->get_literal(imm32) : Assembler::no_reg;
    if (tmp != no_reg) {
      GUARANTEE(rn != tmp, "register must be different");
      arith(opcode, rd, rn, reg(tmp), s, cond);
      la->free_literal();
      return;
    }
  }

  // We are desperate.  We are clearly in some test-suite situation that
  // is purposely generating a large immediate when that shouldn't
  // normally happen.  Let's just deal with it.
  if ((rd != pc) && (s == no_CC || opcode <= _eor || opcode >= _orr)) {
    if (OpcodeInfo::table[opcode].twoword_allowed) {
      arith4_imm(opcode, rd, rn, imm32, s, cond);
      return;
    }
    if (alt_opcode_type != alt_NONE &&
        OpcodeInfo::table[alt_opcode].twoword_allowed) {
      arith4_imm(alt_opcode, rd, rn, alt_imm32, s, cond);
      return;
    }
  }

  SHOULD_NOT_REACH_HERE();
}
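
The fast paths above hinge on the ARM rotated-immediate encoding: a data-processing operand is representable when it is an 8-bit value rotated right by an even amount. A hedged C sketch of that test (is_rotated_imm_ref is a hypothetical stand-in for is_rotated_imm, which additionally yields the encoding):

#include <stdbool.h>
#include <stdint.h>

static bool is_rotated_imm_ref(uint32_t imm32) {
  for (unsigned rot = 0; rot < 32; rot += 2) {
    /* Rotating left by rot undoes a rotate-right-by-rot encoding. */
    uint32_t v = rot ? ((imm32 << rot) | (imm32 >> (32 - rot))) : imm32;
    if ((v & ~0xFFu) == 0) {
      return true;
    }
  }
  return false;
}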