Example #1
UTYPE
SIZE(libat_exchange) (UTYPE *mptr, UTYPE newval, int smodel)
{
  UWORD mask, shift, woldval, wnewval, t, *wptr;

  pre_barrier (smodel);

  if (N < WORDSIZE)
    {
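      /* Operate on the aligned word that contains MPTR; MASK and SHIFT
         select the N-byte lane within that word.  */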
      wptr = (UWORD *)((uintptr_t)mptr & -WORDSIZE);
      shift = (((uintptr_t)mptr % WORDSIZE) * CHAR_BIT) ^ SIZE(INVERT_MASK);
      mask = SIZE(MASK) << shift;
    }
  else
    {
      wptr = (UWORD *)mptr;
      shift = 0;
      mask = -1;
    }

  wnewval = (UWORD)newval << shift;
  woldval = __atomic_load_n (wptr, __ATOMIC_RELAXED);
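  /* Splice the shifted new value into the containing word, preserving the
     bytes outside the mask, and retry until the CAS succeeds.  */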
  do
    {
      t = (woldval & ~mask) | wnewval;
    }
  while (!atomic_compare_exchange_w (wptr, &woldval, t, true,
				     __ATOMIC_RELAXED, __ATOMIC_RELAXED));

  post_barrier (smodel);
  return woldval >> shift;
}
Example #2
bool
SIZE(libat_test_and_set) (UTYPE *mptr, int smodel)
{
  UWORD wval, woldval, shift, *wptr, t;

  pre_barrier (smodel);

  if (N < WORDSIZE)
    {
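      /* Sub-word flag: locate the containing aligned word and the bit
         offset of the flag byte within it.  */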
      wptr = (UWORD *)((uintptr_t)mptr & -WORDSIZE);
      shift = (((uintptr_t)mptr % WORDSIZE) * CHAR_BIT) ^ SIZE(INVERT_MASK);
    }
  else
    {
      wptr = (UWORD *)mptr;
      shift = 0;
    }

  wval = (UWORD)__GCC_ATOMIC_TEST_AND_SET_TRUEVAL << shift;
  woldval = __atomic_load_n (wptr, __ATOMIC_RELAXED);
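  /* Set the flag by OR-ing the shifted TRUEVAL into the word via CAS.  */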
  do
    {
      t = woldval | wval;
    }
  while (!atomic_compare_exchange_w (wptr, &woldval, t, true,
				     __ATOMIC_RELAXED, __ATOMIC_RELAXED));

  post_barrier (smodel);
  return (woldval & wval) != 0;
}
Example #3
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  if (is_volatile && type == T_LONG) {
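    // Route the 64-bit value through a stack slot and a double register so
    // the volatile long is written with a single 64-bit move.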
    LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
    LIR_Opr tmp = new_register(T_DOUBLE);
    LIR_Opr spill = new_register(T_DOUBLE);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(data, spill);
    __ move(spill, tmp);
    __ move(tmp, addr);
  } else {
    LIR_Address* addr = new LIR_Address(src, offset, type);
    bool is_obj = (type == T_ARRAY || type == T_OBJECT);
    if (is_obj) {
      // Do the pre-write barrier, if any.
      pre_barrier(LIR_OprFact::address(addr), false, NULL);
      __ move(data, addr);
      assert(src->is_register(), "must be register");
      // Seems to be a precise address
      post_barrier(LIR_OprFact::address(addr), data);
    } else {
      __ move(data, addr);
    }
  }
}
Example #4
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  LIR_Opr base_op = src;
  LIR_Opr index_op = offset;

  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
#ifndef _LP64
  if (is_volatile && type == T_LONG) {
    __ volatile_store_unsafe_reg(data, src, offset, type, NULL, lir_patch_none);
  } else
#endif
    {
      if (type == T_BOOLEAN) {
        type = T_BYTE;
      }
      LIR_Address* addr;
      if (type == T_ARRAY || type == T_OBJECT) {
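        // Fold base + offset into one pointer register so the store and the
        // GC barriers below use a simple (reg + 0) address.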
        LIR_Opr tmp = new_pointer_register();
        __ add(base_op, index_op, tmp);
        addr = new LIR_Address(tmp, 0, type);
      } else {
        addr = new LIR_Address(base_op, index_op, type);
      }

      if (is_obj) {
        pre_barrier(LIR_OprFact::address(addr), false, NULL);
        // _bs->c1_write_barrier_pre(this, LIR_OprFact::address(addr));
      }
      __ move(data, addr);
      if (is_obj) {
        // This address is precise
        post_barrier(LIR_OprFact::address(addr), data);
      }
    }
}
Example #5
void
SIZE(libat_store) (UTYPE *mptr, UTYPE newval, int smodel)
{
    UTYPE oldval;

    pre_barrier (smodel);

    oldval = *mptr;
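    /* Emulate the store with a CAS loop: retry with the freshly observed
       value until the exchange succeeds.  */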
    while (!atomic_compare_exchange_n (mptr, &oldval, newval, true,
                                       __ATOMIC_RELAXED, __ATOMIC_RELAXED))
        continue;

    post_barrier (smodel);
}
Example #6
UTYPE
SIZE(libat_load) (UTYPE *mptr, int smodel)
{
  UWORD shift, t, *wptr;

  pre_barrier (smodel);

  wptr = (UWORD *)((uintptr_t)mptr & -WORDSIZE);
  shift = (((uintptr_t)mptr % WORDSIZE) * CHAR_BIT) ^ SIZE(INVERT_MASK);
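  /* WPTR is the aligned word containing MPTR; SHIFT is the bit offset of
     the N-byte value within that word.  */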

  /* Exchange 0 with 0, placing the old value of *WPTR in T.  */
  t = 0;
  atomic_compare_exchange_w (wptr, &t, 0, true,
                             __ATOMIC_RELAXED, __ATOMIC_RELAXED);

  post_barrier (smodel);
  return t >> shift;
}
Example #7
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  // Use temps to avoid kills
  LIR_Opr t1 = FrameMap::G1_opr;
  LIR_Opr t2 = FrameMap::G3_opr;
  LIR_Opr addr = new_pointer_register();

  // get address of field
  obj.load_item();
  offset.load_item();
  cmp.load_item();
  val.load_item();

  __ add(obj.result(), offset.result(), addr);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    pre_barrier(obj.result(), false, NULL);
  }

  if (type == objectType)
    __ cas_obj(addr, cmp.result(), val.result(), t1, t2);
  else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), t1, t2);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), t1, t2);
  else {
    ShouldNotReachHere();
  }

  // generate conditional move of boolean result
  LIR_Opr result = rlock_result(x);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result);
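  // With PRECISE_CARDMARK the card for the exact field address is dirtied;
  // otherwise the card for the object base is used.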
  if (type == objectType) {  // Write-barrier needed for Object fields.
#ifdef PRECISE_CARDMARK
    post_barrier(addr, val.result());
#else
    post_barrier(obj.result(), val.result());
#endif // PRECISE_CARDMARK
  }
}
Example #8
UTYPE
SIZE(C3(libat_,NAME,_fetch)) (UTYPE *mptr, UTYPE opval, int smodel)
{
    UTYPE oldval, t;

    pre_barrier (smodel);

    oldval = *mptr;
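    /* Apply OP and publish the result via a CAS loop; return the new value
       (the "op_fetch" form).  */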
    do
    {
        t = OP(oldval, opval);
    }
    while (!atomic_compare_exchange_n (mptr, &oldval, t, true,
                                       __ATOMIC_RELAXED, __ATOMIC_RELAXED));

    post_barrier (smodel);
    return t;
}
Example #9
void
SIZE(libat_store) (UTYPE *mptr, UTYPE newval, int smodel)
{
    UWORD mask, shift, woldval, wnewval, t, *wptr;

    pre_barrier (smodel);

    wptr = (UWORD *)((uintptr_t)mptr & -WORDSIZE);
    shift = (((uintptr_t)mptr % WORDSIZE) * CHAR_BIT) ^ SIZE(INVERT_MASK);
    mask = SIZE(MASK) << shift;

    wnewval = (UWORD)newval << shift;
    woldval = __atomic_load_n (wptr, __ATOMIC_RELAXED);
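    /* Replace only the N-byte lane selected by MASK, preserving the other
       bytes, and retry until the CAS succeeds.  */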
    do
    {
        t = (woldval & ~mask) | wnewval;
    }
    while (!atomic_compare_exchange_w (wptr, &woldval, t, true,
                                       __ATOMIC_RELAXED, __ATOMIC_RELAXED));

    post_barrier (smodel);
}
Example #10
bool
SIZE(libat_compare_exchange) (UTYPE *mptr, UTYPE *eptr, UTYPE newval,
			      int smodel, int fmodel UNUSED)
{
  UWORD mask, shift, weval, woldval, wnewval, t, *wptr;
  bool ret = false;

  pre_barrier (smodel);

  if (N < WORDSIZE)
    {
      wptr = (UWORD *)((uintptr_t)mptr & -WORDSIZE);
      shift = (((uintptr_t)mptr % WORDSIZE) * CHAR_BIT) ^ SIZE(INVERT_MASK);
      mask = SIZE(MASK) << shift;
    }
  else
    {
      wptr = (UWORD *)mptr;
      shift = 0;
      mask = -1;
    }

  weval = (UWORD)*eptr << shift;
  wnewval = (UWORD)newval << shift;
  woldval = __atomic_load_n (wptr, __ATOMIC_RELAXED);
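  /* Compare only the bytes selected by MASK; on mismatch, fail and report
     the value actually observed back through EPTR.  */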
  do
    {
      if ((woldval & mask) != weval)
	goto failure;
      t = (woldval & ~mask) | wnewval;
    }
  while (!atomic_compare_exchange_w (wptr, &woldval, t, true,
				     __ATOMIC_RELAXED, __ATOMIC_RELAXED));
  ret = true;
 failure:
  *eptr = woldval >> shift;

  post_barrier (smodel);
  return ret;
}
Example #11
UTYPE
SIZE(C3(libat_,NAME,_fetch)) (UTYPE *mptr, UTYPE opval, int smodel)
{
    UWORD mask, shift, woldval, wopval, t, *wptr;

    pre_barrier (smodel);

    wptr = (UWORD *)mptr;
    shift = 0;
    mask = -1;

    wopval = (UWORD)opval << shift;
    woldval = __atomic_load_n (wptr, __ATOMIC_RELAXED);
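    /* Word-sized case (SHIFT 0, full MASK): OP is applied to the whole word
       and the result installed with a CAS loop.  */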
    do
    {
        t = (woldval & ~mask) | (OP(woldval, wopval) & mask);
    }
    while (!atomic_compare_exchange_w (wptr, &woldval, t, true,
                                       __ATOMIC_RELAXED, __ATOMIC_RELAXED));

    post_barrier (smodel);
    return t >> shift;
}
Example #12
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  assert(obj.type()->tag() == objectTag, "invalid type");

  // In 64bit the type can be long, sparc doesn't have this assert
  // assert(offset.type()->tag() == intTag, "invalid type");

  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  // get address of field
  obj.load_item();
  offset.load_nonconstant();

  if (type == objectType) {
    cmp.load_item_force(FrameMap::rax_oop_opr);
    val.load_item();
  } else if (type == intType) {
    cmp.load_item_force(FrameMap::rax_opr);
    val.load_item();
  } else if (type == longType) {
    cmp.load_item_force(FrameMap::long0_opr);
    val.load_item_force(FrameMap::long1_opr);
  } else {
    ShouldNotReachHere();
  }
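  // The force-loads above pin cmp (and, for longs, val) into the fixed
  // registers the compare-and-swap instruction expects.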

  LIR_Opr addr = new_pointer_register();
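  // Build the field address: a constant offset becomes a displacement, a
  // register offset an index with scale 1; leal materializes it in addr.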
  LIR_Address* a;
  if (offset.result()->is_constant()) {
    a = new LIR_Address(obj.result(),
                        NOT_LP64(offset.result()->as_constant_ptr()->as_jint()) LP64_ONLY((int)offset.result()->as_constant_ptr()->as_jlong()),
                        as_BasicType(type));
  } else {
    a = new LIR_Address(obj.result(),
                        offset.result(),
                        LIR_Address::times_1,
                        0,
                        as_BasicType(type));
  }
  __ leal(LIR_OprFact::address(a), addr);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Do the pre-write barrier, if any.
    pre_barrier(addr, false, NULL);
  }

  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == objectType)
    __ cas_obj(addr, cmp.result(), val.result(), ill, ill);
  else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), ill, ill);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), ill, ill);
  else {
    ShouldNotReachHere();
  }

  // generate conditional move of boolean result
  LIR_Opr result = rlock_result(x);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, as_BasicType(type));
  if (type == objectType) {   // Write-barrier needed for Object fields.
    // Seems to be precise
    post_barrier(addr, val.result());
  }
}
Example #13
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = true;
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length) {
    needs_range_check = x->compute_needs_range_check();
    if (needs_range_check) {
      length.set_instruction(x->length());
      length.load_item();
    }
  }
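  // A store that must be type-checked keeps the value in a register;
  // otherwise load_for_store picks a form suited to a plain store.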
  if (needs_store_check) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info);
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), false, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Seems to be a precise address
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    __ move(value.result(), array_addr, null_check_info);
  }
}