Example #1
  static void assert_params_ok(void* from, void* to, intptr_t log_align) {
#ifdef ASSERT
    if (mask_bits((uintptr_t)from, right_n_bits(log_align)) != 0)
      basic_fatal("not aligned");
    if (mask_bits((uintptr_t)to, right_n_bits(log_align)) != 0)
      basic_fatal("not aligned");
#endif
  }
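
For context, a minimal sketch of the bit macros these examples rely on, assuming the HotSpot-style expansions from globalDefinitions.hpp (right_n_bits(n) is a mask of the n lowest bits, mask_bits(x, m) a plain bitwise AND); the exact definitions in a given JDK version may differ slightly (e.g. nth_bit carries a BitsPerWord guard):

// Sketch only: assumed expansions of the HotSpot bit macros used in these examples.
#include <cassert>
#include <cstdint>

#define nth_bit(n)       (1ULL << (n))        // bit number n, lsb == 0
#define right_n_bits(n)  (nth_bit(n) - 1)     // mask covering the n lowest bits
#define mask_bits(x, m)  ((x) & (m))          // select the bits under the mask

int main() {
  // assert_params_ok() above checks exactly this: an address is aligned to
  // 2^log_align bytes iff its low log_align bits are all zero.
  const intptr_t log_align = 3;                             // 8-byte alignment
  uintptr_t p = 0x1000;
  assert(mask_bits(p,     right_n_bits(log_align)) == 0);   // aligned
  assert(mask_bits(p + 4, right_n_bits(log_align)) != 0);   // misaligned
  return 0;
}
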
Example #2
static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
#ifdef _LP64
  guarantee(mask_bits((uintptr_t)tohw, right_n_bits(LogBytesPerLong)) == 0,
         "unaligned fill words");
  julong* to = (julong*)tohw;
  julong  v  = ((julong)value << 32) | value;
  while (count-- > 0) {
    *to++ = v;
  }
#else // _LP64
  juint* to = (juint*)tohw;
  while (count-- > 0) {
    *to++ = value;
  }
#endif // _LP64
}
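
The 64-bit branch simply stores the 32-bit fill pattern twice per machine word; the doubling is the shift-and-or computing v. A minimal sketch of that arithmetic:

// Sketch: the doubled fill pattern built by the _LP64 branch above.
#include <cassert>
#include <cstdint>

int main() {
  uint32_t value = 0xDEADBEEFu;
  uint64_t v = ((uint64_t)value << 32) | value;   // same expression as in pd_fill_to_words
  assert(v == 0xDEADBEEFDEADBEEFull);             // 32-bit pattern replicated in both halves
  return 0;
}
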
  enum { bit_index_length = 5,
         word_index_length = 3,
         block_index_length = 8 // not used
  };

  // Derived constants used for manipulating the index bitfields
  enum {
         bit_index_offset = 0, // not used
         word_index_offset = bit_index_length,
         block_index_offset = bit_index_length + word_index_length,

         bits_per_word = 1 << bit_index_length,
         words_per_block = 1 << word_index_length,
         bits_per_block = bits_per_word * words_per_block,

         bit_index_mask = right_n_bits(bit_index_length),
         word_index_mask = right_n_bits(word_index_length)
  };

  // These routines are used for extracting the block, word, and bit index
  // from an element.
  static uint get_block_index(uint element) {
    return element >> block_index_offset;
  }
  static uint get_word_index(uint element) {
    return mask_bits(element >> word_index_offset, word_index_mask);
  }
  static uint get_bit_index(uint element) {
    return mask_bits(element, bit_index_mask);
  }
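
Taken together, the constants and accessors treat an element as the packed index [block | word | bit] with 8, 3 and 5 bits respectively, so block_index_offset == 8 and word_index_offset == 5. A standalone sketch of the decomposition, with the derived masks written out numerically:

// Sketch: decomposing a packed element into [block | word | bit] indices,
// using the widths declared above (8 + 3 + 5 bits).
#include <cassert>

int main() {
  const unsigned bit_index_length   = 5;
  const unsigned word_index_length  = 3;
  const unsigned word_index_offset  = bit_index_length;                      // 5
  const unsigned block_index_offset = bit_index_length + word_index_length;  // 8
  const unsigned bit_index_mask     = (1u << bit_index_length)  - 1;         // 0x1f
  const unsigned word_index_mask    = (1u << word_index_length) - 1;         // 0x07

  unsigned element = 0x1234;
  unsigned block = element >> block_index_offset;                    // get_block_index -> 0x12
  unsigned word  = (element >> word_index_offset) & word_index_mask; // get_word_index  -> 0x1
  unsigned bit   = element & bit_index_mask;                         // get_bit_index   -> 0x14
  assert(block == 0x12 && word == 0x1 && bit == 0x14);

  // Reassembling the three fields yields the original element.
  assert(((block << block_index_offset) | (word << word_index_offset) | bit) == element);
  return 0;
}
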
  
bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
  assert(p >= _reserved.start(), "just checking");
  size_t delta = pointer_delta(p, _reserved.start());
  return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
}
// Action_mark - update the BOT for the block [blk_start, blk_end).
//               Current typical use is for splitting a block.
// Action_single - update the BOT for an allocation.
// Action_check - BOT verification.
void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                           HeapWord* blk_end,
                                           Action action) {
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  // This is optimized to make the test fast, assuming we only rarely
  // cross boundaries.
  uintptr_t end_ui = (uintptr_t)(blk_end - 1);
  uintptr_t start_ui = (uintptr_t)blk_start;
  // Calculate the last card boundary preceding end of blk
  intptr_t boundary_before_end = (intptr_t)end_ui;
  clear_bits(boundary_before_end, right_n_bits(LogN));
  if (start_ui <= (uintptr_t)boundary_before_end) {
    // blk starts at or crosses a boundary
    // Calculate index of card on which blk begins
    size_t    start_index = _array->index_for(blk_start);
    // Index of card on which blk ends
    size_t    end_index   = _array->index_for(blk_end - 1);
    // Start address of card on which blk begins
    HeapWord* boundary    = _array->address_for_index(start_index);
    assert(boundary <= blk_start, "blk should start at or after boundary");
    if (blk_start != boundary) {
      // blk starts strictly after boundary
      // adjust card boundary and start_index forward to next card
      boundary += N_words;
      start_index++;
    }
    assert(start_index <= end_index, "monotonicity of index_for()");
    assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
    switch (action) {
      case Action_mark: {
        if (init_to_zero()) {
          _array->set_offset_array(start_index, boundary, blk_start);
          break;
        } // Else fall through to the next case
      }
      case Action_single: {
        _array->set_offset_array(start_index, boundary, blk_start);
        // We have finished marking the "offset card". We need to now
        // mark the subsequent cards that this blk spans.
        if (start_index < end_index) {
          HeapWord* rem_st = _array->address_for_index(start_index) + N_words;
          HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
          set_remainder_to_point_to_start(rem_st, rem_end);
        }
        break;
      }
      case Action_check: {
        _array->check_offset_array(start_index, boundary, blk_start);
        // We have finished checking the "offset card". We need to now
        // check the subsequent cards that this blk spans.
        check_all_cards(start_index + 1, end_index);
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }
}
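
The clear_bits() call above rounds blk_end - 1 down to the previous card boundary: zeroing the low LogN bits of the address yields the largest multiple of the card size (2^LogN bytes) not exceeding it. A worked sketch with an illustrative LogN and assumed macro expansions:

// Sketch: clear_bits(x, right_n_bits(LogN)) rounds x down to a 2^LogN boundary.
#include <cassert>
#include <cstdint>

#define right_n_bits(n)   ((1ULL << (n)) - 1)   // assumed expansion
#define clear_bits(x, m)  ((x) &= ~(m))         // assumed expansion

int main() {
  const int LogN = 9;                           // illustrative card size: 2^9 = 512 bytes
  uintptr_t boundary_before_end = 0x12345;      // stand-in for (uintptr_t)(blk_end - 1)
  clear_bits(boundary_before_end, (uintptr_t)right_n_bits(LogN));
  assert(boundary_before_end == 0x12200);       // last 512-byte boundary at or before 0x12345
  assert(boundary_before_end % (1u << LogN) == 0);
  return 0;
}
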
Example #6

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_PROMOTIONINFO_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_PROMOTIONINFO_HPP

#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#include "memory/allocation.hpp"

// Forward declarations
class CompactibleFreeListSpace;

class PromotedObject VALUE_OBJ_CLASS_SPEC {
private:
    enum {
        promoted_mask  = right_n_bits(2),   // i.e. 0x3
        displaced_mark = nth_bit(2),        // i.e. 0x4
        next_mask      = ~(right_n_bits(3)) // i.e. ~(0x7)
    };

    // Below, we want _narrow_next in the "higher" 32 bit slot,
    // whose position will depend on endian-ness of the platform.
    // This is so that there is no interference with the
    // cms_free_bit occupying bit position 7 (lsb == 0)
    // when we are using compressed oops; see FreeChunk::is_free().
    // We cannot move the cms_free_bit down because currently
    // biased locking code assumes that age bits are contiguous
    // with the lock bits. Even if that assumption were relaxed,
    // the least position we could move this bit to would be
    // to bit position 3, which would require 16 byte alignment.
    typedef struct {
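
The enum reserves the low three bits of the next pointer for metadata (0x3, 0x4, ~0x7, as the inline comments note), which is safe because heap-aligned pointers have those bits clear. A hedged sketch of tagging and untagging such a pointer; the pointer value is purely illustrative:

// Sketch: a next pointer tagged in its low three bits, per the enum above
// (promoted_mask == 0x3, displaced_mark == 0x4, next_mask == ~0x7).
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t promoted_mask  = 0x3;
  const uintptr_t displaced_mark = 0x4;
  const uintptr_t next_mask      = ~(uintptr_t)0x7;

  uintptr_t next = 0x10000;                                    // 8-byte-aligned "next" pointer
  uintptr_t word = (next & next_mask) | displaced_mark | 0x1;  // tag the pointer with flags
  assert((word & next_mask) == next);                          // pointer recovered intact
  assert((word & displaced_mark) != 0);                        // displaced-mark flag is set
  assert((word & promoted_mask) == 0x1);                       // low promotion bits
  return 0;
}
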
Example #7
vmIntrinsics::Flags vmIntrinsics::flags_for(vmIntrinsics::ID id) {
  jlong info = intrinsic_info(id);
  int shift = 0, mask = right_n_bits(log2_FLAG_LIMIT);
  assert(((ID4(1021,1022,1023,15) >> shift) & mask) == 15, "");
  return Flags( (info >> shift) & mask );
}
Example #8
vmSymbols::SID vmIntrinsics::signature_for(vmIntrinsics::ID id) {
  jlong info = intrinsic_info(id);
  int shift = log2_FLAG_LIMIT, mask = right_n_bits(vmSymbols::log2_SID_LIMIT);
  assert(((ID4(1021,1022,1023,15) >> shift) & mask) == 1023, "");
  return vmSymbols::SID( (info >> shift) & mask );
}
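
Read together, flags_for() and signature_for() reveal the layout of the packed intrinsic_info word: the flags occupy the lowest log2_FLAG_LIMIT bits and the signature SID sits directly above them. A hedged sketch of that pack/unpack scheme; the field widths here are assumed for illustration, not taken from vmSymbols.hpp:

// Sketch: packing two small fields into one 64-bit word, then extracting them
// the same way flags_for() and signature_for() do.  Widths are illustrative.
#include <cassert>
#include <cstdint>

int main() {
  const int log2_FLAG_LIMIT = 4;                // assumed width of the flags field
  const int log2_SID_LIMIT  = 10;               // assumed width of the SID field

  int64_t flags = 7;                            // must fit in log2_FLAG_LIMIT bits
  int64_t sid   = 513;                          // must fit in log2_SID_LIMIT bits
  int64_t info  = (sid << log2_FLAG_LIMIT) | flags;

  int64_t mask_f = (1 << log2_FLAG_LIMIT) - 1;  // right_n_bits(log2_FLAG_LIMIT)
  int64_t mask_s = (1 << log2_SID_LIMIT)  - 1;  // right_n_bits(log2_SID_LIMIT)
  assert(((info >> 0)               & mask_f) == flags);  // as in flags_for()
  assert(((info >> log2_FLAG_LIMIT) & mask_s) == sid);    // as in signature_for()
  return 0;
}
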
Example #9
// more significant bits. The counter is incremented before a method is activated and an
// action is triggered when count() > limit().

class InvocationCounter VALUE_OBJ_CLASS_SPEC {
    friend class VMStructs;
private:                             // bit no: |31  3|  2  | 1 0 |
    unsigned int _counter;              // format: [count|carry|state]

    enum PrivateConstants {
        number_of_state_bits = 2,
        number_of_carry_bits = 1,
        number_of_noncount_bits = number_of_state_bits + number_of_carry_bits,
        number_of_count_bits = BitsPerInt - number_of_noncount_bits,
        state_limit          = nth_bit(number_of_state_bits),
        count_grain          = nth_bit(number_of_state_bits + number_of_carry_bits),
        carry_mask           = right_n_bits(number_of_carry_bits) << number_of_state_bits,
        state_mask           = right_n_bits(number_of_state_bits),
        status_mask          = right_n_bits(number_of_state_bits + number_of_carry_bits),
        count_mask           = ((int)(-1) ^ status_mask)
    };

public:
    static int InterpreterInvocationLimit;        // CompileThreshold scaled for interpreter use
    static int InterpreterBackwardBranchLimit;    // A separate threshold for on stack replacement
    static int InterpreterProfileLimit;           // Profiling threshold scaled for interpreter use

    typedef address (*Action)(methodHandle method, TRAPS);

    enum PublicConstants {
        count_increment      = count_grain,          // use this value to increment the 32bit _counter word
        count_mask_value     = count_mask,           // use this value to mask the backedge counter
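
With BitsPerInt == 32, these constants split the counter word exactly as the header comment indicates: bits [1:0] hold the state, bit 2 the carry, and bits [31:3] the count, so count_grain (and therefore count_increment) equals 8 and adding it never disturbs the low three bits. A small sketch of that split:

// Sketch: the [count|carry|state] split of the 32-bit counter word.
#include <cassert>

int main() {
  const int number_of_state_bits = 2;
  const int number_of_carry_bits = 1;
  const int state_mask  = (1 << number_of_state_bits) - 1;                            // 0x3
  const int carry_mask  = ((1 << number_of_carry_bits) - 1) << number_of_state_bits;  // 0x4
  const int count_grain = 1 << (number_of_state_bits + number_of_carry_bits);         // 8
  const int count_mask  = ~(state_mask | carry_mask);

  unsigned int counter = 0;
  counter |= 0x2;                          // some state value in bits [1:0]
  counter += 5u * count_grain;             // five invocations, counted in bits [31:3]
  assert((counter & state_mask) == 0x2);   // state untouched by the increments
  assert(((counter & count_mask) >> 3) == 5);
  return 0;
}
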
 friend class StubGenerator;

 public:
  enum { nof_instance_allocators = 10 };

  // allocator lock values
  enum {
    unlocked = 0,
    locked   = 1
  };

  enum { 
    v8_oop_lock_ignore_bits = 2, 
    v8_oop_lock_bits = 4, 
    nof_v8_oop_lock_cache_entries = 1 << (v8_oop_lock_bits+v8_oop_lock_ignore_bits), 
    v8_oop_lock_mask = right_n_bits(v8_oop_lock_bits), 
    v8_oop_lock_mask_in_place = v8_oop_lock_mask << v8_oop_lock_ignore_bits
  };

  static int _v8_oop_lock_cache[nof_v8_oop_lock_cache_entries];

 private:
  static address _test_stop_entry;
  static address _stop_subroutine_entry;
  static address _flush_callers_register_windows_entry;

  static address _copy_words_aligned8_lower_entry;
  static address _copy_words_aligned8_higher_entry;
  static address _set_words_aligned8_entry;
  static address _zero_words_aligned8_entry;
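
For reference, the v8 oop-lock constants above work out to 64 cache entries, a four-bit lock mask of 0xF, and that mask shifted past the two ignored low bits, 0x3C; how the cache is actually indexed is not shown in this snippet. A small sketch that just verifies the arithmetic:

// Sketch: numeric values of the v8 oop-lock cache constants defined above.
#include <cassert>

int main() {
  const int v8_oop_lock_ignore_bits = 2;
  const int v8_oop_lock_bits = 4;
  const int nof_v8_oop_lock_cache_entries = 1 << (v8_oop_lock_bits + v8_oop_lock_ignore_bits);
  const int v8_oop_lock_mask = (1 << v8_oop_lock_bits) - 1;             // right_n_bits(4)
  const int v8_oop_lock_mask_in_place = v8_oop_lock_mask << v8_oop_lock_ignore_bits;
  assert(nof_v8_oop_lock_cache_entries == 64);
  assert(v8_oop_lock_mask == 0xF);
  assert(v8_oop_lock_mask_in_place == 0x3C);
  return 0;
}
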
    checked_bits           = 1,
    instance_bits          = 1,
    address_bits           = BitsPerWord - checked_bits - instance_bits,

    large_offset_bits      = address_bits,  // unioned with address
    small_offset_bits      = 7,
    klass_bits             = address_bits - small_offset_bits,

    checked_shift          = 0,
    instance_shift         = checked_shift  + checked_bits,
    address_shift          = instance_shift + instance_bits,

    offset_shift           = address_shift,  // unioned with address
    klass_shift            = offset_shift + small_offset_bits,

    checked_mask_in_place  = right_n_bits(checked_bits)  << checked_shift,
    instance_mask_in_place = right_n_bits(instance_bits) << instance_shift,
#ifndef _WIN64
    large_offset_mask      = right_n_bits(large_offset_bits),
    small_offset_mask      = right_n_bits(small_offset_bits),
    klass_mask             = right_n_bits(klass_bits)
#endif
    };

#ifdef _WIN64
    // These values are too big for Win64
    const static uintptr_t large_offset_mask = right_n_bits(large_offset_bits);
    const static uintptr_t small_offset_mask = right_n_bits(small_offset_bits);
    const static uintptr_t klass_mask        = right_n_bits(klass_bits);
#endif
    checked_bits           = 1,
    instance_bits          = 1,
    address_bits           = BitsPerWord - checked_bits - instance_bits,

    large_offset_bits      = address_bits,  // unioned with address
    small_offset_bits      = 7,
    klass_bits             = address_bits - small_offset_bits,

    checked_shift          = 0,
    instance_shift         = checked_shift  + checked_bits,
    address_shift          = instance_shift + instance_bits,

    offset_shift           = address_shift,  // unioned with address
    klass_shift            = offset_shift + small_offset_bits,

    checked_mask_in_place  = right_n_bits(checked_bits)  << checked_shift,
    instance_mask_in_place = right_n_bits(instance_bits) << instance_shift,
    large_offset_mask      = right_n_bits(large_offset_bits),
    small_offset_mask      = right_n_bits(small_offset_bits),
    klass_mask             = right_n_bits(klass_bits)
    };

  // helper routines:
  static bool is_checked_jfieldID(jfieldID id) {
    uintptr_t as_uint = (uintptr_t) id;
    return ((as_uint & checked_mask_in_place) != 0);
  }
  static intptr_t encode_klass_hash(klassOop k, intptr_t offset);
  static bool             klass_hash_ok(klassOop k, jfieldID id);
  static void  verify_instance_jfieldID(klassOop k, jfieldID id);
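
Both enum blocks describe the same jfieldID layout (the second without the Win64 special-casing): bit 0 is the "checked" flag, bit 1 marks an instance (vs. static) ID, and the remaining bits hold either a full address or a small offset plus a klass hash. A hedged sketch of encoding and probing such an ID; the encoding helper below is illustrative, not the real encode_klass_hash():

// Sketch: packing a "checked" instance jfieldID according to the bit layout above.
#include <cassert>
#include <cstdint>

int main() {
  const int checked_bits      = 1;
  const int instance_bits     = 1;
  const int small_offset_bits = 7;
  const int checked_shift  = 0;
  const int instance_shift = checked_shift  + checked_bits;    // 1
  const int offset_shift   = instance_shift + instance_bits;   // 2
  const int klass_shift    = offset_shift + small_offset_bits; // 9
  const uintptr_t checked_mask_in_place  = 1u << checked_shift;
  const uintptr_t instance_mask_in_place = 1u << instance_shift;
  const uintptr_t small_offset_mask      = (1u << small_offset_bits) - 1;

  uintptr_t offset     = 0x24;       // field offset, must fit in 7 bits
  uintptr_t klass_hash = 0xABCDE;    // illustrative klass hash value
  uintptr_t id = (klass_hash << klass_shift)
               | ((offset & small_offset_mask) << offset_shift)
               | instance_mask_in_place          // instance (not static) field
               | checked_mask_in_place;          // "checked" ID
  assert((id & checked_mask_in_place) != 0);     // what is_checked_jfieldID() tests
  assert(((id >> offset_shift) & small_offset_mask) == offset);
  assert((id >> klass_shift) == klass_hash);
  return 0;
}
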
Example #13
// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

#ifdef ASSERT
  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
  __ add(argslot_reg, offset, temp2_reg);
  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::less, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots >= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() >= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

  // Pull up everything shallower than argslot.
  // Then remove the excess space on the stack.
  // The stacked return address gets pulled up with everything else.
  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
  //   for (temp = argslot-1; temp >= sp; --temp)
  //     temp[size] = temp[0]
  //   argslot += size;
  //   sp += size;
  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
  {
    Label loop;
    __ bind(loop);
    // pull one word up each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ sub(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, Gargs);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot up, to point to the just-copied block.
  __ add(Gargs, offset, Gargs);
  // And adjust the argslot address to point at the deletion point.
  __ add(argslot_reg, offset, argslot_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);
}
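
The last three instructions keep SP aligned to 2*wordSize: the andn clears the low LogBytesPerWord+1 bits of the byte offset, i.e. it rounds the offset down to a multiple of two machine words before adding it to SP. A worked sketch of that masking, assuming 64-bit words:

// Sketch: rounding a byte offset down to a 2*wordSize multiple, as the
// regcon_andn_ptr(offset, TwoWordAlignmentMask, ...) step does above.
#include <cassert>
#include <cstdint>

int main() {
  const int LogBytesPerWord = 3;                                     // assumed: 64-bit words
  const int wordSize = 1 << LogBytesPerWord;                         // 8
  const int TwoWordAlignmentMask = (1 << (LogBytesPerWord + 1)) - 1; // right_n_bits(4) == 0xF
  intptr_t offset = 3 * wordSize;                                    // 24 bytes, not 16-aligned
  intptr_t masked = offset & ~(intptr_t)TwoWordAlignmentMask;        // the "andn" operation
  assert(masked == 16);                                              // rounded down to 2*wordSize
  intptr_t aligned = 4 * wordSize;                                   // 32 bytes, already aligned
  assert((aligned & ~(intptr_t)TwoWordAlignmentMask) == aligned);    // passes through unchanged
  return 0;
}
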
Example #14
// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and <= 0
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     int arg_mask,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

#ifdef ASSERT
  verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::greater, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots <= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() <= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

#ifdef _LP64
  if (arg_slots.is_register()) {
    // Was arg_slots register loaded as signed int?
    Label L_ok;
    __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
    __ sra(temp_reg, BitsPerInt, temp_reg);
    __ cmp(arg_slots.as_register(), temp_reg);
    __ br(Assembler::equal, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ stop("arg_slots register not loaded as signed int");
    __ bind(L_ok);
  }
#endif

  // Make space on the stack for the inserted argument(s).
  // Then pull down everything shallower than argslot_reg.
  // The stacked return address gets pulled down with everything else.
  // That is, copy [sp, argslot) downward by -size words.  In pseudo-code:
  //   sp -= size;
  //   for (temp = sp + size; temp < argslot; temp++)
  //     temp[-size] = temp[0]
  //   argslot -= size;
  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);

  __ mov(Gargs, temp_reg);  // source pointer for copy
  __ add(Gargs, offset, Gargs);

  {
    Label loop;
    __ bind(loop);
    // pull one word down each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ add(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, argslot_reg);
    __ brx(Assembler::less, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot down, to point to the opened-up space.
  __ add(argslot_reg, offset, argslot_reg);
}
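
The _LP64 block verifies that arg_slots was loaded as a sign-extended 32-bit int by shifting left and then arithmetically right by BitsPerInt and comparing with the original register value. A hedged C++ analogue of that round-trip test:

// Sketch: the shift-by-32 test for "loaded as a sign-extended int".
#include <cassert>
#include <cstdint>

static bool loaded_as_signed_int(int64_t reg) {
  const int BitsPerInt = 32;
  int64_t round_trip = (int64_t)((uint64_t)reg << BitsPerInt) >> BitsPerInt;
  return round_trip == reg;   // equal iff the upper half is a sign extension of bit 31
}

int main() {
  assert(loaded_as_signed_int(-6));                    // properly sign-extended
  assert(loaded_as_signed_int(123456));
  assert(!loaded_as_signed_int(0x0000000100000000LL)); // garbage in the upper half
  return 0;
}
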