Code example #1
File: codeBlob.cpp Project: AllenWeb/openjdk-1
// Creates a simple CodeBlob. Sets up the size of the different regions.
CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) {
  assert(size == round_to(size, oopSize), "unaligned size");
  assert(locs_size == round_to(locs_size, oopSize), "unaligned size");
  assert(header_size == round_to(header_size, oopSize), "unaligned size");
  assert(!UseRelocIndex, "no space allocated for reloc index yet");

  // Note: If UseRelocIndex is enabled, there needs to be (at least) one
  //       extra word for the relocation information, containing the reloc
  //       index table length. Unfortunately, the reloc index table imple-
  //       mentation is not easily understandable and thus it is not clear
  //       what exactly the format is supposed to be. For now, we just turn
  //       off the use of this table (gri 7/6/2000).

  _name                  = name;
  _size                  = size;
  _frame_complete_offset = frame_complete;
  _header_size           = header_size;
  _relocation_size       = locs_size;
  _instructions_offset   = align_code_offset(header_size + locs_size);
  _data_offset           = size;
  _oops_offset           = size;
  _oops_length           =  0;
  _frame_size            =  0;
  set_oop_maps(NULL);
}
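Every example in this collection leans on HotSpot's round_to helper, which rounds a value up to the next multiple of an alignment that must be a power of two. Below is a minimal sketch for reading the snippets; round_to_sketch is an illustrative stand-in, not the actual HotSpot definition.

#include <cassert>
#include <cstdint>

// Minimal stand-in for HotSpot's round_to: rounds x up to the next
// multiple of s, where s must be a power of two (as the assertions in
// the examples assume). Not the actual HotSpot definition.
inline intptr_t round_to_sketch(intptr_t x, uintptr_t s) {
  assert((s & (s - 1)) == 0 && "alignment must be a power of two");
  const uintptr_t mask = s - 1;
  return (intptr_t)(((uintptr_t)x + mask) & ~mask);
}

// Expected behaviour, matching the asserts in the examples:
//   round_to_sketch(13, 8)   == 16   // unaligned sizes are rounded up
//   round_to_sketch(16, 8)   == 16   // already aligned values are unchanged
//   round_to_sketch(1, 4096) == 4096 // page-size rounding as in the NUMA examples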
Code example #2
int Bytecodes::special_length_at(address bcp) {
  Code code = code_at(bcp);
  switch (code) {
  case _wide:
    return wide_length_for(cast(*(bcp + 1)));
  case _tableswitch:
    { address aligned_bcp = (address)round_to((intptr_t)bcp + 1, jintSize);
      jlong lo = (jint)Bytes::get_Java_u4(aligned_bcp + 1*jintSize);
      jlong hi = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
      jlong len = (aligned_bcp - bcp) + (3 + hi - lo + 1)*jintSize;
      // only return len if it can be represented as a positive int;
      // return -1 otherwise
      return (len > 0 && len == (int)len) ? len : -1;
    }

  case _lookupswitch:      // fall through    
  case _fast_binaryswitch: // fall through    
  case _fast_linearswitch: 
    { address aligned_bcp = (address)round_to((intptr_t)bcp + 1, jintSize);
      jlong npairs = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
      jlong len = (aligned_bcp - bcp) + (2 + 2*npairs)*jintSize;
      // only return len if it can be represented as a positive int;
      // return -1 otherwise
      return (len > 0 && len == (int)len) ? len : -1;
    }
  }
  return 0;
}
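For context on the arithmetic in the _tableswitch case above: per the class-file format, the opcode byte is followed by 0-3 padding bytes up to a 4-byte boundary, then a default offset, a low bound, a high bound, and (hi - lo + 1) jump offsets, each 4 bytes. The following is a small, self-contained recomputation of the same length formula; the names are illustrative, not HotSpot's.

#include <cstdio>

// Illustrative only: recompute the tableswitch length the way
// special_length_at does, but with plain integers instead of a real
// bytecode stream. pad is the number of padding bytes after the opcode
// (0..3); lo and hi are the table bounds read from the padded stream.
static long tableswitch_length(long pad, long lo, long hi) {
  const long jint_size = 4;
  long prefix = 1 + pad;                         // opcode byte plus padding, i.e. aligned_bcp - bcp
  return prefix + (3 + hi - lo + 1) * jint_size; // default, low, high, then hi - lo + 1 offsets
}

int main() {
  // An opcode at a 4-byte-aligned position needs 3 padding bytes, so a
  // 3-entry table (lo = 0, hi = 2) occupies 1 + 3 + 6*4 = 28 bytes.
  printf("%ld\n", tableswitch_length(3, 0, 2)); // prints 28
  return 0;
}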
Code example #3
// simple tests of generated arraycopy functions
static void test_arraycopy_func(address func, int alignment) {
  int v = 0xcc;
  int v2 = 0x11;
  jlong lbuffer[8];
  jlong lbuffer2[8];
  address fbuffer  = (address) lbuffer;
  address fbuffer2 = (address) lbuffer2;
  unsigned int i;
  for (i = 0; i < sizeof(lbuffer); i++) {
    fbuffer[i] = v; fbuffer2[i] = v2;
  }
  // C++ does not guarantee jlong[] array alignment to 8 bytes.
  // Use middle of array to check that memory before it is not modified.
  address buffer  = (address) round_to((intptr_t)&lbuffer[4], BytesPerLong);
  address buffer2 = (address) round_to((intptr_t)&lbuffer2[4], BytesPerLong);
  // do an aligned copy
  ((arraycopy_fn)func)(buffer, buffer2, 0);
  for (i = 0; i < sizeof(lbuffer); i++) {
    assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
  }
  // adjust destination alignment
  ((arraycopy_fn)func)(buffer, buffer2 + alignment, 0);
  for (i = 0; i < sizeof(lbuffer); i++) {
    assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
  }
  // adjust source alignment
  ((arraycopy_fn)func)(buffer + alignment, buffer2, 0);
  for (i = 0; i < sizeof(lbuffer); i++) {
    assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
  }
}
Code example #4
File: codeBlob.cpp Project: netroby/jdk9-dev
CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
  _name(name),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _strings(CodeStrings()),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _content_begin(layout.content_begin()),
  _type(type)
{
  assert(_size        == round_to(_size,        oopSize), "unaligned size");
  assert(_header_size == round_to(_header_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");

  set_oop_maps(oop_maps);
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
}
Code example #5
// --- generate
address MethodStubBlob::generate( heapRef moop, address c2i_adapter ) {
  // NativeMethodStubs must be jumped-to directly and are packed back-to-back.
  // Hence they start CodeEntryAligned, and each later one has to be
  // CodeEntryAligned so we expect the instruction_size to be a multiple.
  assert0( round_to(NativeMethodStub::instruction_size,CodeEntryAlignment) == NativeMethodStub::instruction_size );
  NativeMethodStub *nms;
  do {
    // The _free_list is a racing CAS-managed link-list.  Must read the
    // _free_list exactly ONCE before the CAS attempt below, or otherwise know
    // we have something that used to be on the free_list and is not-null.  In
    // general, if we re-read the free_list we have to null-check the result.
    nms = _free_list;
    if( !nms ) {
      // CodeCache makes CodeBlobs.  Make a CodeBlob typed as a methodCodeStub.
      CodeBlob *cb = CodeCache::malloc_CodeBlob( CodeBlob::methodstub, 256*NativeMethodStub::instruction_size );
      address adr = (address)round_to((intptr_t)cb->code_begins(),CodeEntryAlignment);
      cb->_code_start_offset = adr-(address)cb->_code_begins;
      while( adr+NativeMethodStub::instruction_size < cb->end() ) {
        free_stub((NativeMethodStub*)adr);
        adr += NativeMethodStub::instruction_size;
      }
      // The last not-null thing jammed on the freelist.
      nms = (NativeMethodStub*)(adr-NativeMethodStub::instruction_size);
    }
  } while( Atomic::cmpxchg_ptr(*(NativeMethodStub**)nms,&_free_list,nms) != nms );
  nms->fill( moop, c2i_adapter );
  return (address)nms;
}
Code example #6
static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {

  // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
  // expression stack, the callee will have callee_extra_locals (so we can account for
  // frame extension) and monitor_size for monitors. Basically we need to calculate
  // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
  //
  //
  // The big complicating thing here is that we must ensure that the stack stays properly
  // aligned. This would be even uglier if monitor size wasn't a multiple of what the
  // stack needs to be aligned to. We are given that the sp (fp) is already aligned by
  // the caller so we must ensure that it is properly aligned for our callee.
  //
  const int rounded_vm_local_words =
       round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
  // callee_locals and max_stack are counts, not the size in frame.
  const int locals_size =
       round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
  const int max_stack_words = max_stack * Interpreter::stackElementWords;
  return (round_to((max_stack_words
                   + rounded_vm_local_words
                   + frame::memory_parameter_word_sp_offset), WordsPerLong)
                   // already rounded
                   + locals_size + monitor_size);
}
Code example #7
// Creates a CodeBlob from a CodeBuffer. Sets up the size of the different regions,
// and copies code and relocation info.
CodeBlob::CodeBlob(
  const char* name,
  CodeBuffer* cb,
  int         header_size,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps
) {
  assert(size        == round_to(size,        oopSize), "unaligned size");
  assert(header_size == round_to(header_size, oopSize), "unaligned size");

  _name                  = name;
  _size                  = size;
  _frame_complete_offset = frame_complete;
  _header_size           = header_size;
  _relocation_size       = round_to(cb->total_relocation_size(), oopSize);
  _content_offset        = align_code_offset(header_size + _relocation_size);
  _code_offset           = _content_offset + cb->total_offset_of(cb->insts());
  _data_offset           = _content_offset + round_to(cb->total_content_size(), oopSize);
  assert(_data_offset <= size, "codeBlob is too small");

  cb->copy_code_and_locs_to(this);
  set_oop_maps(oop_maps);
  _frame_size = frame_size;
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
}
Code example #8
// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parsability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        intptr_t cur_top = (intptr_t)s->top();
        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
        while (words_left_to_fill > 0) {
          size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
          assert(words_to_fill >= CollectedHeap::min_fill_size(),
                 "Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
                 words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size());
          CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
          if (!os::numa_has_static_binding()) {
            size_t touched_words = words_to_fill;
#ifndef ASSERT
            if (!ZapUnusedHeapArea) {
              touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                touched_words);
            }
#endif
            MemRegion invalid;
            HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
            HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
            if (crossing_start != crossing_end) {
              // If the object header crossed a small page boundary, we mark the area
              // as invalid, rounding it to page_size().
              HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
              HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
              invalid = MemRegion(start, end);
            }

            ls->add_invalid_region(invalid);
          }
          cur_top = cur_top + (words_to_fill * HeapWordSize);
          words_left_to_fill -= words_to_fill;
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
          return;
      }
    }
  }
}
Code example #9
// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  unsigned int size = header_size;
  size += round_to(cb->total_relocation_size(), oopSize);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += round_to(cb->total_content_size(), oopSize);
  size += round_to(cb->total_oop_size(), oopSize);
  return size;
}
Code example #10
int RelocIterator::locs_and_index_size(int code_size, int locs_size) {
  if (!UseRelocIndex)  return locs_size;   // no index
  code_size = round_to(code_size, oopSize);
  locs_size = round_to(locs_size, oopSize);
  int index_size = num_cards(code_size) * sizeof(RelocIndexEntry);
  // format of indexed relocs:
  //   relocation_begin:   relocInfo ...
  //   index:              (addr,reloc#) ...
  //                       indexSize           :relocation_end
  return locs_size + index_size + BytesPerInt;
}
Code example #11
// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parsability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        size_t area_touched_words = pointer_delta(s->end(), s->top());
        CollectedHeap::fill_with_object(s->top(), area_touched_words);
#ifndef ASSERT
        if (!ZapUnusedHeapArea) {
          area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                    area_touched_words);
        }
#endif
        if (!os::numa_has_static_binding()) {
          MemRegion invalid;
          HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
          HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
                                                       os::vm_page_size());
          if (crossing_start != crossing_end) {
            // If the object header crossed a small page boundary, we mark the area
            // as invalid, rounding it to page_size().
            HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
            HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
                                 s->end());
            invalid = MemRegion(start, end);
          }

          ls->add_invalid_region(invalid);
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
          return;
      }
    }
  }
}
Code example #12
// --- unlink ----------------------------------------------------------------
// GPGC: unlink any MSBs whose method has died.
void MethodStubBlob::GPGC_unlink( address from, address to ) {
  address adr = (address)round_to((intptr_t)from,CodeEntryAlignment);
  for( ; adr+NativeMethodStub::instruction_size < to; adr+=NativeMethodStub::instruction_size ) {
    NativeMethodStub *nms = (NativeMethodStub*)adr;
    heapRef ref = nms->get_oop();

    if (ref.not_null()) {
      assert(ref.is_old(), "CodeCache should only have old-space oops");

      if ( GPGC_ReadTrapArray::is_remap_trapped(ref) ) {
        assert0(GPGC_ReadTrapArray::is_old_gc_remap_trapped(ref));
        ref = GPGC_Collector::get_forwarded_object(ref);
      }

      assert(ref.as_oop()->is_oop(), "not oop");

      if ( ! GPGC_Marks::is_old_marked_strong_live(ref) ) {
        free_stub(nms);
      } else {
        // Any NativeMethodStub we don't free, we instead must mark through the objectRef to
        // get consistent NMT bits and remapped addresses.
        GPGC_OldCollector::mark_to_live(nms->oop_addr());
      }
    }
  }
}
Code example #13
void* VtableStub::operator new(size_t size, int code_size) throw() {
  assert(size == sizeof(VtableStub), "mismatched size");
  // compute real VtableStub size (rounded to nearest word)
  const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
  // malloc them in chunks to minimize header overhead
  const int chunk_factor = 32;
  if (_chunk == NULL || _chunk + real_size > _chunk_end) {
    const int bytes = chunk_factor * real_size + pd_code_alignment();

    // There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp
    // If changing the name, update the other file accordingly.
    BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
    if (blob == NULL) {
      return NULL;
    }
    _chunk = blob->content_begin();
    _chunk_end = _chunk + bytes;
    Forte::register_stub("vtable stub", _chunk, _chunk_end);
    align_chunk();
  }
  assert(_chunk + real_size <= _chunk_end, "bad allocation");
  void* res = _chunk;
  _chunk += real_size;
  align_chunk();
  return res;
}
Code example #14
  static void assert_byte_count_ok(size_t byte_count, size_t unit_size) {
#ifdef ASSERT
    if ((size_t)round_to(byte_count, unit_size) != byte_count) {
      basic_fatal("byte count must be aligned");
    }
#endif
  }
Code example #15
// asm based interpreter deoptimization helpers
int AbstractInterpreter::size_activation(int max_stack,
                                         int temps,
                                         int extra_args,
                                         int monitors,
                                         int callee_params,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in TemplateInterpreterGenerator::generate_method_entry.

  // fixed size of an interpreter frame:
  int overhead = frame::sender_sp_offset -
                 frame::interpreter_frame_initial_sp_offset;
  // Our locals were accounted for by the caller (or last_frame_adjust
  // on the transition). Since the callee parameters already account
  // for the callee's params, we only need to account for the extra
  // locals.
  int size = overhead +
         (callee_locals - callee_params)*Interpreter::stackElementWords +
         monitors * frame::interpreter_frame_monitor_size() +
         temps * Interpreter::stackElementWords + extra_args;

  // On AArch64 we always keep the stack pointer 16-aligned, so we
  // must round up here.
  size = round_to(size, 2);

  return size;
}
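A quick sanity check of the rounding in the AArch64 branch above: interpreter frame sizes are counted in 8-byte words here, so rounding the word count up to an even number is what keeps the frame a multiple of 16 bytes. This is only an illustration of the arithmetic, not HotSpot code.

#include <cassert>

int main() {
  const int BytesPerWord = 8;                 // 64-bit word size assumed here
  int size_in_words = 13;                     // some odd frame size in words
  int rounded = (size_in_words + 1) & ~1;     // same effect as round_to(size, 2)
  assert(rounded == 14);
  assert((rounded * BytesPerWord) % 16 == 0); // 112 bytes, 16-byte aligned
  return 0;
}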
Code example #16
File: vtableStubs.cpp Project: BaHbKaTX/openjdk
void* VtableStub::operator new(size_t size, int code_size) {
  assert(size == sizeof(VtableStub), "mismatched size");
  num_vtable_chunks++;
  // compute real VtableStub size (rounded to nearest word)
  const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
  // malloc them in chunks to minimize header overhead
  const int chunk_factor = 32;
  if (_chunk == NULL || _chunk + real_size > _chunk_end) {
    const int bytes = chunk_factor * real_size + pd_code_alignment();
    BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
    if (blob == NULL) {
      vm_exit_out_of_memory(bytes, "CodeCache: no room for vtable chunks");
    }
    _chunk = blob->instructions_begin();
    _chunk_end = _chunk + bytes;
    Forte::register_stub("vtable stub", _chunk, _chunk_end);
    // Notify JVMTI about this stub. The event will be recorded by the enclosing
    // JvmtiDynamicCodeEventCollector and posted when this thread has released
    // all locks.
    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated_while_holding_locks("vtable stub", _chunk, _chunk_end);
    }
    align_chunk();
  }
  assert(_chunk + real_size <= _chunk_end, "bad allocation");
  void* res = _chunk;
  _chunk += real_size;
  align_chunk();
  return res;
}
Code example #17
// asm based interpreter deoptimization helpers
int AbstractInterpreter::size_activation(int max_stack,
                                         int tempcount,
                                         int extra_args,
                                         int moncount,
                                         int callee_param_count,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in TemplateInterpreterGenerator::generate_fixed_frame.
  // fixed size of an interpreter frame:
  int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;

  // Our locals were accounted for by the caller (or last_frame_adjust on the transition)
  // Since the callee parameters already account for the callee's params we only need to account for
  // the extra locals.

  int size = overhead +
         ((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
         (moncount*frame::interpreter_frame_monitor_size()) +
         tempcount*Interpreter::stackElementWords + extra_args;

#ifdef AARCH64
  size = round_to(size, StackAlignmentInBytes/BytesPerWord);
#endif // AARCH64

  return size;
}
Code example #18
// --- unlink ----------------------------------------------------------------
// Unlink any MSB's whose method has died.
void MethodStubBlob::unlink( address from, address to, BoolObjectClosure* is_alive ) {
  address adr = (address)round_to((intptr_t)from,CodeEntryAlignment);
  for( ; adr+NativeMethodStub::instruction_size < to; adr+=NativeMethodStub::instruction_size ) {
    NativeMethodStub *nms = (NativeMethodStub*)adr;
    if( nms->get_oop().not_null() && !nms->is_alive(is_alive) ) 
      free_stub(nms);
  }
}
Code example #19
inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->par_mark_range(mr2);
}
Code example #20
int AbstractInterpreter::size_activation(int max_stack,
                                         int temps,
                                         int extra_args,
                                         int monitors,
                                         int callee_params,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in TemplateInterpreterGenerator::generate_fixed_frame.

  int monitor_size           = monitors * frame::interpreter_frame_monitor_size();

  assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");

  //
  // Note: if you look closely, this appears to be doing something quite different
  // from generate_fixed_frame. What is happening is this: on SPARC we have to do
  // this dance with interpreter_sp_adjustment because the window save area would
  // appear just below the bottom (tos) of the caller's java expression stack. Because
  // the interpreter wants the locals completely contiguous, generate_fixed_frame
  // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size).
  // Now, in generate_fixed_frame the extension of the caller's sp happens in the callee.
  // In this code the opposite occurs: the caller adjusts its own stack based on the callee.
  // This is mostly ok, but it does cause a problem when we get to the initial frame (the oldest),
  // because the oldest frame would have adjusted its caller's frame, and yet that frame
  // already exists and isn't part of this array of frames we are unpacking. So at first
  // glance this would seem to mess up that frame. However, Deoptimization::fetch_unroll_info_helper(),
  // after it calculates all of the frames' on_stack_size()s, will figure out the
  // amount to adjust the caller of the initial (oldest) frame, and the calculation will all
  // add up. It might seem simpler to account for the adjustment here (and remove the
  // callee... parameters here). However, this would mean that this routine would have to take
  // the caller frame as input so we could adjust its sp (and set its interpreter_sp_adjustment),
  // and run the calling loop in reverse order. This would also appear to mean making
  // this code aware of the interactions when that initial caller frame was an osr or
  // other adapter frame. Deoptimization is complicated enough and hard enough to debug that
  // there is no sense in messing with working code.
  //

  int rounded_cls = round_to((callee_locals - callee_params), WordsPerLong);
  assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");

  int raw_frame_size = size_activation_helper(rounded_cls, max_stack, monitor_size);

  return raw_frame_size;
}
Code example #21
File: unsafe.cpp Project: campolake/openjdk9
UNSAFE_ENTRY(jlong, Unsafe_AllocateMemory0(JNIEnv *env, jobject unsafe, jlong size)) {
  size_t sz = (size_t)size;

  sz = round_to(sz, HeapWordSize);
  void* x = os::malloc(sz, mtInternal);

  return addr_to_java(x);
} UNSAFE_END
Code example #22
void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on intel and we need to
  // make sure that we don't see a piece of the instruction.  It
  // appears mostly impossible on Intel to simply invalidate other
  // processors caches and since they may do aggressive prefetch it's
  // very hard to make a guess about what code might be in the icache.
  // Force the instruction to be double word aligned so that it
  // doesn't span a cache line.
  masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
}
Code example #23
File: stubs.cpp Project: gaoxiaojun/dync
void StubQueue::commit(int committed_code_size, CodeStrings& strings) {
    assert(committed_code_size > 0, "committed_code_size must be > 0");
    int committed_size = round_to(stub_code_size_to_size(committed_code_size), CodeEntryAlignment);
    Stub* s = current_stub();
    assert(committed_size <= stub_size(s), "committed size must not exceed requested size");
    stub_initialize(s, committed_size, strings);
    _queue_end += committed_size;
    _number_of_stubs++;
    if (_mutex != NULL) _mutex->unlock();
    debug_only(stub_verify(s);)
}
Code example #24
File: unsafe.cpp Project: campolake/openjdk9
UNSAFE_ENTRY(jlong, Unsafe_ReallocateMemory0(JNIEnv *env, jobject unsafe, jlong addr, jlong size)) {
  void* p = addr_from_java(addr);
  size_t sz = (size_t)size;
  sz = round_to(sz, HeapWordSize);

  void* x = os::realloc(p, sz, mtInternal);

  return addr_to_java(x);
} UNSAFE_END
Code example #25
ファイル: stubs.cpp プロジェクト: fatman2021/myforthprocessor
StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size,
                     Mutex* lock, const char* name) : _mutex(lock) {
  BufferBlob* blob = BufferBlob::create(round_to(buffer_size, 2*BytesPerWord), name);
  if( blob == NULL ) fatal1( "CodeCache: no room for %s", name);
  _stub_interface  = stub_interface;
  _buffer_size     = blob->instructions_size();
  _buffer_limit    = blob->instructions_size();
  _stub_buffer     = blob->instructions_begin();
  _queue_begin     = 0;
  _queue_end       = 0;
  _number_of_stubs = 0;
  register_queue(this);
}
Code example #26
File: sbrk.c Project: spevans/vmm386
/* Add DELTA to the kernel's break address, returning the old break
   address. Note that DELTA may be negative if you want. Returns -1
   if no more memory is available. */
void *
kernel_sbrk(long delta)
{
#ifndef SBRK_DOESNT_ALLOC
    u_char *old = kernel_brk, *ptr;
    u_long flags;
    save_flags(flags);
    cli();
    kernel_brk += round_to(delta, 4);
    if((u_long)kernel_brk < (PHYS_MAP_ADDR - KERNEL_BASE_ADDR))
    {
	ptr = (u_char *)round_to((u_long)old, PAGE_SIZE);
	while(ptr < kernel_brk)
	{
	    page *p = alloc_page();
	    if(p == NULL)
		goto error;
	    map_page(logical_kernel_pd, p, TO_LINEAR(ptr), PTE_PRESENT);
	    ptr += PAGE_SIZE;
	}
	load_flags(flags);
	return old;
    }
 error:
    kernel_brk = old;
    load_flags(flags);
    return (void *)-1;
#else
    /* Don't need to map in any pages or anything; let the page-fault-
       handler do that. Should really release any unneeded pages if
       DELTA is negative. */
    register void *ptr = kernel_brk;
    kernel_brk += round_to(delta, 4);
    if((u_long)kernel_brk < (PHYS_MAP_ADDR - KERNEL_BASE_ADDR))
	return ptr;
    kernel_brk = ptr;
    return (void *)-1;
#endif
}
Code example #27
File: codeBlob.cpp Project: netroby/jdk9-dev
// Creates a simple CodeBlob. Sets up the size of the different regions.
RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)
  : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, NULL, false /* caller_must_gc_arguments */)
{
  assert(locs_size   == round_to(locs_size,   oopSize), "unaligned size");
  assert(!UseRelocIndex, "no space allocated for reloc index yet");

  // Note: If UseRelocIndex is enabled, there needs to be (at least) one
  //       extra word for the relocation information, containing the reloc
  //       index table length. Unfortunately, the reloc index table imple-
  //       mentation is not easily understandable and thus it is not clear
  //       what exactly the format is supposed to be. For now, we just turn
  //       off the use of this table (gri 7/6/2000).
}
Code example #28
int AbstractInterpreter::size_top_interpreter_activation(methodOop method)
{
#ifdef PPC
  StackFrame frame;

  int call_stub_frame = round_to(
    StubRoutines::call_stub_base_size() +
    method->max_locals() * wordSize, StackAlignmentInBytes);

  int interpreter_frame = round_to(
    frame.unaligned_size() +
    slop_factor + 
    method->max_stack() * wordSize +
    (method->is_synchronized() ?
     frame::interpreter_frame_monitor_size() * wordSize : 0) +
    sizeof(BytecodeInterpreter), StackAlignmentInBytes);

  return (call_stub_frame + interpreter_frame) / wordSize;
#else
  Unimplemented();
#endif // PPC
}
Code example #29
// How much stack a method top interpreter activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {

  // See call_stub code
  int call_stub_size  = round_to(7 + frame::memory_parameter_word_sp_offset,
                                 WordsPerLong);    // 7 + register save area

  // Save space for one monitor to get into the interpreted method in case
  // the method is synchronized
  int monitor_size    = method->is_synchronized() ?
                                1*frame::interpreter_frame_monitor_size() : 0;
  return size_activation_helper(method->max_locals(), method->max_stack(),
                                monitor_size) + call_stub_size;
}
Code example #30
File: bytecodes.cpp Project: ismo1652/jvmnotebook
int Bytecodes::special_length_at(address bcp, address end) {
  Code code = code_at(bcp);
  switch (code) {
  case _wide:
    if (end != NULL && bcp + 1 >= end) {
      return -1; // don't read past end of code buffer
    }
    return wide_length_for(cast(*(bcp + 1)));
  case _tableswitch:
    { address aligned_bcp = (address)round_to((intptr_t)bcp + 1, jintSize);
      if (end != NULL && aligned_bcp + 3*jintSize >= end) {
        return -1; // don't read past end of code buffer
      }
      jlong lo = (jint)Bytes::get_Java_u4(aligned_bcp + 1*jintSize);
      jlong hi = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
      jlong len = (aligned_bcp - bcp) + (3 + hi - lo + 1)*jintSize;
      // only return len if it can be represented as a positive int;
      // return -1 otherwise
      return (len > 0 && len == (int)len) ? len : -1;
    }

  case _lookupswitch:      // fall through
  case _fast_binaryswitch: // fall through
  case _fast_linearswitch:
    { address aligned_bcp = (address)round_to((intptr_t)bcp + 1, jintSize);
      if (end != NULL && aligned_bcp + 2*jintSize >= end) {
        return -1; // don't read past end of code buffer
      }
      jlong npairs = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
      jlong len = (aligned_bcp - bcp) + (2 + 2*npairs)*jintSize;
      // only return len if it can be represented as a positive int;
      // return -1 otherwise
      return (len > 0 && len == (int)len) ? len : -1;
    }
  }
  // Note: Length functions must return <=0 for invalid bytecodes.
  return 0;
}