inline objectRef LVB::lvb(objectRef* addr) {
  assert0( (!VerifyJVMLockAtLVB) || verify_jvm_lock() );

  objectRef old_ref = *addr;
  if ( old_ref.is_null() ) return nullRef;
  objectRef new_ref = old_ref;

#ifdef ASSERT
  if ( RefPoisoning ) {
    // If RefPoisoning is on, then any ref not loaded from the stack must be
    // un-poisoned (xor -1) after the load and before it gets LVB'ed.
    if ( old_ref.not_null() && objectRef::needs_lvb(addr) ) { // needs an LVB ==> not stack allocated
      assert0(old_ref.is_poisoned());
      new_ref = old_ref._objref ^ -1; // So remove poison
    }
  }
#endif // ASSERT

  if ( UseLVBs ) {
    assert(UseGenPauselessGC, "We only have LVB support for GPGC");
    new_ref = GPGC_LVB::lvb_loaded_ref(new_ref, addr);
  }

  return new_ref;
}
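// Illustrative note (not from the original source): under RefPoisoning a
// heap-resident ref is stored with its bits xor'ed with -1, so a raw value of
// 0x1234 reads back from memory as 0xffffffffffffedcb; the xor -1 above
// restores 0x1234 before the GPGC load-value barrier inspects the ref.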
/**
 * check_for_overlayfs:
 *
 * Determine if the mount point used by the tests for creating temporary
 * files is using overlayfs.
 *
 * Returns: TRUE if temporary work area is on overlayfs, else FALSE.
 **/
int
check_for_overlayfs (void)
{
	struct statfs  statbuf;
	char           path[PATH_MAX];
	int            found = FALSE;

	/* Create a file in the temporary work area */
	TEST_FILENAME (path);
	fclose (fopen (path, "w"));

	/* Check it exists */
	assert0 (statfs (path, &statbuf));

	if (statbuf.f_type == OVERLAYFS_SUPER_MAGIC) {
		nih_warn ("Mountpoint for '%s' (needed by the Upstart tests) is an overlayfs "
				"filesystem, which does not support inotify.",
				path);
		found = TRUE;
	}

	assert0 (unlink (path));

	return found;
}
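/* Illustrative usage only (not from the original file): a test that depends
 * on inotify could consult check_for_overlayfs () and skip itself when the
 * temporary work area sits on overlayfs:
 *
 *	if (check_for_overlayfs ()) {
 *		printf ("SKIP: temporary work area is on overlayfs\n");
 *		return;
 *	}
 */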
void C1_MacroAssembler::method_exit(FrameMap* frame_map) {
  // offset from the expected fixed sp within the method
  int sp_offset = 0;
  // adjust SP over spills...
  sp_offset = in_bytes(frame_map->framesize_in_bytes()) - 8 - (frame_map->num_callee_saves()*8);
  if (sp_offset == 8) pop(RCX);                // pop and blow arbitrary caller save, smaller encoding than add8i
  else                add8i (RSP, sp_offset );
  if( frame_map->num_callee_saves() > 0 ) {
    int callee_save_num = 0;
    int callee_saves    = frame_map->callee_saves(); // bitmap
    for( int i=0; i<LinearScan::nof_cpu_regs; i++ ) {
      if ((callee_saves & 1<<i) != 0) {
        int wanted_sp_offset = frame_map->address_for_callee_save(callee_save_num)._disp;
        assert0( sp_offset == wanted_sp_offset );
        pop((Register)i);
        sp_offset += 8;
        callee_save_num++;
        assert0( callee_save_num <= frame_map->num_callee_saves() );
      }
    }
#ifdef ASSERT
    for( int i=0; i<LinearScan::nof_xmm_regs; i++ ) {
      int reg = LinearScan::nof_cpu_regs+i;
      assert ((callee_saves & 1<<reg) == 0, "Unexpected callee save XMM register");
    }
#endif
  }
  assert0 (sp_offset == (in_bytes(frame_map->framesize_in_bytes())-8) );
  ret   ();
}
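// Worked example of the epilogue arithmetic above (illustrative values only):
// for a hypothetical frame with framesize_in_bytes() == 32 and one callee save,
// sp_offset starts at 32 - 8 - 1*8 = 16, so add8i(RSP,16) skips the spill area;
// the single pop then advances sp_offset to 24, which equals
// framesize_in_bytes() - 8 and satisfies the final assert before ret().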
// --- pre_write_barrier_compiled
void C1_MacroAssembler::pre_write_barrier_compiled(RInOuts, Register Rtmp0, Register Rtmp1,
                                                   RKeepIns, Register Rbase, int off,  Register Rindex, int scale, Register Rval,
                                                   CodeEmitInfo *info) {
  Label retry, safe_to_store;
  if (UseSBA) bind(retry); // Come here to retry barrier following an SBA escape
  // Perform regular pre write barrier
  pre_write_barrier(RInOuts::a, Rtmp0, Rtmp1, RKeepIns::a, Rbase, off, Rindex, scale, Rval, safe_to_store);
  if( UseSBA ) {
    // SBA escape will update Rbase. Rbase may already have been added to the
    // oop map for patching. Force Rbase into oop map.
    // NB. this is the last use of the oop map!
    info->oop_map()->add((VOopReg::VR)Rbase);
    // Full SBA escape - Rtmp0 holds FID of Rbase
    // Place args to sba_escape below return address in outgoing stack frame
    assert0 (-frame::runtime_stub_frame_size + frame::xPAD == -16);
    st8(RSP, -16, Rval);
    assert0 (-frame::runtime_stub_frame_size + frame::xRAX == -24);
    st8(RSP, -24, Rbase);
    call(StubRoutines::x86::sba_escape_handler());  // Do call
assert(info,"oop map expected");
    add_oopmap(rel_pc(), info->oop_map()); // Add oop map on return address of call
    add_dbg(rel_pc(), info->debug_scope());
    jmp (retry);
  }
  bind(safe_to_store);
}
// --- verify
void CodeBlob::verify() const {
  assert0( objectRef::is_null_or_heap(&_owner) );
  assert0( owner().as_oop()->is_oop_or_null() );
  assert0( _type < bad );
  // Be more generous on size for VerifyOop and !UseInterpreter (aka Xcomp)
  debug_only( size_t max_size = (300 + 300*UseGenPauselessGC)*K*(1+VerifyOopLevel)*(UseInterpreter?1:4) );
  assert0( !UseCodeBlobSizeCheck || (_size_in_bytes >= 0 && (size_t)_size_in_bytes <= max_size) );
}
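// Example of the size bound above (illustrative, treating the flags as 1/0):
// with UseGenPauselessGC on, VerifyOopLevel == 0 and the interpreter enabled,
// max_size = (300 + 300*1) * K * (1+0) * 1 = 600K bytes.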
inline GPGC_PageInfo* GPGC_PageInfo::page_info_unchecked(PageNum page)
{
  GPGC_PageInfo* result = &(_page_info[page]);

  assert0(_reserved_region.contains(result));
  assert0(_array_pages_mapped[GPGC_Layout::addr_to_DataPageNum(result)]);

  return result;
}
//--- DerivedPointerTable::add -----------------------------------------------
// Called during scavenge/GC
void DerivedPointerTable::add(objectRef* base, objectRef* derived) {
  assert0( !UseGenPauselessGC );

  assert_lock_strong(DerivedPointerTableGC_lock);
  assert0( _active );
  _base_derived_pairs->push((intptr_t)base);
  _base_derived_pairs->push((intptr_t)derived);
  intptr_t offset = (*derived).raw_value() - (*base).raw_value();
  assert(offset >= -1000000, "wrong derived pointer info");
  _base_derived_pairs->push(offset);
}
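// Illustrative sketch (not from this source): after GC has adjusted the base
// pointers, each recorded (base, derived, offset) triple would be replayed
// roughly like this, rebuilding the derived pointer from the moved base:
//
//   objectRef* base    = (objectRef*)_base_derived_pairs->at(i);
//   objectRef* derived = (objectRef*)_base_derived_pairs->at(i+1);
//   intptr_t   offset  =             _base_derived_pairs->at(i+2);
//   *derived = <ref with raw value ((*base).raw_value() + offset)>;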
// When we don't have enough sideband relocation array space to relocate every page
// originally selected for relocation, this method is called.  We reset the
// relocation cutoff, calculate the amount of garbage not reclaimed by the change,
// and update the size of garbage selected for reclamation.
void GPGC_PopulationArray::sideband_limit_reclaim_cutoff(uint32_t max_cursor) {
  assert0(_max_cursor >= 0);
  assert0(max_cursor  >= 0);
  assert0(max_cursor < _max_cursor);
  assert0(_sideband_limited_words == 0);

  for ( uint64_t cursor=max_cursor; cursor<_max_cursor; cursor++ ) {
    _sideband_limited_words += _array[cursor].dead_words;
  }

  _max_cursor = max_cursor;
}
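// A minimal standalone sketch of the cutoff arithmetic above, with hypothetical
// names (ToyEntry, unreclaimed_after_cutoff are not from the original source):
// lowering the cutoff moves the dead words of the dropped entries into the
// "not reclaimed this cycle" total.
#include <cstdint>
#include <cassert>

struct ToyEntry { uint64_t dead_words; };

static uint64_t unreclaimed_after_cutoff(const ToyEntry* array,
                                         uint32_t old_max_cursor,
                                         uint32_t new_max_cursor) {
  assert(new_max_cursor < old_max_cursor);
  uint64_t skipped = 0;
  for (uint32_t c = new_max_cursor; c < old_max_cursor; c++)
    skipped += array[c].dead_words;   // garbage no longer reclaimed this cycle
  return skipped;
}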
void
test_daemonise (void)
{
	pid_t pid;
	char  result[2];
	int   status, fds[2];

	/* Check that nih_main_daemonise does all of the right things,
	 * our immediate child should exit with a zero status, and the
	 * child within that should be run with a working directory of /
	 */
	TEST_FUNCTION ("nih_main_daemonise");

	assert0 (pipe (fds));
	TEST_CHILD (pid) {
		char buf[80];
		int  fd;

		program_name = "test";
		fd = open ("/dev/null", O_WRONLY);
		assert (fd >= 0);
		assert (dup2 (fd, STDERR_FILENO) >= 0);
		assert0 (close (fd));

		if (nih_main_daemonise () < 0)
			exit (50);

		assert (getcwd (buf, sizeof (buf)));
		if (strcmp (buf, "/")) {
			assert (write (fds[1], "wd", 2) == 2);
			exit (10);
		}

		assert (write (fds[1], "ok", 2) == 2);
		exit (10);
	}

	waitpid (pid, &status, 0);

	TEST_TRUE (WIFEXITED (status));
	TEST_EQ (WEXITSTATUS (status), 0);

	if (read (fds[0], result, 2) != 2)
		TEST_FAILED ("expected return code from child");

	if (! memcmp (result, "wd", 2))
		TEST_FAILED ("wrong working directory for child");

	if (memcmp (result, "ok", 2))
		TEST_FAILED ("wrong return code from child, expected 'ok' got '%.2s'",
			     result);
}
inline void GPGC_PageInfo::reset_unmap_free_stats()
{
  uint64_t old_flags;
  uint64_t new_flags;

  do {
    old_flags = _flags;
    assert0( (old_flags&PinnedFlag)         == 0 );
    assert0( (old_flags&UnmapFreeStatsFlag) == 0 );
    assert0( (old_flags>>CountShift)==0 || (old_flags&UnshatterFreeStatsFlag)!=0 );
    new_flags = old_flags & AllButCountMask;
    new_flags = new_flags & ~AllFreeStatFlags;
    new_flags = new_flags | UnmapFreeStatsFlag;
  } while ( jlong(old_flags) != Atomic::cmpxchg(jlong(new_flags), (jlong*)&_flags, jlong(old_flags)) );
}
void C1_MacroAssembler::allocate(Register RDX_size_in_bytes, Register RCX_element_count_ekid, Register RSI_kid, Register RAX_oop, address alloc_stub, Label &slow_case) {
  // RDX: size in bytes
  // RCX: (EKID<<32 | element count)
  // RSI: kid
  assert0( RDX == RDX_size_in_bytes );
  assert0( RCX == RCX_element_count_ekid );
  assert0( RSI == RSI_kid );
  NativeAllocationTemplate::fill(this, alloc_stub);
  assert0( RAX == RAX_oop );
  // Z - failed, RAX - blown, RDX, RCX, RSI all preserved for the slow-path.
  // NZ- OK, R09- new oop, stripped; RAX - new oop w/mark, R10- preserved as ary length
  jeq  (slow_case);
  verify_not_null_oop(RAX_oop, MacroAssembler::OopVerify_NewOop);
  // Fall into the next code, with RAX holding correct value
}
inline void GPGC_ReadTrapArray::init_trap(PageNum page, long pages, bool new_gen_page, bool in_large_space)
{
  long nmt_flag = new_gen_page ? GPGC_NMT::desired_new_nmt_flag() : GPGC_NMT::desired_old_nmt_flag();
  long upcoming_nmt = new_gen_page ? GPGC_NMT::upcoming_new_nmt_flag() : GPGC_NMT::upcoming_old_nmt_flag();

  // Invariant: objects allocated in this page will have the right NMT bit
  // so the index derived from the objectref should have an UnTrapped entry.
  uint32_t nmt_index        = uint32_t(page) | ((nmt_flag)  << nmt_index_bit);
  uint32_t nmt_mirror_index = uint32_t(page) | ((!nmt_flag) << nmt_index_bit);

  uint8_t  trap_state       = UnTrapped;
  uint8_t  mirror_state     = NMTTrapped;

  _read_barrier_array[nmt_index]        = trap_state;
  _read_barrier_array[nmt_mirror_index] = mirror_state;

  if ( in_large_space ) {
    // Large space pages after the first one shouldn't be getting tested for trap state.
    trap_state   = InvalidTrapState;
    mirror_state = InvalidTrapState;
  }

  for ( long i=1; i<pages; i++ ) {
    assert0( GPGC_Layout::page_in_heap_range(page+i) );

    // the mutator visible rb-array entries reflect regular init value
    _read_barrier_array[nmt_index + i]        = trap_state;
    _read_barrier_array[nmt_mirror_index + i] = mirror_state;
  }

  set_dupe_array_entries(page, pages, new_gen_page, UnTrapped, NMTTrapped, in_large_space, upcoming_nmt);

  return;
}
void GPGC_ThreadCleaner::enable_thread_self_cleaning(TimeDetailTracer* tdt, JavaThread::SuspendFlags flag)
{
  DetailTracer dt(tdt, false, "%s: Enable self cleaning", (flag==JavaThread::gpgc_clean_new?"N":"O"));

  assert0(flag==JavaThread::gpgc_clean_new || flag==JavaThread::gpgc_clean_old);
  assert0(GPGC_Safepoint::is_at_safepoint());

  for ( JavaThread* jt=Threads::first(); jt!=NULL; jt=jt->next() ) {
    // TODO: in the second relocation safepoint of the OldGC, we expect to find threads which still have
    // their clean flag set, so this assert is only valid for the other thread-cleaning starts.
    //
    //assert0( ! (jt->please_self_suspend() & flag) );
    jt->set_suspend_request_no_polling( flag );
    jt->reset_unshattered_page_trap_count();
  }
}
// --- unlink ----------------------------------------------------------------
// GPGC: unlink any method stubs (MSBs) whose method has died.
void MethodStubBlob::GPGC_unlink( address from, address to ) {
  address adr = (address)round_to((intptr_t)from,CodeEntryAlignment);
  for( ; adr+NativeMethodStub::instruction_size < to; adr+=NativeMethodStub::instruction_size ) {
    NativeMethodStub *nms = (NativeMethodStub*)adr;
    heapRef ref = nms->get_oop();

    if( ref.not_null() ) {
      assert(ref.is_old(), "CodeCache should only have old-space oops");

      if ( GPGC_ReadTrapArray::is_remap_trapped(ref) ) {
        assert0(GPGC_ReadTrapArray::is_old_gc_remap_trapped(ref));
        ref = GPGC_Collector::get_forwarded_object(ref);
      }

      assert(ref.as_oop()->is_oop(), "not oop");

      if ( ! GPGC_Marks::is_old_marked_strong_live(ref) ) {
        free_stub(nms);
      } else {
        // Any NativeMethodStub we don't free, we instead must mark through the objectRef to
        // get consistent NMT bits and remapped addresses.
        GPGC_OldCollector::mark_to_live(nms->oop_addr());
      }
    }
  }
}
void vframe::set_java_local(int index, jvalue value, BasicType type) {
  assert0(_fr.is_interpreted_frame());
  intptr_t buf = 0;
  address addr = ((address)(&buf)) + sizeof(intptr_t);
  JavaValue v(type);
  switch (type) {
    case T_BOOLEAN:  *(jboolean *)(addr - sizeof(jboolean)) = value.z; break;
    case T_FLOAT:    *(jfloat *)(addr - sizeof(jfloat))     = value.f; break;
    case T_DOUBLE:   *(jdouble *)(addr - sizeof(jdouble))   = value.d; break;

      // these need to be sign-extended to 64 bits
    case T_CHAR:     *(intptr_t *)(addr - sizeof(intptr_t)) = (intptr_t)value.c; break;
    case T_BYTE:     *(intptr_t *)(addr - sizeof(intptr_t)) = (intptr_t)value.b; break;
    case T_SHORT:    *(intptr_t *)(addr - sizeof(intptr_t)) = (intptr_t)value.s; break;
    case T_INT:      *(intptr_t *)(addr - sizeof(intptr_t)) = (intptr_t)value.i; break;
    case T_LONG:     *(intptr_t *)(addr - sizeof(intptr_t)) = (intptr_t)value.j; break;

    case T_OBJECT:   // fall-through
    case T_ARRAY:    *((objectRef*)(addr - sizeof(jobject))) = *((objectRef*) value.l); break;
    default:         ShouldNotReachHere();
  }
  // Azul: in interpreter we use two stack slots for doubles, longs --
  // value in 2nd slot -- so use index+1 for them
  if( type == T_LONG || type == T_DOUBLE ) {
    get_frame().set_interpreter_frame_local_at(index+1, buf);
  } else {
    get_frame().set_interpreter_frame_local_at(index, buf);
  }
}
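// Illustrative slot layout (not from the original source): for locals
// (int a, long b, int c) the interpreter assigns a to slot 0, b to slots 1-2
// with its value living in the second slot, and c to slot 3; this is why
// set_java_local() writes T_LONG and T_DOUBLE values at index+1.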
AggregateOrdered ET::match(const Aggregate s,AggregateOrdered positions, Obj pat,AggregateOrdered r){
	SWITCHstart
		CASEIF(pat.isInstanceOf(classInt))
			uWord sn=s.elements();
			// Loop over the positions: if a position is in range and the pattern
			// equals the value at that position, add the following position to
			// the results.
			for(auto p:positions){
				if(0<=p && p<sn && pat.w==s[p]) 
					r&=p+1; //&= adds a single object to an ordered aggregate (if it doesn't exists already)
			}
		CASEIF(pat.isInstanceOf(classDouble))
			uWord sn=s.elements();
			for(auto p:positions){
				if(0<=p && p<sn && compare(pat,s[p])==0) 
					r&=p+1; //&= adds a single object to an ordered aggregate (if it doesn't exists already)
			}
		CASEIF(pat.isInstanceOf(classPatternAll))
			AggregateOrdered r1=positions;
			for(auto pat1:(Aggregate)pat)
				r1=match(s,r1,pat1);
			if(r.elements()==0) r=r1; else r+=r1;
		CASEIF(pat.isInstanceOf(classPatternAny))
			for(auto pat1:(Aggregate)pat)
				match(s,positions,pat1,r);
		CASEIF(pat.isInstanceOf(classAggregate))
			stack().push(positions);
			execute(pat,true); // should this execute independently of the execution status? this should be explained
			r=stack().pop();
		DEFAULT
			assert0(false,"unexpected in matchAll");
	endSWITCH
	return r;
}
// inserts into the current aggregate, at position pos, a slice of a given aggregate covering positions i1 to i2
void Aggregate::insertSlice(Aggregate a,Word pos,Word i1,Word i2){
	ADJUSTOFFSET(elements(),pos);
	ADJUSTOFFSET2(a.elements(),i1,i2);
	assert0(i1<=i2,"insertSlice");
	Word sz=i2-i1;checkResize(sz);
	elements(elements()+sz);
	WordMove(dataP()+pos+sz,dataP()+pos,elements()-pos);
	WordCpy(dataP()+pos,a.dataP()+i1,sz);
}
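// Worked illustration (hypothetical values, not from the source): inserting a
// 2-word slice [a1 a2] at pos=1 into [x0 x1 x2] first shifts the tail right
// by sz=2 words (WordMove), then copies the slice into the gap (WordCpy):
//   before : x0 x1 x2 .  .
//   shift  : x0 .  .  x1 x2
//   copy   : x0 a1 a2 x1 x2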
// --- generate
address MethodStubBlob::generate( heapRef moop, address c2i_adapter ) {
  // NativeMethodStubs must be jumped-to directly and are packed back-to-back.
  // Hence they start CodeEntryAligned, and each later one has to be
  // CodeEntryAligned so we expect the instruction_size to be a multiple.
  assert0( round_to(NativeMethodStub::instruction_size,CodeEntryAlignment) == NativeMethodStub::instruction_size );
  NativeMethodStub *nms;
  do {
    // The _free_list is a racing CAS-managed link-list.  Must read the
    // _free_list exactly ONCE before the CAS attempt below, or otherwise know
    // we have something that used to be on the free_list and is not-null.  In
    // general, if we re-read the free_list we have to null-check the result.
    nms = _free_list;
    if( !nms ) {
      // CodeCache makes CodeBlobs.  Make a CodeBlob typed as a methodCodeStub.
      CodeBlob *cb = CodeCache::malloc_CodeBlob( CodeBlob::methodstub, 256*NativeMethodStub::instruction_size );
      address adr = (address)round_to((intptr_t)cb->code_begins(),CodeEntryAlignment);
      cb->_code_start_offset = adr-(address)cb->_code_begins;
      while( adr+NativeMethodStub::instruction_size < cb->end() ) {
        free_stub((NativeMethodStub*)adr);
        adr += NativeMethodStub::instruction_size;
      }
      // The last not-null thing jammed on the freelist.
      nms = (NativeMethodStub*)(adr-NativeMethodStub::instruction_size);
    }
  } while( Atomic::cmpxchg_ptr(*(NativeMethodStub**)nms,&_free_list,nms) != nms );
  nms->fill( moop, c2i_adapter );
  return (address)nms;
}
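// The free-list pop in generate() above is the classic lock-free stack pop:
// read the head once, then CAS the head over to the node's next pointer (kept
// in the first word of the stub).  A minimal standalone sketch with std::atomic
// and hypothetical names (StubNode, pop_stub are not from the original source):
#include <atomic>

struct StubNode { StubNode* next; };

static StubNode* pop_stub(std::atomic<StubNode*>& free_list) {
  StubNode* head = free_list.load();
  while (head != nullptr &&
         !free_list.compare_exchange_weak(head, head->next)) {
    // compare_exchange_weak reloads head on failure; just retry
  }
  return head;   // nullptr means the list was empty
}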
int main(){

	std::cout<<"main started mmStart"<<&M<<std::endl;
	assert0(chdir("/home/gk/Dropbox/workspace01/napl22")==0,"unable to change dir");
	stopETs.store(false,0);
	ETs.reserve(0x100);
	compact(M);
#if true
	int fin, i = -1;
	if( (fin=open("naplStart0.napl",0,S_IRUSR)) >= 0 ){
		i = startET(fin, dup(fileno(stdout)));
		// wait for the start script's thread to finish, then join it
		while( !ETs[i].threadEnded ){}
		if( ETs[i].joinable() )
			ETs[i].join();
	}
#endif
	HandleIncomingConnections(8000); 

	for(size_t i=0;i<ETs.size();i++){
		std::cout << "ETs[" << i << "] threadEnded=" << ETs[i].threadEnded << " joinable=" << ETs[i].joinable() << std::endl;
		if(ETs[i].joinable()) ETs[i].join();
	}

	for(size_t i=0;i<ETs.size();i++){
		std::cout << "ETs[" << i << "] threadEnded=" << ETs[i].threadEnded << " joinable=" << ETs[i].joinable() << std::endl;
	}

	return 0;
}
// --- debuginfo -------------------------------------------------------------
const DebugScope *CodeBlob::debuginfo(address pc) const {
  assert0( CodeBlob::code_begins() <= pc && pc < CodeBlob::code_ends()+1 );
  int relpc = pc - (address)this;
  methodCodeOop mcoop = owner().as_methodCodeOop();
  if( mcoop->_blob != this ) return NULL; // vtable stubs share the mcoop with the real blob
  return mcoop->_debuginfo->get(relpc);
}
  // return a cached temporary operand
  LIR_Opr get_temp(int i, BasicType type) {
    if (i < FrameMap::pd_max_temp_vregs) {
      assert0(_temp_vregs[i]->type_register() == type);
      return _temp_vregs[i];
    } else {
      return new_register(type);
    }
  }
    // void do_codeblob(CodeBlob *cb) { cb->oops_do(this); }
    void do_oop(objectRef* p) {
      assert0(!Thread::current()->is_gc_mode());
#ifdef ASSERT
      if ( RefPoisoning ) {
        LVB::permissive_poison_lvb(p);
      } else
#endif // ASSERT
      lvb_ref(p);
    }
inline void GPGC_GCManagerOldFinal::mark_through(objectRef ref, int referrer_kid)
{
  oop obj = ref.as_oop();
  PageNum page = GPGC_Layout::addr_to_PageNum(obj);

  assert0(GPGC_Layout::page_in_heap_range(page));
  assert0(!GPGC_ReadTrapArray::is_remap_trapped(ref));
  assert0(GPGC_Marks::is_old_marked_final_live(ref));

  if ( GPGC_Marks::is_old_marked_strong_live(obj) ) {
    // FinalLive marking terminates when we find an object that's StrongLive
    return;
  }

  GPGC_Marks::set_marked_through_final(obj);

  obj->GPGC_follow_contents(this);
}
void GPGC_PopulationArray::assert_only_allocated_pages()
{
  for ( uint64_t cursor=0; cursor<max_cursor(); cursor++ ) {
    PageNum        page = this->page(cursor);
    GPGC_PageInfo* info = GPGC_PageInfo::page_info(page);

    assert0(info->just_state() == GPGC_PageInfo::Allocated );
  }
}
inline void GPGC_PageInfo::atomic_clear_flag(Flags flag)
{
  uint64_t old_flags;
  uint64_t new_flags;

  do {
    old_flags = _flags;
    assert0( (old_flags&flag) != 0 );
    new_flags = old_flags & ~flag;
  } while ( jlong(old_flags) != Atomic::cmpxchg(jlong(new_flags), (jlong*)&_flags, jlong(old_flags)) );
}
inline void GPGC_PageInfo::atomic_set_flag(Flags flag, Flags assert_not_flag)
{
  uint64_t old_flags;
  uint64_t new_flags;

  do {
    old_flags = _flags;
    assert0( (old_flags&assert_not_flag) == 0 );
    new_flags = old_flags | flag;
  } while ( jlong(old_flags) != Atomic::cmpxchg(jlong(new_flags), (jlong*)&_flags, jlong(old_flags)) );
}
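// atomic_clear_flag() and atomic_set_flag() above (like reset_unmap_free_stats()
// earlier) follow the standard CAS retry loop: read the flags, compute the new
// value, retry if another thread changed the flags in between.  A minimal
// standalone sketch with std::atomic and a hypothetical name (atomic_set_bits):
#include <atomic>
#include <cstdint>

static void atomic_set_bits(std::atomic<uint64_t>& flags, uint64_t bits) {
  uint64_t old_flags = flags.load();
  uint64_t new_flags;
  do {
    new_flags = old_flags | bits;                                // compute the desired update
  } while (!flags.compare_exchange_weak(old_flags, new_flags));  // retry on contention
}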
  void invariants() const {
    assert(top() >= start() && top() <= end() && end() <= real_end(), "invalid tlab");
    // Verify the CLZ area
    HeapWord* a = top();
    if( a ) {
      int extra_bytes = (((intptr_t)a + (BytesPerCacheLine-1)) & -BytesPerCacheLine) - (intptr_t)a;
      int clz_word_count = (TLABZeroRegion + extra_bytes) / sizeof(HeapWord);
      for( int i=0; i<clz_word_count; i++ )
        assert0( ((intptr_t*)a)[i] == 0 );
    }
  }
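  // Worked example of the CLZ arithmetic above (illustrative, assuming a
  // 64-byte cache line): with top() ending in 0x28,
  //   extra_bytes    = ((0x28 + 63) & -64) - 0x28 = 0x40 - 0x28 = 24
  //   clz_word_count = (TLABZeroRegion + 24) / sizeof(HeapWord)
  // i.e. the zeroed region spans TLABZeroRegion bytes plus the tail of the
  // current cache line, and every word of it must still read as zero.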
// --- ref_cas_with_check
// Write barriered compare of Rcmp with memory, if equal set memory to Rval. Set
// flags dependent on success. Returns relpc of where NPE info should be added.
// NB: on failure Rcmp contains the value from memory; this will be poisoned and
// not LVB-ed, i.e. you shouldn't use this value.
int C1_MacroAssembler::ref_cas_with_check(RInOuts, Register Rbase, Register Rcmp, Register Rtmp0, Register Rtmp1,
                                          RKeepIns, Register Rindex, int off, int scale, Register Rval,
                                          CodeEmitInfo *info) {
  assert0( Rcmp == RAX );
  Label checked, strip;

#ifdef ASSERT
  verify_oop(Rval, MacroAssembler::OopVerify_Store);
  verify_oop(Rcmp, MacroAssembler::OopVerify_Move);
  if (RefPoisoning) move8(Rtmp1,Rval); // Save Rval
#endif

  null_chk( Rval,strip  ); // NULLs are always safe to store

  pre_write_barrier_compiled(RInOuts::a, Rtmp0, Rtmp1,
                             RKeepIns::a, Rbase, off, Rindex, scale, Rval,
                             info);

#ifdef ASSERT
  if (RefPoisoning) {
    mov8  (Rtmp1,Rval);  // Save Rval again as it will have been squashed by the barrier
    always_poison(Rval); // Must store poisoned ref
  }
#endif // ASSERT
  bind(strip);
  verify_not_null_oop(Rbase, MacroAssembler::OopVerify_StoreBase);
  cvta2va(Rbase);
#ifdef ASSERT
  if (RefPoisoning) {
    poison(Rcmp);      // Compared register must also be poisoned
  }
#endif // ASSERT
  bind(checked);
  int npe_relpc = rel_pc();
#ifdef ASSERT
  // check the value to be cas-ed is an oop, npe on this rather than the store
  if (should_verify_oop(MacroAssembler::OopVerify_OverWrittenField))
    npe_relpc = verify_oop (Rbase, off, Rindex, scale, MacroAssembler::OopVerify_OverWrittenField);
#endif
  if (Rindex == noreg) locked()->cas8 (Rbase, off, Rval);
  else                 locked()->cas8 (Rbase, off, Rindex, scale, Rval);
#ifdef ASSERT
  pushf();
  if (RefPoisoning) {
    mov8    (Rval,Rtmp1); // Restore unpoisoned Rval
    unpoison(Rcmp);       // Compared register must also be unpoisoned
  }
  verify_oop(Rval, MacroAssembler::OopVerify_Move);
  verify_oop(Rcmp, MacroAssembler::OopVerify_Move);
  popf();
#endif
  return npe_relpc;
}
void vframe::print_to_xml_lock_info(JavaThread* thread, bool youngest, xmlBuffer* xb) {
  ResourceMark rm;
  frame fr = get_frame();       // Shorthand notation

  // First, assume we have the monitor locked.  If we haven't found an owned
  // monitor before and this is the first frame, then we need to see if the
  // thread is blocked.
  bool first = (youngest && thread->is_hint_blocked());

  // Print out all monitors that we have locked or are trying to lock
  if( fr.is_interpreted_frame() ) {
    int x = fr.interpreter_frame_monitor_count();
    // Not Correct; this always (re)prints the most recent X monitors
    for( int i=0; i<x; i++ ) {
      first = print_to_xml_lock( first, ALWAYS_UNPOISON_OBJECTREF(thread->_lckstk_top[-i-1]), false, xb );
    }      
    
  } else if( fr.is_native_frame() ) {
    CodeBlob* cb = CodeCache::find_blob(fr.pc());
    assert0( cb->is_native_method() );
    methodCodeOop mco = cb->owner().as_methodCodeOop();
    methodOop moop = mco->method().as_methodOop();
    bool is_object_wait = youngest && moop->name() == vmSymbols::wait_name() && 
      instanceKlass::cast(moop->method_holder())->name() == vmSymbols::java_lang_Object();
    if( moop->is_synchronized() && moop->is_static() ) {
      first = print_to_xml_lock( first, objectRef(Klass::cast(moop->method_holder())->java_mirror()), false, xb );
    } else if( is_object_wait ) {
      // For synchronized native methods, there should be a single lock.
      // For object.wait, there is a single oop argument being wait'd upon.
      const RegMap *lm = cb->oop_maps();
      VOopReg::VR lck = lm->get_sole_oop(cb->rel_pc(fr.pc()));
      objectRef *loc = fr.reg_to_addr_oop(lck);
      first = print_to_xml_lock( first, *loc, is_object_wait, xb );
    } else if( moop->is_synchronized() ) {
      // For synchronized native methods, there should be a single lock.
      const DebugScope *ds = scope();
      DebugScopeValue::Name lck = ds->get_lock(0);
      objectRef *loc = (objectRef*)fr.reg_to_addr(DebugScopeValue::to_vreg(lck));
      first = print_to_xml_lock( first, *loc, is_object_wait, xb );
    } else if (thread->current_park_blocker() != NULL) {
      oop obj = thread->current_park_blocker();
      first = print_to_xml_lock( first, objectRef(obj), false, xb );
    }

  } else {                      // Hopefully a compiled frame
    const DebugScope *ds = scope();
    for( uint i=0; i<ds->numlocks(); i++ ) {
      DebugScopeValue::Name lck = ds->get_lock(i);
      first = print_to_xml_lock( first, *fr.reg_to_addr(DebugScopeValue::to_vreg(lck)), false, xb );
    }
  }
}
inline void GPGC_ReadTrapArray::clear_trap_on_page(PageNum page, long pages)
{
  uint32_t  read_barrier_index        = uint32_t(page) | (0U << nmt_index_bit);
  uint32_t  mirror_read_barrier_index = uint32_t(page) | (1U << nmt_index_bit);

  for( uint32_t i=0; i<pages; i++ ) {
    assert0( GPGC_Layout::page_in_heap_range(page+i) );
    _dupe_read_barrier_array[read_barrier_index]          = ClearTrapState; 
    _dupe_read_barrier_array[mirror_read_barrier_index]   = ClearTrapState;
    _read_barrier_array[read_barrier_index+i]             = ClearTrapState;
    _read_barrier_array[mirror_read_barrier_index+i]      = ClearTrapState;
  }
}