Example #1
0
void
G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
    jbyte* byte = byte_for(mr.start());
    jbyte* last_byte = byte_for(mr.last());
    Thread* thr = Thread::current();
    if (whole_heap) {
        while (byte <= last_byte) {
            *byte = dirty_card;
            byte++;
        }
    } else {
        // Enqueue if necessary.
        if (thr->is_Java_thread()) {
            JavaThread* jt = (JavaThread*)thr;
            while (byte <= last_byte) {
                if (*byte != dirty_card) {
                    *byte = dirty_card;
                    jt->dirty_card_queue().enqueue(byte);
                }
                byte++;
            }
        } else {
            MutexLockerEx x(Shared_DirtyCardQ_lock,
                            Mutex::_no_safepoint_check_flag);
            while (byte <= last_byte) {
                if (*byte != dirty_card) {
                    *byte = dirty_card;
                    _dcqs.shared_dirty_card_queue()->enqueue(byte);
                }
                byte++;
            }
        }
    }
}
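The byte_for()/addr_for() calls above hide the card-table indexing. Below is a minimal, self-contained sketch of that address-to-card mapping, assuming HotSpot's usual 512-byte cards; the ToyCardTable type and its fields are illustrative only, not the real CardTableModRefBS layout.

#include <cstdint>
#include <cstdio>

struct ToyCardTable {
    static const int card_shift = 9;                       // 512-byte cards (HotSpot default)
    static const size_t card_size = size_t(1) << card_shift;

    uintptr_t heap_start;
    uint8_t   cards[1024];                                  // one card byte per 512 heap bytes

    explicit ToyCardTable(uintptr_t start) : heap_start(start) {}

    // Card byte covering the given heap address.
    uint8_t* byte_for(uintptr_t addr) {
        return &cards[(addr - heap_start) >> card_shift];
    }
    // First heap address covered by the given card byte.
    uintptr_t addr_for(const uint8_t* card) const {
        return heap_start + (uintptr_t(card - cards) << card_shift);
    }
};

int main() {
    ToyCardTable ct(0x100000);
    uint8_t* c = ct.byte_for(0x100000 + 3 * ToyCardTable::card_size + 17);
    std::printf("card index %ld covers heap addresses starting at %#lx\n",
                (long)(c - ct.cards), (unsigned long)ct.addr_for(c));
    return 0;
}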
Example #2
0
void
CardTableModRefBS::
process_stride(Space* sp,
               MemRegion used,
               jint stride, int n_strides,
               DirtyCardToOopClosure* dcto_cl,
               MemRegionClosure* cl,
               bool clear,
               jbyte** lowest_non_clean,
               uintptr_t lowest_non_clean_base_chunk_index,
               size_t    lowest_non_clean_chunk_size) {
  // We don't have to go downwards here; it wouldn't help anyway,
  // because of parallelism.

  // Find the first card address of the first chunk in the stride that is
  // at least "bottom" of the used region.
  jbyte*    start_card  = byte_for(used.start());
  jbyte*    end_card    = byte_after(used.last());
  uintptr_t start_chunk = addr_to_chunk_index(used.start());
  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
  jbyte* chunk_card_start;

  if ((uintptr_t)stride >= start_chunk_stride_num) {
    chunk_card_start = (jbyte*)(start_card +
                                (stride - start_chunk_stride_num) *
                                CardsPerStrideChunk);
  } else {
    // Go ahead to the next chunk group boundary, then to the requested stride.
    chunk_card_start = (jbyte*)(start_card +
                                (n_strides - start_chunk_stride_num + stride) *
                                CardsPerStrideChunk);
  }

  while (chunk_card_start < end_card) {
    // We don't have to go downwards here; it wouldn't help anyway,
    // because of parallelism.  (We take care with "min_done"; see below.)
    // Invariant: chunk_mr should be fully contained within the "used" region.
    jbyte*    chunk_card_end = chunk_card_start + CardsPerStrideChunk;
    MemRegion chunk_mr       = MemRegion(addr_for(chunk_card_start),
                                         chunk_card_end >= end_card ?
                                           used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
                             chunk_mr,
                             used,
                             lowest_non_clean,
                             lowest_non_clean_base_chunk_index,
                             lowest_non_clean_chunk_size);

    non_clean_card_iterate_work(chunk_mr, cl, clear);

    // Find the next chunk of the stride.
    chunk_card_start += CardsPerStrideChunk * n_strides;
  }
}
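The chunk_card_start arithmetic above deals chunks out to strides round-robin. A small sketch of the same case split, with made-up chunk indices and stride count (none of these values come from the source), shows which chunks each stride visits:

#include <cstdio>

int main() {
    // Illustrative values only.
    const int n_strides   = 4;   // n_threads * strides-per-thread
    const int first_chunk = 10;  // chunk index containing used.start()
    const int last_chunk  = 25;  // chunk index containing used.last()

    const int start_stride = first_chunk % n_strides;   // start_chunk_stride_num
    for (int stride = 0; stride < n_strides; stride++) {
        // Same case split as in process_stride(): either stay within the
        // current chunk group or advance to the next group boundary first.
        int offset = (stride >= start_stride)
                         ? stride - start_stride
                         : n_strides - start_stride + stride;
        std::printf("stride %d:", stride);
        for (int ch = first_chunk + offset; ch <= last_chunk; ch += n_strides) {
            std::printf(" chunk %d", ch);
        }
        std::printf("\n");
    }
    return 0;
}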
Example #3
0
void CardTableRS::clear_MemRegion(MemRegion mr) {
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  assert(addr_for(cur) == mr.start(), "region must be card aligned");
  while (cur < last) {
    *cur = CardTableModRefBS::clean_card;
    cur++;
  }
}
Example #4
0
void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
  // We don't need to do young-gen spaces.
  if (s->end() <= gen_boundary) return;
  MemRegion used = s->used_region();

  jbyte* cur_entry = byte_for(used.start());
  jbyte* limit = byte_after(used.last());
  while (cur_entry < limit) {
    if (*cur_entry == CardTableModRefBS::clean_card) {
      jbyte* first_dirty = cur_entry+1;
      while (first_dirty < limit &&
             *first_dirty == CardTableModRefBS::clean_card)
        first_dirty++;
      // If the first object is a regular object, and it has a
      // young-to-old field, that would mark the previous card.
      HeapWord* boundary = addr_for(cur_entry);
      HeapWord* end = addr_for(first_dirty);
      HeapWord* boundary_block = s->block_start(boundary);
      HeapWord* begin = boundary;             // Until proven otherwise.
      HeapWord* start_block = boundary_block; // Until proven otherwise.
      if (boundary_block < boundary) {
        if (s->block_is_obj(boundary_block)) {
          oop boundary_obj = oop(boundary_block);
          if (!boundary_obj->is_objArray() &&
              !boundary_obj->is_typeArray()) {
            guarantee(cur_entry > byte_for(used.start()),
                      "else boundary would be boundary_block");
            if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) {
              begin = boundary_block + s->block_size(boundary_block);
              start_block = begin;
            }
          }
        }
      }
      // Now traverse objects until end.
      HeapWord* cur = start_block;
      VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
      while (cur < end) {
        if (s->block_is_obj(cur)) {
          oop(cur)->oop_iterate(&verify_blk);
        }
        cur += s->block_size(cur);
      }
      cur_entry = first_dirty;
    } else {
      guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
                "Illegal CT value");
      // If we're in the parallel case, the cur and prev values are
      // different, and we can't have left a prev in the table.
      guarantee(cur_youngergen_card_val() == youngergen_card
                || !is_prev_youngergen_card_val(*cur_entry),
                "Illegal CT value");
      cur_entry++;
    }
  }
}
Example #5
0
void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
  assert(mr.word_size() > 0, "Error");
  assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
  // mr.end() may not necessarily be card aligned.
  jbyte* cur_entry = _ct->byte_for(mr.last());
  const jbyte* limit = _ct->byte_for(mr.start());
  HeapWord* end_of_non_clean = mr.end();
  HeapWord* start_of_non_clean = end_of_non_clean;
  while (cur_entry >= limit) {
    HeapWord* cur_hw = _ct->addr_for(cur_entry);
    if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
      // Continue the dirty range by opening the
      // dirty window one card to the left.
      start_of_non_clean = cur_hw;
    } else {
      // We hit a "clean" card; process any non-empty
      // "dirty" range accumulated so far.
      if (start_of_non_clean < end_of_non_clean) {
        const MemRegion mrd(start_of_non_clean, end_of_non_clean);
        _dirty_card_closure->do_MemRegion(mrd);
      }

      // Fast forward through a potential run of contiguous clean cards,
      // checking a whole word of card bytes at a time, starting at a word boundary.
      if (is_word_aligned(cur_entry)) {
        jbyte* cur_row = cur_entry - BytesPerWord;
        while (cur_row >= limit && *((intptr_t*)cur_row) ==  CardTableRS::clean_card_row()) {
          cur_row -= BytesPerWord;
        }
        cur_entry = cur_row + BytesPerWord;
        cur_hw = _ct->addr_for(cur_entry);
      }

      // Reset the dirty window, while continuing to look
      // for the next dirty card that will start a
      // new dirty window.
      end_of_non_clean = cur_hw;
      start_of_non_clean = cur_hw;
    }
    // Note that "cur_entry" leads "start_of_non_clean" in
    // its leftward excursion after this point
    // in the loop and, when we hit the left end of "mr",
    // will point off of the left end of the card-table
    // for "mr".
    cur_entry--;
  }
  // If the first card of "mr" was dirty, we will have
  // been left with a dirty window, co-initial with "mr",
  // which we now process.
  if (start_of_non_clean < end_of_non_clean) {
    const MemRegion mrd(start_of_non_clean, end_of_non_clean);
    _dirty_card_closure->do_MemRegion(mrd);
  }
}
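The loop above walks the card bytes from the high end of mr toward the low end, growing a [start_of_non_clean, end_of_non_clean) window over dirty cards and flushing it whenever a clean card ends the run. A stripped-down sketch of that accumulation pattern over a plain byte array (the CLEAN/DIRTY values and the array contents are invented for illustration):

#include <cstdio>
#include <vector>

int main() {
    const unsigned char CLEAN = 0xff, DIRTY = 0x00;
    std::vector<unsigned char> cards =
        {CLEAN, DIRTY, DIRTY, CLEAN, DIRTY, DIRTY, DIRTY, CLEAN};

    int end_of_non_clean   = (int)cards.size();  // exclusive right bound of window
    int start_of_non_clean = end_of_non_clean;
    for (int i = (int)cards.size() - 1; i >= 0; i--) {
        if (cards[i] == DIRTY) {
            start_of_non_clean = i;              // extend the window one card left
        } else {
            // Clean card: flush any accumulated dirty window, then reset it.
            if (start_of_non_clean < end_of_non_clean)
                std::printf("process cards [%d, %d)\n",
                            start_of_non_clean, end_of_non_clean);
            end_of_non_clean = start_of_non_clean = i;
        }
    }
    // A dirty run touching the left end of the array is flushed here.
    if (start_of_non_clean < end_of_non_clean)
        std::printf("process cards [%d, %d)\n",
                    start_of_non_clean, end_of_non_clean);
    return 0;
}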
void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
  jbyte *const first = byte_for(mr.start());
  jbyte *const last = byte_after(mr.last());

  // Below we may use an explicit loop instead of memset() because on
  // certain platforms memset() can give concurrent readers phantom zeros.
  if (UseMemSetInBOT) {
    memset(first, g1_young_gen, last - first);
  } else {
    for (jbyte* i = first; i < last; i++) {
      *i = g1_young_gen;
    }
  }
}
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                                             OopsInGenClosure* cl,
                                                             CardTableRS* ct,
                                                             int n_threads) {
  assert(n_threads > 0, "Error: expected n_threads > 0");
  assert((n_threads == 1 && ParallelGCThreads == 0) ||
         n_threads <= (int)ParallelGCThreads,
         "# worker threads != # requested!");
  assert(!Thread::current()->is_VM_thread() || (n_threads == 1), "There is only 1 VM thread");
  assert(UseDynamicNumberOfGCThreads ||
         !FLAG_IS_DEFAULT(ParallelGCThreads) ||
         n_threads == (int)ParallelGCThreads,
         "# worker threads != # requested!");
  // Make sure the LNC array is valid for the space.
  jbyte**   lowest_non_clean;
  uintptr_t lowest_non_clean_base_chunk_index;
  size_t    lowest_non_clean_chunk_size;
  get_LNC_array_for_space(sp, lowest_non_clean,
                          lowest_non_clean_base_chunk_index,
                          lowest_non_clean_chunk_size);

  uint n_strides = n_threads * ParGCStridesPerThread;
  SequentialSubTasksDone* pst = sp->par_seq_tasks();
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks(n_strides);

  uint stride = 0;
  while (!pst->is_task_claimed(/* reference */ stride)) {
    process_stride(sp, mr, stride, n_strides, cl, ct,
                   lowest_non_clean,
                   lowest_non_clean_base_chunk_index,
                   lowest_non_clean_chunk_size);
  }
  if (pst->all_tasks_completed()) {
    // Clear lowest_non_clean array for next time.
    intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
    uintptr_t last_chunk_index  = addr_to_chunk_index(mr.last());
    for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
      intptr_t ind = ch - lowest_non_clean_base_chunk_index;
      assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
             "Bounds error");
      lowest_non_clean[ind] = NULL;
    }
  }
}
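The is_task_claimed() loop above is a work-claiming idiom: each worker repeatedly claims the next unclaimed stride until all strides are taken. A minimal sketch of that idiom with a plain atomic counter (this is not the SequentialSubTasksDone API, just the shape of the pattern):

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
    const unsigned n_tasks = 8;            // stands in for n_strides
    std::atomic<unsigned> next_task{0};    // shared claim counter

    auto worker = [&](unsigned id) {
        unsigned stride;
        // Each successful fetch_add claims exactly one stride for this thread.
        while ((stride = next_task.fetch_add(1)) < n_tasks) {
            std::printf("worker %u claimed stride %u\n", id, stride);
        }
    };

    std::vector<std::thread> pool;
    for (unsigned i = 0; i < 3; i++) pool.emplace_back(worker, i);
    for (std::thread& t : pool) t.join();
    return 0;
}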
Example #8
0
void CardTableRS::verify_aligned_region_empty(MemRegion mr) {
  if (!mr.is_empty()) {
    jbyte* cur_entry = byte_for(mr.start());
    jbyte* limit = byte_after(mr.last());
    // The region mr may not start on a card boundary so
    // the first card may reflect a write to the space
    // just prior to mr.
    if (!is_aligned(mr.start())) {
      cur_entry++;
    }
    for (;cur_entry < limit; cur_entry++) {
      guarantee(*cur_entry == CardTableModRefBS::clean_card,
                "Unexpected dirty card found");
    }
  }
}
void CardTableModRefBSForCTRS::
non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                     OopsInGenClosure* cl,
                                     CardTableRS* ct,
                                     uint n_threads) {
  assert(n_threads > 0, "expected n_threads > 0");
  assert(n_threads <= ParallelGCThreads,
         err_msg("n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads));

  // Make sure the LNC array is valid for the space.
  jbyte**   lowest_non_clean;
  uintptr_t lowest_non_clean_base_chunk_index;
  size_t    lowest_non_clean_chunk_size;
  get_LNC_array_for_space(sp, lowest_non_clean,
                          lowest_non_clean_base_chunk_index,
                          lowest_non_clean_chunk_size);

  uint n_strides = n_threads * ParGCStridesPerThread;
  SequentialSubTasksDone* pst = sp->par_seq_tasks();
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks(n_strides);

  uint stride = 0;
  while (!pst->is_task_claimed(/* reference */ stride)) {
    process_stride(sp, mr, stride, n_strides,
                   cl, ct,
                   lowest_non_clean,
                   lowest_non_clean_base_chunk_index,
                   lowest_non_clean_chunk_size);
  }
  if (pst->all_tasks_completed()) {
    // Clear lowest_non_clean array for next time.
    intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
    uintptr_t last_chunk_index  = addr_to_chunk_index(mr.last());
    for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
      intptr_t ind = ch - lowest_non_clean_base_chunk_index;
      assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
             "Bounds error");
      lowest_non_clean[ind] = NULL;
    }
  }
}
void
G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  volatile jbyte* byte = byte_for(mr.start());
  jbyte* last_byte = byte_for(mr.last());
  Thread* thr = Thread::current();
  if (whole_heap) {
    while (byte <= last_byte) {
      *byte = dirty_card;
      byte++;
    }
  } else {
    // skip all consecutive young cards
    for (; byte <= last_byte && *byte == g1_young_gen; byte++);

    if (byte <= last_byte) {
      OrderAccess::storeload();
      // Enqueue if necessary.
      if (thr->is_Java_thread()) {
        JavaThread* jt = (JavaThread*)thr;
        for (; byte <= last_byte; byte++) {
          if (*byte == g1_young_gen) {
            continue;
          }
          if (*byte != dirty_card) {
            *byte = dirty_card;
            jt->dirty_card_queue().enqueue(byte);
          }
        }
      } else {
        MutexLockerEx x(Shared_DirtyCardQ_lock,
                        Mutex::_no_safepoint_check_flag);
        for (; byte <= last_byte; byte++) {
          if (*byte == g1_young_gen) {
            continue;
          }
          if (*byte != dirty_card) {
            *byte = dirty_card;
            _dcqs.shared_dirty_card_queue()->enqueue(byte);
          }
        }
      }
    }
  }
}
Example #11
0
void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
                                                        DirtyCardToOopClosure* dcto_cl,
                                                        MemRegionClosure* cl,
                                                        bool clear,
                                                        int n_threads) {
  if (n_threads > 0) {
    assert((n_threads == 1 && ParallelGCThreads == 0) ||
           n_threads <= (int)ParallelGCThreads,
           "# worker threads != # requested!");
    // Make sure the LNC array is valid for the space.
    jbyte**   lowest_non_clean;
    uintptr_t lowest_non_clean_base_chunk_index;
    size_t    lowest_non_clean_chunk_size;
    get_LNC_array_for_space(sp, lowest_non_clean,
                            lowest_non_clean_base_chunk_index,
                            lowest_non_clean_chunk_size);

    int n_strides = n_threads * StridesPerThread;
    SequentialSubTasksDone* pst = sp->par_seq_tasks();
    pst->set_par_threads(n_threads);
    pst->set_n_tasks(n_strides);

    int stride = 0;
    while (!pst->is_task_claimed(/* reference */ stride)) {
      process_stride(sp, mr, stride, n_strides, dcto_cl, cl, clear,
                     lowest_non_clean,
                     lowest_non_clean_base_chunk_index,
                     lowest_non_clean_chunk_size);
    }
    if (pst->all_tasks_completed()) {
      // Clear lowest_non_clean array for next time.
      intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
      uintptr_t last_chunk_index  = addr_to_chunk_index(mr.last());
      for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
        intptr_t ind = ch - lowest_non_clean_base_chunk_index;
        assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
               "Bounds error");
        lowest_non_clean[ind] = NULL;
      }
    }
  }
}
Example #12
0
 void do_MemRegion(MemRegion mr) {
     // We start at the high end of "mr", walking backwards
     // while accumulating a contiguous dirty range of cards in
     // [start_of_non_clean, end_of_non_clean) which we then
     // process en masse.
     HeapWord* end_of_non_clean = mr.end();
     HeapWord* start_of_non_clean = end_of_non_clean;
     jbyte*       entry = _ct->byte_for(mr.last());
     const jbyte* first_entry = _ct->byte_for(mr.start());
     while (entry >= first_entry) {
         HeapWord* cur = _ct->addr_for(entry);
         if (!clear_card(entry)) {
             // We hit a clean card; process any non-empty
             // dirty range accumulated so far.
             if (start_of_non_clean < end_of_non_clean) {
                 MemRegion mr2(start_of_non_clean, end_of_non_clean);
                 _dirty_card_closure->do_MemRegion(mr2);
             }
             // Reset the dirty window while continuing to
             // look for the next dirty window to process.
             end_of_non_clean = cur;
             start_of_non_clean = end_of_non_clean;
         }
         // Open the left end of the window one card to the left.
         start_of_non_clean = cur;
         // Note that "entry" leads "start_of_non_clean" in
         // its leftward excursion after this point
         // in the loop and, when we hit the left end of "mr",
         // will point off of the left end of the card-table
         // for "mr".
         entry--;
     }
     // If the first card of "mr" was dirty, we will have
     // been left with a dirty window, co-initial with "mr",
     // which we now process.
     if (start_of_non_clean < end_of_non_clean) {
         MemRegion mr2(start_of_non_clean, end_of_non_clean);
         _dirty_card_closure->do_MemRegion(mr2);
     }
 }
Example #13
0
bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count) {
  size_t commits = 0;
  uint start_index = (uint)_regions.get_index_by_address(range.start());
  uint last_index = (uint)_regions.get_index_by_address(range.last());

  // Ensure that each G1 region in the range is free, returning false if not.
  // Commit those that are not yet available, and keep count.
  for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
    if (!is_available(curr_index)) {
      commits++;
      expand_at(curr_index, 1);
    }
    HeapRegion* curr_region  = _regions.get_by_index(curr_index);
    if (!curr_region->is_free()) {
      return false;
    }
  }

  allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
  *commit_count = commits;
  return true;
}
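get_index_by_address() above maps an address to the index of the fixed-size region containing it, and the loop then visits every index between the indices of range.start() and range.last(), inclusive. A small sketch of that mapping with an invented base address and region size (neither value comes from the source):

#include <cstdint>
#include <cstdio>

int main() {
    const uintptr_t heap_base   = 0x40000000;   // illustrative
    const uintptr_t region_size = 1u << 20;     // 1 MiB regions (illustrative)

    // A range given by its start address and its last (inclusive) address.
    uintptr_t range_start = heap_base + 3 * region_size + 100;
    uintptr_t range_last  = heap_base + 7 * region_size - 1;

    unsigned start_index = (unsigned)((range_start - heap_base) / region_size);
    unsigned last_index  = (unsigned)((range_last  - heap_base) / region_size);

    // allocate_containing_regions() walks every index in [start_index, last_index].
    std::printf("range touches regions %u..%u (%u regions)\n",
                start_index, last_index, last_index - start_index + 1);
    return 0;
}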
Example #14
0
  void do_MemRegion(MemRegion mr) {
    HeapWord* end_of_non_clean = mr.end();
    HeapWord* start_of_non_clean = end_of_non_clean;
    jbyte* entry = _ct->byte_for(mr.last());
    HeapWord* cur = _ct->addr_for(entry);
    while (mr.contains(cur)) {
      jbyte entry_val = *entry;
      if (!clear_card(entry)) {
        if (start_of_non_clean < end_of_non_clean) {
          MemRegion mr2(start_of_non_clean, end_of_non_clean);
          _dirty_card_closure->do_MemRegion(mr2);
        }
        end_of_non_clean = cur;
        start_of_non_clean = end_of_non_clean;
      }
      entry--;
      start_of_non_clean = cur;
      cur = _ct->addr_for(entry);
    }
    if (start_of_non_clean < end_of_non_clean) {
      MemRegion mr2(start_of_non_clean, end_of_non_clean);
      _dirty_card_closure->do_MemRegion(mr2);
    }
  }
Example #15
0
void
CardTableModRefBS::
process_chunk_boundaries(Space* sp,
                         DirtyCardToOopClosure* dcto_cl,
                         MemRegion chunk_mr,
                         MemRegion used,
                         jbyte** lowest_non_clean,
                         uintptr_t lowest_non_clean_base_chunk_index,
                         size_t    lowest_non_clean_chunk_size)
{
  // We must worry about the chunk boundaries.

  // First, set our max_to_do:
  HeapWord* max_to_do = NULL;
  uintptr_t cur_chunk_index = addr_to_chunk_index(chunk_mr.start());
  cur_chunk_index           = cur_chunk_index - lowest_non_clean_base_chunk_index;

  if (chunk_mr.end() < used.end()) {
    // This is not the last chunk in the used region.  What is the last
    // object?
    HeapWord* last_block = sp->block_start(chunk_mr.end());
    assert(last_block <= chunk_mr.end(), "In case this property changes.");
    if (last_block == chunk_mr.end()
        || !sp->block_is_obj(last_block)) {
      max_to_do = chunk_mr.end();

    } else {
      // It is an object and starts before the end of the current chunk.
      // last_obj_card is the card corresponding to the start of the last object
      // in the chunk.  Note that the last object may not start in
      // the chunk.
      jbyte* last_obj_card = byte_for(last_block);
      if (!card_may_have_been_dirty(*last_obj_card)) {
        // The card containing the head is not dirty.  Any marks in
        // subsequent cards still in this chunk must have been made
        // precisely; we can cap processing at the end.
        max_to_do = chunk_mr.end();
      } else {
        // The last object must be considered dirty, and extends onto the
        // following chunk.  Look for a dirty card in that chunk that will
        // bound our processing.
        jbyte* limit_card = NULL;
        size_t last_block_size = sp->block_size(last_block);
        jbyte* last_card_of_last_obj =
          byte_for(last_block + last_block_size - 1);
        jbyte* first_card_of_next_chunk = byte_for(chunk_mr.end());
        // This search potentially goes a long distance looking
        // for the next card that will be scanned.  For example,
        // an object that is an array of primitives will not
        // have any cards covering regions interior to the array
        // that will need to be scanned. The scan can be terminated
        // at the last card of the next chunk.  That would leave
        // limit_card as NULL and would result in "max_to_do"
        // being set with the LNC value or with the end
        // of the last block.
        jbyte* last_card_of_next_chunk = first_card_of_next_chunk +
          CardsPerStrideChunk;
        assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start())
          == CardsPerStrideChunk, "last card of next chunk may be wrong");
        jbyte* last_card_to_check = (jbyte*) MIN2(last_card_of_last_obj,
                                                  last_card_of_next_chunk);
        for (jbyte* cur = first_card_of_next_chunk;
             cur <= last_card_to_check; cur++) {
          if (card_will_be_scanned(*cur)) {
            limit_card = cur; break;
          }
        }
        assert(0 <= cur_chunk_index+1 &&
               cur_chunk_index+1 < lowest_non_clean_chunk_size,
               "Bounds error.");
        // LNC for the next chunk
        jbyte* lnc_card = lowest_non_clean[cur_chunk_index+1];
        if (limit_card == NULL) {
          limit_card = lnc_card;
        }
        if (limit_card != NULL) {
          if (lnc_card != NULL) {
            limit_card = (jbyte*)MIN2((intptr_t)limit_card,
                                      (intptr_t)lnc_card);
          }
          max_to_do = addr_for(limit_card);
        } else {
          max_to_do = last_block + last_block_size;
        }
      }
    }
    assert(max_to_do != NULL, "OOPS!");
  } else {
    max_to_do = used.end();
  }
  // Now we can set the closure we're using so it doesn't go beyond
  // max_to_do.
  dcto_cl->set_min_done(max_to_do);
#ifndef PRODUCT
  dcto_cl->set_last_bottom(max_to_do);
#endif

  // Now we set *our" lowest_non_clean entry.
  // Find the object that spans our boundary, if one exists.
  // Nothing to do on the first chunk.
  if (chunk_mr.start() > used.start()) {
    // first_block is the block possibly spanning the chunk start
    HeapWord* first_block = sp->block_start(chunk_mr.start());
    // Does the block span the start of the chunk and is it
    // an object?
    if (first_block < chunk_mr.start() &&
        sp->block_is_obj(first_block)) {
      jbyte* first_dirty_card = NULL;
      jbyte* last_card_of_first_obj =
          byte_for(first_block + sp->block_size(first_block) - 1);
      jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
      jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());
      jbyte* last_card_to_check =
        (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
                      (intptr_t) last_card_of_first_obj);
      for (jbyte* cur = first_card_of_cur_chunk;
           cur <= last_card_to_check; cur++) {
        if (card_will_be_scanned(*cur)) {
          first_dirty_card = cur; break;
        }
      }
      if (first_dirty_card != NULL) {
        assert(0 <= cur_chunk_index &&
                 cur_chunk_index < lowest_non_clean_chunk_size,
               "Bounds error.");
        lowest_non_clean[cur_chunk_index] = first_dirty_card;
      }
    }
  }
}
Example #16
0
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  mr     = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!mr.is_empty()) {
    walk_mem_region(mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}
Example #17
0
void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
  // We don't need to do young-gen spaces.
  if (s->end() <= gen_boundary) return;
  MemRegion used = s->used_region();

  jbyte* cur_entry = byte_for(used.start());
  jbyte* limit = byte_after(used.last());
  while (cur_entry < limit) {
    if (*cur_entry == CardTableModRefBS::clean_card) {
      jbyte* first_dirty = cur_entry+1;
      while (first_dirty < limit &&
             *first_dirty == CardTableModRefBS::clean_card) {
        first_dirty++;
      }
      // If the first object is a regular object, and it has a
      // young-to-old field, that would mark the previous card.
      HeapWord* boundary = addr_for(cur_entry);
      HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty);
      HeapWord* boundary_block = s->block_start(boundary);
      HeapWord* begin = boundary;             // Until proven otherwise.
      HeapWord* start_block = boundary_block; // Until proven otherwise.
      if (boundary_block < boundary) {
        if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) {
          oop boundary_obj = oop(boundary_block);
          if (!boundary_obj->is_objArray() &&
              !boundary_obj->is_typeArray()) {
            guarantee(cur_entry > byte_for(used.start()),
                      "else boundary would be boundary_block");
            if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) {
              begin = boundary_block + s->block_size(boundary_block);
              start_block = begin;
            }
          }
        }
      }
      // Now traverse objects until end.
      if (begin < end) {
        MemRegion mr(begin, end);
        VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
        for (HeapWord* cur = start_block; cur < end; cur += s->block_size(cur)) {
          if (s->block_is_obj(cur) && s->obj_is_alive(cur)) {
            oop(cur)->oop_iterate(&verify_blk, mr);
          }
        }
      }
      cur_entry = first_dirty;
    } else {
      // We'd normally expect that cur_youngergen_and_prev_nonclean_card
      // is a transient value, that cannot be in the card table
      // except during GC, and thus assert that:
      // guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
      //        "Illegal CT value");
      // That however, need not hold, as will become clear in the
      // following...

      // We'd normally expect that if we are in the parallel case,
      // we can't have left a prev value (which would be different
      // from the current value) in the card table, and so we'd like to
      // assert that:
      // guarantee(cur_youngergen_card_val() == youngergen_card
      //           || !is_prev_youngergen_card_val(*cur_entry),
      //           "Illegal CT value");
      // That, however, may not hold occasionally, because of
      // CMS or MSC in the old gen. To wit, consider the
      // following two simple illustrative scenarios:
      // (a) CMS: Consider the case where a large object L
      //     spanning several cards is allocated in the old
      //     gen, and has a young gen reference stored in it, dirtying
      //     some interior cards. A young collection scans the card,
      //     finds a young ref and installs a youngergenP_n value.
      //     L then goes dead. Now a CMS collection starts,
      //     finds L dead and sweeps it up. Assume that L is
      //     abutting _unallocated_blk, so _unallocated_blk is
      //     adjusted down to (below) L. Assume further that
      //     no young collection intervenes during this CMS cycle.
      //     The next young gen cycle will not get to look at this
      //     youngergenP_n card since it lies in the unoccupied
      //     part of the space.
      //     Some young collections later the blocks on this
      //     card can be re-allocated either due to direct allocation
      //     or due to absorbing promotions. At this time, the
      //     before-gc verification will fail the above assert.
      // (b) MSC: In this case, an object L with a young reference
      //     is on a card that (therefore) holds a youngergen_n value.
      //     Suppose also that L lies towards the end of the used
      //     space before GC. An MSC collection
      //     occurs that compacts to such an extent that this
      //     card is no longer in the occupied part of the space.
      //     Since current code in MSC does not always clear cards
      //     in the unused part of old gen, this stale youngergen_n
      //     value is left behind and can later be covered by
      //     an object when promotion or direct allocation
      //     re-allocates that part of the heap.
      //
      // Fortunately, the presence of such stale card values is
      // "only" a minor annoyance in that subsequent young collections
      // might needlessly scan such cards, but would still never corrupt
      // the heap as a result. However, it's likely not to be a significant
      // performance inhibitor in practice. For instance,
      // some recent measurements with unoccupied cards eagerly cleared
      // out to maintain this invariant, showed next to no
      // change in young collection times; of course one can construct
      // degenerate examples where the cost can be significant.
      // Note, in particular, that if the "stale" card is modified
      // after re-allocation, it would be dirty, not "stale". Thus,
      // we can never have a younger ref in such a card and it is
      // safe not to scan that card in any collection. [As we see
      // below, we do some unnecessary scanning
      // in some cases in the current parallel scanning algorithm.]
      //
      // The main point below is that the parallel card scanning code
      // deals correctly with these stale card values. There are two main
      // cases to consider where we have a stale "younger gen" value and a
      // "derivative" case to consider, where we have a stale
      // "cur_younger_gen_and_prev_non_clean" value, as will become
      // apparent in the case analysis below.
      // o Case 1. If the stale value corresponds to a younger_gen_n
      //   value other than the cur_younger_gen value then the code
      //   treats this as being tantamount to a prev_younger_gen
      //   card. This means that the card may be unnecessarily scanned.
      //   There are two sub-cases to consider:
      //   o Case 1a. Let us say that the card is in the occupied part
      //     of the generation at the time the collection begins. In
      //     that case the card will be either cleared when it is scanned
      //     for young pointers, or will be set to cur_younger_gen as a
      //     result of promotion. (We have elided the normal case where
      //     the scanning thread and the promoting thread interleave
      //     possibly resulting in a transient
      //     cur_younger_gen_and_prev_non_clean value before settling
      //     to cur_younger_gen.) [End Case 1a.]
      //   o Case 1b. Consider now the case when the card is in the unoccupied
      //     part of the space which becomes occupied because of promotions
      //     into it during the current young GC. In this case the card
      //     will never be scanned for young references. The current
      //     code will set the card value to either
      //     cur_younger_gen_and_prev_non_clean or leave
      //     it with its stale value -- because the promotions didn't
      //     result in any younger refs on that card. Of these two
      //     cases, the latter will be covered in Case 1a during
      //     a subsequent scan. To deal with the former case, we need
      //     to further consider how we deal with a stale value of
      //     cur_younger_gen_and_prev_non_clean in our case analysis
      //     below. This we do in Case 3 below. [End Case 1b]
      //   [End Case 1]
      // o Case 2. If the stale value corresponds to cur_younger_gen being
      //   a value not necessarily written by a current promotion, the
      //   card will not be scanned by the younger refs scanning code.
      //   (This is OK since as we argued above such cards cannot contain
      //   any younger refs.) The result is that this value will be
      //   treated as a prev_younger_gen value in a subsequent collection,
      //   which is addressed in Case 1 above. [End Case 2]
      // o Case 3. We here consider the "derivative" case from Case 1b. above
      //   because of which we may find a stale
      //   cur_younger_gen_and_prev_non_clean card value in the table.
      //   Once again, as in Case 1, we consider two subcases, depending
      //   on whether the card lies in the occupied or unoccupied part
      //   of the space at the start of the young collection.
      //   o Case 3a. Let us say the card is in the occupied part of
      //     the old gen at the start of the young collection. In that
      //     case, the card will be scanned by the younger refs scanning
      //     code which will set it to cur_younger_gen. In a subsequent
      //     scan, the card will be considered again and get its final
      //     correct value. [End Case 3a]
      //   o Case 3b. Now consider the case where the card is in the
      //     unoccupied part of the old gen, and is occupied as a result
      //     of promotions during this young GC. In that case,
      //     the card will not be scanned for younger refs. The presence
      //     of newly promoted objects on the card will then result in
      //     its keeping the value cur_younger_gen_and_prev_non_clean
      //     value, which we have dealt with in Case 3 here. [End Case 3b]
      //   [End Case 3]
      //
      // (Please refer to the code in the helper class
      // ClearNonCleanCardWrapper and in CardTableModRefBS for details.)
      //
      // The informal arguments above can be tightened into a formal
      // correctness proof and it behooves us to write up such a proof,
      // or to use model checking to prove that there are no lingering
      // concerns.
      //
      // Clearly because of Case 3b one cannot bound the time for
      // which a card will retain what we have called a "stale" value.
      // However, one can obtain a loose upper bound on the redundant
      // work as a result of such stale values. Note first that any
      // time a stale card lies in the occupied part of the space at
      // the start of the collection, it is scanned by younger refs
      // code and we can define a rank function on card values that
      // declines when this is so. Note also that when a card does not
      // lie in the occupied part of the space at the beginning of a
      // young collection, its rank can either decline or stay unchanged.
      // In this case, no extra work is done in terms of redundant
      // younger refs scanning of that card.
      // Then, the case analysis above reveals that, in the worst case,
      // any such stale card will be scanned unnecessarily at most twice.
      //
      // It is nonetheless advisable to try and get rid of some of this
      // redundant work in a subsequent (low priority) re-design of
      // the card-scanning code, if only to simplify the underlying
      // state machine analysis/proof. ysr 1/28/2002. XXX
      cur_entry++;
    }
  }
}
void
CardTableModRefBS::
process_stride(Space* sp,
               MemRegion used,
               jint stride, int n_strides,
               OopsInGenClosure* cl,
               CardTableRS* ct,
               jbyte** lowest_non_clean,
               uintptr_t lowest_non_clean_base_chunk_index,
               size_t    lowest_non_clean_chunk_size) {
  // We go from higher to lower addresses here; it wouldn't help that much
  // because of the strided parallelism pattern used here.

  // Find the first card address of the first chunk in the stride that is
  // at least "bottom" of the used region.
  jbyte*    start_card  = byte_for(used.start());
  jbyte*    end_card    = byte_after(used.last());
  uintptr_t start_chunk = addr_to_chunk_index(used.start());
  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
  jbyte* chunk_card_start;

  if ((uintptr_t)stride >= start_chunk_stride_num) {
    chunk_card_start = (jbyte*)(start_card +
                                (stride - start_chunk_stride_num) *
                                ParGCCardsPerStrideChunk);
  } else {
    // Go ahead to the next chunk group boundary, then to the requested stride.
    chunk_card_start = (jbyte*)(start_card +
                                (n_strides - start_chunk_stride_num + stride) *
                                ParGCCardsPerStrideChunk);
  }

  while (chunk_card_start < end_card) {
    // Even though we go from lower to higher addresses below, the
    // strided parallelism can interleave the actual processing of the
    // dirty pages in various ways. For a specific chunk within this
    // stride, we take care to avoid double scanning or missing a card
    // by suitably initializing the "min_done" field in process_chunk_boundaries()
    // below, together with the dirty region extension accomplished in
    // DirtyCardToOopClosure::do_MemRegion().
    jbyte*    chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
    // Invariant: chunk_mr should be fully contained within the "used" region.
    MemRegion chunk_mr       = MemRegion(addr_for(chunk_card_start),
                                         chunk_card_end >= end_card ?
                                           used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

    DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                     cl->gen_boundary());
    ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);


    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
                             chunk_mr,
                             used,
                             lowest_non_clean,
                             lowest_non_clean_base_chunk_index,
                             lowest_non_clean_chunk_size);

    // We want the LNC array updates above in process_chunk_boundaries
    // to be visible before any of the card table value changes as a
    // result of the dirty card iteration below.
    OrderAccess::storestore();

    // We do not call the non_clean_card_iterate_serial() version because
    // we want to clear the cards: clear_cl here does the work of finding
    // contiguous dirty ranges of cards to process and clear.
    clear_cl.do_MemRegion(chunk_mr);

    // Find the next chunk of the stride.
    chunk_card_start += ParGCCardsPerStrideChunk * n_strides;
  }
}