void G1BlockOffsetArray::set_region(MemRegion mr) {
  _bottom = mr.start();
  _end = mr.end();
}
Example No. 2
// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right-end back some so we do not scan what
// we (or another worker thread) may already have scanned
// or are planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}
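
A minimal standalone sketch of the extend-then-trim logic described above, using plain pointers; extend_and_trim, top_obj_end and prev_min_done are illustrative stand-ins, not HotSpot APIs.

#include <algorithm>
#include <utility>

typedef char* Addr;

// Sketch: given a dirty region [bottom, top), the end of the object that
// spans 'top' (imprecise marking may have dirtied only the card holding the
// object's head), and the lowest address a previous call already covered,
// compute the region that still needs to be walked.
static std::pair<Addr, Addr> extend_and_trim(Addr bottom, Addr top,
                                             Addr top_obj_end,
                                             Addr prev_min_done /* may be null */) {
  // Extend to the right so the spanning object is scanned to its end.
  Addr actual_top = std::max(top, top_obj_end);
  // Trim back so we never re-scan what was already covered.
  if (prev_min_done != nullptr && prev_min_done < actual_top) {
    actual_top = prev_min_done;
  }
  // Trimming may push the top below bottom (the region was already fully
  // covered); the caller should treat that as an empty region.
  Addr actual_bottom = std::min(bottom, actual_top);
  return std::make_pair(actual_bottom, actual_top);
}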
Example No. 3
void MutableSpace::pretouch_pages(MemRegion mr) {
  os::pretouch_memory(mr.start(), mr.end());
}
Example No. 4
void MutableSpace::initialize(MemRegion mr,
                              bool clear_space,
                              bool mangle_space,
                              bool setup_pages) {

  assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
         "invalid space boundaries");

  if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
    // The space may move left and right or expand/shrink.
    // We'd like to enforce the desired page placement.
    MemRegion head, tail;
    if (last_setup_region().is_empty()) {
      // If it's the first initialization, don't limit the amount of work.
      head = mr;
      tail = MemRegion(mr.end(), mr.end());
    } else {
      // Is there an intersection with the address space?
      MemRegion intersection = last_setup_region().intersection(mr);
      if (intersection.is_empty()) {
        intersection = MemRegion(mr.end(), mr.end());
      }
      // All the sizes below are in words.
      size_t head_size = 0, tail_size = 0;
      if (mr.start() <= intersection.start()) {
        head_size = pointer_delta(intersection.start(), mr.start());
      }
      if (intersection.end() <= mr.end()) {
        tail_size = pointer_delta(mr.end(), intersection.end());
      }
      // Limit the amount of page manipulation if necessary.
      if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
        const size_t change_size = head_size + tail_size;
        const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
        head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                         head_size);
        tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                         tail_size);
      }
      head = MemRegion(intersection.start() - head_size, intersection.start());
      tail = MemRegion(intersection.end(), intersection.end() + tail_size);
    }
    assert(mr.contains(head) && mr.contains(tail), "Sanity");

    if (UseNUMA) {
      numa_setup_pages(head, clear_space);
      numa_setup_pages(tail, clear_space);
    }

    if (AlwaysPreTouch) {
      pretouch_pages(head);
      pretouch_pages(tail);
    }

    // Remember where we stopped so that we can continue later.
    set_last_setup_region(MemRegion(head.start(), tail.end()));
  }
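
The head/tail limiting above is a budgeted proportional split. A standalone sketch of that arithmetic, where kResizeRateBytes stands in for NUMASpaceResizeRate and kLogBytesPerWord for LogBytesPerWord (both hypothetical constants here):

#include <algorithm>
#include <cstddef>

const std::size_t kResizeRateBytes = 1u << 20;  // per-resize budget, in bytes
const std::size_t kLogBytesPerWord = 3;         // 64-bit words

// Scale the head and tail page-setup work so that together they stay within
// the per-resize budget while preserving their relative proportions.
void limit_setup_work(std::size_t& head_words, std::size_t& tail_words) {
  const std::size_t change = head_words + tail_words;
  if (change == 0) return;
  const double budget_words =
      static_cast<double>(kResizeRateBytes >> kLogBytesPerWord);
  head_words = std::min<std::size_t>(
      static_cast<std::size_t>(budget_words * head_words / change), head_words);
  tail_words = std::min<std::size_t>(
      static_cast<std::size_t>(budget_words * tail_words / change), tail_words);
}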
Example No. 5
void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
  // We don't need to do young-gen spaces.
  if (s->end() <= gen_boundary) return;
  MemRegion used = s->used_region();

  jbyte* cur_entry = byte_for(used.start());
  jbyte* limit = byte_after(used.last());
  while (cur_entry < limit) {
    if (*cur_entry == CardTableModRefBS::clean_card) {
      jbyte* first_dirty = cur_entry+1;
      while (first_dirty < limit &&
             *first_dirty == CardTableModRefBS::clean_card) {
        first_dirty++;
      }
      // If the first object is a regular object, and it has a
      // young-to-old field, that would mark the previous card.
      HeapWord* boundary = addr_for(cur_entry);
      HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty);
      HeapWord* boundary_block = s->block_start(boundary);
      HeapWord* begin = boundary;             // Until proven otherwise.
      HeapWord* start_block = boundary_block; // Until proven otherwise.
      if (boundary_block < boundary) {
        if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) {
          oop boundary_obj = oop(boundary_block);
          if (!boundary_obj->is_objArray() &&
              !boundary_obj->is_typeArray()) {
            guarantee(cur_entry > byte_for(used.start()),
                      "else boundary would be boundary_block");
            if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) {
              begin = boundary_block + s->block_size(boundary_block);
              start_block = begin;
            }
          }
        }
      }
      // Now traverse objects until end.
      if (begin < end) {
        MemRegion mr(begin, end);
        VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
        for (HeapWord* cur = start_block; cur < end; cur += s->block_size(cur)) {
          if (s->block_is_obj(cur) && s->obj_is_alive(cur)) {
            oop(cur)->oop_iterate_no_header(&verify_blk, mr);
          }
        }
      }
      cur_entry = first_dirty;
    } else {
      // We'd normally expect that cur_youngergen_and_prev_nonclean_card
      // is a transient value that cannot be in the card table
      // except during GC, and thus assert that:
      // guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
      //        "Illegal CT value");
      // That, however, need not hold, as will become clear in the
      // following...

      // We'd normally expect that if we are in the parallel case,
      // we can't have left a prev value (which would be different
      // from the current value) in the card table, and so we'd like to
      // assert that:
      // guarantee(cur_youngergen_card_val() == youngergen_card
      //           || !is_prev_youngergen_card_val(*cur_entry),
      //           "Illegal CT value");
      // That, however, may not hold occasionally, because of
      // CMS or MSC in the old gen. To wit, consider the
      // following two simple illustrative scenarios:
      // (a) CMS: Consider the case where a large object L
      //     spanning several cards is allocated in the old
      //     gen, and has a young gen reference stored in it, dirtying
      //     some interior cards. A young collection scans the card,
      //     finds a young ref and installs a youngergenP_n value.
      //     L then goes dead. Now a CMS collection starts,
      //     finds L dead and sweeps it up. Assume that L is
      //     abutting _unallocated_blk, so _unallocated_blk is
      //     adjusted down to (below) L. Assume further that
      //     no young collection intervenes during this CMS cycle.
      //     The next young gen cycle will not get to look at this
      //     youngergenP_n card since it lies in the unoccupied
      //     part of the space.
      //     Some young collections later the blocks on this
      //     card can be re-allocated either due to direct allocation
      //     or due to absorbing promotions. At this time, the
      //     before-gc verification will fail the above assert.
      // (b) MSC: In this case, an object L with a young reference
      //     is on a card that (therefore) holds a youngergen_n value.
      //     Suppose also that L lies towards the end of the used
      //     space before GC. An MSC collection
      //     occurs that compacts to such an extent that this
      //     card is no longer in the occupied part of the space.
      //     Since current code in MSC does not always clear cards
      //     in the unused part of old gen, this stale youngergen_n
      //     value is left behind and can later be covered by
      //     an object when promotion or direct allocation
      //     re-allocates that part of the heap.
      //
      // Fortunately, the presence of such stale card values is
      // "only" a minor annoyance in that subsequent young collections
      // might needlessly scan such cards, but would still never corrupt
      // the heap as a result. However, it is unlikely to be a significant
      // performance inhibitor in practice. For instance, some recent
      // measurements, with unoccupied cards eagerly cleared out to maintain
      // this invariant, showed next to no change in young collection times
      // (of course, one can construct degenerate examples where the cost
      // can be significant).
      // Note, in particular, that if the "stale" card is modified
      // after re-allocation, it would be dirty, not "stale". Thus,
      // we can never have a younger ref in such a card and it is
      // safe not to scan that card in any collection. [As we see
      // below, we do some unnecessary scanning
      // in some cases in the current parallel scanning algorithm.]
      //
      // The main point below is that the parallel card scanning code
      // deals correctly with these stale card values. There are two main
      // cases to consider where we have a stale "younger gen" value and a
      // "derivative" case to consider, where we have a stale
      // "cur_younger_gen_and_prev_non_clean" value, as will become
      // apparent in the case analysis below.
      // o Case 1. If the stale value corresponds to a younger_gen_n
      //   value other than the cur_younger_gen value then the code
      //   treats this as being tantamount to a prev_younger_gen
      //   card. This means that the card may be unnecessarily scanned.
      //   There are two sub-cases to consider:
      //   o Case 1a. Let us say that the card is in the occupied part
      //     of the generation at the time the collection begins. In
      //     that case the card will be either cleared when it is scanned
      //     for young pointers, or will be set to cur_younger_gen as a
      //     result of promotion. (We have elided the normal case where
      //     the scanning thread and the promoting thread interleave
      //     possibly resulting in a transient
      //     cur_younger_gen_and_prev_non_clean value before settling
      //     to cur_younger_gen.) [End Case 1a.]
      //   o Case 1b. Consider now the case when the card is in the unoccupied
      //     part of the space which becomes occupied because of promotions
      //     into it during the current young GC. In this case the card
      //     will never be scanned for young references. The current
      //     code will set the card value to either
      //     cur_younger_gen_and_prev_non_clean or leave
      //     it with its stale value -- because the promotions didn't
      //     result in any younger refs on that card. Of these two
      //     cases, the latter will be covered in Case 1a during
      //     a subsequent scan. To deal with the former case, we need
      //     to further consider how we deal with a stale value of
      //     cur_younger_gen_and_prev_non_clean in our case analysis
      //     below. This we do in Case 3 below. [End Case 1b]
      //   [End Case 1]
      // o Case 2. If the stale value corresponds to cur_younger_gen, a value
      //   not necessarily written by a current promotion, the
      //   card will not be scanned by the younger refs scanning code.
      //   (This is OK since as we argued above such cards cannot contain
      //   any younger refs.) The result is that this value will be
      //   treated as a prev_younger_gen value in a subsequent collection,
      //   which is addressed in Case 1 above. [End Case 2]
      // o Case 3. We here consider the "derivative" case from Case 1b. above
      //   because of which we may find a stale
      //   cur_younger_gen_and_prev_non_clean card value in the table.
      //   Once again, as in Case 1, we consider two subcases, depending
      //   on whether the card lies in the occupied or unoccupied part
      //   of the space at the start of the young collection.
      //   o Case 3a. Let us say the card is in the occupied part of
      //     the old gen at the start of the young collection. In that
      //     case, the card will be scanned by the younger refs scanning
      //     code which will set it to cur_younger_gen. In a subsequent
      //     scan, the card will be considered again and get its final
      //     correct value. [End Case 3a]
      //   o Case 3b. Now consider the case where the card is in the
      //     unoccupied part of the old gen, and is occupied as a result
      //     of promotions during this young GC. In that case,
      //     the card will not be scanned for younger refs. The presence
      //     of newly promoted objects on the card will then result in
      //     it keeping the value cur_younger_gen_and_prev_non_clean,
      //     which we have dealt with in Case 3 here. [End Case 3b]
      //   [End Case 3]
      //
      // (Please refer to the code in the helper class
      // ClearNonCleanCardWrapper and in CardTableModRefBS for details.)
      //
      // The informal arguments above can be tightened into a formal
      // correctness proof and it behooves us to write up such a proof,
      // or to use model checking to prove that there are no lingering
      // concerns.
      //
      // Clearly because of Case 3b one cannot bound the time for
      // which a card will retain what we have called a "stale" value.
      // However, one can obtain a loose upper bound on the redundant
      // work as a result of such stale values. Note first that any
      // time a stale card lies in the occupied part of the space at
      // the start of the collection, it is scanned by younger refs
      // code and we can define a rank function on card values that
      // declines when this is so. Note also that when a card does not
      // lie in the occupied part of the space at the beginning of a
      // young collection, its rank can either decline or stay unchanged.
      // In this case, no extra work is done in terms of redundant
      // younger refs scanning of that card.
      // Then, the case analysis above reveals that, in the worst case,
      // any such stale card will be scanned unnecessarily at most twice.
      //
      // It is nonetheless advisable to try to get rid of some of this
      // redundant work in a subsequent (low priority) re-design of
      // the card-scanning code, if only to simplify the underlying
      // state machine analysis/proof. ysr 1/28/2002. XXX
      cur_entry++;
    }
  }
}
Example No. 6
void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
  if (_g1->is_in_g1_reserved(mr.start())) {
    _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
    if (_start_first == NULL) _start_first = mr.start();
  }
}
inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::large_range);
}
Example No. 8
// In the numa case eden is not mangled so a survivor space
// moving into a region previously occupied by a survivor
// may find an unmangled region.  Also, in the PS case, eden,
// to-space and from-space may not touch (i.e., there may be
// gaps between them due to movement while resizing the
// spaces).  Those gaps must be mangled.
void PSYoungGen::mangle_survivors(MutableSpace* s1,
                                  MemRegion s1MR,
                                  MutableSpace* s2,
                                  MemRegion s2MR) {
  // Check eden and gap between eden and from-space, in deciding
  // what to mangle in from-space.  Check the gap between from-space
  // and to-space when deciding what to mangle.
  //
  //      +--------+   +----+    +---+
  //      | eden   |   |s1  |    |s2 |
  //      +--------+   +----+    +---+
  //                 +-------+ +-----+
  //                 |s1MR   | |s2MR |
  //                 +-------+ +-----+
  // All of survivor-space is properly mangled so find the
  // upper bound on the mangling for any portion above current s1.
  HeapWord* delta_end = MIN2(s1->bottom(), s1MR.end());
  MemRegion delta1_left;
  if (s1MR.start() < delta_end) {
    delta1_left = MemRegion(s1MR.start(), delta_end);
    s1->mangle_region(delta1_left);
  }
  // Find any portion to the right of the current s1.
  HeapWord* delta_start = MAX2(s1->end(), s1MR.start());
  MemRegion delta1_right;
  if (delta_start < s1MR.end()) {
    delta1_right = MemRegion(delta_start, s1MR.end());
    s1->mangle_region(delta1_right);
  }

  // Similarly for the second survivor space except that
  // any of the new region that overlaps with the current
  // region of the first survivor space has already been
  // mangled.
  delta_end = MIN2(s2->bottom(), s2MR.end());
  delta_start = MAX2(s2MR.start(), s1->end());
  MemRegion delta2_left;
  if (s2MR.start() < delta_end) {
    delta2_left = MemRegion(s2MR.start(), delta_end);
    s2->mangle_region(delta2_left);
  }
  delta_start = MAX2(s2->end(), s2MR.start());
  MemRegion delta2_right;
  if (delta_start < s2MR.end()) {
    delta2_right = MemRegion(delta_start, s2MR.end());
    s2->mangle_region(delta2_right);
  }

  if (TraceZapUnusedHeapArea) {
    // s1
    gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
      "New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
      s1->bottom(), s1->end(), s1MR.start(), s1MR.end());
    gclog_or_tty->print_cr("    Mangle before: [" PTR_FORMAT ", "
      PTR_FORMAT ")  Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
      delta1_left.start(), delta1_left.end(), delta1_right.start(),
      delta1_right.end());

    // s2
    gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
      "New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
      s2->bottom(), s2->end(), s2MR.start(), s2MR.end());
    gclog_or_tty->print_cr("    Mangle before: [" PTR_FORMAT ", "
      PTR_FORMAT ")  Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
      delta2_left.start(), delta2_left.end(), delta2_right.start(),
      delta2_right.end());
  }

}
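
The deltas above are just the parts of the new survivor region that fall outside the space's current (already mangled) extent. A small standalone sketch of that clipping; Region and unmangled_deltas are illustrative names, not HotSpot types.

#include <algorithm>

typedef char* Addr;

struct Region {
  Addr start;
  Addr end;
  bool is_empty() const { return start >= end; }
};

// Given the space's current extent [cur_bottom, cur_end), which is already
// mangled, and the region it is about to occupy [new_start, new_end),
// compute the pieces of the new region that still need mangling.
void unmangled_deltas(Addr cur_bottom, Addr cur_end,
                      Addr new_start, Addr new_end,
                      Region& left, Region& right) {
  left.start  = new_start;
  left.end    = std::min(cur_bottom, new_end);  // portion below the current bottom
  right.start = std::max(cur_end, new_start);   // portion above the current end
  right.end   = new_end;
}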
Example No. 9
void MutableSpace::pretouch_pages(MemRegion mr) {
  for (volatile char *p = (char*)mr.start(); p < (char*)mr.end(); p += os::vm_page_size()) {
    char t = *p; *p = t;
  }
}
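
This variant pre-touches by hand: a volatile read and write-back of one byte per page forces the OS to commit the backing pages up front. A standalone sketch of the same idea on anonymous mmap'd memory (Linux/POSIX; the names are illustrative):

#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>

// Touch one byte per page so the kernel materializes the pages now rather
// than at first use; mirrors the volatile read/write-back loop above.
static void pretouch(char* start, char* end) {
  const long page = sysconf(_SC_PAGESIZE);
  for (volatile char* p = start; p < end; p += page) {
    char t = *p;  // read ...
    *p = t;       // ... and write back, committing the page
  }
}

int main() {
  const std::size_t len = 8 * 1024 * 1024;
  void* raw = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (raw == MAP_FAILED) return 1;
  char* mem = static_cast<char*>(raw);
  pretouch(mem, mem + len);
  munmap(mem, len);
  return 0;
}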
Example No. 10
MemRegion MemRegion::minus(const MemRegion mr2) const {
  // There seem to be 6 cases:
  //                  |this MemRegion|
  // |strictly below|
  //   |overlap beginning|
  //                    |interior|
  //                        |overlap ending|
  //                                   |strictly above|
  //              |completely overlapping|
  // We can't deal with an interior case because it would
  // produce two disjoint regions as a result.
  // We aren't trying to be optimal in the number of tests below,
  // but the order is important to distinguish the strictly-below/above
  // cases from the overlapping cases.
  if (mr2.end() <= start()) {
    // strictly below
    return MemRegion(start(), end());
  }
  if (mr2.start() <= start() && mr2.end() <= end()) {
    // overlap beginning
    return MemRegion(mr2.end(), end());
  }
  if (mr2.start() >= end()) {
    // strictly above
    return MemRegion(start(), end());
  }
  if (mr2.start() >= start() && mr2.end() >= end()) {
    // overlap ending
    return MemRegion(start(), mr2.start());
  }
  if (mr2.start() <= start() && mr2.end() >= end()) {
    // completely overlapping
    return MemRegion();
  }
  if (mr2.start() > start() && mr2.end() < end()) {
    // interior
    guarantee(false, "MemRegion::minus, but interior");
    return MemRegion();
  }
  ShouldNotReachHere();
  return MemRegion();
}
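
The six cases read more easily on plain half-open intervals. A standalone sketch; Interval is an illustrative stand-in for MemRegion, and the tests are ordered slightly differently but cover the same cases.

#include <cassert>
#include <cstdint>

struct Interval {
  uint64_t lo, hi;   // half-open [lo, hi)
  bool is_empty() const { return lo >= hi; }
};

// Subtract b from a, assuming b does not split a into two pieces
// (the "interior" case the original code guards against).
Interval interval_minus(Interval a, Interval b) {
  if (b.hi <= a.lo || b.lo >= a.hi) return a;              // strictly below / above
  if (b.lo <= a.lo && b.hi >= a.hi) return Interval{0, 0}; // completely overlapping
  if (b.lo <= a.lo) return Interval{b.hi, a.hi};           // overlaps the beginning
  if (b.hi >= a.hi) return Interval{a.lo, b.lo};           // overlaps the ending
  assert(false && "interior case: result would be two disjoint intervals");
  return Interval{0, 0};
}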
Example No. 11
void CardTableRS::clear(MemRegion mr) {
  for (int i = 0; i < _ct_bs._cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_ct_bs._covered[i]);
    if (mri.byte_size() > 0) clear_MemRegion(mri);
  }
}
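
clear() above clips the request against each covered region and clears only the non-empty pieces. The same pattern in a standalone form; Range and clear_fn are stand-ins for the card-table types.

#include <algorithm>
#include <cstdint>
#include <vector>

struct Range { uint64_t lo, hi; };   // half-open [lo, hi)

// Apply clear_fn to the intersection of req with every covered range,
// skipping empty intersections, mirroring CardTableRS::clear() above.
template <class ClearFn>
void clear_intersections(const Range& req, const std::vector<Range>& covered,
                         ClearFn clear_fn) {
  for (const Range& c : covered) {
    Range inter = { std::max(req.lo, c.lo), std::min(req.hi, c.hi) };
    if (inter.lo < inter.hi) {
      clear_fn(inter);
    }
  }
}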
Example No. 12
void InterpreterOopMap::oop_iterate(OopClosure *blk, MemRegion mr) {
  if (method() != NULL && mr.contains(&_method)) {
    blk->do_oop((oop*) &_method);
  }
}
Example No. 13
void ContiguousSpace::mangle_region(MemRegion mr) {
  debug_only(Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord));
}
void
CardTableModRefBS::
process_stride(Space* sp,
               MemRegion used,
               jint stride, int n_strides,
               OopsInGenClosure* cl,
               CardTableRS* ct,
               jbyte** lowest_non_clean,
               uintptr_t lowest_non_clean_base_chunk_index,
               size_t    lowest_non_clean_chunk_size) {
  // We go from higher to lower addresses here; it wouldn't help that much
  // because of the strided parallelism pattern used here.

  // Find the first card address of the first chunk in the stride that is
  // at least "bottom" of the used region.
  jbyte*    start_card  = byte_for(used.start());
  jbyte*    end_card    = byte_after(used.last());
  uintptr_t start_chunk = addr_to_chunk_index(used.start());
  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
  jbyte* chunk_card_start;

  if ((uintptr_t)stride >= start_chunk_stride_num) {
    chunk_card_start = (jbyte*)(start_card +
                                (stride - start_chunk_stride_num) *
                                ParGCCardsPerStrideChunk);
  } else {
    // Go ahead to the next chunk group boundary, then to the requested stride.
    chunk_card_start = (jbyte*)(start_card +
                                (n_strides - start_chunk_stride_num + stride) *
                                ParGCCardsPerStrideChunk);
  }

  while (chunk_card_start < end_card) {
    // Even though we go from lower to higher addresses below, the
    // strided parallelism can interleave the actual processing of the
    // dirty pages in various ways. For a specific chunk within this
    // stride, we take care to avoid double scanning or missing a card
    // by suitably initializing the "min_done" field in process_chunk_boundaries()
    // below, together with the dirty region extension accomplished in
    // DirtyCardToOopClosure::do_MemRegion().
    jbyte*    chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
    // Invariant: chunk_mr should be fully contained within the "used" region.
    MemRegion chunk_mr       = MemRegion(addr_for(chunk_card_start),
                                         chunk_card_end >= end_card ?
                                           used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

    DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                     cl->gen_boundary());
    ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);


    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
                             chunk_mr,
                             used,
                             lowest_non_clean,
                             lowest_non_clean_base_chunk_index,
                             lowest_non_clean_chunk_size);

    // We want the LNC array updates above in process_chunk_boundaries
    // to be visible before any of the card table value changes as a
    // result of the dirty card iteration below.
    OrderAccess::storestore();

    // We do not call the non_clean_card_iterate_serial() version because
    // we want to clear the cards: clear_cl here does the work of finding
    // contiguous dirty ranges of cards to process and clear.
    clear_cl.do_MemRegion(chunk_mr);

    // Find the next chunk of the stride.
    chunk_card_start += ParGCCardsPerStrideChunk * n_strides;
  }
}
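
The stride arithmetic above hands every n_strides-th chunk of cards to a given worker: align once to the worker's own stride, then step by n_strides chunks. A standalone sketch over card indices, where cards_per_chunk stands in for ParGCCardsPerStrideChunk:

#include <cstdint>
#include <cstdio>

// Print the card-index ranges of the chunks owned by worker 'stride' (out of
// n_strides) within [start_card_idx, end_card_idx); start_chunk is the
// absolute chunk index of start_card_idx.
void walk_stride(uint64_t start_card_idx, uint64_t end_card_idx,
                 uint64_t start_chunk,
                 unsigned stride, unsigned n_strides,
                 uint64_t cards_per_chunk) {
  const unsigned start_chunk_stride_num = start_chunk % n_strides;
  uint64_t chunk_start;
  if (stride >= start_chunk_stride_num) {
    chunk_start = start_card_idx +
                  (stride - start_chunk_stride_num) * cards_per_chunk;
  } else {
    // Skip to the next group of n_strides chunks, then to our stride.
    chunk_start = start_card_idx +
                  (n_strides - start_chunk_stride_num + stride) * cards_per_chunk;
  }
  for (; chunk_start < end_card_idx;
       chunk_start += (uint64_t)n_strides * cards_per_chunk) {
    uint64_t chunk_end = chunk_start + cards_per_chunk;
    if (chunk_end > end_card_idx) chunk_end = end_card_idx;
    std::printf("worker %u: cards [%llu, %llu)\n", stride,
                (unsigned long long)chunk_start, (unsigned long long)chunk_end);
  }
}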
inline void CMSBitMap::par_clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::small_range);
}
Example No. 16
void
CardTableModRefBS::
process_chunk_boundaries(Space* sp,
                         DirtyCardToOopClosure* dcto_cl,
                         MemRegion chunk_mr,
                         MemRegion used,
                         jbyte** lowest_non_clean,
                         uintptr_t lowest_non_clean_base_chunk_index,
                         size_t    lowest_non_clean_chunk_size)
{
  // We must worry about the chunk boundaries.

  // First, set our max_to_do:
  HeapWord* max_to_do = NULL;
  uintptr_t cur_chunk_index = addr_to_chunk_index(chunk_mr.start());
  cur_chunk_index           = cur_chunk_index - lowest_non_clean_base_chunk_index;

  if (chunk_mr.end() < used.end()) {
    // This is not the last chunk in the used region.  What is the last
    // object?
    HeapWord* last_block = sp->block_start(chunk_mr.end());
    assert(last_block <= chunk_mr.end(), "In case this property changes.");
    if (last_block == chunk_mr.end()
        || !sp->block_is_obj(last_block)) {
      max_to_do = chunk_mr.end();

    } else {
      // It is an object and starts before the end of the current chunk.
      // last_obj_card is the card corresponding to the start of the last object
      // in the chunk.  Note that the last object may not start in
      // the chunk.
      jbyte* last_obj_card = byte_for(last_block);
      if (!card_may_have_been_dirty(*last_obj_card)) {
        // The card containing the head is not dirty.  Any marks in
        // subsequent cards still in this chunk must have been made
        // precisely; we can cap processing at the end.
        max_to_do = chunk_mr.end();
      } else {
        // The last object must be considered dirty, and extends onto the
        // following chunk.  Look for a dirty card in that chunk that will
        // bound our processing.
        jbyte* limit_card = NULL;
        size_t last_block_size = sp->block_size(last_block);
        jbyte* last_card_of_last_obj =
          byte_for(last_block + last_block_size - 1);
        jbyte* first_card_of_next_chunk = byte_for(chunk_mr.end());
        // This search potentially goes a long distance looking
        // for the next card that will be scanned.  For example,
        // an object that is an array of primitives will not
        // have any cards covering regions interior to the array
        // that will need to be scanned. The scan can be terminated
        // at the last card of the next chunk.  That would leave
        // limit_card as NULL and would result in "max_to_do"
        // being set with the LNC value or with the end
        // of the last block.
        jbyte* last_card_of_next_chunk = first_card_of_next_chunk +
          CardsPerStrideChunk;
        assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start())
          == CardsPerStrideChunk, "last card of next chunk may be wrong");
        jbyte* last_card_to_check = (jbyte*) MIN2(last_card_of_last_obj,
                                                  last_card_of_next_chunk);
        for (jbyte* cur = first_card_of_next_chunk;
             cur <= last_card_to_check; cur++) {
          if (card_will_be_scanned(*cur)) {
            limit_card = cur; break;
          }
        }
        assert(0 <= cur_chunk_index+1 &&
               cur_chunk_index+1 < lowest_non_clean_chunk_size,
               "Bounds error.");
        // LNC for the next chunk
        jbyte* lnc_card = lowest_non_clean[cur_chunk_index+1];
        if (limit_card == NULL) {
          limit_card = lnc_card;
        }
        if (limit_card != NULL) {
          if (lnc_card != NULL) {
            limit_card = (jbyte*)MIN2((intptr_t)limit_card,
                                      (intptr_t)lnc_card);
          }
          max_to_do = addr_for(limit_card);
        } else {
          max_to_do = last_block + last_block_size;
        }
      }
    }
    assert(max_to_do != NULL, "OOPS!");
  } else {
    max_to_do = used.end();
  }
  // Now we can set the closure we're using so it doesn't go beyond
  // max_to_do.
  dcto_cl->set_min_done(max_to_do);
#ifndef PRODUCT
  dcto_cl->set_last_bottom(max_to_do);
#endif

  // Now we set *our" lowest_non_clean entry.
  // Find the object that spans our boundary, if one exists.
  // Nothing to do on the first chunk.
  if (chunk_mr.start() > used.start()) {
    // first_block is the block possibly spanning the chunk start
    HeapWord* first_block = sp->block_start(chunk_mr.start());
    // Does the block span the start of the chunk and is it
    // an object?
    if (first_block < chunk_mr.start() &&
        sp->block_is_obj(first_block)) {
      jbyte* first_dirty_card = NULL;
      jbyte* last_card_of_first_obj =
          byte_for(first_block + sp->block_size(first_block) - 1);
      jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
      jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());
      jbyte* last_card_to_check =
        (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
                      (intptr_t) last_card_of_first_obj);
      for (jbyte* cur = first_card_of_cur_chunk;
           cur <= last_card_to_check; cur++) {
        if (card_will_be_scanned(*cur)) {
          first_dirty_card = cur; break;
        }
      }
      if (first_dirty_card != NULL) {
        assert(0 <= cur_chunk_index &&
                 cur_chunk_index < lowest_non_clean_chunk_size,
               "Bounds error.");
        lowest_non_clean[cur_chunk_index] = first_dirty_card;
      }
    }
  }
}
int instanceKlassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk,
					   MemRegion mr) {
  assert(obj->is_klass(),"must be a klass");
  assert(klassOop(obj)->klass_part()->oop_is_instance(), "must be instance klass");
  instanceKlass* ik = instanceKlass::cast(klassOop(obj));
  // Get size before changing pointers.
  // Don't call size() or oop_size() since that is a virtual call.
  int size = ik->object_size();

  ik->iterate_static_fields(blk, mr);
  ik->vtable()->oop_oop_iterate_m(blk, mr);
  ik->itable()->oop_oop_iterate_m(blk, mr);

  oop* adr;
  adr = ik->adr_array_klasses();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_methods();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_method_ordering();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_local_interfaces();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_transitive_interfaces();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_fields();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_constants();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_class_loader();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_protection_domain();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_signers();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_source_file_name();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_source_debug_extension();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_inner_classes();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_implementor();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_previous_version();
  if (mr.contains(adr)) blk->do_oop(adr);

  klassKlass::oop_oop_iterate_m(obj, blk, mr);

  if(ik->oop_map_cache() != NULL) ik->oop_map_cache()->oop_iterate(blk, mr);
  return size;
}
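
Every adr_* slot above goes through the same contains-then-visit pair of lines. A hypothetical helper (not part of the original class) makes the pattern explicit:

// Hypothetical helper: visit a single oop slot only if it lies in the region.
static inline void iterate_oop_in_region(OopClosure* blk, MemRegion mr, oop* adr) {
  if (mr.contains(adr)) {
    blk->do_oop(adr);
  }
}

// Usage, replacing each pair of lines above:
//   iterate_oop_in_region(blk, mr, ik->adr_array_klasses());
//   iterate_oop_in_region(blk, mr, ik->adr_methods());
//   ...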
Example No. 18
void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                OopsInGenClosure* cl) {
  const MemRegion urasm = sp->used_region_at_save_marks();
#ifdef ASSERT
  // Convert the assertion check to a warning if we are running
  // CMS+ParNew until related bug is fixed.
  MemRegion ur    = sp->used_region();
  assert(ur.contains(urasm) || (UseConcMarkSweepGC && UseParNewGC),
         err_msg("Did you forget to call save_marks()? "
                 "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
                 urasm.start(), urasm.end(), ur.start(), ur.end()));
  // In the case of CMS+ParNew, issue a warning
  if (!ur.contains(urasm)) {
    assert(UseConcMarkSweepGC && UseParNewGC, "Tautology: see assert above");
    warning("CMS+ParNew: Did you forget to call save_marks()? "
            "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
            "[" PTR_FORMAT ", " PTR_FORMAT ")",
             urasm.start(), urasm.end(), ur.start(), ur.end());
    MemRegion ur2 = sp->used_region();
    MemRegion urasm2 = sp->used_region_at_save_marks();
    if (!ur.equals(ur2)) {
      warning("CMS+ParNew: Flickering used_region()!!");
    }
    if (!urasm.equals(urasm2)) {
      warning("CMS+ParNew: Flickering used_region_at_save_marks()!!");
    }
    ShouldNotReachHere();
  }
#endif
  _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
}
Example No. 19
//------------------------------------------------------------------------------
// Name: read_bytes(edb::address_t address, void *buf, std::size_t len)
// Desc: reads <len> bytes into <buf> starting at <address>
// Note: if the read failed, the part of the buffer that could not be read will
//       be filled with 0xff bytes
//------------------------------------------------------------------------------
bool DebuggerCore::read_bytes(edb::address_t address, void *buf, std::size_t len) {

	Q_CHECK_PTR(buf);

	bool ok = false;

	if(attached()) {

		if(len == 0) {
			return true;
		}

		memset(buf, 0xff, len);

		// might want to make this more platform-specific (e.g. Windows x86 user-mode memory <= 0x7FFFFFFF)
		const edb::address_t max_address = std::numeric_limits<edb::address_t>::max();

		/*
		// I think we can safely assume this won't happen as long as
		// max_address is the biggest representable number ;)
		if(address > max_address || len > max_address) {
			return false;
		}
		*/

		edb::address_t cur_address = address;
		edb::address_t end_address;

		// check for max possible address (and overflow :s)
		// took a few hours to find that bug
		if(overflows<edb::address_t>(address, len, max_address)) {
			end_address = max_address;
		} else {
			end_address = address + len - 1;
		}

		len = end_address - address + 1;

		const MemoryRegions& regions = edb::v1::memory_regions();

		while(cur_address <= end_address) {
			bool part_ok = false;

			void* cur_dest = reinterpret_cast<quint8 *>(buf) + (cur_address - address);
			edb::address_t cur_end;
			std::size_t cur_len;

			MemRegion mem;
			if(regions.find_region(cur_address, mem)) {
				bool changed = false;
				if(!mem.readable()) {
					mem.set_permissions(true, mem.writable(), mem.executable());
					changed = true;
				}

				// special cases: first and last region (with unaligned address or end_address)
				if(overflows<edb::address_t>(mem.start, mem.size(), end_address)) {
					cur_end = end_address;
				} else {
					cur_end = mem.start + mem.size() - 1;
				}
				cur_len = cur_end - cur_address + 1;		

				SIZE_T bytes_read;
				part_ok = ReadProcessMemory(process_handle_, reinterpret_cast<void*>(cur_address), cur_dest, cur_len, &bytes_read);

				Q_ASSERT(bytes_read == cur_len);

				if(part_ok) {
					ok = true;
					Q_FOREACH(const Breakpoint::pointer &bp, breakpoints_) {
						if((bp->address() + breakpoint_size()) > cur_address && bp->address() <= cur_end) {
							// show the original bytes in the buffer..
							const QByteArray& bytes = bp->original_bytes();
							Q_ASSERT(bytes.size() == breakpoint_size());
							size_t offset = qMax(bp->address(), cur_address) - bp->address();
							const size_t bp_size = qMin<size_t>(breakpoint_size(), (end_address - bp->address() + 1)) - offset;
							const void* bp_src = bytes.data() + offset;
							void* bp_dest = reinterpret_cast<quint8 *>(buf) + (bp->address() + offset - address);
							memcpy(bp_dest, bp_src, bp_size);
						}
					}
				}

				if(changed) {
					mem.set_permissions(false, mem.writable(), mem.executable());
				}
			} else {
				// check next possible page
				const edb::address_t cur_base = cur_address - (cur_address % page_size());
				if(overflows<edb::address_t>(cur_base, page_size(), end_address)) {
					cur_end = end_address;
				} else {
					cur_end = cur_base + page_size() - 1;
				}
				cur_len = cur_end - cur_address + 1;
			}

			if(overflows<edb::address_t>(cur_address, cur_len, end_address)) {
				break;
			}

			cur_address += cur_len;
		}
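
read_bytes() relies on an overflows<T>() helper that is not shown in this excerpt. A plausible standalone version, an assumption about its contract rather than edb's actual implementation, checks whether [base, base + size) would run past max_value:

// Hypothetical sketch: true if the range [base, base + size) extends past
// max_value (assumes base <= max_value); a zero-length range never overflows.
template <class T>
bool overflows(T base, T size, T max_value) {
	if (size == 0) {
		return false;
	}
	// Rearranged from "base + size - 1 > max_value" so the check itself cannot wrap.
	return size - 1 > max_value - base;
}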
Example No. 20
inline ParMarkBitMap::idx_t
ParMarkBitMap::bits_required(MemRegion covered_region)
{
  return bits_required(covered_region.word_size());
}
Example No. 21
 void do_oop(objectRef* p) {
   if (mr.contains(p)) {
     cl->do_oop(p);
   }
 }
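
This last fragment is the usual region-filtering closure: it forwards a slot to the wrapped closure only if the slot's address falls inside mr. A self-contained sketch of the same shape with generic names (FilteringVisitor, Slot and inner are illustrative):

#include <functional>

template <class Slot>
struct FilteringVisitor {
  const Slot* region_start;           // half-open [region_start, region_end)
  const Slot* region_end;
  std::function<void(Slot*)> inner;   // the wrapped closure

  void do_oop(Slot* p) const {
    // Forward only slots whose address lies inside the region.
    if (p >= region_start && p < region_end) {
      inner(p);
    }
  }
};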