virtual MemPointer* next() {
   MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
   // An arena memory record is a special case: we have to compare its
   // sequence number against that of its associated arena record.
   if (next_rec != NULL && next_rec->is_arena_memory_record()) {
     MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
      // If there is an associated arena record, it has to be the previous
      // record because of the sorting order (by address). NMT generates a pseudo
      // address for an arena's size record by offsetting the arena's address,
      // which guarantees the ordering of an arena record and its size record.
     if (prev_rec != NULL && prev_rec->is_arena_record() &&
       next_rec->is_memory_record_of_arena(prev_rec)) {
       if (prev_rec->seq() > next_rec->seq()) {
          // Skip this arena memory record.
          // Two scenarios:
          //   - If the arena record is an allocation record, this early
          //     size record must be left over from a previous arena,
          //     and the last size record should have size = 0.
          //   - If the arena record is a deallocation record, this
          //     size record should be its cleanup record, which should
          //     also have size = 0. In other words, an arena always resets
          //     its size before it goes away (see Arena's destructor).
         assert(next_rec->size() == 0, "size not reset");
         return _itr.next();
       } else {
         assert(prev_rec->is_allocation_record(),
           "Arena size record ahead of allocation record");
       }
     }
   }
   return next_rec;
 }
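To make the "pseudo address" ordering concrete, here is a minimal, self-contained sketch (not HotSpot code) of the idea: the arena's size record is keyed by the arena's address plus a small offset, so an address sort places it immediately after the arena record itself, which is what lets next() locate the arena record via peek_prev(). The Rec struct and the offset value are hypothetical stand-ins, not NMT's actual types.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct Rec {
  uintptr_t addr;        // sort key
  int       seq;         // allocation sequence number
  bool      is_size_record;
};

int main() {
  const uintptr_t arena_addr    = 0x1000;
  const uintptr_t pseudo_offset = sizeof(void*);   // assumed small offset

  std::vector<Rec> recs = {
    { arena_addr + pseudo_offset, 7, true  },      // arena size record (pseudo address)
    { arena_addr,                 5, false },      // arena allocation record
    { 0x2000,                     9, false },      // unrelated record
  };

  std::sort(recs.begin(), recs.end(),
            [](const Rec& a, const Rec& b) { return a.addr < b.addr; });

  // After sorting by address, the size record immediately follows its arena record.
  assert(recs[0].addr == arena_addr && !recs[0].is_size_record);
  assert(recs[1].addr == arena_addr + pseudo_offset && recs[1].is_size_record);
  return 0;
}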
  // Return the current record; in debug builds, verify the address-sort order
  // against the neighboring records.
  virtual MemPointer* current() const {
#ifdef ASSERT
    MemPointer* cur_rec = _itr.current();
    if (cur_rec != NULL) {
      MemPointer* prev_rec = _itr.peek_prev();
      MemPointer* next_rec = _itr.peek_next();
      assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
      assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
    }
#endif
    return _itr.current();
  }
  // Position the iterator at the first record that survives duplicate removal.
  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
        "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
  }
  // Get the next record, skipping any duplicated records.
  virtual MemPointer* next() {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
        "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        cur = next;
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
    return cur;
  }
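The duplicate-skipping loop above relies on duplicated records being adjacent and pre-sorted by sequence number, so the last record of each duplicate run (the one with the highest sequence number) is the one returned. Below is a minimal stand-alone sketch of that pattern; Rec, is_dup() and dedup() are hypothetical stand-ins rather than NMT's real types.

#include <cassert>
#include <vector>

struct Rec { int key; int seq; };

// Stand-in for is_duplicated_record(): two records are duplicates if they share a key.
static bool is_dup(const Rec& a, const Rec& b) { return a.key == b.key; }

// Return the surviving records, one per duplicate run (the one with the highest seq).
static std::vector<Rec> dedup(const std::vector<Rec>& in) {
  std::vector<Rec> out;
  for (size_t i = 0; i < in.size(); ) {
    size_t j = i;
    while (j + 1 < in.size() && is_dup(in[j], in[j + 1])) {
      assert(in[j + 1].seq > in[j].seq && "pre-sort order");
      j++;                       // skip the earlier duplicate, keep advancing
    }
    out.push_back(in[j]);        // last record of the run wins
    i = j + 1;
  }
  return out;
}

int main() {
  std::vector<Rec> recs = { {1, 3}, {1, 5}, {2, 7}, {3, 8}, {3, 9} };
  std::vector<Rec> kept = dedup(recs);
  assert(kept.size() == 3 && kept[0].seq == 5 && kept[2].seq == 9);
  return 0;
}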
Example #5
void BaselineReporter::report_virtual_memory_map(const MemBaseline& baseline) {
  _outputer.start_virtual_memory_map();
  // Treat the baseline as non-const so its internal VM map can be iterated.
  MemBaseline* pBL = const_cast<MemBaseline*>(&baseline);
  MemPointerArrayIteratorImpl itr = MemPointerArrayIteratorImpl(pBL->_vm_map);
  // Walk the VM map, dispatching reserved and committed regions to the outputer.
  VMMemRegionEx* rgn = (VMMemRegionEx*)itr.current();
  while (rgn != NULL) {
    if (rgn->is_reserved_region()) {
      _outputer.reserved_memory_region(FLAGS_TO_MEMORY_TYPE(rgn->flags()),
        rgn->base(), rgn->base() + rgn->size(), amount_in_current_scale(rgn->size()), rgn->pc());
    } else {
      _outputer.committed_memory_region(rgn->base(), rgn->base() + rgn->size(),
        amount_in_current_scale(rgn->size()), rgn->pc());
    }
    rgn = (VMMemRegionEx*)itr.next();
  }

  _outputer.done_virtual_memory_map();
}
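The reporting loop above is a simple dispatch: each region in the baseline's VM map is routed either to reserved_memory_region() or committed_memory_region(). Here is a small self-contained sketch of the same dispatch pattern, using a hypothetical Region struct and report_* functions in place of the real outputer interface.

#include <cstdint>
#include <cstdio>
#include <vector>

struct Region {
  uintptr_t base;
  size_t    size;
  bool      reserved;   // true = reserved region, false = committed sub-region
};

static void report_reserved(const Region& r) {
  std::printf("reserved  [%#lx - %#lx] %zu bytes\n",
              (unsigned long)r.base, (unsigned long)(r.base + r.size), r.size);
}

static void report_committed(const Region& r) {
  std::printf("committed [%#lx - %#lx] %zu bytes\n",
              (unsigned long)r.base, (unsigned long)(r.base + r.size), r.size);
}

int main() {
  // Committed regions follow the reserved region that contains them.
  std::vector<Region> vm_map = {
    { 0x10000, 0x8000, true  },
    { 0x10000, 0x2000, false },
    { 0x14000, 0x1000, false },
  };
  for (const Region& r : vm_map) {
    if (r.reserved) { report_reserved(r); } else { report_committed(r); }
  }
  return 0;
}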
 virtual MemPointer* current() const {
   return _itr.current();
 }