size_t MutableNUMASpace::free_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->free_in_words();
  }
  return s;
}
// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parsability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        intptr_t cur_top = (intptr_t)s->top();
        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
        while (words_left_to_fill > 0) {
          size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
          assert(words_to_fill >= CollectedHeap::min_fill_size(),
                 "Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
                 words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size());
          CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
          if (!os::numa_has_static_binding()) {
            size_t touched_words = words_to_fill;
#ifndef ASSERT
            if (!ZapUnusedHeapArea) {
              touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                   touched_words);
            }
#endif
            MemRegion invalid;
            HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
            HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
            if (crossing_start != crossing_end) {
              // If the object header crossed a small page boundary we mark the area
              // as invalid, rounding it to a page_size().
              HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
              HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
              invalid = MemRegion(start, end);
            }

            ls->add_invalid_region(invalid);
          }
          cur_top = cur_top + (words_to_fill * HeapWordSize);
          words_left_to_fill -= words_to_fill;
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
        return;
      }
    }
  }
}
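// Note on ensure_parsability() above: a single filler object is capped at
// CollectedHeap::filler_array_max_size(), so a hole larger than that cap is
// covered by a sequence of filler arrays rather than one object. As an
// illustrative example (sizes are hypothetical, not taken from any particular
// platform): filling a hole of 10M words with a cap of 4M words produces
// fillers of 4M, 4M and 2M words; each chunk must still be at least
// CollectedHeap::min_fill_size() words, which is what the assert checks.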
size_t MutableNUMASpace::capacity_in_words(Thread* thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    // See the comment in tlab_capacity(): until the thread's home locality
    // group is known, assume all spaces are of equal size.
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_words() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_words();
}
size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
  // Please see the comments for tlab_capacity().
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return free_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->free_in_bytes();
}
size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    // This case can occur after the topology of the system has
    // changed. Threads can change their location; the new home
    // group will be determined during the first allocation
    // attempt. For now we can safely assume that all spaces
    // have equal size because the whole space will be reinitialized.
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  // That's the normal case, where we know the locality group of the thread.
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
}
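// Illustrative example (hypothetical numbers) for the lgrp_id == -1 path in
// tlab_capacity() above: with a 512M space evenly backed by 4 locality
// groups, a thread whose home group is not yet known reports a capacity of
// 128M; once its first allocation assigns a home group, the thread reports
// the capacity of that group's space only.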
// Check if the NUMA topology has changed. Add and remove spaces if needed.
// The update can be forced by setting the force parameter equal to true.
bool MutableNUMASpace::update_layout(bool force) {
  // Check if the topology has changed.
  bool changed = os::numa_topology_changed();
  if (force || changed) {
    // Compute lgrp intersection. Add/remove spaces.
    int lgrp_limit = (int)os::numa_get_groups_num();
    int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtGC);
    int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
    assert(lgrp_num > 0, "There should be at least one locality group");
    // Add new spaces for the new nodes.
    for (int i = 0; i < lgrp_num; i++) {
      bool found = false;
      for (int j = 0; j < lgrp_spaces()->length(); j++) {
        if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) {
          found = true;
          break;
        }
      }
      if (!found) {
        lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment()));
      }
    }

    // Remove spaces for the removed nodes.
    for (int i = 0; i < lgrp_spaces()->length();) {
      bool found = false;
      for (int j = 0; j < lgrp_num; j++) {
        if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
          found = true;
          break;
        }
      }
      if (!found) {
        delete lgrp_spaces()->at(i);
        lgrp_spaces()->remove_at(i);
      } else {
        i++;
      }
    }

    FREE_C_HEAP_ARRAY(int, lgrp_ids);

    if (changed) {
      for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
        thread->set_lgrp_id(-1);
      }
    }
    return true;
  }
  return false;
}
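// Illustrative example (hypothetical node ids) for update_layout() above:
// if the leaf groups change from {0, 1, 2} to {0, 2, 3}, the first pass
// appends a new LGRPSpace for node 3 and the second pass deletes the space
// for node 1, leaving the spaces for nodes 0 and 2 intact. Because 'changed'
// is true, every JavaThread's lgrp_id is reset to -1, so each thread
// re-resolves its home locality group on its next allocation.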
MutableNUMASpace::~MutableNUMASpace() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    delete lgrp_spaces()->at(i);
  }
  delete lgrp_spaces();
}