void MemoryIdler::unmapUnusedStack(size_t retain) {
  if (tls_stackSize == 0) {
    fetchStackLimits();
  }
  if (tls_stackSize <= std::max(size_t(1), retain)) {
    // covers both missing stack info, and impossibly large retain
    return;
  }

  auto sp = getStackPtr();
  assert(sp >= tls_stackLimit);
  assert(sp - tls_stackLimit < tls_stackSize);

  auto end = (sp - retain) & ~(s_pageSize - 1);
  if (end <= tls_stackLimit) {
    // no pages are eligible for unmapping
    return;
  }

  size_t len = end - tls_stackLimit;
  assert((len & (s_pageSize - 1)) == 0);
  if (madvise((void*)tls_stackLimit, len, MADV_DONTNEED) != 0) {
    // It is likely that the stack vma hasn't been fully grown.  In this
    // case madvise will apply dontneed to the present vmas, then return
    // errno of ENOMEM.  We can also get an EAGAIN, theoretically.
    // EINVAL means either an invalid alignment or length, or that some
    // of the pages are locked or shared.  Neither should occur.
    assert(errno == EAGAIN || errno == ENOMEM);
  }
}
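// Usage sketch, not part of the function above: assuming this is folly's
// MemoryIdler (header path, flushLocalMallocCaches(), and the retain
// parameter as I understand that API), an idle worker thread could call it
// before blocking to hand cold stack pages back to the kernel.  The scenario
// and the retain value here are illustrative assumptions.
#include <folly/detail/MemoryIdler.h>

void onWorkerIdle() {
  using folly::detail::MemoryIdler;
  // Return idle malloc caches to the allocator, then madvise away the cold
  // part of this thread's stack, keeping `retain` bytes above the current
  // stack pointer mapped.
  MemoryIdler::flushLocalMallocCaches();
  MemoryIdler::unmapUnusedStack(/*retain=*/4096);
}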
PyScopedLock::~PyScopedLock() {
#ifdef MUTEX_DEBUG
  printf("%p unlocks %p from %s\n",
         (void*)PyThread_get_thread_ident(),
         &mutex,
         getStackSymbol(getStackPtr(2)));
#endif
  mutex.unlock();
}
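// Usage sketch: PyScopedLock looks like an RAII guard whose constructor
// acquires the mutex and whose destructor (above) releases it.  The
// constructor signature and the PyMutex type below are assumptions made
// purely for illustration.
void withSharedState(PyMutex& stateMutex) {
  PyScopedLock guard(stateMutex);  // assumed: ctor calls stateMutex.lock()
  // ... touch state shared between Python threads ...
}                                  // guard destroyed here: mutex.unlock() runs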
const CStackInstance& CCreatureSet::getStack(SlotID slot) const {
  assert(hasStackAtSlot(slot));
  return *getStackPtr(slot);
}
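// Usage sketch: because getStack() only asserts in debug builds, callers
// should verify the slot first.  The SlotID integer constructor and this
// helper function are assumptions made for illustration.
const CStackInstance* tryGetStack(const CCreatureSet& army, int slotIndex) {
  SlotID slot(slotIndex);
  if (!army.hasStackAtSlot(slot))
    return nullptr;
  return &army.getStack(slot);
}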