void sc_cor_qt::stack_protect( bool enable ) { // Code needs to be tested on HP-UX and disabled if it doesn't work there // Code still needs to be ported to WIN32 static std::size_t pagesize; if( pagesize == 0 ) { # if defined(__ppc__) pagesize = getpagesize(); # else pagesize = sysconf( _SC_PAGESIZE ); # endif } assert( pagesize != 0 ); assert( m_stack_size > ( 2 * pagesize ) ); #ifdef QUICKTHREADS_GROW_DOWN // Stacks grow from high address down to low address caddr_t redzone = caddr_t( ( ( std::size_t( m_stack ) + pagesize - 1 ) / pagesize ) * pagesize ); #else // Stacks grow from low address up to high address caddr_t redzone = caddr_t( ( ( std::size_t( m_stack ) + m_stack_size - pagesize ) / pagesize ) * pagesize ); #endif int ret; // Enable the red zone at the end of the stack so that references within // it will cause an interrupt. if( enable ) { ret = mprotect( redzone, pagesize - 1, PROT_NONE ); } // Revert the red zone to normal memory usage. Try to make it read - write - // execute. If that does not work then settle for read - write else { ret = mprotect( redzone, pagesize - 1, PROT_READ|PROT_WRITE|PROT_EXEC); if ( ret != 0 ) ret = mprotect( redzone, pagesize - 1, PROT_READ | PROT_WRITE ); } assert( ret == 0 ); }
// Constructor: set up an Apostolico-Giancarlo searcher for pattern pStr
// (length nStrLen) over text pTxt (length nTxtLen).  pMFile names a file
// used to back a memory-mapped scratch buffer of nTxtLen longs (the
// "M-values"), created read-write and truncated on open.
// If the ShiftRule helper does not supply N-values, they are computed here
// by running the Z-algorithm on the reversed pattern.
ApostolicoGiancarlo<AlphaType, ShiftRule>::ApostolicoGiancarlo(
        AlphaType const* pStr,   // pattern start
        long const& nStrLen,     // pattern length
        AlphaType const* pTxt,   // text start
        long const& nTxtLen,     // text length
        char const* pMFile)      // path of the file backing the M-value map
    :
    pSubStr(pStr),
    nSubStr(nStrLen),
    pSubStrEnd(pStr + nStrLen - 1),       // last pattern character
    pText(pTxt),
    nText(nTxtLen),
    pTextEnd(pTxt + nTxtLen),             // one past the last text character
    pTextCmp(pText + nStrLen - 1),        // first comparison position in the text
    nTextPos(0),                          // current alignment offset
    tShiftRule(pStr, nStrLen),            // shift helper (template parameter ShiftRule)
    pNValues(tShiftRule.nValues()),       // may be NULL; computed below if so
    // File-backed read-write shared mapping of nTxtLen * sizeof(long) bytes.
    mmapMValues(caddr_t(0), nTxtLen * sizeof(long), PROT_READ | PROT_WRITE, MAP_SHARED, pMFile, O_CREAT | O_RDWR | O_TRUNC, 0),
    // NOTE(review): the commented-out assignment in the body casts to long*
    // BEFORE adding the offset; this initializer may therefore advance by
    // bytes rather than longs — confirm mmapMValues's operator+ semantics.
    pMWalk(mmapMValues + nStrLen - 1)
{
    //pMWalk = (long*)(mmapMValues) + nStrLen - 1;
#ifdef __STREE_DEBUG__
    cerr << "ApostolicoGiancarlo::ApostolicoGiancarlo()" << endl;
#endif // __STREE_DEBUG__
    // Fallback: the shift rule gave us no N-values, so derive them from the
    // Z-values of the reversed pattern.
    if (pNValues == NULL) {
#ifdef __STREE_DEBUG__
        cerr << "ApostolicoGiancarlo::ApostolicoGiancarlo(): pNValues == NULL" << endl;
#endif // __STREE_DEBUG__
        AlphaType* pStrRev = new AlphaType[nStrLen];
        memcpy(pStrRev, pStr, nStrLen * sizeof(AlphaType));
        reverse<AlphaType>(pStrRev, nStrLen);
        pNValues = new long[nStrLen];
        // NOTE(review): instantiated with <char> and a '\0' sentinel even
        // though the pattern element type is AlphaType — confirm this is
        // intended for non-char alphabets.
        zAlgo<char>(pStrRev, '\0', pNValues);
        delete [] pStrRev;
    }
}
// Construct the old generation from a snapshot stream.
//   snap          - open snapshot file positioned at the old-space header
//   initial_size  - bytes to allocate for the initial old heap
//   reserved_amt  - bytes of VM memory held in reserve; the low-space
//                   threshold is set to twice this amount
oldGeneration::oldGeneration(FILE* snap, int32 initial_size, int32 reserved_amt) {
  // FIX: initialize s — the original left it uninitialized, so with a
  // (corrupt) snapshot recording zero spaces, "last_space= s" below read
  // an indeterminate pointer.
  oldSpace *s= NULL;
  VM_reserved_mem= reserved_amt;
  setLowSpaceThreshold(VM_reserved_mem * 2);

  // Read the number of old spaces (byte-swapped) and sanity-check it.
  OS::FRead_swap(&nSpaces, sizeof(nSpaces), snap);
  assert( nSpaces < 1000, "Snapshot corrupted, unbelievable number of spaces");

  // Recreate each old space from the snapshot, chaining them in order.
  oldSpace *prev= NULL;
  for (unsigned n= 0; n < nSpaces; n++) {
    char *name= new char[10];
    // FIX: n is unsigned, so use %u (was %d), and bound the write with
    // snprintf; "old999" (7 bytes with NUL) fits thanks to the assert above.
    snprintf(name, 10, "old%u", n);
    s= new oldSpace(const_cast<const char*>(name), snap);
    if (prev) prev->next_space= s;
    else      first_space= s;
    prev= s;
  }
  last_space= s;

  // Place the old heap immediately above the new generation.
  caddr_t old_start= Memory->new_gen->high_boundary;
  char *old_heap= OS::allocate_idealized_page_aligned(initial_size, "old0", old_start);
  // I don't know why the Unix code does this, but
  // the MAC always falls back on malloc--I don't know
  // how else to do it -- dmu
  if (OS::is_directed_allocation_supported() && caddr_t(old_heap) != old_start)
    fatal("Couldn't allocate old space contiguous with new space");
  assert(caddr_t(old_heap) >= old_start, "at least assume sequentiality");
  top_of_old_space= old_heap + initial_size;

  char *bottom= low_boundary=  old_heap;
  char *top=    high_boundary= old_heap + initial_size;

  // Load each space's contents; successive spaces pack the region from
  // both ends (objects from the bottom, bytes from the top).
  {FOR_EACH_OLD_SPACE(ss) {
      ss->read_snapshot(snap, bottom, top);
      bottom= (char*)(ss->objs_top);
      top=    (char*)(ss->bytes_bottom);
  }}
}
void sc_cor_qt::stack_protect( bool enable ) { // Code needs to be tested on HP-UX and disabled if it doesn't work there // Code still needs to be ported to WIN32 static std::size_t pagesize; if( pagesize == 0 ) { # if defined(__ppc__) pagesize = getpagesize(); # else pagesize = sysconf( _SC_PAGESIZE ); # endif } assert( pagesize != 0 ); assert( m_stack_size > ( 2 * pagesize ) ); #ifdef QUICKTHREADS_GROW_DOWN // Stacks grow from high address down to low address caddr_t redzone = caddr_t( ( ( std::size_t( m_stack ) + pagesize - 1 ) / pagesize ) * pagesize ); #else // Stacks grow from low address up to high address caddr_t redzone = caddr_t( ( ( std::size_t( m_stack ) + m_stack_size - pagesize ) / pagesize ) * pagesize ); #endif int ret; if( enable ) { ret = mprotect( redzone, pagesize - 1, PROT_NONE ); } else { ret = mprotect( redzone, pagesize - 1, PROT_READ | PROT_WRITE ); } assert( ret == 0 ); }