static std::size_t default_stacksize() { std::size_t size = 64 * 1024; // 64 kB if ( is_stack_unbound() ) return (std::max)( size, minimum_stacksize() ); BOOST_ASSERT( maximum_stacksize() >= minimum_stacksize() ); return maximum_stacksize() == minimum_stacksize() ? minimum_stacksize() : ( std::min)( size, maximum_stacksize() ); }
// Default stack size for guarded stacks: eight times the minimal size,
// capped by the platform maximum when the stack is bound.
std::size_t guarded_stack_allocator::default_stacksize()
{
    const std::size_t preferred = 8 * minimum_stacksize();
    if ( is_stack_unbound() )
        return preferred;
    BOOST_ASSERT( maximum_stacksize() >= minimum_stacksize() );
    if ( maximum_stacksize() == preferred )
        return preferred;
    return std::min( preferred, maximum_stacksize() );
}
std::size_t guarded_stack_allocator::default_stacksize() { std::size_t size = 64 * 1024; // 64 kB if ( is_stack_unbound() ) return std::max( size, minimum_stacksize() ); BOOST_ASSERT( maximum_stacksize() >= minimum_stacksize() ); return maximum_stacksize() == minimum_stacksize() ? minimum_stacksize() : std::min( size, maximum_stacksize() ); }
void * guarded_stack_allocator::allocate( std::size_t size) const { BOOST_ASSERT( minimum_stacksize() <= size); BOOST_ASSERT( is_stack_unbound() || ( maximum_stacksize() >= size) ); const std::size_t pages( page_count( size) + 1); // add one guard page const std::size_t size_( pages * pagesize() ); BOOST_ASSERT( 0 < size && 0 < size_); const int fd( ::open("/dev/zero", O_RDONLY) ); BOOST_ASSERT( -1 != fd); // conform to POSIX.4 (POSIX.1b-1993, _POSIX_C_SOURCE=199309L) void * limit = # if defined(macintosh) || defined(__APPLE__) || defined(__APPLE_CC__) ::mmap( 0, size_, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); # else ::mmap( 0, size_, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); # endif ::close( fd); if ( ! limit) throw std::bad_alloc(); std::memset( limit, size_, '\0'); // conforming to POSIX.1-2001 const int result( ::mprotect( limit, pagesize(), PROT_NONE) ); BOOST_ASSERT( 0 == result); return static_cast< char * >( limit) + size_; }
void * allocate( std::size_t size) const { BOOST_ASSERT( minimum_stacksize() <= size); BOOST_ASSERT( is_stack_unbound() || ( maximum_stacksize() >= size) ); const std::size_t pages( page_count( size) + 1); // add one guard page const std::size_t size_ = pages * pagesize(); BOOST_ASSERT( 0 < size && 0 < size_); void * limit = ::VirtualAlloc( 0, size_, MEM_COMMIT, PAGE_READWRITE); if ( ! limit) throw std::bad_alloc(); std::memset( limit, size_, '\0'); DWORD old_options; #if defined(BOOST_DISABLE_ASSERTS) ::VirtualProtect( limit, pagesize(), PAGE_READWRITE | PAGE_GUARD /*PAGE_NOACCESS*/, & old_options); #else const BOOL result = ::VirtualProtect( limit, pagesize(), PAGE_READWRITE | PAGE_GUARD /*PAGE_NOACCESS*/, & old_options); BOOST_ASSERT( FALSE != result); #endif return static_cast< char * >( limit) + size_; }
// Releases a malloc-based stack. `vp` is the pointer returned by allocate(),
// i.e. the TOP of the stack; the actual allocation starts `size` bytes below.
void deallocate( void * vp, std::size_t size) const
{
    BOOST_ASSERT( vp);
    BOOST_ASSERT( minimum_stacksize() <= size);
    BOOST_ASSERT( maximum_stacksize() >= size);

    char * base = static_cast< char * >( vp) - size;
    std::free( base);
}
// Allocates a stack of `size` bytes from the heap and returns a pointer to
// its TOP (stacks grow downwards). Throws std::bad_alloc on failure.
void * allocate( std::size_t size) const
{
    BOOST_ASSERT( minimum_stacksize() <= size);
    BOOST_ASSERT( maximum_stacksize() >= size);

    void * base = std::malloc( size);
    if ( ! base)
        throw std::bad_alloc();
    char * top = static_cast< char * >( base) + size;
    return top;
}
///---------------------------------------------------------------------------- void coroutine_stackful_actor::start(std::size_t stack_size) { if (stack_size < minimum_stacksize()) { stack_size = minimum_stacksize(); } else if (stack_size > default_stacksize()) { stack_size = default_stacksize(); } boost::asio::spawn( snd_, boost::bind( &coroutine_stackful_actor::run, this, _1 ), boost::coroutines::attributes(stack_size) ); }
// Allocates a zero-initialized stack of `size` bytes and returns a pointer to
// its TOP (stacks grow downwards). Throws std::bad_alloc on failure.
void* allocate(std::size_t size) const
{
  BOOST_ASSERT(minimum_stacksize() <= size);
  BOOST_ASSERT(maximum_stacksize() >= size);

  // calloc hands back a zeroed block
  void* base = std::calloc(size, sizeof(char));
  if (!base)
  {
    boost::throw_exception(std::bad_alloc());
  }
  return static_cast<char*>(base) + size;
}
// Releases a VirtualAlloc-backed stack. `ctx.sp` points at the TOP of the
// stack; the reserved region begins `ctx.size` bytes below it.
void standard_stack_allocator::deallocate( stack_context & ctx)
{
    BOOST_ASSERT( ctx.sp);
    BOOST_ASSERT( minimum_stacksize() <= ctx.size);
    BOOST_ASSERT( is_stack_unbound() || ( maximum_stacksize() >= ctx.size) );

    char * base = static_cast< char * >( ctx.sp) - ctx.size;
    ::VirtualFree( base, 0, MEM_RELEASE);
}
void deallocate( void * vp, std::size_t size) const { BOOST_ASSERT( vp); BOOST_ASSERT( minimum_stacksize() <= size); BOOST_ASSERT( is_stack_unbound() || ( maximum_stacksize() >= size) ); const std::size_t pages = page_count( size) + 1; const std::size_t size_ = pages * pagesize(); BOOST_ASSERT( 0 < size && 0 < size_); void * limit = static_cast< char * >( vp) - size_; ::VirtualFree( limit, 0, MEM_RELEASE); }
void guarded_stack_allocator::deallocate( void * vp, std::size_t size) const { BOOST_ASSERT( vp); BOOST_ASSERT( minimum_stacksize() <= size); BOOST_ASSERT( is_stack_unbound() || ( maximum_stacksize() >= size) ); const std::size_t pages = page_count( size) + 1; const std::size_t size_ = pages * pagesize(); BOOST_ASSERT( 0 < size && 0 < size_); void * limit = static_cast< char * >( vp) - size_; // conform to POSIX.4 (POSIX.1b-1993, _POSIX_C_SOURCE=199309L) ::munmap( limit, size_); }
// Stress test: spawn 20000 echo coroutines on one strand, each with the
// minimal stack size, then drive the io_service to completion.
static void test_spawn()
{
  try
  {
    io_service_t ios;
    strand_t snd(ios);
    std::size_t const count = 20000;
    for (std::size_t i = 0; i != count; ++i)
    {
      boost::asio::spawn(
        snd,
        boost::bind(&actor_ut::echo, _arg1, boost::ref(ios)),
        boost::coroutines::attributes(minimum_stacksize())
        );
    }
    ios.run();
  }
  catch (std::exception& ex)
  {
    std::cerr << ex.what() << std::endl;
  }
}
// Segmented stacks grow on demand, so the minimal size is a sufficient default.
std::size_t segmented_stack_allocator::default_stacksize()
{
    const std::size_t initial = minimum_stacksize();
    return initial;
}
// The default stack size is simply the smallest size the platform permits.
static std::size_t default_stacksize()
{
    const std::size_t smallest = minimum_stacksize();
    return smallest;
}