std::size_t
guarded_stack_allocator::default_stacksize()
{
    std::size_t size = 8 * minimum_stacksize();
    if ( is_stack_unbound() ) return size;
    
    BOOST_ASSERT( maximum_stacksize() >= minimum_stacksize() );
    return maximum_stacksize() == size
        ? size
        : std::min( size, maximum_stacksize() );
}
Example #2
    static std::size_t default_stacksize()
    {
        std::size_t size = 64 * 1024; // 64 kB
        if ( is_stack_unbound() )
            return (std::max)( size, minimum_stacksize() );

        BOOST_ASSERT( maximum_stacksize() >= minimum_stacksize() );
        return maximum_stacksize() == minimum_stacksize()
            ? minimum_stacksize()
            : ( std::min)( size, maximum_stacksize() );
    }
std::size_t
guarded_stack_allocator::default_stacksize()
{
    std::size_t size = 64 * 1024; // 64 kB
    if ( is_stack_unbound() )
        return std::max( size, minimum_stacksize() );
    
    BOOST_ASSERT( maximum_stacksize() >= minimum_stacksize() );
    return maximum_stacksize() == minimum_stacksize()
        ? minimum_stacksize()
        : std::min( size, maximum_stacksize() );
}
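The default_stacksize() variants above clamp against three queries that are not shown in these snippets. Below is a minimal sketch of what they might look like on a POSIX build, using getrlimit() and SIGSTKSZ; the names match the snippets, but the bodies are an assumption, not the exact Boost implementation:

#include <csignal>        // SIGSTKSZ
#include <cstddef>
#include <sys/resource.h> // getrlimit, RLIMIT_STACK, RLIM_INFINITY

namespace sketch {

inline rlimit stacklimit()
{
    rlimit limit;
    ::getrlimit( RLIMIT_STACK, & limit);
    return limit;
}

// unbound if the hard limit on the process stack is infinite
inline bool is_stack_unbound()
{ return RLIM_INFINITY == stacklimit().rlim_max; }

// smallest stack that can still run a signal handler
inline std::size_t minimum_stacksize()
{ return SIGSTKSZ; }

// largest stack the process limit allows (only meaningful if not unbound)
inline std::size_t maximum_stacksize()
{ return static_cast< std::size_t >( stacklimit().rlim_max); }

} // namespace sketch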
void *
guarded_stack_allocator::allocate( std::size_t size) const
{
    BOOST_ASSERT( minimum_stacksize() <= size);
    BOOST_ASSERT( is_stack_unbound() || ( maximum_stacksize() >= size) );

    const std::size_t pages( page_count( size) + 1); // add one guard page
    const std::size_t size_( pages * pagesize() );
    BOOST_ASSERT( 0 < size && 0 < size_);

    const int fd( ::open("/dev/zero", O_RDONLY) );
    BOOST_ASSERT( -1 != fd);
    // conform to POSIX.4 (POSIX.1b-1993, _POSIX_C_SOURCE=199309L)
    void * limit =
# if defined(macintosh) || defined(__APPLE__) || defined(__APPLE_CC__)
        ::mmap( 0, size_, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
# else
        ::mmap( 0, size_, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
# endif
    ::close( fd);
    if ( MAP_FAILED == limit) throw std::bad_alloc(); // mmap() reports failure with MAP_FAILED, not a null pointer

    std::memset( limit, '\0', size_); // zero the freshly mapped stack (value, then count)

    // conforming to POSIX.1-2001
    const int result( ::mprotect( limit, pagesize(), PROT_NONE) );
    BOOST_ASSERT( 0 == result);

    return static_cast< char * >( limit) + size_;
}
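The POSIX allocate() above relies on pagesize() and page_count(), which the snippet does not show. A plausible sketch, assuming sysconf(_SC_PAGESIZE) for the page size and round-up division for the page count:

#include <cstddef>
#include <unistd.h> // sysconf, _SC_PAGESIZE

inline std::size_t pagesize()
{ return static_cast< std::size_t >( ::sysconf( _SC_PAGESIZE) ); }

// number of whole pages needed to hold a stack of the requested size
inline std::size_t page_count( std::size_t stacksize)
{ return ( stacksize + pagesize() - 1) / pagesize(); }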
    void * allocate( std::size_t size) const
    {
        BOOST_ASSERT( minimum_stacksize() <= size);
        BOOST_ASSERT( is_stack_unbound() || ( maximum_stacksize() >= size) );

        const std::size_t pages( page_count( size) + 1); // add one guard page
        const std::size_t size_ = pages * pagesize();
        BOOST_ASSERT( 0 < size && 0 < size_);

        void * limit = ::VirtualAlloc( 0, size_, MEM_COMMIT, PAGE_READWRITE);
        if ( ! limit) throw std::bad_alloc();

        std::memset( limit, '\0', size_); // zero the freshly committed stack (value, then count)

        DWORD old_options;
#if defined(BOOST_DISABLE_ASSERTS)
        ::VirtualProtect(
            limit, pagesize(), PAGE_READWRITE | PAGE_GUARD /*PAGE_NOACCESS*/, & old_options);
#else
        const BOOL result = ::VirtualProtect(
            limit, pagesize(), PAGE_READWRITE | PAGE_GUARD /*PAGE_NOACCESS*/, & old_options);
        BOOST_ASSERT( FALSE != result);
#endif

        return static_cast< char * >( limit) + size_;
    }
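The Windows allocate() above needs the same pagesize() helper; presumably it would come from GetSystemInfo(). A sketch, not the exact source:

#include <cstddef>
#include <windows.h>

inline std::size_t pagesize()
{
    SYSTEM_INFO si;
    ::GetSystemInfo( & si);
    return static_cast< std::size_t >( si.dwPageSize);
}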
Example #6
    void deallocate( void * vp, std::size_t size) const
    {
        BOOST_ASSERT( vp);
        BOOST_ASSERT( minimum_stacksize() <= size);
        BOOST_ASSERT( maximum_stacksize() >= size);

        void * limit = static_cast< char * >( vp) - size;
        std::free( limit);
    }
Example #7
    void * allocate( std::size_t size) const
    {
        BOOST_ASSERT( minimum_stacksize() <= size);
        BOOST_ASSERT( maximum_stacksize() >= size);

        void * limit = std::malloc( size);
        if ( ! limit) throw std::bad_alloc();

        return static_cast< char * >( limit) + size;
    }
Example #8
            void* allocate(std::size_t size) const
            {
                BOOST_ASSERT(minimum_stacksize() <= size);
                BOOST_ASSERT(maximum_stacksize() >= size);

                void* limit = std::calloc(size, sizeof(char));
                if (!limit) boost::throw_exception(std::bad_alloc());

                return static_cast<char*>(limit) + size;
            }
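Examples #6 to #8 only show single member functions of a heap-based allocator. Here is a self-contained usage sketch that pairs them up (simple_stack_allocator is a made-up name, purely for illustration): allocate() returns the top of the downward-growing stack, so deallocate() subtracts the size again before freeing.

#include <cstddef>
#include <cstdlib>
#include <new>

struct simple_stack_allocator {
    void * allocate( std::size_t size) const
    {
        void * limit = std::malloc( size);
        if ( ! limit) throw std::bad_alloc();
        // hand out the top of the stack; the stack grows downwards from here
        return static_cast< char * >( limit) + size;
    }

    void deallocate( void * vp, std::size_t size) const
    {
        // recover the base pointer malloc() returned, then release it
        std::free( static_cast< char * >( vp) - size);
    }
};

int main()
{
    simple_stack_allocator alloc;
    const std::size_t size = 64 * 1024;
    void * sp = alloc.allocate( size);  // sp points just past the highest usable byte
    alloc.deallocate( sp, size);
    return 0;
}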
void
standard_stack_allocator::deallocate( stack_context & ctx)
{
    BOOST_ASSERT( ctx.sp);
    BOOST_ASSERT( minimum_stacksize() <= ctx.size);
    BOOST_ASSERT( is_stack_unbound() || ( maximum_stacksize() >= ctx.size) );

    void * limit = static_cast< char * >( ctx.sp) - ctx.size;
    ::VirtualFree( limit, 0, MEM_RELEASE);
}
Example #10
    void deallocate( void * vp, std::size_t size) const
    {
        BOOST_ASSERT( vp);
        BOOST_ASSERT( minimum_stacksize() <= size);
        BOOST_ASSERT( is_stack_unbound() || ( maximum_stacksize() >= size) );

        const std::size_t pages = page_count( size) + 1;
        const std::size_t size_ = pages * pagesize();
        BOOST_ASSERT( 0 < size && 0 < size_);
        void * limit = static_cast< char * >( vp) - size_;
        ::VirtualFree( limit, 0, MEM_RELEASE);
    }
void
guarded_stack_allocator::deallocate( void * vp, std::size_t size) const
{
    BOOST_ASSERT( vp);
    BOOST_ASSERT( minimum_stacksize() <= size);
    BOOST_ASSERT( is_stack_unbound() || ( maximum_stacksize() >= size) );

    const std::size_t pages = page_count( size) + 1;
    const std::size_t size_ = pages * pagesize();
    BOOST_ASSERT( 0 < size && 0 < size_);
    void * limit = static_cast< char * >( vp) - size_;
    // conform to POSIX.4 (POSIX.1b-1993, _POSIX_C_SOURCE=199309L)
    ::munmap( limit, size_);
}
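The guarded deallocate() recomputes size_ = (page_count(size) + 1) * pagesize(), i.e. the usable pages plus one guard page, because allocate() returned base + size_; subtracting the same size_ recovers the address the original mapping started at. A small self-contained check of that arithmetic, with an assumed 4 KiB page size purely for illustration:

#include <cassert>
#include <cstddef>

int main()
{
    const std::size_t pagesize = 4096;                    // assumed page size
    const std::size_t size     = 64 * 1024;               // requested stack size
    const std::size_t pages    = size / pagesize;         // page_count(size) for a page-aligned size
    const std::size_t size_    = ( pages + 1) * pagesize; // usable pages + 1 guard page

    char * base = new char[ size_];      // stands in for the mapping returned by mmap()/VirtualAlloc()
    char * sp   = base + size_;          // what allocate() hands out (the stack top)
    assert( sp - size_ == base);         // what deallocate() recovers before unmapping
    delete[] base;
    return 0;
}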