Example 1
void *
memory_region::realloc(void *mem, size_t size) {
    if (_synchronized) { _lock.lock(); }
    if (!mem) {
        add_alloc();
    }
#ifdef TRACK_ALLOCATIONS
    size += sizeof(int);
    mem = (void*)((uint8_t*)mem - sizeof(int));
    int index = *(int *)mem;
#endif
    void *newMem = _srv->realloc(mem, size);
#ifdef TRACK_ALLOCATIONS
    if (_allocation_list[index] != mem) {
        printf("at index %d, found %p, expected %p\n",
               index, _allocation_list[index], mem);
        printf("realloc: ptr 0x%" PRIxPTR " is not in allocation_list\n",
            (uintptr_t) mem);
        _srv->fatal("not in allocation_list", __FILE__, __LINE__, "");
    }
    else {
        _allocation_list[index] = newMem;
        (*(int*)newMem) = index;
        // printf("realloc: stored %p at index %d, replacing %p\n",
        //        newMem, index, mem);
    }
#endif
    if (_synchronized) { _lock.unlock(); }
#ifdef TRACK_ALLOCATIONS
    newMem = (void *)((uint8_t*)newMem + sizeof(int));
#endif
    return newMem;
}
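Example 1 tracks live allocations by hiding an int slot index just in front of the bytes returned to the caller; the realloc above undoes that shift, reads the index back, and patches the tracking list. A minimal standalone sketch of the same trick (tracked_malloc, tracked_free and allocation_list are hypothetical names, not from the example):

#include <cstdint>
#include <cstdlib>
#include <vector>

static std::vector<void*> allocation_list;       // slot i holds the live base pointer for index i

void *tracked_malloc(std::size_t size) {
    void *mem = std::malloc(size + sizeof(int)); // room for the hidden index
    if (!mem) return NULL;
    allocation_list.push_back(mem);
    *(int *)mem = (int)allocation_list.size() - 1;  // record our slot in front of the data
    return (uint8_t *)mem + sizeof(int);            // hand the caller the shifted pointer
}

void tracked_free(void *mem) {
    if (!mem) return;
    void *base = (uint8_t *)mem - sizeof(int);      // step back to the real allocation start
    allocation_list[*(int *)base] = NULL;           // clear our slot
    std::free(base);
}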
Example 2
void
memory_region::claim_alloc(void *mem) {
#   if RUSTRT_TRACK_ALLOCATIONS >= 1
    alloc_header *alloc = get_header(mem);
    assert(alloc->magic == MAGIC);
#   endif

#   if RUSTRT_TRACK_ALLOCATIONS >= 2
    if (_synchronized) {
        _lock.lock();
    }
    alloc->index = _allocation_list.append(alloc);
    if (_synchronized) {
        _lock.unlock();
    }
#   endif

#   if RUSTRT_TRACK_ALLOCATIONS >= 3
    if (_detailed_leaks) {
        alloc->btframes = ::backtrace(alloc->bt, 32);
    }
#   endif

    add_alloc();
}
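Examples 2 through 4 (and 8 and 13 below) rely on an alloc_header prepended to every allocation, plus get_header()/get_data() helpers, none of which appear in the snippets. A plausible sketch, assuming only the layout implied by the fields the examples touch; the MAGIC value and field order are our guesses:

#include <cstddef>
#include <cstdint>

#define MAGIC 0xbadc0ffeU   // illustrative; any recognizable constant works

struct alloc_header {
    uint32_t magic;        // set at allocation, asserted on every later access
    int index;             // slot in _allocation_list (tracking level >= 2)
    const char *tag;       // caller-supplied label used in diagnostics
    size_t size;           // user-visible size, excluding this header
    void *bt[32];          // backtrace captured when _detailed_leaks is set
    int btframes;
    uint8_t data[];        // user data starts here (flexible array member)
};

static alloc_header *get_header(void *mem) {
    // Step back from the user pointer to the header sitting in front of it.
    return (alloc_header *)((uint8_t *)mem - offsetof(alloc_header, data));
}

static void *get_data(alloc_header *alloc) {
    return alloc->data;
}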
Example 3
void *
memory_region::realloc(void *mem, size_t size) {
    if (_synchronized) { _lock.lock(); }
    if (!mem) {
        add_alloc();
    }
    size += sizeof(alloc_header);
    alloc_header *alloc = get_header(mem);
    assert(alloc->magic == MAGIC);
    alloc_header *newMem = (alloc_header *)_srv->realloc(alloc, size);
#ifdef TRACK_ALLOCATIONS
    if (_allocation_list[newMem->index] != alloc) {
        printf("at index %d, found %p, expected %p\n",
               newMem->index, _allocation_list[newMem->index], alloc);
        printf("realloc: ptr 0x%" PRIxPTR " is not in allocation_list\n",
            (uintptr_t) mem);
        _srv->fatal("not in allocation_list", __FILE__, __LINE__, "");
    }
    else {
        _allocation_list[newMem->index] = newMem;
        // printf("realloc: stored %p at index %d, replacing %p\n",
        //        newMem, index, mem);
    }
#endif
    if (_synchronized) { _lock.unlock(); }
    return newMem->data;
}
Example 4
void *
memory_region::realloc(void *mem, size_t orig_size) {
    if (_synchronized) { _lock.lock(); }
    if (!mem) {
        add_alloc();
    }

    alloc_header *alloc = get_header(mem);
    size_t size = orig_size + HEADER_SIZE;
    alloc_header *newMem = (alloc_header *)_srv->realloc(alloc, size);

#   if RUSTRT_TRACK_ALLOCATIONS >= 1
    assert(alloc->magic == MAGIC);
    newMem->size = orig_size;
#   endif

#   if RUSTRT_TRACK_ALLOCATIONS >= 2
    if (_allocation_list[newMem->index] != alloc) {
        printf("at index %d, found %p, expected %p\n",
               newMem->index, _allocation_list[newMem->index], alloc);
        printf("realloc: ptr 0x%" PRIxPTR " (%s) is not in allocation_list\n",
               (uintptr_t) get_data(alloc), alloc->tag);
        _srv->fatal("not in allocation_list", __FILE__, __LINE__, "");
    }
    else {
        _allocation_list[newMem->index] = newMem;
        // printf("realloc: stored %p at index %d, replacing %p\n",
        //        newMem, index, mem);
    }
#   endif

    if (_synchronized) { _lock.unlock(); }
    return get_data(newMem);
}
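Examples 4 and 8 size the raw block as orig_size + HEADER_SIZE. For the pointer arithmetic to line up with get_data(), HEADER_SIZE is presumably the number of bytes the header occupies in front of the user data; a one-line sketch consistent with the header layout above:

#define HEADER_SIZE offsetof(alloc_header, data)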
Example 5
static void *__alloc(struct mem_pool *mpool, unsigned long size,
	unsigned long align, int cached, void *caller)
{
	unsigned long paddr;
	void __iomem *vaddr = NULL;	/* stays NULL if CONFIG_UML skips the ioremap below */

	unsigned long aligned_size;
	int log_align = ilog2(align);

	struct alloc *node;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return NULL;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

#ifndef CONFIG_UML
	if (cached)
		vaddr = ioremap_cached(paddr, aligned_size);
	else
		vaddr = ioremap(paddr, aligned_size);
#endif

	if (!vaddr)
		goto out_kfree;

	/*
	 * Just cast to an unsigned long to avoid warnings about casting from a
	 * pointer to an integer of different size. The pointer is only 32-bits
	 * so we lose no data.
	 */
	node->vaddr = (unsigned long)vaddr;
	node->paddr = paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	node->caller = caller;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;

	return vaddr;
out_kfree:
#ifndef CONFIG_UML
	if (vaddr)
		iounmap(vaddr);
#endif
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return NULL;
}
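Example 5 files each mapping in a struct alloc node whose definition is not shown. From the fields assigned here, and the comment in Example 7 about searching a tree keyed by node->vaddr, it is roughly the following (a sketch; the linkage field is our assumption):

struct alloc {
	struct rb_node rb;       /* assumed: add_alloc() links nodes into a tree keyed by vaddr */
	unsigned long vaddr;     /* mapped virtual address, unique across nodes */
	unsigned long paddr;     /* physical address handed out by the gen_pool */
	unsigned long len;       /* page-aligned length of the range */
	struct mem_pool *mpool;  /* owning pool, so a later free can return the range */
	void *caller;            /* return address recorded for leak reports */
};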
Example 6
/**
 * Initialize allocation functions
 *
 * @param base base of the usable memory
 * @param size amount of usable memory
 */
void init_alloc(u32 base, u32 size)
{
	lock(&alloc_lock);

	struct alcent ae = { .valid = 1, .used = 0, .addr = base, .size = size };

	// Add a free allocation block encompassing all the usable memory
	add_alloc(&ae);

	unlock(&alloc_lock);
}
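init_alloc seeds the allocator with a single free entry covering all usable memory. struct alcent is not defined in the excerpt, but the designated initializer fixes its fields; a sketch (field widths are assumptions):

struct alcent {
	u8  valid;  // entry describes a real region, free or allocated
	u8  used;   // 1 = handed out by kmalloc, 0 = free hole
	u32 addr;   // start address of the region
	u32 size;   // length of the region in bytes
};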
Example 7
unsigned long allocate_contiguous_memory_nomap(unsigned long size,
	int mem_type, unsigned long align)
{
	unsigned long paddr;
	unsigned long aligned_size;

	struct alloc *node;
	struct mem_pool *mpool;
	int log_align = ilog2(align);

	mpool = mem_type_to_memory_pool(mem_type);
	if (!mpool)
		return -EINVAL;

	if (!mpool->gpool)
		return -EAGAIN;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return -EAGAIN;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	node->paddr = paddr;

	/* We search the tree using node->vaddr, so set
	 * it to something unique even though we don't
	 * use it for physical allocation nodes.
	 * The virtual and physical address ranges
	 * are disjoint, so there won't be any chance of
	 * a duplicate node->vaddr value.
	 */
	node->vaddr = (void *)paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;
	return paddr;
out_kfree:
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return -ENOMEM;
}
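Since Example 7 returns errors in-band as negative errno values through an unsigned long, callers have to test the result with IS_ERR_VALUE-style logic instead of comparing against zero. A hedged usage sketch (the surrounding function, sizes, and mem_type value are illustrative):

static int grab_region(int mem_type, unsigned long *out_paddr)
{
	unsigned long paddr;

	paddr = allocate_contiguous_memory_nomap(SZ_1M, mem_type, SZ_4K);
	if (IS_ERR_VALUE(paddr))
		return (int)paddr;	/* -EINVAL, -EAGAIN or -ENOMEM */

	*out_paddr = paddr;
	return 0;
}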
Example 8
void *
memory_region::realloc(void *mem, size_t orig_size) {
    if (!mem) {
        add_alloc();
    }

    alloc_header *alloc = get_header(mem);
#   if RUSTRT_TRACK_ALLOCATIONS >= 1
    assert(alloc->magic == MAGIC);
#   endif

    size_t size = orig_size + HEADER_SIZE;
    alloc_header *newMem = (alloc_header *)::realloc(alloc, size);
    if (newMem == NULL) {
        fprintf(stderr,
                "memory_region::realloc> "
                "Out of memory allocating %ld bytes",
                (long int) size);
        abort();
    }

#   if RUSTRT_TRACK_ALLOCATIONS >= 1
    assert(newMem->magic == MAGIC);
    newMem->size = orig_size;
#   endif

#   if RUSTRT_TRACK_ALLOCATIONS >= 2
    if (_synchronized) {
        _lock.lock();
    }
    if (_allocation_list[newMem->index] != alloc) {
        printf("at index %d, found %p, expected %p\n",
               newMem->index, _allocation_list[newMem->index], alloc);
        printf("realloc: ptr 0x%" PRIxPTR " (%s) is not in allocation_list\n",
               (uintptr_t) get_data(alloc), alloc->tag);
        assert(false && "not in allocation_list");
    }
    else {
        _allocation_list[newMem->index] = newMem;
        // printf("realloc: stored %p at index %d, replacing %p\n",
        //        newMem, index, mem);
    }
    if (_synchronized) {
        _lock.unlock();
    }
#   endif

    return get_data(newMem);
}
Example 9
static void *__alloc(struct mem_pool *mpool, unsigned long size,
	unsigned long align, int cached, void *caller)
{
	unsigned long paddr;
	void __iomem *vaddr;

	unsigned long aligned_size;
	int log_align = ilog2(align);

	struct alloc *node;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return NULL;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	if (cached)
		vaddr = ioremap_cached(paddr, aligned_size);
	else
		vaddr = ioremap(paddr, aligned_size);

	if (!vaddr)
		goto out_kfree;

	node->vaddr = (unsigned long)vaddr;
	node->paddr = paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	node->caller = caller;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;

	return vaddr;
out_kfree:
	if (vaddr)
		iounmap(vaddr);
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return NULL;
}
Example 10
void *
memory_region::calloc(size_t size) {
    if (_synchronized) { _lock.lock(); }
    add_alloc();
#ifdef TRACK_ALLOCATIONS
    size += sizeof(int);
#endif
    void *mem = _srv->malloc(size);
    memset(mem, 0, size);
#ifdef TRACK_ALLOCATIONS
    int index = _allocation_list.append(mem);
    int *p = (int *)mem;
    *p = index;
    // printf("calloc: stored %p at index %d\n", mem, index);
#endif
    if (_synchronized) { _lock.unlock(); }
#ifdef TRACK_ALLOCATIONS
    mem = (void*)((uint8_t*)mem + sizeof(int));
#endif
    return mem;
}
Example 11
phys_addr_t _allocate_contiguous_memory_nomap(unsigned long size,
	int mem_type, unsigned long align, void *caller)
{
	phys_addr_t paddr;
	unsigned long aligned_size;

	struct alloc *node;
	struct mem_pool *mpool;
	int log_align = ilog2(align);

	mpool = mem_type_to_memory_pool(mem_type);
	if (!mpool || !mpool->gpool)
		return 0;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return 0;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	node->paddr = paddr;

	node->vaddr = paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	node->caller = caller;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;
	return paddr;
out_kfree:
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return 0;
}
Example 12
void *
memory_region::malloc(size_t size) {
    if (_synchronized) { _lock.lock(); }
    add_alloc();
#ifdef TRACK_ALLOCATIONS
    size += sizeof(int);
#endif
    void *mem = _srv->malloc(size);
#ifdef TRACK_ALLOCATIONS
    int index = _allocation_list.append(mem);
    int *p = (int *)mem;
    *p = index;
    // printf("malloc: stored %p at index %d\n", mem, index);
#endif
    // printf("malloc: ptr 0x%" PRIxPTR " region=%p\n",
    //        (uintptr_t) mem, this);
    if (_synchronized) { _lock.unlock(); }
#ifdef TRACK_ALLOCATIONS
    mem = (void*)((uint8_t*)mem + sizeof(int));
#endif
    return mem;
}
Example 13
void *
memory_region::malloc(size_t size, const char *tag, bool zero) {
    if (_synchronized) { _lock.lock(); }
    add_alloc();
    size_t old_size = size;
    size += sizeof(alloc_header);
    alloc_header *mem = (alloc_header *)_srv->malloc(size);
    mem->magic = MAGIC;
    mem->tag = tag;
#ifdef TRACK_ALLOCATIONS
    mem->index = _allocation_list.append(mem);
    // printf("malloc: stored %p at index %d\n", mem, index);
#endif
    // printf("malloc: ptr 0x%" PRIxPTR " region=%p\n",
    //        (uintptr_t) mem, this);

    if(zero) {
        memset(mem->data, 0, old_size);
    }

    if (_synchronized) { _lock.unlock(); }
    return mem->data;
}
Example 14
/**
 * Allocate a block of memory
 *
 * @param sz size of the required block
 * @returns pointer to block
 */
void *kmalloc(u32 sz)
{
	kerror(ERR_DETAIL, "Allocating %d bytes of memory", sz);

	// We don't want two processes using the same memory block!
	lock(&alloc_lock);

	// Find the smallest memory block that we can use
	u32 idx = find_hole(sz);

	// Couldn't find one...
	if(idx == 0xFFFFFFFF) { unlock(&alloc_lock); return 0; } // Don't hold the lock on failure

	int block = idx >> 16;
	int index = idx & 0xFFFF;

	if(empty_slots(block) == 4) // Get ready ahead of time
	{
		u32 asz = ALLOC_BLOCK * sizeof(struct alcent);

		u32 idx = find_hole(asz);
		if(idx == 0xFFFFFFFF) kpanic("Could not create another allocation block!");

		int block = idx >> 16;
		int index = idx & 0xFFFF;

		if(allocs[block][index].size == asz)
		{
			allocs[block][index].used = 1;
			allocs[get_free_block()] = (struct alcent *)allocs[block][index].addr;
		}
		else
		{
			allocs[block][index].size -= asz;
			struct alcent ae = { .valid = 1, .used = 1, .addr = allocs[block][index].addr, .size = asz };
			allocs[block][index].addr += asz;
			add_alloc(&ae);
			allocs[get_free_block()] = (struct alcent *)ae.addr;
		}
	}

	// If the previous block of code was used, we may have to reinitialize these
	idx = find_hole(sz);
	if(idx == 0xFFFFFFFF) { unlock(&alloc_lock); return 0; } // Don't hold the lock on failure
	block = idx >> 16;
	index = idx & 0xFFFF;


	if(allocs[block][index].size == sz)
	{
		allocs[block][index].used = 1;
		unlock(&alloc_lock);
		kerror(ERR_DETAIL, "  -> %08X W", allocs[block][index].addr);
		return (void *)allocs[block][index].addr;
	}

	allocs[block][index].size -= sz; // We are using part of this block

	struct alcent ae = { .valid = 1, .used = 1, .addr = allocs[block][index].addr, .size = sz };

	allocs[block][index].addr += sz; // We don't want anything else using the allocated memory

	add_alloc(&ae); // We will just assume this worked, the worst that could happen is we can't `free` it (FIXME)

	// Let other processes allocate memory
	unlock(&alloc_lock);

	kerror(ERR_DETAIL, "  -> %08X P", ae.addr);
	return (void *)ae.addr;
}

/**
 * Free an allocated block of memory
 *
 * @param ptr pointer to the previously allocated memory block
 */
void kfree(void *ptr)
{
	kerror(ERR_DETAIL, "Freeing %08X", ptr);

	lock(&alloc_lock);

	int i, j = 0;

	// Find the corresponding memory block
	for(; j < ALLOC_BLOCKS; j++)
	{
		if(!allocs[j]) continue;
		for(i = 0; i < ALLOC_BLOCK; i++)
			if(allocs[j][i].valid) // Is it valid?
				if(allocs[j][i].addr == (u32)ptr) // Is it the correct block?
					rm_alloc(j, i); // Free it!
	}

	unlock(&alloc_lock);
}
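find_hole in Example 14 evidently packs a (block, slot) pair into one u32: block number in the high 16 bits, slot index in the low 16, with 0xFFFFFFFF as the not-found sentinel. Hypothetical helpers matching the `idx >> 16` / `idx & 0xFFFF` decode used above:

static inline u32 pack_hole(u32 block, u32 index)
{
	return (block << 16) | (index & 0xFFFF);
}

static inline u32 hole_block(u32 idx) { return idx >> 16; }
static inline u32 hole_index(u32 idx) { return idx & 0xFFFF; }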
Example 15
byte* memory_sub_session::allocate(size_t needed_bytes)
{
    const size_t page_size = memory_manager::instance->get_page_size();

    byte* res;

    if (needed_bytes > memory_manager::instance->get_max_small_alloc_bytes())
    {
        const size_t bytes = (needed_bytes + page_size - 1) / page_size * page_size;

        byte * const p = reinterpret_cast<byte*> (swap_mmap(bytes));

        if (p == MAP_FAILED)
            throw std::bad_alloc();

        add_alloc(p, bytes, POSIX_MADV_NORMAL);

        res = p;
    }
    else
    {
        std::pair<size_t, byte*> optimal_chunk = smallest_sufficent_free_small_chunk(needed_bytes);

        size_t remainder_size;

        if (optimal_chunk.first == 0)
        {
            const size_t bytes = memory_manager::instance->get_single_small_alloc_bytes();

            byte * const p = reinterpret_cast<byte*> (swap_mmap(bytes));

            if (p == MAP_FAILED)
                throw std::bad_alloc();

            add_alloc(p, bytes, POSIX_MADV_RANDOM);
            add_small_alloc(p, needed_bytes);

            res = p;

            remainder_size = bytes - needed_bytes;
        }
        else
        {
            remainder_size = optimal_chunk.first - needed_bytes;

            add_small_alloc(optimal_chunk.second, needed_bytes);
            remove_free_small_chunk(optimal_chunk.second);

            res = optimal_chunk.second;
        }

        if (remainder_size != 0)
        {
            byte * const remainder_pointer = res + needed_bytes;

            add_free_small_chunk(remainder_pointer, remainder_size);
        }
    }

    return res;
}
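The large-allocation branch in Example 15 rounds the request up to a whole number of pages with (needed_bytes + page_size - 1) / page_size * page_size. Pulled out as a standalone helper (a sketch; the name is ours):

// Round n up to the next multiple of page_size (page_size must be nonzero).
static size_t round_up_to_page(size_t n, size_t page_size)
{
    return (n + page_size - 1) / page_size * page_size;
}
// round_up_to_page(1, 4096) == 4096; round_up_to_page(8192, 4096) == 8192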
Example 16
 static void* operator new(size_t s) {
     add_alloc(1);
     CObjectWithTLS* ptr = (CObjectWithTLS*)::operator new(s);
     RegisterNew(ptr);
     return ptr;
 }
Example 17
 static void operator delete(void* ptr) {
     add_alloc(-1);
     RegisterDelete((CObjectWithTLS*)ptr);
     ::operator delete(ptr);
 }
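Examples 16 and 17 keep an allocation counter balanced by overloading class-scoped operator new/delete. A minimal self-contained sketch of the same counting idiom, without the TLS registration (CountedObject and live_count are hypothetical names):

#include <atomic>
#include <cstddef>
#include <new>

static std::atomic<long> live_count{0};

struct CountedObject {
    static void *operator new(std::size_t s) {
        live_count.fetch_add(1);        // mirrors add_alloc(1)
        return ::operator new(s);
    }
    static void operator delete(void *ptr) {
        live_count.fetch_sub(1);        // mirrors add_alloc(-1)
        ::operator delete(ptr);
    }
};
// After `delete new CountedObject;`, live_count is back at zero.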
Example 18
void CTestTlsObjectApp::RunTest(void)
{
    const size_t OBJECT_SIZE = sizeof(CObjectWithNew);
    for ( int t = 0; t < 1; ++t ) {
        // prealloc
        {
            size_t size = (OBJECT_SIZE+16)*COUNT;
            void* p = ::operator new(size);
            memset(p, 1, size);
            ::operator delete(p);
        }
        {
            const size_t COUNT2 = COUNT*2;
            void** p = new void*[COUNT2];
            for ( size_t i = 0; i < COUNT2; ++i ) {
                add_alloc(1);
                add_step();
                p[i] = ::operator new(OBJECT_SIZE);
            }
            for ( size_t i = 0; i < COUNT2; ++i ) {
                add_alloc(-1);
                add_step();
                ::operator delete(p[i]);
            }
            delete[] p;
        }
        {
            const size_t COUNT2 = COUNT*2;
            int** p = new int*[COUNT2];
            for ( size_t i = 0; i < COUNT2; ++i ) {
                add_alloc(1);
                add_step();
                p[i] = new int(int(i));
            }
            for ( size_t i = 0; i < COUNT2; ++i ) {
                add_alloc(-1);
                add_step();
                delete p[i];
            }
            delete[] p;
        }
    }
    //return;
    CStopWatch sw;
    check_cnts();
    for ( int t = 0; t < 1; ++t ) {
        void** ptr = new void*[COUNT];
        sw.Start();
        for ( size_t i = 0; i < COUNT; ++i ) {
            add_alloc(1);
            add_step();
            ptr[i] = ::operator new(OBJECT_SIZE);
        }
        double t1 = sw.Elapsed();
        sw.Start();
        for ( size_t i = 0; i < COUNT; ++i ) {
            add_alloc(-1);
            add_step();
            ::operator delete(ptr[i]);
        }
        double t2 = sw.Elapsed();
        message("plain malloc", "create", t1, "delete", t2, COUNT);
        delete[] ptr;
    }
    check_cnts();
    {
        sw.Start();
        int* ptr = new int;
        sx_PushLastNewPtr(ptr, 2);
        double t1 = sw.Elapsed();
        sw.Start();
        _VERIFY(sx_PopLastNewPtr(ptr));
        delete ptr;
        double t2 = sw.Elapsed();
        message("tls", "set", t1, "get", t2, COUNT);
    }
    check_cnts();
    {
        CObjectWithNew** ptr = new CObjectWithNew*[COUNT];
        for ( size_t i = 0; i < COUNT; ++i ) {
            ptr[i] = 0;
        }
        sw.Start();
        s_CurrentStep = "new CObjectWithNew";
        s_CurrentInHeap = true;
        for ( size_t i = 0; i < COUNT; ++i ) {
            add_step();
            ptr[i] = new CObjectWithNew;
        }
        s_CurrentInHeap = false;
        double t1 = sw.Elapsed();
        check_cnts(COUNT);
        for ( size_t i = 0; i < COUNT; ++i ) {
            _ASSERT(ptr[i]->IsInHeap());
        }
        sw.Start();
        for ( size_t i = 0; i < COUNT; ++i ) {
            add_step();
            CObjectWithNew::Delete(ptr[i]);
        }
        double t2 = sw.Elapsed();
        message("new CObjectWithNew", "create", t1, "delete", t2, COUNT);
        delete[] ptr;
    }
    check_cnts();
    {
        CObjectWithTLS** ptr = new CObjectWithTLS*[COUNT];
        sw.Start();
        s_CurrentStep = "new CObjectWithTLS";
        s_CurrentInHeap = true;
        for ( size_t i = 0; i < COUNT; ++i ) {
            add_step();
            try {
                switch ( rand()%3 ) {
                case 0: ptr[i] = new CObjectWithTLS; break;
                case 1: ptr[i] = new CObjectWithTLS2; break;
                case 2: ptr[i] = new CObjectWithTLS3; break;
                }
            }
            catch ( exception& ) {
                ptr[i] = 0;
            }
            _ASSERT(!sx_HaveLastNewPtr());
            _ASSERT(!ptr[i] || ptr[i]->IsInHeap());
        }
        s_CurrentInHeap = false;
        double t1 = sw.Elapsed();
        check_cnts(COUNT);
        sw.Start();
        for ( size_t i = 0; i < COUNT; ++i ) {
            add_step();
            CObjectWithTLS::Delete(ptr[i]);
        }
        double t2 = sw.Elapsed();
        message("new CObjectWithTLS", "create", t1, "delete", t2, COUNT);
        delete[] ptr;
    }
    check_cnts();
    {
        CRef<CObjectWithRef>* ptr = new CRef<CObjectWithRef>[COUNT];
        sw.Start();
        s_CurrentStep = "new CObjectWithRef";
        for ( size_t i = 0; i < COUNT; ++i ) {
            add_step();
            try {
                switch ( rand()%2 ) {
                case 0: ptr[i] = new CObjectWithRef; break;
                case 1: ptr[i] = new CObjectWithRef2; break;
                }
            }
            catch ( exception& ) {
                ptr[i] = 0;
            }
            _ASSERT(!sx_HaveLastNewPtr());
            _ASSERT(!ptr[i] || ptr[i]->CanBeDeleted());
        }
        double t1 = sw.Elapsed();
        check_cnts(COUNT);
        sw.Start();
        for ( size_t i = 0; i < COUNT; ++i ) {
            add_step();
            ptr[i].Reset();
        }
        double t2 = sw.Elapsed();
        message("new CObjectWithRef", "create", t1, "delete", t2, COUNT);
        delete[] ptr;
    }
    check_cnts();
    {
        CObjectWithNew** ptr = new CObjectWithNew*[COUNT];
        for ( size_t i = 0; i < COUNT; ++i ) {
            ptr[i] = 0;
        }
        sw.Start();
        s_CurrentStep = "new CObjectWithNew()";
        s_CurrentInHeap = true;
        for ( size_t i = 0; i < COUNT; ++i ) {
            add_step();
            ptr[i] = new CObjectWithNew();
        }
        s_CurrentInHeap = false;
        double t1 = sw.Elapsed();
        check_cnts(COUNT);
        for ( size_t i = 0; i < COUNT; ++i ) {
            _ASSERT(ptr[i]->IsInHeap());
        }
        sw.Start();
        for ( size_t i = 0; i < COUNT; ++i ) {
            add_step();
            CObjectWithNew::Delete(ptr[i]);
        }
        double t2 = sw.Elapsed();
        message("new CObjectWithNew()", "create", t1, "delete", t2, COUNT);
        delete[] ptr;
    }
    check_cnts();
    {
        CObjectWithTLS** ptr = new CObjectWithTLS*[COUNT];
        sw.Start();
        s_CurrentStep = "new CObjectWithTLS()";
        s_CurrentInHeap = true;
        for ( size_t i = 0; i < COUNT; ++i ) {
            add_step();
            try {
                switch ( rand()%4 ) {
                case 0: ptr[i] = new CObjectWithTLS(); break;
                case 1: ptr[i] = new CObjectWithTLS2(); break;
                case 2: ptr[i] = new CObjectWithTLS3(); break;
                case 3: ptr[i] = new CObjectWithTLS3(RecursiveNewTLS(rand()%4)); break;
                }
            }
            catch ( exception& ) {
                ptr[i] = 0;
            }
            _ASSERT(!sx_HaveLastNewPtr());
            _ASSERT(!ptr[i] || ptr[i]->IsInHeap());
        }
        s_CurrentInHeap = false;
        double t1 = sw.Elapsed();
        check_cnts(COUNT);
        sw.Start();
        for ( size_t i = 0; i < COUNT; ++i ) {
            add_step();
            CObjectWithTLS::Delete(ptr[i]);
        }
        double t2 = sw.Elapsed();
        message("new CObjectWithTLS()", "create", t1, "delete", t2, COUNT);
        delete[] ptr;
    }
    check_cnts();
    {
        CRef<CObjectWithRef>* ptr = new CRef<CObjectWithRef>[COUNT];
        sw.Start();
        s_CurrentStep = "new CObjectWithRef()";
        for ( size_t i = 0; i < COUNT; ++i ) {
            add_step();
            try {
                size_t j = rand()%COUNT;
                switch ( rand()%4 ) {
                case 0: ptr[j] = new CObjectWithRef(); break;
                case 1: ptr[j] = new CObjectWithRef(RecursiveNewRef(rand()%4)); break;
                case 2: ptr[j] = new CObjectWithRef2(); break;
                case 3: ptr[j] = new CObjectWithRef2(RecursiveNewRef(rand()%4)); break;
                }
            }
            catch ( exception& ) {
                ptr[i] = 0;
            }
            _ASSERT(!sx_HaveLastNewPtr());
            _ASSERT(!ptr[i] || ptr[i]->CanBeDeleted());
        }
        double t1 = sw.Elapsed();
        check_cnts(COUNT);
        sw.Start();
        for ( size_t i = 0; i < COUNT; ++i ) {
            add_step();
            ptr[i] = 0;
        }
        double t2 = sw.Elapsed();
        message("new CObjectWithRef()", "create", t1, "delete", t2, COUNT);
        delete[] ptr;
    }
    check_cnts();
    {
        sw.Start();
        s_CurrentStep = "CObjectWithNew[]";
        CArray<CObjectWithNew, COUNT>* arr =
            new CArray<CObjectWithNew, COUNT>;
        double t1 = sw.Elapsed();
        check_cnts(COUNT, COUNT);
        for ( size_t i = 0; i < COUNT; ++i ) {
            _ASSERT(!arr->m_Array[i].IsInHeap());
        }
        sw.Start();
        delete arr;
        double t2 = sw.Elapsed();
        message("static CObjectWithNew", "create", t1, "delete", t2, COUNT);
    }
    check_cnts();
    {
        sw.Start();
        s_CurrentStep = "CObjectWithTLS[]";
        CArray<CObjectWithTLS, COUNT, false>* arr =
            new CArray<CObjectWithTLS, COUNT, false>;
        double t1 = sw.Elapsed();
        check_cnts(COUNT, COUNT);
        for ( size_t i = 0; i < COUNT; ++i ) {
            _ASSERT(!arr->m_Array[i].IsInHeap());
        }
        sw.Start();
        delete arr;
        double t2 = sw.Elapsed();
        message("static CObjectWithTLS", "create", t1, "delete", t2, COUNT);
    }
    check_cnts();
    {
        sw.Start();
        s_CurrentStep = "CObjectWithRef[]";
        CArray<CObjectWithRef, COUNT, false>* arr =
            new CArray<CObjectWithRef, COUNT, false>;
        double t1 = sw.Elapsed();
        check_cnts(COUNT, COUNT);
        for ( size_t i = 0; i < COUNT; ++i ) {
            _ASSERT(!arr->m_Array[i].CanBeDeleted());
        }
        sw.Start();
        delete arr;
        double t2 = sw.Elapsed();
        message("static CObjectWithRef", "create", t1, "delete", t2, COUNT);
    }
    check_cnts();
}