Code example #1
File: thread.c — Project: rampantpixels/foundation_lib
void
_thread_finalize(void) {
#if FOUNDATION_PLATFORM_APPLE
	//Reclaim any thread-local memory blocks still registered in the slot table
	//so they are not leaked at shutdown
	int islot;
	for (islot = 0; islot < 1024; ++islot) {
		void* block = atomic_loadptr(&_thread_local_blocks[islot].block);
		if (block) {
			//Clear the slot before releasing the block
			_thread_local_blocks[islot].thread = 0;
			atomic_storeptr(&_thread_local_blocks[islot].block, 0);
			memory_deallocate(block);
		}
	}
#endif
	thread_exit();
}
Code example #2
File: main.c — Project: haifenghuang/foundation_lib
DECLARE_TEST(atomic, cas) {
	size_t num_threads = math_clamp(system_hardware_threads() * 4, 4, 32);
	size_t ithread;
	thread_t threads[32];
	cas_value_t cas_values[32];

	for (ithread = 0; ithread < num_threads; ++ithread) {
		cas_values[ithread].val_32 = (int32_t)ithread;
		cas_values[ithread].val_64 = (int64_t)ithread;
		cas_values[ithread].val_ptr = (void*)(uintptr_t)ithread;
		thread_initialize(&threads[ithread], cas_thread, &cas_values[ithread],
		                  STRING_CONST("cas"), THREAD_PRIORITY_NORMAL, 0);
	}
	for (ithread = 0; ithread < num_threads; ++ithread)
		thread_start(&threads[ithread]);

	test_wait_for_threads_startup(threads, num_threads);
	test_wait_for_threads_finish(threads, num_threads);

	for (ithread = 0; ithread < num_threads; ++ithread)
		thread_finalize(&threads[ithread]);

	EXPECT_EQ(atomic_load32(&val_32), 0);
	EXPECT_EQ(atomic_load64(&val_64), 0);
	EXPECT_EQ(atomic_loadptr(&val_ptr), 0);

	return 0;
}
Code example #3
File: thread.c — Project: rampantpixels/foundation_lib
void*
_allocate_thread_local_block(size_t size) {
	//Allocate the block up front, then try to register it in a free slot of
	//the global table so it can be reclaimed when the owning thread exits
	int islot;
	void* block = memory_allocate(0, size, 0, MEMORY_PERSISTENT | MEMORY_ZERO_INITIALIZED);

	for (islot = 0; islot < 1024; ++islot) {
		if (atomic_loadptr(&_thread_local_blocks[islot].block))
			continue; //Slot occupied
		if (!atomic_cas_ptr(&_thread_local_blocks[islot].block, block, 0))
			continue; //Lost the race for this slot, keep scanning
		_thread_local_blocks[islot].thread = thread_id();
		return block;
	}

	//Table full - the block will never be reclaimed automatically
	log_warnf(0, WARNING_MEMORY,
	          STRING_CONST("Unable to locate thread local memory block slot, will leak %" PRIsize " bytes"),
	          size);
	return block;
}
Code example #4
File: memory.c — Project: haifenghuang/foundation_lib
//Lock-free bump allocation of chunksize bytes from the shared temporary
//linear memory region. Advances the head pointer with a CAS retry loop and
//wraps back to the start of the storage when the chunk would pass the end.
//NOTE(review): on wrap, memory from the start of the region is handed out
//again without any reclamation - callers presumably treat these allocations
//as transient; verify lifetime assumptions against the callers.
static void*
_atomic_allocate_linear(size_t chunksize) {
	void* old_head;
	void* new_head;
	void* return_pointer = 0;

	do {
		old_head = atomic_loadptr(&_memory_temporary.head);
		new_head = pointer_offset(old_head, chunksize);

		return_pointer = old_head;

		if (new_head > _memory_temporary.end) {
			//Chunk would overrun the region - wrap and hand out the region start
			new_head = pointer_offset(_memory_temporary.storage, chunksize);
			return_pointer = _memory_temporary.storage;
		}
	}
	//Retry if another thread moved the head between our load and the CAS
	while (!atomic_cas_ptr(&_memory_temporary.head, new_head, old_head));

	return return_pointer;
}
Code example #5
File: memory.c — Project: haifenghuang/foundation_lib
//Raw allocation backend for the malloc-based allocator.
//For manually aligned allocations the original (unaligned) pointer is stored
//immediately before the returned pointer so it can be recovered on free.
//The low bit of that stored pointer tags a virtual-memory allocation
//(mmap / NtAllocateVirtualMemory) as opposed to a malloc allocation.
//Returns 0 on failure (after logging an out-of-memory error).
static void*
_memory_allocate_malloc_raw(size_t size, unsigned int align, unsigned int hint) {
	FOUNDATION_UNUSED(hint);

	//If we align manually, we must be able to retrieve the original pointer for passing to free()
	//Thus all allocations need to go through that path

#if FOUNDATION_PLATFORM_WINDOWS

#  if FOUNDATION_SIZE_POINTER == 4
#    if BUILD_ENABLE_MEMORY_GUARD
	char* memory = _aligned_malloc((size_t)size + FOUNDATION_MAX_ALIGN * 3, align);
	if (memory)
		memory = _memory_guard_initialize(memory, (size_t)size);
	return memory;
#    else
	return _aligned_malloc((size_t)size, align);
#    endif
#  else
	unsigned int padding, extra_padding = 0;
	size_t allocate_size;
	char* raw_memory;
	void* memory;
	long vmres;

	if (!(hint & MEMORY_32BIT_ADDRESS)) {
		padding = (align > FOUNDATION_SIZE_POINTER ? align : FOUNDATION_SIZE_POINTER);
#if BUILD_ENABLE_MEMORY_GUARD
		extra_padding = FOUNDATION_MAX_ALIGN * 3;
#endif
		raw_memory = _aligned_malloc((size_t)size + padding + extra_padding, align);
		if (raw_memory) {
			memory = raw_memory +
			         padding; //Will be aligned since padding is multiple of alignment (minimum align/pad is pointer size)
			*((void**)memory - 1) = raw_memory;
			FOUNDATION_ASSERT(!((uintptr_t)raw_memory & 1));
			FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
#if BUILD_ENABLE_MEMORY_GUARD
			memory = _memory_guard_initialize(memory, size);
			FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
#endif
			return memory;
		}
		log_errorf(HASH_MEMORY, ERROR_OUT_OF_MEMORY, STRING_CONST("Unable to allocate %" PRIsize " bytes of memory"), size);
		return 0;
	}

#    if BUILD_ENABLE_MEMORY_GUARD
	extra_padding = FOUNDATION_MAX_ALIGN * 3;
#    endif

	allocate_size = size + FOUNDATION_SIZE_POINTER + extra_padding + align;
	raw_memory = 0;

	//ZeroBits = 1 requests an allocation in the low 32-bit address space
	vmres = NtAllocateVirtualMemory(INVALID_HANDLE_VALUE, &raw_memory, 1, &allocate_size,
	                                MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
	if (vmres != 0) {
		log_errorf(HASH_MEMORY, ERROR_OUT_OF_MEMORY,
		           STRING_CONST("Unable to allocate %" PRIsize " bytes of memory in low 32bit address space"), size);
		return 0;
	}

	memory = _memory_align_pointer(raw_memory + FOUNDATION_SIZE_POINTER, align);
	*((void**)memory - 1) = (void*)((uintptr_t)raw_memory | 1); //Low bit tags VM allocation
#    if BUILD_ENABLE_MEMORY_GUARD
	memory = _memory_guard_initialize(memory, size);
#    endif
	FOUNDATION_ASSERT(!((uintptr_t)raw_memory & 1));
	FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
	return memory;
#  endif

#else

#  if FOUNDATION_SIZE_POINTER > 4
	if (!(hint & MEMORY_32BIT_ADDRESS))
#  endif
	{
#if BUILD_ENABLE_MEMORY_GUARD
		size_t extra_padding = FOUNDATION_MAX_ALIGN * 3;
#else
		size_t extra_padding = 0;
#endif
		size_t allocate_size = size + align + FOUNDATION_SIZE_POINTER + extra_padding;
		char* raw_memory = malloc(allocate_size);
		if (raw_memory) {
			void* memory = _memory_align_pointer(raw_memory + FOUNDATION_SIZE_POINTER, align);
			*((void**)memory - 1) = raw_memory;
			FOUNDATION_ASSERT(!((uintptr_t)raw_memory & 1));
			FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
#if BUILD_ENABLE_MEMORY_GUARD
			memory = _memory_guard_initialize(memory, size);
			FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
#endif
			return memory;
		}
		log_errorf(HASH_MEMORY, ERROR_OUT_OF_MEMORY,
		           STRING_CONST("Unable to allocate %" PRIsize " bytes of memory (%" PRIsize " requested)"), size,
		           allocate_size);
		return 0;
	}

#  if FOUNDATION_SIZE_POINTER > 4

	size_t allocate_size;
	char* raw_memory;
	void* memory;

#    if BUILD_ENABLE_MEMORY_GUARD
	unsigned int extra_padding = FOUNDATION_MAX_ALIGN * 3;
#    else
	unsigned int extra_padding = 0;
#    endif

	//Two pointers of preamble: tagged raw pointer and the mapped size (for munmap)
	allocate_size = size + align + FOUNDATION_SIZE_POINTER * 2 + extra_padding;

#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#    ifndef MAP_32BIT
	//On MacOSX app needs to be linked with -pagezero_size 10000 -image_base 100000000 to
	// 1) Free up low 4Gb address range by reducing page zero size
	// 2) Move executable base address above 4Gb to free up more memory address space
#define MMAP_REGION_START ((uintptr_t)0x10000)
#define MMAP_REGION_END   ((uintptr_t)0x80000000)
	static atomicptr_t baseaddr = { (void*)MMAP_REGION_START };
	bool retried = false;
	do {
		raw_memory = mmap(atomic_loadptr(&baseaddr), allocate_size, PROT_READ | PROT_WRITE,
		                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
		//BUGFIX: must reject MAP_FAILED before the range test. (uintptr_t)MAP_FAILED
		//is ~0, which passes the >= MMAP_REGION_START check, and the pointer
		//arithmetic then wraps below MMAP_REGION_END, so a failed mmap could be
		//accepted and returned as valid memory.
		if ((raw_memory != MAP_FAILED) &&
		    ((uintptr_t)raw_memory >= MMAP_REGION_START) &&
		    ((uintptr_t)(raw_memory + allocate_size) < MMAP_REGION_END)) {
			atomic_storeptr(&baseaddr, pointer_offset(raw_memory, allocate_size));
			break;
		}
		if (raw_memory && (raw_memory != MAP_FAILED)) {
			if (munmap(raw_memory, allocate_size) < 0)
				log_warn(HASH_MEMORY, WARNING_SYSTEM_CALL_FAIL,
				         STRING_CONST("Failed to munmap pages outside 32-bit range"));
		}
		raw_memory = 0;
		if (retried)
			break;
		retried = true;
		//Reset the hint address and try the whole region once more
		atomic_storeptr(&baseaddr, (void*)MMAP_REGION_START);
	}
	while (true);
#    else
	raw_memory = mmap(0, allocate_size, PROT_READ | PROT_WRITE,
	                  MAP_32BIT | MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
	if (raw_memory == MAP_FAILED) {
		//Fall back to an unconstrained mapping, then verify it landed low enough
		raw_memory = mmap(0, allocate_size, PROT_READ | PROT_WRITE,
	                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
		if (raw_memory == MAP_FAILED)
			raw_memory = 0;
		if ((uintptr_t)raw_memory > 0xFFFFFFFFULL) {
			if (munmap(raw_memory, allocate_size) < 0)
				log_warn(HASH_MEMORY, WARNING_SYSTEM_CALL_FAIL,
				         STRING_CONST("Failed to munmap pages outside 32-bit range"));
			raw_memory = 0;
		}
	}
#    endif
	if (!raw_memory) {
		string_const_t errmsg = system_error_message(0);
		log_errorf(HASH_MEMORY, ERROR_OUT_OF_MEMORY,
		           STRING_CONST("Unable to allocate %" PRIsize " bytes of memory in low 32bit address space: %.*s"),
		           size, STRING_FORMAT(errmsg));
		return 0;
	}

	memory = _memory_align_pointer(raw_memory + FOUNDATION_SIZE_POINTER * 2, align);
	*((uintptr_t*)memory - 1) = ((uintptr_t)raw_memory | 1); //Low bit tags VM allocation
	*((uintptr_t*)memory - 2) = (uintptr_t)allocate_size;    //Stored size needed for munmap
	FOUNDATION_ASSERT(!((uintptr_t)raw_memory & 1));
	FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
#    if BUILD_ENABLE_MEMORY_GUARD
	memory = _memory_guard_initialize(memory, size);
	FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
#    endif

	return memory;

#  endif

#endif
}