Example #1
void*
mono_shared_area_for_pid (void *pid)
{
	int fd;
	/* we should allow the user to configure the size */
	int size = mono_pagesize ();
	char buf [128];
	void *res;

	if (shared_area_disabled ())
		return NULL;

	g_snprintf (buf, sizeof (buf), "/mono.%d", GPOINTER_TO_INT (pid));

	fd = shm_open (buf, O_RDONLY, S_IRUSR|S_IRGRP);
	if (fd == -1)
		return NULL;
	res = mmap (NULL, size, PROT_READ, MAP_SHARED, fd, 0);
	if (res == MAP_FAILED) {
		close (fd);
		return NULL;
	}
	/* FIXME: validate the area */
	/* we don't need the file descriptor anymore */
	close (fd);
	return res;
}
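A hypothetical usage sketch (dump_shared_header is assumed for illustration, not part of the Mono sources): reading the header of another process's shared area and releasing the mapping with mono_shared_area_unload.

static void
dump_shared_header (int pid)
{
	/* SAreaHeader is laid out as in mono_shared_area/malloc_shared_area below */
	SAreaHeader *header = (SAreaHeader *) mono_shared_area_for_pid (GINT_TO_POINTER (pid));
	if (!header)
		return; /* disabled, no such segment, or mmap failed */
	g_print ("pid %d: shared area of %d bytes\n", header->pid, header->size);
	/* the area is a single-page read-only mapping; unload munmaps it */
	mono_shared_area_unload (header);
}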
Example #2
int
mono_pages_not_faulted (void *addr, size_t size)
{
#ifdef HAVE_MINCORE
	int i;
	gint64 count;
	int pagesize = mono_pagesize ();
	int npages = (size + pagesize - 1) / pagesize;
	/* mincore() wants one byte per page, so sizeof (char), not sizeof (char*) */
	char *faulted = g_malloc0 (sizeof (char) * npages);

	if (mincore (addr, size, faulted) != 0) {
		count = -1;
	} else {
		count = 0;
		for (i = 0; i < npages; ++i) {
			if (faulted [i] != 0)
				++count;
		}
	}

	g_free (faulted);

	return count;
#else
	return -1;
#endif
}
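A hypothetical caller (report_resident_pages is assumed, not from the sources above) that uses the result to report how much of a region mincore considers resident:

static void
report_resident_pages (void *addr, size_t len)
{
	int pagesize = mono_pagesize ();
	int npages = (len + pagesize - 1) / pagesize;
	/* mono_pages_not_faulted returns -1 when mincore() is unavailable or fails */
	int resident = mono_pages_not_faulted (addr, len);
	if (resident >= 0)
		g_print ("%d of %d pages resident\n", resident, npages);
}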
Example #3
int
mono_pages_not_faulted (void *addr, size_t size)
{
#ifdef HAVE_MINCORE
	int i;
	gint64 count;
	int pagesize = mono_pagesize ();
	int npages = (size + pagesize - 1) / pagesize;
	char *faulted = (char *) g_malloc0 (sizeof (char) * npages);

	/*
	 * Cast `faulted` explicitly because Linux declares mincore's vector
	 * as unsigned char* while BSD declares it as char*.
	 */
#ifdef __linux__
	if (mincore (addr, size, (unsigned char *)faulted) != 0) {
#else
	if (mincore (addr, size, (char *)faulted) != 0) {
#endif
		count = -1;
	} else {
		count = 0;
		for (i = 0; i < npages; ++i) {
			if (faulted [i] != 0)
				++count;
		}
	}

	g_free (faulted);

	return count;
#else
	return -1;
#endif
}
Example #4
void
sgen_init_internal_allocator (void)
{
	int i, size;

	for (i = 0; i < INTERNAL_MEM_MAX; ++i)
		fixed_type_allocator_indexes [i] = -1;

	for (i = 0; i < NUM_ALLOCATORS; ++i) {
		allocator_block_sizes [i] = block_size (allocator_sizes [i]);
		mono_lock_free_allocator_init_size_class (&size_classes [i], allocator_sizes [i], allocator_block_sizes [i]);
		mono_lock_free_allocator_init_allocator (&allocators [i], &size_classes [i], MONO_MEM_ACCOUNT_SGEN_INTERNAL);
	}

	for (size = mono_pagesize (); size <= LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) {
		int max_size = (LOCK_FREE_ALLOC_SB_USABLE_SIZE (size) / 2) & ~(SIZEOF_VOID_P - 1);
		/*
		 * We assert that allocator_sizes contains the biggest possible object
		 * size per block, which must be pointer-aligned: half the usable size,
		 * rounded down to SIZEOF_VOID_P. With the 16-byte superblock header
		 * that gives 4K => 2040, 8K => 4088, 16K => 8184 on 64-bit, so that
		 * we do not get different block sizes for sizes that should go to the
		 * same one.
		 */
		g_assert (allocator_sizes [index_for_size (max_size)] == max_size);
		g_assert (block_size (max_size) == size);
		if (size < LOCK_FREE_ALLOC_SB_MAX_SIZE)
			g_assert (block_size (max_size + 1) == size << 1);
	}
}
Example #5
void
mono_shared_area_unload (void *area)
{
	/* FIXME: currently we load only a page */
	BEGIN_CRITICAL_SECTION;
	munmap (area, mono_pagesize ());
	END_CRITICAL_SECTION;
}
Example #6
void*
mono_shared_area (void)
{
	int fd;
	int pid = getpid ();
	/* we should allow the user to configure the size */
	int size = mono_pagesize ();
	char buf [128];
	void *res;
	SAreaHeader *header;

	if (shared_area_disabled ()) {
		if (!malloced_shared_area)
			malloced_shared_area = malloc_shared_area (0);
		/* get the pid here */
		return malloced_shared_area;
	}

	/* perform cleanup of segments left over from dead processes */
	mono_shared_area_instances_helper (NULL, 0, TRUE);

	g_snprintf (buf, sizeof (buf), "/mono.%d", pid);

	fd = shm_open (buf, O_CREAT|O_EXCL|O_RDWR, S_IRUSR|S_IWUSR|S_IRGRP);
	if (fd == -1 && errno == EEXIST) {
		/* leftover */
		shm_unlink (buf);
		fd = shm_open (buf, O_CREAT|O_EXCL|O_RDWR, S_IRUSR|S_IWUSR|S_IRGRP);
	}
	/* in case of failure we try to return a memory area anyway,
	 * even if it means the data can't be read by other processes
	 */
	if (fd == -1)
		return malloc_shared_area (pid);
	if (ftruncate (fd, size) != 0) {
		shm_unlink (buf);
		close (fd);
		/* fall back to a private area rather than mapping a zero-length file */
		return malloc_shared_area (pid);
	}
	BEGIN_CRITICAL_SECTION;
	res = mmap (NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	END_CRITICAL_SECTION;

	if (res == MAP_FAILED) {
		shm_unlink (buf);
		close (fd);
		return malloc_shared_area (pid);
	}
	/* we don't need the file descriptor anymore */
	close (fd);
	header = (SAreaHeader *) res;
	header->size = size;
	header->pid = pid;
	header->stats_start = sizeof (SAreaHeader);
	header->stats_end = sizeof (SAreaHeader);

	mono_atexit (mono_shared_area_remove);
	return res;
}
Example #7
static void*
malloc_shared_area (int pid)
{
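	/* Heap-allocated fallback: used when the shm-backed shared area is
	 * disabled or cannot be created, so the data stays private to this
	 * process (see mono_shared_area above). */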
	int size = mono_pagesize ();
	SAreaHeader *sarea = g_malloc0 (size);
	sarea->size = size;
	sarea->pid = pid;
	sarea->stats_start = sizeof (SAreaHeader);
	sarea->stats_end = sizeof (SAreaHeader);

	return sarea;
}
Example #8
/*
 * mono_thread_info_get_stack_bounds:
 *
 *   Return the address and size of the current thread's stack. Return NULL as the
 * stack address if it cannot be determined.
 */
void
mono_thread_info_get_stack_bounds (guint8 **staddr, size_t *stsize)
{
	guint8 *current = (guint8 *)&stsize;
	mono_threads_core_get_stack_bounds (staddr, stsize);
	if (!*staddr)
		return;

	/* Sanity check the result */
	g_assert ((current > *staddr) && (current < *staddr + *stsize));

	/* When running under emacs, sometimes staddr is not aligned to a page size */
	*staddr = (guint8*)((gssize)*staddr & ~(mono_pagesize () - 1));
}
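A minimal sketch of how the bounds might be consumed (is_on_current_stack is hypothetical): checking whether a pointer lies on the calling thread's stack.

static gboolean
is_on_current_stack (void *ptr)
{
	guint8 *staddr;
	size_t stsize;

	mono_thread_info_get_stack_bounds (&staddr, &stsize);
	if (!staddr)
		return FALSE; /* stack bounds could not be determined on this platform */
	return (guint8 *) ptr >= staddr && (guint8 *) ptr < staddr + stsize;
}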
Example #9
static size_t
block_size (size_t slot_size)
{
    static int pagesize = -1;

    int size;

    if (pagesize == -1)
        pagesize = mono_pagesize ();

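    /* Pick the smallest power-of-two superblock size, starting at one page,
       whose usable space can hold at least two slots. */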
    for (size = pagesize; size < LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) {
        if (slot_size * 2 <= LOCK_FREE_ALLOC_SB_USABLE_SIZE (size))
            return size;
    }
    return LOCK_FREE_ALLOC_SB_MAX_SIZE;
}
Example #10
static size_t
block_size (size_t slot_size)
{
	static int pagesize = -1;

	int size;
	size_t aligned_slot_size = SGEN_ALIGN_UP_TO (slot_size, SIZEOF_VOID_P);

	if (pagesize == -1)
		pagesize = mono_pagesize ();

	for (size = pagesize; size < LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) {
		if (aligned_slot_size * 2 <= LOCK_FREE_ALLOC_SB_USABLE_SIZE (size))
			return size;
	}
	return LOCK_FREE_ALLOC_SB_MAX_SIZE;
}
Example #11
void
mono_threads_core_get_stack_bounds (guint8 **staddr, size_t *stsize)
{
	*staddr = (guint8*)pthread_get_stackaddr_np (pthread_self());
	*stsize = pthread_get_stacksize_np (pthread_self());

#ifdef TARGET_OSX
	/*
	 * Mavericks reports stack sizes as 512kb:
	 * http://permalink.gmane.org/gmane.comp.java.openjdk.hotspot.devel/11590
	 * https://bugs.openjdk.java.net/browse/JDK-8020753
	 */
	if (pthread_main_np () && *stsize == 512 * 1024)
		*stsize = 2048 * mono_pagesize ();
#endif

	/* staddr points to the start of the stack, not the end */
	*staddr -= *stsize;
}
Example #12
static gpointer
alloc_sb (Descriptor *desc)
{
	static int pagesize = -1;

	gpointer sb_header;

	if (pagesize == -1)
		pagesize = mono_pagesize ();

	sb_header = desc->block_size == pagesize ?
		mono_valloc (0, desc->block_size, prot_flags_for_activate (TRUE)) :
		mono_valloc_aligned (desc->block_size, desc->block_size, prot_flags_for_activate (TRUE));

	g_assert (sb_header == sb_header_for_addr (sb_header, desc->block_size));

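	/* Store the owning descriptor at the start of the superblock so it can be
	 * recovered from any address inside the block (cf. sb_header_for_addr). */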
	*(Descriptor**)sb_header = desc;
	//g_print ("sb %p for %p\n", sb_header, desc);

	return (char*)sb_header + LOCK_FREE_ALLOC_SB_HEADER_SIZE;
}
Example #13
static gpointer
alloc_sb (Descriptor *desc)
{
	static int pagesize = -1;

	gpointer sb_header;

	if (pagesize == -1)
		pagesize = mono_pagesize ();

	sb_header = desc->block_size == pagesize ?
		mono_valloc (NULL, desc->block_size, prot_flags_for_activate (TRUE), desc->heap->account_type) :
		mono_valloc_aligned (desc->block_size, desc->block_size, prot_flags_for_activate (TRUE), desc->heap->account_type);

	g_assertf (sb_header, "Failed to allocate memory for the lock free allocator");
	g_assert (sb_header == sb_header_for_addr (sb_header, desc->block_size));

	*(Descriptor**)sb_header = desc;
	//g_print ("sb %p for %p\n", sb_header, desc);

	return (char*)sb_header + LOCK_FREE_ALLOC_SB_HEADER_SIZE;
}
Example #14
void
sgen_init_internal_allocator (void)
{
    int i, size;

    for (i = 0; i < INTERNAL_MEM_MAX; ++i)
        fixed_type_allocator_indexes [i] = -1;

    for (i = 0; i < NUM_ALLOCATORS; ++i) {
        allocator_block_sizes [i] = block_size (allocator_sizes [i]);
        mono_lock_free_allocator_init_size_class (&size_classes [i], allocator_sizes [i], allocator_block_sizes [i]);
        mono_lock_free_allocator_init_allocator (&allocators [i], &size_classes [i]);
    }

    for (size = mono_pagesize (); size <= LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) {
        int max_size = LOCK_FREE_ALLOC_SB_USABLE_SIZE (size) / 2;
        /*
         * We assert that allocator_sizes contains the biggest possible object size
         * per block (4K => 4080 / 2 = 2040, 8K => 8176 / 2 = 4088, 16K => 16368 / 2 = 8184 on 64-bit),
         * so that we do not get different block sizes for sizes that should go to the same one.
         */
        g_assert (allocator_sizes [index_for_size (max_size)] == max_size);
    }
}
Example #15
static CodeChunk*
new_codechunk (CodeChunk *last, int dynamic, int size)
{
	int minsize, flags = CODE_FLAG_MMAP;
	int chunk_size, bsize = 0;
	int pagesize, valloc_granule;
	CodeChunk *chunk;
	void *ptr;

#ifdef FORCE_MALLOC
	flags = CODE_FLAG_MALLOC;
#endif

	pagesize = mono_pagesize ();
	valloc_granule = mono_valloc_granule ();

	if (dynamic) {
		chunk_size = size;
		flags = CODE_FLAG_MALLOC;
	} else {
		minsize = MAX (pagesize * MIN_PAGES, valloc_granule);
		if (size < minsize)
			chunk_size = minsize;
		else {
			/* Allocate MIN_ALIGN-1 more than we need so we can still */
			/* guarantee MIN_ALIGN alignment for individual allocs    */
			/* from mono_code_manager_reserve_align.                  */
			size += MIN_ALIGN - 1;
			size &= ~(MIN_ALIGN - 1);
			chunk_size = size;
			chunk_size += valloc_granule - 1;
			chunk_size &= ~ (valloc_granule - 1);
		}
	}
#ifdef BIND_ROOM
	if (dynamic)
		/* Reserve more space since there are no other chunks we might use if this one gets full */
		bsize = (chunk_size * 2) / BIND_ROOM;
	else
		bsize = chunk_size / BIND_ROOM;
	if (bsize < MIN_BSIZE)
		bsize = MIN_BSIZE;
	bsize += MIN_ALIGN - 1;
	bsize &= ~ (MIN_ALIGN - 1);
	if (chunk_size - size < bsize) {
		chunk_size = size + bsize;
		if (!dynamic) {
			chunk_size += valloc_granule - 1;
			chunk_size &= ~ (valloc_granule - 1);
		}
	}
#endif

	if (flags == CODE_FLAG_MALLOC) {
		ptr = dlmemalign (MIN_ALIGN, chunk_size + MIN_ALIGN - 1);
		if (!ptr)
			return NULL;
	} else {
		/* Try to allocate code chunks next to each other to help the VM */
		ptr = NULL;
		if (last)
			ptr = codechunk_valloc ((guint8*)last->data + last->size, chunk_size);
		if (!ptr)
			ptr = codechunk_valloc (NULL, chunk_size);
		if (!ptr)
			return NULL;
	}

	if (flags == CODE_FLAG_MALLOC) {
#ifdef BIND_ROOM
		/* Make sure the thunks area is zeroed */
		memset (ptr, 0, bsize);
#endif
	}

	chunk = (CodeChunk *) g_malloc (sizeof (CodeChunk));
	if (!chunk) {
		if (flags == CODE_FLAG_MALLOC)
			dlfree (ptr);
		else
			mono_vfree (ptr, chunk_size, MONO_MEM_ACCOUNT_CODE);
		return NULL;
	}
	chunk->next = NULL;
	chunk->size = chunk_size;
	chunk->data = (char *) ptr;
	chunk->flags = flags;
	chunk->pos = bsize;
	chunk->bsize = bsize;
	if (code_manager_callbacks.chunk_new)
		code_manager_callbacks.chunk_new ((gpointer)chunk->data, chunk->size);
	mono_profiler_code_chunk_new((gpointer) chunk->data, chunk->size);

	code_memory_used += chunk_size;
	mono_runtime_resource_check_limit (MONO_RESOURCE_JIT_CODE, code_memory_used);
	/*printf ("code chunk at: %p\n", ptr);*/
	return chunk;
}
Example #16
/*
 * Allocate a small thread id.
 *
 * FIXME: The biggest part of this function is very similar to
 * domain_id_alloc() in domain.c and should be merged.
 */
int
mono_thread_small_id_alloc (void)
{
	int i, id = -1;

	mono_os_mutex_lock (&small_id_mutex);

	if (!small_id_table)
		small_id_table = mono_bitset_new (1, 0);

	id = mono_bitset_find_first_unset (small_id_table, small_id_next - 1);
	if (id == -1)
		id = mono_bitset_find_first_unset (small_id_table, -1);

	if (id == -1) {
		MonoBitSet *new_table;
		if (small_id_table->size * 2 >= (1 << 16))
			g_assert_not_reached ();
		new_table = mono_bitset_clone (small_id_table, small_id_table->size * 2);
		id = mono_bitset_find_first_unset (new_table, small_id_table->size - 1);

		mono_bitset_free (small_id_table);
		small_id_table = new_table;
	}

	g_assert (!mono_bitset_test_fast (small_id_table, id));
	mono_bitset_set_fast (small_id_table, id);

	small_id_next++;
	if (small_id_next >= small_id_table->size)
		small_id_next = 0;

	g_assert (id < HAZARD_TABLE_MAX_SIZE);
	if (id >= hazard_table_size) {
#if MONO_SMALL_CONFIG
		hazard_table = g_malloc0 (sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE);
		hazard_table_size = HAZARD_TABLE_MAX_SIZE;
#else
		gpointer page_addr;
#if defined(__PASE__)
		/*
		 * HACK: allocating the table with no-access protection will cause
		 * IBM i 7.1 to segfault when accessing or protecting it
		 */
		int table_prot = MONO_MMAP_READ | MONO_MMAP_WRITE;
#else
		int table_prot = MONO_MMAP_NONE;
#endif
		int pagesize = mono_pagesize ();
		int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;

		if (hazard_table == NULL) {
			hazard_table = (MonoThreadHazardPointers *volatile) mono_valloc (NULL,
				sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,
				table_prot, MONO_MEM_ACCOUNT_HAZARD_POINTERS);
		}

		g_assert (hazard_table != NULL);
		page_addr = (guint8*)hazard_table + num_pages * pagesize;

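		/* The table is reserved up front (no access on most platforms); commit
		 * the next page by making it readable and writable. */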
		mono_mprotect (page_addr, pagesize, MONO_MMAP_READ | MONO_MMAP_WRITE);

		++num_pages;
		hazard_table_size = num_pages * pagesize / sizeof (MonoThreadHazardPointers);

#endif
		g_assert (id < hazard_table_size);
		for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
			hazard_table [id].hazard_pointers [i] = NULL;
	}

	if (id > highest_small_id) {
		highest_small_id = id;
		mono_memory_write_barrier ();
	}

	mono_os_mutex_unlock (&small_id_mutex);

	return id;
}
Example #17
void
mono_shared_area_unload (void *area)
{
	/* FIXME: currently we load only a page */
	munmap (area, mono_pagesize ());
}
Example #18
void
mono_threads_core_get_stack_bounds (guint8 **staddr, size_t *stsize)
{
#if defined(HAVE_PTHREAD_GET_STACKSIZE_NP) && defined(HAVE_PTHREAD_GET_STACKADDR_NP)
	/* Mac OS X */
	*staddr = (guint8*)pthread_get_stackaddr_np (pthread_self());
	*stsize = pthread_get_stacksize_np (pthread_self());

#ifdef TARGET_OSX
	/*
	 * Mavericks reports stack sizes as 512kb:
	 * http://permalink.gmane.org/gmane.comp.java.openjdk.hotspot.devel/11590
	 * https://bugs.openjdk.java.net/browse/JDK-8020753
	 */
	if (pthread_main_np () && *stsize == 512 * 1024)
		*stsize = 2048 * mono_pagesize ();
#endif

	/* staddr points to the start of the stack, not the end */
	*staddr -= *stsize;

	/* When running under emacs, sometimes staddr is not aligned to a page size */
	*staddr = (guint8*)((gssize)*staddr & ~(mono_pagesize() - 1));
	return;

#elif (defined(HAVE_PTHREAD_GETATTR_NP) || defined(HAVE_PTHREAD_ATTR_GET_NP)) && defined(HAVE_PTHREAD_ATTR_GETSTACK)
	/* Linux, BSD */

	pthread_attr_t attr;
	guint8 *current = (guint8*)&attr;

	*staddr = NULL;
	*stsize = (size_t)-1;

	pthread_attr_init (&attr);

#if     defined(HAVE_PTHREAD_GETATTR_NP)
	/* Linux */
	pthread_getattr_np (pthread_self(), &attr);

#elif   defined(HAVE_PTHREAD_ATTR_GET_NP)
	/* BSD */
	pthread_attr_get_np (pthread_self(), &attr);

#else
#error 	Cannot determine which API is needed to retrieve pthread attributes.
#endif

	pthread_attr_getstack (&attr, (void**)staddr, stsize);
	pthread_attr_destroy (&attr);

	if (*staddr)
		g_assert ((current > *staddr) && (current < *staddr + *stsize));

	/* When running under emacs, sometimes staddr is not aligned to a page size */
	*staddr = (guint8*)((gssize)*staddr & ~(mono_pagesize () - 1));
	return;

#elif defined(__OpenBSD__)
	/* OpenBSD */
	/* TODO: Determine if this code is actually still needed. It may already be covered by the case above. */

	pthread_attr_t attr;
	guint8 *current = (guint8*)&attr;

	*staddr = NULL;
	*stsize = (size_t)-1;

	pthread_attr_init (&attr);

	stack_t ss;
	int rslt;

	rslt = pthread_stackseg_np(pthread_self(), &ss);
	g_assert (rslt == 0);

	*staddr = (guint8*)((size_t)ss.ss_sp - ss.ss_size);
	*stsize = ss.ss_size;

	pthread_attr_destroy (&attr);

	if (*staddr)
		g_assert ((current > *staddr) && (current < *staddr + *stsize));

	/* When running under emacs, sometimes staddr is not aligned to a page size */
	*staddr = (guint8*)((gssize)*staddr & ~(mono_pagesize () - 1));
	return;

#elif defined(sun) || defined(__native_client__)
	/* Solaris/Illumos, NaCl */
	pthread_attr_t attr;
	pthread_attr_init (&attr);
	pthread_attr_getstacksize (&attr, stsize);
	pthread_attr_destroy (&attr);
	*staddr = NULL;
	return;

#else
	/* FIXME:   It'd be better to use the 'error' preprocessor macro here so we know
		    at compile-time if the target platform isn't supported. */
#warning "Unable to determine how to retrieve a thread's stack-bounds for this platform in 'mono_thread_get_stack_bounds()'."
	*staddr = NULL;
	*stsize = 0;
	return;
#endif
}
Example #19
static gint64
align_up_to_page_size (gint64 size)
{
	gint64 page_size = mono_pagesize ();
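	/* adding page_size - 1 before masking rounds up to the next multiple of
	 * the page size; this relies on the page size being a power of two */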
	return (size + page_size - 1) & ~(page_size - 1);
}
Example #20
static gint64
align_down_to_page_size (gint64 size)
{
	gint64 page_size = mono_pagesize ();
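	/* masking off the low-order bits rounds down to a page boundary
	 * (the page size is a power of two) */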
	return size & ~(page_size - 1);
}
Example #21
/*
 * Allocate a small thread id.
 *
 * FIXME: The biggest part of this function is very similar to
 * domain_id_alloc() in domain.c and should be merged.
 */
int
mono_thread_small_id_alloc (void)
{
	int i, id = -1;

	EnterCriticalSection (&small_id_mutex);

	if (!small_id_table)
		small_id_table = mono_bitset_new (1, 0);

	id = mono_bitset_find_first_unset (small_id_table, small_id_next);
	if (id == -1)
		id = mono_bitset_find_first_unset (small_id_table, -1);

	if (id == -1) {
		MonoBitSet *new_table;
		if (small_id_table->size * 2 >= (1 << 16))
			g_assert_not_reached ();
		new_table = mono_bitset_clone (small_id_table, small_id_table->size * 2);
		id = mono_bitset_find_first_unset (new_table, small_id_table->size - 1);

		mono_bitset_free (small_id_table);
		small_id_table = new_table;
	}

	g_assert (!mono_bitset_test_fast (small_id_table, id));
	mono_bitset_set_fast (small_id_table, id);

	small_id_next++;
	if (small_id_next >= small_id_table->size)
		small_id_next = 0;

	g_assert (id < HAZARD_TABLE_MAX_SIZE);
	if (id >= hazard_table_size) {
#if MONO_SMALL_CONFIG
		hazard_table = g_malloc0 (sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE);
		hazard_table_size = HAZARD_TABLE_MAX_SIZE;
#else
		gpointer page_addr;
		int pagesize = mono_pagesize ();
		int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;

		if (hazard_table == NULL) {
			hazard_table = mono_valloc (NULL,
				sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,
				MONO_MMAP_NONE);
		}

		g_assert (hazard_table != NULL);
		page_addr = (guint8*)hazard_table + num_pages * pagesize;

		mono_mprotect (page_addr, pagesize, MONO_MMAP_READ | MONO_MMAP_WRITE);

		++num_pages;
		hazard_table_size = num_pages * pagesize / sizeof (MonoThreadHazardPointers);

#endif
		g_assert (id < hazard_table_size);
		for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
			hazard_table [id].hazard_pointers [i] = NULL;
	}

	if (id > highest_small_id) {
		highest_small_id = id;
		mono_memory_write_barrier ();
	}

	LeaveCriticalSection (&small_id_mutex);

	return id;
}