/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}
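/*
 * Illustrative sketch (not from the original source): how a caller typically
 * uses __tree_search().  A non-NULL return is the node whose extent contains
 * file_offset; otherwise *prev holds the nearest entry left behind by the
 * search, which the caller still has to range-check itself.  The wrapper
 * name is an assumption made for the example.
 */
static struct btrfs_ordered_extent *lookup_nearest(struct rb_root *root,
						   u64 file_offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *node = __tree_search(root, file_offset, &prev);

	if (!node)
		node = prev;
	if (!node)
		return NULL;
	return rb_entry(node, struct btrfs_ordered_extent, rb_node);
}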
void InterpreterStubs::generate_primordial_to_current_thread() {
  entry("primordial_to_current_thread");
  pushal();
  pushl(ebp);
  movl(Address(Constant("_primordial_sp")), esp);
  get_thread(ecx);
  movl(esp, Address(ecx, Constant(Thread::stack_pointer_offset())));
  popl(ebp);
  ret();
  entry_end(); // primordial_to_current_thread

  entry("start_lightweight_thread_asm");
  // Should never reach here on x86
  int3();
  entry_end(); // start_lightweight_thread_asm
}
void InterpreterStubs::generate_interpreter_timer_tick() {
  comment_section("Interpreter call timer_tick");
  entry("interpreter_timer_tick");
  interpreter_call_vm(Constant("timer_tick"), T_VOID);
  dispatch_next();
  entry_end(); // interpreter_timer_tick

#if ENABLE_PAGE_PROTECTION
  stop_code_segment();
  start_data_segment();
  if (GenerateGNUCode || GenerateInlineAsm) {
    align(PROTECTED_PAGE_SIZE);
    define_array_begin("unsigned char", "_protected_page");
    for (int i = 0; i < PROTECTED_PAGE_SIZE; i++) {
      define_byte_element(Constant(0));
    }
    define_array_end();
  } else {
    // MASM doesn't allow 4096-byte alignment,
    // so surround the protected area with 4K padding.
    // This will certainly add 8K of static footprint,
    // but who cares about the size of the win32_i386 binary!
    define_byte(Constant(0), PROTECTED_PAGE_SIZE);
    define_long(Constant(0), PROTECTED_PAGE_SIZE / BytesPerWord,
                "_protected_page");
    define_byte(Constant(0), PROTECTED_PAGE_SIZE);
  }
  stop_data_segment();
  start_code_segment();
#endif
}
void PoolAllocator::compact(int p_up_to) {

	uint32_t prev_entry_end_pos = 0;

	if (p_up_to < 0)
		p_up_to = entry_count;

	for (int i = 0; i < p_up_to; i++) {

		Entry &entry = entry_array[entry_indices[i]];

		/* determine hole size to previous entry */

		int hole_size = entry.pos - prev_entry_end_pos;

		/* if we can compact, do it */
		if (hole_size > 0 && !entry.lock) {

			COMPACT_CHUNK(entry, prev_entry_end_pos);
		}

		/* prepare for next one */
		prev_entry_end_pos = entry_end(entry);
	}
}
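/*
 * COMPACT_CHUNK() is not defined in this excerpt.  A minimal sketch of what
 * it presumably does, assuming the allocator keeps its backing memory in a
 * `pool` byte buffer (requires <string.h> for memmove): move the entry's
 * data down to where the previous entry ends and update its position.
 */
#define COMPACT_CHUNK(m_entry, m_to_pos)                              \
	do {                                                          \
		void *_dst = &((unsigned char *)pool)[m_to_pos];      \
		void *_src = &((unsigned char *)pool)[(m_entry).pos]; \
		memmove(_dst, _src, aligned((m_entry).len));          \
		(m_entry).pos = m_to_pos;                             \
	} while (0)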
/**
 * Find a hole
 * @param p_pos Upon return, the hole lies immediately before the entry at this index; if *p_pos == entry_count, allocate at the end of the pool
 * @param p_for_size hole size
 * @return true if a hole was found, false otherwise
 */
bool PoolAllocator::find_hole(EntryArrayPos *p_pos, int p_for_size) {

	/* position where previous entry ends. Defaults to zero (begin of pool) */

	int prev_entry_end_pos = 0;

	for (int i = 0; i < entry_count; i++) {

		Entry &entry = entry_array[entry_indices[i]];

		/* determine hole size to previous entry */

		int hole_size = entry.pos - prev_entry_end_pos;

		/* determine if what we want fits in that hole */
		if (hole_size >= p_for_size) {
			*p_pos = i;
			return true;
		}

		/* prepare for next one */
		prev_entry_end_pos = entry_end(entry);
	}

	/* No holes between entries, check at the end... */

	if ((pool_size - prev_entry_end_pos) >= p_for_size) {
		*p_pos = entry_count;
		return true;
	}

	return false;
}
void CompilerStubs::generate_compiler_new_object() {
  comment_section("Compiler new object (any size)");
  comment("Register edx holds the instance size, register ebx holds the prototypical near of the instance class");
  Label slow_case;
  entry("compiler_new_object");

  comment("Get _inline_allocation_top");
  movl(eax, Address(Constant("_inline_allocation_top")));

  comment("Compute new top");
  leal(ecx, Address(eax, edx, times_1));

  if (GenerateDebugAssembly) {
    comment("Check ExcessiveGC");
    testl(Address(Constant("ExcessiveGC")), Constant(0));
    jcc(not_zero, Constant(slow_case));
  }

  comment("Compare against _inline_allocation_end");
  cmpl(ecx, Address(Constant("_inline_allocation_end")));
  jcc(above, Constant(slow_case));

  comment("Allocation succeeded, set _inline_allocation_top");
  movl(Address(Constant("_inline_allocation_top")), ecx);

  comment("Set prototypical near in object; no need for write barrier");
  movl(Address(eax), ebx);

  comment("Compute remaining size");
  decrement(edx, oopSize);

  comment("One-word object?");
  Label init_done;
  jcc(zero, Constant(init_done));

  comment("Zero object fields");
  xorl(ecx, ecx);
  Label init_loop;
  bind(init_loop);
  movl(Address(eax, edx, times_1), ecx);
  decrement(edx, oopSize);
  jcc(not_zero, Constant(init_loop));
  bind(init_done);

  comment("The newly allocated object is in register eax");
  ret();

  comment("Slow case - call the VM runtime system");
  bind(slow_case);
  leal(eax, Address(Constant("newobject")));
  goto_shared_call_vm(T_OBJECT);

  entry_end(); // compiler_new_object
}
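/*
 * Illustrative only (not from the original source): the fast path that
 * compiler_new_object emits above, rewritten as plain C++.  The function and
 * parameter names are assumptions; the real state lives in the VM globals
 * _inline_allocation_top and _inline_allocation_end.
 */
#include <cstddef>
#include <cstring>

static void *inline_allocate_object(char *&top, char *end,
                                    size_t instance_size, void *near_obj) {
  char *obj = top;                      // eax: current allocation top
  char *new_top = obj + instance_size;  // ecx: proposed new top
  if (new_top > end) {                  // compare against allocation end
    return NULL;                        // slow case: call the VM runtime
  }
  top = new_top;                        // commit _inline_allocation_top
  *(void **)obj = near_obj;             // first word: prototypical near
  // zero the remaining fields, which is what the init_loop above does
  memset(obj + sizeof(void *), 0, instance_size - sizeof(void *));
  return obj;
}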
void InterpreterStubs::generate_interpreter_throw_exceptions() {
  comment_section("Interpreter exception throwers");

  entry("interpreter_throw_ArrayIndexOutOfBoundsException", 0);
  interpreter_call_vm(Constant("array_index_out_of_bounds_exception"), T_VOID);
  entry_end(); // interpreter_throw_ArrayIndexOutOfBoundsException

  entry("interpreter_throw_NullPointerException", 0);
  interpreter_call_vm(Constant("null_pointer_exception"), T_VOID);
  entry_end(); // interpreter_throw_NullPointerException

  entry("interpreter_throw_IllegalMonitorStateException", 0);
  interpreter_call_vm(Constant("illegal_monitor_state_exception"), T_VOID);
  entry_end(); // interpreter_throw_IllegalMonitorStateException

  entry("interpreter_throw_ArithmeticException", 0);
  interpreter_call_vm(Constant("arithmetic_exception"), T_VOID);
  entry_end(); // interpreter_throw_ArithmeticException

  entry("interpreter_throw_IncompatibleClassChangeError", 0);
  interpreter_call_vm(Constant("incompatible_class_change_error"), T_VOID);
  entry_end(); // interpreter_throw_IncompatibleClassChangeError

  if (GenerateDebugAssembly) {
    entry("interpreter_throw_InternalStackTagException", 0);
    interpreter_call_vm(Constant("internal_stack_tag_exception"), T_VOID);
    entry_end(); // interpreter_throw_InternalStackTagException
  }
}
void InterpreterStubs::generate_current_thread_to_primordial() {
  entry("current_thread_to_primordial");
  // We're never going to return to this thread, so it doesn't matter if
  // it doesn't look like a stopped Java thread anymore.
  // pushl(ebp);
  // get_thread(ecx);
  // movl(Address(ecx, Constant(Thread::stack_pointer_offset())), esp);
  movl(esp, Address(Constant("_primordial_sp")));
  popl(ebp);
  popal();
  ret();
  entry_end(); // current_thread_to_primordial
}
void InterpreterStubs::generate_interpreter_deoptimization_entry() {
  comment_section("Interpreter deoptimization entry");
  entry("interpreter_deoptimization_entry");

  // Define an interpreter call info.
  define_call_info();

  comment("Restore bytecode and locals pointers");
  movl(esi, Address(ebp, Constant(JavaFrame::bcp_store_offset())));
  movl(edi, Address(ebp, Constant(JavaFrame::locals_pointer_offset())));

  // Dispatch to the next bytecode.
  dispatch_next();

  entry_end(); // interpreter_deoptimization_entry
}
void InterpreterStubs::generate_interpreter_unwind_activation() {
  comment_section("Interpreter unwind activation");
  entry("interpreter_unwind_activation");

  comment("The exception is the single item on the interpreter stack");

  comment("Unlock and remove the activation");
  unlock_activation(true);

  comment("Remove exception from the stack");
  pop_obj(eax, eax);

  remove_activation(edx);
  get_thread(ecx);
  
  jmp(Constant("shared_code_for_handling_of_exception_forwarding"));

  entry_end(); // interpreter_unwind_activation
}
void InterpreterStubs::generate_interpreter_rethrow_exception() {
  comment_section("Interpreter rethrow exception");
  comment("Register eax holds the exception; Interpreter state is not in registers");

  entry("interpreter_rethrow_exception");
  comment("Restore bytecode and locals pointers");
  movl(esi, Address(ebp, Constant(JavaFrame::bcp_store_offset())));
  movl(edi, Address(ebp, Constant(JavaFrame::locals_pointer_offset())));

  comment("Mark the bytecode pointer as being inside an exception");
  addl(esi, Constant(JavaFrame::exception_frame_flag));

  comment("Clear the expression stack");
  movl(esp, Address(ebp, Constant(JavaFrame::stack_bottom_pointer_offset())));
  
  comment("Push the exception on the expression stack");
  push_obj(eax);

  comment("Get exception handler bci for exception");
  interpreter_call_vm(Constant("exception_handler_bci_for_exception"), T_INT);
  
  comment("Check if we got a bci - otherwise unwind the activation");
  cmpl(eax, Constant(-1));
  jcc(equal, Constant("interpreter_unwind_activation"));
#if ENABLE_JAVA_DEBUGGER
  Label skip;
  cmpb(Address(Constant("_debugger_active")), Constant(0));
  jcc(equal, Constant(skip));
  movl(edx, eax);
  interpreter_call_vm(Constant("handle_caught_exception"), T_VOID);
  comment("Re-get exception handler bci for exception");
  interpreter_call_vm(Constant("exception_handler_bci_for_exception"), T_INT);
  bind(skip);
#endif
  comment("Convert the bytecode index into a bytecode pointer");
  movl(ecx, Address(ebp, Constant(JavaFrame::method_offset())));
  leal(esi, Address(ecx, eax, times_1, Constant(Method::base_offset())));

  // Dispatch to the exception handler.
  dispatch_next();

  entry_end(); // interpreter_rethrow_exception
}
void InterpreterStubs::generate_interpreter_call_vm_dispatch() {
  comment_section("Interpreter call VM - and dispatch to the bytecode returned by the VM upon termination");
  entry("interpreter_call_vm_dispatch");

  comment("Save bytecode pointer");
  movl(Address(ebp, Constant(JavaFrame::bcp_store_offset())), esi);

  comment("Call the shared call vm and disregard any return value");
  call_shared_call_vm(T_INT);
 
  comment("Restore bytecode pointer");
  movl(esi, Address(ebp, Constant(JavaFrame::bcp_store_offset())));

  comment("Restore locals pointer");
  movl(edi, Address(ebp, Constant(JavaFrame::locals_pointer_offset())));

  comment("Dispatch to next byte code");
  jmp(Address(no_reg, eax, times_4, Constant("interpreter_dispatch_table")));

  entry_end(); // interpreter_call_vm_dispatch
}
void InterpreterStubs::generate_interpreter_call_vm_redo() {
#if ENABLE_JAVA_DEBUGGER
  Label check_breakpoint, no_breakpoint;
#endif
  comment_section("Interpreter call VM - and repeat current bytecode upon termination");
  entry("interpreter_call_vm_redo");

  comment("Save bytecode pointer");
  movl(Address(ebp, Constant(JavaFrame::bcp_store_offset())), esi);

  comment("Call the shared call vm and disregard any return value");
  call_shared_call_vm(T_VOID);
 
  comment("Restore bytecode pointer");
  movl(esi, Address(ebp, Constant(JavaFrame::bcp_store_offset())));

  comment("Restore locals pointer");
  movl(edi, Address(ebp, Constant(JavaFrame::locals_pointer_offset())));

#if ENABLE_JAVA_DEBUGGER
  comment("Check to see if we are connected to a debugger");
  cmpb(Address(Constant("_debugger_active")), Constant(0));
  jcc(not_zero, Constant(check_breakpoint));
  bind(no_breakpoint);
  comment("Not debugging, so just dispatch");
  dispatch_next(0);
  bind(check_breakpoint);
  comment("We are debugging, so let's see if we just replaced a breakpoint opcode");
  cmpb(Address(esi), Constant(Bytecodes::_breakpoint));
  jcc(not_zero, Constant(no_breakpoint));
  comment("There is a breakpoint in the code, so that means that eax has the correct opcode");
  comment("So just jmp directly without using esi");
  andl(eax, Constant(0xFF));
  movl(ebx, eax);
  jmp(Address(no_reg, ebx, times_4, Constant("interpreter_dispatch_table")));
#else
  dispatch_next(0);
#endif
  entry_end(); // interpreter_call_vm_redo
}
void InterpreterStubs::generate_interpreter_rethrow_exception_init() {
  comment_section("Interpreter rethrow exception init");
  comment("Register eax holds the exception; Interpreter state is not in registers");
  entry("interpreter_rethrow_exception_init");
#if ENABLE_JAVA_DEBUGGER
  Label skip;
  cmpb(Address(Constant("_debugger_active")), Constant(0));
  jcc(equal, Constant(skip));
  comment("push the exception object so we don't nuke it");
  push_obj(eax);
  comment("call debugger code to store relevant info about where exception happened");
  interpreter_call_vm(Constant("handle_exception_info"), T_VOID);
  pop_obj(eax, edx);
  bind(skip);
#endif
  if (GenerateInlineAsm)
      jmp(Constant("interpreter_rethrow_exception_init"));
  else
      comment("fall through to rethrow_exception"); // IMPL_NOTE: FALLTHROUGH

  entry_end(); // interpreter_rethrow_exception_init
}
/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
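/*
 * Illustrative sketch (not from the original source): the usual pattern
 * around tree_insert().  A NULL return means the node was linked and
 * rebalanced into the tree; a non-NULL return is the existing node whose
 * range already covers file_offset, which the caller must treat as an
 * overlap.  The function name is an assumption made for the example.
 */
static int insert_ordered_extent(struct rb_root *root,
				 struct btrfs_ordered_extent *entry)
{
	struct rb_node *existing;

	existing = tree_insert(root, entry->file_offset, &entry->rb_node);
	if (existing)
		return -EEXIST;	/* range already present in the tree */
	return 0;
}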
void CompilerStubs::generate_compiler_idiv_irem() {
  comment_section("Compiler integer divide and remainder");
  comment("Register eax holds the dividend, register ebx holds the divisor");

  entry("compiler_idiv_irem");
  const int min_int = 0x80000000;
  Label normal_case, special_case;

  Label throw_exception;

  testl(ebx,ebx);
  jcc(equal, Constant(throw_exception));

  // Check for special case
  cmpl(eax, Constant(min_int));
  jcc(not_equal, Constant(normal_case));
  xorl(edx, edx); // Prepare edx for possible special case (where remainder = 0)
  cmpl(ebx, Constant(-1));
  jcc(equal, Constant(special_case));

  // Handle normal case
  bind(normal_case);
  cdql();
  idivl(ebx);

  // Normal and special case exit
  bind(special_case);
  ret();

  bind(throw_exception);
  comment("Throw a DivisionByZeroException");
  leal(eax, Address(Constant("division_by_zero_exception")));
  goto_shared_call_vm(T_VOID);

  entry_end(); // compiler_idiv_irem
}
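/*
 * Illustrative only (not from the original source): why compiler_idiv_irem
 * special-cases INT_MIN / -1 above.  The true quotient, 2147483648, does not
 * fit in a signed 32-bit register, so idivl would raise a divide error; the
 * stub instead returns with eax still holding INT_MIN and edx cleared, which
 * matches Java semantics (quotient wraps to INT_MIN, remainder is 0).  The
 * helper below is a sketch of that contract in plain C++.
 */
#include <cstdint>

static void idiv_irem_semantics(int32_t dividend, int32_t divisor,
                                int32_t &quotient, int32_t &remainder) {
  // divisor == 0 is handled separately by throwing ArithmeticException
  if (dividend == INT32_MIN && divisor == -1) {
    quotient = INT32_MIN; // eax still holds the dividend
    remainder = 0;        // edx was cleared before the special-case check
  } else {
    quotient = dividend / divisor;
    remainder = dividend % divisor;
  }
}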
void InterpreterStubs::generate_interpreter_fill_in_tags() {
  comment_section("Interpreter fill in tags");
  entry("interpreter_fill_in_tags");
  comment("eax: return address of method");
  comment("ebx: method");
  comment("ecx: size of parameters.  Guaranteed to be >= 1");
  comment("edx: call info from call site");
  comment("Must preserve eax, ebx, ecx");

  // stack layout:
  //   sp return address of caller
  //      --> argument n
  //      -->    ...
  //      --> argument 0

  Label extended_call_info;

  comment("Compact call info or normal call info?");
  testl(edx, edx); 
  jcc(positive, Constant(extended_call_info));

  Label loop_entry, loop_condition;
  comment("We have a compact call info");
  movl(edi, ecx);

bind(loop_entry);
  decl(edi);
  comment("Store int tag");
  movl(Address(esp, edi, times_8, Constant(BytesPerWord)), Constant(int_tag));
  comment("Test the bit in the call info");
  GUARANTEE(CallInfo::format1_tag_start == 0, "Tag must start at bit position 0 for this code to work");
  btl(edx, edi);
  jcc(carry_clear, Constant(loop_condition));
  comment("Store obj tag");
  movl(Address(esp, edi, times_8, Constant(BytesPerWord)), Constant(obj_tag));
  bind(loop_condition);
  testl(edi, edi);
  jcc(not_zero, Constant(loop_entry));
  ret();

bind(extended_call_info);
  comment("Normal call info");
  // The following code is slightly complicated.  "Bit offset" below
  // pretends like the callinfo's are in a bit array, as follows:
  //     Callinfo describing bci and offset
  //     Size [16 bits] and stack info 0-3
  //     Stack info 4-11
  // We ignore the fact that each of these words is preceded by a byte
  // that makes it look like an instruction.
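  // Concretely: each CallInfo word is 4 bytes of payload preceded by one
  // byte that makes it look like an instruction, so word k of the logical
  // bit array sits at byte offset 5*k + 1 from the return address (hence
  // the "multiply by 5" below and the Constant(1) displacement).  A local's
  // 4-bit tag nybble lives at bit offset 32 + 16 + 4*n, i.e. past the
  // 32-bit callinfo word and the 16-bit size field.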
  pushl(ecx); 
  pushl(ebx);
  Label loopx_entry, loopx_done;
 
  comment("Bit offset of first argument in CallInfo array");
  movzxw(edx, Address(eax, Constant(5 + 1)));  // total number of locals/expr
  subl(edx, ecx);               // number of locals/expr belonging to callee
  shll(edx, Constant(2));       // number of bits per nybble
  addl(edx, Constant(32 + 16)); // 48 bits is the 32 bit callinfo and 16bit size info

  comment("Decrement argument count; move to more convenient register");
  leal(esi, Address(ecx, Constant(-1)));

  comment("Location of tag of esi-th local");
  leal(ebx, Address(esp, Constant(3 * BytesPerWord)));

bind(loopx_entry);
  comment("eax holds the return address");
  comment("ebx holds address of the esi-th tag");
  comment("esi is the local whose tag we are setting");
  comment("edx contains the bit offset of Local 0 in the CallInfo array");
  comment("Get bit offset of esi-th local");
  leal(ecx, Address(edx, esi, times_4));

  comment("From bit offset, get word offset, then multiply by 5");
  movl(edi, ecx);
  shrl(edi, Constant(5));
  leal(edi, Address(edi, edi, times_4));

  comment("Get the appropriate CallInfo word; extract the nybble");
  movl(edi, Address(eax, edi, times_1, Constant(1)));
  shrl(edi);
  andl(edi, Constant(0xF));

  comment("Tag is (1 << value) >> 1.  This is 0 when value == 0");
  movl(ecx, edi);
  movl(edi, Constant(1));
  shll(edi);
  shrl(edi, Constant(1));

  comment("Store the tag");
  movl(Address(ebx), edi);

  comment("Are we done?");
  decl(esi); 
  addl(ebx, Constant(8));
  testl(esi, esi);
  jcc(greater_equal, Constant(loopx_entry));
bind(loopx_done);
  popl(ebx); 
  popl(ecx);
  ret();

  entry_end(); // interpreter_fill_in_tags
}
void SourceAssembler::rom_linkable_entry_end() {
  entry_end();
}
void CompilerStubs::generate_compiler_new_obj_array() {
  comment_section("Compiler stub: new object array");
  comment("- ebx holds the prototypical near of the array class");

  entry("compiler_new_obj_array");
  comment("Get array length");
  // add BytesPerWord for return address
  movl(edx, Address(esp, Constant(BytesPerWord + JavaFrame::arg_offset_from_sp(0))));

  Label slow_case;
  comment("Check if the array length is too large or negative");
  cmpl(edx, Constant(maximum_safe_array_length));
  jcc(above, Constant(slow_case));

  comment("Get _inline_allocation_top");
  movl(eax, Address(Constant("_inline_allocation_top")));

  if (GenerateDebugAssembly) {
    comment("Check ExcessiveGC");
    testl(Address(Constant("ExcessiveGC")), Constant(0));
    jcc(not_zero, Constant(slow_case));
  }

  comment("Compute new top");
  leal(ecx, Address(eax, edx, times_4, Constant(Array::base_offset())));

  comment("Check for overflow");
  cmpl(ecx, eax);
  jcc(below, Constant(slow_case));

  comment("Compare against _inline_allocation_end");
  cmpl(ecx, Address(Constant("_inline_allocation_end")));
  jcc(above, Constant(slow_case));

  comment("Allocation succeeded, set _inline_allocation_top");
  movl(Address(Constant("_inline_allocation_top")), ecx);

  comment("Set prototypical near in object; no need for write barrier");
  movl(Address(eax), ebx);

  comment("Set the length");
  movl(Address(eax, Constant(Array::length_offset())), edx);

  comment("Compute remaining size");
  testl(edx, edx);

  comment("Empty array?");
  Label init_done;
  jcc(equal, Constant(init_done));

  comment("Zero array elements");
  xorl(ecx, ecx);
  Label init_loop;
  bind(init_loop);
  movl(Address(eax, edx, times_4, Constant(Array::base_offset() - oopSize)), ecx);
  decrement(edx, 1);
  jcc(not_zero, Constant(init_loop));
  bind(init_done);

  comment("The newly allocated array is in register eax");
  ret();

  comment("Slow case - call the VM runtime system");
  bind(slow_case);
  leal(eax, Address(Constant("anewarray")));
  goto_shared_call_vm(T_ARRAY);

  entry_end(); // compiler_new_obj_array
}
PoolAllocator::ID PoolAllocator::alloc(int p_size) {

	ERR_FAIL_COND_V(p_size < 1, POOL_ALLOCATOR_INVALID_ID);
#ifdef DEBUG_ENABLED
	if (p_size > free_mem) OS::get_singleton()->debug_break();
#endif
	ERR_FAIL_COND_V(p_size > free_mem, POOL_ALLOCATOR_INVALID_ID);

	mt_lock();

	if (entry_count == entry_max) {
		mt_unlock();
		ERR_PRINT("entry_count==entry_max");
		return POOL_ALLOCATOR_INVALID_ID;
	}

	int size_to_alloc = aligned(p_size);

	EntryIndicesPos new_entry_indices_pos;

	if (!find_hole(&new_entry_indices_pos, size_to_alloc)) {
		/* No hole could be found, try compacting mem */
		compact();
		/* Then search again */

		if (!find_hole(&new_entry_indices_pos, size_to_alloc)) {

			mt_unlock();
			ERR_PRINT("memory can't be compacted further");
			return POOL_ALLOCATOR_INVALID_ID;
		}
	}

	EntryArrayPos new_entry_array_pos;

	bool found_free_entry = get_free_entry(&new_entry_array_pos);

	if (!found_free_entry) {
		mt_unlock();
		ERR_FAIL_COND_V(!found_free_entry, POOL_ALLOCATOR_INVALID_ID);
	}

	/* move all entry indices up, make room for this one */
	for (int i = entry_count; i > new_entry_indices_pos; i--) {

		entry_indices[i] = entry_indices[i - 1];
	}

	entry_indices[new_entry_indices_pos] = new_entry_array_pos;

	entry_count++;

	Entry &entry = entry_array[entry_indices[new_entry_indices_pos]];

	entry.len = p_size;
	entry.pos = (new_entry_indices_pos == 0) ? 0 : entry_end(entry_array[entry_indices[new_entry_indices_pos - 1]]); // alloc either at the beginning or at the end of the previous entry
	entry.lock = 0;
	entry.check = (check_count++) & CHECK_MASK;
	free_mem -= size_to_alloc;
	if (free_mem < free_mem_peak)
		free_mem_peak = free_mem;

	ID retval = (entry_indices[new_entry_indices_pos] << CHECK_BITS) | entry.check;
	mt_unlock();

	//ERR_FAIL_COND_V( (uintptr_t)get(retval)%align != 0, retval );

	return retval;
}
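/*
 * Illustrative sketch (not from the original source): how an ID returned by
 * alloc() would be decoded and validated.  The helper name is an assumption;
 * it simply mirrors the encoding above, where the low CHECK_BITS carry the
 * per-entry check value and the remaining bits index entry_array.
 */
PoolAllocator::Entry *PoolAllocator::get_entry_checked(ID p_id) {

	unsigned int index = p_id >> CHECK_BITS;
	unsigned int check = p_id & CHECK_MASK;

	if (index >= (unsigned int)entry_max)
		return NULL;

	Entry &entry = entry_array[index];
	if (entry.check != check)
		return NULL; /* stale or recycled ID */

	return &entry;
}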