/*
 * Allocate `size` bytes of the given `kind` from the heap's thread-local
 * allocation buffer.
 *
 * Large objects are not yet supported: if `size` crosses the large-object
 * threshold (as decided by is_large_object()), the process aborts with a
 * diagnostic rather than returning a bad pointer.
 *
 * Returns the newly allocated block (semantics of failure are those of
 * thread_local_alloc()).
 */
void *pup_heap_alloc(struct PupHeap *heap, size_t size, enum PupHeapKind kind)
{
	if (is_large_object(size)) {
		/* %zu is the correct specifier for size_t (was %ld: UB on
		 * platforms where size_t is not long, e.g. 64-bit Windows). */
		ABORTF("Large object allocator not implemented yet! (%zu bytes)", size);
	}
	return thread_local_alloc(heap, size, kind);
}
/*
 * Allocate `size` bytes from the mutator's thread-local allocation block
 * inside the mark-sweep space (mspace).
 *
 * Fast path: bump-allocate from the allocator's current local block.
 * Slow path: if the local block is exhausted, grab a fresh block from the
 * mspace and retry; the retry is expected to succeed because a fresh block
 * is at least GC_LOS_OBJ_SIZE_THRESHOLD bytes.
 *
 * Preconditions (asserted): `size` is a multiple of GC_OBJECT_ALIGNMENT and
 * does not exceed the large-object threshold (large objects go elsewhere).
 *
 * Returns the allocated chunk, or NULL if no block could be obtained
 * (e.g. the space is exhausted and a GC is required).
 */
void *mspace_alloc(unsigned int size, Allocator *allocator)
{
	void *p_return = NULL;

	/* All chunks of data requested need to be multiples of
	 * GC_OBJECT_ALIGNMENT and small enough for this space. */
	assert((size % GC_OBJECT_ALIGNMENT) == 0);
	assert(size <= GC_LOS_OBJ_SIZE_THRESHOLD);

	/* Fast path: current thread-local block has room. */
	p_return = thread_local_alloc(size, allocator);
	if (p_return)
		return p_return;

	/* Slow path: local block exhausted — grab a new block from the
	 * mspace, then retry. (Fixed stray double semicolon here.) */
	Mspace *mspace = (Mspace *)allocator->alloc_space;
	Boolean ok = mspace_alloc_block(mspace, allocator);
	if (!ok)
		return NULL;

	p_return = thread_local_alloc(size, allocator);
	assert(p_return); /* a fresh block must satisfy a small-object request */
	return p_return;
}
/*
 * Initialize an already-allocated thread structure `t` so it is ready to be
 * scheduled.
 *
 * @param t        thread object to initialize; its stack must already be
 *                 attached (asserted via thread_stack_get/_get_size)
 * @param priority scheduling priority, forwarded to schedee_init()
 * @param run      thread entry function (must be non-NULL)
 * @param arg      opaque argument passed to `run`
 *
 * Panics if thread-local storage cannot be initialized. Note: `id_counter++`
 * is not visibly synchronized here — presumably callers hold a lock or IRQs
 * are disabled (the critical_count setup suggests a sched-locked context);
 * TODO(review): confirm.
 */
void thread_init(struct thread *t, int priority, void *(*run)(void *), void *arg) { assert(t); assert(run); assert(thread_stack_get(t)); assert(thread_stack_get_size(t)); t->id = id_counter++; /* setup thread ID */ dlist_head_init(&t->thread_link); /* default unlink value */ t->task = NULL; /* thread starts outside the scheduler: critical count reflects the sched lock */ t->critical_count = __CRITICAL_COUNT(CRITICAL_SCHED_LOCK); t->siglock = 0; t->state = TS_INIT; /* allocate per-thread key storage; fatal if it fails */ if (thread_local_alloc(t, MODOPS_THREAD_KEY_QUANTITY)) { panic("can't initialize thread_local"); } t->joining = NULL; t->run = run; t->run_arg = arg; /* cpu context init */ /* setup stack pointer to the top of allocated memory * The structure of kernel thread stack follow: * +++++++++++++++ top * | * v * the thread structure * xxxxxxx * the end * +++++++++++++++ bottom (t->stack - allocated memory for the stack) */ context_init(&t->context, CONTEXT_PRIVELEGED | CONTEXT_IRQDISABLE, thread_trampoline, thread_stack_get(t) + thread_stack_get_size(t)); /* signal state, scheduler entity, and join/wait bookkeeping */ sigstate_init(&t->sigstate); schedee_init(&t->schedee, priority, thread_process); /* initialize everthing else */ thread_wait_init(&t->thread_wait); }