/* Destroy a port: unlink it from whatever port list it is on and
   release its memory to the global allocator.
   NOTE(review): assumes any pending messages have already been
   drained or abandoned — confirm at call sites. */
mm_port_destroy(struct mm_port *port)
{
	ENTER();

	// Remove the port from its owner's port list.
	mm_list_delete(&port->ports);
	// Release the port memory.
	mm_global_free(port);

	LEAVE();
}
/* Detach an entry from the time queue, keeping the per-bucket
   element counters in sync.  The entry must currently be queued;
   on return it is marked as detached (MM_TIMEQ_INDEX_NO). */
void mm_timeq_delete(struct mm_timeq *timeq, struct mm_timeq_entry *entry)
{
	DEBUG("entry: %p", entry);
	ASSERT(entry->index != MM_TIMEQ_INDEX_NO);

	// Account for the bucket the entry is leaving.
	if (entry->index == MM_TIMEQ_INDEX_T2) {
		timeq->t2_num--;
		// TODO: take into account that t2_min and t2_max might be
		// changed after this.
	} else if (entry->index == MM_TIMEQ_INDEX_FE) {
		timeq->fe_num--;
	}

	// Unlink the entry and mark it as no longer queued.
	mm_list_delete(&entry->queue);
	entry->index = MM_TIMEQ_INDEX_NO;
}
void mm_task_free(void *ptr) { ENTER(); if (likely(ptr != NULL)) { /* Get the real start address of the chunk. */ struct mm_list *link = (struct mm_list *) (((char *) ptr) - sizeof(struct mm_list)); /* Remove it from the task's chunk list. */ mm_list_delete(link); /* Free the memory. */ mm_local_free(link); } LEAVE(); }
/* Execute a routine through the task combiner.  Requests that
   originate from the same core are serialized on a per-core wait
   queue: only the task at the head of the queue proceeds into the
   shared combiner; the rest block until it is their turn.
   NOTE(review): presumably cross-core serialization is handled by
   mm_combiner_execute() itself — confirm against its definition. */
mm_task_combiner_execute(struct mm_task_combiner *combiner,
			 mm_combiner_routine_t routine, uintptr_t data)
{
	ENTER();

	// Disable cancellation as the enqueue algorithm cannot be
	// safely undone if interrupted in the middle.
	int cancelstate;
	mm_task_setcancelstate(MM_TASK_CANCEL_DISABLE, &cancelstate);

	// Get per-core queue of pending requests.
	mm_core_t core = mm_core_self();
	struct mm_list *wait_queue = MM_THREAD_LOCAL_DEREF(core, combiner->wait_queue);

	// Add the current request to the per-core queue.
	struct mm_task *task = mm_task_selfptr();
	task->flags |= MM_TASK_COMBINING;
	mm_list_append(wait_queue, &task->wait_queue);

	// Wait until the current request becomes the head of the
	// per-core queue.  Some other task will wake us with
	// mm_task_run() when it finishes its own turn.
	while (mm_list_head(wait_queue) != &task->wait_queue)
		mm_task_block();

	// Our turn: run the routine through the shared combiner.
	mm_combiner_execute(&combiner->combiner, routine, data);

	// Remove the request from the per-core queue.
	mm_list_delete(&task->wait_queue);
	task->flags &= ~MM_TASK_COMBINING;

	// If the per-core queue is not empty then let its new head take
	// the next turn.
	// NOTE(review): mm_list_head() is compared against a struct
	// mm_list above but bound to a struct mm_link here — verify the
	// two link types are compatible.
	if (!mm_list_empty(wait_queue)) {
		struct mm_link *link = mm_list_head(wait_queue);
		task = containerof(link, struct mm_task, wait_queue);
		mm_task_run(task);
	}

	// Restore cancellation.
	mm_task_setcancelstate(cancelstate, NULL);

	LEAVE();
}
/* Create a new task with the given attributes, entry routine and
   argument.  Tries to recycle a previously finished ("dead") task
   before allocating a fresh one.  Unless called during bootstrap,
   the task is set up on its own stack and queued for execution
   before returning.  Returns the new task; the caller does not own
   its memory after it is queued. */
struct mm_task *
mm_task_create(const struct mm_task_attr *attr, mm_routine_t start, mm_value_t start_arg)
{
	ENTER();

	// Check to see if called from the bootstrap context.
	bool boot = (mm_core == NULL);

	struct mm_task *task = NULL;

	// Try to reuse a dead task.
	if (likely(!boot) && !mm_list_empty(&mm_core->dead)) {
		// Get the last dead task.
		struct mm_list *link = mm_list_head(&mm_core->dead);
		struct mm_task *dead = containerof(link, struct mm_task, queue);

		// Check it against the required stack size.
		uint32_t stack_size = (attr != NULL ? attr->stack_size : MM_TASK_STACK_SIZE);
		if (dead->stack_size == stack_size) {
			// The dead task is just good.
			mm_list_delete(link);
			task = dead;
		} else if (dead->stack_size != MM_TASK_STACK_SIZE) {
			// The dead task has an unusual stack, free it.
			// A new stack of the required size is allocated
			// below once stack_base is seen to be NULL.
			mm_stack_destroy(dead->stack_base, dead->stack_size);
			dead->stack_base = NULL;
			// Now use that task.
			mm_list_delete(link);
			task = dead;
		} else {
			// A task with unusual stack size is requested, leave
			// the dead task alone, it is likely to be reused the
			// next time.
		}
	}

	// Allocate a new task if needed.
	if (task == NULL)
		task = mm_task_new();

	// Initialize the task info.
	// NOTE(review): presumably mm_task_set_attr() stores the
	// requested stack_size into the task — confirm, since the
	// reused-task branch above relies on it.
	mm_task_set_attr(task, attr);
	task->start = start;
	task->start_arg = start_arg;

	// Allocate a new stack if needed.
	if (task->stack_base == NULL)
		task->stack_base = mm_stack_create(task->stack_size, MM_PAGE_SIZE);

	// Setup the task entry point on its own stack and queue it for
	// execution unless bootstrapping in which case it will be done
	// later in a special way.
	if (likely(!boot)) {
		mm_stack_init(&task->stack_ctx, mm_task_entry,
			      task->stack_base, task->stack_size);
		mm_task_run(task);
	}

	LEAVE();
	return task;
}