/*
 * Attempt to take the mutex without blocking.
 *
 * Returns 1 on success: the lock word was atomically flipped 0->1,
 * curr_task is recorded as the holder, and the mutex is added to
 * curr_task's held-mutex list.
 *
 * Returns 0 if the mutex is already held.  In that case a deadlock
 * check runs, and curr_task may replace the mutex's recorded waiter
 * (presumably so the highest priority waiter is the one tracked —
 * task_compare() semantics not visible here, TODO confirm ordering).
 *
 * Panics if the lock word is set but held_by was never recorded.
 */
static int get_lock(volatile struct mutex *mutex) {
    struct task_mutex_data *curr_task_data = &curr_task->mutex_data;

    /* TODO: Use a nicer name than this builtin */
    /* Atomic compare-and-swap: take the lock only if it is currently 0 */
    if (__sync_bool_compare_and_swap(&mutex->lock, 0, 1)) {
        mutex->held_by = curr_task;
        held_mutexes_insert(curr_task_data->held_mutexes, mutex);
        /* We are no longer waiting on anything */
        curr_task_data->waiting = NULL;
        return 1;
    }
    else {
        if (mutex->held_by != NULL) {
            deadlock_check(mutex);

            /* Add to waitlist if higher priority */
            if (mutex->waiting) {
                /* Only displace the existing waiter if it compares
                 * lower than curr_task */
                if (task_compare(mutex->waiting, curr_task) < 0) {
                    mutex->waiting = curr_task;
                    /* Cast drops volatile for storage in task data */
                    curr_task_data->waiting = (struct mutex *) mutex;
                }
            }
            else {
                /* No waiter yet — curr_task becomes the waiter */
                mutex->waiting = curr_task;
                curr_task_data->waiting = (struct mutex *) mutex;
            }

            return 0;
        }
        else {
            /* Lock word set but no recorded holder: bookkeeping is
             * corrupt, nothing sane to do */
            panic_print("Semaphore (0x%x) not available, but held_by unset.", mutex);
        }
    }

    return 0;
}
/* Called by svc_handler */ void sched_svc_end_task(void) { struct task_ctrl *task = get_task_ctrl(curr_task); if (task->stack_limit > task->stack_top) { panic_print("Task (0x%x, fptr: 0x%x) has overflowed its stack. stack_top: 0x%x stack_limit: 0x%x", task, task->fptr, task->stack_top, task->stack_limit); } list_remove(&task->runnable_task_list); /* Periodic (but only if aborted) */ if (task->period && task->abort) { list_remove(&task->periodic_task_list); } /* Periodic (but only if not aborted) */ if (!task->abort && task->period) { task->running = 0; /* Reset stack */ task->stack_top = task->stack_base; } else { /* Add to queue for freeing */ list_add(&task->free_task_list, &free_task_list); total_tasks -= 1; } switch_task(NULL); }
/*
 * Dispatch a mutex-related service call.
 *
 * SVC_ACQUIRE takes a struct mutex * argument and returns the result
 * of svc_acquire(); SVC_RELEASE takes a struct mutex * and returns 0.
 * Panics on an unrecognized service number.
 */
int mutex_service_call(uint32_t svc_number, ...) {
    va_list args;
    int result = 0;

    va_start(args, svc_number);

    if (svc_number == SVC_ACQUIRE) {
        result = svc_acquire(va_arg(args, struct mutex *));
    }
    else if (svc_number == SVC_RELEASE) {
        svc_release(va_arg(args, struct mutex *));
    }
    else {
        panic_print("Unknown SVC: %d", svc_number);
    }

    va_end(args);

    return result;
}
/*
 * Top-level SVC dispatcher (Cortex-M exception-frame variant).
 *
 * registers points at the hardware-stacked exception frame:
 *   r0, r1, r2, r3, r12, r14, return address, xPSR
 * First argument and return value (r0) is registers[0].
 *
 * The SVC number is the 8-bit immediate embedded in the SVC
 * instruction, which sits two bytes before the stacked return
 * address (registers[6]).
 */
void svc_handler(uint32_t *registers) {
    uint32_t svc_number;

    /* BUGFIX: read the immediate through uint8_t, not char.  Plain
     * char may be signed, and an SVC number >= 0x80 would sign-extend
     * into svc_number and fall through to the panic default. */
    svc_number = ((uint8_t *)registers[6])[-2];

    switch (svc_number) {
    case SVC_YIELD:
    case SVC_END_TASK:
    case SVC_REGISTER_TASK:
    case SVC_TASK_SWITCH:
        /* Scheduler calls take up to two arguments (r0, r1) and
         * return in r0 */
        registers[0] = sched_service_call(svc_number, registers[0], registers[1]);
        break;
    case SVC_ACQUIRE:
    case SVC_RELEASE:
        /* Mutex calls take one argument (r0) and return in r0 */
        registers[0] = mutex_service_call(svc_number, registers[0]);
        break;
    default:
        panic_print("Unknown SVC: %d", svc_number);
        break;
    }
}
/*
 * Record mutex in the first free (NULL) slot of the task's
 * held-mutex list.  Panics if all HELD_MUTEXES_MAX slots are taken.
 */
static void held_mutexes_insert(struct mutex *list[], volatile struct mutex *mutex) {
    int slot = 0;

    while (slot < HELD_MUTEXES_MAX) {
        if (!list[slot]) {
            /* Cast drops volatile for storage in the list */
            list[slot] = (struct mutex *) mutex;
            return;
        }
        slot++;
    }

    panic_print("Too many mutexes already held in list (0x%x).", list);
}
/*
 * Detect two deadlock conditions before curr_task blocks on mut:
 *
 *  1. Double acquire: the holder of mut is curr_task itself.
 *  2. Circular wait (one level deep): the holder of mut is itself
 *     waiting on a mutex that curr_task currently holds.
 *
 * Panics in either case; returns normally otherwise.
 */
static void deadlock_check(volatile struct mutex *mut) {
    struct task_t *task = mut->held_by;
    struct task_mutex_data *task_data = &task->mutex_data;

    /* Holder is us — we would wait on ourselves forever */
    if (task == curr_task) {
        panic_print("Task (0x%x) attempted to double acquire mutex 0x%x", curr_task, mut);
    }

    if (task_data->waiting) {
        /* Scan our held-mutex list for the mutex the holder is
         * waiting on */
        for (int i = 0; i < HELD_MUTEXES_MAX; i++) {
            struct task_mutex_data *curr_task_data = &curr_task->mutex_data;
            if (curr_task_data->held_mutexes[i] == task_data->waiting) {
                panic_print("Deadlock! Task (0x%x) is waiting on mutex 0x%x, " "but curr_task (0x%x) holds it.", task, task_data->waiting, curr_task);
            }
        }
    }
}
/* Set size bytes to value from p */
/*
 * Fill size bytes starting at p with the 32-bit pattern value.
 *
 * p must be 4-byte aligned (panics otherwise).  size is assumed to be
 * a multiple of 4 — a trailing partial word would otherwise be written
 * past the end of the buffer (TODO confirm callers guarantee this).
 */
void memset32(void *p, int32_t value, uint32_t size) {
    uint32_t *word = (uint32_t *) p;
    uint32_t *end = (uint32_t *) ((uintptr_t) p + size);

    /* Disallowed unaligned addresses */
    if ( (uintptr_t) p % 4 ) {
        panic_print("Attempt to memset unaligned address (0x%x).", p);
    }

    /* BUGFIX: advance one 32-bit word per iteration.  The original
     * incremented the void pointer by a single byte (GCC extension)
     * while storing a 4-byte word each time, producing overlapping
     * stores and writing up to 3 bytes past the end of the buffer. */
    while (word < end) {
        *word++ = value;
    }
}
/*
 * Close resource descriptor rd: invoke the resource's closer callback,
 * free its storage, and (in the task-switching case) clear the table
 * entry.  Panics if rd is out of range.
 *
 * NOTE(review): the task branch passes the resource struct itself to
 * closer(), while the default branch passes only .env — confirm both
 * closer signatures expect this.
 * NOTE(review): kfree(&default_resources[rd]) frees the address of a
 * table element; this is only valid if default_resources entries are
 * individually heap-allocated — verify against its definition.
 */
void close(rd_t rd) {
    if (rd >= RESOURCE_TABLE_SIZE) {
        panic_print("Resource descriptor too large");
    }

    if (task_switching) {
        /* Closing the topmost descriptor lowers the watermark so the
         * slot can be reused */
        if (rd == curr_task->task->top_rd - 1) {
            curr_task->task->top_rd--;
        }
        curr_task->task->resources[rd]->closer(curr_task->task->resources[rd]);
        kfree(curr_task->task->resources[rd]);
        /* Clear the entry to avoid a dangling pointer */
        curr_task->task->resources[rd] = NULL;
    }
    else {
        default_resources[rd].closer(default_resources[rd].env);
        kfree(&default_resources[rd]);
    }
}
/*
 * Write the NUL-terminated string s to resource rd, one character at
 * a time via the resource's writer callback.  Holds the resource
 * semaphore around the whole write when task switching is active.
 * Panics if rd is out of range.
 */
void swrite(rd_t rd, char* s) {
    if (rd >= RESOURCE_TABLE_SIZE) {
        panic_print("Resource descriptor too large");
    }

    if (!task_switching) {
        /* Pre-scheduler path: no locking needed */
        for (; *s; s++) {
            default_resources[rd].writer(*s, default_resources[rd].env);
        }
        return;
    }

    acquire(curr_task->task->resources[rd]->sem);
    for (; *s; s++) {
        curr_task->task->resources[rd]->writer(*s, curr_task->task->resources[rd]->env);
    }
    release(curr_task->task->resources[rd]->sem);
}
/*
 * Write n bytes from buffer d to resource rd via its writer callback.
 * Holds the resource semaphore around the whole write when task
 * switching is active.  Panics if rd is out of range.
 */
void write(rd_t rd, char* d, int n) {
    int idx;

    if (rd >= RESOURCE_TABLE_SIZE) {
        panic_print("Resource descriptor too large");
    }

    if (!task_switching) {
        /* Pre-scheduler path: no locking needed */
        for (idx = 0; idx < n; idx++) {
            default_resources[rd].writer(d[idx], default_resources[rd].env);
        }
        return;
    }

    acquire(curr_task->task->resources[rd]->sem);
    for (idx = 0; idx < n; idx++) {
        curr_task->task->resources[rd]->writer(d[idx], curr_task->task->resources[rd]->env);
    }
    release(curr_task->task->resources[rd]->sem);
}
/*
 * Read n bytes from resource rd into buf via its reader callback.
 * Holds the resource semaphore around the whole read when task
 * switching is active.  Panics if rd is out of range.
 */
void read(rd_t rd, char *buf, int n) {
    int idx;

    if (rd >= RESOURCE_TABLE_SIZE) {
        panic_print("Resource descriptor too large");
    }

    if (!task_switching) {
        /* Pre-scheduler path: no locking needed */
        for (idx = 0; idx < n; idx++) {
            buf[idx] = default_resources[rd].reader(default_resources[rd].env);
        }
        return;
    }

    acquire(curr_task->task->resources[rd]->sem);
    for (idx = 0; idx < n; idx++) {
        buf[idx] = curr_task->task->resources[rd]->reader(curr_task->task->resources[rd]->env);
    }
    release(curr_task->task->resources[rd]->sem);
}
/*
 * Allocate and register a new task running fptr at the given priority.
 * period != 0 makes the task periodic.  Returns the public task handle
 * on success; panics if allocation or registration fails.
 */
task_t *new_task(void (*fptr)(void), uint8_t priority, uint32_t period) {
    task_ctrl *task = create_task(fptr, priority, period);
    if (task == NULL) {
        goto fail;
    }

    int ret = register_task(task, period);
    if (ret != 0) {
        goto fail2;
    }

    total_tasks += 1;

    return get_task_t(task);

fail2:
    /* NOTE(review): stack released with free() but the control block
     * with kfree() — confirm these really are different allocators */
    free(task->stack_limit);
    kfree(task);
fail:
    panic_print("Could not allocate task with function pointer 0x%x", fptr);
    /* BUGFIX: panic_print presumably never returns, but new_task is
     * non-void; falling off the end with the value used is undefined
     * behavior.  Return explicitly in case it ever does. */
    return NULL;
}
/*
 * Top-level SVC dispatcher (classic-ARM variant, placed in the .kernel
 * section).  Decodes the SVC number from the instruction that trapped
 * and routes to the scheduler or mutex service-call handlers, placing
 * the return value back in the stacked r0.
 */
void __attribute__((section(".kernel"))) svc_handler(struct stacked_registers *registers) {
    uintptr_t pc;
    uint32_t svc_number;

    /* Initial task switch! */
    /* First trap from user mode before the scheduler is running:
     * treat it as a yield to kick off task switching */
    if (!task_switching && (registers->cpsr & CPSR_MODE) == CPSR_MODE_USR) {
        sched_service_call(SVC_YIELD);
        return;
    }

    pc = (uintptr_t) registers->pc;

    /* Extract SVC number from ARM or Thumb SVC instruction */
    if (registers->cpsr & CPSR_THUMB) {
        /* Thumb SVC is a 2-byte instruction; the 8-bit immediate is
         * its low byte */
        uint8_t *svc = (uint8_t *) (pc - 2);
        svc_number = svc[0];
    }
    else {
        /* ARM SVC is a 4-byte instruction; the 24-bit immediate
         * occupies the three low bytes (little-endian read) */
        uint8_t *svc = (uint8_t *) (pc - 4);
        svc_number = svc[0] | (svc[1] << 8) | (svc[2] << 16);
    }

    switch (svc_number) {
    case SVC_YIELD:
    case SVC_END_TASK:
    case SVC_REGISTER_TASK:
    case SVC_TASK_SWITCH:
        /* Scheduler calls take up to two arguments (r0, r1) and
         * return in r0 */
        registers->r0 = sched_service_call(svc_number, registers->r0, registers->r1);
        break;
    case SVC_ACQUIRE:
    case SVC_RELEASE:
        /* Mutex calls take one argument (r0) and return in r0 */
        registers->r0 = mutex_service_call(svc_number, registers->r0);
        break;
    default:
        panic_print("Unknown SVC: %d", svc_number);
        break;
    }
}
/*
 * Return node to the buddy allocator's free list for its order,
 * coalescing with its buddy block (the other half of the next-larger
 * block) when that buddy is also free.  Merging recurses upward until
 * a buddy is found in use or the maximum order is reached.
 *
 * node must carry a valid MM_MAGIC header; otherwise the call is
 * logged and ignored.
 */
void buddy_merge(struct heapnode *node, struct buddy *buddy) {
    if (node->header.magic != MM_MAGIC) {
        fprintf(stderr, "OOPS: mm: attempted to merge invalid node 0x%x\r\n", node);
        return;
    }

    uint8_t order = node->header.order;

    /* There is only one node of maximum size */
    if (order == buddy->max_order) {
        buddy->list[buddy->max_order] = node;
        node->next = NULL;
        return;
    }

    /* Our buddy node covers the other half of this order of memory,
     * thus it will have the order bit in the opposite state of ours.
     * Note: this is not necessarily free */
    struct heapnode *buddy_node = (struct heapnode *) ((uintptr_t) node ^ (1 << order));

    struct heapnode *curr_node = buddy->list[order];
    struct heapnode *prev_node = NULL;

    /* Look for node and buddy */
    /* One pass over the free list records the position (and
     * predecessor, for unlinking) of both node and buddy_node */
    uint8_t found = 0;
    struct heapnode *node_curr_node = NULL;
    struct heapnode *node_prev_node = NULL;
    struct heapnode *buddy_curr_node = NULL;
    struct heapnode *buddy_prev_node = NULL;
    while (found < 2 && curr_node != NULL) {
        if (curr_node == node) {
            node_curr_node = curr_node;
            node_prev_node = prev_node;
            found += 1;
        }
        else if (curr_node == buddy_node) {
            buddy_curr_node = curr_node;
            buddy_prev_node = prev_node;
            found += 1;
        }

        prev_node = curr_node;
        curr_node = curr_node->next;
    }

    /* Buddy not free */
    if (buddy_curr_node == NULL) {
        /* If node already in list, leave it,
         * otherwise add it */
        if (node_curr_node == NULL) {
            /* Push node onto the head of its order's free list */
            node->next = buddy->list[order];
            buddy->list[order] = node;
        }

        return;
    }
    else {    /* Buddy free */
        /* A free buddy at a different order means the header or the
         * free list is corrupt */
        if (buddy_node->header.order != order) {
            panic_print("mm: node->header.order != buddy_node->header.order, " "node: 0x%x node->header.order: %d buddy_node: 0x%x, " "buddy_node->header.order: %d", node, node->header.order, buddy_node, buddy_node->header.order);
        }

        /* Remove buddy from list */
        if (buddy_prev_node == NULL) {
            buddy->list[order] = buddy_curr_node->next;
        }
        else {
            buddy_prev_node->next = buddy_curr_node->next;
        }

        /* Remove node if found */
        if (node_curr_node != NULL) {
            /* Remove node from list */
            if (node_prev_node == NULL) {
                buddy->list[order] = node_curr_node->next;
            }
            else {
                node_prev_node->next = node_curr_node->next;
            }
        }

        /* Set parent node as the less of the two buddies */
        /* The lower address is the start of the combined block */
        node = node < buddy_curr_node ? node : buddy_curr_node;

        /* Merge the nodes simply by increasing the order
         * of the smaller node. */
        uint8_t new_order = order + 1;
        node->header.order = new_order;

        /* Put on higher order list */
        node->next = buddy->list[new_order];
        buddy->list[new_order] = node;

        /* Recurse */
        /* Try to coalesce the merged block with ITS buddy */
        buddy_merge(node, buddy);
    }
}
void am335x_dmtimer1ms_init_systick(void) { const void *fdt = fdtparse_get_blob(); struct am335x_dmtimer_1ms *regs; int offset, len; fdt32_t *cell; const struct fdt_property *interrupts; uint32_t interrupt_num; uint32_t tldr_val; /* HACK: Simply use the first DMTimer 1ms */ offset = fdt_node_offset_by_compatible(fdt, -1, AM335X_DMTIMER_1MS_COMPAT); if (offset < 0) { panic_print("DMTimer 1ms not found"); } regs = fdtparse_get_addr32(fdt, offset, "regs"); if (!regs) { panic_print("DMTimer 1ms registers not found"); } /* Get interrupt number */ interrupts = fdt_get_property(fdt, offset, "interrupts", &len); /* Make sure there is room for one interrupt */ if (len < 0 || len < sizeof(fdt32_t)) { panic_print("Unable to get DMTimer 1ms interrupt number"); } cell = (fdt32_t *) interrupts->data; /* There is a single interrupt */ interrupt_num = fdt32_to_cpu(cell[0]); /* Select master oscillator as clock */ if (clocks_set_param(fdt, offset, "ti,clock-select", AM335X_DMTIMER_1MS_CLK_M_OSC)) { panic_print("Unable to set DMTimer 1ms clock source"); } /* Enable module clock */ if (clocks_enable(fdt, offset, "clocks")) { panic_print("Unable to enable DMTimer 1ms module clock"); } if (!TIMER_CNT_PER_SYSTICK) { panic_print("Unable to achieve system tick frequency"); } /* * We want TIMER_CNT_PER_SYSTICK timer counts between the * load value and overflow. */ tldr_val = (1LL << 32) - TIMER_CNT_PER_SYSTICK; raw_mem_write(®s->tldr, tldr_val); /* Force reload of counter */ raw_mem_write(®s->ttgr, 1); /* Enable overflow interrupt */ raw_mem_set_bits(®s->tier, AM335X_DMTIMER_TIER_OVF_IT_EN); /* Register and enable interrupt. 
Pass registers to handler */ if (am335x_interrupt_register(fdt, offset, interrupt_num, dmtimer1ms_systick_handler, regs)) { panic_print("Unable to register DMTimer 1ms interrupt"); } if (am335x_interrupt_enable(fdt, offset, interrupt_num)) { panic_print("Unable to enable DMTimer 1ms interrupt"); } /* Start timer free running */ raw_mem_write(®s->tclr, AM335X_DMTIMER_TCLR_ST | AM335X_DMTIMER_TCLR_AR); }