// Sleep the calling thread until an absolute timebase value.
//
// The calling thread's built-in timer is programmed with the absolute
// timeout and scheduled, the thread is removed from the run queue, and the
// scheduler is invoked to switch away.  Execution resumes after the wakeup
// (presumably when the timer handler re-queues the thread — NOTE(review):
// the wakeup path is outside this block; confirm against the timer core),
// at which point the timer bookkeeping flags are cleared.
//
// \param time  Absolute SsxTimebase value to sleep until.
//
// \retval SSX_OK  Success.  With SSX_ERROR_CHECK_API enabled, the call
//                 errors out unless made from thread context.
int ssx_sleep_absolute(SsxTimebase time)
{
    SsxMachineContext ctx;
    SsxThread *current;

    if (SSX_ERROR_CHECK_API) {
        SSX_ERROR_UNLESS_THREAD_CONTEXT();
    }

    ssx_critical_section_enter(SSX_NONCRITICAL, &ctx);

    current = (SsxThread *)__ssx_current_thread;

    // Arm the per-thread timer for the absolute wakeup time and mark the
    // thread as pending on a timer.
    current->timer.timeout = time;
    __ssx_timer_schedule(&(current->timer));
    current->flags |= SSX_THREAD_FLAG_TIMER_PEND;

    SSX_TRACE_THREAD_SLEEP(current->priority);

    // Take ourselves off the run queue and switch to another thread.
    __ssx_thread_queue_delete(&__ssx_run_queue, current->priority);
    __ssx_schedule();

    // Control returns here once the thread runs again; clear the timer
    // bookkeeping flags before leaving the critical section.
    current->flags &= ~(SSX_THREAD_FLAG_TIMER_PEND | SSX_THREAD_FLAG_TIMED_OUT);

    ssx_critical_section_exit(&ctx);

    return SSX_OK;
}
// Resume a suspended thread.
//
// If the thread is not currently mapped it is mapped at its priority and
// the scheduler is invoked; a thread that is already mapped is left alone.
//
// \param thread  The thread to resume; must be active, and its priority
//                slot must be free (checked when SSX_ERROR_CHECK_API).
//
// \retval SSX_OK  Success.
int ssx_thread_resume(SsxThread *thread)
{
    SsxMachineContext context;

    if (SSX_ERROR_CHECK_API) {
        SSX_ERROR_IF_CRITICAL_INTERRUPT_CONTEXT();
        SSX_ERROR_IF(thread == 0, SSX_INVALID_THREAD_AT_RESUME1);
    }

    ssx_critical_section_enter(SSX_NONCRITICAL, &context);

    if (SSX_ERROR_CHECK_API) {
        SSX_ERROR_IF_CRITICAL(!__ssx_thread_is_active(thread),
                              SSX_INVALID_THREAD_AT_RESUME2,
                              &context);
    }

    if (!__ssx_thread_is_mapped(thread)) {

        // The priority slot must be vacant before the thread can occupy it.
        if (SSX_ERROR_CHECK_API) {
            SSX_ERROR_IF_CRITICAL(__ssx_priority_map[thread->priority] != 0,
                                  SSX_PRIORITY_IN_USE_AT_RESUME,
                                  &context);
        }

        __ssx_thread_map(thread);
        __ssx_schedule();
    }

    ssx_critical_section_exit(&context);

    return SSX_OK;
}
// Suspend a thread.
//
// If the thread is currently mapped it is unmapped and the scheduler is
// invoked; a thread that is already unmapped is left alone.
//
// \param thread  The thread to suspend; must be active (checked when
//                SSX_ERROR_CHECK_API is enabled).
//
// \retval SSX_OK  Success.
int ssx_thread_suspend(SsxThread *thread)
{
    SsxMachineContext context;

    if (SSX_ERROR_CHECK_API) {
        SSX_ERROR_IF_CRITICAL_INTERRUPT_CONTEXT();
        SSX_ERROR_IF((thread == 0), SSX_INVALID_THREAD_AT_SUSPEND1);
    }

    ssx_critical_section_enter(SSX_NONCRITICAL, &context);

    if (SSX_ERROR_CHECK_API) {
        SSX_ERROR_IF_CRITICAL(!__ssx_thread_is_active(thread),
                              SSX_INVALID_THREAD_AT_SUSPEND2,
                              &context);
    }

    if (__ssx_thread_is_mapped(thread)) {

        SSX_TRACE_THREAD_SUSPENDED(thread->priority);

        __ssx_thread_unmap(thread);
        __ssx_schedule();
    }

    ssx_critical_section_exit(&context);

    return SSX_OK;
}
// Internal worker: delete a thread or mark it completed.
//
// Unmaps the thread if it was mapped, cancels the thread's built-in timer,
// and records the final state.  If the thread had been mapped, a scheduling
// decision is forced; when the victim is the currently running thread,
// __ssx_current_thread is zeroed before the call to __ssx_schedule().
// NOTE(review): presumably this prevents the context switch from saving
// state into the dead thread — confirm against __ssx_schedule().
//
// \param thread       The thread being removed from the scheduler.
// \param final_state  Either SSX_THREAD_STATE_DELETED or the "completed"
//                     state (only the trace call distinguishes them here).
void __ssx_thread_delete(SsxThread *thread, SsxThreadState final_state)
{
    SsxMachineContext ctx;
    int mapped;

    ssx_critical_section_enter(SSX_NONCRITICAL, &ctx);

    // Remember whether the thread was mapped before tearing it down; the
    // scheduler only needs to run if a mapped thread disappeared.
    mapped = __ssx_thread_is_mapped(thread);

    if (mapped) {
        __ssx_thread_unmap(thread);
    }

    __ssx_timer_cancel(&(thread->timer));
    thread->state = final_state;

    if (mapped) {

        if (SSX_KERNEL_TRACE_ENABLE) {
            if (final_state == SSX_THREAD_STATE_DELETED) {
                SSX_TRACE_THREAD_DELETED(thread->priority);
            } else {
                SSX_TRACE_THREAD_COMPLETED(thread->priority);
            }
        }

        if (thread == __ssx_current_thread) {
            __ssx_current_thread = 0;
        }

        __ssx_schedule();
    }

    ssx_critical_section_exit(&ctx);
}
// Atomically swap the priorities of two threads.
//
// Both threads are unmapped (if mapped), their priorities exchanged, and
// then remapped in their original mapped/unmapped states, followed by a
// scheduling decision.  Swapping a thread with itself is a no-op.
//
// Error checking rejects the case where exactly one thread is mapped and
// the other (unmapped) thread's priority slot is already occupied by some
// third thread — the remap would then collide.
//
// \param thread_a  First thread; must be non-NULL.
// \param thread_b  Second thread; must be non-NULL.
//
// \retval SSX_OK  Success.
int ssx_thread_priority_swap(SsxThread* thread_a, SsxThread* thread_b)
{
    SsxMachineContext ctx;
    SsxThreadPriority priority_a, priority_b;
    int mapped_a, mapped_b;

    if (SSX_ERROR_CHECK_API) {
        SSX_ERROR_IF_CRITICAL_INTERRUPT_CONTEXT();
        SSX_ERROR_IF((thread_a == 0) || (thread_b == 0),
                     SSX_INVALID_THREAD_AT_SWAP1);
    }

    ssx_critical_section_enter(SSX_NONCRITICAL, &ctx);

    if (thread_a != thread_b) {

        // Snapshot map state and priorities before any modification.
        mapped_a = __ssx_thread_is_mapped(thread_a);
        mapped_b = __ssx_thread_is_mapped(thread_b);
        priority_a = thread_a->priority;
        priority_b = thread_b->priority;

        if (SSX_ERROR_CHECK_API) {
            int priority_in_use;
            SSX_ERROR_IF_CRITICAL((priority_a > SSX_THREADS) ||
                                  (priority_b > SSX_THREADS),
                                  SSX_INVALID_THREAD_AT_SWAP2, &ctx);
            // If only one of the pair is mapped, its new priority slot
            // must not already be held by another mapped thread.
            priority_in_use =
                (mapped_a && !mapped_b &&
                 (__ssx_thread_at_priority(priority_b) != 0)) ||
                (!mapped_a && mapped_b &&
                 (__ssx_thread_at_priority(priority_a) != 0));
            SSX_ERROR_IF_CRITICAL(priority_in_use,
                                  SSX_PRIORITY_IN_USE_AT_SWAP, &ctx);
        }

        // Unmap both before touching priorities so the priority map never
        // holds a stale entry mid-swap.
        if (mapped_a) {
            __ssx_thread_unmap(thread_a);
        }
        if (mapped_b) {
            __ssx_thread_unmap(thread_b);
        }

        thread_a->priority = priority_b;
        thread_b->priority = priority_a;

        if (mapped_a) {
            __ssx_thread_map(thread_a);
        }
        if (mapped_b) {
            __ssx_thread_map(thread_b);
        }

        __ssx_schedule();
    }

    ssx_critical_section_exit(&ctx);

    return SSX_OK;
}
// Change a thread's priority, optionally returning the old priority.
//
// An unmapped thread simply has its priority field updated.  A mapped
// thread is unmapped, re-prioritized, remapped, and the scheduler is
// invoked — the target slot must be free in that case.  Changing to the
// current priority is a no-op (the old priority is still reported).
//
// \param thread        The thread to modify; must be non-NULL.
// \param new_priority  New priority; must be <= SSX_THREADS.
// \param old_priority  If non-NULL, receives the previous priority.
//
// \retval SSX_OK  Success.
int ssx_thread_priority_change(SsxThread* thread,
                               SsxThreadPriority new_priority,
                               SsxThreadPriority* old_priority)
{
    SsxMachineContext ctx;
    SsxThreadPriority priority;

    if (SSX_ERROR_CHECK_API) {
        SSX_ERROR_IF_CRITICAL_INTERRUPT_CONTEXT();
        SSX_ERROR_IF(thread == 0, SSX_INVALID_THREAD_AT_CHANGE);
        SSX_ERROR_IF(new_priority > SSX_THREADS,
                     SSX_INVALID_ARGUMENT_THREAD_CHANGE);
    }

    ssx_critical_section_enter(SSX_NONCRITICAL, &ctx);

    priority = thread->priority;

    if (priority != new_priority) {

        if (!__ssx_thread_is_mapped(thread)) {

            // Unmapped: no scheduler state to maintain.
            thread->priority = new_priority;

        } else {

            // Mapped: the destination slot must be vacant, and the thread
            // must be unmapped before its priority field changes so the
            // priority map stays consistent.
            if (SSX_ERROR_CHECK_API) {
                SSX_ERROR_IF_CRITICAL(__ssx_priority_map[new_priority] != 0,
                                      SSX_PRIORITY_IN_USE_AT_CHANGE,
                                      &ctx);
            }

            __ssx_thread_unmap(thread);
            thread->priority = new_priority;
            __ssx_thread_map(thread);
            __ssx_schedule();
        }
    }

    if (old_priority) {
        *old_priority = priority;
    }

    ssx_critical_section_exit(&ctx);

    return SSX_OK;
}
// Cancel a timer.
//
// Thin critical-section wrapper around the kernel-internal
// __ssx_timer_cancel(); its return code is passed through to the caller.
//
// \param timer  The timer to cancel; must be non-NULL (checked when
//               SSX_ERROR_CHECK_API is enabled).
//
// \returns The return code of __ssx_timer_cancel().
int ssx_timer_cancel(SsxTimer *timer)
{
    SsxMachineContext context;
    int status = SSX_OK;

    if (SSX_ERROR_CHECK_API) {
        SSX_ERROR_IF_CRITICAL_INTERRUPT_CONTEXT();
        SSX_ERROR_IF(timer == 0, SSX_INVALID_TIMER_AT_CANCEL);
    }

    ssx_critical_section_enter(SSX_NONCRITICAL, &context);
    status = __ssx_timer_cancel(timer);
    ssx_critical_section_exit(&context);

    return status;
}
// Write a 64-bit SCOM register through the PMC OCI-to-PIB (O2P) bridge.
//
// Runs entirely inside an SSX_CRITICAL section because the O2P engine is a
// single shared hardware resource.  If a transaction is already in flight
// the call fails immediately with -SCOM_PROTOCOL_ERROR_PUTSCOM_BUSY.
//
// \param address  SCOM address (the read/write bit is forced to "write").
// \param data     64-bit data to write.
// \param timeout  Polling timeout passed through to poll_scom().
//
// \returns A negative protocol error, a nonzero poll_scom() code, or the
//          hardware o2p_scresp field (0 on success).
int _putscom(uint32_t address, uint64_t data, SsxInterval timeout)
{
    pmc_o2p_addr_reg_t addr;
    pmc_o2p_ctrl_status_reg_t cs;
    SsxMachineContext ctx;
    Uint64 data64;  // union view of the data as one u64 / two u32 words
    int rc;

    ssx_critical_section_enter(SSX_CRITICAL, &ctx);

    // Check for a transaction already ongoing
    cs.value = in32(PMC_O2P_CTRL_STATUS_REG);
    if (cs.fields.o2p_ongoing) {
        ssx_critical_section_exit(&ctx);
        return -SCOM_PROTOCOL_ERROR_PUTSCOM_BUSY;
    }

    // Start the write.  The 'write' bit is cleared in the address.  Here the
    // PIB write starts when the PMC_O2P_SEND_DATA_LO_REG is written, so the
    // register write order below is significant.
    addr.value = address;
    addr.fields.o2p_read_not_write = 0;
    out32(PMC_O2P_ADDR_REG, addr.value);

    data64.value = data;
    out32(PMC_O2P_SEND_DATA_HI_REG, data64.word[0]);
    out32(PMC_O2P_SEND_DATA_LO_REG, data64.word[1]);

    // Poll and return.
    rc = poll_scom(timeout, &cs);

    ssx_critical_section_exit(&ctx);

    if (rc) {
        return rc;
    } else {
        return cs.fields.o2p_scresp;
    }
}
// Read a 64-bit SCOM register through the PMC OCI-to-PIB (O2P) bridge.
//
// Runs entirely inside an SSX_CRITICAL section because the O2P engine is a
// single shared hardware resource.  If a transaction is already in flight
// the call fails immediately with -SCOM_PROTOCOL_ERROR_GETSCOM_BUSY.
//
// \param address  SCOM address (the read/write bit is forced to "read").
// \param data     Receives the 64-bit read data.  Note that *data is
//                 stored unconditionally, even when poll_scom() reports an
//                 error — the caller must check the return code before
//                 trusting the value.
// \param timeout  Polling timeout passed through to poll_scom().
//
// \returns A negative protocol error, a nonzero poll_scom() code, or the
//          hardware o2p_scresp field (0 on success).
int _getscom(uint32_t address, uint64_t *data, SsxInterval timeout)
{
    pmc_o2p_addr_reg_t addr;
    pmc_o2p_ctrl_status_reg_t cs;
    SsxMachineContext ctx;
    Uint64 data64;  // union view of the data as one u64 / two u32 words
    int rc;

    ssx_critical_section_enter(SSX_CRITICAL, &ctx);

    // Check for a transaction already ongoing
    cs.value = in32(PMC_O2P_CTRL_STATUS_REG);
    if (cs.fields.o2p_ongoing) {
        ssx_critical_section_exit(&ctx);
        return -SCOM_PROTOCOL_ERROR_GETSCOM_BUSY;
    }

    // Start the read.  The 'read' bit is forced into the address.  Writing
    // the PMC_O2P_ADDR_REG starts the read.
    addr.value = address;
    addr.fields.o2p_read_not_write = 1;
    out32(PMC_O2P_ADDR_REG, addr.value);

    // Polling and return.
    rc = poll_scom(timeout, &cs);

    // Assemble the 64-bit result from the two receive registers.
    data64.word[0] = in32(PMC_O2P_RECV_DATA_HI_REG);
    data64.word[1] = in32(PMC_O2P_RECV_DATA_LO_REG);
    *data = data64.value;

    ssx_critical_section_exit(&ctx);

    if (rc) {
        return rc;
    } else {
        return cs.fields.o2p_scresp;
    }
}
// Schedule a timer for an absolute timebase value.
//
// Stores the absolute timeout and (re)arm period into the timer and hands
// it to the kernel timer queue under a non-critical critical section.
//
// Bug fix: the original performed the SSX_ERROR_CHECK_API validation
// *after* ssx_critical_section_enter() but used the plain SSX_ERROR_IF
// macros, which return on failure — so a caller error would leave the
// critical section permanently entered (non-critical interrupts disabled).
// The checks are hoisted above the critical-section entry, matching every
// other API in this file (which validates with SSX_ERROR_IF before entry
// and only uses SSX_ERROR_IF_CRITICAL(..., &ctx) inside).
//
// \param timer    The timer to schedule; must be non-NULL.
// \param timeout  Absolute SsxTimebase value at which the timer fires.
// \param period   Rearm interval stored in the timer (0 presumably means
//                 one-shot — NOTE(review): confirm against the timer core).
//
// \retval SSX_OK  Success.  With SSX_ERROR_CHECK_API enabled, errors out
//                 for a NULL timer or when called from a critical
//                 interrupt context.
int ssx_timer_schedule_absolute(SsxTimer *timer,
                                SsxTimebase timeout,
                                SsxInterval period)
{
    SsxMachineContext ctx;

    // Validate BEFORE entering the critical section: SSX_ERROR_IF returns
    // directly on failure and would otherwise leak the critical section.
    if (SSX_ERROR_CHECK_API) {
        SSX_ERROR_IF(timer == 0, SSX_INVALID_TIMER_AT_SCHEDULE);
        SSX_ERROR_IF(__ssx_kernel_context_critical_interrupt(),
                     SSX_ILLEGAL_CONTEXT_TIMER);
    }

    ssx_critical_section_enter(SSX_NONCRITICAL, &ctx);

    timer->timeout = timeout;
    timer->period = period;
    __ssx_timer_schedule(timer);

    ssx_critical_section_exit(&ctx);

    return SSX_OK;
}
// Add a binary-data entry to the kernel trace circular buffer.
//
// Layout of an entry: the raw data bytes (rounded up to 8 and clipped to
// SSX_TRACE_CLIPPED_BINARY_SZ), followed by an 8-byte SsxTraceBinary
// footer holding the timestamp, the string hash, and the data size.
//
// Only the buffer-space *reservation* (advancing the shared offset) and
// the footer store are done inside the critical section; the bulk data
// copy happens afterwards, and the footer's 'complete' bit is set last to
// mark the entry valid.  NOTE(review): readers presumably tolerate
// entries whose data is still being filled in — confirm against the trace
// extraction tooling.
//
// \param i_hash_and_size  Packed word: string hash plus data byte count
//                         (unpacked via footer.parms).
// \param bufp             Pointer to the binary data to record.
void ssx_trace_binary(uint32_t i_hash_and_size, void* bufp)
{
    SsxTraceBinary footer;
    SsxTraceBinary* footer_ptr;
    SsxTraceState state;
    uint64_t* ptr64;
    uint64_t tb64;
    SsxMachineContext ctx;
    uint32_t data_size;
    uint32_t cb_offset;
    uint32_t footer_offset;
    uint8_t* dest;
    uint8_t* src;
    uint32_t index;

    //fill in the footer data
    tb64 = ssx_ext_timebase_get();
    footer.parms.word32 = i_hash_and_size; //this has the size and hash
    state.tbu32 = tb64 >> 32;
    footer.time_format.word32 = tb64 & 0x00000000ffffffffull;
    footer.time_format.format = SSX_TRACE_FORMAT_BINARY;

    //round up to 8 byte boundary
    data_size = (footer.parms.num_bytes + 7) & ~0x00000007ul;

    //limit data size
    if(data_size > SSX_TRACE_CLIPPED_BINARY_SZ)
    {
        data_size = SSX_TRACE_CLIPPED_BINARY_SZ;
    }

    //*****The following operations must be done atomically*****
    ssx_critical_section_enter(SSX_NONCRITICAL, &ctx);

    //load in the offset in the cb for the entry we are adding
    cb_offset = g_ssx_trace_buf.state.offset;

    //Find the offset for the footer (at the end of the entry)
    footer_offset = cb_offset + data_size;

    //calculate the address of the footer
    ptr64 = (uint64_t*)&g_ssx_trace_buf.cb[footer_offset & SSX_TRACE_CB_MASK];

    //calculate the offset for the next entry in the cb
    state.offset = footer_offset + sizeof(SsxTraceBinary);

    //update the cb state (tbu and offset)
    g_ssx_trace_buf.state.word64 = state.word64;

    //write the footer data to the circular buffer including the
    //timesamp, string hash and data size
    *ptr64 = footer.word64;

    //*******************exit the critical section***************
    ssx_critical_section_exit(&ctx);

    //write data to the circular buffer (byte-wise, wrapping via the mask)
    for(src = bufp, index = 0; index < data_size; index++)
    {
        dest = &g_ssx_trace_buf.cb[(cb_offset + index) & SSX_TRACE_CB_MASK];
        *dest = *(src++);
    }

    //Mark the trace entry update as being completed
    footer_ptr = (SsxTraceBinary*)ptr64;
    footer_ptr->parms.complete = 1;
}