/*
 * trace_add() - append one trace entry to this CPU's trace ring buffer.
 *
 * @trace: caller-built entry; header fields (type, len, timestamp, cpu)
 *         are filled in here before the copy.
 * @type:  entry type tag (must not be the internal TRACE_REPEAT /
 *         TRACE_OVERFLOW types - asserted under DEBUG_TRACES).
 * @len:   payload length in bytes; rounded up to a multiple of 8.
 *
 * Single producer per CPU (serialized by ti->lock) with lock-free
 * readers; ordering between the start/entry/end updates is enforced
 * with lwsync() write barriers.  Ring fields are stored big-endian.
 */
void trace_add(union trace *trace, u8 type, u16 len)
{
	struct trace_info *ti = this_cpu()->trace;
	unsigned int tsz;

	trace->hdr.type = type;
	/* Lengths are stored divided by 8 so a single byte covers 2KB. */
	trace->hdr.len_div_8 = (len + 7) >> 3;
	tsz = trace->hdr.len_div_8 << 3;

#ifdef DEBUG_TRACES
	assert(tsz >= sizeof(trace->hdr));
	assert(tsz <= sizeof(*trace));
	assert(trace->hdr.type != TRACE_REPEAT);
	assert(trace->hdr.type != TRACE_OVERFLOW);
#endif
	/* Skip traces not enabled in the debug descriptor */
	if (!((1ul << trace->hdr.type) & debug_descriptor.trace_mask))
		return;

	trace->hdr.timestamp = cpu_to_be64(mftb());
	trace->hdr.cpu = cpu_to_be16(this_cpu()->server_no);

	lock(&ti->lock);

	/* Throw away old entries before we overwrite them. */
	while ((be64_to_cpu(ti->tb.start) + be64_to_cpu(ti->tb.mask) + 1)
	       < (be64_to_cpu(ti->tb.end) + tsz)) {
		struct trace_hdr *hdr;

		/* Masking the big-endian words before conversion is safe:
		 * byte-swap commutes with the bitwise AND. */
		hdr = (void *)ti->tb.buf
			+ be64_to_cpu(ti->tb.start & ti->tb.mask);
		ti->tb.start = cpu_to_be64(be64_to_cpu(ti->tb.start) +
					   (hdr->len_div_8 << 3));
	}

	/* Must update ->start before we rewrite new entries. */
	lwsync(); /* write barrier */

	/* Check for duplicates... */
	if (!handle_repeat(&ti->tb, trace)) {
		/* This may go off end, and that's why ti->tb.buf is oversize */
		memcpy(ti->tb.buf + be64_to_cpu(ti->tb.end & ti->tb.mask),
		       trace, tsz);
		ti->tb.last = ti->tb.end;
		lwsync(); /* write barrier: write entry before exposing */
		ti->tb.end = cpu_to_be64(be64_to_cpu(ti->tb.end) + tsz);
	}
	unlock(&ti->lock);
}
/*
 * inmem_write() - push one character into the in-memory console ring.
 *
 * NUL bytes are dropped.  con_in is the producer index; when it wraps,
 * con_wrapped records that the whole buffer now holds valid data.  If
 * the producer index catches up to the reader index (con_out), the
 * oldest character is dropped by pushing con_out forward one slot.
 */
static void inmem_write(char c)
{
	uint32_t opos;

	if (!c)
		return;
	con_buf[con_in++] = c;
	if (con_in >= INMEM_CON_OUT_LEN) {
		con_in = 0;
		con_wrapped = true;
	}

	/*
	 * We must always re-generate memcons.out_pos because
	 * under some circumstances, the console script will
	 * use a broken putmemproc that does RMW on the full
	 * 8 bytes containing out_pos and in_prod, thus corrupting
	 * out_pos
	 */
	opos = con_in;
	if (con_wrapped)
		opos |= MEMCONS_OUT_POS_WRAP;
	/* Order the character store before publishing the new position. */
	lwsync();
	memcons.out_pos = opos;

	/* If head reaches tail, push tail around & drop chars */
	if (con_in == con_out)
		con_out = (con_in + 1) % INMEM_CON_OUT_LEN;
}
/*
 * Completion callback for an op-panel write: tear down the DMA TCE
 * mapping, then clear the outstanding-request pointer and free the
 * FSP message.  The lwsync() orders the unmap before op_req is seen
 * as NULL (a NULL op_req signals that a new request may be issued).
 */
static void __op_panel_write_complete(struct fsp_msg *msg)
{
	fsp_tce_unmap(PSI_DMA_OP_PANEL_MISC, 0x1000);
	lwsync();
	op_req = NULL;

	fsp_freemsg(msg);
}
/////////////////////////////////////////////////////////////////////////////// // Handling commit error log. /////////////////////////////////////////////////////////////////////////////// void ErrlManager::commitErrLog(errlHndl_t& io_err, compId_t i_committerComp ) { TRACDCOMP( g_trac_errl, ENTER_MRK"ErrlManager::commitErrLog" ); do { if (io_err == NULL) { // put out warning trace TRACFCOMP(g_trac_errl, ERR_MRK "commitErrLog() - NULL pointer"); break; } TRACFCOMP(g_trac_errl, "commitErrLog() called by %.4X for eid=%.8x, Reasoncode=%.4X", i_committerComp, io_err->eid(), io_err->reasonCode() ); if (io_err->sev() != ERRORLOG::ERRL_SEV_INFORMATIONAL) { iv_nonInfoCommitted = true; lwsync(); } //Ask ErrlEntry to check for any special deferred deconfigure callouts io_err->deferredDeconfigure(); //Offload the error log to the errlog message queue sendErrlogToMessageQueue ( io_err, i_committerComp ); io_err = NULL; } while( 0 ); TRACDCOMP( g_trac_errl, EXIT_MRK"ErrlManager::commitErrLog" ); return; }
/**
 * Register the HWAS callout-processing function pointer.
 *
 * @param[in] i_fn  function HWAS provides to process callouts
 *
 * The lwsync() orders all prior HWAS-construction stores before the
 * pointer store, so any thread observing a non-NULL pointer also sees
 * a fully constructed HWAS.
 */
void ErrlManager::setHwasProcessCalloutFn(HWAS::processCalloutFn i_fn)
{
    // sync to ensure that all of HWAS is fully constructed BEFORE we
    // write this function pointer
    lwsync();
    ERRORLOG::theErrlManager::instance().iv_hwasProcessCalloutFn = i_fn;
}
/*
 * cpu_wait_job() - wait (polling with short timebase delays) until a
 * job queued on another CPU has completed.
 *
 * @job:     job to wait for; NULL is a no-op.
 * @free_it: if true, free the job structure once complete.
 *
 * The lwsync() after the loop acts as a read barrier: the caller's
 * reads of the job's results cannot be satisfied from before
 * ->complete was observed set.  (lwsync() also forces the compiler
 * to re-load job->complete on each loop iteration.)
 */
void cpu_wait_job(struct cpu_job *job, bool free_it)
{
	unsigned long ticks = usecs_to_tb(5);

	if (!job)
		return;

	while (!job->complete) {
		time_wait(ticks);
		lwsync();
	}
	lwsync(); /* job stores complete before the caller's reads */
	smt_medium();

	if (free_it)
		free(job);
}
/* To avoid bloating each entry, repeats are actually specific entries.
 * tb->last points to the last (non-repeat) entry. */
/*
 * handle_repeat() - collapse a duplicate trace entry into a repeat record.
 *
 * Returns true if @trace matched the previous entry and was accounted
 * for (either by bumping an existing TRACE_REPEAT record or by emitting
 * a new one); false if the caller must write @trace as a normal entry.
 */
static bool handle_repeat(struct tracebuf *tb, const union trace *trace)
{
	struct trace_hdr *prev;
	struct trace_repeat *rpt;
	u32 len;

	prev = (void *)tb->buf + be64_to_cpu(tb->last & tb->mask);

	/* Quick reject on the header: type, length and originating CPU. */
	if (prev->type != trace->hdr.type
	    || prev->len_div_8 != trace->hdr.len_div_8
	    || prev->cpu != trace->hdr.cpu)
		return false;

	len = prev->len_div_8 << 3;
	/* Compare payloads (everything after the common header). */
	if (memcmp(prev + 1, &trace->hdr + 1, len - sizeof(*prev)) != 0)
		return false;

	/* If they've consumed prev entry, don't repeat. */
	if (be64_to_cpu(tb->last) < be64_to_cpu(tb->start))
		return false;

	/* OK, it's a duplicate.  Do we already have repeat? */
	if (be64_to_cpu(tb->last) + len != be64_to_cpu(tb->end)) {
		u64 pos = be64_to_cpu(tb->last) + len;
		/* FIXME: Reader is not protected from seeing this! */
		rpt = (void *)tb->buf + (pos & be64_to_cpu(tb->mask));
		assert(pos + rpt->len_div_8*8 == be64_to_cpu(tb->end));
		assert(rpt->type == TRACE_REPEAT);

		/* If this repeat entry is full, don't repeat. */
		if (be16_to_cpu(rpt->num) == 0xFFFF)
			return false;

		rpt->num = cpu_to_be16(be16_to_cpu(rpt->num) + 1);
		rpt->timestamp = trace->hdr.timestamp;
		return true;
	}

	/*
	 * Generate repeat entry: it's the smallest possible entry, so we
	 * must have eliminated old entries.
	 */
	assert(trace->hdr.len_div_8 * 8 >= sizeof(*rpt));

	rpt = (void *)tb->buf + be64_to_cpu(tb->end & tb->mask);
	rpt->timestamp = trace->hdr.timestamp;
	rpt->type = TRACE_REPEAT;
	rpt->len_div_8 = sizeof(*rpt) >> 3;
	rpt->cpu = trace->hdr.cpu;
	rpt->prev_len = cpu_to_be16(trace->hdr.len_div_8 << 3);
	rpt->num = cpu_to_be16(1);
	lwsync(); /* write barrier: complete repeat record before exposing */
	tb->end = cpu_to_be64(be64_to_cpu(tb->end) + sizeof(*rpt));
	return true;
}
/**
 * Replace a trace-buffer Entry with a copy at a new location, fixing up
 * the doubly-linked list around it.
 *
 * Updates are ordered so a concurrent reader never follows a
 * half-linked entry: the 'next' side (next->prev, or comp->iv_last) is
 * written first, then - after an lwsync() - the 'prev' side
 * (prev->next, or the component's iv_first via the consumer-locked
 * op).  If the consumerOp on iv_first loses a race (iv_first no longer
 * equals 'from'), the whole sequence is retried.
 *
 * @param[in] from  entry being replaced
 * @param[in] to    destination entry (already allocated, 'from'-sized)
 */
void Daemon::replaceEntry(Entry* from, Entry* to)
{
    do
    {
        // Copy entry content to new entry.
        memcpy(to, from, from->size + sizeof(Entry));

        // Update next object's pointer.
        if (to->next)
        {
            to->next->prev = to;
        }
        else
        {
            to->comp->iv_last = to;
        }

        lwsync(); // Ensure pointer update is globally visible
                  // (to order before 'prev' object updates).

        // Update prev object's pointer.
        // Buffer ensures that an entries "next" is written before
        // the "next->prev", so we can be certain that if to->prev
        // then to->prev->next is finalized.
        if (to->prev)
        {
            to->prev->next = to;
        }
        else
        // If there is no previous, this is the first (most recent)
        // for the component, so update the component object.
        {
            Buffer* b = iv_service->iv_buffers[to->comp->iv_bufferType];

            // Need to toggle the consumer lock on this one, so use
            // the consumerOp to move the compoment->first from
            // 'from' to 'to'.
            //
            // If it fails (first != from anymore) then retry this
            // whole sequence.
            if (!b->consumerOp(&to->comp->iv_first, from,
                               &to->comp->iv_first, to))
            {
                continue;
            }
        }

        // Successfully updated everything, break from loop.
        break;

    } while (1);
}
/*
 * bm_mc_commit() - launch a previously started BMan management command.
 *
 * The lwsync() orders all earlier stores into the command descriptor
 * before the verb byte is written: hardware treats the verb (merged
 * with the current valid bit) as the "go" flag, so it must be
 * published last.  The command line is then flushed and the expected
 * response slot is touched read-only to warm the cache.
 */
void bm_mc_commit(struct bm_portal *portal, uint8_t myverb)
{
	register struct bm_mc *mc = &portal->mc;

	ASSERT_COND(mc->state == mc_user);
	lwsync();
	mc->cr->__dont_write_directly__verb = (uint8_t)(myverb | mc->vbit);
	dcbf_64(mc->cr);		/* flush command to hardware */
	dcbit_ro(mc->rr + mc->rridx);	/* prefetch the response slot */
#ifdef BM_CHECKING
	mc->state = mc_hw;
#endif /* BM_CHECKING */
}
/*
 * inmem_read() - drain up to @req characters from the memory-console
 * input ring into @buf.
 *
 * Returns the number of characters actually copied (0 when the ring is
 * empty).  The lwsync() before advancing in_cons orders the data read
 * before the slot is handed back to the producer.
 */
static size_t inmem_read(char *buf, size_t req)
{
	size_t read = 0;
	char *ibuf = (char *)memcons.ibuf_phys;

	while (req && memcons.in_prod != memcons.in_cons) {
		*(buf++) = ibuf[memcons.in_cons];
		lwsync(); /* consume the char before freeing the slot */
		memcons.in_cons = (memcons.in_cons + 1) % INMEM_CON_IN_LEN;
		req--;
		read++;
	}
	return read;
}
/*
 * bm_rcr_pce_commit() - commit the in-progress RCR entry in
 * "producer cache-enabled" (PCE) mode: the verb is written into the
 * cacheable ring entry first, then - after an lwsync() write barrier -
 * the new producer index is exposed to hardware via the RCR_PI
 * register.
 */
void bm_rcr_pce_commit(struct bm_portal *portal, uint8_t myverb)
{
	register struct bm_rcr *rcr = &portal->rcr;

	ASSERT_COND(rcr->busy);
	ASSERT_COND(rcr->pmode == e_BmPortalPCE);
	rcr->cursor->__dont_write_directly__verb = (uint8_t)(myverb | rcr->vbit);
	RCR_INC(rcr);
	rcr->available--;
	lwsync(); /* entry stores before the producer-index update */
	bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
#ifdef BM_CHECKING
	rcr->busy = 0;
#endif /* BM_CHECKING */
}
/*
 * bm_rcr_pvb_commit() - commit the in-progress RCR entry in
 * "producer valid-bit" (PVB) mode: hardware consumes the entry as
 * soon as it observes the verb's valid bit, so the lwsync() must
 * order all earlier stores to the entry before the verb byte itself;
 * the cache line is then flushed with dcbf_64().
 */
void bm_rcr_pvb_commit(struct bm_portal *portal, uint8_t myverb)
{
	register struct bm_rcr *rcr = &portal->rcr;
	struct bm_rcr_entry *rcursor;

	ASSERT_COND(rcr->busy);
	ASSERT_COND(rcr->pmode == e_BmPortalPVB);
	lwsync(); /* entry body before the verb/valid-bit store below */
	rcursor = rcr->cursor;
	rcursor->__dont_write_directly__verb = (uint8_t)(myverb | rcr->vbit);
	dcbf_64(rcursor);
	RCR_INC(rcr);
	rcr->available--;
#ifdef BM_CHECKING
	rcr->busy = 0;
#endif /* BM_CHECKING */
}
/**
 * Hostboot kernel entry point for the boot CPU.
 *
 * Bootstraps the kernel subsystems in order (C++ runtime, secureboot
 * data, memory, CPU, init task), then releases the secondary threads.
 * The lwsync() before setting kernel_other_thread_spinlock publishes
 * all bootstrap stores before the other threads can observe the
 * release flag.
 *
 * Never returns: ends in the task dispatcher.
 */
int main()
{
    printk("Booting %s kernel...\n\n", "Hostboot");
    printk("CPU=%s\n", ProcessorCoreTypeStrings[CpuID::getCpuType()]);
    MAGIC_INST_PRINT_ISTEP(6,2);

    // Erase task-pointer so that TaskManager::getCurrentTask() returns NULL.
    setSPRG3(NULL);

    Kernel& kernel = Singleton<Kernel>::instance();
    kernel.cppBootstrap();

    // Get pointer to BL and HB comm data
    const auto l_pBltoHbData = getBlToHbData();
    if ( l_pBltoHbData != nullptr )
    {
        printk("Valid BlToHbData found at 0x%lX\n",
               reinterpret_cast<uint64_t>(l_pBltoHbData));
        // Initialize Secureboot Data class
        g_BlToHbDataManager.initValid(*l_pBltoHbData);
    }
    else
    {
        printk("BL to HB commun invalid\n");
        // Force invalidation of securebootdata
        g_BlToHbDataManager.initInvalid();
    }

    kernel.memBootstrap();
    kernel.cpuBootstrap();

    // Let FSP/BMC know that Hostboot is now running
    KernelMisc::setHbScratchStatus(KernelMisc::HB_RUNNING);

    kernel.inittaskBootstrap();

    // Ready to let the other CPUs go.
    lwsync();
    kernel_other_thread_spinlock = 1;

    kernel_dispatch_task(); // no return.

    while(1);

    return 0;
}
/*
 * qbman_swp_enqueue_array_mode() - enqueue one frame descriptor via the
 * EQCR in array mode.
 *
 * Returns 0 on success or -EBUSY when EQAR grants no slot.  The entry
 * body is written first; the lwsync() then orders those stores before
 * the verb word (with the valid bit substituted in), which is what the
 * hardware polls for.
 */
static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

	pr_debug("EQAR=%08x\n", eqar);
	if (!EQAR_SUCCESS(eqar))
		return -EBUSY;
	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	/* Word 0 (the verb) is deliberately skipped here. */
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));
	/* Set the verb byte, have to substitute in the valid-bit */
	lwsync();
	p[0] = cl[0] | EQAR_VB(eqar);
	qbman_cena_write_complete_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	return 0;
}
/*
 * qbman_swp_enqueue_ring_mode() - enqueue one frame descriptor via the
 * EQCR in ring mode.
 *
 * If the cached 'available' count is exhausted, the consumer index is
 * re-read from hardware to reclaim completed slots; returns -EBUSY if
 * the ring is still full.  The ring indexes 8 entries (pi & 7) while
 * the producer index counts modulo 16 so full/empty can be told apart;
 * the valid bit flips each time the ring wraps.
 */
static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci;
	uint8_t diff;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & 0xF;
		diff = qm_cyc_diff(QBMAN_EQCR_SIZE, eqcr_ci, s->eqcr.ci);
		s->eqcr.available += diff;
		if (!diff)
			return -EBUSY;
	}
	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));
	lwsync(); /* entry body before the verb/valid-bit below */
	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->eqcr.pi_vb;
	qbman_cena_write_complete_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
	s->eqcr.pi++;
	s->eqcr.pi &= 0xF;
	s->eqcr.available--;
	if (!(s->eqcr.pi & 7))
		s->eqcr.pi_vb ^= QB_VALID_BIT;
	return 0;
}
/*
 * qbman_swp_enqueue_multiple_desc() - enqueue up to @num_frames frame
 * descriptors, each with its own enqueue descriptor, in one pass over
 * the EQCR ring.
 *
 * Returns the number of frames actually enqueued (0 when the ring is
 * full).  Sequence: fill all entry bodies, one lwsync(), then write all
 * verb words (publishing the entries to hardware), and finally flush
 * the touched cache lines back-to-back with dcbf.
 */
int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
				    const struct qbman_eq_desc *d,
				    const struct qbman_fd *fd,
				    int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi;
	uint8_t diff;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	/* Reclaim completed slots from hardware if our cache ran dry. */
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & 0xF;
		diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
				   eqcr_ci, s->eqcr.ci);
		s->eqcr.available += diff;
		if (!diff)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
		cl = qb_cl(&d[i]);
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		eqcr_pi++;
		eqcr_pi &= 0xF;
	}

	lwsync(); /* all entry bodies before any verb/valid-bit */

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
		cl = qb_cl(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		eqcr_pi &= 0xF;
		/* Valid bit flips on every 8-entry ring wrap. */
		if (!(eqcr_pi & 7))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (uint64_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf((uint64_t *)(addr_cena +
			QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
		eqcr_pi++;
		eqcr_pi &= 0xF;
	}
	s->eqcr.pi = eqcr_pi;

	return num_enqueued;
}
inline void MacroAssembler::membar(int bits) { // TODO: use elemental_membar(bits) for Power 8 and disable optimization of acquire-release // (Matcher::post_membar_release where we use PPC64_ONLY(xop == Op_MemBarRelease ||)) if (bits & StoreLoad) sync(); else lwsync(); }
/*
 * cpu_poll_job() - non-blocking check whether a queued job has
 * completed.  The lwsync() ensures that, once this returns true, the
 * caller's subsequent reads of the job's results are not satisfied
 * from before the completion store became visible.
 *
 * NOTE(review): unlike cpu_wait_job(), @job is not NULL-checked here -
 * presumably callers must pass a valid job; confirm against callers.
 */
bool cpu_poll_job(struct cpu_job *job)
{
	lwsync();
	return job->complete;
}