/**
 * Convert module text in place from UTF-8 to UTF-16 (host byte order).
 *
 * The incoming buffer is copied aside, @text is cleared, and each decoded
 * code point is appended as one 16-bit unit (BMP) or as a surrogate pair
 * (code points >= U+10000).  Invalid UTF-8 sequences (getUniCharFromUTF8
 * returning 0) are skipped.  A 16-bit NUL is written one unit past the
 * logical end of the buffer, then the size is shrunk back, so the result
 * is also usable as a NUL-terminated UTF-16 string.
 *
 * @param text   buffer holding UTF-8 on entry, UTF-16 on return
 * @param key    unused by this filter
 * @param module unused by this filter
 * @return 0 (this filter reports no errors)
 */
char UTF8UTF16::processText(SWBuf &text, const SWKey *key, const SWModule *module) {
	const unsigned char *from;
	SWBuf orig = text;
	from = (const unsigned char *)orig.c_str();

	// -------------------------------
	text = "";
	while (*from) {
		__u32 ch = getUniCharFromUTF8(&from);

		if (!ch)
			continue;	// invalid UTF-8 sequence; drop it and resync

		if (ch < 0x10000) {
			// BMP code point: a single 16-bit code unit.
			text.setSize(text.size()+2);
			*((__u16 *)(text.getRawData()+(text.size()-2))) = (__u16)ch;
		}
		else {
			// Supplementary-plane code point: encode as a UTF-16
			// surrogate pair.  Fix: the surrogate values
			// (0xD800-0xDFFF) do not fit in a signed 16-bit type,
			// so cast to __u16, not __s16 -- the old signed cast
			// relied on implementation-defined conversion behavior.
			__u16 utf16;
			utf16 = (__u16)((ch - 0x10000) / 0x400 + 0xD800);	// high surrogate
			text.setSize(text.size()+4);
			*((__u16 *)(text.getRawData()+(text.size()-4))) = utf16;
			utf16 = (__u16)((ch - 0x10000) % 0x400 + 0xDC00);	// low surrogate
			*((__u16 *)(text.getRawData()+(text.size()-2))) = utf16;
		}
	}

	// Append a 16-bit terminator just beyond the payload, then shrink the
	// logical size back so size() still reflects only the converted text.
	text.setSize(text.size()+2);
	*((__u16 *)(text.getRawData()+(text.size()-2))) = (__u16)0;
	text.setSize(text.size()-2);

	return 0;
}
void x86_mce_callback_register(x86_mce_callback_t cbfunc) { mc_callback_bank_extended = cbfunc; } /* Machine check recoverable judgement callback handler * It is used to judge whether an UC error is recoverable by software */ static mce_recoverable_t mc_recoverable_scan = NULL; void mce_recoverable_register(mce_recoverable_t cbfunc) { mc_recoverable_scan = cbfunc; } /* Judging whether to Clear Machine Check error bank callback handler * According to Intel latest MCA OS Recovery Writer's Guide, * whether the error MCA bank needs to be cleared is decided by the mca_source * and MCi_status bit value. */ static mce_need_clearbank_t mc_need_clearbank_scan = NULL; void mce_need_clearbank_register(mce_need_clearbank_t cbfunc) { mc_need_clearbank_scan = cbfunc; } /* Utility function to perform MCA bank telemetry readout and to push that * telemetry towards an interested dom0 for logging and diagnosis. * The caller - #MC handler or MCA poll function - must arrange that we * do not migrate cpus. */ /* XXFM Could add overflow counting? */ /* Add out_param clear_bank for Machine Check Handler Caller. * For Intel latest CPU, whether to clear the error bank status needs to * be judged by the callback function defined above. 
*/ mctelem_cookie_t mcheck_mca_logout(enum mca_source who, cpu_banks_t bankmask, struct mca_summary *sp, cpu_banks_t* clear_bank) { struct vcpu *v = current; struct domain *d; uint64_t gstatus, status, addr, misc; struct mcinfo_global mcg; /* on stack */ struct mcinfo_common *mic; struct mcinfo_global *mig; /* on stack */ mctelem_cookie_t mctc = NULL; uint32_t uc = 0, pcc = 0, recover, need_clear = 1 ; struct mc_info *mci = NULL; mctelem_class_t which = MC_URGENT; /* XXXgcc */ unsigned int cpu_nr; int errcnt = 0; int i; enum mca_extinfo cbret = MCA_EXTINFO_IGNORED; cpu_nr = smp_processor_id(); BUG_ON(cpu_nr != v->processor); mca_rdmsrl(MSR_IA32_MCG_STATUS, gstatus); memset(&mcg, 0, sizeof (mcg)); mcg.common.type = MC_TYPE_GLOBAL; mcg.common.size = sizeof (mcg); if (v != NULL && ((d = v->domain) != NULL)) { mcg.mc_domid = d->domain_id; mcg.mc_vcpuid = v->vcpu_id; } else { mcg.mc_domid = -1; mcg.mc_vcpuid = -1; } mcg.mc_gstatus = gstatus; /* MCG_STATUS */ switch (who) { case MCA_MCE_HANDLER: case MCA_MCE_SCAN: mcg.mc_flags = MC_FLAG_MCE; which = MC_URGENT; break; case MCA_POLLER: case MCA_RESET: mcg.mc_flags = MC_FLAG_POLLED; which = MC_NONURGENT; break; case MCA_CMCI_HANDLER: mcg.mc_flags = MC_FLAG_CMCI; which = MC_NONURGENT; break; default: BUG(); } /* Retrieve detector information */ x86_mc_get_cpu_info(cpu_nr, &mcg.mc_socketid, &mcg.mc_coreid, &mcg.mc_core_threadid, &mcg.mc_apicid, NULL, NULL, NULL); /* If no mc_recovery_scan callback handler registered, * this error is not recoverable */ recover = (mc_recoverable_scan)? 
1: 0; for (i = 0; i < 32 && i < nr_mce_banks; i++) { struct mcinfo_bank mcb; /* on stack */ /* Skip bank if corresponding bit in bankmask is clear */ if (!test_bit(i, bankmask)) continue; mca_rdmsrl(MSR_IA32_MC0_STATUS + i * 4, status); if (!(status & MCi_STATUS_VAL)) continue; /* this bank has no valid telemetry */ /* For Intel Latest CPU CMCI/MCE Handler caller, we need to * decide whether to clear bank by MCi_STATUS bit value such as * OVER/UC/EN/PCC/S/AR */ if ( mc_need_clearbank_scan ) need_clear = mc_need_clearbank_scan(who, status); /* If this is the first bank with valid MCA DATA, then * try to reserve an entry from the urgent/nonurgent queue * depending on whethere we are called from an exception or * a poller; this can fail (for example dom0 may not * yet have consumed past telemetry). */ if (errcnt == 0) { if ((mctc = mctelem_reserve(which)) != NULL) { mci = mctelem_dataptr(mctc); mcinfo_clear(mci); } } memset(&mcb, 0, sizeof (mcb)); mcb.common.type = MC_TYPE_BANK; mcb.common.size = sizeof (mcb); mcb.mc_bank = i; mcb.mc_status = status; /* form a mask of which banks have logged uncorrected errors */ if ((status & MCi_STATUS_UC) != 0) uc |= (1 << i); /* likewise for those with processor context corrupt */ if ((status & MCi_STATUS_PCC) != 0) pcc |= (1 << i); if (recover && uc) /* uc = 1, recover = 1, we need not panic. */ recover = mc_recoverable_scan(status); addr = misc = 0; if (status & MCi_STATUS_ADDRV) { mca_rdmsrl(MSR_IA32_MC0_ADDR + 4 * i, addr); d = maddr_get_owner(addr); if (d != NULL && (who == MCA_POLLER || who == MCA_CMCI_HANDLER)) mcb.mc_domid = d->domain_id; } if (status & MCi_STATUS_MISCV) mca_rdmsrl(MSR_IA32_MC0_MISC + 4 * i, misc); mcb.mc_addr = addr; mcb.mc_misc = misc; if (who == MCA_CMCI_HANDLER) { mca_rdmsrl(MSR_IA32_MC0_CTL2 + i, mcb.mc_ctrl2); rdtscll(mcb.mc_tsc); } /* Increment the error count; if this is the first bank * with a valid error then add the global info to the mcinfo. 
*/ if (errcnt++ == 0 && mci != NULL) x86_mcinfo_add(mci, &mcg); /* Add the bank data */ if (mci != NULL) x86_mcinfo_add(mci, &mcb); if (mc_callback_bank_extended && cbret != MCA_EXTINFO_GLOBAL) { cbret = mc_callback_bank_extended(mci, i, status);