/* This is called whenever we have a unrecoverable failure: transmit timeout Bad ring buffer packet header */
static void el2_reset_8390(struct net_device *dev)
{
	if (ei_debug > 1) {
		/* Dump a few board registers before resetting, for post-mortem. */
		pr_debug("%s: Resetting the 3c503 board...", dev->name);
		pr_cont(" %#lx=%#02x %#lx=%#02x %#lx=%#02x...", E33G_IDCFR, inb(E33G_IDCFR), E33G_CNTRL, inb(E33G_CNTRL), E33G_GACFR, inb(E33G_GACFR));
	}
	/* Pulse the reset bit in the control register. */
	outb_p(ECNTRL_RESET|ECNTRL_THIN, E33G_CNTRL);
	ei_status.txing = 0;
	/* Re-select the configured transceiver (thin coax vs. AUI). */
	outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
	el2_init_card(dev);
	if (ei_debug > 1)
		pr_cont("done\n");
}
/*
 * Dump raw stack words for @task (or the current context when @sp is NULL),
 * following the hand-off from the per-cpu IRQ stack back to the process
 * stack, then print the symbolic backtrace via show_trace_log_lvl().
 */
void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *irq_stack_end;
	unsigned long *irq_stack;
	unsigned long *stack;
	int cpu;
	int i;

	/* Pin this CPU so the per-cpu IRQ stack bounds stay valid while we walk. */
	preempt_disable();
	cpu = smp_processor_id();

	irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
	irq_stack = (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);

	/*
	 * Debugging aid: "show_stack(NULL, NULL);" prints the
	 * back trace for this cpu:
	 */
	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp; /* use our own frame as the start */
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irq_stack && stack <= irq_stack_end) {
			if (stack == irq_stack_end) {
				/* Last IRQ-stack word holds the saved process SP. */
				stack = (unsigned long *) (irq_stack_end[-1]);
				pr_cont(" <EOI> ");
			}
		} else {
			/* Reached the top of the THREAD_SIZE-aligned process stack. */
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
			pr_cont("\n");
		pr_cont(" %016lx", *stack++);
		/* Console output can be slow; keep the NMI watchdog from firing. */
		touch_nmi_watchdog();
	}
	preempt_enable();

	pr_cont("\n");
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
/*
 * sysfs store handler: "1" enables the keypad-LED vddo regulator, "0"
 * disables it.  Returns @size on success (including best-effort regulator
 * failures, which are only logged), -EINVAL on unparsable input.
 *
 * Fixes two defects in the original:
 *  - sscanf() result was ignored, so bad input acted on an uninitialized
 *    'data' (undefined behavior);
 *  - the final regulator_is_enabled() call lacked the NULL check that both
 *    enable/disable paths apply to pdata->vddo_vreg.
 */
static ssize_t keypadled_poweron(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
{
	int ret;
	int data;
	struct matrix_keypad_platform_data *pdata = dev_get_drvdata(dev);

	/* Reject input that does not parse as an integer. */
	if (sscanf(buf, "%d", &data) != 1)
		return -EINVAL;

	dev_err(dev,"%s : data = %d", __func__, data);

	if (data){
		if (pdata->vddo_vreg){
			ret = regulator_enable(pdata->vddo_vreg);
			if (ret) {
				dev_err(dev,"%s: failed to enable vddo, %d\n", __func__, ret);
				return size;
			}
		}
	} else {
		if (pdata->vddo_vreg) {
			ret = regulator_disable(pdata->vddo_vreg);
			if (ret) {
				dev_err(dev,"%s: failed to disable vddo, %d\n", __func__, ret);
				return size;
			}
		}
	}

	/* Only query the regulator when it actually exists. */
	if (pdata->vddo_vreg)
		pr_cont(" [%d]\n", regulator_is_enabled(pdata->vddo_vreg));
	return size;
}
/**
 * pr_cont_kernfs_path - pr_cont path of a kernfs_node
 * @kn: kernfs_node of interest
 *
 * This function can be called from any context.
 */
void pr_cont_kernfs_path(struct kernfs_node *kn)
{
	char *path;
	unsigned long flags;

	/* The shared buffer is protected by the rename lock. */
	spin_lock_irqsave(&kernfs_rename_lock, flags);

	path = kernfs_path_locked(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
	if (!path)
		pr_cont("<name too long>");
	else
		pr_cont("%s", path);

	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
}
/*
 * Probe the P6-family PMU: accept known Pentium Pro/II/III/M models,
 * install the p6 x86_pmu description and its cache event table.
 * Returns -ENODEV for unrecognized models.
 */
__init int p6_pmu_init(void)
{
	switch (boot_cpu_data.x86_model) {
	case 1:
	case 3:  /* Pentium Pro */
	case 5:
	case 6:  /* Pentium II */
	case 7:
	case 8:
	case 11: /* Pentium III */
	case 9:
	case 13: /* Pentium M */
		break;
	default:
		pr_cont("unsupported p6 CPU model %d ", boot_cpu_data.x86_model);
		return -ENODEV;
	}

	x86_pmu = p6_pmu;

	memcpy(hw_cache_event_ids, p6_hw_cache_event_ids, sizeof(hw_cache_event_ids));

	return 0;
}
/*
 * Report a mempool poison-byte mismatch: identify the pool and element,
 * hex-dump up to one machine word either side of the bad byte and dump
 * the current stack.
 */
static void poison_error(mempool_t *pool, void *element, size_t size, size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int pos;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (pos = start; pos < end; pos++)
		pr_cont("%x ", *(u8 *)(element + pos));
	pr_cont("%s\n", end < size ? "..." : "");

	dump_stack();
}
/*
 * PCRYPT interrupt handler: read the status/method/data/instance
 * registers, log a decoded error line attributed to the owning channel,
 * then acknowledge the interrupt.
 */
static void nv84_crypt_intr(struct nouveau_subdev *subdev)
{
	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
	struct nouveau_engine *engine = nv_engine(subdev);
	struct nouveau_object *engctx;
	struct nv84_crypt_priv *priv = (void *)subdev;
	u32 stat = nv_rd32(priv, 0x102130);
	u32 mthd = nv_rd32(priv, 0x102190);
	u32 data = nv_rd32(priv, 0x102194);
	u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff;
	int chid;

	/* Map the context instance back to a FIFO channel id for the log. */
	engctx = nouveau_engctx_get(engine, inst);
	chid = pfifo->chid(pfifo, engctx);

	if (stat) {
		/* Empty-prefix nv_error so the bitfield decode continues the line. */
		nv_error(priv, "%s", "");
		nouveau_bitfield_print(nv84_crypt_intr_mask, stat);
		pr_cont(" ch %d [0x%010llx %s] mthd 0x%04x data 0x%08x\n", chid, (u64)inst << 12, nouveau_client_name(engctx), mthd, data);
	}

	/* Acknowledge the handled status bits. */
	nv_wr32(priv, 0x102130, stat);
	nv_wr32(priv, 0x10200c, 0x10);

	nouveau_engctx_put(engctx);
}
/*
 * Probe the P6-family PMU: install the p6 x86_pmu description, apply the
 * Pentium Pro rdpmc erratum quirk, and copy the cache event table.
 * Returns -ENODEV for unrecognized models.
 */
__init int p6_pmu_init(void)
{
	x86_pmu = p6_pmu;

	switch (boot_cpu_data.x86_model) {
	case  1: /* Pentium Pro */
		/* Pentium Pro needs the rdpmc erratum workaround. */
		x86_add_quirk(p6_pmu_rdpmc_quirk);
		break;

	case  3: /* Pentium II - Klamath */
	case  5: /* Pentium II - Deschutes */
	case  6: /* Pentium II - Mendocino */
		break;

	case  7: /* Pentium III - Katmai */
	case  8: /* Pentium III - Coppermine */
	case 10: /* Pentium III Xeon */
	case 11: /* Pentium III - Tualatin */
		break;

	case  9: /* Pentium M - Banias */
	case 13: /* Pentium M - Dothan */
		break;

	default:
		pr_cont("unsupported p6 CPU model %d ", boot_cpu_data.x86_model);
		return -ENODEV;
	}

	memcpy(hw_cache_event_ids, p6_hw_cache_event_ids, sizeof(hw_cache_event_ids));

	return 0;
}
/*
 * Decode a PGRAPH_VSTATUS register: each 3-bit field selects one unit
 * from @units; value 1 marks the unit as busy.  Any residue left after
 * the unit table is exhausted is reported as invalid.
 */
static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r, const char *const units[], u32 status)
{
	int idx = 0;

	nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);

	while (units[idx] && status) {
		if ((status & 7) == 1)
			pr_cont(" %s", units[idx]);
		status >>= 3;
		idx++;
	}

	if (status)
		pr_cont(" (invalid: 0x%x)", status);
	pr_cont("\n");
}
/*
 * Deliver the fault signal recorded in current->thread (signo/code/faddr).
 * Returns 1 after queuing a user-mode signal, -1 when a kernel-mode fault
 * was repaired by an exception-table fixup; an unrepairable kernel fault
 * oopses and does not return.
 */
int send_fault_sig(struct pt_regs *regs)
{
	int signo, si_code;
	void __user *addr;

	signo = current->thread.signo;
	si_code = current->thread.code;
	addr = (void __user *)current->thread.faddr;
	pr_debug("send_fault_sig: %p,%d,%d\n", addr, signo, si_code);

	if (user_mode(regs)) {
		force_sig_fault(signo, si_code, addr, current);
	} else {
		if (fixup_exception(regs))
			return -1;

		//if (signo == SIGBUS)
		//	force_sig_fault(si_signo, si_code, addr, current);

		/*
		 * Oops. The kernel tried to access some bad page. We'll have to
		 * terminate things with extreme prejudice.
		 */
		/* Distinguish NULL-pointer dereference from other bad accesses. */
		if ((unsigned long)addr < PAGE_SIZE)
			pr_alert("Unable to handle kernel NULL pointer dereference");
		else
			pr_alert("Unable to handle kernel access");
		pr_cont(" at virtual address %p\n", addr);
		die_if_kernel("Oops", regs, 0 /*error_code*/);
		do_exit(SIGKILL);
	}

	return 1;
}
/*
 * Probe the architectural MSR events and register the "msr" PMU.
 * Events whose probe or read fails are dropped from the sysfs attribute
 * list.  Returns 0 when no driver is needed (no TSC) or on success, or
 * the perf_pmu_register() error code on failure (the original silently
 * ignored registration failure).
 */
static int __init msr_init(void)
{
	int i, j = 0, err;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		pr_cont("no MSR PMU driver.\n");
		return 0;
	}

	/* Probe the MSRs. */
	for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
		u64 val;

		/*
		 * Under virtualization a read-only MSR may not be reported as
		 * present, so probe by attempting a safe read as well.
		 */
		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
			msr[i].attr = NULL;
	}

	/* List remaining MSRs in the sysfs attrs. */
	for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
		if (msr[i].attr)
			events_attrs[j++] = &msr[i].attr->attr.attr;
	}
	events_attrs[j] = NULL;

	/* Propagate registration failure instead of dropping it. */
	err = perf_pmu_register(&pmu_msr, "msr", -1);
	if (err)
		return err;

	return 0;
}
/*
 * Decode an MC3 machine-check error.  Fam14h+ CPUs have no MC3 bank, so
 * seeing one there is reported as a bug.  Only extended error code 0 with
 * a DRD/DWR bus error is a recognized pattern.
 */
static void decode_mc3_mce(struct mce *m)
{
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, xec_mask);

	if (boot_cpu_data.x86 >= 0x14) {
		pr_emerg("You shouldn't be seeing MC3 MCE on this cpu family,"
			 " please report on LKML.\n");
		return;
	}

	pr_emerg(HW_ERR "MC3 Error");

	if (xec == 0x0) {
		u8 r4 = R4(ec);

		/* Only bus errors with a data-read/data-write R4 code decode here. */
		if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR))
			goto wrong_mc3_mce;

		pr_cont(" during %s.\n", R4_MSG(ec));
	} else
		goto wrong_mc3_mce;

	return;

 wrong_mc3_mce:
	pr_emerg(HW_ERR "Corrupted MC3 MCE info?\n");
}
/*
 * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
 * -Prints 3 regs per line and a CR.
 * -To continue, callee regs right after scratch, special handling of CR
 */
static noinline void print_reg_file(long *reg_rev, int start_num)
{
	unsigned int i;
	char buf[512];
	int n = 0, len = sizeof(buf);

	/* Accumulate 13 registers into buf, 3 per output line. */
	for (i = start_num; i < start_num + 13; i++) {
		n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
			       i, (unsigned long)*reg_rev);

		if (((i + 1) % 3) == 0)
			n += scnprintf(buf + n, len - n, "\n");

		/* because pt_regs has regs reversed: r12..r0, r25..r13 */
		if (is_isa_arcv2() && start_num == 0)
			reg_rev++;
		else
			reg_rev--;
	}

	if (start_num != 0)
		n += scnprintf(buf + n, len - n, "\n\n");

	/* To continue printing callee regs on same line as scratch regs */
	if (start_num == 0)
		pr_info("%s", buf);
	else
		pr_cont("%s\n", buf);
}
/* Disassemble a BRCC (conditional branch) instruction word. */
static void decode_BRCC_0(unsigned int opcode)
{
	int branch_pred = (opcode >> BRCC_B_bits) & BRCC_B_mask;
	int test_true = (opcode >> BRCC_T_bits) & BRCC_T_mask;

	pr_cont("IF %sCC JUMP pcrel %s", test_true ? "" : "!", branch_pred ? "(BP)" : "");
}
/* Debug helper: print @s followed by @na big-endian address cells in hex. */
static void __init of_dump_addr(const char *s, const __be32 *addr, int na)
{
	int idx;

	pr_debug("%s", s);
	for (idx = 0; idx < na; idx++)
		pr_cont(" %08x", addr[idx]);
	pr_debug("\n");
}
/*
 * Validate the boot-loader supplied initrd range, reserve its memory in
 * memblock, and publish it.  On any problem the range is zeroed so later
 * code treats the initrd as absent.
 */
static void __init setup_initrd(void)
{
	unsigned long size;

	if (initrd_start >= initrd_end) {
		/* No trailing \n: the disable path continues this line. */
		pr_info("initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		pr_err("initrd extends beyond end of memory");
		goto disable;
	}

	size = initrd_end - initrd_start;
	/* Keep the initrd pages out of the general allocator. */
	memblock_reserve(__pa(initrd_start), size);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);
	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
/*
 * Probe the Coherence Manager for the core/VPE topology, record per-cpu
 * core/VPE ids, mark all VPEs possible+present, and bring core 0 up
 * coherent.  CPU numbers are synonymous with VPEs here.
 */
static void __init cps_smp_setup(void)
{
	unsigned int ncores, nvpes, core_vpes;
	int c, v;

	/* Detect & record VPE topology */
	ncores = mips_cm_numcores();
	pr_info("VPE topology ");
	for (c = nvpes = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		pr_cont("%c%u", c ? ',' : '{', core_vpes);

		/* Use the number of VPEs in core 0 for smp_num_siblings */
		if (!c)
			smp_num_siblings = core_vpes;

		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
			cpu_data[nvpes + v].core = c;
#ifdef CONFIG_MIPS_MT_SMP
			cpu_data[nvpes + v].vpe_id = v;
#endif
		}

		nvpes += core_vpes;
	}
	pr_cont("} total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);
}
/*
 * Decode a Family 15h MC0 (load/store unit) machine-check error.
 * Returns true when the (ec, xec) combination was recognized and a
 * message was printed, false otherwise.
 */
static bool f15h_mc0_mce(u16 ec, u8 xec)
{
	bool ret = true;

	if (MEM_ERROR(ec)) {
		switch (xec) {
		case 0x0:
			pr_cont("Data Array access error.\n");
			break;

		case 0x1:
			pr_cont("UC error during a linefill from L2/NB.\n");
			break;

		case 0x2:
		case 0x11:
			pr_cont("STQ access error.\n");
			break;

		case 0x3:
			pr_cont("SCB access error.\n");
			break;

		case 0x10:
			pr_cont("Tag error.\n");
			break;

		case 0x12:
			pr_cont("LDQ access error.\n");
			break;

		default:
			ret = false;
		}
	} else if (BUS_ERROR(ec)) {
		if (!xec)
			pr_cont("System Read Data Error.\n");
		else
			pr_cont(" Internal error condition type %d.\n", xec);
	} else if (INT_ERROR(ec)) {
		if (xec <= 0x1f)
			pr_cont("Hardware Assert.\n");
		else
			ret = false;
	} else
		ret = false;

	return ret;
}
/*
 * Print register state; for kernel-mode faults also dump the raw stack
 * and the code bytes around the faulting RIP (starting a little before
 * it so the faulting instruction appears in context).
 */
void show_regs(struct pt_regs *regs)
{
	int i;
	unsigned long sp;

	sp = regs->sp;
	show_regs_print_info(KERN_DEFAULT);
	__show_regs(regs, 1);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		/* Show roughly 43/64 of the code bytes before RIP, rest after. */
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		show_lbrs(); /* called before show_stack_log_lvl() as it could trig page_fault again and reenable LBR */
		printk(KERN_DEFAULT "Stack:\n");
		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
				   0, KERN_DEFAULT);

		printk(KERN_DEFAULT "Code: ");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				pr_cont(" Bad RIP value.");
				break;
			}
			/* Highlight the byte at the faulting RIP with <..>. */
			if (ip == (u8 *)regs->ip)
				pr_cont("<%02x> ", c);
			else
				pr_cont("%02x ", c);
		}
	}
	pr_cont("\n");
}
/*
 * Disassemble a DSP load/store instruction word: destination/source
 * register (optionally .L/.W half access), I-register addressing with
 * post-increment/decrement, load (W==0) or store (W==1) direction.
 */
static void decode_dspLDST_0(unsigned int opcode)
{
	int i = ((opcode >> DspLDST_i_bits) & DspLDST_i_mask);
	int m = ((opcode >> DspLDST_m_bits) & DspLDST_m_mask);
	int W = ((opcode >> DspLDST_W_bits) & DspLDST_W_mask);
	int aop = ((opcode >> DspLDST_aop_bits) & DspLDST_aop_mask);
	int reg = ((opcode >> DspLDST_reg_bits) & DspLDST_reg_mask);

	if (W == 0) {
		/* Load: "Rn[.L|.W] = [ Ii++/-- ]" */
		pr_cont("R%i", reg);
		switch (m) {
		case 0:
			pr_cont(" = ");
			break;
		case 1:
			pr_cont(".L = ");
			break;
		case 2:
			pr_cont(".W = ");
			break;
		}
	}

	pr_cont("[ I%i", i);

	switch (aop) {
	case 0:
		pr_cont("++ ]");
		break;
	case 1:
		pr_cont("-- ]");
		break;
	}

	if (W == 1) {
		/*
		 * Store direction.  NOTE(review): the ".L = "/".W = " suffix
		 * is emitted after " = R%i" here, which reads oddly for a
		 * store — confirm against the intended disassembly format.
		 */
		pr_cont(" = R%i", reg);
		switch (m) {
		case 1:
			pr_cont(".L = ");
			break;
		case 2:
			pr_cont(".W = ");
			break;
		}
	}
}
/*
 * Decode a Family 10h MC0 machine-check error.  The only Fam10h-specific
 * case is a generic L1 data-scrub error; everything else is shared with
 * the Family 12h decoder.
 */
static bool f10h_mc0_mce(u16 ec, u8 xec)
{
	bool is_scrub = (LL(ec) == LL_L1) && (R4(ec) == R4_GEN);

	if (is_scrub) {
		pr_cont("during data scrub.\n");
		return true;
	}

	return f12h_mc0_mce(ec, xec);
}
/*
 * Decode a Family 12h MC0 (data cache) machine-check error.  Only memory
 * errors at the L1 or L2 level are recognized; returns whether a message
 * was printed.
 */
static bool f12h_mc0_mce(u16 ec, u8 xec)
{
	if (!MEM_ERROR(ec))
		return false;

	switch (LL(ec)) {
	case LL_L2:
		pr_cont("during L1 linefill from L2.\n");
		return true;
	case LL_L1:
		pr_cont("Data/Tag %s error.\n", R4_MSG(ec));
		return true;
	default:
		return false;
	}
}
/*
 * Decode a Northbridge machine-check error: handle the family-independent
 * extended error codes directly, defer the rest to the per-family
 * nb_mce() hook, and forward DRAM ECC errors to the bus decoder where
 * one is registered.
 */
void amd_decode_nb_mce(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int node_id = amd_get_nb_id(m->extcpu);
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, 0x1f);

	pr_emerg(HW_ERR "Northbridge Error (node %d): ", node_id);

	switch (xec) {
	case 0x2:
		pr_cont("Sync error (sync packets on HT link detected).\n");
		return;

	case 0x3:
		pr_cont("HT Master abort.\n");
		return;

	case 0x4:
		pr_cont("HT Target abort.\n");
		return;

	case 0x7:
		pr_cont("NB Watchdog timeout.\n");
		return;

	case 0x9:
		pr_cont("SVM DMA Exclusion Vector error.\n");
		return;

	default:
		break;
	}

	/* Family-specific decoding for the remaining codes. */
	if (!fam_ops->nb_mce(ec, xec))
		goto wrong_nb_mce;

	/* xec 0x0/0x8 on these families are DRAM ECC errors. */
	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x15)
		if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder)
			nb_bus_decoder(node_id, m);

	return;

wrong_nb_mce:
	pr_emerg(HW_ERR "Corrupted NB MCE info?\n");
}
/* Debug helper: print the name of every asserted bit in the ISP IRQ status. */
static void iss_isp_isr_dbg(struct iss_device *iss, u32 irqstatus)
{
	static const char * const name[] = {
		"ISIF_0",
		"ISIF_1",
		"ISIF_2",
		"ISIF_3",
		"IPIPEREQ",
		"IPIPELAST_PIX",
		"IPIPEDMA",
		"IPIPEBSC",
		"IPIPEHST",
		"IPIPEIF",
		"AEW",
		"AF",
		"H3A",
		"RSZ_REG",
		"RSZ_LAST_PIX",
		"RSZ_DMA",
		"RSZ_CYC_RZA",
		"RSZ_CYC_RZB",
		"RSZ_FIFO_OVF",
		"RSZ_FIFO_IN_BLK_ERR",
		"20",
		"21",
		"RSZ_EOF0",
		"RSZ_EOF1",
		"H3A_EOF",
		"IPIPE_EOF",
		"26",
		"IPIPE_DPC_INI",
		"IPIPE_DPC_RNEW0",
		"IPIPE_DPC_RNEW1",
		"30",
		"OCP_ERR",
	};
	unsigned int bit;

	dev_dbg(iss->dev, "ISP IRQ: ");

	for (bit = 0; bit < ARRAY_SIZE(name); bit++)
		if (irqstatus & (1 << bit))
			pr_cont("%s ", name[bit]);
	pr_cont("\n");
}
/* Debug helper: print the name of every asserted bit in the ISS IRQ status. */
static void iss_isr_dbg(struct iss_device *iss, u32 irqstatus)
{
	static const char * const name[] = {
		"ISP_0",
		"ISP_1",
		"ISP_2",
		"ISP_3",
		"CSIA",
		"CSIB",
		"CCP2_0",
		"CCP2_1",
		"CCP2_2",
		"CCP2_3",
		"CBUFF",
		"BTE",
		"SIMCOP_0",
		"SIMCOP_1",
		"SIMCOP_2",
		"SIMCOP_3",
		"CCP2_8",
		"HS_VS",
		"18",
		"19",
		"20",
		"21",
		"22",
		"23",
		"24",
		"25",
		"26",
		"27",
		"28",
		"29",
		"30",
		"31",
	};
	unsigned int bit;

	dev_dbg(iss->dev, "ISS IRQ: ");

	for (bit = 0; bit < ARRAY_SIZE(name); bit++)
		if (irqstatus & (1 << bit))
			pr_cont("%s ", name[bit]);
	pr_cont("\n");
}
/*
 * Flush the PGRAPH TLB: stall the engine, wait (up to ~2s) for the three
 * VSTATUS registers to report all units idle, trigger the VM flush, then
 * unstall.  Returns -EBUSY if the idle wait timed out (the flush is still
 * attempted), 0 otherwise.
 */
static int nv84_graph_tlb_flush(struct nouveau_engine *engine)
{
	struct nouveau_timer *ptimer = nouveau_timer(engine);
	struct nv50_graph_priv *priv = (void *)engine;
	bool idle, timeout = false;
	unsigned long flags;
	u64 start;
	u32 tmp;

	spin_lock_irqsave(&priv->lock, flags);
	/* Stall PGRAPH while we flush. */
	nv_mask(priv, 0x400500, 0x00000001, 0x00000000);

	start = ptimer->read(ptimer);
	do {
		idle = true;

		/* A 3-bit field value of 1 in any VSTATUS slot means busy. */
		for (tmp = nv_rd32(priv, 0x400380); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(priv, 0x400384); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(priv, 0x400388); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}
	} while (!idle &&
		 !(timeout = ptimer->read(ptimer) - start > 2000000000));

	if (timeout) {
		/* Dump status/vstatus to help diagnose the stuck unit. */
		nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");

		tmp = nv_rd32(priv, 0x400700);
		nv_error(priv, "PGRAPH_STATUS : 0x%08x", tmp);
		nouveau_bitfield_print(nv50_pgraph_status, tmp);
		pr_cont("\n");

		nouveau_pgraph_vstatus_print(priv, 0, nv50_pgraph_vstatus_0,
					     nv_rd32(priv, 0x400380));
		nouveau_pgraph_vstatus_print(priv, 1, nv50_pgraph_vstatus_1,
					     nv_rd32(priv, 0x400384));
		nouveau_pgraph_vstatus_print(priv, 2, nv50_pgraph_vstatus_2,
					     nv_rd32(priv, 0x400388));
	}

	/* Kick the VM flush and wait for it to complete. */
	nv_wr32(priv, 0x100c80, 0x00000001);
	if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
		nv_error(priv, "vm flush timeout\n");

	/* Un-stall PGRAPH. */
	nv_mask(priv, 0x400500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&priv->lock, flags);
	return timeout ? -EBUSY : 0;
}
/*
 * Print processor state: pstate, pc/lr (symbolized only for kernel mode,
 * to avoid leaking kernel addresses to user crash dumps), sp, and the
 * general-purpose registers two per line from the top register down.
 * Compat (AArch32) tasks only have x0-x12 plus their own lr/sp.
 */
void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);

	if (!user_mode(regs)) {
		printk("pc : %pS\n", (void *)regs->pc);
		printk("lr : %pS\n", (void *)lr);
	} else {
		printk("pc : %016llx\n", regs->pc);
		printk("lr : %016llx\n", lr);
	}

	printk("sp : %016llx\n", sp);

	if (system_uses_irq_prio_masking())
		printk("pmr_save: %08llx\n", regs->pmr_save);

	i = top_reg;

	while (i >= 0) {
		/* First register of the line starts a fresh printk... */
		printk("x%-2d: %016llx ", i, regs->regs[i]);
		i--;

		/* ...the second (even-indexed) one continues it. */
		if (i % 2 == 0) {
			pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
			i--;
		}

		pr_cont("\n");
	}
}
/*
 * Decode a K8 MC2 (L2 cache / bus unit) machine-check error.  Returns
 * true when the (ec, xec) combination was recognized and a message was
 * printed, false otherwise.
 */
static bool k8_mc2_mce(u16 ec, u8 xec)
{
	bool ret = true;

	if (xec == 0x1)
		pr_cont(" in the write data buffers.\n");
	else if (xec == 0x3)
		pr_cont(" in the victim data buffers.\n");
	else if (xec == 0x2 && MEM_ERROR(ec))
		pr_cont(": %s error in the L2 cache tags.\n", R4_MSG(ec));
	else if (xec == 0x0) {
		/* xec 0 is further qualified by the error-code class bits. */
		if (TLB_ERROR(ec))
			pr_cont(": %s error in a Page Descriptor Cache or "
				"Guest TLB.\n", TT_MSG(ec));
		else if (BUS_ERROR(ec))
			pr_cont(": %s/ECC error in data read from NB: %s.\n",
				R4_MSG(ec), PP_MSG(ec));
		else if (MEM_ERROR(ec)) {
			u8 r4 = R4(ec);

			if (r4 >= 0x7)
				pr_cont(": %s error during data copyback.\n",
					R4_MSG(ec));
			else if (r4 <= 0x1)
				pr_cont(": %s parity/ECC error during data "
					"access from L2.\n", R4_MSG(ec));
			else
				ret = false;
		} else
			ret = false;
	} else
		ret = false;

	return ret;
}
/*
 * Dump diagnostic state for @page: refcount/mapcount/mapping/index,
 * decoded flags, a raw hex dump of the struct page, the optional
 * @reason, and the owning memcg.  Poisoned struct pages get the hex
 * dump only, since Page*() helpers cannot be called on them.
 */
void __dump_page(struct page *page, const char *reason)
{
	bool page_poisoned = PagePoisoned(page);
	int mapcount;

	/*
	 * If struct page is poisoned don't access Page*() functions as that
	 * leads to recursive loop. Page*() check for poisoned pages, and calls
	 * dump_page() when detected.
	 */
	if (page_poisoned) {
		pr_emerg("page:%px is uninitialized and poisoned", page);
		goto hex_only;
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode own info.
	 */
	mapcount = PageSlab(page) ? 0 : page_mapcount(page);

	pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
		  page, page_ref_count(page), mapcount,
		  page->mapping, page_to_pgoff(page));
	if (PageCompound(page))
		pr_cont(" compound_mapcount: %d", compound_mapcount(page));
	pr_cont("\n");
	/* Keep the printed flag names in sync with the flag definitions. */
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags);

hex_only:
	print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);

	if (reason)
		pr_alert("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
	if (!page_poisoned && page->mem_cgroup)
		pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}
/*
 * Run one locking self-test case and classify the result by comparing
 * the post-test debug_locks state against @expected.  Restores the
 * preemption count and lock state afterwards, since some test cases
 * deliberately corrupt them.
 */
static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
{
	unsigned long saved_preempt_count = preempt_count();

	WARN_ON(irqs_disabled());

	testcase_fn();
	/*
	 * Filter out expected failures:
	 */
#ifndef CONFIG_PROVE_LOCKING
	if (expected == FAILURE && debug_locks) {
		expected_testcase_failures++;
		pr_cont("failed|");
	}
	else
#endif
	if (debug_locks != expected) {
		unexpected_testcase_failures++;
		pr_cont("FAILED|");

		dump_stack();
	} else {
		testcase_successes++;
		pr_cont("  ok  |");
	}
	testcase_total++;

	if (debug_locks_verbose)
		pr_cont(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
			lockclass_mask, debug_locks, expected);
	/*
	 * Some tests (e.g. double-unlock) might corrupt the preemption
	 * count, so restore it:
	 */
	preempt_count_set(saved_preempt_count);
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Reset softirq-enabled state that a test may have clobbered. */
	if (softirq_count())
		current->softirqs_enabled = 0;
	else
		current->softirqs_enabled = 1;
#endif

	reset_locks();
}