/*
 * platform specific persistent storage driver registers with
 * us here. If pstore is already mounted, call the platform
 * read function right away to populate the file system. If not
 * then the pstore mount code will call us later to fill out
 * the file system.
 *
 * Register with kmsg_dump to save last part of console log on panic.
 */
int pstore_register(struct pstore_info *psi)
{
	struct module *owner = psi->owner;

	spin_lock(&pstore_lock);
	/* Only a single backend may be registered at a time. */
	if (psinfo) {
		spin_unlock(&pstore_lock);
		return -EBUSY;
	}

	/* Honor the "backend=" parameter: reject any other driver. */
	if (backend && strcmp(backend, psi->name)) {
		spin_unlock(&pstore_lock);
		return -EINVAL;
	}

	psinfo = psi;
	spin_unlock(&pstore_lock);

	/* Pin the backend module so it cannot unload underneath us. */
	if (owner && !try_module_get(owner)) {
		/*
		 * NOTE(review): psinfo is cleared here without re-taking
		 * pstore_lock, so this store can race with a concurrent
		 * pstore_register() — confirm against callers.
		 */
		psinfo = NULL;
		return -EINVAL;
	}

	/* If pstore is already mounted, populate the filesystem now. */
	if (pstore_is_mounted())
		pstore_get_records();

	kmsg_dump_register(&pstore_dumper);

	return 0;
}
/*
 * platform specific persistent storage driver registers with
 * us here. If pstore is already mounted, call the platform
 * read function right away to populate the file system. If not
 * then the pstore mount code will call us later to fill out
 * the file system.
 *
 * Register with kmsg_dump to save last part of console log on panic.
 */
int pstore_register(struct pstore_info *psi)
{
	struct module *owner = psi->owner;

	spin_lock(&pstore_lock);
	/* Only a single backend may be registered at a time. */
	if (psinfo) {
		spin_unlock(&pstore_lock);
		return -EBUSY;
	}

	/* Honor the "backend=" parameter: reject any other driver. */
	if (backend && strcmp(backend, psi->name)) {
		spin_unlock(&pstore_lock);
		return -EINVAL;
	}

	psinfo = psi;
	/* Serializes multi-record reads performed via psi->read(). */
	mutex_init(&psinfo->read_mutex);
	spin_unlock(&pstore_lock);

	/* Pin the backend module so it cannot unload underneath us. */
	if (owner && !try_module_get(owner)) {
		/*
		 * NOTE(review): psinfo is cleared here without re-taking
		 * pstore_lock — racy against a concurrent registration;
		 * confirm against callers.
		 */
		psinfo = NULL;
		return -EINVAL;
	}

	/* If pstore is already mounted, populate the filesystem now. */
	if (pstore_is_mounted())
		pstore_get_records(0);

	kmsg_dump_register(&pstore_dumper);

	/* Periodically poll the backend for new records. */
	pstore_timer.expires = jiffies + PSTORE_INTERVAL;
	add_timer(&pstore_timer);

	return 0;
}
/*
 * Early-boot init of the Moorestown/Medfield SPI-attached MAX3110
 * UART used as an early console, and registration of its kmsg dumper.
 * Runs before ioremap() is available, hence the fixmap mappings.
 */
void mrst_early_console_init(void)
{
	u32 ctrlr0 = 0;
	u32 spi0_cdiv;
	u32 freq; /* Frequency info only need be searched once */

	/* Base clk is 100 MHz, the actual clk = 100M / (clk_divider + 1) */
	pclk_spi0 = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
							MRST_CLK_SPI0_REG);
	/* Clock divider lives in bits [11:9] of the SPI0 clock register. */
	spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9;
	freq = 100000000 / (spi0_cdiv + 1);

	/* SPI1 register base differs between Penwell and Cloverview. */
	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL)
		mrst_spi_paddr = MRST_REGBASE_SPI1;
	else if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW)
		mrst_spi_paddr = CLV_REGBASE_SPI1;

	pspi = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
						mrst_spi_paddr);

	/* Disable SPI controller */
	dw_writel(pspi, ssienr, 0);

	/* Set control param, 8 bits, transmit only mode */
	ctrlr0 = dw_readl(pspi, ctrl0);
	ctrlr0 &= 0xfcc0;
	ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET)
		      | (SPI_TMOD_TO << SPI_TMOD_OFFSET);
	dw_writel(pspi, ctrl0, ctrlr0);

	/*
	 * Change the spi0 clk to comply with 115200 bps, use 100000 to
	 * calculate the clk divisor to make the clock a little slower
	 * than real baud rate.
	 */
	dw_writel(pspi, baudr, freq/100000);

	/* Disable all INT for early phase */
	dw_writel(pspi, imr, 0x0);

	/* Set the cs to spi-uart */
	dw_writel(pspi, ser, 0x2);

	/* Enable the HW, the last step for HW init */
	dw_writel(pspi, ssienr, 0x1);

	/* Set the default configuration */
	max3110_spi_write_config();

	/* Register the kmsg dumper (guard against double registration). */
	if (!dumper_registered) {
		dw_dumper.dump = dw_kmsg_dump;
		kmsg_dump_register(&dw_dumper);
		dumper_registered = 1;
	}
}
/*
 * Register the OPAL console dumper with the kmsg_dump framework so
 * the tail of the kernel log is preserved on panic/oops.
 */
void __init opal_kmsg_init(void)
{
	/* Add our dumper to the list */
	int err = kmsg_dump_register(&opal_kmsg_dumper);

	if (err)
		pr_err("opal: kmsg_dump_register failed; returned %d\n", err);
}
/*
 * platform specific persistent storage driver registers with
 * us here. If pstore is already mounted, call the platform
 * read function right away to populate the file system. If not
 * then the pstore mount code will call us later to fill out
 * the file system.
 *
 * Register with kmsg_dump to save last part of console log on panic.
 */
int pstore_register(struct pstore_info *psi)
{
	struct module *owner = psi->owner;

	/* Honor the "backend=" parameter: reject any other driver. */
	if (backend && strcmp(backend, psi->name))
		return -EPERM;

	spin_lock(&pstore_lock);
	/* Only a single backend may be registered at a time. */
	if (psinfo) {
		spin_unlock(&pstore_lock);
		return -EBUSY;
	}

	/* Backends without a buffered write get the compat shim. */
	if (!psi->write)
		psi->write = pstore_write_compat;
	psinfo = psi;
	/* Serializes multi-record reads performed via psi->read(). */
	mutex_init(&psinfo->read_mutex);
	spin_unlock(&pstore_lock);

	/* Pin the backend module so it cannot unload underneath us. */
	if (owner && !try_module_get(owner)) {
		/*
		 * NOTE(review): psinfo is cleared here without re-taking
		 * pstore_lock — racy against a concurrent registration;
		 * confirm against callers.
		 */
		psinfo = NULL;
		return -EINVAL;
	}

	allocate_buf_for_compression();

	/* If pstore is already mounted, populate the filesystem now. */
	if (pstore_is_mounted())
		pstore_get_records(0);

	kmsg_dump_register(&pstore_dumper);

	/*
	 * Fragile backends (e.g. those unsafe to call from arbitrary
	 * contexts) only get the panic/oops dumper, not the always-on
	 * console/ftrace/pmsg front-ends.
	 */
	if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) {
		pstore_register_console();
		pstore_register_ftrace();
		pstore_register_pmsg();
	}

	/* Optional periodic poll of the backend for new records. */
	if (pstore_update_ms >= 0) {
		pstore_timer.expires = jiffies +
			msecs_to_jiffies(pstore_update_ms);
		add_timer(&pstore_timer);
	}

	/*
	 * Update the module parameter backend, so it is visible
	 * through /sys/module/pstore/parameters/backend
	 */
	backend = psi->name;

	pr_info("Registered %s as persistent store backend\n", psi->name);

	return 0;
}
static void mtdoops_notify_add(struct mtd_info *mtd) { struct mtdoops_context *cxt = &oops_cxt; u64 mtdoops_pages = div_u64(mtd->size, record_size); int err; if (!strcmp(mtd->name, mtddev)) cxt->mtd_index = mtd->index; if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) return; if (mtd->size < mtd->erasesize * 2) { printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n", mtd->index); return; } if (mtd->erasesize < record_size) { printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n", mtd->index); return; } if (mtd->size > MTDOOPS_MAX_MTD_SIZE) { printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n", mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024); return; } /* oops_page_used is a bit field */ cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages, BITS_PER_LONG)); if (!cxt->oops_page_used) { printk(KERN_ERR "mtdoops: could not allocate page array\n"); return; } cxt->dump.dump = mtdoops_do_dump; err = kmsg_dump_register(&cxt->dump); if (err) { printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err); vfree(cxt->oops_page_used); cxt->oops_page_used = NULL; return; } cxt->mtd = mtd; cxt->oops_pages = (int)mtd->size / record_size; find_next_position(cxt); printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index); }
/*
 * platform specific persistent storage driver registers with
 * us here. If pstore is already mounted, call the platform
 * read function right away to populate the file system. If not
 * then the pstore mount code will call us later to fill out
 * the file system.
 *
 * Register with kmsg_dump to save last part of console log on panic.
 */
int pstore_register(struct pstore_info *psi)
{
	struct module *owner = psi->owner;

	spin_lock(&pstore_lock);
	/* Only a single backend may be registered at a time. */
	if (psinfo) {
		spin_unlock(&pstore_lock);
		return -EBUSY;
	}

	/* Honor the "backend=" parameter: reject any other driver. */
	if (backend && strcmp(backend, psi->name)) {
		spin_unlock(&pstore_lock);
		return -EINVAL;
	}

	/* Backends without a buffered write get the compat shim. */
	if (!psi->write)
		psi->write = pstore_write_compat;
	psinfo = psi;
	/* Serializes multi-record reads performed via psi->read(). */
	mutex_init(&psinfo->read_mutex);
	spin_unlock(&pstore_lock);

	/* Pin the backend module so it cannot unload underneath us. */
	if (owner && !try_module_get(owner)) {
		/*
		 * NOTE(review): psinfo is cleared here without re-taking
		 * pstore_lock — racy against a concurrent registration;
		 * confirm against callers.
		 */
		psinfo = NULL;
		return -EINVAL;
	}

	/* If pstore is already mounted, populate the filesystem now. */
	if (pstore_is_mounted())
		pstore_get_records(0);

	kmsg_dump_register(&pstore_dumper);
	pstore_register_console();
	pstore_register_ftrace();
	pstore_register_pmsg();

	/* Optional periodic poll of the backend for new records. */
	if (pstore_update_ms >= 0) {
		pstore_timer.expires = jiffies +
			msecs_to_jiffies(pstore_update_ms);
		add_timer(&pstore_timer);
	}

	pr_info("psi registered\n");

	return 0;
}
/*
 * Register with kmsg_dump to save last part of console log on panic.
 * Thin wrapper so registration/unregistration of the dumper is
 * symmetric with the other pstore front-ends.
 */
static void pstore_register_kmsg(void)
{
	kmsg_dump_register(&pstore_dumper);
}
/*
 * Initialize the persistent RAM console backed by @buffer (total size
 * @buffer_size, header included). Recovers any log left by the previous
 * boot into @old_buf (via ram_console_save_old), optionally verifies or
 * corrects it with Reed-Solomon ECC, then resets the header and
 * registers both the panic kmsg dumper and the runtime console.
 * Always returns 0; on fatal setup errors it logs and bails out early.
 */
static int __init ram_console_init(struct ram_console_buffer *buffer,
				   size_t buffer_size, char *old_buf)
{
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
	int numerr;
	uint8_t *par;
#endif
	ram_console_buffer = buffer;
	ram_console_buffer_size =
		buffer_size - sizeof(struct ram_console_buffer);

	/*
	 * size_t is unsigned, so a buffer smaller than the header wraps
	 * around; this comparison catches that underflow.
	 */
	if (ram_console_buffer_size > buffer_size) {
		pr_err("ram_console: buffer %p, invalid size %zu, "
		       "datasize %zu\n", buffer, buffer_size,
		       ram_console_buffer_size);
		return 0;
	}

#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
	/*
	 * Reserve ECC parity space: one ECC_SIZE block per data block,
	 * plus one extra block covering the header. Same underflow
	 * check as above.
	 */
	ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
						ECC_BLOCK_SIZE) + 1) * ECC_SIZE;

	if (ram_console_buffer_size > buffer_size) {
		pr_err("ram_console: buffer %p, invalid size %zu, "
		       "non-ecc datasize %zu\n",
		       buffer, buffer_size, ram_console_buffer_size);
		return 0;
	}

	/* Parity bytes live directly after the log data. */
	ram_console_par_buffer = buffer->data + ram_console_buffer_size;

	/* first consecutive root is 0
	 * primitive element to generate roots = 1
	 */
	ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE);
	if (ram_console_rs_decoder == NULL) {
		printk(KERN_INFO "ram_console: init_rs failed\n");
		return 0;
	}

	ram_console_corrected_bytes = 0;
	ram_console_bad_blocks = 0;

	/* Check/correct the header itself with its dedicated ECC block. */
	par = ram_console_par_buffer +
	      DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;

	numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par);
	if (numerr > 0) {
		printk(KERN_INFO "ram_console: error in header, %d\n", numerr);
		ram_console_corrected_bytes += numerr;
	} else if (numerr < 0) {
		printk(KERN_INFO
		       "ram_console: uncorrectable error in header\n");
		ram_console_bad_blocks++;
	}
#endif

	/* A valid signature means a log survived from the previous boot. */
	if (buffer->sig == RAM_CONSOLE_SIG) {
		if (buffer->size > ram_console_buffer_size
		    || buffer->start > buffer->size)
			printk(KERN_INFO "ram_console: found existing invalid "
			       "buffer, size %d, start %d\n",
			       buffer->size, buffer->start);
		else {
			printk(KERN_INFO "ram_console: found existing buffer, "
			       "size %d, start %d\n",
			       buffer->size, buffer->start);
			ram_console_save_old(buffer, old_buf);
		}
	} else {
		printk(KERN_INFO "ram_console: no valid data in buffer "
		       "(sig = 0x%08x)\n", buffer->sig);
	}

	/* Start this boot's log with a fresh, empty header. */
	buffer->sig = RAM_CONSOLE_SIG;
	buffer->start = 0;
	buffer->size = 0;

#ifdef CONFIG_ANDROID_RAM_CONSOLE_PANIC
	ram_dumper.dump = ram_console_dump;
	if (kmsg_dump_register(&ram_dumper))
		pr_warn("Failed to register kmsg dumper");
#endif

	register_console(&ram_console);
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
	console_verbose();
#endif
	return 0;
}