/*
 * Register the persistent-RAM backed boot devices.
 *
 * With CONFIG_ANDROID_RAM_CONSOLE, registers the ram console platform
 * device and — if CONFIG_LGE_HANDLE_PANIC is also set — publishes the
 * console region's start address and size to IMEM via
 * lge_set_ram_console_addr() (presumably so firmware can locate the log
 * after a reset — confirm against the bootloader).  Independently
 * registers the persistent trace device when CONFIG_PERSISTENT_TRACER
 * is enabled.
 *
 * Return values of platform_device_register() are intentionally ignored
 * here; registration failure is non-fatal at this stage of boot.
 */
void __init lge_add_persistent_device(void)
{
#ifdef CONFIG_ANDROID_RAM_CONSOLE
	platform_device_register(&ram_console_device);
#ifdef CONFIG_LGE_HANDLE_PANIC
	/* write ram console addr to imem */
	lge_set_ram_console_addr(persist_ram.start, LGE_RAM_CONSOLE_SIZE);
#endif
#endif
#ifdef CONFIG_PERSISTENT_TRACER
	platform_device_register(&persistent_trace_device);
#endif
}
/*
 * Register the ramoops platform device.
 *
 * Bails out early if no memory was reserved for ramoops
 * (lge_ramoops_data.mem_address is zero).  On successful registration,
 * and with CONFIG_LGE_HANDLE_PANIC enabled, mirrors the ramoops
 * address/console size into IMEM via lge_set_ram_console_addr().
 */
void __init lge_add_persistent_device(void)
{
	int ret;

	if (!lge_ramoops_data.mem_address) {
		pr_err("%s: not allocated memory for ramoops\n", __func__);
		return;
	}

	ret = platform_device_register(&lge_ramoops_dev);
	if (ret) {
		/*
		 * Log the error code and function context, matching the
		 * style of the message above, so failures are traceable.
		 */
		pr_err("%s: unable to register platform device (%d)\n",
		       __func__, ret);
		return;
	}

#ifdef CONFIG_LGE_HANDLE_PANIC
	/* write ram console addr to imem */
	lge_set_ram_console_addr(lge_ramoops_data.mem_address,
				 lge_ramoops_data.console_size);
#endif
}
/*
 * Early init for the LGE panic handler.
 *
 * Maps the qcom,msm-imem region, reads the ramoops address/size from the
 * device tree and mirrors them into IMEM, sanity-checks the crash-handler
 * magic shared with the bootloader (SBL), and sets the default restart
 * reason to hardware reset.
 *
 * Return: 0 on success, -ENODEV if a required DT node is missing or the
 * IMEM region cannot be mapped.
 */
static int __init lge_panic_handler_early_init(void)
{
	struct device_node *np;
	uint32_t crash_handler_magic = 0;
	uint32_t mem_addr = 0;
	uint32_t mem_size = 0;

	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem");
	if (!np) {
		pr_err("unable to find DT imem node\n");
		return -ENODEV;
	}

	msm_imem_base = of_iomap(np, 0);
	/* of_find_compatible_node() returns a node with an elevated
	 * refcount; drop it once the mapping is established (or failed). */
	of_node_put(np);
	if (!msm_imem_base) {
		pr_err("unable to map imem\n");
		return -ENODEV;
	}

	np = of_find_compatible_node(NULL, NULL, "ramoops");
	if (!np) {
		pr_err("unable to find DT ramoops node\n");
		return -ENODEV;
	}

	/* Missing properties leave mem_addr/mem_size at 0, which
	 * lge_set_ram_console_addr() receives as-is — same as before. */
	of_property_read_u32(np, "mem-address", &mem_addr);
	of_property_read_u32(np, "mem-size", &mem_size);
	of_node_put(np);
	/* %d mis-printed these uint32_t values as signed decimal;
	 * addresses/sizes belong in unsigned hex. */
	pr_info("mem-address=0x%x\n", mem_addr);
	pr_info("mem-size=0x%x\n", mem_size);
	lge_set_ram_console_addr(mem_addr, mem_size);

	/* check struct boot_shared_imem_cookie_type is matched */
	crash_handler_magic = __raw_readl(CRASH_HANDLER_MAGIC);
	WARN(crash_handler_magic != CRASH_HANDLER_MAGIC_VALUE,
	     "Check sbl's struct boot_shared_imem_cookie_type.\n"
	     "Need to update lge_handle_panic's imem offset.\n");

	/* Set default restart_reason to hw reset. */
	lge_set_restart_reason(LGE_RB_MAGIC | LGE_ERR_TZ);

	return 0;
}
static int ramoops_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ramoops_platform_data *pdata = pdev->dev.platform_data; struct ramoops_context *cxt = &oops_cxt; struct device_node *node = pdev->dev.of_node; size_t dump_mem_sz; phys_addr_t paddr; int err = -EINVAL; if (!pdata) { err = ramoops_parse_dt(&pdev->dev, node); if (err < 0) return err; pdata = pdev->dev.platform_data; } /* Only a single ramoops area allowed at a time, so fail extra * probes. */ if (cxt->max_dump_cnt) goto fail_out; if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size && !pdata->ftrace_size && !pdata->pmsg_size)) { pr_err("The memory size and the record/console size must be " "non-zero\n"); goto fail_out; } if (!is_power_of_2(pdata->mem_size)) pdata->mem_size = rounddown_pow_of_two(pdata->mem_size); if (!is_power_of_2(pdata->record_size)) pdata->record_size = rounddown_pow_of_two(pdata->record_size); if (!is_power_of_2(pdata->console_size)) pdata->console_size = rounddown_pow_of_two(pdata->console_size); if (!is_power_of_2(pdata->ftrace_size)) pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size); if (pdata->pmsg_size && !is_power_of_2(pdata->pmsg_size)) pdata->pmsg_size = rounddown_pow_of_two(pdata->pmsg_size); cxt->size = pdata->mem_size; cxt->phys_addr = pdata->mem_address; cxt->memtype = pdata->mem_type; cxt->record_size = pdata->record_size; cxt->console_size = pdata->console_size; cxt->ftrace_size = pdata->ftrace_size; cxt->pmsg_size = pdata->pmsg_size; cxt->dump_oops = pdata->dump_oops; cxt->ecc_info = pdata->ecc_info; paddr = cxt->phys_addr; dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size; if (dump_mem_sz) { err = ramoops_init_przs(dev, cxt, &paddr, dump_mem_sz); if (err) goto fail_out; } err = ramoops_init_prz(dev, cxt, &cxt->cprz, &paddr, cxt->console_size, 0); if (err) goto fail_init_cprz; err = ramoops_init_prz(dev, cxt, &cxt->fprz, &paddr, cxt->ftrace_size, LINUX_VERSION_CODE); if (err) goto fail_init_fprz; 
err = ramoops_init_prz(dev, cxt, &cxt->mprz, &paddr, cxt->pmsg_size, 0); if (err) goto fail_init_mprz; cxt->pstore.data = cxt; /* * Console can handle any buffer size, so prefer LOG_LINE_MAX. If we * have to handle dumps, we must have at least record_size buffer. And * for ftrace, bufsize is irrelevant (if bufsize is 0, buf will be * ZERO_SIZE_PTR). */ if (cxt->console_size) cxt->pstore.bufsize = 1024; /* LOG_LINE_MAX */ cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize); cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL); spin_lock_init(&cxt->pstore.buf_lock); if (!cxt->pstore.buf) { pr_err("cannot allocate pstore buffer\n"); err = -ENOMEM; goto fail_clear; } err = pstore_register(&cxt->pstore); if (err) { pr_err("registering with pstore failed\n"); goto fail_buf; } /* * Update the module parameter variables as well so they are visible * through /sys/module/ramoops/parameters/ */ mem_size = pdata->mem_size; mem_address = pdata->mem_address; record_size = pdata->record_size; dump_oops = pdata->dump_oops; pr_info("attached 0x%lx@0x%llx, ecc: %d/%d\n", cxt->size, (unsigned long long)cxt->phys_addr, cxt->ecc_info.ecc_size, cxt->ecc_info.block_size); #ifdef CONFIG_LGE_HANDLE_PANIC /* write ramoops addr to imem */ lge_set_ram_console_addr(cxt->phys_addr, cxt->size); #endif return 0; fail_buf: kfree(cxt->pstore.buf); fail_clear: cxt->pstore.bufsize = 0; cxt->max_dump_cnt = 0; kfree(cxt->mprz); fail_init_mprz: kfree(cxt->fprz); fail_init_fprz: kfree(cxt->cprz); fail_init_cprz: ramoops_free_przs(cxt); fail_out: return err; }