Code example #1 (score: 0)
File: ftrace.c — Project: 0xroot/Blackphone-BP1-Kernel
/*
 * Arch hook invoked before ftrace patches kernel code.
 *
 * Kernel and module text are normally mapped read-only; flip both to
 * read-write so the upcoming instruction rewriting can proceed.  The
 * matching post hook (not visible here) is expected to restore the
 * read-only mappings.
 *
 * Returns 0 unconditionally (success).
 */
int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();
	set_all_modules_text_rw();
	return 0;
}
Code example #2 (score: 0)
/**
 * Recover a chunk of memory that failed.
 *
 * We can only recover a chunk whose pages were processed originally with
 * bitfix_process_page().
 *
 * TODO: we might be able to recover from bit errors of two chips at the same
 * time if we pass in a CRC function here too.  Then we can try all combinations
 * of only recovering bit errors from different sets of failing chips and find
 * one that makes CRC pass.
 *
 * @failed_addr: Any address in the chunk that failed.
 * @should_skip_fn: This will be called one page at a time.  If a page was never
 *	processed with calls to bitfix_process_page() then the should_skip_fn
 *	_must_ return true.  This means that the skip function must call the
 *	bitfix_does_overlap_reserved() function.
 */
void bitfix_recover_chunk(phys_addr_t failed_addr,
			  bitfix_should_skip_fn_t should_skip_fn)
{
	const phys_addr_t bad_chunk_addr = failed_addr & ~(CHUNK_MASK);
	int pgnum;
	int bytes_fixed = 0;

	if (!bitfix_enabled)
		return;

	/*
	 * Use %pa for phys_addr_t: %08x is a format mismatch (and truncates)
	 * when phys_addr_t is 64-bit, e.g. with LPAE.  %pa takes a pointer.
	 */
	pr_info("%s: Attempting recovery at %pa\n", __func__, &failed_addr);

	/*
	 * We recover to recover_chunk and then copy instead of recovering
	 * directly to the destination chunk.  That could be critical if
	 * the block we're recovering to is used for something important
	 * (like maybe storing the bitfix code?)
	 */
	memset(recover_chunk, 0, CHUNK_SIZE);
	_bitfix_recover_chunk(bad_chunk_addr, should_skip_fn);

	/* Do comparisons to characterize the corruption and copy. */
	for (pgnum = 0; pgnum < PAGES_PER_CHUNK; pgnum++) {
		u32 offset = pgnum * PAGE_SIZE;
		phys_addr_t addr = bad_chunk_addr + offset;
		u32 *virt;
		u32 *recover_page = recover_chunk + offset / sizeof(u32);

		if (should_skip_fn(addr)) {
			struct page *page = phys_to_page(addr);

			/*
			 * If the page is unused then we really don't care that
			 * we can't recover it.  Just continue on.
			 * Use the page_count() accessor rather than reading
			 * page->_count directly.
			 */
			if (page_count(page) == 0) {
				pr_info("%s: Skip unused page at %pa\n",
					__func__, &addr);
				continue;
			}

			/*
			 * If one page in a chunk has bit errors it's likely
			 * that other pages in the chunk will have errors too.
			 * Unfortunately we can't check, so reboot and be safe
			 * rather than sorry.
			 */
			pr_err("%s: Can't recover skipped page at %pa\n",
			       __func__, &addr);
			panic("Rebooting due to likely bit errors\n");
		}

		virt = kmap_atomic(phys_to_page(addr));
		bytes_fixed += bitfix_compare(addr, virt, recover_page);

		/*
		 * We might end up recovering something which is marked as
		 * read-only, and crash in here.  So, we set all kernel text
		 * to be read-write for this operation.
		 * The early_boot_irqs_disabled is to avoid a warning in
		 * smp_call_function_many() due to the A-15 TLB erratum
		 * workaround.
		 */
		early_boot_irqs_disabled = 1;
		set_all_modules_text_rw();
		set_kernel_text_rw();

		memcpy(virt, recover_page, PAGE_SIZE);
		kunmap_atomic(virt);

		set_kernel_text_ro();
		set_all_modules_text_ro();
		early_boot_irqs_disabled = 0;
	}

	BUG_ON(bytes_fixed > MAX_BYTES_TO_FIX);
}