Example #1
0
/*
 * Apply an ftrace command with the kernel text temporarily writable.
 * @data points at the command word; kernel text is normally mapped
 * read-only, so lift that protection just for the patching window.
 * Always returns 0.
 */
static int __ftrace_modify_code(void *data)
{
	int command = *(int *)data;

	set_kernel_text_rw();
	ftrace_modify_all_code(command);
	set_kernel_text_ro();

	return 0;
}
Example #2
0
/*
 * machine_kexec - jump into a new kernel image.
 *
 * Copies the relocation stub into the control code page, parameterizes
 * it, and soft-restarts into it.  Does not return.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long reboot_code_buffer_phys;
	/* Virtual address of the relocation stub; replaced by its copy below. */
	unsigned long reboot_entry = (unsigned long)relocate_new_kernel;
	unsigned long reboot_entry_phys;
	void *reboot_code_buffer;

	/*
	 * This can only happen if machine_shutdown() failed to disable some
	 * CPU, and that can only happen if the checks in
	 * machine_kexec_prepare() were not correct. If this fails, we can't
	 * reliably kexec anyway, so BUG_ON is appropriate.
	 */
	BUG_ON(num_online_cpus() > 1);

	page_list = image->head & PAGE_MASK;

	/* we need both effective and real address here */
	reboot_code_buffer_phys =
	    page_to_pfn(image->control_code_page) << PAGE_SHIFT;
	reboot_code_buffer = page_address(image->control_code_page);

	/*
	 * Prepare parameters for reboot_code_buffer.  Kernel text is made
	 * writable first — presumably because these kexec_* parameter
	 * variables live in the (normally read-only) text section next to
	 * the stub.  It is never set back to read-only: we are about to
	 * leave this kernel entirely.
	 */
	set_kernel_text_rw();
	kexec_start_address = image->start;
	kexec_indirection_page = page_list;
	kexec_mach_type = machine_arch_type;
	/*
	 * GCC "?:" extension: use the device-tree blob address if one was
	 * set up, otherwise derive the ATAGs address from the zImage load
	 * offset.
	 */
	kexec_boot_atags = dt_mem ?: image->start - KEXEC_ARM_ZIMAGE_OFFSET
				     + KEXEC_ARM_ATAGS_OFFSET;

	/* copy our kernel relocation code to the control code page */
	reboot_entry = fncpy(reboot_code_buffer,
			     reboot_entry,
			     relocate_new_kernel_size);
	/* Translate the copied stub's virtual entry point to physical. */
	reboot_entry_phys = (unsigned long)reboot_entry +
		(reboot_code_buffer_phys - (unsigned long)reboot_code_buffer);

	pr_info("Bye!\n");

	/* Optional board-specific hook run just before the restart. */
	if (kexec_reinit)
		kexec_reinit();

	/* Jump to the relocation stub at its physical address; no return. */
	soft_restart(reboot_entry_phys);
}
Example #3
0
/*
 * ftrace hook run before code patching: make kernel and module text
 * writable so call sites can be modified.  Always returns 0 (success);
 * a matching post-process hook presumably restores read-only — not
 * visible here.
 */
int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();
	set_all_modules_text_rw();
	return 0;
}
/**
 * bitfix_recover_chunk() - Recover a chunk of memory that failed.
 *
 * We can only recover a chunk whose pages were processed originally with
 * bitfix_process_page().
 *
 * TODO: we might be able to recover from bit errors of two chips at the same
 * time if we pass in a CRC function here too.  Then we can try all combinations
 * of only recovering bit errors from different sets of failing chips and find
 * one that makes CRC pass.
 *
 * @failed_addr: Any address in the chunk that failed.
 * @should_skip_fn: This will be called one page at a time.  If a page was never
 *	processed with calls to bitfix_process_page() then the should_skip_fn
 *	_must_ return true.  This means that the skip function must call the
 *	bitfix_does_overlap_reserved() function.
 */
void bitfix_recover_chunk(phys_addr_t failed_addr,
			  bitfix_should_skip_fn_t should_skip_fn)
{
	/* Round down to the start of the chunk containing the failure. */
	const phys_addr_t bad_chunk_addr = failed_addr & ~(CHUNK_MASK);
	int pgnum;
	int bytes_fixed = 0;

	/* No-op unless the feature was enabled at boot. */
	if (!bitfix_enabled)
		return;

	/*
	 * NOTE(review): %08x assumes a 32-bit phys_addr_t; use %pa (and pass
	 * &failed_addr) if this can build with LPAE — TODO confirm.
	 */
	pr_info("%s: Attempting recovery at %08x\n", __func__, failed_addr);

	/*
	 * We recover to recover_chunk and then copy instead of recovering
	 * directly to the destination chunk.  That could be critical if
	 * the block we're recovering to is used for something important
	 * (like maybe storing the bitfix code?)
	 */
	memset(recover_chunk, 0, CHUNK_SIZE);
	_bitfix_recover_chunk(bad_chunk_addr, should_skip_fn);

	/* Do comparisons to characterize the corruption and copy. */
	for (pgnum = 0; pgnum < PAGES_PER_CHUNK; pgnum++) {
		u32 offset = pgnum * PAGE_SIZE;
		phys_addr_t addr = bad_chunk_addr + offset;
		u32 *virt;
		/* recover_chunk is a u32 array, hence the /sizeof(u32). */
		u32 *recover_page = recover_chunk + offset / sizeof(u32);

		if (should_skip_fn(addr)) {
			struct page *page = phys_to_page(addr);

			/*
			 * If the page is unused then we really don't care that
			 * we can't recover it.  Just continue on.
			 */
			if (atomic_read(&page->_count) == 0) {
				pr_info("%s: Skip unused page at %08x\n",
					__func__, addr);
				continue;
			}

			/*
			 * If one page in a chunk has bit errors it's likely
			 * that other pages in the chunk will have errors too.
			 * Unfortunately we can't check, so reboot and be safe
			 * rather than sorry.
			 */
			pr_err("%s: Can't recover skipped page at %08x\n",
			       __func__, addr);
			panic("Rebooting due to likely bit errors\n");
		}

		/* Map the damaged page; unmapped below after the copy. */
		virt = kmap_atomic(phys_to_page(addr));
		/* Count how many bytes actually differ, for the sanity BUG_ON. */
		bytes_fixed += bitfix_compare(addr, virt, recover_page);

		/*
		 * We might end up recovering something which is marked as
		 * read-only, and crash in here.  So, we set all kernel text
		 * to be read-write for this operation.
		 * The early_boot_irqs_disabled is to avoid a warning in
		 * smp_call_function_many() due to the A-15 TLB erratum
		 * workaround.
		 */
		early_boot_irqs_disabled = 1;
		set_all_modules_text_rw();
		set_kernel_text_rw();

		memcpy(virt, recover_page, PAGE_SIZE);
		kunmap_atomic(virt);

		/* Restore protections in reverse order of how we dropped them. */
		set_kernel_text_ro();
		set_all_modules_text_ro();
		early_boot_irqs_disabled = 0;
	}

	/* More corruption than the hardware model allows — don't trust it. */
	BUG_ON(bytes_fixed > MAX_BYTES_TO_FIX);
}