Example #1
static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= __PAGE_OFFSET,
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result;
	int i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Prepare a temporary mapping for the kernel text */
	result = set_up_temporary_text_mapping(pgd);
	if (result)
		return result;

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}

	temp_level4_pgt = __pa(pgd);
	return 0;
}
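The .alloc_pgt_page callback is not part of the example. In the hibernation code it is presumably a thin wrapper that hands out pages from the safe-pages pool, matching the get_safe_page(GFP_ATOMIC) call used for the PGD above. A minimal sketch, assuming exactly that:

static void *alloc_pgt_page(void *context)
{
	/* Assumed helper: one safe page per page-table page, NULL on failure. */
	return (void *)get_safe_page(GFP_ATOMIC);
}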
Example #2
static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.kernel_mapping = true,
	};
	unsigned long mstart, mend;
	int result;
	int i;

	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!temp_level4_pgt)
		return -ENOMEM;

	/* It is safe to reuse the original kernel mapping */
	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
		init_level4_pgt[pgd_index(__START_KERNEL_map)]);

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, temp_level4_pgt,
						   mstart, mend);

		if (result)
			return result;
	}

	return 0;
}

int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	if ((error = set_up_temporary_mappings()))
		return error;

	relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;
	memcpy(relocated_restore_code, &core_restore_code,
	       &restore_registers - &core_restore_code);

	restore_image();
	return 0;
}
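Both variants drive kernel_ident_mapping_init() with a struct x86_mapping_info, whose fields differ between kernel versions (Example #1 sets .offset, Example #2 sets .kernel_mapping). As orientation only, the interface looks roughly like the sketch below; the exact field set and comments are an approximation, not a definitive header excerpt.

/* Approximate shape of the interface (arch/x86/include/asm/init.h);
 * field names and availability vary across kernel versions.
 */
struct x86_mapping_info {
	void *(*alloc_pgt_page)(void *);	/* allocate one page-table page */
	void *context;				/* context passed to alloc_pgt_page */
	unsigned long pmd_flag;			/* flags used for 2M PMD entries */
	unsigned long offset;			/* ident mapping offset (newer kernels) */
	bool kernel_mapping;			/* kernel-text mapping (older kernels) */
};

int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend);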
Example #3
/*
 * Adds the specified range to what will become the new identity mappings.
 * Once all ranges have been added, the new mapping is activated by calling
 * finalize_identity_maps() below.
 */
void add_identity_map(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;

	/* Align boundary to 2M. */
	start = round_down(start, PMD_SIZE);
	end = round_up(end, PMD_SIZE);
	if (start >= end)
		return;

	/* Build the mapping. */
	kernel_ident_mapping_init(&mapping_info, (pgd_t *)level4p,
				  start, end);
}
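The comment above refers to finalize_identity_maps(), which is not shown in the example. In the boot-time decompressor, activating the new tables presumably amounts to loading the top-level page table into CR3; a sketch under that assumption:

void finalize_identity_maps(void)
{
	/* Assumed activation step: switch to the newly built identity mappings. */
	write_cr3(level4p);
}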
Example #4
/*
 * Adds the specified range to what will become the new identity mappings.
 * Once all ranges have been added, the new mapping is activated by calling
 * finalize_identity_maps() below.
 */
void add_identity_map(unsigned long start, unsigned long size)
{
	struct x86_mapping_info mapping_info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.context	= &pgt_data,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
	};
	unsigned long end = start + size;

	/* Make sure we have a top level page table ready to use. */
	if (!level4p)
		prepare_level4();

	/* Align boundary to 2M. */
	start = round_down(start, PMD_SIZE);
	end = round_up(end, PMD_SIZE);
	if (start >= end)
		return;

	/* Build the mapping. */
	kernel_ident_mapping_init(&mapping_info, (pgd_t *)level4p,
				  start, end);
}
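In this variant the allocator callback gets its backing store through .context = &pgt_data; neither pgt_data nor alloc_pgt_page is shown. A plausible sketch is a bump allocator over a preallocated buffer, as below; the struct layout and field names are assumptions based on the initializer, not taken from the example.

/* Assumed backing store: a preallocated buffer that alloc_pgt_page()
 * carves into PAGE_SIZE-d page-table pages, one per call.
 */
struct alloc_pgt_data {
	unsigned char	*pgt_buf;	/* preallocated page-table buffer */
	unsigned long	pgt_buf_size;	/* total size of the buffer */
	unsigned long	pgt_buf_offset;	/* next free offset into the buffer */
};

static struct alloc_pgt_data pgt_data;

static void *alloc_pgt_page(void *context)
{
	struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context;
	unsigned char *entry;

	/* Refuse to allocate once the preallocated buffer is exhausted. */
	if (pages->pgt_buf_offset >= pages->pgt_buf_size)
		return NULL;

	entry = pages->pgt_buf + pages->pgt_buf_offset;
	pages->pgt_buf_offset += PAGE_SIZE;

	return entry;
}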