/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>

#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>

/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);
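/*
 * core_restore_code and restore_registers, referenced by
 * swsusp_arch_resume() below, are likewise defined in
 * hibernate_asm_64.S; the memcpy() there relocates exactly the code
 * between those two labels.
 */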

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;

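/*
 * Temporary page tables built by set_up_temporary_mappings(); the
 * assembly restore code switches to them while the image pages are
 * copied back into place.
 */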
pgd_t *temp_level4_pgt __visible;

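/*
 * The page-copying loop runs from this safe-page copy of
 * core_restore_code, so the original code cannot be clobbered by an
 * image page restored to the same address mid-copy.
 */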
void *relocated_restore_code __visible;

/*
 * Page-table allocation callback for kernel_ident_mapping_init(): the
 * tables are built from safe pages, i.e. pages that will not be
 * overwritten while the image is being restored.
 */
static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}

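/*
 * Build page tables from safe pages only: the boot kernel's regular
 * page tables may occupy pages belonging to the image, so the restore
 * code cannot rely on them while it copies the image into place.
 */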
static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.kernel_mapping = true,
	};
	unsigned long mstart, mend;
	int result;
	int i;

	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!temp_level4_pgt)
		return -ENOMEM;

	/* It is safe to reuse the original kernel mapping */
	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
		init_level4_pgt[pgd_index(__START_KERNEL_map)]);

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, temp_level4_pgt,
						   mstart, mend);
		if (result)
			return result;
	}

	return 0;
}

int swsusp_arch_resume(void)
{
	int error;

	/* We have enough memory and from now on we cannot recover */
	error = set_up_temporary_mappings();
	if (error)
		return error;

	/* Relocate the page-copying code to a safe page */
	relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;
	memcpy(relocated_restore_code, &core_restore_code,
	       &restore_registers - &core_restore_code);

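	/*
	 * restore_image() switches to the temporary page tables, copies the
	 * image pages into place from the relocated code and jumps to
	 * restore_jump_address; on success it does not return here.
	 */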
	restore_image();
	return 0;
}

/*
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

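/*
 * Architecture-specific part of the hibernation image header, written
 * by arch_hibernation_header_save() when the image is created and read
 * back by arch_hibernation_header_restore() during resume.
 */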
struct restore_data_record {
	unsigned long jump_address;
	unsigned long cr3;
	unsigned long magic;
};

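/*
 * Arbitrary constant used by arch_hibernation_header_restore() to
 * verify that the header actually carries a restore_data_record with
 * the layout above.
 */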
#define RESTORE_MAGIC	0x0123456789ABCDEFUL

/**
 *	arch_hibernation_header_save - populate the architecture specific part
 *		of a hibernation image header
 *	@addr: address to save the data at
 *	@max_size: maximum size of the data that can be stored at @addr
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->jump_address = restore_jump_address;
	rdr->cr3 = restore_cr3;
	rdr->magic = RESTORE_MAGIC;
	return 0;
}

/**
 *	arch_hibernation_header_restore - read the architecture specific data
 *		from the hibernation image header
 *	@addr: address to read the data from
 *
 *	Returns 0 on success or -EINVAL if the header magic does not match.
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	restore_cr3 = rdr->cr3;
	return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}