/*
 * Common EFI (Extensible Firmware Interface) support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2002 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 * Copyright (C) 2013 SuSE Labs
 *	Borislav Petkov <bp@suse.de> - runtime services VA mapping
 *
 * Copied from efi_32.c to eliminate the duplicated code between EFI
 * 32/64 support code. --ying 2007-10-26
 *
 * All EFI Runtime Services are not implemented yet as EFI only
 * supports physical mode addressing on SoftSDV. This is to be fixed
 * in a future version.  --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls.  --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *	Skip non-WB memory and ignore empty memory ranges.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/bcd.h>

#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/time.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/rtc.h>
#include <asm/uv/uv.h>

#define EFI_DEBUG

struct efi_memory_map memmap;

static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;

static efi_config_table_type_t arch_tables[] __initdata = {
#ifdef CONFIG_X86_UV
	{UV_SYSTEM_TABLE_GUID, "UVsystab", &efi.uv_systab},
#endif
	{NULL_GUID, NULL, NULL},
};

u64 efi_setup;		/* efi setup_data physical address */

static int add_efi_memmap __initdata;
static int __init setup_add_efi_memmap(char *arg)
{
	add_efi_memmap = 1;
	return 0;
}
early_param("add_efi_memmap", setup_add_efi_memmap);

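/*
 * Call the firmware's SetVirtualAddressMap() service while it is still
 * running in physical mode; efi_call_phys_prolog()/epilog() set up and
 * tear down the temporary mappings/state needed for the physical call.
 */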
static efi_status_t __init phys_efi_set_virtual_address_map(
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	efi_status_t status;

	efi_call_phys_prolog();
	status = efi_call_phys(efi_phys.set_virtual_address_map,
			       memory_map_size, descriptor_size,
			       descriptor_version, virtual_map);
	efi_call_phys_epilog();
	return status;
}

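/*
 * Read the current time from the EFI GetTime() runtime service and
 * convert it to a timespec.
 */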
void efi_get_time(struct timespec *now)
{
	efi_status_t status;
	efi_time_t eft;
	efi_time_cap_t cap;

	status = efi.get_time(&eft, &cap);
	if (status != EFI_SUCCESS)
		pr_err("Oops: efitime: can't read time!\n");

	now->tv_sec = mktime(eft.year, eft.month, eft.day, eft.hour,
			     eft.minute, eft.second);
	now->tv_nsec = 0;
}

/*
 * Tell the kernel about the EFI memory map.  This might include
 * more than the max 128 entries that can fit in the e820 legacy
 * (zeropage) memory map.
 */

static void __init do_add_efi_memmap(void)
{
	void *p;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		efi_memory_desc_t *md = p;
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
		int e820_type;

		switch (md->type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_CONVENTIONAL_MEMORY:
			if (md->attribute & EFI_MEMORY_WB)
				e820_type = E820_RAM;
			else
				e820_type = E820_RESERVED;
			break;
		case EFI_ACPI_RECLAIM_MEMORY:
			e820_type = E820_ACPI;
			break;
		case EFI_ACPI_MEMORY_NVS:
			e820_type = E820_NVS;
			break;
		case EFI_UNUSABLE_MEMORY:
			e820_type = E820_UNUSABLE;
			break;
		default:
			/*
			 * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
			 * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
			 * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
			 */
			e820_type = E820_RESERVED;
			break;
		}
		e820_add_region(start, size, e820_type);
	}
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}

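/*
 * Record the boot-time EFI memory map described by boot_params.efi_info
 * and reserve it in memblock so that it is not reused before we get a
 * chance to map and parse it.
 */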
int __init efi_memblock_x86_reserve_range(void)
{
	struct efi_info *e = &boot_params.efi_info;
	unsigned long pmap;

	if (efi_enabled(EFI_PARAVIRT))
		return 0;

#ifdef CONFIG_X86_32
	/* Can't handle data above 4GB at this time */
	if (e->efi_memmap_hi) {
		pr_err("Memory map is above 4GB, disabling EFI.\n");
		return -EINVAL;
	}
	pmap = e->efi_memmap;
#else
	pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
#endif
	memmap.phys_map		= (void *)pmap;
	memmap.nr_map		= e->efi_memmap_size /
				  e->efi_memdesc_size;
	memmap.desc_size	= e->efi_memdesc_size;
	memmap.desc_version	= e->efi_memdesc_version;

	memblock_reserve(pmap, memmap.nr_map * memmap.desc_size);

	efi.memmap = &memmap;

	return 0;
}

static void __init print_efi_memmap(void)
{
#ifdef EFI_DEBUG
	efi_memory_desc_t *md;
	void *p;
	int i;

	for (p = memmap.map, i = 0;
	     p < memmap.map_end;
	     p += memmap.desc_size, i++) {
		char buf[64];

		md = p;
		pr_info("mem%02u: %s range=[0x%016llx-0x%016llx) (%lluMB)\n",
			i, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr,
			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
			(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
	}
#endif  /*  EFI_DEBUG  */
}

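/*
 * Undo the early mapping of the EFI memory map and clear EFI_MEMMAP once
 * the map is no longer needed.
 */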
void __init efi_unmap_memmap(void)
{
	clear_bit(EFI_MEMMAP, &efi.flags);
	if (memmap.map) {
		early_memunmap(memmap.map, memmap.nr_map * memmap.desc_size);
		memmap.map = NULL;
	}
}

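/*
 * Map the firmware's EFI system table (32-bit or 64-bit layout), copy it
 * into the kernel's native efi_systab representation and verify its
 * signature. On a kexec boot, fw_vendor, runtime and tables are taken
 * from the setup_data handed over by the previous kernel.
 */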
static int __init efi_systab_init(void *phys)
{
	if (efi_enabled(EFI_64BIT)) {
		efi_system_table_64_t *systab64;
		struct efi_setup_data *data = NULL;
		u64 tmp = 0;

		if (efi_setup) {
			data = early_memremap(efi_setup, sizeof(*data));
			if (!data)
				return -ENOMEM;
		}
		systab64 = early_memremap((unsigned long)phys,
					  sizeof(*systab64));
		if (systab64 == NULL) {
			pr_err("Couldn't map the system table!\n");
			if (data)
				early_memunmap(data, sizeof(*data));
			return -ENOMEM;
		}

		efi_systab.hdr = systab64->hdr;
		efi_systab.fw_vendor = data ? (unsigned long)data->fw_vendor :
					      systab64->fw_vendor;
		tmp |= data ? data->fw_vendor : systab64->fw_vendor;
		efi_systab.fw_revision = systab64->fw_revision;
		efi_systab.con_in_handle = systab64->con_in_handle;
		tmp |= systab64->con_in_handle;
		efi_systab.con_in = systab64->con_in;
		tmp |= systab64->con_in;
		efi_systab.con_out_handle = systab64->con_out_handle;
		tmp |= systab64->con_out_handle;
		efi_systab.con_out = systab64->con_out;
		tmp |= systab64->con_out;
		efi_systab.stderr_handle = systab64->stderr_handle;
		tmp |= systab64->stderr_handle;
		efi_systab.stderr = systab64->stderr;
		tmp |= systab64->stderr;
		efi_systab.runtime = data ?
				     (void *)(unsigned long)data->runtime :
				     (void *)(unsigned long)systab64->runtime;
		tmp |= data ? data->runtime : systab64->runtime;
		efi_systab.boottime = (void *)(unsigned long)systab64->boottime;
		tmp |= systab64->boottime;
		efi_systab.nr_tables = systab64->nr_tables;
		efi_systab.tables = data ? (unsigned long)data->tables :
					   systab64->tables;
		tmp |= data ? data->tables : systab64->tables;

		early_memunmap(systab64, sizeof(*systab64));
		if (data)
			early_memunmap(data, sizeof(*data));
#ifdef CONFIG_X86_32
		if (tmp >> 32) {
			pr_err("EFI data located above 4GB, disabling EFI.\n");
			return -EINVAL;
		}
#endif
	} else {
		efi_system_table_32_t *systab32;

		systab32 = early_memremap((unsigned long)phys,
					  sizeof(*systab32));
		if (systab32 == NULL) {
			pr_err("Couldn't map the system table!\n");
			return -ENOMEM;
		}

		efi_systab.hdr = systab32->hdr;
		efi_systab.fw_vendor = systab32->fw_vendor;
		efi_systab.fw_revision = systab32->fw_revision;
		efi_systab.con_in_handle = systab32->con_in_handle;
		efi_systab.con_in = systab32->con_in;
		efi_systab.con_out_handle = systab32->con_out_handle;
		efi_systab.con_out = systab32->con_out;
		efi_systab.stderr_handle = systab32->stderr_handle;
		efi_systab.stderr = systab32->stderr;
		efi_systab.runtime = (void *)(unsigned long)systab32->runtime;
		efi_systab.boottime = (void *)(unsigned long)systab32->boottime;
		efi_systab.nr_tables = systab32->nr_tables;
		efi_systab.tables = systab32->tables;

		early_memunmap(systab32, sizeof(*systab32));
	}

	efi.systab = &efi_systab;

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}
	if ((efi.systab->hdr.revision >> 16) == 0)
		pr_err("Warning: System table version %d.%02d, expected 1.00 or greater!\n",
		       efi.systab->hdr.revision >> 16,
		       efi.systab->hdr.revision & 0xffff);

	set_bit(EFI_SYSTEM_TABLES, &efi.flags);

	return 0;
}

static int __init efi_runtime_init32(void)
{
	efi_runtime_services_32_t *runtime;

	runtime = early_memremap((unsigned long)efi.systab->runtime,
				 sizeof(efi_runtime_services_32_t));
	if (!runtime) {
		pr_err("Could not map the runtime service table!\n");
		return -ENOMEM;
	}

	/*
	 * We will only need *early* access to the SetVirtualAddressMap
	 * EFI runtime service. All other runtime services will be called
	 * via the virtual mapping.
	 */
	efi_phys.set_virtual_address_map =
			(efi_set_virtual_address_map_t *)
			(unsigned long)runtime->set_virtual_address_map;
	early_memunmap(runtime, sizeof(efi_runtime_services_32_t));

	return 0;
}

static int __init efi_runtime_init64(void)
{
	efi_runtime_services_64_t *runtime;

	runtime = early_memremap((unsigned long)efi.systab->runtime,
				 sizeof(efi_runtime_services_64_t));
	if (!runtime) {
		pr_err("Could not map the runtime service table!\n");
		return -ENOMEM;
	}

	/*
	 * We will only need *early* access to the SetVirtualAddressMap
	 * EFI runtime service. All other runtime services will be called
	 * via the virtual mapping.
	 */
	efi_phys.set_virtual_address_map =
			(efi_set_virtual_address_map_t *)
			(unsigned long)runtime->set_virtual_address_map;
	early_memunmap(runtime, sizeof(efi_runtime_services_64_t));

	return 0;
}

static int __init efi_runtime_init(void)
{
	int rv;

	/*
	 * Check out the runtime services table. We need to map
	 * the runtime services table so that we can grab the physical
	 * address of several of the EFI runtime functions, needed to
	 * set the firmware into virtual mode.
	 *
	 * When EFI_PARAVIRT is in force we cannot map the runtime services
	 * memory region because we do not have direct access to it. However,
	 * runtime services remain available through proxy functions (e.g. the
	 * Xen dom0 EFI implementation issues a special hypercall which
	 * executes the relevant EFI function), which is why they are always
	 * enabled here.
	 */

	if (!efi_enabled(EFI_PARAVIRT)) {
		if (efi_enabled(EFI_64BIT))
			rv = efi_runtime_init64();
		else
			rv = efi_runtime_init32();

		if (rv)
			return rv;
	}

	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);

	return 0;
}

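/*
 * Map the EFI memory map that was reserved earlier and, if the
 * "add_efi_memmap" parameter was given, merge it into the e820 map.
 */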
static int __init efi_memmap_init(void)
{
	if (efi_enabled(EFI_PARAVIRT))
		return 0;

	/* Map the EFI memory map */
	memmap.map = early_memremap((unsigned long)memmap.phys_map,
				    memmap.nr_map * memmap.desc_size);
	if (memmap.map == NULL) {
		pr_err("Could not map the memory map!\n");
		return -ENOMEM;
	}
	memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);

	if (add_efi_memmap)
		do_add_efi_memmap();

	set_bit(EFI_MEMMAP, &efi.flags);

	return 0;
}

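/*
 * Main early EFI initialization: locate and sanity-check the system
 * table, report the firmware vendor and revision, parse the
 * configuration tables and set up the runtime services and memory map.
 */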
void __init efi_init(void)
{
	efi_char16_t *c16;
	char vendor[100] = "unknown";
	int i = 0;
	void *tmp;

#ifdef CONFIG_X86_32
	if (boot_params.efi_info.efi_systab_hi ||
	    boot_params.efi_info.efi_memmap_hi) {
		pr_info("Table located above 4GB, disabling EFI.\n");
		return;
	}
	efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
#else
	efi_phys.systab = (efi_system_table_t *)
			  (boot_params.efi_info.efi_systab |
			  ((__u64)boot_params.efi_info.efi_systab_hi << 32));
#endif

	if (efi_systab_init(efi_phys.systab))
		return;

	efi.config_table = (unsigned long)efi.systab->tables;
	efi.fw_vendor	 = (unsigned long)efi.systab->fw_vendor;
	efi.runtime	 = (unsigned long)efi.systab->runtime;

	/*
	 * Show what we know for posterity
	 */
	c16 = tmp = early_memremap(efi.systab->fw_vendor, 2);
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
			vendor[i] = *c16++;
		vendor[i] = '\0';
		early_memunmap(tmp, 2);
	} else {
		pr_err("Could not map the firmware vendor!\n");
	}

	pr_info("EFI v%u.%.02u by %s\n",
		efi.systab->hdr.revision >> 16,
		efi.systab->hdr.revision & 0xffff, vendor);

	if (efi_reuse_config(efi.systab->tables, efi.systab->nr_tables))
		return;

	if (efi_config_init(arch_tables))
		return;

	/*
	 * Note: We currently don't support runtime services on an EFI
	 * that doesn't match the kernel 32/64-bit mode.
	 */

	if (!efi_runtime_supported())
		pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
	else {
		if (efi_runtime_disabled() || efi_runtime_init())
			return;
	}
	if (efi_memmap_init())
		return;

	print_efi_memmap();
}

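/*
 * EFI initialization that runs later in boot; currently this only covers
 * the BGRT (boot graphics resource table).
 */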
void __init efi_late_init(void)
{
	efi_bgrt_init();
}

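/*
 * Make an EFI region executable or non-executable, converting the EFI
 * page range to native pages first.
 */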
void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
{
	u64 addr, npages;

	addr = md->virt_addr;
	npages = md->num_pages;

	memrange_efi_to_native(&addr, &npages);

	if (executable)
		set_memory_x(addr, npages);
	else
		set_memory_nx(addr, npages);
}

void __init runtime_code_page_mkexec(void)
{
	efi_memory_desc_t *md;
	void *p;

	/* Make EFI runtime service code area executable */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;

		if (md->type != EFI_RUNTIME_SERVICES_CODE)
			continue;

		efi_set_executable(md, true);
	}
}

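/*
 * Mark a physical range uncacheable, rounding the size up to a whole
 * number of EFI pages.
 */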
void __init efi_memory_uc(u64 addr, unsigned long size)
{
	unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
	u64 npages;

	npages = round_up(size, page_shift) / page_shift;
	memrange_efi_to_native(&addr, &npages);
	set_memory_uc(addr, npages);
}

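/*
 * Map one EFI region for the legacy efi=old_map scheme: reuse the
 * kernel's direct mapping when the range is already mapped (forcing it
 * uncacheable if the region is not write-back), otherwise ioremap it.
 */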
void __init old_map_region(efi_memory_desc_t *md)
{
	u64 start_pfn, end_pfn, end;
	unsigned long size;
	void *va;

	start_pfn = PFN_DOWN(md->phys_addr);
	size	  = md->num_pages << PAGE_SHIFT;
	end	  = md->phys_addr + size;
	end_pfn   = PFN_UP(end);

	if (pfn_range_is_mapped(start_pfn, end_pfn)) {
		va = __va(md->phys_addr);

		if (!(md->attribute & EFI_MEMORY_WB))
			efi_memory_uc((u64)(unsigned long)va, size);
	} else
		va = efi_ioremap(md->phys_addr, size,
				 md->type, md->attribute);

	md->virt_addr = (u64) (unsigned long) va;
	if (!va)
		pr_err("ioremap of 0x%llX failed!\n",
		       (unsigned long long)md->phys_addr);
}

/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
	void *p;
	efi_memory_desc_t *md, *prev_md = NULL;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		u64 prev_size;
		md = p;

		if (!prev_md) {
			prev_md = md;
			continue;
		}

		if (prev_md->type != md->type ||
		    prev_md->attribute != md->attribute) {
			prev_md = md;
			continue;
		}

		prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;

		if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
			prev_md->num_pages += md->num_pages;
			md->type = EFI_RESERVED_TYPE;
			md->attribute = 0;
			continue;
		}
		prev_md = md;
	}
}

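/*
 * If this descriptor covers the physical EFI system table, record the
 * table's new virtual address in efi.systab.
 */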
static void __init get_systab_virt_addr(efi_memory_desc_t *md)
{
	unsigned long size;
	u64 end, systab;

	size = md->num_pages << EFI_PAGE_SHIFT;
	end = md->phys_addr + size;
	systab = (u64)(unsigned long)efi_phys.systab;
	if (md->phys_addr <= systab && systab < end) {
		systab += md->virt_addr - md->phys_addr;
		efi.systab = (efi_system_table_t *)(unsigned long)systab;
	}
}

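/*
 * Save a copy of the descriptors that matter at runtime so they can be
 * handed to a kexec'd kernel via efi_runtime_map_setup().
 */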
static void __init save_runtime_map(void)
{
#ifdef CONFIG_KEXEC
	efi_memory_desc_t *md;
	void *tmp, *p, *q = NULL;
	int count = 0;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;

		if (!(md->attribute & EFI_MEMORY_RUNTIME) ||
		    (md->type == EFI_BOOT_SERVICES_CODE) ||
		    (md->type == EFI_BOOT_SERVICES_DATA))
			continue;
		tmp = krealloc(q, (count + 1) * memmap.desc_size, GFP_KERNEL);
		if (!tmp)
			goto out;
		q = tmp;

		memcpy(q + count * memmap.desc_size, md, memmap.desc_size);
		count++;
	}

	efi_runtime_map_setup(q, count, memmap.desc_size);
	return;

out:
	kfree(q);
	pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n");
#endif
}

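/*
 * Grow the buffer used to build the new memory map by doubling its page
 * order. The old buffer's contents are copied over and the old buffer is
 * freed, also on allocation failure (in which case NULL is returned).
 */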
static void *realloc_pages(void *old_memmap, int old_shift)
{
	void *ret;

	ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
	if (!ret)
		goto out;

	/*
	 * A first-time allocation doesn't have anything to copy.
	 */
	if (!old_memmap)
		return ret;

	memcpy(ret, old_memmap, PAGE_SIZE << old_shift);

out:
	free_pages((unsigned long)old_memmap, old_shift);
	return ret;
}

/*
 * Map the EFI memory ranges needed by the runtime services and copy their
 * descriptors, now carrying virtual addresses, into new_memmap.
 */
static void * __init efi_map_regions(int *count, int *pg_shift)
{
	void *p, *new_memmap = NULL;
	unsigned long left = 0;
	efi_memory_desc_t *md;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
#ifdef CONFIG_X86_64
			if (md->type != EFI_BOOT_SERVICES_CODE &&
			    md->type != EFI_BOOT_SERVICES_DATA)
#endif
				continue;
		}

		efi_map_region(md);
		get_systab_virt_addr(md);

		if (left < memmap.desc_size) {
			new_memmap = realloc_pages(new_memmap, *pg_shift);
			if (!new_memmap)
				return NULL;

			left += PAGE_SIZE << *pg_shift;
			(*pg_shift)++;
		}

		memcpy(new_memmap + (*count * memmap.desc_size), md,
		       memmap.desc_size);

		left -= memmap.desc_size;
		(*count)++;
	}

	return new_memmap;
}

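/*
 * Enter virtual mode on a kexec boot: the regions are mapped at the fixed
 * virtual addresses recorded by the first kernel and passed in via
 * setup_data, so SetVirtualAddressMap() is not called again.
 */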
static void __init kexec_enter_virtual_mode(void)
{
#ifdef CONFIG_KEXEC
	efi_memory_desc_t *md;
	void *p;

	efi.systab = NULL;

	/*
	 * We don't do virtual mode, since we don't do runtime services, on
	 * non-native EFI
	 */
	if (!efi_is_native()) {
		efi_unmap_memmap();
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	/*
	 * Map efi regions which were passed via setup_data. The virt_addr is a
	 * fixed addr which was used in the first kernel of a kexec boot.
	 */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		efi_map_region_fixed(md); /* FIXME: add error handling */
		get_systab_virt_addr(md);
	}

	save_runtime_map();

	BUG_ON(!efi.systab);

	efi_sync_low_kernel_mappings();

	/*
	 * Now that EFI is in virtual mode, update the function
	 * pointers in the runtime service table to the new virtual addresses.
	 *
	 * Call EFI services through wrapper functions.
	 */
	efi.runtime_version = efi_systab.hdr.revision;

	efi_native_runtime_setup();

	efi.set_virtual_address_map = NULL;

	if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
		runtime_code_page_mkexec();

	/* clean DUMMY object */
	efi_delete_dummy_variable();
#endif
}

/*
 * This function will switch the EFI runtime services to virtual mode.
 * Essentially, we look through the EFI memmap and map every region that
 * has the runtime attribute bit set in its memory descriptor into the
 * ->trampoline_pgd page table using a top-down VA allocation scheme.
 *
 * The old method, which updates that memory descriptor with the virtual
 * address obtained from ioremap(), is still supported when the kernel is
 * booted with efi=old_map on its command line. That method, too, allows
 * the runtime services to be called without having to thunk back into
 * physical mode for every invocation.
 *
 * The new method does a pagetable switch in a preemption-safe manner
 * so that we're in a different address space when calling a runtime
 * function. To pass function arguments we copy the PGDs of the kernel
 * page table into ->trampoline_pgd prior to each call.
 *
 * For a kexec boot specifically, the EFI runtime map of the previous
 * kernel is passed in via setup_data. In that case the runtime ranges are
 * mapped to the same virtual addresses as in the first kernel, see
 * kexec_enter_virtual_mode().
 */
static void __init __efi_enter_virtual_mode(void)
{
	int count = 0, pg_shift = 0;
	void *new_memmap = NULL;
	efi_status_t status;

	efi.systab = NULL;

	efi_merge_regions();
	new_memmap = efi_map_regions(&count, &pg_shift);
	if (!new_memmap) {
		pr_err("Error reallocating memory, EFI runtime non-functional!\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	save_runtime_map();

	BUG_ON(!efi.systab);

	if (efi_setup_page_tables(__pa(new_memmap), 1 << pg_shift)) {
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	efi_sync_low_kernel_mappings();
	efi_dump_pagetable();

	if (efi_is_native()) {
		status = phys_efi_set_virtual_address_map(
				memmap.desc_size * count,
				memmap.desc_size,
				memmap.desc_version,
				(efi_memory_desc_t *)__pa(new_memmap));
	} else {
		status = efi_thunk_set_virtual_address_map(
				efi_phys.set_virtual_address_map,
				memmap.desc_size * count,
				memmap.desc_size,
				memmap.desc_version,
				(efi_memory_desc_t *)__pa(new_memmap));
	}

	if (status != EFI_SUCCESS) {
		pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n",
			 status);
		panic("EFI call to SetVirtualAddressMap() failed!");
	}

	/*
	 * Now that EFI is in virtual mode, update the function
	 * pointers in the runtime service table to the new virtual addresses.
	 *
	 * Call EFI services through wrapper functions.
	 */
	efi.runtime_version = efi_systab.hdr.revision;

	if (efi_is_native())
		efi_native_runtime_setup();
	else
		efi_thunk_runtime_setup();

	efi.set_virtual_address_map = NULL;

	efi_runtime_mkexec();

	/*
	 * We mapped the descriptor array into the EFI pagetable above but we're
	 * not unmapping it here. Here's why:
	 *
	 * We're copying select PGDs from the kernel page table to the EFI page
	 * table and when we do so and make changes to those PGDs like unmapping
	 * stuff from them, those changes appear in the kernel page table and we
	 * go boom.
	 *
	 * From setup_real_mode():
	 *
	 * ...
	 * trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
	 *
	 * In this particular case, our allocation is in PGD 0 of the EFI page
	 * table, but that PGD was copied from PGD[272] of the kernel page table:
	 *
	 *	pgd_index(__PAGE_OFFSET = 0xffff880000000000) = 272
	 *
	 * which is where the direct memory mapping in kernel space lives.
	 *
	 * new_memmap's VA comes from that direct mapping, so clearing it here
	 * would clear it in the kernel page table too.
	 *
	 * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
	 */
	free_pages((unsigned long)new_memmap, pg_shift);

	/* clean DUMMY object */
	efi_delete_dummy_variable();
}

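/*
 * Switch to virtual mode: skipped entirely under EFI_PARAVIRT, and routed
 * to the kexec variant when setup_data from a previous kernel is present.
 */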
void __init efi_enter_virtual_mode(void)
{
	if (efi_enabled(EFI_PARAVIRT))
		return;

	if (efi_setup)
		kexec_enter_virtual_mode();
	else
		__efi_enter_virtual_mode();
}

/*
 * Convenience functions to obtain memory types and attributes
 */
u32 efi_mem_type(unsigned long phys_addr)
{
	efi_memory_desc_t *md;
	void *p;

	if (!efi_enabled(EFI_MEMMAP))
		return 0;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->type;
	}
	return 0;
}

u64 efi_mem_attributes(unsigned long phys_addr)
{
	efi_memory_desc_t *md;
	void *p;

	if (!efi_enabled(EFI_MEMMAP))
		return 0;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->attribute;
	}
	return 0;
}

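/*
 * Handle the arch-specific "efi=" kernel parameter; currently only
 * "old_map" is recognized, which selects the legacy ioremap()-based
 * mapping scheme.
 */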
static int __init arch_parse_efi_cmdline(char *str)
{
	if (parse_option_str(str, "old_map"))
		set_bit(EFI_OLD_MEMMAP, &efi.flags);

	return 0;
}
early_param("efi", arch_parse_efi_cmdline);