x86/efi: Rename print_efi_memmap() to efi_print_memmap()
[cascardo/linux.git] arch/x86/include/asm/efi.h
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#include <asm/fpu/api.h>
#include <asm/pgtable.h>

/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 *
 * This flag is used in conjunction with a chicken bit called
 * "efi=old_map" which can be used as a fallback to the old runtime
 * services mapping method in case there's some b0rkage with a
 * particular EFI implementation (haha, it is hard to hold up the
 * sarcasm here...).
 */
#define EFI_OLD_MEMMAP          EFI_ARCH_1
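
/*
 * Illustrative sketch (added editorially, not part of the original header):
 * mapping code is expected to check this chicken bit with efi_enabled() and
 * fall back to the legacy path, roughly:
 *
 *	if (efi_enabled(EFI_OLD_MEMMAP)) {
 *		old_map_region(md);
 *		return;
 *	}
 *
 * with old_map_region() and efi_map_region() declared further down in this
 * header.
 */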

#define EFI32_LOADER_SIGNATURE  "EL32"
#define EFI64_LOADER_SIGNATURE  "EL64"

#ifdef CONFIG_X86_32


extern unsigned long asmlinkage efi_call_phys(void *, ...);

/*
 * Wrap all the virtual calls in a way that forces the parameters on the stack.
 */

/* Use this macro if your virtual call returns a non-void value */
#define efi_call_virt(f, args...) \
({                                                                      \
        efi_status_t __s;                                               \
        kernel_fpu_begin();                                             \
        __s = ((efi_##f##_t __attribute__((regparm(0)))*)               \
                efi.systab->runtime->f)(args);                          \
        kernel_fpu_end();                                               \
        __s;                                                            \
})
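
/*
 * Illustrative usage (added editorially): a runtime-service wrapper invokes
 * the macro with the service name and its arguments, for example:
 *
 *	efi_status_t status;
 *	status = efi_call_virt(get_time, tm, tc);
 *
 * where tm and tc point to efi_time_t / efi_time_cap_t buffers supplied by
 * the caller.
 */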

/* Use this macro if your virtual call does not return any value */
#define __efi_call_virt(f, args...) \
({                                                                      \
        kernel_fpu_begin();                                             \
        ((efi_##f##_t __attribute__((regparm(0)))*)                     \
                efi.systab->runtime->f)(args);                          \
        kernel_fpu_end();                                               \
})

#define efi_ioremap(addr, size, type, attr)     ioremap_cache(addr, size)

#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE    "EL64"

extern u64 asmlinkage efi_call(void *fp, ...);

#define efi_call_phys(f, args...)               efi_call((f), args)

#define efi_call_virt(f, ...)                                           \
({                                                                      \
        efi_status_t __s;                                               \
                                                                        \
        efi_sync_low_kernel_mappings();                                 \
        preempt_disable();                                              \
        __kernel_fpu_begin();                                           \
        __s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);    \
        __kernel_fpu_end();                                             \
        preempt_enable();                                               \
        __s;                                                            \
})

/*
 * All X86_64 virt calls return non-void values. Thus, use non-void call for
 * virt calls that would be void on X86_32.
 */
#define __efi_call_virt(f, args...) efi_call_virt(f, args)
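
/*
 * Illustrative usage (added editorially): callers that ignore the return
 * value, such as ResetSystem, go through the void-call variant, e.g.
 *
 *	__efi_call_virt(reset_system, reset_type, status, data_size, data);
 *
 * On x86-64 this simply expands to the non-void call above, as the comment
 * notes.
 */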

extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
                                        u32 type, u64 attribute);

#endif /* CONFIG_X86_32 */

extern struct efi_scratch efi_scratch;
extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern pgd_t * __init efi_call_phys_prolog(void);
extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
extern void __init efi_print_memmap(void);
extern void __init efi_unmap_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_mkexec(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);

struct efi_setup_data {
        u64 fw_vendor;
        u64 runtime;
        u64 tables;
        u64 smbios;
        u64 reserved[8];
};

extern u64 efi_setup;
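
/*
 * Editorial note (not part of the original header): efi_setup_data is the
 * payload a kexec'd kernel receives via a setup_data entry of type SETUP_EFI;
 * parse_efi_setup(), declared below, is expected to record its physical
 * address in efi_setup so that firmware tables can be reused rather than
 * re-discovered.
 */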

#ifdef CONFIG_EFI

static inline bool efi_is_native(void)
{
        return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
}

static inline bool efi_runtime_supported(void)
{
        if (efi_is_native())
                return true;

        if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
                return true;

        return false;
}
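
/*
 * Illustrative usage (added editorially): init code can gate runtime-services
 * setup on this helper, roughly:
 *
 *	if (!efi_runtime_supported())
 *		pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
 *	else
 *		... go on to map and enable the runtime services ...
 */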

extern struct console early_efi_console;
extern void parse_efi_setup(u64 phys_addr, u32 data_len);

#ifdef CONFIG_EFI_MIXED
extern void efi_thunk_runtime_setup(void);
extern efi_status_t efi_thunk_set_virtual_address_map(
        void *phys_set_virtual_address_map,
        unsigned long memory_map_size,
        unsigned long descriptor_size,
        u32 descriptor_version,
        efi_memory_desc_t *virtual_map);
#else
static inline void efi_thunk_runtime_setup(void) {}
static inline efi_status_t efi_thunk_set_virtual_address_map(
        void *phys_set_virtual_address_map,
        unsigned long memory_map_size,
        unsigned long descriptor_size,
        u32 descriptor_version,
        efi_memory_desc_t *virtual_map)
{
        return EFI_SUCCESS;
}
#endif /* CONFIG_EFI_MIXED */


/* arch specific definitions used by the stub code */

struct efi_config {
        u64 image_handle;
        u64 table;
        u64 allocate_pool;
        u64 allocate_pages;
        u64 get_memory_map;
        u64 free_pool;
        u64 free_pages;
        u64 locate_handle;
        u64 handle_protocol;
        u64 exit_boot_services;
        u64 text_output;
        efi_status_t (*call)(unsigned long, ...);
        bool is64;
} __packed;

__pure const struct efi_config *__efi_early(void);

#define efi_call_early(f, ...)                                          \
        __efi_early()->call(__efi_early()->f, __VA_ARGS__);
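
/*
 * Illustrative usage (added editorially): the boot stub calls boot services
 * through this wrapper, e.g.
 *
 *	status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size,
 *				(void **)&buf);
 *
 * where status, size and buf are locals of the (hypothetical) caller.
 */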

extern bool efi_reboot_required(void);

#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
        return false;
}
#endif /* CONFIG_EFI */

#endif /* _ASM_X86_EFI_H */