/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/*
 * The exception table consists of pairs of relative offsets: the first
 * is the relative offset to an instruction that is allowed to fault,
 * and the second is the relative offset at which the program should
 * continue. No registers are modified, so it is entirely up to the
 * continuation code to figure out what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry
{
	int insn, fixup;
};

#define ARCH_HAS_RELATIVE_EXTABLE
extern int fixup_exception(struct pt_regs *regs);
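/*
 * Illustrative sketch (not part of this header): because both fields are
 * offsets relative to their own location, the fixup machinery resolves an
 * entry back to an absolute address along the lines of the generic helper
 * in lib/extable.c:
 *
 *	static inline unsigned long
 *	ex_to_insn(const struct exception_table_entry *x)
 *	{
 *		return (unsigned long)&x->insn + x->insn;
 *	}
 */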
#define KERNEL_DS	(-1UL)
#define get_ds()	(KERNEL_DS)

#define USER_DS		TASK_SIZE_64
#define get_fs()	(current_thread_info()->addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}
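/*
 * Example (illustrative, not part of this header): the classic pattern for
 * temporarily widening the address limit so the uaccess routines can be
 * pointed at kernel memory. The helper being called is hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = do_uaccess_on((void __user *)kernel_buf, len);
 *	set_fs(old_fs);
 */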
#define segment_eq(a, b)	((a) == (b))
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= current->addr_limit
 *
 * This needs 65-bit arithmetic.
 */
#define __range_ok(addr, size)						\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
		: "=&r" (flag), "=&r" (roksum)				\
		: "1" (addr), "Ir" (size),				\
		  "r" (current_thread_info()->addr_limit)		\
		: "cc");						\
	flag;								\
})
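/*
 * Worked example (illustrative): a plain 64-bit "addr + size <= limit"
 * check can be defeated by wrap-around. With addr = 0xffffffffffffff00
 * and size = 0x200, the 64-bit sum wraps to 0x100 and would pass; the
 * adds/ccmp sequence above keeps the carry out of the addition (the
 * "65th bit"), so the wrapped range is correctly rejected.
 */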
/*
 * When dealing with data aborts or instruction traps we may end up with
 * a tagged userland pointer. Clear the tag to get a sane pointer to pass
 * on to access_ok(), for instance.
 */
#define untagged_addr(addr)		sign_extend64(addr, 55)
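/*
 * Example (illustrative): for a user pointer, bit 55 is clear, so
 * sign-extending from bit 55 strips a top-byte tag:
 *
 *	untagged_addr(0x5a00aaaabbbbcc00) == 0x0000aaaabbbbcc00
 */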
#define access_ok(type, addr, size)	__range_ok(addr, size)
#define user_addr_max			get_fs
#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"
/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))
#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN));				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN));				\
} while (0)
#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __get_user_unaligned __get_user

#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_READ, __p, sizeof(*__p)) ?			\
		__get_user((x), __p) :					\
		((x) = 0, -EFAULT);					\
})
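/*
 * Example (illustrative, not part of this header): fetching a scalar from
 * userspace in a syscall or ioctl handler. The pointer and variable names
 * are hypothetical.
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)arg))
 *		return -EFAULT;
 */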
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))
#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN));				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN));				\
} while (0)
#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_err((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __put_user_unaligned __put_user

#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?			\
		__put_user((x), __p) :					\
		-EFAULT;						\
})
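/*
 * Example (illustrative, not part of this header): writing a result back
 * to a user-supplied pointer; the names are hypothetical.
 *
 *	if (put_user(status, (u32 __user *)uptr))
 *		return -EFAULT;
 */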
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return __arch_copy_from_user(to, from, n);
}
static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return __arch_copy_to_user(to, from, n);
}
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	kasan_check_write(to, n);

	if (access_ok(VERIFY_READ, from, n)) {
		check_object_size(to, n, false);
		res = __arch_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
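/*
 * Example (illustrative): copy_from_user() returns the number of bytes
 * that could not be copied and zeroes the uncopied tail of the kernel
 * buffer, so callers normally treat any non-zero return as a fault.
 * The structure and pointer names are hypothetical.
 *
 *	struct request req;
 *
 *	if (copy_from_user(&req, ubuf, sizeof(req)))
 *		return -EFAULT;
 */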
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);

	if (access_ok(VERIFY_WRITE, to, n)) {
		check_object_size(from, n, true);
		n = __arch_copy_to_user(to, from, n);
	}
	return n;
}
static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
		n = __copy_in_user(to, from, n);
	return n;
}
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}
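/*
 * Example (illustrative): zeroing a user buffer; as with the copy
 * routines, a non-zero return is the number of bytes left untouched.
 * The buffer name is hypothetical.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */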
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASM_UACCESS_H */