#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm-generic/uaccess-unaligned.h>

#include <linux/bug.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS		((mm_segment_t){1})

#define segment_eq(a, b) ((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
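/*
 * Illustrative sketch (not part of the original header): the classic
 * pattern for letting the user accessors operate on kernel memory is
 * to widen the address limit around the access and then restore it:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = __get_user(val, (int __user *)kernel_ptr);
 *	set_fs(old_fs);
 */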
/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

static inline long access_ok(int type, const void __user * addr,
		unsigned long size)
{
	return 1;
}
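/*
 * Illustrative sketch: because access_ok() unconditionally succeeds
 * here, the usual guard sequence
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	err = __get_user(val, uptr);
 *
 * never fails in the first step on parisc; a bad pointer is caught by
 * the page fault handler and the exception fixup instead.
 */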
#define put_user __put_user
#define get_user __get_user
#if !defined(CONFIG_64BIT)
#define LDD_USER(ptr)		BUILD_BUG()
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#else
#define LDD_USER(ptr)		__get_user_asm("ldd", ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#endif
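/*
 * Illustrative sketch: on a 32-bit kernel an 8-byte get_user() is not
 * implemented and expands to BUILD_BUG(), so
 *
 *	u64 val;
 *	err = get_user(val, (u64 __user *)uptr);	/- compile-time error
 *
 * fails to build there, while the matching 8-byte put_user() works via
 * the two-word __put_user_asm64() further down.
 */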
/*
 * The exception table contains two values: the first is the relative offset to
 * the address of the instruction that is allowed to fault, and the second is
 * the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32-bit values are sufficient even on a 64-bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"
/*
 * The page fault handler stores, in a per-cpu area, the following information
 * if a fixup routine is available.
 */
struct exception_data {
	unsigned long fault_ip;
	unsigned long fault_gp;
	unsigned long fault_space;
	unsigned long fault_addr;
};
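/*
 * Illustrative sketch (the real code lives in the page fault handler):
 * the handler fills in the per-cpu copy roughly like
 *
 *	struct exception_data *d = this_cpu_ptr(&exception_data);
 *
 *	d->fault_ip = regs->iaoq[0];
 *	d->fault_addr = regs->ior;
 *
 * before diverting execution to the fixup routine.
 */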
/*
 * load_sr2() preloads the space register %%sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
 * is 0), or with the current value of %%sr3 to access user space (USER_DS)
 * memory. The following __get_user_asm() and __put_user_asm() functions have
 * %%sr2 hard-coded to access the requested memory.
 */
#define load_sr2() \
	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
		" mfsp %%sr3,%0\n\t"		\
		" mtsp %0,%%sr2\n\t"		\
		: : "r"(get_fs()) : )
#define __get_user(x, ptr)                               \
({                                                       \
	register long __gu_err __asm__ ("r8") = 0;       \
	register long __gu_val __asm__ ("r9") = 0;       \
							 \
	load_sr2();					 \
	switch (sizeof(*(ptr))) {			 \
	    case 1: __get_user_asm("ldb", ptr); break;   \
	    case 2: __get_user_asm("ldh", ptr); break;   \
	    case 4: __get_user_asm("ldw", ptr); break;   \
	    case 8: LDD_USER(ptr);  break;		 \
	    default: BUILD_BUG(); break;		 \
	}                                                \
							 \
	(x) = (__force __typeof__(*(ptr))) __gu_val;	 \
	__gu_err;                                        \
})
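/*
 * Illustrative sketch: __get_user() evaluates to the error code, so a
 * typical call site looks like
 *
 *	u32 val;
 *
 *	if (__get_user(val, (u32 __user *)uptr))
 *		return -EFAULT;
 *
 * On a fault the fixup routine is expected to leave the error code in
 * r8 (__gu_err) and zero in r9 (__gu_val).
 */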
#define __get_user_asm(ldx, ptr)                        \
	__asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t"	\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
		: "=r"(__gu_val), "=r"(__gu_err)        \
		: "r"(ptr), "1"(__gu_err)		\
		: "r1");
#define __put_user(x, ptr)                                      \
({								\
	register long __pu_err __asm__ ("r8") = 0;		\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	load_sr2();						\
	switch (sizeof(*(ptr))) {				\
	    case 1: __put_user_asm("stb", __x, ptr); break;     \
	    case 2: __put_user_asm("sth", __x, ptr); break;     \
	    case 4: __put_user_asm("stw", __x, ptr); break;     \
	    case 8: STD_USER(__x, ptr); break;			\
	    default: BUILD_BUG(); break;			\
	}							\
								\
	__pu_err;						\
})
135 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
136 * instead of writing. This is because they do not write to any memory
137 * gcc knows about, so there are no aliasing issues. These macros must
138 * also be aware that "fixup_put_user_skip_[12]" are executed in the
139 * context of the fault, and any registers used there must be listed
140 * as clobbers. In this case only "r1" is used by the current routines.
141 * r8/r9 are already listed as err/val.
144 #define __put_user_asm(stx, x, ptr) \
145 __asm__ __volatile__ ( \
146 "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \
147 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
149 : "r"(ptr), "r"(x), "0"(__pu_err) \
#if !defined(CONFIG_64BIT)

#define __put_user_asm64(__val, ptr) do {		    \
	__asm__ __volatile__ (				    \
		"\n1:\tstw %2,0(%%sr2,%1)"		    \
		"\n2:\tstw %R2,4(%%sr2,%1)\n\t"		    \
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(__val), "0"(__pu_err)	    \
		: "r1");				    \
} while (0)

#endif /* !defined(CONFIG_64BIT) */
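/*
 * Illustrative note: "%2" prints the first and "%R2" the second
 * register of the pair holding the 64-bit __val, so the value goes out
 * as two 32-bit stw instructions. A fault on the first store skips
 * both words (fixup_put_user_skip_2); a fault on the second skips only
 * itself (fixup_put_user_skip_1).
 */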
/*
 * Complex access routines -- external declarations
 */

extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);
/*
 * Complex access routines -- macros
 */
#define user_addr_max() (~0UL)

#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user
unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len);
#define __copy_to_user copy_to_user
unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len);
unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
	__compiletime_error("copy_from_user() buffer size is not provably correct")
#else
	__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;
static inline unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n)
{
	int sz = __compiletime_object_size(to);
	unsigned long ret = n;

	if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
		ret = __copy_from_user(to, from, n);
	else
		copy_from_user_overflow();

	return ret;
}
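/*
 * Illustrative sketch: the object-size check only fires for lengths
 * the compiler can prove too large, e.g.
 *
 *	char buf[16];
 *
 *	copy_from_user(buf, uptr, 32);	 (build-time warning or error)
 *	copy_from_user(buf, uptr, len);	 (checked at run time only)
 *
 * where the second call simply reports the number of bytes it could
 * not copy.
 */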
struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */