blob: 17b26bceb71659f66dacb9717af3727840c767f2 [file] [log] [blame]
Thomas Gleixner457c8992019-05-19 13:08:55 +01001// SPDX-License-Identifier: GPL-2.0-only
Ingo Molnarc33fa9f2008-04-17 20:05:36 +02002/*
Christoph Hellwig3f0acb12020-06-08 21:34:11 -07003 * Access kernel or user memory without faulting.
Ingo Molnarc33fa9f2008-04-17 20:05:36 +02004 */
Paul Gortmakerb95f1b312011-10-16 02:01:52 -04005#include <linux/export.h>
Ingo Molnarc33fa9f2008-04-17 20:05:36 +02006#include <linux/mm.h>
David Howells7c7fcf72010-10-27 17:29:01 +01007#include <linux/uaccess.h>
Ingo Molnarc33fa9f2008-04-17 20:05:36 +02008
/*
 * Default policy hook for probe_kernel_read(): declared __weak so that an
 * architecture can override it to reject probing of certain kernel address
 * ranges.  The generic fallback permits every address.
 */
bool __weak probe_kernel_read_allowed(const void *unsafe_src, size_t size)
{
	return true;
}
13
Ingo Molnarc33fa9f2008-04-17 20:05:36 +020014/**
Christoph Hellwig98a23602020-06-08 21:34:50 -070015 * probe_kernel_read(): safely attempt to read from kernel-space
Christoph Hellwig4f6de122020-06-08 21:34:07 -070016 * @dst: pointer to the buffer that shall take the data
17 * @src: address to read from
18 * @size: size of the data chunk
19 *
20 * Safely read from kernel address @src to the buffer at @dst. If a kernel
21 * fault happens, handle that and return -EFAULT.
Andrew Morton0ab32b62015-11-05 18:46:03 -080022 *
23 * We ensure that the copy_from_user is executed in atomic context so that
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -070024 * do_page_fault() doesn't attempt to take mmap_lock. This makes
Andrew Morton0ab32b62015-11-05 18:46:03 -080025 * probe_kernel_read() suitable for use within regions where the caller
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -070026 * already holds mmap_lock, or other locks which nest inside mmap_lock.
Ingo Molnarc33fa9f2008-04-17 20:05:36 +020027 */
Christoph Hellwig98a23602020-06-08 21:34:50 -070028long probe_kernel_read(void *dst, const void *src, size_t size)
Ingo Molnarc33fa9f2008-04-17 20:05:36 +020029{
30 long ret;
Jason Wesselb4b8ac52008-02-20 13:33:38 -060031 mm_segment_t old_fs = get_fs();
Ingo Molnarc33fa9f2008-04-17 20:05:36 +020032
Christoph Hellwig98a23602020-06-08 21:34:50 -070033 if (!probe_kernel_read_allowed(src, size))
Christoph Hellwigeab0c602020-06-08 21:34:27 -070034 return -EFAULT;
35
Jason Wesselb4b8ac52008-02-20 13:33:38 -060036 set_fs(KERNEL_DS);
Christoph Hellwigcd030902020-06-08 21:34:24 -070037 pagefault_disable();
38 ret = __copy_from_user_inatomic(dst, (__force const void __user *)src,
39 size);
40 pagefault_enable();
Jason Wesselb4b8ac52008-02-20 13:33:38 -060041 set_fs(old_fs);
Ingo Molnarc33fa9f2008-04-17 20:05:36 +020042
Christoph Hellwigcd030902020-06-08 21:34:24 -070043 if (ret)
44 return -EFAULT;
45 return 0;
Ingo Molnarc33fa9f2008-04-17 20:05:36 +020046}
Christoph Hellwig98a23602020-06-08 21:34:50 -070047EXPORT_SYMBOL_GPL(probe_kernel_read);
Ingo Molnarc33fa9f2008-04-17 20:05:36 +020048
49/**
50 * probe_kernel_write(): safely attempt to write to a location
51 * @dst: address to write to
52 * @src: pointer to the data that shall be written
53 * @size: size of the data chunk
54 *
55 * Safely write to address @dst from the buffer at @src. If a kernel fault
56 * happens, handle that and return -EFAULT.
57 */
Christoph Hellwig48c49c02020-06-08 21:34:01 -070058long probe_kernel_write(void *dst, const void *src, size_t size)
Ingo Molnarc33fa9f2008-04-17 20:05:36 +020059{
60 long ret;
Jason Wesselb4b8ac52008-02-20 13:33:38 -060061 mm_segment_t old_fs = get_fs();
Ingo Molnarc33fa9f2008-04-17 20:05:36 +020062
Jason Wesselb4b8ac52008-02-20 13:33:38 -060063 set_fs(KERNEL_DS);
Christoph Hellwigcd030902020-06-08 21:34:24 -070064 pagefault_disable();
65 ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
66 pagefault_enable();
Jason Wesselb4b8ac52008-02-20 13:33:38 -060067 set_fs(old_fs);
Ingo Molnarc33fa9f2008-04-17 20:05:36 +020068
Christoph Hellwigcd030902020-06-08 21:34:24 -070069 if (ret)
70 return -EFAULT;
71 return 0;
Ingo Molnarc33fa9f2008-04-17 20:05:36 +020072}
Alexei Starovoitovdbb7ee02015-08-31 08:57:10 -070073
/**
 * strncpy_from_kernel_nofault: - Copy a NUL terminated string from unsafe
 *				 address.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @unsafe_addr: Unsafe address.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from unsafe address to kernel buffer.
 *
 * On success, returns the length of the string INCLUDING the trailing NUL.
 *
 * If access fails, returns -EFAULT (some data may have been copied
 * and the trailing NUL added).
 *
 * If @count is smaller than the length of the string, copies @count-1 bytes,
 * sets the last byte of @dst buffer to NUL and returns @count.
 */
long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
{
	mm_segment_t old_fs = get_fs();
	const void *src = unsafe_addr;
	long ret;

	/* Nothing to do for an empty or negative-sized buffer. */
	if (unlikely(count <= 0))
		return 0;
	/* Architecture may forbid probing this kernel address range. */
	if (!probe_kernel_read_allowed(unsafe_addr, count))
		return -EFAULT;

	set_fs(KERNEL_DS);
	pagefault_disable();

	/*
	 * Copy one byte at a time; stop on the NUL terminator, on a faulting
	 * access (ret != 0), or when @count bytes have been consumed.
	 */
	do {
		ret = __get_user(*dst++, (const char __user __force *)src++);
	} while (dst[-1] && ret == 0 && src - unsafe_addr < count);

	/* Always NUL-terminate, even on fault or truncation. */
	dst[-1] = '\0';
	pagefault_enable();
	set_fs(old_fs);

	/* src - unsafe_addr counts the copied bytes including the NUL. */
	return ret ? -EFAULT : src - unsafe_addr;
}
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900116
117/**
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700118 * probe_user_read(): safely attempt to read from a user-space location
119 * @dst: pointer to the buffer that shall take the data
120 * @src: address to read from. This must be a user address.
121 * @size: size of the data chunk
122 *
123 * Safely read from user address @src to the buffer at @dst. If a kernel fault
124 * happens, handle that and return -EFAULT.
125 */
126long probe_user_read(void *dst, const void __user *src, size_t size)
127{
128 long ret = -EFAULT;
129 mm_segment_t old_fs = get_fs();
130
131 set_fs(USER_DS);
132 if (access_ok(src, size)) {
133 pagefault_disable();
134 ret = __copy_from_user_inatomic(dst, src, size);
135 pagefault_enable();
136 }
137 set_fs(old_fs);
138
139 if (ret)
140 return -EFAULT;
141 return 0;
142}
143EXPORT_SYMBOL_GPL(probe_user_read);
144
145/**
146 * probe_user_write(): safely attempt to write to a user-space location
147 * @dst: address to write to
148 * @src: pointer to the data that shall be written
149 * @size: size of the data chunk
150 *
151 * Safely write to address @dst from the buffer at @src. If a kernel fault
152 * happens, handle that and return -EFAULT.
153 */
154long probe_user_write(void __user *dst, const void *src, size_t size)
155{
156 long ret = -EFAULT;
157 mm_segment_t old_fs = get_fs();
158
159 set_fs(USER_DS);
160 if (access_ok(dst, size)) {
161 pagefault_disable();
162 ret = __copy_to_user_inatomic(dst, src, size);
163 pagefault_enable();
164 }
165 set_fs(old_fs);
166
167 if (ret)
168 return -EFAULT;
169 return 0;
170}
171EXPORT_SYMBOL_GPL(probe_user_write);
172
/**
 * strncpy_from_user_nofault: - Copy a NUL terminated string from unsafe user
 *				address.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @unsafe_addr: Unsafe user address.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from unsafe user address to kernel buffer.
 *
 * On success, returns the length of the string INCLUDING the trailing NUL.
 *
 * If access fails, returns -EFAULT (some data may have been copied
 * and the trailing NUL added).
 *
 * If @count is smaller than the length of the string, copies @count-1 bytes,
 * sets the last byte of @dst buffer to NUL and returns @count.
 */
long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
			      long count)
{
	mm_segment_t old_fs = get_fs();
	long ret;

	/* Nothing to do for an empty or negative-sized buffer. */
	if (unlikely(count <= 0))
		return 0;

	/* Delegate the copy to strncpy_from_user() with faults disabled. */
	set_fs(USER_DS);
	pagefault_disable();
	ret = strncpy_from_user(dst, unsafe_addr, count);
	pagefault_enable();
	set_fs(old_fs);

	/*
	 * strncpy_from_user() returns the length EXCLUDING the NUL; adjust
	 * to this function's contract of a length INCLUDING the NUL:
	 * - truncated (ret >= count): force-terminate and report @count;
	 * - success (ret > 0): account for the terminator;
	 * - 0 or -EFAULT are passed through unchanged.
	 */
	if (ret >= count) {
		ret = count;
		dst[ret - 1] = '\0';
	} else if (ret > 0) {
		ret++;
	}

	return ret;
}
215
216/**
Christoph Hellwig02dddb12020-06-08 21:34:20 -0700217 * strnlen_user_nofault: - Get the size of a user string INCLUDING final NUL.
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900218 * @unsafe_addr: The string to measure.
219 * @count: Maximum count (including NUL)
220 *
221 * Get the size of a NUL-terminated string in user space without pagefault.
222 *
223 * Returns the size of the string INCLUDING the terminating NUL.
224 *
225 * If the string is too long, returns a number larger than @count. User
226 * has to check the return value against "> count".
227 * On exception (or invalid count), returns 0.
228 *
229 * Unlike strnlen_user, this can be used from IRQ handler etc. because
230 * it disables pagefaults.
231 */
Christoph Hellwig02dddb12020-06-08 21:34:20 -0700232long strnlen_user_nofault(const void __user *unsafe_addr, long count)
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900233{
234 mm_segment_t old_fs = get_fs();
235 int ret;
236
237 set_fs(USER_DS);
238 pagefault_disable();
239 ret = strnlen_user(unsafe_addr, count);
240 pagefault_enable();
241 set_fs(old_fs);
242
243 return ret;
244}