// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>

/* out-of-line parts */

#ifndef INLINE_COPY_FROM_USER
unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		/*
		 * Ensure that bad access_ok() speculation will not
		 * lead to nasty side effects *after* the copy is
		 * finished:
		 */
		barrier_nospec();
		instrument_copy_from_user(to, from, n);
		res = raw_copy_from_user(to, from, n);
	}
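	/*
	 * If the copy faulted part-way (or access_ok() failed), zero the
	 * uncopied tail of the kernel buffer so the caller never sees
	 * stale, uninitialized kernel memory.
	 */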
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
EXPORT_SYMBOL(_copy_from_user);
#endif

#ifndef INLINE_COPY_TO_USER
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	if (likely(access_ok(to, n))) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
EXPORT_SYMBOL(_copy_to_user);
#endif
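/*
 * Illustrative sketch (not part of this file): the usual caller-side
 * pattern for the wrappers above.  copy_from_user()/copy_to_user()
 * return the number of bytes that could NOT be copied, so callers
 * conventionally treat any non-zero return as -EFAULT.  The struct and
 * ioctl handler below are hypothetical.
 *
 *	struct demo_args {
 *		__u32 flags;
 *		__u64 addr;
 *	};
 *
 *	static long demo_ioctl(void __user *argp)
 *	{
 *		struct demo_args args;
 *
 *		if (copy_from_user(&args, argp, sizeof(args)))
 *			return -EFAULT;
 *
 *		args.flags |= 1;	// act on the request
 *
 *		if (copy_to_user(argp, &args, sizeof(args)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */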

/**
 * check_zeroed_user: check if a userspace buffer only contains zero bytes
 * @from: Source address, in userspace.
 * @size: Size of buffer.
 *
 * This is effectively shorthand for "memchr_inv(from, 0, size) == NULL" for
 * userspace addresses (and is more efficient because we don't care where the
 * first non-zero byte is).
 *
 * Returns:
 *  * 0: There were non-zero bytes present in the buffer.
 *  * 1: The buffer was full of zero bytes.
 *  * -EFAULT: access to userspace failed.
 */
int check_zeroed_user(const void __user *from, size_t size)
{
	unsigned long val;
	uintptr_t align = (uintptr_t) from % sizeof(unsigned long);

	if (unlikely(size == 0))
		return 1;

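	/*
	 * Round the start address down to a word boundary so the scan can
	 * use aligned word-sized loads; the extra leading bytes pulled in
	 * before the caller's buffer are masked off after the first load.
	 */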
	from -= align;
	size += align;

	if (!user_read_access_begin(from, size))
		return -EFAULT;

	unsafe_get_user(val, (unsigned long __user *) from, err_fault);
	if (align)
		val &= ~aligned_byte_mask(align);

	while (size > sizeof(unsigned long)) {
		if (unlikely(val))
			goto done;

		from += sizeof(unsigned long);
		size -= sizeof(unsigned long);

		unsafe_get_user(val, (unsigned long __user *) from, err_fault);
	}

	if (size < sizeof(unsigned long))
		val &= aligned_byte_mask(size);

done:
	user_read_access_end();
	return (val == 0);
err_fault:
	user_read_access_end();
	return -EFAULT;
}
EXPORT_SYMBOL(check_zeroed_user);
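
/*
 * Illustrative sketch (not part of this file): how a caller in the style
 * of copy_struct_from_user() can use check_zeroed_user() to accept
 * extensible syscall structs.  If userspace passes a struct larger than
 * the kernel knows about, the unknown tail must be all zeroes or the
 * request is rejected with -E2BIG.  The helper below is hypothetical and
 * simplified.
 *
 *	static int demo_copy_extensible(void *dst, size_t ksize,
 *					const void __user *src, size_t usize)
 *	{
 *		size_t size = min(ksize, usize);
 *
 *		if (usize > ksize) {
 *			int ret = check_zeroed_user(src + ksize, usize - ksize);
 *
 *			if (ret <= 0)
 *				return ret ?: -E2BIG;
 *		}
 *		if (copy_from_user(dst, src, size))
 *			return -EFAULT;
 *		if (usize < ksize)
 *			memset(dst + size, 0, ksize - size);
 *		return 0;
 *	}
 */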