// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/page.h>

#include <kunit/test.h>

#include "../mm/kasan/kasan.h"

#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

static struct kunit_resource resource;
static struct kunit_kasan_expectation fail_data;
static bool multishot;

/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled. For
 * hardware tag-based KASAN, also allow tag checking to be reenabled for each
 * test; see the comment for KUNIT_EXPECT_KASAN_FAIL().
 */
static int kasan_test_init(struct kunit *test)
{
	multishot = kasan_save_enable_multi_shot();
	hw_set_tagging_report_once(false);
	return 0;
}

static void kasan_test_exit(struct kunit *test)
{
	hw_set_tagging_report_once(true);
	kasan_restore_multi_shot(multishot);
}

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_data". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN, when a tag fault happens, tag checking is
 * normally auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can only be disabled or enabled per CPU,
 * this handler disables migration (preemption).
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {		\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS))			\
		migrate_disable();				\
	fail_data.report_expected = true;			\
	fail_data.report_found = false;				\
	kunit_add_named_resource(test,				\
				 NULL,				\
				 NULL,				\
				 &resource,			\
				 "kasan_data", &fail_data);	\
	expression;						\
	KUNIT_EXPECT_EQ(test,					\
			fail_data.report_expected,		\
			fail_data.report_found);		\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) {			\
		if (fail_data.report_found)			\
			hw_enable_tagging();			\
		migrate_enable();				\
	}							\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " required");	\
		return;							\
	}								\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " enabled");	\
		return;							\
	}								\
} while (0)

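/* Check that a write just past the end of a kmalloc'ed buffer is detected. */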
static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 123;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
	kfree(ptr);
}

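/* Check that an out-of-bounds read just before a kmalloc'ed buffer is detected. */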
static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

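/* Check that an out-of-bounds write is detected for kmalloc_node() allocations. */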
static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	/*
	 * Allocate a chunk that does not fit into a SLUB cache to trigger
	 * the page allocator fallback.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
	kfree(ptr);
}

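/* Check that a use-after-free is detected for a page-allocator-backed kmalloc. */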
static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}

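/* Check that freeing an offset pointer into a page-allocator-backed kmalloc is detected. */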
static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

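/* Check that an out-of-bounds write is detected after growing a buffer with krealloc(). */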
static void kmalloc_oob_krealloc_more(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size1 = 17;
	size_t size2 = 19;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
	kfree(ptr2);
}

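/* Check that an out-of-bounds write is detected after shrinking a buffer with krealloc(). */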
219
Patricia Alfonso73228c72020-10-13 16:55:06 -0700220static void kmalloc_oob_krealloc_less(struct kunit *test)
Andrey Ryabinin3f158012015-02-13 14:39:53 -0800221{
222 char *ptr1, *ptr2;
223 size_t size1 = 17;
224 size_t size2 = 15;
225
Andrey Ryabinin3f158012015-02-13 14:39:53 -0800226 ptr1 = kmalloc(size1, GFP_KERNEL);
Patricia Alfonso73228c72020-10-13 16:55:06 -0700227 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
228
Andrey Ryabinin3f158012015-02-13 14:39:53 -0800229 ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
Patricia Alfonso73228c72020-10-13 16:55:06 -0700230 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
Walter Wuf33a0142020-08-06 23:24:54 -0700231
Patricia Alfonso73228c72020-10-13 16:55:06 -0700232 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
Andrey Ryabinin3f158012015-02-13 14:39:53 -0800233 kfree(ptr2);
234}
235
static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

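/* Check that a 16-byte use-after-free read is detected. */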
static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

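/* The following tests check that out-of-bounds memset() calls of various widths are detected. */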
static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
	kfree(ptr);
}

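/* Check that a memset() that overruns the end of the buffer is detected. */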
static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 666;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
	kfree(ptr);
}

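/* Check that a memmove() with a negative size (a huge value once cast to size_t) is detected. */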
static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	volatile size_t invalid_size = -2;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);

	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

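/* Check that a basic use-after-free write is detected. */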
static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
}

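/* Check that a memset() on a freed buffer is detected. */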
static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

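/* Check that a use-after-free is detected even after the object's slot is reallocated. */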
static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;

	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

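/* Check that kfree() via an address recomputed from struct page does not produce a false positive. */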
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

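/* Check that kfree() via a phys-to-virt round trip does not produce a false positive. */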
static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

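/* Check that an out-of-bounds access to a kmem_cache object is detected. */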
static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache = kmem_cache_create("test_cache",
						size, 0,
						0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

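/* Smoke test for allocations from a SLAB_ACCOUNT (memcg-accounted) cache; no report is expected. */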
static void memcg_accounted_kmem_cache(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob(struct kunit *test)
{
	volatile int i = 3;
	char *p = &global_array[ARRAY_SIZE(global_array) + i];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

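/* Check that ksize() unpoisons the whole usable size of the allocation. */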
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');

	kfree(ptr);
}

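/* Check that an out-of-bounds access to an on-stack array is detected. */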
static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	volatile int i = OOB_TAG_OFF;
	char *p = &stack_array[ARRAY_SIZE(stack_array) + i];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	char *p = alloca_array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	char *p = alloca_array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

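/* Check that freeing the same kmem_cache object twice is detected. */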
static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

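/* Check that freeing a pointer that is not the start of an object is detected. */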
static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size + 1));
	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only one invalid access (less spam in dmesg).
	 * For that we need ptr to point to a zeroed byte.
	 * Skip the metadata that could be stored in the freed object so that
	 * ptr will likely point to a zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}

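/* Helpers that exercise bit-modifying and test-and-modify ops on an out-of-bounds address. */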
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * The below calls try to access a bit within the allocated memory;
	 * however, these accesses are still out-of-bounds, since bitops are
	 * defined to operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/* The below calls try to access bits beyond the allocated memory. */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}

static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* Allocation size will be rounded up to the granule size, which is 16. */
	bits = kzalloc(sizeof(*bits), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 16 allocated bytes. */
	kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]);

	kfree(bits);
}

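/* Check that a double call to kfree_sensitive() is detected. */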
static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

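/* Check that an out-of-bounds read past a vmalloc allocation is detected. */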
static void vmalloc_oob(struct kunit *test)
{
	void *area;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	/*
	 * We have to be careful not to hit the guard page.
	 * The MMU will catch that and crash us.
	 */
	area = vmalloc(3000);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
	vfree(area);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = (get_random_int() % 1024) + 1;
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = (get_random_int() % 4) + 1;
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Back up the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff. */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr));
	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(kmalloc_oob_krealloc_more),
	KUNIT_CASE(kmalloc_oob_krealloc_less),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(memcg_accounted_kmem_cache),
	KUNIT_CASE(kasan_global_oob),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.init = kasan_test_init,
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");