author		Oliver Glitta <glittao@gmail.com>		2021-06-28 19:34:33 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-06-29 10:53:46 -0700
commit		1f9f78b1b376f82cdd8ed73cc0abdb74d0453d43
tree		1b2cbf976a0b729a4b66e38c37403a39f0d538af /lib/slub_kunit.c
parent		26c6cb7cf830349c6518a7efe1c32ac796cd192e
mm/slub, kunit: add a KUnit test for SLUB debugging functionality
SLUB has a resiliency_test() function which is hidden behind an #ifdef
SLUB_RESILIENCY_TEST that is not part of Kconfig, so nobody runs it.
KUnit should be a proper replacement for it.
Try changing a byte in the redzone after allocation and, after freeing,
changing the pointer to the next free node, the first byte, the 50th byte
and a redzone byte. Check whether validation finds the errors.
There are several differences from the original resiliency test: the tests
create their own caches with a known state instead of corrupting the shared
kmalloc caches.
The corruption of the freepointer uses the correct offset; the original
resiliency test had been broken by the freepointer changes.
Drop the random-byte-change test, because it has no meaning in this form,
where we need deterministic results.
Add a new option, CONFIG_SLUB_KUNIT_TEST, in Kconfig. The tests
next_pointer, first_word and clobber_50th_byte do not run with the KASAN
option on, because they deliberately modify non-allocated objects.
Use a kunit_resource to count errors in the cache and to silence bug
reports. Count an error whenever slab_bug() or slab_fix() is called, or
when the count of pages is wrong.
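
For context: the diffstat below is limited to lib/slub_kunit.c, so the
counting hook on the mm/slub.c side is not shown here. Roughly, it can be
sketched as follows; the helper name and its exact placement are
illustrative assumptions, not part of the hunk reproduced below. The helper
looks up the "slab_errors" resource registered by test_init() and bumps the
shared counter, and its return value lets slab_bug()/slab_fix() suppress
the normal report while a KUnit test is running.

/*
 * Sketch only: a hook of this shape would live in mm/slub.c and be called
 * from slab_bug()/slab_fix(); the name slab_add_kunit_errors() is
 * illustrative.
 */
#include <kunit/test.h>

static bool slab_add_kunit_errors(void)
{
	struct kunit_resource *resource;

	/* Only count errors while a KUnit test case is running. */
	if (likely(!current->kunit_test))
		return false;

	/* Find the "slab_errors" counter registered by test_init(). */
	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	(*(int *)resource->data)++;
	kunit_put_resource(resource);

	/* Caller can skip printing the bug report. */
	return true;
}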
[glittao@gmail.com: remove unused function test_exit(), from SLUB KUnit test]
Link: https://lkml.kernel.org/r/20210512140656.12083-1-glittao@gmail.com
[akpm@linux-foundation.org: export kasan_enable/disable_current to modules]
Link: https://lkml.kernel.org/r/20210511150734.3492-2-glittao@gmail.com
Signed-off-by: Oliver Glitta <glittao@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Daniel Latypov <dlatypov@google.com>
Acked-by: Marco Elver <elver@google.com>
Cc: Brendan Higgins <brendanhiggins@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'lib/slub_kunit.c')
-rw-r--r--	lib/slub_kunit.c	152
1 files changed, 152 insertions, 0 deletions
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
new file mode 100644
index 000000000000..8662dc6cb509
--- /dev/null
+++ b/lib/slub_kunit.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "../mm/slab.h"
+
+static struct kunit_resource resource;
+static int slab_errors;
+
+static void test_clobber_zone(struct kunit *test)
+{
+	struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0,
+				SLAB_RED_ZONE, NULL);
+	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+	kasan_disable_current();
+	p[64] = 0x12;
+
+	validate_slab_cache(s);
+	KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+	kasan_enable_current();
+	kmem_cache_free(s, p);
+	kmem_cache_destroy(s);
+}
+
+#ifndef CONFIG_KASAN
+static void test_next_pointer(struct kunit *test)
+{
+	struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0,
+				SLAB_POISON, NULL);
+	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+	unsigned long tmp;
+	unsigned long *ptr_addr;
+
+	kmem_cache_free(s, p);
+
+	ptr_addr = (unsigned long *)(p + s->offset);
+	tmp = *ptr_addr;
+	p[s->offset] = 0x12;
+
+	/*
+	 * Expecting three errors.
+	 * One for the corrupted freechain and the other one for the wrong
+	 * count of objects in use. The third error is fixing broken cache.
+	 */
+	validate_slab_cache(s);
+	KUNIT_EXPECT_EQ(test, 3, slab_errors);
+
+	/*
+	 * Try to repair corrupted freepointer.
+	 * Still expecting two errors. The first for the wrong count
+	 * of objects in use.
+	 * The second error is for fixing broken cache.
+	 */
+	*ptr_addr = tmp;
+	slab_errors = 0;
+
+	validate_slab_cache(s);
+	KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+	/*
+	 * Previous validation repaired the count of objects in use.
+	 * Now expecting no error.
+	 */
+	slab_errors = 0;
+	validate_slab_cache(s);
+	KUNIT_EXPECT_EQ(test, 0, slab_errors);
+
+	kmem_cache_destroy(s);
+}
+
+static void test_first_word(struct kunit *test)
+{
+	struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0,
+				SLAB_POISON, NULL);
+	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+	kmem_cache_free(s, p);
+	*p = 0x78;
+
+	validate_slab_cache(s);
+	KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+	kmem_cache_destroy(s);
+}
+
+static void test_clobber_50th_byte(struct kunit *test)
+{
+	struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0,
+				SLAB_POISON, NULL);
+	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+	kmem_cache_free(s, p);
+	p[50] = 0x9a;
+
+	validate_slab_cache(s);
+	KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+	kmem_cache_destroy(s);
+}
+#endif
+
+static void test_clobber_redzone_free(struct kunit *test)
+{
+	struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0,
+				SLAB_RED_ZONE, NULL);
+	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+	kasan_disable_current();
+	kmem_cache_free(s, p);
+	p[64] = 0xab;
+
+	validate_slab_cache(s);
+	KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+	kasan_enable_current();
+	kmem_cache_destroy(s);
+}
+
+static int test_init(struct kunit *test)
+{
+	slab_errors = 0;
+
+	kunit_add_named_resource(test, NULL, NULL, &resource,
+					"slab_errors", &slab_errors);
+	return 0;
+}
+
+static struct kunit_case test_cases[] = {
+	KUNIT_CASE(test_clobber_zone),
+
+#ifndef CONFIG_KASAN
+	KUNIT_CASE(test_next_pointer),
+	KUNIT_CASE(test_first_word),
+	KUNIT_CASE(test_clobber_50th_byte),
+#endif
+
+	KUNIT_CASE(test_clobber_redzone_free),
+	{}
+};
+
+static struct kunit_suite test_suite = {
+	.name = "slub_test",
+	.init = test_init,
+	.test_cases = test_cases,
+};
+kunit_test_suite(test_suite);
+
+MODULE_LICENSE("GPL");