author     Linus Torvalds <torvalds@linux-foundation.org>  2023-04-27 19:42:02 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-04-27 19:42:02 -0700
commit     7fa8a8ee9400fe8ec188426e40e481717bc5e924 (patch)
tree       cc8fd6b4f936ec01e73238643757451e20478c07 /mm/kfence
parent     91ec4b0d11fe115581ce2835300558802ce55e6c (diff)
parent     4d4b6d66db63ceed399f1fb1a4b24081d2590eb1 (diff)
download   linux-7fa8a8ee9400fe8ec188426e40e481717bc5e924.tar.gz
Merge tag 'mm-stable-2023-04-27-15-30' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton:

 - Nick Piggin's "shoot lazy tlbs" series, to improve the performance of switching from a user process to a kernel thread.
 - More folio conversions from Kefeng Wang, Zhang Peng and Pankaj Raghav.
 - zsmalloc performance improvements from Sergey Senozhatsky.
 - Yue Zhao has found and fixed some data race issues around the alteration of memcg userspace tunables.
 - VFS rationalizations from Christoph Hellwig:
     - removal of most of the callers of write_one_page()
     - make __filemap_get_folio()'s return value more useful
 - Luis Chamberlain has changed tmpfs so it no longer requires swap backing. Use `mount -o noswap'.
 - Qi Zheng has made the slab shrinkers operate locklessly, providing some scalability benefits.
 - Keith Busch has improved dmapool's performance, making part of its operations O(1) rather than O(n).
 - Peter Xu adds the UFFD_FEATURE_WP_UNPOPULATED feature to userfaultfd, permitting userspace to wr-protect unpopulated ptes in anon memory.
 - Kirill Shutemov has changed MAX_ORDER's meaning to be inclusive rather than exclusive, and has fixed a bunch of errors which were caused by its unintuitive meaning.
 - Axel Rasmussen gives userfaultfd the UFFDIO_CONTINUE_MODE_WP feature, which causes minor faults to install a write-protected pte.
 - Vlastimil Babka has done some maintenance work on vma_merge(): cleanups to the kernel code and improvements to our userspace test harness.
 - Cleanups to do_fault_around() by Lorenzo Stoakes.
 - Mike Rapoport has moved a lot of initialization code out of various mm/ files and into mm/mm_init.c.
 - Lorenzo Stoakes removed vmf_insert_mixed_prot(), which was added for DRM, but DRM doesn't use it any more.
 - Lorenzo has also converted read_kcore() and vread() to use iterators and has thereby removed the use of bounce buffers in some cases.
 - Lorenzo has also contributed further cleanups of vma_merge().
 - Chaitanya Prakash provides some fixes to the mmap selftesting code.
 - Matthew Wilcox changes xfs and afs so they no longer take sleeping locks in ->map_page(), a step towards RCUification of pagefaults.
 - Suren Baghdasaryan has improved mmap_lock scalability by switching to per-VMA locking.
 - Frederic Weisbecker has reworked the percpu cache draining so that it no longer causes latency glitches on cpu-isolated workloads.
 - Mike Rapoport cleans up and corrects the ARCH_FORCE_MAX_ORDER Kconfig logic.
 - Liu Shixin has changed zswap's initialization so we no longer waste a chunk of memory if zswap is not being used.
 - Yosry Ahmed has improved the performance of memcg statistics flushing.
 - David Stevens has fixed several issues involving khugepaged, userfaultfd and shmem.
 - Christoph Hellwig has provided some cleanup work to zram's IO-related code paths.
 - David Hildenbrand has fixed up some issues in the selftest code's testing of our pte state changing.
 - Pankaj Raghav has made page_endio() unneeded and has removed it.
 - Peter Xu contributed some rationalizations of the userfaultfd selftests.
 - Yosry Ahmed has fixed an issue around memcg's page reclaim accounting.
 - Chaitanya Prakash has fixed some arm-related issues in the selftests/mm code.
 - Longlong Xia has improved the way in which KSM handles hwpoisoned pages.
 - Peter Xu fixes a few issues with uffd-wp at fork() time.
 - Stefan Roesch has changed KSM so that it may now be used on a per-process and per-cgroup basis.
* tag 'mm-stable-2023-04-27-15-30' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (369 commits)
  mm,unmap: avoid flushing TLB in batch if PTE is inaccessible
  shmem: restrict noswap option to initial user namespace
  mm/khugepaged: fix conflicting mods to collapse_file()
  sparse: remove unnecessary 0 values from rc
  mm: move 'mmap_min_addr' logic from callers into vm_unmapped_area()
  hugetlb: pte_alloc_huge() to replace huge pte_alloc_map()
  maple_tree: fix allocation in mas_sparse_area()
  mm: do not increment pgfault stats when page fault handler retries
  zsmalloc: allow only one active pool compaction context
  selftests/mm: add new selftests for KSM
  mm: add new KSM process and sysfs knobs
  mm: add new api to enable ksm per process
  mm: shrinkers: fix debugfs file permissions
  mm: don't check VMA write permissions if the PTE/PMD indicates write permissions
  migrate_pages_batch: fix statistics for longterm pin retry
  userfaultfd: use helper function range_in_vma()
  lib/show_mem.c: use for_each_populated_zone() simplify code
  mm: correct arg in reclaim_pages()/reclaim_clean_pages_from_list()
  fs/buffer: convert create_page_buffers to folio_create_buffers
  fs/buffer: add folio_create_empty_buffers helper
  ...
Diffstat (limited to 'mm/kfence')
-rw-r--r--   mm/kfence/core.c          70
-rw-r--r--   mm/kfence/kfence.h        10
-rw-r--r--   mm/kfence/kfence_test.c   22
-rw-r--r--   mm/kfence/report.c         2
4 files changed, 61 insertions, 43 deletions
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 7d01a2c76e80..dad3c0eb70a0 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -297,20 +297,13 @@ metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state nex
WRITE_ONCE(meta->state, next);
}
-/* Write canary byte to @addr. */
-static inline bool set_canary_byte(u8 *addr)
-{
- *addr = KFENCE_CANARY_PATTERN(addr);
- return true;
-}
-
/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
struct kfence_metadata *meta;
unsigned long flags;
- if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
+ if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr)))
return true;
atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
@@ -323,15 +316,31 @@ static inline bool check_canary_byte(u8 *addr)
return false;
}
-/* __always_inline this to ensure we won't do an indirect call to fn. */
-static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
+static inline void set_canary(const struct kfence_metadata *meta)
{
const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
- unsigned long addr;
+ unsigned long addr = pageaddr;
+
+ /*
+ * The canary may be written to part of the object memory, but it does
+ * not affect it. The user should initialize the object before using it.
+ */
+ for (; addr < meta->addr; addr += sizeof(u64))
+ *((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
+
+ addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64));
+ for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64))
+ *((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
+}
+
+static inline void check_canary(const struct kfence_metadata *meta)
+{
+ const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
+ unsigned long addr = pageaddr;
/*
- * We'll iterate over each canary byte per-side until fn() returns
- * false. However, we'll still iterate over the canary bytes to the
+ * We'll iterate over each canary byte per-side until a corrupted byte
+ * is found. However, we'll still iterate over the canary bytes to the
* right of the object even if there was an error in the canary bytes to
* the left of the object. Specifically, if check_canary_byte()
* generates an error, showing both sides might give more clues as to
@@ -339,16 +348,35 @@ static __always_inline void for_each_canary(const struct kfence_metadata *meta,
*/
/* Apply to left of object. */
- for (addr = pageaddr; addr < meta->addr; addr++) {
- if (!fn((u8 *)addr))
+ for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
+ if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64))
break;
}
- /* Apply to right of object. */
- for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
- if (!fn((u8 *)addr))
+ /*
+ * If the canary is corrupted in a certain 64 bytes, or the canary
+ * memory cannot be completely covered by multiple consecutive 64 bytes,
+ * it needs to be checked one by one.
+ */
+ for (; addr < meta->addr; addr++) {
+ if (unlikely(!check_canary_byte((u8 *)addr)))
break;
}
+
+ /* Apply to right of object. */
+ for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) {
+ if (unlikely(!check_canary_byte((u8 *)addr)))
+ return;
+ }
+ for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) {
+ if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) {
+
+ for (; addr - pageaddr < PAGE_SIZE; addr++) {
+ if (!check_canary_byte((u8 *)addr))
+ return;
+ }
+ }
+ }
}
static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
@@ -434,7 +462,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
#endif
/* Memory initialization. */
- for_each_canary(meta, set_canary_byte);
+ set_canary(meta);
/*
* We check slab_want_init_on_alloc() ourselves, rather than letting
@@ -495,7 +523,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
alloc_covered_add(meta->alloc_stack_hash, -1);
/* Check canary bytes for memory corruption. */
- for_each_canary(meta, check_canary_byte);
+ check_canary(meta);
/*
* Clear memory if init-on-free is set. While we protect the page, the
@@ -751,7 +779,7 @@ static void kfence_check_all_canary(void)
struct kfence_metadata *meta = &kfence_metadata[i];
if (meta->state == KFENCE_OBJECT_ALLOCATED)
- for_each_canary(meta, check_canary_byte);
+ check_canary(meta);
}
}
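[Editorial note on the core.c hunks above: the old for_each_canary() helper made an indirect call per canary byte, while the new set_canary()/check_canary() fill and verify the redzones one u64 word at a time, dropping to byte granularity only at the unaligned edge next to the object and when a mismatching word has to be narrowed down to the offending byte for the report. Below is a minimal userspace sketch of that technique; the helper names (canary_fill/canary_check) and the standalone buffer are illustrative, not the kernel's API, and the byte/word equivalence assumes a little-endian host.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same construction as KFENCE: the per-byte pattern depends only on addr & 0x7. */
#define CANARY_U8(addr)	((uint8_t)0xaa ^ (uint8_t)((uintptr_t)(addr) & 0x7))
#define CANARY_U64	((uint64_t)0xaaaaaaaaaaaaaaaaULL ^ (uint64_t)0x0706050403020100ULL)

/* Fill [start, end) with the canary, a word at a time where 8-byte aligned. */
static void canary_fill(uint8_t *start, uint8_t *end)
{
	uint64_t pat = CANARY_U64;
	uint8_t *p = start;

	for (; p < end && ((uintptr_t)p & 0x7); p++)	/* unaligned head: per byte */
		*p = CANARY_U8(p);
	for (; p + 8 <= end; p += 8)			/* aligned body: per word */
		memcpy(p, &pat, 8);
	for (; p < end; p++)				/* unaligned tail: per byte */
		*p = CANARY_U8(p);
}

/* Return the first corrupted byte in [start, end), or NULL if intact. */
static uint8_t *canary_check(uint8_t *start, uint8_t *end)
{
	uint8_t *p = start;

	for (; p < end && ((uintptr_t)p & 0x7); p++)
		if (*p != CANARY_U8(p))
			return p;
	for (; p + 8 <= end; p += 8) {
		uint64_t word;

		memcpy(&word, p, 8);
		if (word != CANARY_U64)			/* fast path failed */
			for (uint8_t *q = p; q < p + 8; q++)
				if (*q != CANARY_U8(q))
					return q;	/* narrow down to the byte */
	}
	for (; p < end; p++)
		if (*p != CANARY_U8(p))
			return p;
	return NULL;
}

int main(void)
{
	static uint8_t buf[256];

	canary_fill(buf, buf + sizeof(buf));
	buf[123] ^= 0x1;	/* simulate a stray write into the redzone */
	printf("corrupted at offset %td\n", canary_check(buf, buf + sizeof(buf)) - buf);
	return 0;
}

[In the kernel the left redzone always starts at a page boundary, so the unaligned head loop is unnecessary there; the sketch keeps it so it stays correct for arbitrary start addresses.]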
diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
index 600f2e2431d6..2aafc46a4aaf 100644
--- a/mm/kfence/kfence.h
+++ b/mm/kfence/kfence.h
@@ -21,7 +21,15 @@
* lower 3 bits of the address, to detect memory corruptions with higher
* probability, where similar constants are used.
*/
-#define KFENCE_CANARY_PATTERN(addr) ((u8)0xaa ^ (u8)((unsigned long)(addr) & 0x7))
+#define KFENCE_CANARY_PATTERN_U8(addr) ((u8)0xaa ^ (u8)((unsigned long)(addr) & 0x7))
+
+/*
+ * Define a continuous 8-byte canary starting from a multiple of 8. The canary
+ * of each byte is only related to the lowest three bits of its address, so the
+ * canary of every 8 bytes is the same. 64-bit memory can be filled and checked
+ * at a time instead of byte by byte to improve performance.
+ */
+#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(0x0706050403020100))
/* Maximum stack depth for reports. */
#define KFENCE_STACK_DEPTH 64
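[Editorial note: as the new comment says, KFENCE_CANARY_PATTERN_U64 is just KFENCE_CANARY_PATTERN_U8 replicated across one 8-byte-aligned word, which is what lets set_canary()/check_canary() fill and compare 64 bits at a time. A quick userspace check of that equivalence, as a sketch only; it assumes a little-endian host, where the word's in-memory byte order lines up with the per-byte pattern.]

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define KFENCE_CANARY_PATTERN_U8(addr)	((uint8_t)0xaa ^ (uint8_t)((uintptr_t)(addr) & 0x7))
#define KFENCE_CANARY_PATTERN_U64	((uint64_t)0xaaaaaaaaaaaaaaaaULL ^ (uint64_t)0x0706050403020100ULL)

int main(void)
{
	uint64_t word = KFENCE_CANARY_PATTERN_U64;
	uint8_t bytes[8];

	memcpy(bytes, &word, sizeof(bytes));
	/* On a little-endian host, byte i of the stored word equals the
	 * per-byte pattern for an 8-byte-aligned address plus i. */
	for (int i = 0; i < 8; i++)
		assert(bytes[i] == KFENCE_CANARY_PATTERN_U8(i));
	return 0;
}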
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index b5d66a69200d..6aee19a79236 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -825,33 +825,15 @@ static void test_exit(struct kunit *test)
test_cache_destroy();
}
-static void register_tracepoints(struct tracepoint *tp, void *ignore)
-{
- check_trace_callback_type_console(probe_console);
- if (!strcmp(tp->name, "console"))
- WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
-}
-
-static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
-{
- if (!strcmp(tp->name, "console"))
- tracepoint_probe_unregister(tp, probe_console, NULL);
-}
-
static int kfence_suite_init(struct kunit_suite *suite)
{
- /*
- * Because we want to be able to build the test as a module, we need to
- * iterate through all known tracepoints, since the static registration
- * won't work here.
- */
- for_each_kernel_tracepoint(register_tracepoints, NULL);
+ register_trace_console(probe_console, NULL);
return 0;
}
static void kfence_suite_exit(struct kunit_suite *suite)
{
- for_each_kernel_tracepoint(unregister_tracepoints, NULL);
+ unregister_trace_console(probe_console, NULL);
tracepoint_synchronize_unregister();
}
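[Editorial note: the kfence_test.c change drops the for_each_kernel_tracepoint() walk; with the console tracepoint exported for module use, the test can call the generated register_trace_console()/unregister_trace_console() helpers directly. A rough sketch of that registration pattern in a standalone module follows; the probe body and module boilerplate are illustrative, not the kfence_test.c code.]

#include <linux/module.h>
#include <linux/string.h>
#include <trace/events/printk.h>	/* TRACE_EVENT(console) generates register_trace_console() */

/* Probe signature: the tracepoint's arguments, preceded by the private data pointer. */
static void probe_console(void *ignore, const char *text, size_t len)
{
	/* Illustrative: watch the console output for a KFENCE report line. */
	if (strnstr(text, "BUG: KFENCE", len))
		pr_info("observed a KFENCE report\n");
}

static int __init demo_init(void)
{
	/* Replaces the old for_each_kernel_tracepoint() + tracepoint_probe_register() dance. */
	return register_trace_console(probe_console, NULL);
}

static void __exit demo_exit(void)
{
	unregister_trace_console(probe_console, NULL);
	tracepoint_synchronize_unregister();	/* wait for in-flight probe calls to finish */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");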
diff --git a/mm/kfence/report.c b/mm/kfence/report.c
index 60205f1257ef..197430a5be4a 100644
--- a/mm/kfence/report.c
+++ b/mm/kfence/report.c
@@ -168,7 +168,7 @@ static void print_diff_canary(unsigned long address, size_t bytes_to_show,
pr_cont("[");
for (cur = (const u8 *)address; cur < end; cur++) {
- if (*cur == KFENCE_CANARY_PATTERN(cur))
+ if (*cur == KFENCE_CANARY_PATTERN_U8(cur))
pr_cont(" .");
else if (no_hash_pointers)
pr_cont(" 0x%02x", *cur);