author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-01-07 18:30:22 +0100
committer Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-01-07 18:30:22 +0100
commit    c92b84a95543ed10de290ae699f7f0a778332e65 (patch)
tree      d1ad1dae4c54154d40c5b8d03a6030784fd4a902
parent    7f65b70c26b22c29e93b5260780a08679a723225 (diff)
download  linux-rt-c92b84a95543ed10de290ae699f7f0a778332e65.tar.gz
[ANNOUNCE] v5.10.4-rt21  (tag: v5.10.4-rt21-patches)
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--  patches/0001-z3fold-simplify-freeing-slots.patch                 177
-rw-r--r--  patches/0002-z3fold-stricter-locking-and-more-careful-reclaim.patch  341
-rw-r--r--  patches/localversion.patch                                         2
-rw-r--r--  patches/sched-mmdrop-delayed.patch                                 6
-rw-r--r--  patches/series                                                     4
5 files changed, 5 insertions, 525 deletions
diff --git a/patches/0001-z3fold-simplify-freeing-slots.patch b/patches/0001-z3fold-simplify-freeing-slots.patch
deleted file mode 100644
index f1e9b9151afc..000000000000
--- a/patches/0001-z3fold-simplify-freeing-slots.patch
+++ /dev/null
@@ -1,177 +0,0 @@
-From: Vitaly Wool <vitaly.wool@konsulko.com>
-Date: Mon, 14 Dec 2020 19:12:30 -0800
-Subject: [PATCH 1/3] z3fold: simplify freeing slots
-
-Patch series "z3fold: stability / rt fixes".
-
-Address z3fold stability issues under stress load, primarily in the
-reclaim and free aspects. It also fixes the locking problems that
-were only seen in the real-time kernel configuration.
-
-This patch (of 3):
-
-There used to be two places in the code where slots could be freed, namely
-when freeing the last allocated handle from the slots and when releasing
-the z3fold header these slots are linked to. The logic to decide
-whether to free certain slots was complicated and error-prone in both
-functions, and it led to failures in the RT case.
-
-To fix that, make free_handle() the single point of freeing slots.
-
-Link: https://lkml.kernel.org/r/20201209145151.18994-1-vitaly.wool@konsulko.com
-Link: https://lkml.kernel.org/r/20201209145151.18994-2-vitaly.wool@konsulko.com
-Signed-off-by: Vitaly Wool <vitaly.wool@konsulko.com>
-Tested-by: Mike Galbraith <efault@gmx.de>
-Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Cc: <stable@vger.kernel.org>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/z3fold.c | 55 +++++++++++++------------------------------------------
- 1 file changed, 13 insertions(+), 42 deletions(-)
-
---- a/mm/z3fold.c
-+++ b/mm/z3fold.c
-@@ -90,7 +90,7 @@ struct z3fold_buddy_slots {
- * be enough slots to hold all possible variants
- */
- unsigned long slot[BUDDY_MASK + 1];
-- unsigned long pool; /* back link + flags */
-+ unsigned long pool; /* back link */
- rwlock_t lock;
- };
- #define HANDLE_FLAG_MASK (0x03)
-@@ -182,13 +182,6 @@ enum z3fold_page_flags {
- };
-
- /*
-- * handle flags, go under HANDLE_FLAG_MASK
-- */
--enum z3fold_handle_flags {
-- HANDLES_ORPHANED = 0,
--};
--
--/*
- * Forward declarations
- */
- static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
-@@ -303,10 +296,9 @@ static inline void put_z3fold_header(str
- z3fold_page_unlock(zhdr);
- }
-
--static inline void free_handle(unsigned long handle)
-+static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
- {
- struct z3fold_buddy_slots *slots;
-- struct z3fold_header *zhdr;
- int i;
- bool is_free;
-
-@@ -316,22 +308,13 @@ static inline void free_handle(unsigned
- if (WARN_ON(*(unsigned long *)handle == 0))
- return;
-
-- zhdr = handle_to_z3fold_header(handle);
- slots = handle_to_slots(handle);
- write_lock(&slots->lock);
- *(unsigned long *)handle = 0;
-- if (zhdr->slots == slots) {
-- write_unlock(&slots->lock);
-- return; /* simple case, nothing else to do */
-- }
-+ if (zhdr->slots != slots)
-+ zhdr->foreign_handles--;
-
-- /* we are freeing a foreign handle if we are here */
-- zhdr->foreign_handles--;
- is_free = true;
-- if (!test_bit(HANDLES_ORPHANED, &slots->pool)) {
-- write_unlock(&slots->lock);
-- return;
-- }
- for (i = 0; i <= BUDDY_MASK; i++) {
- if (slots->slot[i]) {
- is_free = false;
-@@ -343,6 +326,8 @@ static inline void free_handle(unsigned
- if (is_free) {
- struct z3fold_pool *pool = slots_to_pool(slots);
-
-+ if (zhdr->slots == slots)
-+ zhdr->slots = NULL;
- kmem_cache_free(pool->c_handle, slots);
- }
- }
-@@ -525,8 +510,6 @@ static void __release_z3fold_page(struct
- {
- struct page *page = virt_to_page(zhdr);
- struct z3fold_pool *pool = zhdr_to_pool(zhdr);
-- bool is_free = true;
-- int i;
-
- WARN_ON(!list_empty(&zhdr->buddy));
- set_bit(PAGE_STALE, &page->private);
-@@ -536,21 +519,6 @@ static void __release_z3fold_page(struct
- list_del_init(&page->lru);
- spin_unlock(&pool->lock);
-
-- /* If there are no foreign handles, free the handles array */
-- read_lock(&zhdr->slots->lock);
-- for (i = 0; i <= BUDDY_MASK; i++) {
-- if (zhdr->slots->slot[i]) {
-- is_free = false;
-- break;
-- }
-- }
-- if (!is_free)
-- set_bit(HANDLES_ORPHANED, &zhdr->slots->pool);
-- read_unlock(&zhdr->slots->lock);
--
-- if (is_free)
-- kmem_cache_free(pool->c_handle, zhdr->slots);
--
- if (locked)
- z3fold_page_unlock(zhdr);
-
-@@ -973,6 +941,9 @@ static inline struct z3fold_header *__z3
- }
- }
-
-+ if (zhdr && !zhdr->slots)
-+ zhdr->slots = alloc_slots(pool,
-+ can_sleep ? GFP_NOIO : GFP_ATOMIC);
- return zhdr;
- }
-
-@@ -1270,7 +1241,7 @@ static void z3fold_free(struct z3fold_po
- }
-
- if (!page_claimed)
-- free_handle(handle);
-+ free_handle(handle, zhdr);
- if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
- atomic64_dec(&pool->pages_nr);
- return;
-@@ -1429,19 +1400,19 @@ static int z3fold_reclaim_page(struct z3
- ret = pool->ops->evict(pool, middle_handle);
- if (ret)
- goto next;
-- free_handle(middle_handle);
-+ free_handle(middle_handle, zhdr);
- }
- if (first_handle) {
- ret = pool->ops->evict(pool, first_handle);
- if (ret)
- goto next;
-- free_handle(first_handle);
-+ free_handle(first_handle, zhdr);
- }
- if (last_handle) {
- ret = pool->ops->evict(pool, last_handle);
- if (ret)
- goto next;
-- free_handle(last_handle);
-+ free_handle(last_handle, zhdr);
- }
- next:
- if (test_bit(PAGE_HEADLESS, &page->private)) {
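
The patch deleted above (merged upstream as of v5.10.4, hence dropped from the
queue) makes free_handle() the single place where a slots array can be
released: the handle is cleared under the slots lock, foreign-handle
accounting is updated, and the array is freed only once every slot is empty.
A minimal userspace sketch of that pattern — pthread locking and plain
calloc()/free() standing in for the kernel's rwlock_t and kmem_cache, all
other names invented for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 4                        /* stands in for BUDDY_MASK + 1 */

struct buddy_slots {
	unsigned long slot[NSLOTS];
	pthread_rwlock_t lock;          /* stands in for rwlock_t */
};

struct header {
	struct buddy_slots *slots;      /* this header's own slots array */
	int foreign_handles;
};

/*
 * Single point of freeing: clear one handle and release the slots array
 * as soon as no live handle remains in it -- the shape of the
 * free_handle() introduced by the deleted patch.
 */
static void free_handle(unsigned long *handle, struct buddy_slots *slots,
			struct header *hdr)
{
	bool is_free = true;
	int i;

	pthread_rwlock_wrlock(&slots->lock);
	*handle = 0;
	if (hdr->slots != slots)
		hdr->foreign_handles--; /* handle lived in a foreign array */

	for (i = 0; i < NSLOTS; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	pthread_rwlock_unlock(&slots->lock);

	if (is_free) {                  /* last handle gone: free the array */
		if (hdr->slots == slots)
			hdr->slots = NULL;
		pthread_rwlock_destroy(&slots->lock);
		free(slots);
	}
}

int main(void)
{
	struct buddy_slots *s = calloc(1, sizeof(*s));
	struct header hdr = { .slots = s, .foreign_handles = 0 };

	pthread_rwlock_init(&s->lock, NULL);
	s->slot[0] = 0xdeadbeef;        /* one live handle */

	free_handle(&s->slot[0], s, &hdr);
	printf("slots array released: %s\n", hdr.slots ? "no" : "yes");
	return 0;
}

The key property is that both the last-native-handle path and the
foreign-handle path funnel through the same emptiness check, which is what
let the patch drop the HANDLES_ORPHANED bookkeeping entirely.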
diff --git a/patches/0002-z3fold-stricter-locking-and-more-careful-reclaim.patch b/patches/0002-z3fold-stricter-locking-and-more-careful-reclaim.patch
deleted file mode 100644
index 00c342c24938..000000000000
--- a/patches/0002-z3fold-stricter-locking-and-more-careful-reclaim.patch
+++ /dev/null
@@ -1,341 +0,0 @@
-From: Vitaly Wool <vitaly.wool@konsulko.com>
-Date: Mon, 14 Dec 2020 19:12:33 -0800
-Subject: [PATCH 2/3] z3fold: stricter locking and more careful reclaim
-
-Use temporary slots in the reclaim function to avoid a possible race
-when freeing them.
-
-While at it, check the CLAIMED flag under the page lock in the reclaim
-function to make sure we are not racing with z3fold_alloc().
-
-Link: https://lkml.kernel.org/r/20201209145151.18994-4-vitaly.wool@konsulko.com
-Signed-off-by: Vitaly Wool <vitaly.wool@konsulko.com>
-Cc: <stable@vger.kernel.org>
-Cc: Mike Galbraith <efault@gmx.de>
-Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/z3fold.c | 143 +++++++++++++++++++++++++++++++++++-------------------------
- 1 file changed, 85 insertions(+), 58 deletions(-)
-
---- a/mm/z3fold.c
-+++ b/mm/z3fold.c
-@@ -182,6 +182,13 @@ enum z3fold_page_flags {
- };
-
- /*
-+ * handle flags, go under HANDLE_FLAG_MASK
-+ */
-+enum z3fold_handle_flags {
-+ HANDLES_NOFREE = 0,
-+};
-+
-+/*
- * Forward declarations
- */
- static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
-@@ -311,6 +318,12 @@ static inline void free_handle(unsigned
- slots = handle_to_slots(handle);
- write_lock(&slots->lock);
- *(unsigned long *)handle = 0;
-+
-+ if (test_bit(HANDLES_NOFREE, &slots->pool)) {
-+ write_unlock(&slots->lock);
-+ return; /* simple case, nothing else to do */
-+ }
-+
- if (zhdr->slots != slots)
- zhdr->foreign_handles--;
-
-@@ -621,6 +634,28 @@ static inline void add_to_unbuddied(stru
- }
- }
-
-+static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
-+{
-+ enum buddy bud = HEADLESS;
-+
-+ if (zhdr->middle_chunks) {
-+ if (!zhdr->first_chunks &&
-+ chunks <= zhdr->start_middle - ZHDR_CHUNKS)
-+ bud = FIRST;
-+ else if (!zhdr->last_chunks)
-+ bud = LAST;
-+ } else {
-+ if (!zhdr->first_chunks)
-+ bud = FIRST;
-+ else if (!zhdr->last_chunks)
-+ bud = LAST;
-+ else
-+ bud = MIDDLE;
-+ }
-+
-+ return bud;
-+}
-+
- static inline void *mchunk_memmove(struct z3fold_header *zhdr,
- unsigned short dst_chunk)
- {
-@@ -682,18 +717,7 @@ static struct z3fold_header *compact_sin
- if (WARN_ON(new_zhdr == zhdr))
- goto out_fail;
-
-- if (new_zhdr->first_chunks == 0) {
-- if (new_zhdr->middle_chunks != 0 &&
-- chunks >= new_zhdr->start_middle) {
-- new_bud = LAST;
-- } else {
-- new_bud = FIRST;
-- }
-- } else if (new_zhdr->last_chunks == 0) {
-- new_bud = LAST;
-- } else if (new_zhdr->middle_chunks == 0) {
-- new_bud = MIDDLE;
-- }
-+ new_bud = get_free_buddy(new_zhdr, chunks);
- q = new_zhdr;
- switch (new_bud) {
- case FIRST:
-@@ -815,9 +839,8 @@ static void do_compact_page(struct z3fol
- return;
- }
-
-- if (unlikely(PageIsolated(page) ||
-- test_bit(PAGE_CLAIMED, &page->private) ||
-- test_bit(PAGE_STALE, &page->private))) {
-+ if (test_bit(PAGE_STALE, &page->private) ||
-+ test_and_set_bit(PAGE_CLAIMED, &page->private)) {
- z3fold_page_unlock(zhdr);
- return;
- }
-@@ -826,13 +849,16 @@ static void do_compact_page(struct z3fol
- zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
- if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
- atomic64_dec(&pool->pages_nr);
-- else
-+ else {
-+ clear_bit(PAGE_CLAIMED, &page->private);
- z3fold_page_unlock(zhdr);
-+ }
- return;
- }
-
- z3fold_compact_page(zhdr);
- add_to_unbuddied(pool, zhdr);
-+ clear_bit(PAGE_CLAIMED, &page->private);
- z3fold_page_unlock(zhdr);
- }
-
-@@ -1080,17 +1106,8 @@ static int z3fold_alloc(struct z3fold_po
- retry:
- zhdr = __z3fold_alloc(pool, size, can_sleep);
- if (zhdr) {
-- if (zhdr->first_chunks == 0) {
-- if (zhdr->middle_chunks != 0 &&
-- chunks >= zhdr->start_middle)
-- bud = LAST;
-- else
-- bud = FIRST;
-- } else if (zhdr->last_chunks == 0)
-- bud = LAST;
-- else if (zhdr->middle_chunks == 0)
-- bud = MIDDLE;
-- else {
-+ bud = get_free_buddy(zhdr, chunks);
-+ if (bud == HEADLESS) {
- if (kref_put(&zhdr->refcount,
- release_z3fold_page_locked))
- atomic64_dec(&pool->pages_nr);
-@@ -1236,7 +1253,6 @@ static void z3fold_free(struct z3fold_po
- pr_err("%s: unknown bud %d\n", __func__, bud);
- WARN_ON(1);
- put_z3fold_header(zhdr);
-- clear_bit(PAGE_CLAIMED, &page->private);
- return;
- }
-
-@@ -1251,8 +1267,7 @@ static void z3fold_free(struct z3fold_po
- z3fold_page_unlock(zhdr);
- return;
- }
-- if (unlikely(PageIsolated(page)) ||
-- test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
-+ if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
- put_z3fold_header(zhdr);
- clear_bit(PAGE_CLAIMED, &page->private);
- return;
-@@ -1316,6 +1331,10 @@ static int z3fold_reclaim_page(struct z3
- struct page *page = NULL;
- struct list_head *pos;
- unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
-+ struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN)));
-+
-+ rwlock_init(&slots.lock);
-+ slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);
-
- spin_lock(&pool->lock);
- if (!pool->ops || !pool->ops->evict || retries == 0) {
-@@ -1330,35 +1349,36 @@ static int z3fold_reclaim_page(struct z3
- list_for_each_prev(pos, &pool->lru) {
- page = list_entry(pos, struct page, lru);
-
-- /* this bit could have been set by free, in which case
-- * we pass over to the next page in the pool.
-- */
-- if (test_and_set_bit(PAGE_CLAIMED, &page->private)) {
-- page = NULL;
-- continue;
-- }
--
-- if (unlikely(PageIsolated(page))) {
-- clear_bit(PAGE_CLAIMED, &page->private);
-- page = NULL;
-- continue;
-- }
- zhdr = page_address(page);
- if (test_bit(PAGE_HEADLESS, &page->private))
- break;
-
-+ if (kref_get_unless_zero(&zhdr->refcount) == 0) {
-+ zhdr = NULL;
-+ break;
-+ }
- if (!z3fold_page_trylock(zhdr)) {
-- clear_bit(PAGE_CLAIMED, &page->private);
-+ if (kref_put(&zhdr->refcount,
-+ release_z3fold_page))
-+ atomic64_dec(&pool->pages_nr);
- zhdr = NULL;
- continue; /* can't evict at this point */
- }
-- if (zhdr->foreign_handles) {
-- clear_bit(PAGE_CLAIMED, &page->private);
-- z3fold_page_unlock(zhdr);
-+
-+ /* test_and_set_bit is of course atomic, but we still
-+ * need to do it under page lock, otherwise checking
-+ * that bit in __z3fold_alloc wouldn't make sense
-+ */
-+ if (zhdr->foreign_handles ||
-+ test_and_set_bit(PAGE_CLAIMED, &page->private)) {
-+ if (kref_put(&zhdr->refcount,
-+ release_z3fold_page))
-+ atomic64_dec(&pool->pages_nr);
-+ else
-+ z3fold_page_unlock(zhdr);
- zhdr = NULL;
- continue; /* can't evict such page */
- }
-- kref_get(&zhdr->refcount);
- list_del_init(&zhdr->buddy);
- zhdr->cpu = -1;
- break;
-@@ -1380,12 +1400,16 @@ static int z3fold_reclaim_page(struct z3
- first_handle = 0;
- last_handle = 0;
- middle_handle = 0;
-+ memset(slots.slot, 0, sizeof(slots.slot));
- if (zhdr->first_chunks)
-- first_handle = encode_handle(zhdr, FIRST);
-+ first_handle = __encode_handle(zhdr, &slots,
-+ FIRST);
- if (zhdr->middle_chunks)
-- middle_handle = encode_handle(zhdr, MIDDLE);
-+ middle_handle = __encode_handle(zhdr, &slots,
-+ MIDDLE);
- if (zhdr->last_chunks)
-- last_handle = encode_handle(zhdr, LAST);
-+ last_handle = __encode_handle(zhdr, &slots,
-+ LAST);
- /*
- * it's safe to unlock here because we hold a
- * reference to this page
-@@ -1400,19 +1424,16 @@ static int z3fold_reclaim_page(struct z3
- ret = pool->ops->evict(pool, middle_handle);
- if (ret)
- goto next;
-- free_handle(middle_handle, zhdr);
- }
- if (first_handle) {
- ret = pool->ops->evict(pool, first_handle);
- if (ret)
- goto next;
-- free_handle(first_handle, zhdr);
- }
- if (last_handle) {
- ret = pool->ops->evict(pool, last_handle);
- if (ret)
- goto next;
-- free_handle(last_handle, zhdr);
- }
- next:
- if (test_bit(PAGE_HEADLESS, &page->private)) {
-@@ -1426,9 +1447,11 @@ static int z3fold_reclaim_page(struct z3
- spin_unlock(&pool->lock);
- clear_bit(PAGE_CLAIMED, &page->private);
- } else {
-+ struct z3fold_buddy_slots *slots = zhdr->slots;
- z3fold_page_lock(zhdr);
- if (kref_put(&zhdr->refcount,
- release_z3fold_page_locked)) {
-+ kmem_cache_free(pool->c_handle, slots);
- atomic64_dec(&pool->pages_nr);
- return 0;
- }
-@@ -1544,8 +1567,7 @@ static bool z3fold_page_isolate(struct p
- VM_BUG_ON_PAGE(!PageMovable(page), page);
- VM_BUG_ON_PAGE(PageIsolated(page), page);
-
-- if (test_bit(PAGE_HEADLESS, &page->private) ||
-- test_bit(PAGE_CLAIMED, &page->private))
-+ if (test_bit(PAGE_HEADLESS, &page->private))
- return false;
-
- zhdr = page_address(page);
-@@ -1557,6 +1579,8 @@ static bool z3fold_page_isolate(struct p
- if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
- goto out;
-
-+ if (test_and_set_bit(PAGE_CLAIMED, &page->private))
-+ goto out;
- pool = zhdr_to_pool(zhdr);
- spin_lock(&pool->lock);
- if (!list_empty(&zhdr->buddy))
-@@ -1583,16 +1607,17 @@ static int z3fold_page_migrate(struct ad
-
- VM_BUG_ON_PAGE(!PageMovable(page), page);
- VM_BUG_ON_PAGE(!PageIsolated(page), page);
-+ VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
- VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
-
- zhdr = page_address(page);
- pool = zhdr_to_pool(zhdr);
-
-- if (!z3fold_page_trylock(zhdr)) {
-+ if (!z3fold_page_trylock(zhdr))
- return -EAGAIN;
-- }
- if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
- z3fold_page_unlock(zhdr);
-+ clear_bit(PAGE_CLAIMED, &page->private);
- return -EBUSY;
- }
- if (work_pending(&zhdr->work)) {
-@@ -1634,6 +1659,7 @@ static int z3fold_page_migrate(struct ad
- queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
-
- page_mapcount_reset(page);
-+ clear_bit(PAGE_CLAIMED, &page->private);
- put_page(page);
- return 0;
- }
-@@ -1657,6 +1683,7 @@ static void z3fold_page_putback(struct p
- spin_lock(&pool->lock);
- list_add(&page->lru, &pool->lru);
- spin_unlock(&pool->lock);
-+ clear_bit(PAGE_CLAIMED, &page->private);
- z3fold_page_unlock(zhdr);
- }
-
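
The second deleted patch (also upstream now) has z3fold_reclaim_page() encode
eviction handles into a temporary slots structure on its own stack, with a
HANDLES_NOFREE bit set in the pool word so that free_handle() can never try
to kmem_cache_free() a stack object. A compact userspace sketch of that
flag-guarded free path, with simplified types and invented values:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NSLOTS 4
#define HANDLES_NOFREE 0               /* bit index in the pool word */

struct buddy_slots {
	unsigned long slot[NSLOTS];
	unsigned long pool;            /* back link + flag bits */
};

/* Common free path: bail out before any bookkeeping when the slots
 * structure is marked NOFREE, i.e. owned by reclaim's stack frame. */
static void free_handle(unsigned long *handle, struct buddy_slots *s)
{
	*handle = 0;
	if (s->pool & (1UL << HANDLES_NOFREE))
		return;                /* reclaim's stack slots: never freed */
	/* heap-allocated slots: emptiness check and free would follow here */
}

int main(void)
{
	/* what z3fold_reclaim_page() does: temporary slots on the stack */
	struct buddy_slots slots;

	memset(slots.slot, 0, sizeof(slots.slot));
	slots.pool = (1UL << HANDLES_NOFREE); /* pool back link omitted */

	slots.slot[1] = 0x1234;        /* stand-in for __encode_handle() */
	free_handle(&slots.slot[1], &slots);
	printf("handle cleared, stack slots left alone: %#lx\n",
	       slots.slot[1]);
	return 0;
}

Because the real structure must satisfy the handle encoding's alignment, the
patch declares it with __attribute__((aligned(SLOTS_ALIGN))); the sketch
skips that detail.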
diff --git a/patches/localversion.patch b/patches/localversion.patch
index d7c1a50b87ee..bba4391fd1bd 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt20
++-rt21
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index 46a0bf2fd08e..17d86492aefc 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -22,8 +22,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#include <linux/rcupdate.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
-
-@@ -548,6 +549,9 @@ struct mm_struct {
+ #include <linux/seqlock.h>
+@@ -556,6 +557,9 @@ struct mm_struct {
bool tlb_flush_batched;
#endif
struct uprobes_state uprobes_state;
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
-@@ -7269,6 +7273,7 @@ void sched_setnuma(struct task_struct *p
+@@ -7265,6 +7269,7 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/patches/series b/patches/series
index b9975fd0af51..949197974061 100644
--- a/patches/series
+++ b/patches/series
@@ -5,9 +5,7 @@
############################################################
# UPSTREAM merged
############################################################
-# z3fold, fc5488651c7d840c9cad9b0f273f2f31bd03413a+
-0001-z3fold-simplify-freeing-slots.patch
-0002-z3fold-stricter-locking-and-more-careful-reclaim.patch
+# z3fold, 135f97fd0c398f20a544cc52c3f8a3cb925a8aef
0003-z3fold-remove-preempt-disabled-sections-for-RT.patch
############################################################