author:    Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2015-12-23 23:25:26 +0100
committer: Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2015-12-23 23:52:17 +0100
commit:    7bbd599b8bb8ca9e9b7cf0fdf77da61fc1eb7d11
tree:      a218f64a3fd415012d135ec0c1202211d66600fa /patches/mm-enable-slub.patch
parent:    1ccda4237a810cc0ef5c801ed1fd512543c70785
download:  linux-rt-7bbd599b8bb8ca9e9b7cf0fdf77da61fc1eb7d11.tar.gz
[ANNOUNCE] 4.4-rc6-rt1 (tags: v4.4-rc6-rt1, v4.4-rc6-rt1-patches)
Please don't continue reading before Christmas eve (or morning,
depending on your schedule). If you don't celebrate Christmas,
well, go ahead.
Dear RT folks!
I'm pleased to announce the v4.4-rc6-rt1 patch set. I tested it on my
AMD A10, 64bit. Nothing exploded so far and the filesystem is still there.
I haven't tested it on anything else. Before someone asks: this does not
mean it does *not* work on ARM; I simply did not try it.
If you are brave, then download it, install it and have fun. If something
breaks, please report it. If your machine starts blinking like a
Christmas tree while using the patch, then *please* send a photo.
Changes since v4.1.15-rt17:
- rebase to v4.4-rc6
Known issues (inherited from v4.1-RT):
- bcache stays disabled
- CPU hotplug is not better than before
- The netlink_release() OOPS, reported by Clark, is still on the
list, but unsolved due to lack of information
- Christoph Mathys reported a stall in cgroup locking code while using
Linux containers.
You can get this release via the git tree at:
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.4-rc6-rt1
The RT patch against 4.4-rc6 can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patch-4.4-rc6-rt1.patch.xz
The split quilt queue is available at:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4-rc6-rt1.tar.xz
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Diffstat (limited to 'patches/mm-enable-slub.patch')
-rw-r--r--  patches/mm-enable-slub.patch  153
1 file changed, 93 insertions(+), 60 deletions(-)
diff --git a/patches/mm-enable-slub.patch b/patches/mm-enable-slub.patch
index 3a1e45bb4c68..4d3a46dd9db1 100644
--- a/patches/mm-enable-slub.patch
+++ b/patches/mm-enable-slub.patch
@@ -7,13 +7,13 @@ move the freeing out of the lock held region.
 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 ---
- mm/slab.h |   4 ++
- mm/slub.c | 118 +++++++++++++++++++++++++++++++++++++++++++++++---------------
- 2 files changed, 95 insertions(+), 27 deletions(-)
+ mm/slab.h |   4 +
+ mm/slub.c | 125 ++++++++++++++++++++++++++++++++++++++++++++++++--------------
+ 2 files changed, 102 insertions(+), 27 deletions(-)
 --- a/mm/slab.h
 +++ b/mm/slab.h
-@@ -330,7 +330,11 @@ static inline struct kmem_cache *cache_f
+@@ -324,7 +324,11 @@ static inline struct kmem_cache *cache_f
  * The slab lists for all objects.
  */
 struct kmem_cache_node {
@@ -27,16 +27,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 struct list_head slabs_partial; /* partial list first, better asm code */
 --- a/mm/slub.c
 +++ b/mm/slub.c
-@@ -1069,7 +1069,7 @@ static noinline struct kmem_cache_node *
- {
- struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+@@ -1075,7 +1075,7 @@ static noinline struct kmem_cache_node *
+ void *object = head;
+ int cnt = 0;
- spin_lock_irqsave(&n->list_lock, *flags);
+ raw_spin_lock_irqsave(&n->list_lock, *flags);
 slab_lock(page);
 if (!check_slab(s, page))
-@@ -1116,7 +1116,7 @@ static noinline struct kmem_cache_node *
+@@ -1136,7 +1136,7 @@ static noinline struct kmem_cache_node *
 fail:
 slab_unlock(page);
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 slab_fix(s, "Object at 0x%p not freed", object);
 return NULL;
 }
-@@ -1242,6 +1242,12 @@ static inline void dec_slabs_node(struct
+@@ -1263,6 +1263,12 @@ static inline void dec_slabs_node(struct
 #endif /* CONFIG_SLUB_DEBUG */
@@ -58,32 +58,32 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 /*
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
-@@ -1352,7 +1358,11 @@ static struct page *allocate_slab(struct
+@@ -1402,7 +1408,11 @@ static struct page *allocate_slab(struct
 flags &= gfp_allowed_mask;
 +#ifdef CONFIG_PREEMPT_RT_FULL
 + if (system_state == SYSTEM_RUNNING)
 +#else
- if (flags & __GFP_WAIT)
+ if (gfpflags_allow_blocking(flags))
 +#endif
 local_irq_enable();
 flags |= s->allocflags;
-@@ -1421,7 +1431,11 @@ static struct page *allocate_slab(struct
+@@ -1473,7 +1483,11 @@ static struct page *allocate_slab(struct
 page->frozen = 1;
 out:
 +#ifdef CONFIG_PREEMPT_RT_FULL
 + if (system_state == SYSTEM_RUNNING)
 +#else
- if (flags & __GFP_WAIT)
+ if (gfpflags_allow_blocking(flags))
 +#endif
 local_irq_disable();
 if (!page)
 return NULL;
-@@ -1478,6 +1492,16 @@ static void __free_slab(struct kmem_cach
- memcg_uncharge_slab(s, order);
+@@ -1529,6 +1543,16 @@ static void __free_slab(struct kmem_cach
+ __free_kmem_pages(page, order);
 }
+static void free_delayed(struct list_head *h)
+{
+ while(!list_empty(h)) {
+ struct page *page = list_first_entry(h, struct page, lru);
+
+ list_del(&page->lru);
+ __free_slab(page->slab_cache, page);
+ }
+}
+
 #define need_reserve_slab_rcu \
 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
-@@ -1512,6 +1536,12 @@ static void free_slab(struct kmem_cache
+@@ -1560,6 +1584,12 @@ static void free_slab(struct kmem_cache
 }
 call_rcu(head, rcu_free_slab);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ } else if (irqs_disabled()) {
+ struct slub_free_list *f = this_cpu_ptr(&slub_free_list);
+ raw_spin_lock(&f->lock);
+ list_add(&page->lru, &f->list);
+ raw_spin_unlock(&f->lock);
+#endif
 } else
 __free_slab(s, page);
 }
-@@ -1625,7 +1655,7 @@ static void *get_partial_node(struct kme
+@@ -1673,7 +1703,7 @@ static void *get_partial_node(struct kme
 if (!n || !n->nr_partial)
 return NULL;
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
 list_for_each_entry_safe(page, page2, &n->partial, lru) {
 void *t;
-@@ -1650,7 +1680,7 @@ static void *get_partial_node(struct kme
+@@ -1698,7 +1728,7 @@ static void *get_partial_node(struct kme
 break;
 }
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
 return object;
 }
-@@ -1896,7 +1926,7 @@ static void deactivate_slab(struct kmem_
+@@ -1944,7 +1974,7 @@ static void deactivate_slab(struct kmem_
 * that acquire_slab() will see a slab page that
 * is frozen
 */
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
 }
 } else {
 m = M_FULL;
-@@ -1907,7 +1937,7 @@ static void deactivate_slab(struct kmem_
+@@ -1955,7 +1985,7 @@ static void deactivate_slab(struct kmem_
 * slabs from diagnostic functions will not see
 * any frozen slabs.
 */
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
 }
 }
-@@ -1942,7 +1972,7 @@ static void deactivate_slab(struct kmem_
+@@ -1990,7 +2020,7 @@ static void deactivate_slab(struct kmem_
 goto redo;
 if (lock)
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
 if (m == M_FREE) {
 stat(s, DEACTIVATE_EMPTY);
-@@ -1974,10 +2004,10 @@ static void unfreeze_partials(struct kme
+@@ -2022,10 +2052,10 @@ static void unfreeze_partials(struct kme
 n2 = get_node(s, page_to_nid(page));
 if (n != n2) {
 if (n)
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
 n = n2;
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
 }
 do {
-@@ -2006,7 +2036,7 @@ static void unfreeze_partials(struct kme
+@@ -2054,7 +2084,7 @@ static void unfreeze_partials(struct kme
 }
 if (n)
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
 while (discard_page) {
 page = discard_page;
-@@ -2045,14 +2075,21 @@ static void put_cpu_partial(struct kmem_
+@@ -2093,14 +2123,21 @@ static void put_cpu_partial(struct kmem_
 pobjects = oldpage->pobjects;
 pages = oldpage->pages;
 if (drain && pobjects > s->cpu_partial) {
 oldpage = NULL;
 pobjects = 0;
 pages = 0;
-@@ -2124,7 +2161,22 @@ static bool has_cpu_slab(int cpu, void *
+@@ -2172,7 +2209,22 @@ static bool has_cpu_slab(int cpu, void *
 static void flush_all(struct kmem_cache *s)
 {
 }
 /*
-@@ -2160,10 +2212,10 @@ static unsigned long count_partial(struc
+@@ -2208,10 +2260,10 @@ static unsigned long count_partial(struc
 unsigned long x = 0;
 struct page *page;
 return x;
 }
 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2300,9 +2352,11 @@ static inline void *get_freelist(struct
- static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
- unsigned long addr, struct kmem_cache_cpu *c)
+@@ -2349,8 +2401,10 @@ static inline void *get_freelist(struct
+ * already disabled (which is the case for bulk allocation).
+ */
+ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-- unsigned long addr, struct kmem_cache_cpu *c)
-+ unsigned long addr, struct kmem_cache_cpu *c,
-+ struct list_head *to_free)
 {
+ struct slub_free_list *f;
 void *freelist;
 struct page *page;
-@@ -2410,6 +2464,13 @@ static void *___slab_alloc(struct kmem_c
 VM_BUG_ON(!c->page->frozen);
 c->freelist = get_freepointer(s, freelist);
 c->tid = next_tid(c->tid);
+
+out:
+ f = this_cpu_ptr(&slub_free_list);
+ raw_spin_lock(&f->lock);
+ list_splice_init(&f->list, to_free);
+ raw_spin_unlock(&f->lock);
+
 return freelist;
 new_slab:
-@@ -2441,7 +2502,7 @@ static void *___slab_alloc(struct kmem_c
 deactivate_slab(s, page, get_freepointer(s, freelist));
 c->page = NULL;
 c->freelist = NULL;
+ goto out;
 }
 /*
-@@ -2453,6 +2514,7 @@ static void *__slab_alloc(struct kmem_ca
 {
 void *p;
 unsigned long flags;
+ LIST_HEAD(tofree);
 local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
-@@ -2464,8 +2526,9 @@ static void *__slab_alloc(struct kmem_ca
 c = this_cpu_ptr(s->cpu_slab);
 #endif
- p = ___slab_alloc(s, gfpflags, node, addr, c);
+ p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree);
 local_irq_restore(flags);
+ free_delayed(&tofree);
 return p;
 }
-@@ -2652,7 +2715,7 @@ static void __slab_free(struct kmem_cach
 do {
 if (unlikely(n)) {
 n = NULL;
 }
 prior = page->freelist;
-@@ -2684,7 +2747,7 @@ static void __slab_free(struct kmem_cach
 * Otherwise the list_lock will synchronize with
 * other processors updating the list of slabs.
 */
 }
 }
-@@ -2726,7 +2789,7 @@ static void __slab_free(struct kmem_cach
 add_partial(n, page, DEACTIVATE_TO_TAIL);
 stat(s, FREE_ADD_PARTIAL);
 }
 return;
 slab_empty:
-@@ -2741,7 +2804,7 @@ static void __slab_free(struct kmem_cach
 remove_full(s, n, page);
 }
 stat(s, FREE_SLAB);
 discard_slab(s, page);
 }
-@@ -2913,6 +2976,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
 void **p)
 {
 struct kmem_cache_cpu *c;
+ LIST_HEAD(to_free);
 int i;
 /* memcg and kmem_cache debug support */
-@@ -2936,7 +3000,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
 * of re-populating per CPU c->freelist
 */
 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
- _RET_IP_, c);
+ _RET_IP_, c, &to_free);
 if (unlikely(!p[i]))
 goto error;
-@@ -2948,6 +3012,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
 }
 c->tid = next_tid(c->tid);
 local_irq_enable();
+ free_delayed(&to_free);
 /* Clear memory outside IRQ disabled fastpath loop */
 if (unlikely(flags & __GFP_ZERO)) {
-@@ -3095,7 +3160,7 @@ static void
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
 n->nr_partial = 0;
- spin_lock_init(&n->list_lock);
+ raw_spin_lock_init(&n->list_lock);
 INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
 atomic_long_set(&n->nr_slabs, 0);
-@@ -3458,7 +3516,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3677,7 +3742,7 @@ int __kmem_cache_shrink(struct kmem_cach
 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
 INIT_LIST_HEAD(promote + i);
 /*
 * Build lists of slabs to discard or promote.
-@@ -3489,7 +3547,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3708,7 +3773,7 @@ int __kmem_cache_shrink(struct kmem_cach
 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
 list_splice(promote + i, &n->partial);
 /* Release empty slabs */
 list_for_each_entry_safe(page, t, &discard, lru)
-@@ -3665,6 +3723,12 @@ void __init kmem_cache_init(void)
+@@ -3884,6 +3949,12 @@ void __init kmem_cache_init(void)
 {
 static __initdata struct kmem_cache boot_kmem_cache,
 boot_kmem_cache_node;
 if (debug_guardpage_minorder())
 slub_max_order = 0;
-@@ -3907,7 +3971,7 @@ static int validate_slab_node(struct kme
+@@ -4127,7 +4198,7 @@ static int validate_slab_node(struct kme
 struct page *page;
 unsigned long flags;
 list_for_each_entry(page, &n->partial, lru) {
 validate_slab_slab(s, page, map);
-@@ -3929,7 +3993,7 @@ static int validate_slab_node(struct kme
+@@ -4149,7 +4220,7 @@ static int validate_slab_node(struct kme
 s->name, count, atomic_long_read(&n->nr_slabs));
 out:
 return count;
 }
-@@ -4117,12 +4181,12 @@ static int list_locations(struct kmem_ca
+@@ -4337,12 +4408,12 @@ static int list_locations(struct kmem_ca
 if (!atomic_long_read(&n->nr_slabs))
 continue;
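(The diff is truncated here; see the full patch at the URLs above.)

For readers new to the queue, the refreshed patch keeps the same two ideas as the 4.1-rt version: kmem_cache_node.list_lock becomes a raw spinlock, and __free_slab() work that would otherwise run in an atomic region is parked on a per-CPU list and flushed later via free_delayed(). The following stand-alone user-space C sketch only illustrates that deferred-free pattern; it is not kernel code, and the names (deferred_free_list, queue_free, flush_free) are made up for the example.

/*
 * Sketch of "queue now, free later": callers in a context where freeing is
 * not allowed only enqueue; the real free happens once it is safe again.
 * Build with: cc -o defer defer.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct deferred_item {
	struct deferred_item *next;
	void *obj;
};

struct deferred_free_list {
	pthread_spinlock_t lock;
	struct deferred_item *head;
};

/* Called where freeing directly is forbidden: just park the object. */
static void queue_free(struct deferred_free_list *f, void *obj)
{
	struct deferred_item *it = malloc(sizeof(*it));

	if (!it)
		abort();
	it->obj = obj;
	pthread_spin_lock(&f->lock);
	it->next = f->head;
	f->head = it;
	pthread_spin_unlock(&f->lock);
}

/* Called later, outside the critical region: splice the list and free it. */
static void flush_free(struct deferred_free_list *f)
{
	struct deferred_item *it;

	pthread_spin_lock(&f->lock);
	it = f->head;
	f->head = NULL;
	pthread_spin_unlock(&f->lock);

	while (it) {
		struct deferred_item *next = it->next;

		free(it->obj);		/* the postponed work */
		free(it);
		it = next;
	}
}

int main(void)
{
	struct deferred_free_list f = { .head = NULL };

	pthread_spin_init(&f.lock, PTHREAD_PROCESS_PRIVATE);
	queue_free(&f, malloc(64));
	queue_free(&f, malloc(128));
	flush_free(&f);			/* both buffers are released here */
	pthread_spin_destroy(&f.lock);
	puts("deferred frees flushed");
	return 0;
}

In the patch itself the same split shows up as ___slab_alloc() collecting pages into a to_free list under the raw lock and __slab_alloc()/kmem_cache_alloc_bulk() calling free_delayed() only after interrupts are enabled again.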