author		Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2017-02-19 16:30:01 +0100
committer	Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2017-02-19 16:30:01 +0100
commit		71ddc37dfde8908d449ad0b40dd9a617bd145a6a (patch)
tree		c322dbab6581f2b86f132075f9b08bcc5c1a68a8
parent		f7decc32ee239783ef21948a5cd9a86b293ec0e1 (diff)
download	linux-rt-71ddc37dfde8908d449ad0b40dd9a617bd145a6a.tar.gz
[ANNOUNCE] v4.9.11-rt9 (tags: v4.9.11-rt9, v4.9.11-rt9-patches)
Dear RT folks!

I'm pleased to announce the v4.9.11-rt9 patch set.

Changes since v4.9.11-rt8:

  - rt_mutex_destroy() is EXPORT_SYMBOL_GPL again. As pointed out by
    Peter Zijlstra, the removal of _GPL is not required.

  - Added a rescheduling point so we don't forget to run a runnable task
    at elevated priority under certain circumstances.

  - The static initializers for PER_CPU locks were wrong. This affects
    the local_locks and resulted in lockdep disabling itself a few
    minutes after boot on _big_ iron (100 CPUs+). Reported by Mike
    Galbraith and patched by Thomas Gleixner.

Known issues
  - CPU hotplug got a little better but can deadlock.

The delta patch against v4.9.11-rt8 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/incr/patch-4.9.11-rt8-rt9.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.9.11-rt9

The RT patch against v4.9.11 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patch-4.9.11-rt9.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.11-rt9.tar.xz

Sebastian

diff --git a/include/linux/module.h b/include/linux/module.h
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -496,6 +496,7 @@ static inline int module_is_live(struct module *mod)
 struct module *__module_text_address(unsigned long addr);
 struct module *__module_address(unsigned long addr);
 bool is_module_address(unsigned long addr);
+bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
 bool is_module_percpu_address(unsigned long addr);
 bool is_module_text_address(unsigned long addr);
 
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -139,6 +139,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
 #endif
 
 extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
 extern bool is_kernel_percpu_address(unsigned long addr);
 
 #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -658,6 +658,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	struct lockdep_subclass_key *key;
 	struct hlist_head *hash_head;
 	struct lock_class *class;
+	bool is_static = false;
 
 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 		debug_locks_off();
@@ -671,10 +672,23 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 
 	/*
 	 * Static locks do not have their class-keys yet - for them the key
-	 * is the lock object itself:
+	 * is the lock object itself. If the lock is in the per cpu area,
+	 * the canonical address of the lock (per cpu offset removed) is
+	 * used.
 	 */
-	if (unlikely(!lock->key))
-		lock->key = (void *)lock;
+	if (unlikely(!lock->key)) {
+		unsigned long can_addr, addr = (unsigned long)lock;
+
+		if (__is_kernel_percpu_address(addr, &can_addr))
+			lock->key = (void *)can_addr;
+		else if (__is_module_percpu_address(addr, &can_addr))
+			lock->key = (void *)can_addr;
+		else if (static_obj(lock))
+			lock->key = (void *)lock;
+		else
+			return ERR_PTR(-EINVAL);
+		is_static = true;
+	}
 
 	/*
 	 * NOTE: the class-key must be unique. For dynamic locks, a static
@@ -706,7 +720,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 		}
 	}
 
-	return NULL;
+	return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
 }
 
 /*
@@ -724,19 +738,18 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
 	class = look_up_lock_class(lock, subclass);
-	if (likely(class))
+	if (likely(!IS_ERR_OR_NULL(class)))
 		goto out_set_class_cache;
 
 	/*
 	 * Debug-check: all keys must be persistent!
-	 */
-	if (!static_obj(lock->key)) {
+	 */
+	if (IS_ERR(class)) {
 		debug_locks_off();
 		printk("INFO: trying to register non-static key.\n");
 		printk("the code is fine but needs lockdep annotation.\n");
 		printk("turning off the locking correctness validator.\n");
 		dump_stack();
-
 		return NULL;
 	}
 
@@ -3410,7 +3423,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 	 * Clearly if the lock hasn't been acquired _ever_, we're not
 	 * holding it either, so report failure.
 	 */
-	if (!class)
+	if (IS_ERR_OR_NULL(class))
 		return 0;
 
 	/*
@@ -4161,7 +4174,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		 * If the class exists we look it up and zap it:
 		 */
 		class = look_up_lock_class(lock, j);
-		if (class)
+		if (!IS_ERR_OR_NULL(class))
 			zap_class(class);
 	}
 	/*
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -2211,7 +2211,8 @@ void rt_mutex_destroy(struct rt_mutex *lock)
 	lock->magic = NULL;
 #endif
 }
-EXPORT_SYMBOL(rt_mutex_destroy);
+
+EXPORT_SYMBOL_GPL(rt_mutex_destroy);
 
 /**
  * __rt_mutex_init - initialize the rt lock
diff --git a/kernel/module.c b/kernel/module.c
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -660,16 +660,7 @@ static void percpu_modcopy(struct module *mod,
 	memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
 }
 
-/**
- * is_module_percpu_address - test whether address is from module static percpu
- * @addr: address to test
- *
- * Test whether @addr belongs to module static percpu area.
- *
- * RETURNS:
- * %true if @addr is from module static percpu area
- */
-bool is_module_percpu_address(unsigned long addr)
+bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
 {
 	struct module *mod;
 	unsigned int cpu;
@@ -683,9 +674,11 @@ bool is_module_percpu_address(unsigned long addr)
 			continue;
 		for_each_possible_cpu(cpu) {
 			void *start = per_cpu_ptr(mod->percpu, cpu);
+			void *va = (void *)addr;
 
-			if ((void *)addr >= start &&
-			    (void *)addr < start + mod->percpu_size) {
+			if (va >= start && va < start + mod->percpu_size) {
+				if (can_addr)
+					*can_addr = (unsigned long) (va - start);
 				preempt_enable();
 				return true;
 			}
@@ -696,6 +689,20 @@ bool is_module_percpu_address(unsigned long addr)
 	return false;
 }
 
+/**
+ * is_module_percpu_address - test whether address is from module static percpu
+ * @addr: address to test
+ *
+ * Test whether @addr belongs to module static percpu area.
+ *
+ * RETURNS:
+ * %true if @addr is from module static percpu area
+ */
+bool is_module_percpu_address(unsigned long addr)
+{
+	return __is_module_percpu_address(addr, NULL);
+}
+
 #else /* ... !CONFIG_SMP */
 
 static inline void __percpu *mod_percpu(struct module *mod)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1730,12 +1730,11 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 #ifdef CONFIG_SMP
 		if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
 			queue_push_tasks(rq);
-#else
+#endif
 		if (dl_task(rq->curr))
 			check_preempt_curr_dl(rq, p, 0);
 		else
 			resched_curr(rq);
-#endif
 	}
 }
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2200,10 +2200,9 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 #ifdef CONFIG_SMP
 		if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
 			queue_push_tasks(rq);
-#else
+#endif /* CONFIG_SMP */
 		if (p->prio < rq->curr->prio)
 			resched_curr(rq);
-#endif /* CONFIG_SMP */
 	}
 }
 
diff --git a/localversion-rt b/localversion-rt
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt8
+-rt9
diff --git a/mm/percpu.c b/mm/percpu.c
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1280,6 +1280,28 @@ void free_percpu(void __percpu *ptr)
 }
 EXPORT_SYMBOL_GPL(free_percpu);
 
+bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
+{
+#ifdef CONFIG_SMP
+	const size_t static_size = __per_cpu_end - __per_cpu_start;
+	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		void *start = per_cpu_ptr(base, cpu);
+		void *va = (void *)addr;
+
+		if (va >= start && va < start + static_size) {
+			if (can_addr)
+				*can_addr = (unsigned long) (va - start);
+			return true;
+		}
+	}
+#endif
+	/* on UP, can't distinguish from other static vars, always false */
+	return false;
+}
+
 /**
  * is_kernel_percpu_address - test whether address is from static percpu area
  * @addr: address to test
@@ -1293,20 +1315,7 @@ EXPORT_SYMBOL_GPL(free_percpu);
  */
 bool is_kernel_percpu_address(unsigned long addr)
 {
-#ifdef CONFIG_SMP
-	const size_t static_size = __per_cpu_end - __per_cpu_start;
-	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu) {
-		void *start = per_cpu_ptr(base, cpu);
-
-		if ((void *)addr >= start && (void *)addr < start + static_size)
-			return true;
-	}
-#endif
-	/* on UP, can't distinguish from other static vars, always false */
-	return false;
+	return __is_kernel_percpu_address(addr, NULL);
 }
 
 /**

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
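The per-CPU lockdep problem fixed above is easiest to picture with a small example. The following sketch is illustrative only (the struct and variable names follow the example in the lockdep patch description, they are not actual kernel code): a per-CPU structure whose embedded lock is initialized at build time, so its lockdep key is still unset the first time it is acquired.

    #include <linux/percpu.h>
    #include <linux/spinlock.h>

    struct foo {
            spinlock_t lock;
            int counter;
    };

    /* One instance of 'bla' exists per CPU; the lock is initialized
     * statically, so no spin_lock_init() ever registers a key for it. */
    DEFINE_PER_CPU(struct foo, bla) = {
            .lock = __SPIN_LOCK_UNLOCKED(bla.lock),
    };

Before this release, lockdep fell back to the lock's address as the key, and since each CPU's copy of 'bla' lives at a different address, every CPU got its own lock class, which on big machines led lockdep to disable itself as described above. The delta keys all copies by the canonical (offset-only) address instead.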
-rw-r--r--	patches/dm-make-rt-aware.patch	2
-rw-r--r--	patches/localversion.patch	2
-rw-r--r--	patches/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch	269
-rw-r--r--	patches/lockdep-no-softirq-accounting-on-rt.patch	4
-rw-r--r--	patches/mm-enable-slub.patch	74
-rw-r--r--	patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch	4
-rw-r--r--	patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch	2
-rw-r--r--	patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch	2
-rw-r--r--	patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch	8
-rw-r--r--	patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch	2
-rw-r--r--	patches/perf-make-swevent-hrtimer-irqsafe.patch	2
-rw-r--r--	patches/rt-drop_mutex_disable_on_not_debug.patch	20
-rw-r--r--	patches/rtmutex-add-a-first-shot-of-ww_mutex.patch	4
-rw-r--r--	patches/sched-rt-Add-a-missing-rescheduling-point.patch	70
-rw-r--r--	patches/series	2
-rw-r--r--	patches/skbufhead-raw-lock.patch	16
-rw-r--r--	patches/slub-enable-irqs-for-no-wait.patch	4
-rw-r--r--	patches/softirq-preempt-fix-3-re.patch	14
-rw-r--r--	patches/softirq-split-locks.patch	2
-rw-r--r--	patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch	4
20 files changed, 416 insertions, 91 deletions
diff --git a/patches/dm-make-rt-aware.patch b/patches/dm-make-rt-aware.patch
index 5d4b4f5aae93..a4d929a38c46 100644
--- a/patches/dm-make-rt-aware.patch
+++ b/patches/dm-make-rt-aware.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
-@@ -838,7 +838,7 @@ static void dm_old_request_fn(struct req
+@@ -842,7 +842,7 @@ static void dm_old_request_fn(struct req
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
kthread_queue_work(&md->kworker, &tio->work);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 68c7b973cc48..02952cda4bfa 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt8
++-rt9
diff --git a/patches/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch b/patches/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch
new file mode 100644
index 000000000000..7a064885bfdd
--- /dev/null
+++ b/patches/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch
@@ -0,0 +1,269 @@
+From 178617ad4fa9d79f89d2ba3bf437a38fdec3ef5b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 17 Feb 2017 19:44:39 +0100
+Subject: [PATCH] lockdep: Handle statically initialized PER_CPU locks properly
+
+If a PER_CPU struct which contains a spin_lock is statically initialized
+via:
+
+DEFINE_PER_CPU(struct foo, bla) = {
+ .lock = __SPIN_LOCK_UNLOCKED(bla.lock)
+};
+
+then lockdep assigns a separate key to each lock because the logic for
+assigning a key to statically initialized locks is to use the address as
+the key. With per CPU locks the address is obviously different on each CPU.
+
+That's wrong, because all locks should have the same key.
+
+To solve this the following modifications are required:
+
+ 1) Extend the is_kernel/module_percpu_addr() functions to hand back the
+ canonical address of the per CPU address, i.e. the per CPU address
+ minus the per CPU offset.
+
+ 2) Check the lock address with these functions and if the per CPU check
+ matches use the returned canonical address as the lock key, so all per
+ CPU locks have the same key.
+
+ 3) Move the static_obj(key) check into look_up_lock_class() so this check
+ can be avoided for statically initialized per CPU locks. That's
+ required because the canonical address fails the static_obj(key) check
+ for obvious reasons.
+
+Reported-by: Mike Galbraith <efault@gmx.de>
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/module.h | 1 +
+ include/linux/percpu.h | 1 +
+ kernel/locking/lockdep.c | 35 ++++++++++++++++++++++++-----------
+ kernel/module.c | 31 +++++++++++++++++++------------
+ mm/percpu.c | 37 +++++++++++++++++++++++--------------
+ 5 files changed, 68 insertions(+), 37 deletions(-)
+
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -496,6 +496,7 @@ static inline int module_is_live(struct
+ struct module *__module_text_address(unsigned long addr);
+ struct module *__module_address(unsigned long addr);
+ bool is_module_address(unsigned long addr);
++bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
+ bool is_module_percpu_address(unsigned long addr);
+ bool is_module_text_address(unsigned long addr);
+
+--- a/include/linux/percpu.h
++++ b/include/linux/percpu.h
+@@ -110,6 +110,7 @@ extern int __init pcpu_page_first_chunk(
+ #endif
+
+ extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
++extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
+ extern bool is_kernel_percpu_address(unsigned long addr);
+
+ #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -658,6 +658,7 @@ look_up_lock_class(struct lockdep_map *l
+ struct lockdep_subclass_key *key;
+ struct hlist_head *hash_head;
+ struct lock_class *class;
++ bool is_static = false;
+
+ if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
+ debug_locks_off();
+@@ -671,10 +672,23 @@ look_up_lock_class(struct lockdep_map *l
+
+ /*
+ * Static locks do not have their class-keys yet - for them the key
+- * is the lock object itself:
+- */
+- if (unlikely(!lock->key))
+- lock->key = (void *)lock;
++ * is the lock object itself. If the lock is in the per cpu area,
++ * the canonical address of the lock (per cpu offset removed) is
++ * used.
++ */
++ if (unlikely(!lock->key)) {
++ unsigned long can_addr, addr = (unsigned long)lock;
++
++ if (__is_kernel_percpu_address(addr, &can_addr))
++ lock->key = (void *)can_addr;
++ else if (__is_module_percpu_address(addr, &can_addr))
++ lock->key = (void *)can_addr;
++ else if (static_obj(lock))
++ lock->key = (void *)lock;
++ else
++ return ERR_PTR(-EINVAL);
++ is_static = true;
++ }
+
+ /*
+ * NOTE: the class-key must be unique. For dynamic locks, a static
+@@ -706,7 +720,7 @@ look_up_lock_class(struct lockdep_map *l
+ }
+ }
+
+- return NULL;
++ return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
+ }
+
+ /*
+@@ -724,19 +738,18 @@ register_lock_class(struct lockdep_map *
+ DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+
+ class = look_up_lock_class(lock, subclass);
+- if (likely(class))
++ if (likely(!IS_ERR_OR_NULL(class)))
+ goto out_set_class_cache;
+
+ /*
+ * Debug-check: all keys must be persistent!
+- */
+- if (!static_obj(lock->key)) {
++ */
++ if (IS_ERR(class)) {
+ debug_locks_off();
+ printk("INFO: trying to register non-static key.\n");
+ printk("the code is fine but needs lockdep annotation.\n");
+ printk("turning off the locking correctness validator.\n");
+ dump_stack();
+-
+ return NULL;
+ }
+
+@@ -3410,7 +3423,7 @@ static int match_held_lock(struct held_l
+ * Clearly if the lock hasn't been acquired _ever_, we're not
+ * holding it either, so report failure.
+ */
+- if (!class)
++ if (IS_ERR_OR_NULL(class))
+ return 0;
+
+ /*
+@@ -4159,7 +4172,7 @@ void lockdep_reset_lock(struct lockdep_m
+ * If the class exists we look it up and zap it:
+ */
+ class = look_up_lock_class(lock, j);
+- if (class)
++ if (!IS_ERR_OR_NULL(class))
+ zap_class(class);
+ }
+ /*
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -660,16 +660,7 @@ static void percpu_modcopy(struct module
+ memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
+ }
+
+-/**
+- * is_module_percpu_address - test whether address is from module static percpu
+- * @addr: address to test
+- *
+- * Test whether @addr belongs to module static percpu area.
+- *
+- * RETURNS:
+- * %true if @addr is from module static percpu area
+- */
+-bool is_module_percpu_address(unsigned long addr)
++bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
+ {
+ struct module *mod;
+ unsigned int cpu;
+@@ -683,9 +674,11 @@ bool is_module_percpu_address(unsigned l
+ continue;
+ for_each_possible_cpu(cpu) {
+ void *start = per_cpu_ptr(mod->percpu, cpu);
++ void *va = (void *)addr;
+
+- if ((void *)addr >= start &&
+- (void *)addr < start + mod->percpu_size) {
++ if (va >= start && va < start + mod->percpu_size) {
++ if (can_addr)
++ *can_addr = (unsigned long) (va - start);
+ preempt_enable();
+ return true;
+ }
+@@ -696,6 +689,20 @@ bool is_module_percpu_address(unsigned l
+ return false;
+ }
+
++/**
++ * is_module_percpu_address - test whether address is from module static percpu
++ * @addr: address to test
++ *
++ * Test whether @addr belongs to module static percpu area.
++ *
++ * RETURNS:
++ * %true if @addr is from module static percpu area
++ */
++bool is_module_percpu_address(unsigned long addr)
++{
++ return __is_module_percpu_address(addr, NULL);
++}
++
+ #else /* ... !CONFIG_SMP */
+
+ static inline void __percpu *mod_percpu(struct module *mod)
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1280,18 +1280,7 @@ void free_percpu(void __percpu *ptr)
+ }
+ EXPORT_SYMBOL_GPL(free_percpu);
+
+-/**
+- * is_kernel_percpu_address - test whether address is from static percpu area
+- * @addr: address to test
+- *
+- * Test whether @addr belongs to in-kernel static percpu area. Module
+- * static percpu areas are not considered. For those, use
+- * is_module_percpu_address().
+- *
+- * RETURNS:
+- * %true if @addr is from in-kernel static percpu area, %false otherwise.
+- */
+-bool is_kernel_percpu_address(unsigned long addr)
++bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
+ {
+ #ifdef CONFIG_SMP
+ const size_t static_size = __per_cpu_end - __per_cpu_start;
+@@ -1300,16 +1289,36 @@ bool is_kernel_percpu_address(unsigned l
+
+ for_each_possible_cpu(cpu) {
+ void *start = per_cpu_ptr(base, cpu);
++ void *va = (void *)addr;
+
+- if ((void *)addr >= start && (void *)addr < start + static_size)
++ if (va >= start && va < start + static_size) {
++ if (can_addr)
++ *can_addr = (unsigned long) (va - start);
+ return true;
+- }
++ }
++ }
+ #endif
+ /* on UP, can't distinguish from other static vars, always false */
+ return false;
+ }
+
+ /**
++ * is_kernel_percpu_address - test whether address is from static percpu area
++ * @addr: address to test
++ *
++ * Test whether @addr belongs to in-kernel static percpu area. Module
++ * static percpu areas are not considered. For those, use
++ * is_module_percpu_address().
++ *
++ * RETURNS:
++ * %true if @addr is from in-kernel static percpu area, %false otherwise.
++ */
++bool is_kernel_percpu_address(unsigned long addr)
++{
++ return __is_kernel_percpu_address(addr, NULL);
++}
++
++/**
+ * per_cpu_ptr_to_phys - convert translated percpu address to physical address
+ * @addr: the address to be converted to physical address
+ *
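The canonical-address step described in the patch header above reduces to plain pointer arithmetic: subtract the start of the CPU's per-CPU area from the lock's address, and the remainder is identical on every CPU, so it can serve as the shared class key. A minimal sketch of that computation follows; the helper name and parameters are invented for illustration and are not a kernel API, they just show what __is_kernel_percpu_address()/__is_module_percpu_address() store in *can_addr.

    /* Illustration only: 'pcpu_area_start' stands for the per-CPU base,
     * i.e. per_cpu_ptr(base, cpu) or per_cpu_ptr(mod->percpu, cpu). */
    static unsigned long canonical_lock_key(const void *lock_addr,
                                            const void *pcpu_area_start)
    {
            /* Strip the per-CPU offset; the same lock yields the same
             * value on every CPU, so it makes a stable lockdep key. */
            return (unsigned long)((const char *)lock_addr -
                                   (const char *)pcpu_area_start);
    }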
diff --git a/patches/lockdep-no-softirq-accounting-on-rt.patch b/patches/lockdep-no-softirq-accounting-on-rt.patch
index 6ec5dd2420ea..413c64184f79 100644
--- a/patches/lockdep-no-softirq-accounting-on-rt.patch
+++ b/patches/lockdep-no-softirq-accounting-on-rt.patch
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#if defined(CONFIG_IRQSOFF_TRACER) || \
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
-@@ -3689,6 +3689,7 @@ static void check_flags(unsigned long fl
+@@ -3702,6 +3702,7 @@ static void check_flags(unsigned long fl
}
}
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
-@@ -3703,6 +3704,7 @@ static void check_flags(unsigned long fl
+@@ -3716,6 +3717,7 @@ static void check_flags(unsigned long fl
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
diff --git a/patches/mm-enable-slub.patch b/patches/mm-enable-slub.patch
index f81c4a4494a4..7148ed90d0b6 100644
--- a/patches/mm-enable-slub.patch
+++ b/patches/mm-enable-slub.patch
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
-@@ -1526,7 +1532,11 @@ static struct page *allocate_slab(struct
+@@ -1530,7 +1536,11 @@ static struct page *allocate_slab(struct
flags &= gfp_allowed_mask;
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_enable();
flags |= s->allocflags;
-@@ -1601,7 +1611,11 @@ static struct page *allocate_slab(struct
+@@ -1605,7 +1615,11 @@ static struct page *allocate_slab(struct
page->frozen = 1;
out:
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_disable();
if (!page)
return NULL;
-@@ -1660,6 +1674,16 @@ static void __free_slab(struct kmem_cach
+@@ -1664,6 +1678,16 @@ static void __free_slab(struct kmem_cach
__free_pages(page, order);
}
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define need_reserve_slab_rcu \
(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
-@@ -1691,6 +1715,12 @@ static void free_slab(struct kmem_cache
+@@ -1695,6 +1719,12 @@ static void free_slab(struct kmem_cache
}
call_rcu(head, rcu_free_slab);
@@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else
__free_slab(s, page);
}
-@@ -1798,7 +1828,7 @@ static void *get_partial_node(struct kme
+@@ -1802,7 +1832,7 @@ static void *get_partial_node(struct kme
if (!n || !n->nr_partial)
return NULL;
@@ -121,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t;
-@@ -1823,7 +1853,7 @@ static void *get_partial_node(struct kme
+@@ -1827,7 +1857,7 @@ static void *get_partial_node(struct kme
break;
}
@@ -130,7 +130,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return object;
}
-@@ -2069,7 +2099,7 @@ static void deactivate_slab(struct kmem_
+@@ -2073,7 +2103,7 @@ static void deactivate_slab(struct kmem_
* that acquire_slab() will see a slab page that
* is frozen
*/
@@ -139,7 +139,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
} else {
m = M_FULL;
-@@ -2080,7 +2110,7 @@ static void deactivate_slab(struct kmem_
+@@ -2084,7 +2114,7 @@ static void deactivate_slab(struct kmem_
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
@@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -2115,7 +2145,7 @@ static void deactivate_slab(struct kmem_
+@@ -2119,7 +2149,7 @@ static void deactivate_slab(struct kmem_
goto redo;
if (lock)
@@ -157,7 +157,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
-@@ -2147,10 +2177,10 @@ static void unfreeze_partials(struct kme
+@@ -2151,10 +2181,10 @@ static void unfreeze_partials(struct kme
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
@@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
do {
-@@ -2179,7 +2209,7 @@ static void unfreeze_partials(struct kme
+@@ -2183,7 +2213,7 @@ static void unfreeze_partials(struct kme
}
if (n)
@@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
while (discard_page) {
page = discard_page;
-@@ -2218,14 +2248,21 @@ static void put_cpu_partial(struct kmem_
+@@ -2222,14 +2252,21 @@ static void put_cpu_partial(struct kmem_
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > s->cpu_partial) {
@@ -201,7 +201,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
oldpage = NULL;
pobjects = 0;
pages = 0;
-@@ -2297,7 +2334,22 @@ static bool has_cpu_slab(int cpu, void *
+@@ -2301,7 +2338,22 @@ static bool has_cpu_slab(int cpu, void *
static void flush_all(struct kmem_cache *s)
{
@@ -224,7 +224,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2352,10 +2404,10 @@ static unsigned long count_partial(struc
+@@ -2356,10 +2408,10 @@ static unsigned long count_partial(struc
unsigned long x = 0;
struct page *page;
@@ -237,7 +237,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return x;
}
#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2493,8 +2545,10 @@ static inline void *get_freelist(struct
+@@ -2497,8 +2549,10 @@ static inline void *get_freelist(struct
* already disabled (which is the case for bulk allocation).
*/
static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
@@ -249,7 +249,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *freelist;
struct page *page;
-@@ -2554,6 +2608,13 @@ static void *___slab_alloc(struct kmem_c
+@@ -2558,6 +2612,13 @@ static void *___slab_alloc(struct kmem_c
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
@@ -263,7 +263,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return freelist;
new_slab:
-@@ -2585,7 +2646,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2589,7 +2650,7 @@ static void *___slab_alloc(struct kmem_c
deactivate_slab(s, page, get_freepointer(s, freelist));
c->page = NULL;
c->freelist = NULL;
@@ -272,7 +272,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2597,6 +2658,7 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2601,6 +2662,7 @@ static void *__slab_alloc(struct kmem_ca
{
void *p;
unsigned long flags;
@@ -280,7 +280,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_save(flags);
#ifdef CONFIG_PREEMPT
-@@ -2608,8 +2670,9 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2612,8 +2674,9 @@ static void *__slab_alloc(struct kmem_ca
c = this_cpu_ptr(s->cpu_slab);
#endif
@@ -291,7 +291,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return p;
}
-@@ -2795,7 +2858,7 @@ static void __slab_free(struct kmem_cach
+@@ -2799,7 +2862,7 @@ static void __slab_free(struct kmem_cach
do {
if (unlikely(n)) {
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
n = NULL;
}
prior = page->freelist;
-@@ -2827,7 +2890,7 @@ static void __slab_free(struct kmem_cach
+@@ -2831,7 +2894,7 @@ static void __slab_free(struct kmem_cach
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
@@ -309,7 +309,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -2869,7 +2932,7 @@ static void __slab_free(struct kmem_cach
+@@ -2873,7 +2936,7 @@ static void __slab_free(struct kmem_cach
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
@@ -318,7 +318,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
slab_empty:
-@@ -2884,7 +2947,7 @@ static void __slab_free(struct kmem_cach
+@@ -2888,7 +2951,7 @@ static void __slab_free(struct kmem_cach
remove_full(s, n, page);
}
@@ -327,7 +327,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
stat(s, FREE_SLAB);
discard_slab(s, page);
}
-@@ -3089,6 +3152,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3093,6 +3156,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
void **p)
{
struct kmem_cache_cpu *c;
@@ -335,7 +335,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int i;
/* memcg and kmem_cache debug support */
-@@ -3112,7 +3176,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3116,7 +3180,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
* of re-populating per CPU c->freelist
*/
p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
@@ -344,7 +344,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(!p[i]))
goto error;
-@@ -3124,6 +3188,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3128,6 +3192,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
}
c->tid = next_tid(c->tid);
local_irq_enable();
@@ -352,7 +352,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Clear memory outside IRQ disabled fastpath loop */
if (unlikely(flags & __GFP_ZERO)) {
-@@ -3271,7 +3336,7 @@ static void
+@@ -3275,7 +3340,7 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
@@ -361,7 +361,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
-@@ -3615,6 +3680,10 @@ static void list_slab_objects(struct kme
+@@ -3619,6 +3684,10 @@ static void list_slab_objects(struct kme
const char *text)
{
#ifdef CONFIG_SLUB_DEBUG
@@ -372,7 +372,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *addr = page_address(page);
void *p;
unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
-@@ -3635,6 +3704,7 @@ static void list_slab_objects(struct kme
+@@ -3639,6 +3708,7 @@ static void list_slab_objects(struct kme
slab_unlock(page);
kfree(map);
#endif
@@ -380,7 +380,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -3648,7 +3718,7 @@ static void free_partial(struct kmem_cac
+@@ -3652,7 +3722,7 @@ static void free_partial(struct kmem_cac
struct page *page, *h;
BUG_ON(irqs_disabled());
@@ -389,7 +389,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
remove_partial(n, page);
-@@ -3658,7 +3728,7 @@ static void free_partial(struct kmem_cac
+@@ -3662,7 +3732,7 @@ static void free_partial(struct kmem_cac
"Objects remaining in %s on __kmem_cache_shutdown()");
}
}
@@ -398,7 +398,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, h, &discard, lru)
discard_slab(s, page);
-@@ -3916,7 +3986,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3920,7 +3990,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
INIT_LIST_HEAD(promote + i);
@@ -407,7 +407,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Build lists of slabs to discard or promote.
-@@ -3947,7 +4017,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3951,7 +4021,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
list_splice(promote + i, &n->partial);
@@ -416,7 +416,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Release empty slabs */
list_for_each_entry_safe(page, t, &discard, lru)
-@@ -4123,6 +4193,12 @@ void __init kmem_cache_init(void)
+@@ -4127,6 +4197,12 @@ void __init kmem_cache_init(void)
{
static __initdata struct kmem_cache boot_kmem_cache,
boot_kmem_cache_node;
@@ -429,7 +429,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (debug_guardpage_minorder())
slub_max_order = 0;
-@@ -4331,7 +4407,7 @@ static int validate_slab_node(struct kme
+@@ -4335,7 +4411,7 @@ static int validate_slab_node(struct kme
struct page *page;
unsigned long flags;
@@ -438,7 +438,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
-@@ -4353,7 +4429,7 @@ static int validate_slab_node(struct kme
+@@ -4357,7 +4433,7 @@ static int validate_slab_node(struct kme
s->name, count, atomic_long_read(&n->nr_slabs));
out:
@@ -447,7 +447,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return count;
}
-@@ -4541,12 +4617,12 @@ static int list_locations(struct kmem_ca
+@@ -4545,12 +4621,12 @@ static int list_locations(struct kmem_ca
if (!atomic_long_read(&n->nr_slabs))
continue;
diff --git a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
index f75f2ad747da..2430990eeb34 100644
--- a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
+++ b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4916,6 +4916,7 @@ void __napi_schedule(struct napi_struct
+@@ -4911,6 +4911,7 @@ void __napi_schedule(struct napi_struct
}
EXPORT_SYMBOL(__napi_schedule);
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
-@@ -4927,6 +4928,7 @@ void __napi_schedule_irqoff(struct napi_
+@@ -4922,6 +4923,7 @@ void __napi_schedule_irqoff(struct napi_
____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
diff --git a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
index 9bc59696fdfa..bfddd24daabf 100644
--- a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
+++ b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
@@ -35,7 +35,7 @@ Cc: stable-rt@vger.kernel.org
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -8024,7 +8024,7 @@ static int dev_cpu_callback(struct notif
+@@ -8019,7 +8019,7 @@ static int dev_cpu_callback(struct notif
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
diff --git a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
index a553cb7e7a1f..2c7142969598 100644
--- a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+++ b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3080,7 +3080,11 @@ static inline int __dev_xmit_skb(struct
+@@ -3075,7 +3075,11 @@ static inline int __dev_xmit_skb(struct
* This permits qdisc->running owner to get the lock more
* often and dequeue packets faster.
*/
diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index 5845849626c5..2d1256cfc07c 100644
--- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2461,14 +2461,53 @@ void netdev_freemem(struct net_device *d
+@@ -2463,14 +2463,53 @@ void netdev_freemem(struct net_device *d
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct task_struct *oom_reaper_list;
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3143,8 +3143,10 @@ static void skb_update_prio(struct sk_bu
+@@ -3138,8 +3138,10 @@ static void skb_update_prio(struct sk_bu
#define skb_update_prio(skb)
#endif
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* dev_loopback_xmit - loop back @skb
-@@ -3378,8 +3380,7 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3373,8 +3375,7 @@ static int __dev_queue_xmit(struct sk_bu
int cpu = smp_processor_id(); /* ok because BHs are off */
if (txq->xmit_lock_owner != cpu) {
@@ -113,7 +113,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto recursion_alert;
skb = validate_xmit_skb(skb, dev);
-@@ -3389,9 +3390,9 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3384,9 +3385,9 @@ static int __dev_queue_xmit(struct sk_bu
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
diff --git a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index 8f070899cf83..b9071361fe25 100644
--- a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void raise_softirq_irqoff(unsigned int nr)
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -5257,7 +5257,7 @@ static __latent_entropy void net_rx_acti
+@@ -5252,7 +5252,7 @@ static __latent_entropy void net_rx_acti
list_splice_tail(&repoll, &list);
list_splice(&list, &sd->poll_list);
if (!list_empty(&sd->poll_list))
diff --git a/patches/perf-make-swevent-hrtimer-irqsafe.patch b/patches/perf-make-swevent-hrtimer-irqsafe.patch
index abac3e29139e..038bfcd4098a 100644
--- a/patches/perf-make-swevent-hrtimer-irqsafe.patch
+++ b/patches/perf-make-swevent-hrtimer-irqsafe.patch
@@ -58,7 +58,7 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -8358,6 +8358,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -8363,6 +8363,7 @@ static void perf_swevent_init_hrtimer(st
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swevent_hrtimer;
diff --git a/patches/rt-drop_mutex_disable_on_not_debug.patch b/patches/rt-drop_mutex_disable_on_not_debug.patch
index 1ed6508020bb..f99a1ad5f49e 100644
--- a/patches/rt-drop_mutex_disable_on_not_debug.patch
+++ b/patches/rt-drop_mutex_disable_on_not_debug.patch
@@ -7,17 +7,13 @@ which won't link and therefore fail on a non-GPL kernel module.
This does not happen on !RT and is a regression on RT which we would like to
avoid.
I try here the easy thing and to not use rt_mutex_destroy() if
-CONFIG_DEBUG_MUTEXES is not enabled. This will still break for the DEBUG
-configs so instead of adding a wrapper around rt_mutex_destroy() (which we have
-for rt_mutex_lock() for instance) I am simply dropping the GPL part from the
-export.
+CONFIG_DEBUG_MUTEXES is not enabled.
Reported-by: Alex Goins <agoins@nvidia.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/mutex_rt.h | 5 +++++
- kernel/locking/rtmutex.c | 3 +--
- 2 files changed, 6 insertions(+), 2 deletions(-)
+ 1 file changed, 5 insertions(+)
--- a/include/linux/mutex_rt.h
+++ b/include/linux/mutex_rt.h
@@ -34,15 +30,3 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -2027,8 +2027,7 @@ void rt_mutex_destroy(struct rt_mutex *l
- lock->magic = NULL;
- #endif
- }
--
--EXPORT_SYMBOL_GPL(rt_mutex_destroy);
-+EXPORT_SYMBOL(rt_mutex_destroy);
-
- /**
- * __rt_mutex_init - initialize the rt lock
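The approach the remaining description outlines, skipping rt_mutex_destroy() entirely when mutex debugging is off so that non-GPL modules never reference the GPL-only export, would look roughly like the sketch below. This is a hedged illustration of the idea, not the actual mutex_rt.h hunk, which is not shown in this context.

    /* Hypothetical sketch for include/linux/mutex_rt.h */
    #ifdef CONFIG_DEBUG_MUTEXES
    # define mutex_destroy(m)       rt_mutex_destroy(&(m)->lock)
    #else
    # define mutex_destroy(m)       do { } while (0)
    #endif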
diff --git a/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch b/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
index 2c9fdd63a9d0..56afc2458734 100644
--- a/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
+++ b/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
@@ -311,7 +311,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-@@ -2247,7 +2384,7 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -2248,7 +2385,7 @@ int rt_mutex_finish_proxy_lock(struct rt
set_current_state(TASK_INTERRUPTIBLE);
/* sleep on the mutex */
@@ -320,7 +320,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
if (unlikely(ret))
remove_waiter(lock, waiter);
-@@ -2263,24 +2400,88 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -2264,24 +2401,88 @@ int rt_mutex_finish_proxy_lock(struct rt
return ret;
}
diff --git a/patches/sched-rt-Add-a-missing-rescheduling-point.patch b/patches/sched-rt-Add-a-missing-rescheduling-point.patch
new file mode 100644
index 000000000000..d38d05ee8012
--- /dev/null
+++ b/patches/sched-rt-Add-a-missing-rescheduling-point.patch
@@ -0,0 +1,70 @@
+From 619bd4a71874a8fd78eb6ccf9f272c5e98bcc7b7 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 24 Jan 2017 15:40:06 +0100
+Subject: [PATCH] sched/rt: Add a missing rescheduling point
+
+Since the change in commit:
+
+ fd7a4bed1835 ("sched, rt: Convert switched_{from, to}_rt() / prio_changed_rt() to balance callbacks")
+
+... we don't reschedule a task under certain circumstances:
+
+Let's say task-A, SCHED_OTHER, is running on CPU0 (and it may run only on
+CPU0) and holds a PI lock. This task is removed from the CPU because it
+used up its time slice and another SCHED_OTHER task is running. Task-B on
+CPU1 runs at RT priority and asks for the lock owned by task-A. This
+results in a priority boost for task-A. Task-B goes to sleep until the
+lock has been made available. Task-A is already runnable (but not active),
+so it receives no wake up.
+
+The reality now is that task-A gets on the CPU once the scheduler decides
+to remove the current task despite the fact that a high priority task is
+enqueued and waiting. This may take a long time.
+
+The desired behaviour is that CPU0 immediately reschedules after the
+priority boost which made task-A the task with the highest priority.
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: fd7a4bed1835 ("sched, rt: Convert switched_{from, to}_rt() prio_changed_rt() to balance callbacks")
+Link: http://lkml.kernel.org/r/20170124144006.29821-1-bigeasy@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ kernel/sched/deadline.c | 3 +--
+ kernel/sched/rt.c | 3 +--
+ 2 files changed, 2 insertions(+), 4 deletions(-)
+
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -1729,12 +1729,11 @@ static void switched_to_dl(struct rq *rq
+ #ifdef CONFIG_SMP
+ if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
+ queue_push_tasks(rq);
+-#else
++#endif
+ if (dl_task(rq->curr))
+ check_preempt_curr_dl(rq, p, 0);
+ else
+ resched_curr(rq);
+-#endif
+ }
+ }
+
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -2198,10 +2198,9 @@ static void switched_to_rt(struct rq *rq
+ #ifdef CONFIG_SMP
+ if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
+ queue_push_tasks(rq);
+-#else
++#endif /* CONFIG_SMP */
+ if (p->prio < rq->curr->prio)
+ resched_curr(rq);
+-#endif /* CONFIG_SMP */
+ }
+ }
+
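Taken together, the deadline.c and rt.c hunks turn the preemption check from an UP-only fallback into an unconditional one. With the patch applied, switched_to_rt() roughly takes the shape below; the enclosing task_on_rq_queued() condition is surrounding context assumed from the 4.9 sources, not part of the hunk.

    static void switched_to_rt(struct rq *rq, struct task_struct *p)
    {
            if (task_on_rq_queued(p) && rq->curr != p) {
    #ifdef CONFIG_SMP
                    if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
                            queue_push_tasks(rq);
    #endif /* CONFIG_SMP */
                    /* Previously SMP builds relied on the push callback alone;
                     * now a boosted task triggers an immediate reschedule. */
                    if (p->prio < rq->curr->prio)
                            resched_curr(rq);
            }
    }

This is the missing rescheduling point from the commit message: once task-A is boosted above the currently running task, resched_curr() makes CPU0 switch to it right away instead of waiting for the next natural preemption.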
diff --git a/patches/series b/patches/series
index 1795f8399403..79ba181156f9 100644
--- a/patches/series
+++ b/patches/series
@@ -5,6 +5,7 @@
############################################################
# UPSTREAM changes queued
############################################################
+sched-rt-Add-a-missing-rescheduling-point.patch
############################################################
# UPSTREAM FIXES, patches pending
@@ -14,6 +15,7 @@ timer-make-the-base-lock-raw.patch
############################################################
# Stuff broken upstream, patches submitted
############################################################
+lockdep-Handle-statically-initialized-PER_CPU-locks-.patch
# Those two should vanish soon (not use PIT during bootup)
at91_dont_enable_disable_clock.patch
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index 72456d99e2c7..a5355c7aaaf5 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2851,6 +2851,7 @@ struct softnet_data {
+@@ -2855,6 +2855,7 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -4315,7 +4315,7 @@ static void flush_backlog(struct work_st
+@@ -4310,7 +4310,7 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
input_queue_head_incr(sd);
}
}
-@@ -4325,11 +4325,14 @@ static void flush_backlog(struct work_st
+@@ -4320,11 +4320,14 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->process_queue);
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void flush_all_backlogs(void)
-@@ -4854,7 +4857,9 @@ static int process_backlog(struct napi_s
+@@ -4849,7 +4852,9 @@ static int process_backlog(struct napi_s
while (again) {
struct sk_buff *skb;
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
-@@ -4862,9 +4867,9 @@ static int process_backlog(struct napi_s
+@@ -4857,9 +4862,9 @@ static int process_backlog(struct napi_s
if (++work >= quota)
return work;
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rps_lock(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
-@@ -5206,13 +5211,21 @@ static __latent_entropy void net_rx_acti
+@@ -5201,13 +5206,21 @@ static __latent_entropy void net_rx_acti
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (;;) {
struct napi_struct *n;
-@@ -8015,6 +8028,9 @@ static int dev_cpu_callback(struct notif
+@@ -8010,6 +8023,9 @@ static int dev_cpu_callback(struct notif
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NOTIFY_OK;
}
-@@ -8319,8 +8335,9 @@ static int __init net_dev_init(void)
+@@ -8314,8 +8330,9 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
diff --git a/patches/slub-enable-irqs-for-no-wait.patch b/patches/slub-enable-irqs-for-no-wait.patch
index 9d975d84a4c1..b4af19555631 100644
--- a/patches/slub-enable-irqs-for-no-wait.patch
+++ b/patches/slub-enable-irqs-for-no-wait.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1529,14 +1529,17 @@ static struct page *allocate_slab(struct
+@@ -1533,14 +1533,17 @@ static struct page *allocate_slab(struct
void *start, *p;
int idx, order;
bool shuffle;
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_enable();
flags |= s->allocflags;
-@@ -1611,11 +1614,7 @@ static struct page *allocate_slab(struct
+@@ -1615,11 +1618,7 @@ static struct page *allocate_slab(struct
page->frozen = 1;
out:
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index 54ceb2b0dd89..293305e760b7 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -2263,6 +2263,7 @@ static void __netif_reschedule(struct Qd
+@@ -2258,6 +2258,7 @@ static void __netif_reschedule(struct Qd
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __netif_schedule(struct Qdisc *q)
-@@ -2344,6 +2345,7 @@ void __dev_kfree_skb_irq(struct sk_buff
+@@ -2339,6 +2340,7 @@ void __dev_kfree_skb_irq(struct sk_buff
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -3763,6 +3765,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -3758,6 +3760,7 @@ static int enqueue_to_backlog(struct sk_
rps_unlock(sd);
local_irq_restore(flags);
@@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -4809,6 +4812,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4804,6 +4807,7 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Send pending IPI's to kick RPS processing on remote cpus. */
while (remsd) {
-@@ -4822,6 +4826,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4817,6 +4821,7 @@ static void net_rps_action_and_irq_enabl
} else
#endif
local_irq_enable();
@@ -151,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -4899,6 +4904,7 @@ void __napi_schedule(struct napi_struct
+@@ -4894,6 +4899,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -8000,6 +8006,7 @@ static int dev_cpu_callback(struct notif
+@@ -7995,6 +8001,7 @@ static int dev_cpu_callback(struct notif
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index 9f832e50a66c..4dd1cc4a6ed0 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -816,7 +816,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3834,11 +3834,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3829,11 +3829,9 @@ int netif_rx_ni(struct sk_buff *skb)
trace_netif_rx_ni_entry(skb);
diff --git a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index 66284f8c1574..d6bc5b2b83ca 100644
--- a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3789,7 +3789,7 @@ static int netif_rx_internal(struct sk_b
+@@ -3784,7 +3784,7 @@ static int netif_rx_internal(struct sk_b
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3799,13 +3799,13 @@ static int netif_rx_internal(struct sk_b
+@@ -3794,13 +3794,13 @@ static int netif_rx_internal(struct sk_b
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();