-rw-r--r--  patches/0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch | 53
-rw-r--r--  patches/0002-cpu-hotplug-Provide-lockdep_assert_cpus_held.patch | 9
-rw-r--r--  patches/0003-cpu-hotplug-Provide-cpuhp_setup-remove_state-_nocall.patch | 33
-rw-r--r--  patches/0004-cpu-hotplug-Add-__cpuhp_state_add_instance_cpuslocke.patch | 19
-rw-r--r--  patches/0005-stop_machine-Provide-stop_machine_cpuslocked.patch | 19
-rw-r--r--  patches/0006-padata-Make-padata_alloc-static.patch | 17
-rw-r--r--  patches/0007-padata-Avoid-nested-calls-to-cpus_read_lock-in-pcryp.patch | 17
-rw-r--r--  patches/0008-x86-mtrr-Remove-get_online_cpus-from-mtrr_save_state.patch | 7
-rw-r--r--  patches/0009-cpufreq-Use-cpuhp_setup_state_nocalls_cpuslocked.patch | 19
-rw-r--r--  patches/0009-tracing-Make-traceprobe-parsing-code-reusable.patch | 8
-rw-r--r--  patches/0010-KVM-PPC-Book3S-HV-Use-cpuhp_setup_state_nocalls_cpus.patch | 11
-rw-r--r--  patches/0011-hwtracing-coresight-etm3x-Use-cpuhp_setup_state_noca.patch | 15
-rw-r--r--  patches/0012-hwtracing-coresight-etm4x-Use-cpuhp_setup_state_noca.patch | 15
-rw-r--r--  patches/0013-extable-Adjust-system_state-checks.patch | 2
-rw-r--r--  patches/0013-perf-x86-intel-cqm-Use-cpuhp_setup_state_cpuslocked.patch | 7
-rw-r--r--  patches/0014-ARM-hw_breakpoint-Use-cpuhp_setup_state_cpuslocked.patch | 13
-rw-r--r--  patches/0015-s390-kernel-Use-stop_machine_cpuslocked.patch | 9
-rw-r--r--  patches/0016-powerpc-powernv-Use-stop_machine_cpuslocked.patch | 11
-rw-r--r--  patches/0017-cpu-hotplug-Use-stop_machine_cpuslocked-in-takedown_.patch | 9
-rw-r--r--  patches/0018-x86-perf-Drop-EXPORT-of-perf_check_microcode.patch | 7
-rw-r--r--  patches/0019-perf-x86-intel-Drop-get_online_cpus-in-intel_snb_che.patch | 13
-rw-r--r--  patches/0020-PCI-Use-cpu_hotplug_disable-instead-of-get_online_cp.patch | 9
-rw-r--r--  patches/0021-PCI-Replace-the-racy-recursion-prevention.patch | 15
-rw-r--r--  patches/0022-ACPI-processor-Use-cpu_hotplug_disable-instead-of-ge.patch | 9
-rw-r--r--  patches/0023-perf-tracing-cpuhotplug-Fix-locking-order.patch | 51
-rw-r--r--  patches/0024-jump_label-Reorder-hotplug-lock-and-jump_label_lock.patch | 43
-rw-r--r--  patches/0025-kprobes-Cure-hotplug-lock-ordering-issues.patch | 45
-rw-r--r--  patches/0026-arm64-Prevent-cpu-hotplug-rwsem-recursion.patch | 17
-rw-r--r--  patches/0027-arm-Prevent-hotplug-rwsem-recursion.patch | 15
-rw-r--r--  patches/0028-s390-Prevent-hotplug-rwsem-recursion.patch | 15
-rw-r--r--  patches/0029-cpu-hotplug-Convert-hotplug-locking-to-percpu-rwsem.patch | 22
-rw-r--r--  patches/0030-sched-Provide-is_percpu_thread-helper.patch | 9
-rw-r--r--  patches/0031-acpi-processor-Prevent-cpu-hotplug-deadlock.patch | 13
-rw-r--r--  patches/0032-cpuhotplug-Link-lock-stacks-for-hotplug-callbacks.patch | 15
-rw-r--r--  patches/add_migrate_disable.patch | 10
-rw-r--r--  patches/completion-use-simple-wait-queues.patch | 2
-rw-r--r--  patches/cond-resched-softirq-rt.patch | 2
-rw-r--r--  patches/cpu-hotplug--Implement-CPU-pinning.patch | 110
-rw-r--r--  patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch | 55
-rw-r--r--  patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch | 114
-rw-r--r--  patches/cpu-rt-rework-cpu-down.patch | 525
-rw-r--r--  patches/cpu_down_move_migrate_enable_back.patch | 52
-rw-r--r--  patches/fs-dcache-init-in_lookup_hashtable.patch | 2
-rw-r--r--  patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch | 4
-rw-r--r--  patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch | 6
-rw-r--r--  patches/fs-namespace-preemption-fix.patch | 2
-rw-r--r--  patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch | 46
-rw-r--r--  patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch | 95
-rw-r--r--  patches/hotplug-light-get-online-cpus.patch | 172
-rw-r--r--  patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch | 24
-rw-r--r--  patches/hotplug-use-migrate-disable.patch | 39
-rw-r--r--  patches/introduce_migrate_disable_cpu_light.patch | 280
-rw-r--r--  patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch | 85
-rw-r--r--  patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch | 58
-rw-r--r--  patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch | 2
-rw-r--r--  patches/localversion.patch | 2
-rw-r--r--  patches/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch | 256
-rw-r--r--  patches/locking-rt-rwlock--Make-reader-biased-rwlocks-selectable.patch | 231
-rw-r--r--  patches/locking-rt-rwlock--Provide-reader-biased-rwlock-for-RT.patch | 338
-rw-r--r--  patches/locking-rtmutex--Make-inner-working-of-rt_spin_slow_lock---accessible.patch | 112
-rw-r--r--  patches/mm-protect-activate-switch-mm.patch | 2
-rw-r--r--  patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch | 4
-rw-r--r--  patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch | 2
-rw-r--r--  patches/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch | 2
-rw-r--r--  patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch | 2
-rw-r--r--  patches/perf-make-swevent-hrtimer-irqsafe.patch | 2
-rw-r--r--  patches/preempt-lazy-support.patch | 30
-rw-r--r--  patches/ptrace-don-t-open-IRQs-in-ptrace_freeze_traced-too-e.patch | 34
-rw-r--r--  patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch | 2
-rw-r--r--  patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch | 34
-rw-r--r--  patches/rt-locking--Consolidate-lock-functions.patch | 178
-rw-r--r--  patches/rt-locking--Consolidate-rwlock-variants.patch | 262
-rw-r--r--  patches/rt-locking--Simplify-rt-rwlock.patch | 104
-rw-r--r--  patches/rt-locking-Reenable-migration-accross-schedule.patch | 111
-rw-r--r--  patches/rt-rwlock--Remove-recursive-support.patch | 135
-rw-r--r--  patches/sched-rt-mutex-wakeup.patch | 2
-rw-r--r--  patches/series | 91
-rw-r--r--  patches/skbufhead-raw-lock.patch | 10
-rw-r--r--  patches/softirq-preempt-fix-3-re.patch | 8
-rw-r--r--  patches/workqueue-prevent-deadlock-stall.patch | 4
80 files changed, 2134 insertions(+), 2100 deletions(-)
diff --git a/patches/0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch b/patches/0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch
index 242dd583b190..f877950a3716 100644
--- a/patches/0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch
+++ b/patches/0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch
@@ -21,15 +21,13 @@ Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081547.080397752@linutronix.de
---
- include/linux/cpu.h | 32 ++++++++++++++++++--------------
- kernel/cpu.c | 36 ++++++++++++++++++------------------
+ include/linux/cpu.h | 32 ++++++++++++++++++--------------
+ kernel/cpu.c | 36 ++++++++++++++++++------------------
2 files changed, 36 insertions(+), 32 deletions(-)
-diff --git a/include/linux/cpu.h b/include/linux/cpu.h
-index f92081234afd..055876003914 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
-@@ -99,26 +99,30 @@ static inline void cpu_maps_update_done(void)
+@@ -99,26 +99,30 @@ static inline void cpu_maps_update_done(
extern struct bus_type cpu_subsys;
#ifdef CONFIG_HOTPLUG_CPU
@@ -50,6 +48,14 @@ index f92081234afd..055876003914 100644
-#else /* CONFIG_HOTPLUG_CPU */
+#else /* CONFIG_HOTPLUG_CPU */
++
++static inline void cpus_write_lock(void) { }
++static inline void cpus_write_unlock(void) { }
++static inline void cpus_read_lock(void) { }
++static inline void cpus_read_unlock(void) { }
++static inline void cpu_hotplug_disable(void) { }
++static inline void cpu_hotplug_enable(void) { }
++#endif /* !CONFIG_HOTPLUG_CPU */
-static inline void cpu_hotplug_begin(void) {}
-static inline void cpu_hotplug_done(void) {}
@@ -58,14 +64,6 @@ index f92081234afd..055876003914 100644
-#define cpu_hotplug_disable() do { } while (0)
-#define cpu_hotplug_enable() do { } while (0)
-#endif /* CONFIG_HOTPLUG_CPU */
-+static inline void cpus_write_lock(void) { }
-+static inline void cpus_write_unlock(void) { }
-+static inline void cpus_read_lock(void) { }
-+static inline void cpus_read_unlock(void) { }
-+static inline void cpu_hotplug_disable(void) { }
-+static inline void cpu_hotplug_enable(void) { }
-+#endif /* !CONFIG_HOTPLUG_CPU */
-+
+/* Wrappers which go away once all code is converted */
+static inline void cpu_hotplug_begin(void) { cpus_write_lock(); }
+static inline void cpu_hotplug_done(void) { cpus_write_unlock(); }
@@ -74,8 +72,6 @@ index f92081234afd..055876003914 100644
#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
-diff --git a/kernel/cpu.c b/kernel/cpu.c
-index 9ae6fbe5b5cf..d3221ae5b474 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -235,7 +235,7 @@ static struct {
@@ -126,7 +122,7 @@ index 9ae6fbe5b5cf..d3221ae5b474 100644
{
cpu_hotplug.active_writer = NULL;
mutex_unlock(&cpu_hotplug.lock);
-@@ -773,7 +773,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+@@ -783,7 +783,7 @@ static int __ref _cpu_down(unsigned int
if (!cpu_present(cpu))
return -EINVAL;
@@ -135,7 +131,7 @@ index 9ae6fbe5b5cf..d3221ae5b474 100644
cpuhp_tasks_frozen = tasks_frozen;
-@@ -811,7 +811,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+@@ -821,7 +821,7 @@ static int __ref _cpu_down(unsigned int
}
out:
@@ -144,7 +140,7 @@ index 9ae6fbe5b5cf..d3221ae5b474 100644
return ret;
}
-@@ -893,7 +893,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
+@@ -892,7 +892,7 @@ static int _cpu_up(unsigned int cpu, int
struct task_struct *idle;
int ret = 0;
@@ -153,7 +149,7 @@ index 9ae6fbe5b5cf..d3221ae5b474 100644
if (!cpu_present(cpu)) {
ret = -EINVAL;
-@@ -941,7 +941,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
+@@ -940,7 +940,7 @@ static int _cpu_up(unsigned int cpu, int
target = min((int)target, CPUHP_BRINGUP_CPU);
ret = cpuhp_up_callbacks(cpu, st, target);
out:
@@ -162,7 +158,7 @@ index 9ae6fbe5b5cf..d3221ae5b474 100644
return ret;
}
-@@ -1424,7 +1424,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+@@ -1423,7 +1423,7 @@ int __cpuhp_state_add_instance(enum cpuh
if (sp->multi_instance == false)
return -EINVAL;
@@ -171,7 +167,7 @@ index 9ae6fbe5b5cf..d3221ae5b474 100644
mutex_lock(&cpuhp_state_mutex);
if (!invoke || !sp->startup.multi)
-@@ -1453,7 +1453,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+@@ -1452,7 +1452,7 @@ int __cpuhp_state_add_instance(enum cpuh
hlist_add_head(node, &sp->list);
unlock:
mutex_unlock(&cpuhp_state_mutex);
@@ -180,7 +176,7 @@ index 9ae6fbe5b5cf..d3221ae5b474 100644
return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
-@@ -1486,7 +1486,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
+@@ -1485,7 +1485,7 @@ int __cpuhp_setup_state(enum cpuhp_state
if (cpuhp_cb_check(state) || !name)
return -EINVAL;
@@ -189,7 +185,7 @@ index 9ae6fbe5b5cf..d3221ae5b474 100644
mutex_lock(&cpuhp_state_mutex);
ret = cpuhp_store_callbacks(state, name, startup, teardown,
-@@ -1522,7 +1522,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
+@@ -1521,7 +1521,7 @@ int __cpuhp_setup_state(enum cpuhp_state
}
out:
mutex_unlock(&cpuhp_state_mutex);
@@ -198,7 +194,7 @@ index 9ae6fbe5b5cf..d3221ae5b474 100644
/*
* If the requested state is CPUHP_AP_ONLINE_DYN, return the
* dynamically allocated state in case of success.
-@@ -1544,7 +1544,7 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+@@ -1543,7 +1543,7 @@ int __cpuhp_state_remove_instance(enum c
if (!sp->multi_instance)
return -EINVAL;
@@ -207,7 +203,7 @@ index 9ae6fbe5b5cf..d3221ae5b474 100644
mutex_lock(&cpuhp_state_mutex);
if (!invoke || !cpuhp_get_teardown_cb(state))
-@@ -1565,7 +1565,7 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+@@ -1564,7 +1564,7 @@ int __cpuhp_state_remove_instance(enum c
remove:
hlist_del(node);
mutex_unlock(&cpuhp_state_mutex);
@@ -216,7 +212,7 @@ index 9ae6fbe5b5cf..d3221ae5b474 100644
return 0;
}
-@@ -1587,7 +1587,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+@@ -1586,7 +1586,7 @@ void __cpuhp_remove_state(enum cpuhp_sta
BUG_ON(cpuhp_cb_check(state));
@@ -225,7 +221,7 @@ index 9ae6fbe5b5cf..d3221ae5b474 100644
mutex_lock(&cpuhp_state_mutex);
if (sp->multi_instance) {
-@@ -1615,7 +1615,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+@@ -1614,7 +1614,7 @@ void __cpuhp_remove_state(enum cpuhp_sta
remove:
cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
mutex_unlock(&cpuhp_state_mutex);
@@ -234,6 +230,3 @@ index 9ae6fbe5b5cf..d3221ae5b474 100644
}
EXPORT_SYMBOL(__cpuhp_remove_state);
---
-2.11.0
-
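
Patch 0001 replaces get_online_cpus()/put_online_cpus() with
cpus_read_lock()/cpus_read_unlock() and adds the write-side counterparts.
A minimal sketch of a read-side caller; foo_update_all() and foo_update_one()
are illustrative names, not part of the series:

    #include <linux/cpu.h>

    static void foo_update_one(int cpu);        /* hypothetical per-CPU update */

    static void foo_update_all(void)
    {
            int cpu;

            cpus_read_lock();                   /* hold off CPU hotplug */
            for_each_online_cpu(cpu)
                    foo_update_one(cpu);
            cpus_read_unlock();
    }
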
diff --git a/patches/0002-cpu-hotplug-Provide-lockdep_assert_cpus_held.patch b/patches/0002-cpu-hotplug-Provide-lockdep_assert_cpus_held.patch
index 9e6b2015c19a..d3ddb7be1ab5 100644
--- a/patches/0002-cpu-hotplug-Provide-lockdep_assert_cpus_held.patch
+++ b/patches/0002-cpu-hotplug-Provide-lockdep_assert_cpus_held.patch
@@ -17,11 +17,9 @@ Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081547.161282442@linutronix.de
---
- include/linux/cpu.h | 2 ++
+ include/linux/cpu.h | 2 ++
1 file changed, 2 insertions(+)
-diff --git a/include/linux/cpu.h b/include/linux/cpu.h
-index 055876003914..af4d660798e5 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -103,6 +103,7 @@ extern void cpus_write_lock(void);
@@ -32,7 +30,7 @@ index 055876003914..af4d660798e5 100644
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
-@@ -114,6 +115,7 @@ static inline void cpus_write_lock(void) { }
+@@ -114,6 +115,7 @@ static inline void cpus_write_lock(void)
static inline void cpus_write_unlock(void) { }
static inline void cpus_read_lock(void) { }
static inline void cpus_read_unlock(void) { }
@@ -40,6 +38,3 @@ index 055876003914..af4d660798e5 100644
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
#endif /* !CONFIG_HOTPLUG_CPU */
---
-2.11.0
-
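
Patch 0002 adds lockdep_assert_cpus_held(), which lets a function verify that
its caller holds the hotplug lock. A sketch, assuming a hypothetical
_cpuslocked helper:

    static int foo_update_cpuslocked(void)
    {
            lockdep_assert_cpus_held();     /* caller must hold cpus_read_lock() */
            /* ... work that relies on a stable cpu_online_mask ... */
            return 0;
    }
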
diff --git a/patches/0003-cpu-hotplug-Provide-cpuhp_setup-remove_state-_nocall.patch b/patches/0003-cpu-hotplug-Provide-cpuhp_setup-remove_state-_nocall.patch
index 9d0373376ec2..a62c414e2f13 100644
--- a/patches/0003-cpu-hotplug-Provide-cpuhp_setup-remove_state-_nocall.patch
+++ b/patches/0003-cpu-hotplug-Provide-cpuhp_setup-remove_state-_nocall.patch
@@ -21,15 +21,13 @@ Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081547.239600868@linutronix.de
---
- include/linux/cpuhotplug.h | 29 ++++++++++++++++++++++++++++
- kernel/cpu.c | 47 +++++++++++++++++++++++++++++++++++-----------
+ include/linux/cpuhotplug.h | 29 +++++++++++++++++++++++++++
+ kernel/cpu.c | 47 ++++++++++++++++++++++++++++++++++-----------
2 files changed, 65 insertions(+), 11 deletions(-)
-diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
-index 0f2a80377520..4fac564dde70 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
-@@ -153,6 +153,11 @@ int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke,
+@@ -151,6 +151,11 @@ int __cpuhp_setup_state(enum cpuhp_state
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu), bool multi_instance);
@@ -41,7 +39,7 @@ index 0f2a80377520..4fac564dde70 100644
/**
* cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
* @state: The state for which the calls are installed
-@@ -171,6 +176,15 @@ static inline int cpuhp_setup_state(enum cpuhp_state state,
+@@ -169,6 +174,15 @@ static inline int cpuhp_setup_state(enum
return __cpuhp_setup_state(state, name, true, startup, teardown, false);
}
@@ -57,7 +55,7 @@ index 0f2a80377520..4fac564dde70 100644
/**
* cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the
* callbacks
-@@ -191,6 +205,15 @@ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
+@@ -189,6 +203,15 @@ static inline int cpuhp_setup_state_noca
false);
}
@@ -73,7 +71,7 @@ index 0f2a80377520..4fac564dde70 100644
/**
* cpuhp_setup_state_multi - Add callbacks for multi state
* @state: The state for which the calls are installed
-@@ -250,6 +273,7 @@ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
+@@ -248,6 +271,7 @@ static inline int cpuhp_state_add_instan
}
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke);
@@ -81,7 +79,7 @@ index 0f2a80377520..4fac564dde70 100644
/**
* cpuhp_remove_state - Remove hotplug state callbacks and invoke the teardown
-@@ -273,6 +297,11 @@ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
+@@ -271,6 +295,11 @@ static inline void cpuhp_remove_state_no
__cpuhp_remove_state(state, false);
}
@@ -93,11 +91,9 @@ index 0f2a80377520..4fac564dde70 100644
/**
* cpuhp_remove_multi_state - Remove hotplug multi state callback
* @state: The state for which the calls are removed
-diff --git a/kernel/cpu.c b/kernel/cpu.c
-index d3221ae5b474..dc27c5a28153 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -1459,7 +1459,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+@@ -1458,7 +1458,7 @@ int __cpuhp_state_add_instance(enum cpuh
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
/**
@@ -106,7 +102,7 @@ index d3221ae5b474..dc27c5a28153 100644
* @state: The state to setup
* @invoke: If true, the startup function is invoked for cpus where
* cpu state >= @state
-@@ -1468,25 +1468,27 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
+@@ -1467,25 +1467,27 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_inst
* @multi_instance: State is set up for multiple instances which get
* added afterwards.
*
@@ -140,7 +136,7 @@ index d3221ae5b474..dc27c5a28153 100644
mutex_lock(&cpuhp_state_mutex);
ret = cpuhp_store_callbacks(state, name, startup, teardown,
-@@ -1522,7 +1524,6 @@ int __cpuhp_setup_state(enum cpuhp_state state,
+@@ -1521,7 +1523,6 @@ int __cpuhp_setup_state(enum cpuhp_state
}
out:
mutex_unlock(&cpuhp_state_mutex);
@@ -148,7 +144,7 @@ index d3221ae5b474..dc27c5a28153 100644
/*
* If the requested state is CPUHP_AP_ONLINE_DYN, return the
* dynamically allocated state in case of success.
-@@ -1531,6 +1532,22 @@ int __cpuhp_setup_state(enum cpuhp_state state,
+@@ -1530,6 +1531,22 @@ int __cpuhp_setup_state(enum cpuhp_state
return state;
return ret;
}
@@ -171,7 +167,7 @@ index d3221ae5b474..dc27c5a28153 100644
EXPORT_SYMBOL(__cpuhp_setup_state);
int __cpuhp_state_remove_instance(enum cpuhp_state state,
-@@ -1572,22 +1589,23 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+@@ -1571,22 +1588,23 @@ int __cpuhp_state_remove_instance(enum c
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
/**
@@ -198,7 +194,7 @@ index d3221ae5b474..dc27c5a28153 100644
mutex_lock(&cpuhp_state_mutex);
if (sp->multi_instance) {
-@@ -1615,6 +1633,13 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+@@ -1614,6 +1632,13 @@ void __cpuhp_remove_state(enum cpuhp_sta
remove:
cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
mutex_unlock(&cpuhp_state_mutex);
@@ -212,6 +208,3 @@ index d3221ae5b474..dc27c5a28153 100644
cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
---
-2.11.0
-
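
Patch 0003 provides cpuhp_setup_state_nocalls_cpuslocked() and
cpuhp_remove_state_nocalls_cpuslocked() for callers that already hold the
hotplug lock. A sketch of a dynamic-state registration; the "foo:online" name
and the callbacks are assumptions for illustration:

    #include <linux/cpuhotplug.h>

    static int foo_cpu_online(unsigned int cpu);    /* hypothetical callbacks */
    static int foo_cpu_offline(unsigned int cpu);
    static enum cpuhp_state foo_hp_state;

    /* Called with cpus_read_lock() already held. */
    static int foo_init_cpuslocked(void)
    {
            int ret;

            ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
                                                       "foo:online",
                                                       foo_cpu_online,
                                                       foo_cpu_offline);
            if (ret < 0)
                    return ret;
            foo_hp_state = ret;     /* _DYN returns the allocated state id */
            return 0;
    }
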
diff --git a/patches/0004-cpu-hotplug-Add-__cpuhp_state_add_instance_cpuslocke.patch b/patches/0004-cpu-hotplug-Add-__cpuhp_state_add_instance_cpuslocke.patch
index adf931932e62..a18f9a9ece4b 100644
--- a/patches/0004-cpu-hotplug-Add-__cpuhp_state_add_instance_cpuslocke.patch
+++ b/patches/0004-cpu-hotplug-Add-__cpuhp_state_add_instance_cpuslocke.patch
@@ -14,15 +14,13 @@ Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081547.321782217@linutronix.de
---
- include/linux/cpuhotplug.h | 9 +++++++++
- kernel/cpu.c | 18 +++++++++++++++---
+ include/linux/cpuhotplug.h | 9 +++++++++
+ kernel/cpu.c | 18 +++++++++++++++---
2 files changed, 24 insertions(+), 3 deletions(-)
-diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
-index 4fac564dde70..df3d2719a796 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
-@@ -240,6 +240,8 @@ static inline int cpuhp_setup_state_multi(enum cpuhp_state state,
+@@ -238,6 +238,8 @@ static inline int cpuhp_setup_state_mult
int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
bool invoke);
@@ -31,7 +29,7 @@ index 4fac564dde70..df3d2719a796 100644
/**
* cpuhp_state_add_instance - Add an instance for a state and invoke startup
-@@ -272,6 +274,13 @@ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
+@@ -270,6 +272,13 @@ static inline int cpuhp_state_add_instan
return __cpuhp_state_add_instance(state, node, false);
}
@@ -45,11 +43,9 @@ index 4fac564dde70..df3d2719a796 100644
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke);
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke);
-diff --git a/kernel/cpu.c b/kernel/cpu.c
-index dc27c5a28153..e4389ac55b65 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -1413,18 +1413,20 @@ static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
+@@ -1412,18 +1412,20 @@ static void cpuhp_rollback_install(int f
}
}
@@ -73,7 +69,7 @@ index dc27c5a28153..e4389ac55b65 100644
mutex_lock(&cpuhp_state_mutex);
if (!invoke || !sp->startup.multi)
-@@ -1453,6 +1455,16 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+@@ -1452,6 +1454,16 @@ int __cpuhp_state_add_instance(enum cpuh
hlist_add_head(node, &sp->list);
unlock:
mutex_unlock(&cpuhp_state_mutex);
@@ -90,6 +86,3 @@ index dc27c5a28153..e4389ac55b65 100644
cpus_read_unlock();
return ret;
}
---
-2.11.0
-
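
Patch 0004 adds the matching instance operation,
cpuhp_state_add_instance_cpuslocked(). A sketch of a caller that takes the
lock once around several operations; struct foo and foo_hp_state are
illustrative:

    struct foo {
            struct hlist_node node;
    };

    static int foo_add_instance(struct foo *f)
    {
            int ret;

            cpus_read_lock();
            /* ... other setup that needs a stable online mask ... */
            ret = cpuhp_state_add_instance_cpuslocked(foo_hp_state, &f->node);
            cpus_read_unlock();
            return ret;
    }
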
diff --git a/patches/0005-stop_machine-Provide-stop_machine_cpuslocked.patch b/patches/0005-stop_machine-Provide-stop_machine_cpuslocked.patch
index d1dd0d8a5081..dd0fa6f147bb 100644
--- a/patches/0005-stop_machine-Provide-stop_machine_cpuslocked.patch
+++ b/patches/0005-stop_machine-Provide-stop_machine_cpuslocked.patch
@@ -20,15 +20,13 @@ Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081547.400700852@linutronix.de
---
- include/linux/stop_machine.h | 26 +++++++++++++++++++++++---
- kernel/stop_machine.c | 11 +++++++----
+ include/linux/stop_machine.h | 26 +++++++++++++++++++++++---
+ kernel/stop_machine.c | 11 +++++++----
2 files changed, 30 insertions(+), 7 deletions(-)
-diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
-index 3cc9632dcc2a..3d60275e3ba9 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
-@@ -116,15 +116,29 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
+@@ -116,15 +116,29 @@ static inline int try_stop_cpus(const st
* @fn() runs.
*
* This can be thought of as a very heavy write lock, equivalent to
@@ -61,7 +59,7 @@ index 3cc9632dcc2a..3d60275e3ba9 100644
{
unsigned long flags;
int ret;
-@@ -134,6 +148,12 @@ static inline int stop_machine(cpu_stop_fn_t fn, void *data,
+@@ -134,6 +148,12 @@ static inline int stop_machine(cpu_stop_
return ret;
}
@@ -74,8 +72,6 @@ index 3cc9632dcc2a..3d60275e3ba9 100644
static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus)
{
-diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
-index 1eb82661ecdb..b7591261652d 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -552,7 +552,8 @@ static int __init cpu_stop_init(void)
@@ -88,7 +84,7 @@ index 1eb82661ecdb..b7591261652d 100644
{
struct multi_stop_data msdata = {
.fn = fn,
-@@ -561,6 +562,8 @@ static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cp
+@@ -561,6 +562,8 @@ static int __stop_machine(cpu_stop_fn_t
.active_cpus = cpus,
};
@@ -97,7 +93,7 @@ index 1eb82661ecdb..b7591261652d 100644
if (!stop_machine_initialized) {
/*
* Handle the case where stop_machine() is called
-@@ -590,9 +593,9 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
+@@ -590,9 +593,9 @@ int stop_machine(cpu_stop_fn_t fn, void
int ret;
/* No CPUs can come up or down during this. */
@@ -110,6 +106,3 @@ index 1eb82661ecdb..b7591261652d 100644
return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
---
-2.11.0
-
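
Patch 0005 adds stop_machine_cpuslocked() so code that already holds the
hotplug lock can run stop_machine() without recursing on that lock. A sketch
with a hypothetical synchronization callback, mirroring the s390 conversion
later in this series:

    static int foo_sync(void *data)         /* runs with all CPUs captive */
    {
            /* ... hypothetical atomic state switch ... */
            return 0;
    }

    static int foo_switch_mode(void)
    {
            int ret;

            cpus_read_lock();
            ret = stop_machine_cpuslocked(foo_sync, NULL, cpu_online_mask);
            cpus_read_unlock();
            return ret;
    }
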
diff --git a/patches/0006-padata-Make-padata_alloc-static.patch b/patches/0006-padata-Make-padata_alloc-static.patch
index 7c79b20c0622..e7605db4db76 100644
--- a/patches/0006-padata-Make-padata_alloc-static.patch
+++ b/patches/0006-padata-Make-padata_alloc-static.patch
@@ -14,12 +14,10 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: linux-crypto@vger.kernel.org
Link: http://lkml.kernel.org/r/20170524081547.491457256@linutronix.de
---
- include/linux/padata.h | 3 ---
- kernel/padata.c | 32 ++++++++++++++++----------------
+ include/linux/padata.h | 3 ---
+ kernel/padata.c | 32 ++++++++++++++++----------------
2 files changed, 16 insertions(+), 19 deletions(-)
-diff --git a/include/linux/padata.h b/include/linux/padata.h
-index 0f9e567d5e15..2f9c1f93b1ce 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -166,9 +166,6 @@ struct padata_instance {
@@ -32,11 +30,9 @@ index 0f9e567d5e15..2f9c1f93b1ce 100644
extern void padata_free(struct padata_instance *pinst);
extern int padata_do_parallel(struct padata_instance *pinst,
struct padata_priv *padata, int cb_cpu);
-diff --git a/kernel/padata.c b/kernel/padata.c
-index ac8f1e524836..0c708f648853 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
-@@ -934,19 +934,6 @@ static struct kobj_type padata_attr_type = {
+@@ -939,19 +939,6 @@ static struct kobj_type padata_attr_type
};
/**
@@ -56,7 +52,7 @@ index ac8f1e524836..0c708f648853 100644
* padata_alloc - allocate and initialize a padata instance and specify
* cpumasks for serial and parallel workers.
*
-@@ -954,9 +941,9 @@ EXPORT_SYMBOL(padata_alloc_possible);
+@@ -959,9 +946,9 @@ EXPORT_SYMBOL(padata_alloc_possible);
* @pcpumask: cpumask that will be used for padata parallelization
* @cbcpumask: cpumask that will be used for padata serialization
*/
@@ -69,7 +65,7 @@ index ac8f1e524836..0c708f648853 100644
{
struct padata_instance *pinst;
struct parallel_data *pd = NULL;
-@@ -1011,6 +998,19 @@ struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+@@ -1016,6 +1003,19 @@ struct padata_instance *padata_alloc(str
}
/**
@@ -89,6 +85,3 @@ index ac8f1e524836..0c708f648853 100644
* padata_free - free a padata instance
*
* @padata_inst: padata instance to free
---
-2.11.0
-
diff --git a/patches/0007-padata-Avoid-nested-calls-to-cpus_read_lock-in-pcryp.patch b/patches/0007-padata-Avoid-nested-calls-to-cpus_read_lock-in-pcryp.patch
index 9929deb90bd5..71e99d636fc5 100644
--- a/patches/0007-padata-Avoid-nested-calls-to-cpus_read_lock-in-pcryp.patch
+++ b/patches/0007-padata-Avoid-nested-calls-to-cpus_read_lock-in-pcryp.patch
@@ -28,14 +28,12 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: linux-crypto@vger.kernel.org
Link: http://lkml.kernel.org/r/20170524081547.571278910@linutronix.de
---
- kernel/padata.c | 11 ++++++-----
+ kernel/padata.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
-diff --git a/kernel/padata.c b/kernel/padata.c
-index 0c708f648853..868f947166d7 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
-@@ -940,6 +940,8 @@ static struct kobj_type padata_attr_type = {
+@@ -945,6 +945,8 @@ static struct kobj_type padata_attr_type
* @wq: workqueue to use for the allocated padata instance
* @pcpumask: cpumask that will be used for padata parallelization
* @cbcpumask: cpumask that will be used for padata serialization
@@ -44,7 +42,7 @@ index 0c708f648853..868f947166d7 100644
*/
static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
const struct cpumask *pcpumask,
-@@ -952,7 +954,6 @@ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+@@ -957,7 +959,6 @@ static struct padata_instance *padata_al
if (!pinst)
goto err;
@@ -52,7 +50,7 @@ index 0c708f648853..868f947166d7 100644
if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
goto err_free_inst;
if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
-@@ -976,14 +977,12 @@ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+@@ -981,14 +982,12 @@ static struct padata_instance *padata_al
pinst->flags = 0;
@@ -68,7 +66,7 @@ index 0c708f648853..868f947166d7 100644
#endif
return pinst;
-@@ -992,7 +991,6 @@ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+@@ -997,7 +996,6 @@ static struct padata_instance *padata_al
free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
kfree(pinst);
@@ -76,7 +74,7 @@ index 0c708f648853..868f947166d7 100644
err:
return NULL;
}
-@@ -1003,9 +1001,12 @@ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+@@ -1008,9 +1006,12 @@ static struct padata_instance *padata_al
* parallel workers.
*
* @wq: workqueue to use for the allocated padata instance
@@ -89,6 +87,3 @@ index 0c708f648853..868f947166d7 100644
return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);
---
-2.11.0
-
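
Patch 0007 drops the get_online_cpus()/put_online_cpus() pair from
padata_alloc(), so callers of padata_alloc_possible() are expected to hold the
hotplug lock themselves; that is what lets pcrypt avoid nesting
cpus_read_lock(). The resulting caller pattern as a sketch, with
foo_setup_padata() as an illustrative wrapper:

    #include <linux/padata.h>

    static struct padata_instance *foo_setup_padata(struct workqueue_struct *wq)
    {
            struct padata_instance *pinst;

            cpus_read_lock();       /* caller holds the lock, as pcrypt does */
            pinst = padata_alloc_possible(wq);
            cpus_read_unlock();
            return pinst;
    }
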
diff --git a/patches/0008-x86-mtrr-Remove-get_online_cpus-from-mtrr_save_state.patch b/patches/0008-x86-mtrr-Remove-get_online_cpus-from-mtrr_save_state.patch
index 01ad8962113f..a650f376b1af 100644
--- a/patches/0008-x86-mtrr-Remove-get_online_cpus-from-mtrr_save_state.patch
+++ b/patches/0008-x86-mtrr-Remove-get_online_cpus-from-mtrr_save_state.patch
@@ -20,11 +20,9 @@ Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081547.651378834@linutronix.de
---
- arch/x86/kernel/cpu/mtrr/main.c | 2 --
+ arch/x86/kernel/cpu/mtrr/main.c | 2 --
1 file changed, 2 deletions(-)
-diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
-index 2bce84d91c2b..c5bb63be4ba1 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -807,10 +807,8 @@ void mtrr_save_state(void)
@@ -38,6 +36,3 @@ index 2bce84d91c2b..c5bb63be4ba1 100644
}
void set_mtrr_aps_delayed_init(void)
---
-2.11.0
-
diff --git a/patches/0009-cpufreq-Use-cpuhp_setup_state_nocalls_cpuslocked.patch b/patches/0009-cpufreq-Use-cpuhp_setup_state_nocalls_cpuslocked.patch
index f45df3bddaef..3cdf80c77398 100644
--- a/patches/0009-cpufreq-Use-cpuhp_setup_state_nocalls_cpuslocked.patch
+++ b/patches/0009-cpufreq-Use-cpuhp_setup_state_nocalls_cpuslocked.patch
@@ -24,14 +24,12 @@ Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081547.731628408@linutronix.de
---
- drivers/cpufreq/cpufreq.c | 21 +++++++++++----------
+ drivers/cpufreq/cpufreq.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
-diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
-index 0e3f6496524d..6001369f9aeb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
-@@ -887,7 +887,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
+@@ -887,7 +887,7 @@ static ssize_t store(struct kobject *kob
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
@@ -40,7 +38,7 @@ index 0e3f6496524d..6001369f9aeb 100644
if (cpu_online(policy->cpu)) {
down_write(&policy->rwsem);
-@@ -895,7 +895,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
+@@ -895,7 +895,7 @@ static ssize_t store(struct kobject *kob
up_write(&policy->rwsem);
}
@@ -49,7 +47,7 @@ index 0e3f6496524d..6001369f9aeb 100644
return ret;
}
-@@ -2441,7 +2441,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+@@ -2441,7 +2441,7 @@ int cpufreq_register_driver(struct cpufr
pr_debug("trying to register driver %s\n", driver_data->name);
/* Protect against concurrent CPU online/offline. */
@@ -58,7 +56,7 @@ index 0e3f6496524d..6001369f9aeb 100644
write_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver) {
-@@ -2473,9 +2473,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+@@ -2474,9 +2474,10 @@ int cpufreq_register_driver(struct cpufr
goto err_if_unreg;
}
@@ -72,7 +70,7 @@ index 0e3f6496524d..6001369f9aeb 100644
if (ret < 0)
goto err_if_unreg;
hp_online = ret;
-@@ -2493,7 +2494,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+@@ -2494,7 +2495,7 @@ int cpufreq_register_driver(struct cpufr
cpufreq_driver = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
@@ -81,7 +79,7 @@ index 0e3f6496524d..6001369f9aeb 100644
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
-@@ -2516,17 +2517,17 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
+@@ -2517,17 +2518,17 @@ int cpufreq_unregister_driver(struct cpu
pr_debug("unregistering driver %s\n", driver->name);
/* Protect against concurrent cpu hotplug */
@@ -102,6 +100,3 @@ index 0e3f6496524d..6001369f9aeb 100644
return 0;
}
---
-2.11.0
-
diff --git a/patches/0009-tracing-Make-traceprobe-parsing-code-reusable.patch b/patches/0009-tracing-Make-traceprobe-parsing-code-reusable.patch
index 3ae6fd7b257b..3f91dc611b9d 100644
--- a/patches/0009-tracing-Make-traceprobe-parsing-code-reusable.patch
+++ b/patches/0009-tracing-Make-traceprobe-parsing-code-reusable.patch
@@ -135,7 +135,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* to do the manipulation, as well as saves the print formats
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
-@@ -878,8 +878,8 @@ static int probes_open(struct inode *ino
+@@ -873,8 +873,8 @@ static int probes_open(struct inode *ino
static ssize_t probes_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
@@ -146,7 +146,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static const struct file_operations kprobe_events_ops = {
-@@ -1404,9 +1404,9 @@ static __init int kprobe_trace_self_test
+@@ -1399,9 +1399,9 @@ static __init int kprobe_trace_self_test
pr_info("Testing kprobe tracing: ");
@@ -159,7 +159,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (WARN_ON_ONCE(ret)) {
pr_warn("error on probing function entry.\n");
warn++;
-@@ -1426,8 +1426,8 @@ static __init int kprobe_trace_self_test
+@@ -1421,8 +1421,8 @@ static __init int kprobe_trace_self_test
}
}
@@ -170,7 +170,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (WARN_ON_ONCE(ret)) {
pr_warn("error on probing function return.\n");
warn++;
-@@ -1497,13 +1497,13 @@ static __init int kprobe_trace_self_test
+@@ -1492,13 +1492,13 @@ static __init int kprobe_trace_self_test
disable_trace_kprobe(tk, file);
}
diff --git a/patches/0010-KVM-PPC-Book3S-HV-Use-cpuhp_setup_state_nocalls_cpus.patch b/patches/0010-KVM-PPC-Book3S-HV-Use-cpuhp_setup_state_nocalls_cpus.patch
index 42c661b8a0c9..64db0e55a0f1 100644
--- a/patches/0010-KVM-PPC-Book3S-HV-Use-cpuhp_setup_state_nocalls_cpus.patch
+++ b/patches/0010-KVM-PPC-Book3S-HV-Use-cpuhp_setup_state_nocalls_cpus.patch
@@ -27,14 +27,12 @@ Cc: linuxppc-dev@lists.ozlabs.org
Cc: Alexander Graf <agraf@suse.com>
Link: http://lkml.kernel.org/r/20170524081547.809616236@linutronix.de
---
- arch/powerpc/kvm/book3s_hv.c | 14 +++++++-------
+ arch/powerpc/kvm/book3s_hv.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
-diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
-index 42b7a4fd57d9..48a6bd160011 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
-@@ -3317,7 +3317,7 @@ void kvmppc_alloc_host_rm_ops(void)
+@@ -3359,7 +3359,7 @@ void kvmppc_alloc_host_rm_ops(void)
return;
}
@@ -43,7 +41,7 @@ index 42b7a4fd57d9..48a6bd160011 100644
for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
if (!cpu_online(cpu))
-@@ -3339,17 +3339,17 @@ void kvmppc_alloc_host_rm_ops(void)
+@@ -3381,17 +3381,17 @@ void kvmppc_alloc_host_rm_ops(void)
l_ops = (unsigned long) ops;
if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) {
@@ -67,6 +65,3 @@ index 42b7a4fd57d9..48a6bd160011 100644
}
void kvmppc_free_host_rm_ops(void)
---
-2.11.0
-
diff --git a/patches/0011-hwtracing-coresight-etm3x-Use-cpuhp_setup_state_noca.patch b/patches/0011-hwtracing-coresight-etm3x-Use-cpuhp_setup_state_noca.patch
index c1a78e06ed8a..97f56da14b53 100644
--- a/patches/0011-hwtracing-coresight-etm3x-Use-cpuhp_setup_state_noca.patch
+++ b/patches/0011-hwtracing-coresight-etm3x-Use-cpuhp_setup_state_noca.patch
@@ -23,14 +23,12 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20170524081547.889092478@linutronix.de
---
- drivers/hwtracing/coresight/coresight-etm3x.c | 20 ++++++++++----------
+ drivers/hwtracing/coresight/coresight-etm3x.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
-diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
-index a51b6b64ecdf..93ee8fc539be 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
-@@ -587,7 +587,7 @@ static void etm_disable_sysfs(struct coresight_device *csdev)
+@@ -587,7 +587,7 @@ static void etm_disable_sysfs(struct cor
* after cpu online mask indicates the cpu is offline but before the
* DYING hotplug callback is serviced by the ETM driver.
*/
@@ -39,7 +37,7 @@ index a51b6b64ecdf..93ee8fc539be 100644
spin_lock(&drvdata->spinlock);
/*
-@@ -597,7 +597,7 @@ static void etm_disable_sysfs(struct coresight_device *csdev)
+@@ -597,7 +597,7 @@ static void etm_disable_sysfs(struct cor
smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
spin_unlock(&drvdata->spinlock);
@@ -48,7 +46,7 @@ index a51b6b64ecdf..93ee8fc539be 100644
dev_info(drvdata->dev, "ETM tracing disabled\n");
}
-@@ -795,7 +795,7 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
+@@ -795,7 +795,7 @@ static int etm_probe(struct amba_device
drvdata->cpu = pdata ? pdata->cpu : 0;
@@ -57,7 +55,7 @@ index a51b6b64ecdf..93ee8fc539be 100644
etmdrvdata[drvdata->cpu] = drvdata;
if (smp_call_function_single(drvdata->cpu,
-@@ -803,17 +803,17 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
+@@ -803,17 +803,17 @@ static int etm_probe(struct amba_device
dev_err(dev, "ETM arch init failed\n");
if (!etm_count++) {
@@ -82,6 +80,3 @@ index a51b6b64ecdf..93ee8fc539be 100644
if (etm_arch_supported(drvdata->arch) == false) {
ret = -EINVAL;
---
-2.11.0
-
diff --git a/patches/0012-hwtracing-coresight-etm4x-Use-cpuhp_setup_state_noca.patch b/patches/0012-hwtracing-coresight-etm4x-Use-cpuhp_setup_state_noca.patch
index eb1ca661c657..8006cbac7daa 100644
--- a/patches/0012-hwtracing-coresight-etm4x-Use-cpuhp_setup_state_noca.patch
+++ b/patches/0012-hwtracing-coresight-etm4x-Use-cpuhp_setup_state_noca.patch
@@ -23,14 +23,12 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20170524081547.983493849@linutronix.de
---
- drivers/hwtracing/coresight/coresight-etm4x.c | 20 ++++++++++----------
+ drivers/hwtracing/coresight/coresight-etm4x.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
-diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
-index d1340fb4e457..532adc9dd32a 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
-@@ -371,7 +371,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
+@@ -371,7 +371,7 @@ static void etm4_disable_sysfs(struct co
* after cpu online mask indicates the cpu is offline but before the
* DYING hotplug callback is serviced by the ETM driver.
*/
@@ -39,7 +37,7 @@ index d1340fb4e457..532adc9dd32a 100644
spin_lock(&drvdata->spinlock);
/*
-@@ -381,7 +381,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
+@@ -381,7 +381,7 @@ static void etm4_disable_sysfs(struct co
smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
spin_unlock(&drvdata->spinlock);
@@ -48,7 +46,7 @@ index d1340fb4e457..532adc9dd32a 100644
dev_info(drvdata->dev, "ETM tracing disabled\n");
}
-@@ -982,7 +982,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
+@@ -982,7 +982,7 @@ static int etm4_probe(struct amba_device
drvdata->cpu = pdata ? pdata->cpu : 0;
@@ -57,7 +55,7 @@ index d1340fb4e457..532adc9dd32a 100644
etmdrvdata[drvdata->cpu] = drvdata;
if (smp_call_function_single(drvdata->cpu,
-@@ -990,18 +990,18 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
+@@ -990,18 +990,18 @@ static int etm4_probe(struct amba_device
dev_err(dev, "ETM arch init failed\n");
if (!etm4_count++) {
@@ -83,6 +81,3 @@ index d1340fb4e457..532adc9dd32a 100644
if (etm4_arch_supported(drvdata->arch) == false) {
ret = -EINVAL;
---
-2.11.0
-
diff --git a/patches/0013-extable-Adjust-system_state-checks.patch b/patches/0013-extable-Adjust-system_state-checks.patch
index c3c3175841cb..27d43e42325b 100644
--- a/patches/0013-extable-Adjust-system_state-checks.patch
+++ b/patches/0013-extable-Adjust-system_state-checks.patch
@@ -24,7 +24,7 @@ Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/extable.c
+++ b/kernel/extable.c
-@@ -75,7 +75,7 @@ int core_kernel_text(unsigned long addr)
+@@ -75,7 +75,7 @@ int notrace core_kernel_text(unsigned lo
addr < (unsigned long)_etext)
return 1;
diff --git a/patches/0013-perf-x86-intel-cqm-Use-cpuhp_setup_state_cpuslocked.patch b/patches/0013-perf-x86-intel-cqm-Use-cpuhp_setup_state_cpuslocked.patch
index 0df26fdcedd6..edb975571664 100644
--- a/patches/0013-perf-x86-intel-cqm-Use-cpuhp_setup_state_cpuslocked.patch
+++ b/patches/0013-perf-x86-intel-cqm-Use-cpuhp_setup_state_cpuslocked.patch
@@ -20,11 +20,9 @@ Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081548.075604046@linutronix.de
---
- arch/x86/events/intel/cqm.c | 16 ++++++++--------
+ arch/x86/events/intel/cqm.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
-diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
-index 8c00dc09a5d2..2521f771f2f5 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -1682,7 +1682,7 @@ static int __init intel_cqm_init(void)
@@ -58,6 +56,3 @@ index 8c00dc09a5d2..2521f771f2f5 100644
if (ret) {
kfree(str);
---
-2.11.0
-
diff --git a/patches/0014-ARM-hw_breakpoint-Use-cpuhp_setup_state_cpuslocked.patch b/patches/0014-ARM-hw_breakpoint-Use-cpuhp_setup_state_cpuslocked.patch
index 4ead97c3209b..fa6bcdef18e3 100644
--- a/patches/0014-ARM-hw_breakpoint-Use-cpuhp_setup_state_cpuslocked.patch
+++ b/patches/0014-ARM-hw_breakpoint-Use-cpuhp_setup_state_cpuslocked.patch
@@ -23,14 +23,12 @@ Cc: Russell King <linux@armlinux.org.uk>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20170524081548.170940729@linutronix.de
---
- arch/arm/kernel/hw_breakpoint.c | 11 ++++++-----
+ arch/arm/kernel/hw_breakpoint.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
-diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
-index be3b3fbd382f..63cb4c7c6593 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
-@@ -1090,7 +1090,7 @@ static int __init arch_hw_breakpoint_init(void)
+@@ -1090,7 +1090,7 @@ static int __init arch_hw_breakpoint_ini
* driven low on this core and there isn't an architected way to
* determine that.
*/
@@ -39,7 +37,7 @@ index be3b3fbd382f..63cb4c7c6593 100644
register_undef_hook(&debug_reg_hook);
/*
-@@ -1098,15 +1098,16 @@ static int __init arch_hw_breakpoint_init(void)
+@@ -1098,15 +1098,16 @@ static int __init arch_hw_breakpoint_ini
* assume that a halting debugger will leave the world in a nice state
* for us.
*/
@@ -59,7 +57,7 @@ index be3b3fbd382f..63cb4c7c6593 100644
return 0;
}
-@@ -1124,7 +1125,7 @@ static int __init arch_hw_breakpoint_init(void)
+@@ -1124,7 +1125,7 @@ static int __init arch_hw_breakpoint_ini
TRAP_HWBKPT, "watchpoint debug exception");
hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
TRAP_HWBKPT, "breakpoint debug exception");
@@ -68,6 +66,3 @@ index be3b3fbd382f..63cb4c7c6593 100644
/* Register PM notifiers. */
pm_init();
---
-2.11.0
-
diff --git a/patches/0015-s390-kernel-Use-stop_machine_cpuslocked.patch b/patches/0015-s390-kernel-Use-stop_machine_cpuslocked.patch
index 8a4902d43cee..961d6ed653c7 100644
--- a/patches/0015-s390-kernel-Use-stop_machine_cpuslocked.patch
+++ b/patches/0015-s390-kernel-Use-stop_machine_cpuslocked.patch
@@ -22,14 +22,12 @@ Cc: David Hildenbrand <dahi@linux.vnet.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Link: http://lkml.kernel.org/r/20170524081548.250203087@linutronix.de
---
- arch/s390/kernel/time.c | 6 +++---
+ arch/s390/kernel/time.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
-diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
-index c3a52f9a69a0..192efdfac918 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
-@@ -636,10 +636,10 @@ static void stp_work_fn(struct work_struct *work)
+@@ -634,10 +634,10 @@ static void stp_work_fn(struct work_stru
goto out_unlock;
memset(&stp_sync, 0, sizeof(stp_sync));
@@ -43,6 +41,3 @@ index c3a52f9a69a0..192efdfac918 100644
if (!check_sync_clock())
/*
---
-2.11.0
-
diff --git a/patches/0016-powerpc-powernv-Use-stop_machine_cpuslocked.patch b/patches/0016-powerpc-powernv-Use-stop_machine_cpuslocked.patch
index c6e317d6a65b..ad8bd5831243 100644
--- a/patches/0016-powerpc-powernv-Use-stop_machine_cpuslocked.patch
+++ b/patches/0016-powerpc-powernv-Use-stop_machine_cpuslocked.patch
@@ -21,14 +21,12 @@ Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/20170524081548.331016542@linutronix.de
---
- arch/powerpc/platforms/powernv/subcore.c | 7 ++++---
+ arch/powerpc/platforms/powernv/subcore.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
-diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
-index 0babef11136f..e6230f104dd9 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
-@@ -348,7 +348,7 @@ static int set_subcores_per_core(int new_mode)
+@@ -348,7 +348,7 @@ static int set_subcores_per_core(int new
state->master = 0;
}
@@ -37,7 +35,7 @@ index 0babef11136f..e6230f104dd9 100644
/* This cpu will update the globals before exiting stop machine */
this_cpu_ptr(&split_state)->master = 1;
-@@ -356,9 +356,10 @@ static int set_subcores_per_core(int new_mode)
+@@ -356,9 +356,10 @@ static int set_subcores_per_core(int new
/* Ensure state is consistent before we call the other cpus */
mb();
@@ -50,6 +48,3 @@ index 0babef11136f..e6230f104dd9 100644
return 0;
}
---
-2.11.0
-
diff --git a/patches/0017-cpu-hotplug-Use-stop_machine_cpuslocked-in-takedown_.patch b/patches/0017-cpu-hotplug-Use-stop_machine_cpuslocked-in-takedown_.patch
index bbebf44fead8..b3914d943d42 100644
--- a/patches/0017-cpu-hotplug-Use-stop_machine_cpuslocked-in-takedown_.patch
+++ b/patches/0017-cpu-hotplug-Use-stop_machine_cpuslocked-in-takedown_.patch
@@ -19,14 +19,12 @@ Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081548.423292433@linutronix.de
---
- kernel/cpu.c | 2 +-
+ kernel/cpu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
-diff --git a/kernel/cpu.c b/kernel/cpu.c
-index e4389ac55b65..142d889d9f69 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -701,7 +701,7 @@ static int takedown_cpu(unsigned int cpu)
+@@ -711,7 +711,7 @@ static int takedown_cpu(unsigned int cpu
/*
* So now all preempt/rcu users must observe !cpu_active().
*/
@@ -35,6 +33,3 @@ index e4389ac55b65..142d889d9f69 100644
if (err) {
/* CPU refused to die */
irq_unlock_sparse();
---
-2.11.0
-
diff --git a/patches/0018-x86-perf-Drop-EXPORT-of-perf_check_microcode.patch b/patches/0018-x86-perf-Drop-EXPORT-of-perf_check_microcode.patch
index 86d24cbcd049..47273e01bad7 100644
--- a/patches/0018-x86-perf-Drop-EXPORT-of-perf_check_microcode.patch
+++ b/patches/0018-x86-perf-Drop-EXPORT-of-perf_check_microcode.patch
@@ -16,11 +16,9 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Borislav Petkov <bp@alien8.de>
Link: http://lkml.kernel.org/r/20170524081548.515204988@linutronix.de
---
- arch/x86/events/core.c | 1 -
+ arch/x86/events/core.c | 1 -
1 file changed, 1 deletion(-)
-diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
-index 580b60f5ac83..ac650d57ebf7 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2224,7 +2224,6 @@ void perf_check_microcode(void)
@@ -31,6 +29,3 @@ index 580b60f5ac83..ac650d57ebf7 100644
static struct pmu pmu = {
.pmu_enable = x86_pmu_enable,
---
-2.11.0
-
diff --git a/patches/0019-perf-x86-intel-Drop-get_online_cpus-in-intel_snb_che.patch b/patches/0019-perf-x86-intel-Drop-get_online_cpus-in-intel_snb_che.patch
index f5d1c72c5fe8..1a42e07cd6ab 100644
--- a/patches/0019-perf-x86-intel-Drop-get_online_cpus-in-intel_snb_che.patch
+++ b/patches/0019-perf-x86-intel-Drop-get_online_cpus-in-intel_snb_che.patch
@@ -28,14 +28,12 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Borislav Petkov <bp@alien8.de>
Link: http://lkml.kernel.org/r/20170524081548.594862191@linutronix.de
---
- arch/x86/events/intel/core.c | 11 +++++------
+ arch/x86/events/intel/core.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
-diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
-index a6d91d4e37a1..b9174aacf42f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
-@@ -3410,12 +3410,10 @@ static void intel_snb_check_microcode(void)
+@@ -3389,12 +3389,10 @@ static void intel_snb_check_microcode(vo
int pebs_broken = 0;
int cpu;
@@ -48,7 +46,7 @@ index a6d91d4e37a1..b9174aacf42f 100644
if (pebs_broken == x86_pmu.pebs_broken)
return;
-@@ -3488,7 +3486,9 @@ static bool check_msr(unsigned long msr, u64 mask)
+@@ -3467,7 +3465,9 @@ static bool check_msr(unsigned long msr,
static __init void intel_sandybridge_quirk(void)
{
x86_pmu.check_microcode = intel_snb_check_microcode;
@@ -58,7 +56,7 @@ index a6d91d4e37a1..b9174aacf42f 100644
}
static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
-@@ -4112,13 +4112,12 @@ static __init int fixup_ht_bug(void)
+@@ -4090,13 +4090,12 @@ static __init int fixup_ht_bug(void)
lockup_detector_resume();
@@ -75,6 +73,3 @@ index a6d91d4e37a1..b9174aacf42f 100644
pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
return 0;
}
---
-2.11.0
-
diff --git a/patches/0020-PCI-Use-cpu_hotplug_disable-instead-of-get_online_cp.patch b/patches/0020-PCI-Use-cpu_hotplug_disable-instead-of-get_online_cp.patch
index 8bd1a98ffd9b..5ebfb25b6324 100644
--- a/patches/0020-PCI-Use-cpu_hotplug_disable-instead-of-get_online_cp.patch
+++ b/patches/0020-PCI-Use-cpu_hotplug_disable-instead-of-get_online_cp.patch
@@ -63,14 +63,12 @@ Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081548.691198590@linutronix.de
---
- drivers/pci/pci-driver.c | 4 ++--
+ drivers/pci/pci-driver.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
-diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
-index 192e7b681b96..5bf92fd983e5 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
-@@ -349,13 +349,13 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
+@@ -349,13 +349,13 @@ static int pci_call_probe(struct pci_dri
if (node >= 0 && node != numa_node_id()) {
int cpu;
@@ -86,6 +84,3 @@ index 192e7b681b96..5bf92fd983e5 100644
} else
error = local_pci_probe(&ddi);
---
-2.11.0
-
diff --git a/patches/0021-PCI-Replace-the-racy-recursion-prevention.patch b/patches/0021-PCI-Replace-the-racy-recursion-prevention.patch
index 685769451f50..8261c9deedbd 100644
--- a/patches/0021-PCI-Replace-the-racy-recursion-prevention.patch
+++ b/patches/0021-PCI-Replace-the-racy-recursion-prevention.patch
@@ -39,12 +39,10 @@ Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081548.771457199@linutronix.de
---
- drivers/pci/pci-driver.c | 47 +++++++++++++++++++++++++----------------------
- include/linux/pci.h | 1 +
+ drivers/pci/pci-driver.c | 47 +++++++++++++++++++++++++----------------------
+ include/linux/pci.h | 1 +
2 files changed, 26 insertions(+), 22 deletions(-)
-diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
-index 5bf92fd983e5..fe6be6382505 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -320,10 +320,19 @@ static long local_pci_probe(void *_ddi)
@@ -68,7 +66,7 @@ index 5bf92fd983e5..fe6be6382505 100644
struct drv_dev_and_id ddi = { drv, dev, id };
/*
-@@ -332,33 +341,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
+@@ -332,33 +341,27 @@ static int pci_call_probe(struct pci_dri
* on the right node.
*/
node = dev_to_node(&dev->dev);
@@ -117,11 +115,9 @@ index 5bf92fd983e5..fe6be6382505 100644
return error;
}
-diff --git a/include/linux/pci.h b/include/linux/pci.h
-index 33c2b0b77429..5026f2ae86db 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
-@@ -371,6 +371,7 @@ struct pci_dev {
+@@ -370,6 +370,7 @@ struct pci_dev {
unsigned int irq_managed:1;
unsigned int has_secondary_link:1;
unsigned int non_compliant_bars:1; /* broken BARs; ignore them */
@@ -129,6 +125,3 @@ index 33c2b0b77429..5026f2ae86db 100644
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
---
-2.11.0
-
diff --git a/patches/0022-ACPI-processor-Use-cpu_hotplug_disable-instead-of-ge.patch b/patches/0022-ACPI-processor-Use-cpu_hotplug_disable-instead-of-ge.patch
index 9ee9bf9c9dad..56c12505a7e5 100644
--- a/patches/0022-ACPI-processor-Use-cpu_hotplug_disable-instead-of-ge.patch
+++ b/patches/0022-ACPI-processor-Use-cpu_hotplug_disable-instead-of-ge.patch
@@ -44,14 +44,12 @@ Cc: linux-acpi@vger.kernel.org
Cc: Len Brown <lenb@kernel.org>
Link: http://lkml.kernel.org/r/20170524081548.851588594@linutronix.de
---
- drivers/acpi/processor_driver.c | 4 ++--
+ drivers/acpi/processor_driver.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
-diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
-index 8697a82bd465..591d1dd3f04e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
-@@ -268,9 +268,9 @@ static int acpi_processor_start(struct device *dev)
+@@ -268,9 +268,9 @@ static int acpi_processor_start(struct d
return -ENODEV;
/* Protect against concurrent CPU hotplug operations */
@@ -63,6 +61,3 @@ index 8697a82bd465..591d1dd3f04e 100644
return ret;
}
---
-2.11.0
-
diff --git a/patches/0023-perf-tracing-cpuhotplug-Fix-locking-order.patch b/patches/0023-perf-tracing-cpuhotplug-Fix-locking-order.patch
index 4860a01bd4fc..7d7b681d169f 100644
--- a/patches/0023-perf-tracing-cpuhotplug-Fix-locking-order.patch
+++ b/patches/0023-perf-tracing-cpuhotplug-Fix-locking-order.patch
@@ -32,15 +32,13 @@ Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Link: http://lkml.kernel.org/r/20170524081548.930941109@linutronix.de
---
- include/linux/perf_event.h | 2 +
- kernel/events/core.c | 106 ++++++++++++++++++++++++++++++++-------------
+ include/linux/perf_event.h | 2
+ kernel/events/core.c | 106 ++++++++++++++++++++++++++++++++-------------
2 files changed, 78 insertions(+), 30 deletions(-)
-diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
-index 24a635887f28..7d6aa29094b2 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
-@@ -801,6 +801,8 @@ struct perf_cpu_context {
+@@ -794,6 +794,8 @@ struct perf_cpu_context {
struct list_head sched_cb_entry;
int sched_cb_usage;
@@ -49,11 +47,9 @@ index 24a635887f28..7d6aa29094b2 100644
};
struct perf_output_handle {
-diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 6e75a5c9412d..b97cda4d1777 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -389,6 +389,7 @@ static atomic_t nr_switch_events __read_mostly;
+@@ -386,6 +386,7 @@ static atomic_t nr_switch_events __read_
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
@@ -61,7 +57,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
/*
* perf event paranoia level:
-@@ -3812,14 +3813,6 @@ find_get_context(struct pmu *pmu, struct task_struct *task,
+@@ -3809,14 +3810,6 @@ find_get_context(struct pmu *pmu, struct
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
@@ -76,7 +72,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
ctx = &cpuctx->ctx;
get_ctx(ctx);
-@@ -7703,7 +7696,8 @@ static int swevent_hlist_get_cpu(int cpu)
+@@ -7592,7 +7585,8 @@ static int swevent_hlist_get_cpu(int cpu
int err = 0;
mutex_lock(&swhash->hlist_mutex);
@@ -86,7 +82,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
struct swevent_hlist *hlist;
hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
-@@ -7724,7 +7718,7 @@ static int swevent_hlist_get(void)
+@@ -7613,7 +7607,7 @@ static int swevent_hlist_get(void)
{
int err, cpu, failed_cpu;
@@ -95,7 +91,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
for_each_possible_cpu(cpu) {
err = swevent_hlist_get_cpu(cpu);
if (err) {
-@@ -7732,8 +7726,7 @@ static int swevent_hlist_get(void)
+@@ -7621,8 +7615,7 @@ static int swevent_hlist_get(void)
goto fail;
}
}
@@ -105,7 +101,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
return 0;
fail:
for_each_possible_cpu(cpu) {
-@@ -7741,8 +7734,7 @@ static int swevent_hlist_get(void)
+@@ -7630,8 +7623,7 @@ static int swevent_hlist_get(void)
break;
swevent_hlist_put_cpu(cpu);
}
@@ -115,7 +111,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
return err;
}
-@@ -8920,7 +8912,7 @@ perf_event_mux_interval_ms_store(struct device *dev,
+@@ -8809,7 +8801,7 @@ perf_event_mux_interval_ms_store(struct
pmu->hrtimer_interval_ms = timer;
/* update all cpuctx for this PMU */
@@ -124,7 +120,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
for_each_online_cpu(cpu) {
struct perf_cpu_context *cpuctx;
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
-@@ -8929,7 +8921,7 @@ perf_event_mux_interval_ms_store(struct device *dev,
+@@ -8818,7 +8810,7 @@ perf_event_mux_interval_ms_store(struct
cpu_function_call(cpu,
(remote_function_f)perf_mux_hrtimer_restart, cpuctx);
}
@@ -133,7 +129,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
mutex_unlock(&mux_interval_mutex);
return count;
-@@ -9059,6 +9051,7 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
+@@ -8948,6 +8940,7 @@ int perf_pmu_register(struct pmu *pmu, c
lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
cpuctx->ctx.pmu = pmu;
@@ -141,7 +137,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
__perf_mux_hrtimer_init(cpuctx, cpu);
}
-@@ -9882,12 +9875,10 @@ SYSCALL_DEFINE5(perf_event_open,
+@@ -9764,12 +9757,10 @@ SYSCALL_DEFINE5(perf_event_open,
goto err_task;
}
@@ -155,7 +151,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
/*
* Reuse ptrace permission checks for now.
-@@ -10073,6 +10064,23 @@ SYSCALL_DEFINE5(perf_event_open,
+@@ -9955,6 +9946,23 @@ SYSCALL_DEFINE5(perf_event_open,
goto err_locked;
}
@@ -179,7 +175,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
/*
* Must be under the same ctx::mutex as perf_install_in_context(),
* because we need to serialize with concurrent event creation.
-@@ -10162,8 +10170,6 @@ SYSCALL_DEFINE5(perf_event_open,
+@@ -10044,8 +10052,6 @@ SYSCALL_DEFINE5(perf_event_open,
put_task_struct(task);
}
@@ -188,7 +184,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
mutex_lock(&current->perf_event_mutex);
list_add_tail(&event->owner_entry, &current->perf_event_list);
mutex_unlock(&current->perf_event_mutex);
-@@ -10197,8 +10203,6 @@ SYSCALL_DEFINE5(perf_event_open,
+@@ -10079,8 +10085,6 @@ SYSCALL_DEFINE5(perf_event_open,
err_cred:
if (task)
mutex_unlock(&task->signal->cred_guard_mutex);
@@ -197,7 +193,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
err_task:
if (task)
put_task_struct(task);
-@@ -10253,6 +10257,21 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+@@ -10135,6 +10139,21 @@ perf_event_create_kernel_counter(struct
goto err_unlock;
}
@@ -219,7 +215,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
if (!exclusive_event_installable(event, ctx)) {
err = -EBUSY;
goto err_unlock;
-@@ -10920,6 +10939,8 @@ static void __init perf_event_init_all_cpus(void)
+@@ -10802,6 +10821,8 @@ static void __init perf_event_init_all_c
struct swevent_htable *swhash;
int cpu;
@@ -228,7 +224,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
for_each_possible_cpu(cpu) {
swhash = &per_cpu(swevent_htable, cpu);
mutex_init(&swhash->hlist_mutex);
-@@ -10935,7 +10956,7 @@ static void __init perf_event_init_all_cpus(void)
+@@ -10817,7 +10838,7 @@ static void __init perf_event_init_all_c
}
}
@@ -237,7 +233,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
-@@ -10948,7 +10969,6 @@ int perf_event_init_cpu(unsigned int cpu)
+@@ -10830,7 +10851,6 @@ int perf_event_init_cpu(unsigned int cpu
rcu_assign_pointer(swhash->swevent_hlist, hlist);
}
mutex_unlock(&swhash->hlist_mutex);
@@ -245,7 +241,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
}
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
-@@ -10966,19 +10986,22 @@ static void __perf_event_exit_context(void *__info)
+@@ -10848,19 +10868,22 @@ static void __perf_event_exit_context(vo
static void perf_event_exit_cpu_context(int cpu)
{
@@ -273,7 +269,7 @@ index 6e75a5c9412d..b97cda4d1777 100644
}
#else
-@@ -10986,6 +11009,29 @@ static void perf_event_exit_cpu_context(int cpu) { }
+@@ -10868,6 +10891,29 @@ static void perf_event_exit_cpu_context(
#endif
@@ -303,6 +299,3 @@ index 6e75a5c9412d..b97cda4d1777 100644
int perf_event_exit_cpu(unsigned int cpu)
{
perf_event_exit_cpu_context(cpu);
---
-2.11.0
-
diff --git a/patches/0024-jump_label-Reorder-hotplug-lock-and-jump_label_lock.patch b/patches/0024-jump_label-Reorder-hotplug-lock-and-jump_label_lock.patch
index 20e370565e5b..9182f6d78695 100644
--- a/patches/0024-jump_label-Reorder-hotplug-lock-and-jump_label_lock.patch
+++ b/patches/0024-jump_label-Reorder-hotplug-lock-and-jump_label_lock.patch
@@ -34,18 +34,16 @@ Cc: Jason Baron <jbaron@akamai.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Link: http://lkml.kernel.org/r/20170524081549.025830817@linutronix.de
---
- arch/mips/kernel/jump_label.c | 2 --
- arch/sparc/kernel/jump_label.c | 2 --
- arch/tile/kernel/jump_label.c | 2 --
- arch/x86/kernel/jump_label.c | 2 --
- kernel/jump_label.c | 20 ++++++++++++++------
+ arch/mips/kernel/jump_label.c | 2 --
+ arch/sparc/kernel/jump_label.c | 2 --
+ arch/tile/kernel/jump_label.c | 2 --
+ arch/x86/kernel/jump_label.c | 2 --
+ kernel/jump_label.c | 20 ++++++++++++++------
5 files changed, 14 insertions(+), 14 deletions(-)
-diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
-index 3e586daa3a32..32e3168316cd 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
-@@ -58,7 +58,6 @@ void arch_jump_label_transform(struct jump_entry *e,
+@@ -58,7 +58,6 @@ void arch_jump_label_transform(struct ju
insn.word = 0; /* nop */
}
@@ -53,7 +51,7 @@ index 3e586daa3a32..32e3168316cd 100644
mutex_lock(&text_mutex);
if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
insn_p->halfword[0] = insn.word >> 16;
-@@ -70,7 +69,6 @@ void arch_jump_label_transform(struct jump_entry *e,
+@@ -70,7 +69,6 @@ void arch_jump_label_transform(struct ju
(unsigned long)insn_p + sizeof(*insn_p));
mutex_unlock(&text_mutex);
@@ -61,11 +59,9 @@ index 3e586daa3a32..32e3168316cd 100644
}
#endif /* HAVE_JUMP_LABEL */
-diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
-index 07933b9e9ce0..93adde1ac166 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
-@@ -41,12 +41,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
+@@ -41,12 +41,10 @@ void arch_jump_label_transform(struct ju
val = 0x01000000;
}
@@ -78,11 +74,9 @@ index 07933b9e9ce0..93adde1ac166 100644
}
#endif
-diff --git a/arch/tile/kernel/jump_label.c b/arch/tile/kernel/jump_label.c
-index 07802d586988..93931a46625b 100644
--- a/arch/tile/kernel/jump_label.c
+++ b/arch/tile/kernel/jump_label.c
-@@ -45,14 +45,12 @@ static void __jump_label_transform(struct jump_entry *e,
+@@ -45,14 +45,12 @@ static void __jump_label_transform(struc
void arch_jump_label_transform(struct jump_entry *e,
enum jump_label_type type)
{
@@ -97,11 +91,9 @@ index 07802d586988..93931a46625b 100644
}
__init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
-diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
-index c37bd0f39c70..ab4f491da2a9 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
-@@ -105,11 +105,9 @@ static void __jump_label_transform(struct jump_entry *entry,
+@@ -105,11 +105,9 @@ static void __jump_label_transform(struc
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
@@ -113,8 +105,6 @@ index c37bd0f39c70..ab4f491da2a9 100644
}
static enum {
-diff --git a/kernel/jump_label.c b/kernel/jump_label.c
-index 6c9cb208ac48..d11c506a6ac3 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -15,6 +15,7 @@
@@ -125,7 +115,7 @@ index 6c9cb208ac48..d11c506a6ac3 100644
#ifdef HAVE_JUMP_LABEL
-@@ -124,6 +125,7 @@ void static_key_slow_inc(struct static_key *key)
+@@ -124,6 +125,7 @@ void static_key_slow_inc(struct static_k
return;
}
@@ -133,7 +123,7 @@ index 6c9cb208ac48..d11c506a6ac3 100644
jump_label_lock();
if (atomic_read(&key->enabled) == 0) {
atomic_set(&key->enabled, -1);
-@@ -133,12 +135,14 @@ void static_key_slow_inc(struct static_key *key)
+@@ -133,12 +135,14 @@ void static_key_slow_inc(struct static_k
atomic_inc(&key->enabled);
}
jump_label_unlock();
@@ -148,7 +138,7 @@ index 6c9cb208ac48..d11c506a6ac3 100644
/*
* The negative count check is valid even when a negative
* key->enabled is in use by static_key_slow_inc(); a
-@@ -149,6 +153,7 @@ static void __static_key_slow_dec(struct static_key *key,
+@@ -149,6 +153,7 @@ static void __static_key_slow_dec(struct
if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
WARN(atomic_read(&key->enabled) < 0,
"jump label: negative count!\n");
@@ -156,7 +146,7 @@ index 6c9cb208ac48..d11c506a6ac3 100644
return;
}
-@@ -159,6 +164,7 @@ static void __static_key_slow_dec(struct static_key *key,
+@@ -159,6 +164,7 @@ static void __static_key_slow_dec(struct
jump_label_update(key);
}
jump_label_unlock();
@@ -180,7 +170,7 @@ index 6c9cb208ac48..d11c506a6ac3 100644
}
#ifdef CONFIG_MODULES
-@@ -590,28 +598,28 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
+@@ -590,28 +598,28 @@ jump_label_module_notify(struct notifier
struct module *mod = data;
int ret = 0;
@@ -215,6 +205,3 @@ index 6c9cb208ac48..d11c506a6ac3 100644
return notifier_from_errno(ret);
}
---
-2.11.0
-
diff --git a/patches/0025-kprobes-Cure-hotplug-lock-ordering-issues.patch b/patches/0025-kprobes-Cure-hotplug-lock-ordering-issues.patch
index cd00bbe23c85..4b76428dbaba 100644
--- a/patches/0025-kprobes-Cure-hotplug-lock-ordering-issues.patch
+++ b/patches/0025-kprobes-Cure-hotplug-lock-ordering-issues.patch
@@ -38,14 +38,12 @@ Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Link: http://lkml.kernel.org/r/20170524081549.104864779@linutronix.de
---
- kernel/kprobes.c | 59 ++++++++++++++++++++++++++++++--------------------------
+ kernel/kprobes.c | 59 +++++++++++++++++++++++++++++--------------------------
1 file changed, 32 insertions(+), 27 deletions(-)
-diff --git a/kernel/kprobes.c b/kernel/kprobes.c
-index 2d2d3a568e4e..9f6056749a28 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
-@@ -483,11 +483,6 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
+@@ -486,11 +486,6 @@ static DECLARE_DELAYED_WORK(optimizing_w
*/
static void do_optimize_kprobes(void)
{
@@ -57,7 +55,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
/*
* The optimization/unoptimization refers online_cpus via
* stop_machine() and cpu-hotplug modifies online_cpus.
-@@ -495,14 +490,19 @@ static void do_optimize_kprobes(void)
+@@ -498,14 +493,19 @@ static void do_optimize_kprobes(void)
* This combination can cause a deadlock (cpu-hotplug try to lock
* text_mutex but stop_machine can not be done because online_cpus
* has been changed)
@@ -80,7 +78,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
}
/*
-@@ -513,12 +513,13 @@ static void do_unoptimize_kprobes(void)
+@@ -516,12 +516,13 @@ static void do_unoptimize_kprobes(void)
{
struct optimized_kprobe *op, *tmp;
@@ -96,7 +94,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
mutex_lock(&text_mutex);
arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
/* Loop free_list for disarming */
-@@ -537,7 +538,6 @@ static void do_unoptimize_kprobes(void)
+@@ -540,7 +541,6 @@ static void do_unoptimize_kprobes(void)
list_del_init(&op->list);
}
mutex_unlock(&text_mutex);
@@ -104,7 +102,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
}
/* Reclaim all kprobes on the free_list */
-@@ -562,6 +562,7 @@ static void kick_kprobe_optimizer(void)
+@@ -565,6 +565,7 @@ static void kick_kprobe_optimizer(void)
static void kprobe_optimizer(struct work_struct *work)
{
mutex_lock(&kprobe_mutex);
@@ -112,7 +110,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
/* Lock modules while optimizing kprobes */
mutex_lock(&module_mutex);
-@@ -587,6 +588,7 @@ static void kprobe_optimizer(struct work_struct *work)
+@@ -590,6 +591,7 @@ static void kprobe_optimizer(struct work
do_free_cleaned_kprobes();
mutex_unlock(&module_mutex);
@@ -120,7 +118,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
mutex_unlock(&kprobe_mutex);
/* Step 5: Kick optimizer again if needed */
-@@ -650,9 +652,8 @@ static void optimize_kprobe(struct kprobe *p)
+@@ -653,9 +655,8 @@ static void optimize_kprobe(struct kprob
/* Short cut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
@@ -131,7 +129,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
if (kprobe_disabled(&op->kp))
arch_disarm_kprobe(&op->kp);
}
-@@ -791,6 +792,7 @@ static void try_to_optimize_kprobe(struct kprobe *p)
+@@ -787,6 +788,7 @@ static void try_to_optimize_kprobe(struc
return;
/* For preparing optimization, jump_label_text_reserved() is called */
@@ -139,7 +137,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
jump_label_lock();
mutex_lock(&text_mutex);
-@@ -812,6 +814,7 @@ static void try_to_optimize_kprobe(struct kprobe *p)
+@@ -808,6 +810,7 @@ static void try_to_optimize_kprobe(struc
out:
mutex_unlock(&text_mutex);
jump_label_unlock();
@@ -147,7 +145,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
}
#ifdef CONFIG_SYSCTL
-@@ -826,6 +829,7 @@ static void optimize_all_kprobes(void)
+@@ -822,6 +825,7 @@ static void optimize_all_kprobes(void)
if (kprobes_allow_optimization)
goto out;
@@ -155,7 +153,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
kprobes_allow_optimization = true;
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
-@@ -833,6 +837,7 @@ static void optimize_all_kprobes(void)
+@@ -829,6 +833,7 @@ static void optimize_all_kprobes(void)
if (!kprobe_disabled(p))
optimize_kprobe(p);
}
@@ -163,7 +161,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
printk(KERN_INFO "Kprobes globally optimized\n");
out:
mutex_unlock(&kprobe_mutex);
-@@ -851,6 +856,7 @@ static void unoptimize_all_kprobes(void)
+@@ -847,6 +852,7 @@ static void unoptimize_all_kprobes(void)
return;
}
@@ -171,7 +169,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
kprobes_allow_optimization = false;
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
-@@ -859,6 +865,7 @@ static void unoptimize_all_kprobes(void)
+@@ -855,6 +861,7 @@ static void unoptimize_all_kprobes(void)
unoptimize_kprobe(p, false);
}
}
@@ -179,7 +177,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
mutex_unlock(&kprobe_mutex);
/* Wait for unoptimizing completion */
-@@ -1010,14 +1017,11 @@ static void arm_kprobe(struct kprobe *kp)
+@@ -1006,14 +1013,11 @@ static void arm_kprobe(struct kprobe *kp
arm_kprobe_ftrace(kp);
return;
}
@@ -196,7 +194,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
}
/* Disarm a kprobe with text_mutex */
-@@ -1027,10 +1031,12 @@ static void disarm_kprobe(struct kprobe *kp, bool reopt)
+@@ -1023,10 +1027,12 @@ static void disarm_kprobe(struct kprobe
disarm_kprobe_ftrace(kp);
return;
}
@@ -210,7 +208,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
}
/*
-@@ -1298,13 +1304,10 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
+@@ -1294,13 +1300,10 @@ static int register_aggr_kprobe(struct k
int ret = 0;
struct kprobe *ap = orig_p;
@@ -226,7 +224,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
mutex_lock(&text_mutex);
if (!kprobe_aggrprobe(orig_p)) {
-@@ -1352,8 +1355,8 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
+@@ -1348,8 +1351,8 @@ static int register_aggr_kprobe(struct k
out:
mutex_unlock(&text_mutex);
@@ -236,7 +234,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
ap->flags &= ~KPROBE_FLAG_DISABLED;
-@@ -1555,9 +1558,12 @@ int register_kprobe(struct kprobe *p)
+@@ -1548,9 +1551,12 @@ int register_kprobe(struct kprobe *p)
goto out;
}
@@ -250,7 +248,7 @@ index 2d2d3a568e4e..9f6056749a28 100644
if (ret)
goto out;
-@@ -1570,7 +1576,6 @@ int register_kprobe(struct kprobe *p)
+@@ -1563,7 +1569,6 @@ int register_kprobe(struct kprobe *p)
/* Try to optimize kprobe */
try_to_optimize_kprobe(p);
@@ -258,6 +256,3 @@ index 2d2d3a568e4e..9f6056749a28 100644
out:
mutex_unlock(&kprobe_mutex);
---
-2.11.0
-
diff --git a/patches/0026-arm64-Prevent-cpu-hotplug-rwsem-recursion.patch b/patches/0026-arm64-Prevent-cpu-hotplug-rwsem-recursion.patch
index 012d8e0915fc..8ef952ef3a2a 100644
--- a/patches/0026-arm64-Prevent-cpu-hotplug-rwsem-recursion.patch
+++ b/patches/0026-arm64-Prevent-cpu-hotplug-rwsem-recursion.patch
@@ -21,15 +21,13 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20170524081549.197070135@linutronix.de
---
- arch/arm64/include/asm/insn.h | 1 -
- arch/arm64/kernel/insn.c | 5 +++--
+ arch/arm64/include/asm/insn.h | 1 -
+ arch/arm64/kernel/insn.c | 5 +++--
2 files changed, 3 insertions(+), 3 deletions(-)
-diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
-index 29cb2ca756f6..4214c38d016b 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
-@@ -433,7 +433,6 @@ u32 aarch64_set_branch_offset(u32 insn, s32 offset);
+@@ -403,7 +403,6 @@ u32 aarch64_set_branch_offset(u32 insn,
bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
@@ -37,11 +35,9 @@ index 29cb2ca756f6..4214c38d016b 100644
int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
s32 aarch64_insn_adrp_get_offset(u32 insn);
-diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
-index b884a926a632..cd872133e88e 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
-@@ -255,6 +255,7 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+@@ -255,6 +255,7 @@ static int __kprobes aarch64_insn_patch_
return ret;
}
@@ -49,7 +45,7 @@ index b884a926a632..cd872133e88e 100644
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
struct aarch64_insn_patch patch = {
-@@ -267,8 +268,8 @@ int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
+@@ -267,8 +268,8 @@ int __kprobes aarch64_insn_patch_text_sy
if (cnt <= 0)
return -EINVAL;
@@ -60,6 +56,3 @@ index b884a926a632..cd872133e88e 100644
}
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
---
-2.11.0
-
diff --git a/patches/0027-arm-Prevent-hotplug-rwsem-recursion.patch b/patches/0027-arm-Prevent-hotplug-rwsem-recursion.patch
index ac0e3a83ca31..924d8f7d5662 100644
--- a/patches/0027-arm-Prevent-hotplug-rwsem-recursion.patch
+++ b/patches/0027-arm-Prevent-hotplug-rwsem-recursion.patch
@@ -19,26 +19,22 @@ Cc: Russell King <linux@armlinux.org.uk>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20170524081549.275871311@linutronix.de
---
- arch/arm/kernel/patch.c | 2 +-
- arch/arm/probes/kprobes/core.c | 3 ++-
+ arch/arm/kernel/patch.c | 2 +-
+ arch/arm/probes/kprobes/core.c | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
-diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
-index 020560b2dcb7..a1a34722c655 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
-@@ -124,5 +124,5 @@ void __kprobes patch_text(void *addr, unsigned int insn)
+@@ -124,5 +124,5 @@ void __kprobes patch_text(void *addr, un
.insn = insn,
};
- stop_machine(patch_text_stop_machine, &patch, NULL);
+ stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL);
}
-diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
-index ad1f4e6a9e33..52d1cd14fda4 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
-@@ -182,7 +182,8 @@ void __kprobes kprobes_remove_breakpoint(void *addr, unsigned int insn)
+@@ -182,7 +182,8 @@ void __kprobes kprobes_remove_breakpoint
.addr = addr,
.insn = insn,
};
@@ -48,6 +44,3 @@ index ad1f4e6a9e33..52d1cd14fda4 100644
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
---
-2.11.0
-
diff --git a/patches/0028-s390-Prevent-hotplug-rwsem-recursion.patch b/patches/0028-s390-Prevent-hotplug-rwsem-recursion.patch
index 1499ce1e48d4..acac05937b05 100644
--- a/patches/0028-s390-Prevent-hotplug-rwsem-recursion.patch
+++ b/patches/0028-s390-Prevent-hotplug-rwsem-recursion.patch
@@ -20,15 +20,13 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Link: http://lkml.kernel.org/r/20170524081549.354513406@linutronix.de
---
- arch/s390/kernel/jump_label.c | 2 +-
- arch/s390/kernel/kprobes.c | 4 ++--
+ arch/s390/kernel/jump_label.c | 2 +-
+ arch/s390/kernel/kprobes.c | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
-diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
-index 6aa630a8d24f..262506cee4c3 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
-@@ -93,7 +93,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
+@@ -93,7 +93,7 @@ void arch_jump_label_transform(struct ju
args.entry = entry;
args.type = type;
@@ -37,8 +35,6 @@ index 6aa630a8d24f..262506cee4c3 100644
}
void arch_jump_label_transform_static(struct jump_entry *entry,
-diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
-index 3d6a99746454..6842e4501e2e 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -196,7 +196,7 @@ void arch_arm_kprobe(struct kprobe *p)
@@ -50,7 +46,7 @@ index 3d6a99746454..6842e4501e2e 100644
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
-@@ -204,7 +204,7 @@ void arch_disarm_kprobe(struct kprobe *p)
+@@ -204,7 +204,7 @@ void arch_disarm_kprobe(struct kprobe *p
{
struct swap_insn_args args = {.p = p, .arm_kprobe = 0};
@@ -59,6 +55,3 @@ index 3d6a99746454..6842e4501e2e 100644
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
---
-2.11.0
-
diff --git a/patches/0029-cpu-hotplug-Convert-hotplug-locking-to-percpu-rwsem.patch b/patches/0029-cpu-hotplug-Convert-hotplug-locking-to-percpu-rwsem.patch
index 79a107ed7b4c..496f4d1d7f97 100644
--- a/patches/0029-cpu-hotplug-Convert-hotplug-locking-to-percpu-rwsem.patch
+++ b/patches/0029-cpu-hotplug-Convert-hotplug-locking-to-percpu-rwsem.patch
@@ -15,12 +15,10 @@ Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081549.447014063@linutronix.de
---
- include/linux/cpu.h | 2 +-
- kernel/cpu.c | 107 +++++++---------------------------------------------
- 2 files changed, 14 insertions(+), 95 deletions(-)
+ include/linux/cpu.h | 2
+ kernel/cpu.c | 105 ++++++----------------------------------------------
+ 2 files changed, 14 insertions(+), 93 deletions(-)
-diff --git a/include/linux/cpu.h b/include/linux/cpu.h
-index af4d660798e5..ca73bc1563f4 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -103,7 +103,7 @@ extern void cpus_write_lock(void);
@@ -32,8 +30,6 @@ index af4d660798e5..ca73bc1563f4 100644
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
-diff --git a/kernel/cpu.c b/kernel/cpu.c
-index 142d889d9f69..66836216ebae 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -27,6 +27,7 @@
@@ -178,15 +174,3 @@ index 142d889d9f69..66836216ebae 100644
}
/*
-@@ -344,8 +265,6 @@ void cpu_hotplug_enable(void)
- EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
- #endif /* CONFIG_HOTPLUG_CPU */
-
--/* Notifier wrappers for transitioning to state machine */
--
- static int bringup_wait_for_ap(unsigned int cpu)
- {
- struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
---
-2.11.0
-
diff --git a/patches/0030-sched-Provide-is_percpu_thread-helper.patch b/patches/0030-sched-Provide-is_percpu_thread-helper.patch
index 92aa91a555fa..7883629a3a9e 100644
--- a/patches/0030-sched-Provide-is_percpu_thread-helper.patch
+++ b/patches/0030-sched-Provide-is_percpu_thread-helper.patch
@@ -13,14 +13,12 @@ Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081549.541649540@linutronix.de
---
- include/linux/sched.h | 10 ++++++++++
+ include/linux/sched.h | 10 ++++++++++
1 file changed, 10 insertions(+)
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 2b69fc650201..3dfa5f99d6ee 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1265,6 +1265,16 @@ extern struct pid *cad_pid;
+@@ -1258,6 +1258,16 @@ extern struct pid *cad_pid;
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
@@ -37,6 +35,3 @@ index 2b69fc650201..3dfa5f99d6ee 100644
/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
---
-2.11.0
-
diff --git a/patches/0031-acpi-processor-Prevent-cpu-hotplug-deadlock.patch b/patches/0031-acpi-processor-Prevent-cpu-hotplug-deadlock.patch
index 1919876084f2..833746254b3c 100644
--- a/patches/0031-acpi-processor-Prevent-cpu-hotplug-deadlock.patch
+++ b/patches/0031-acpi-processor-Prevent-cpu-hotplug-deadlock.patch
@@ -144,14 +144,12 @@ Cc: linux-acpi@vger.kernel.org
Cc: Len Brown <lenb@kernel.org>
Link: http://lkml.kernel.org/r/20170524081549.620489733@linutronix.de
---
- drivers/acpi/processor_throttling.c | 16 ++++++++--------
+ drivers/acpi/processor_throttling.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
-diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
-index 3de34633f7f9..7f9aff4b8d62 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
-@@ -909,6 +909,13 @@ static long __acpi_processor_get_throttling(void *data)
+@@ -909,6 +909,13 @@ static long __acpi_processor_get_throttl
return pr->throttling.acpi_processor_get_throttling(pr);
}
@@ -165,7 +163,7 @@ index 3de34633f7f9..7f9aff4b8d62 100644
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
if (!pr)
-@@ -926,7 +933,7 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
+@@ -926,7 +933,7 @@ static int acpi_processor_get_throttling
if (!cpu_online(pr->id))
return -ENODEV;
@@ -174,7 +172,7 @@ index 3de34633f7f9..7f9aff4b8d62 100644
}
static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
-@@ -1076,13 +1083,6 @@ static long acpi_processor_throttling_fn(void *data)
+@@ -1076,13 +1083,6 @@ static long acpi_processor_throttling_fn
arg->target_state, arg->force);
}
@@ -188,6 +186,3 @@ index 3de34633f7f9..7f9aff4b8d62 100644
static int __acpi_processor_set_throttling(struct acpi_processor *pr,
int state, bool force, bool direct)
{
---
-2.11.0
-
diff --git a/patches/0032-cpuhotplug-Link-lock-stacks-for-hotplug-callbacks.patch b/patches/0032-cpuhotplug-Link-lock-stacks-for-hotplug-callbacks.patch
index 1e907f77cd01..027e687498cd 100644
--- a/patches/0032-cpuhotplug-Link-lock-stacks-for-hotplug-callbacks.patch
+++ b/patches/0032-cpuhotplug-Link-lock-stacks-for-hotplug-callbacks.patch
@@ -32,11 +32,9 @@ Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081549.709375845@linutronix.de
---
- kernel/cpu.c | 13 +++++++++++++
+ kernel/cpu.c | 13 +++++++++++++
1 file changed, 13 insertions(+)
-diff --git a/kernel/cpu.c b/kernel/cpu.c
-index 66836216ebae..7435ffc6163b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -66,6 +66,12 @@ struct cpuhp_cpu_state {
@@ -52,7 +50,7 @@ index 66836216ebae..7435ffc6163b 100644
/**
* cpuhp_step - Hotplug state machine step
* @name: Name of the step
-@@ -403,6 +409,7 @@ static void cpuhp_thread_fun(unsigned int cpu)
+@@ -415,6 +421,7 @@ static void cpuhp_thread_fun(unsigned in
st->should_run = false;
@@ -60,7 +58,7 @@ index 66836216ebae..7435ffc6163b 100644
/* Single callback invocation for [un]install ? */
if (st->single) {
if (st->cb_state < CPUHP_AP_ONLINE) {
-@@ -429,6 +436,7 @@ static void cpuhp_thread_fun(unsigned int cpu)
+@@ -441,6 +448,7 @@ static void cpuhp_thread_fun(unsigned in
else if (st->state > st->target)
ret = cpuhp_ap_offline(cpu, st);
}
@@ -68,7 +66,7 @@ index 66836216ebae..7435ffc6163b 100644
st->result = ret;
complete(&st->done);
}
-@@ -443,6 +451,9 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
+@@ -455,6 +463,9 @@ cpuhp_invoke_ap_callback(int cpu, enum c
if (!cpu_online(cpu))
return 0;
@@ -78,7 +76,7 @@ index 66836216ebae..7435ffc6163b 100644
/*
* If we are up and running, use the hotplug thread. For early calls
* we invoke the thread function directly.
-@@ -486,6 +497,8 @@ static int cpuhp_kick_ap_work(unsigned int cpu)
+@@ -498,6 +509,8 @@ static int cpuhp_kick_ap_work(unsigned i
enum cpuhp_state state = st->state;
trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
@@ -87,6 +85,3 @@ index 66836216ebae..7435ffc6163b 100644
__cpuhp_kick_ap_work(st);
wait_for_completion(&st->done);
trace_cpuhp_exit(cpu, st->state, state, st->result);
---
-2.11.0
-
diff --git a/patches/add_migrate_disable.patch b/patches/add_migrate_disable.patch
index ed8cad2ced36..2dd2fd565fe7 100644
--- a/patches/add_migrate_disable.patch
+++ b/patches/add_migrate_disable.patch
@@ -6,9 +6,9 @@ Subject: kernel/sched/core: add migrate_disable()
include/linux/preempt.h | 23 ++++++++
include/linux/sched.h | 7 ++
include/linux/smp.h | 3 +
- kernel/sched/core.c | 134 +++++++++++++++++++++++++++++++++++++++++++++++-
+ kernel/sched/core.c | 130 +++++++++++++++++++++++++++++++++++++++++++++++-
kernel/sched/debug.c | 4 +
- 5 files changed, 169 insertions(+), 2 deletions(-)
+ 5 files changed, 165 insertions(+), 2 deletions(-)
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -135,7 +135,7 @@ Subject: kernel/sched/core: add migrate_disable()
dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
-@@ -7357,3 +7386,104 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7357,3 +7386,100 @@ const u32 sched_prio_to_wmult[40] = {
/* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
/* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
@@ -161,8 +161,6 @@ Subject: kernel/sched/core: add migrate_disable()
+ return;
+ }
+
-+ /* get_online_cpus(); */
-+
+ preempt_disable();
+ p->migrate_disable = 1;
+
@@ -231,11 +229,9 @@ Subject: kernel/sched/core: add migrate_disable()
+ preempt_enable();
+ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+ tlb_migrate_finish(p->mm);
-+ /* put_online_cpus(); */
+ return;
+ }
+ }
-+ /* put_online_cpus(); */
+ preempt_enable();
+}
+EXPORT_SYMBOL(migrate_enable);
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index 44e26d774c38..80e8f53b3086 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -288,7 +288,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
-@@ -7520,7 +7523,10 @@ void migrate_enable(void)
+@@ -7518,7 +7521,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch
index 53aa32afb184..dad5f040d7a5 100644
--- a/patches/cond-resched-softirq-rt.patch
+++ b/patches/cond-resched-softirq-rt.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1509,12 +1509,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -1519,12 +1519,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
diff --git a/patches/cpu-hotplug--Implement-CPU-pinning.patch b/patches/cpu-hotplug--Implement-CPU-pinning.patch
new file mode 100644
index 000000000000..3c4a005a1d18
--- /dev/null
+++ b/patches/cpu-hotplug--Implement-CPU-pinning.patch
@@ -0,0 +1,110 @@
+Subject: cpu/hotplug: Implement CPU pinning
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 19 Jul 2017 17:31:20 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/sched.h | 1 +
+ kernel/cpu.c | 40 ++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 41 insertions(+)
+
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -546,6 +546,7 @@ struct task_struct {
+ #if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+ int migrate_disable;
+ int migrate_disable_update;
++ int pinned_on_cpu;
+ # ifdef CONFIG_SCHED_DEBUG
+ int migrate_disable_atomic;
+ # endif
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -66,6 +66,11 @@ struct cpuhp_cpu_state {
+
+ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
+
++#ifdef CONFIG_HOTPLUG_CPU
++static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \
++ __RWLOCK_RT_INITIALIZER(cpuhp_pin_lock);
++#endif
++
+ #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
+ static struct lock_class_key cpuhp_state_key;
+ static struct lockdep_map cpuhp_state_lock_map =
+@@ -216,7 +221,30 @@ static int cpu_hotplug_disabled;
+ */
+ void pin_current_cpu(void)
+ {
++ struct rt_rw_lock *cpuhp_pin;
++ unsigned int cpu;
++ int ret;
+
++again:
++ cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock);
++ ret = __read_rt_trylock(cpuhp_pin);
++ if (ret) {
++ current->pinned_on_cpu = smp_processor_id();
++ return;
++ }
++ cpu = smp_processor_id();
++ preempt_lazy_enable();
++ preempt_enable();
++
++ __read_rt_lock(cpuhp_pin);
++
++ preempt_disable();
++ preempt_lazy_disable();
++ if (cpu != smp_processor_id()) {
++ __read_rt_unlock(cpuhp_pin);
++ goto again;
++ }
++ current->pinned_on_cpu = cpu;
+ }
+
+ /**
+@@ -224,6 +252,13 @@ void pin_current_cpu(void)
+ */
+ void unpin_current_cpu(void)
+ {
++ struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock);
++
++ if (WARN_ON(current->pinned_on_cpu != smp_processor_id()))
++ cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, current->pinned_on_cpu);
++
++ current->pinned_on_cpu = -1;
++ __read_rt_unlock(cpuhp_pin);
+ }
+
+ DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
+@@ -644,6 +679,7 @@ static int take_cpu_down(void *_param)
+
+ static int takedown_cpu(unsigned int cpu)
+ {
++ struct rt_rw_lock *cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, cpu);
+ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+ int err;
+
+@@ -657,11 +693,14 @@ static int takedown_cpu(unsigned int cpu
+ */
+ irq_lock_sparse();
+
++ __write_rt_lock(cpuhp_pin);
++
+ /*
+ * So now all preempt/rcu users must observe !cpu_active().
+ */
+ err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
+ if (err) {
++ __write_rt_unlock(cpuhp_pin);
+ /* CPU refused to die */
+ irq_unlock_sparse();
+ /* Unpark the hotplug thread so we can rollback there */
+@@ -680,6 +719,7 @@ static int takedown_cpu(unsigned int cpu
+ wait_for_completion(&st->done);
+ BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
+
++ __write_rt_unlock(cpuhp_pin);
+ /* Interrupts are moved away from the dying cpu, reenable alloc/free */
+ irq_unlock_sparse();
+
diff --git a/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch b/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
deleted file mode 100644
index c2e77cee0989..000000000000
--- a/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From: Steven Rostedt <rostedt@goodmis.org>
-Date: Thu, 5 Dec 2013 09:16:52 -0500
-Subject: cpu hotplug: Document why PREEMPT_RT uses a spinlock
-
-The patch:
-
- cpu: Make hotplug.lock a "sleeping" spinlock on RT
-
- Tasks can block on hotplug.lock in pin_current_cpu(), but their
- state might be != RUNNING. So the mutex wakeup will set the state
- unconditionally to RUNNING. That might cause spurious unexpected
- wakeups. We could provide a state preserving mutex_lock() function,
- but this is semantically backwards. So instead we convert the
- hotplug.lock() to a spinlock for RT, which has the state preserving
- semantics already.
-
-Fixed a bug where the hotplug lock on PREEMPT_RT can be called after a
-task set its state to TASK_UNINTERRUPTIBLE and before it called
-schedule. If the hotplug_lock used a mutex, and there was contention,
-the current task's state would be turned to TASK_RUNNABLE and the
-schedule call would not sleep. This caused unexpected results.
-
-Although the patch had a description of the change, the code had no
-comments about it. This causes confusion to those that review the code,
-and as PREEMPT_RT is held in a quilt queue and not git, it's not as easy
-to see why a change was made. Even if it was in git, the code should
-still have a comment for something as subtle as this.
-
-Document the rationale for using a spinlock on PREEMPT_RT in the hotplug
-lock code.
-
-Reported-by: Nicholas Mc Guire <der.herr@hofr.at>
-Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/cpu.c | 8 ++++++++
- 1 file changed, 8 insertions(+)
-
---- a/kernel/cpu.c
-+++ b/kernel/cpu.c
-@@ -255,6 +255,14 @@ struct hotplug_pcp {
- int grab_lock;
- struct completion synced;
- #ifdef CONFIG_PREEMPT_RT_FULL
-+ /*
-+ * Note, on PREEMPT_RT, the hotplug lock must save the state of
-+ * the task, otherwise the mutex will cause the task to fail
-+ * to sleep when required. (Because it's called from migrate_disable())
-+ *
-+ * The spinlock_t on PREEMPT_RT is a mutex that saves the task's
-+ * state.
-+ */
- spinlock_t lock;
- #else
- struct mutex mutex;
diff --git a/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch b/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
deleted file mode 100644
index cf7c6bd5bc44..000000000000
--- a/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
+++ /dev/null
@@ -1,114 +0,0 @@
-Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT
-From: Steven Rostedt <rostedt@goodmis.org>
-Date: Fri, 02 Mar 2012 10:36:57 -0500
-
-Tasks can block on hotplug.lock in pin_current_cpu(), but their state
-might be != RUNNING. So the mutex wakeup will set the state
-unconditionally to RUNNING. That might cause spurious unexpected
-wakeups. We could provide a state preserving mutex_lock() function,
-but this is semantically backwards. So instead we convert the
-hotplug.lock() to a spinlock for RT, which has the state preserving
-semantics already.
-
-Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-Cc: Carsten Emde <C.Emde@osadl.org>
-Cc: John Kacur <jkacur@redhat.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Clark Williams <clark.williams@gmail.com>
-
-Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- kernel/cpu.c | 32 +++++++++++++++++++++++++-------
- 1 file changed, 25 insertions(+), 7 deletions(-)
-
---- a/kernel/cpu.c
-+++ b/kernel/cpu.c
-@@ -205,10 +205,16 @@ static int cpu_hotplug_disabled;
-
- static struct {
- struct task_struct *active_writer;
-+
- /* wait queue to wake up the active_writer */
- wait_queue_head_t wq;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ /* Makes the lock keep the task's state */
-+ spinlock_t lock;
-+#else
- /* verifies that no writer will get active while readers are active */
- struct mutex lock;
-+#endif
- /*
- * Also blocks the new readers during
- * an ongoing cpu hotplug operation.
-@@ -221,12 +227,24 @@ static struct {
- } cpu_hotplug = {
- .active_writer = NULL,
- .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
-+#else
- .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
-+#endif
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
- #endif
- };
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define hotplug_lock() rt_spin_lock__no_mg(&cpu_hotplug.lock)
-+# define hotplug_unlock() rt_spin_unlock__no_mg(&cpu_hotplug.lock)
-+#else
-+# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
-+# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
-+#endif
-+
- /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
- #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
- #define cpuhp_lock_acquire_tryread() \
-@@ -263,8 +281,8 @@ void pin_current_cpu(void)
- return;
- }
- preempt_enable();
-- mutex_lock(&cpu_hotplug.lock);
-- mutex_unlock(&cpu_hotplug.lock);
-+ hotplug_lock();
-+ hotplug_unlock();
- preempt_disable();
- goto retry;
- }
-@@ -337,9 +355,9 @@ void get_online_cpus(void)
- if (cpu_hotplug.active_writer == current)
- return;
- cpuhp_lock_acquire_read();
-- mutex_lock(&cpu_hotplug.lock);
-+ hotplug_lock();
- atomic_inc(&cpu_hotplug.refcount);
-- mutex_unlock(&cpu_hotplug.lock);
-+ hotplug_unlock();
- }
- EXPORT_SYMBOL_GPL(get_online_cpus);
-
-@@ -392,11 +410,11 @@ void cpu_hotplug_begin(void)
- cpuhp_lock_acquire();
-
- for (;;) {
-- mutex_lock(&cpu_hotplug.lock);
-+ hotplug_lock();
- prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
- if (likely(!atomic_read(&cpu_hotplug.refcount)))
- break;
-- mutex_unlock(&cpu_hotplug.lock);
-+ hotplug_unlock();
- schedule();
- }
- finish_wait(&cpu_hotplug.wq, &wait);
-@@ -405,7 +423,7 @@ void cpu_hotplug_begin(void)
- void cpu_hotplug_done(void)
- {
- cpu_hotplug.active_writer = NULL;
-- mutex_unlock(&cpu_hotplug.lock);
-+ hotplug_unlock();
- cpuhp_lock_release();
- }
-
diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch
deleted file mode 100644
index 522e8389d299..000000000000
--- a/patches/cpu-rt-rework-cpu-down.patch
+++ /dev/null
@@ -1,525 +0,0 @@
-From: Steven Rostedt <srostedt@redhat.com>
-Date: Mon, 16 Jul 2012 08:07:43 +0000
-Subject: cpu/rt: Rework cpu down for PREEMPT_RT
-
-Bringing a CPU down is a pain with the PREEMPT_RT kernel because
-tasks can be preempted in many more places than in non-RT. In
-order to handle per_cpu variables, tasks may be pinned to a CPU
-for a while, and even sleep. But these tasks need to be off the CPU
-if that CPU is going down.
-
-Several synchronization methods have been tried, but when stressed
-they failed. This is a new approach.
-
-A sync_tsk thread is still created and tasks may still block on a
-lock when the CPU is going down, but how that works is a bit different.
-When cpu_down() starts, it will create the sync_tsk and wait for it
-to report that the tasks currently pinned to the CPU are no longer
-pinned. But new tasks that are about to be pinned will still be allowed
-to do so at this time.
-
-Then the notifiers are called. Several notifiers will bring down tasks
-that will enter these locations. Some of these tasks will take locks
-of other tasks that are on the CPU. If we don't let those other tasks
-continue, but make them block until CPU down is done, the tasks that
-the notifiers are waiting on will never complete as they are waiting
-for the locks held by the tasks that are blocked.
-
-Thus we still let the task pin the CPU until the notifiers are done.
-After the notifiers run, we then make new tasks entering the pinned
-CPU sections grab a mutex and wait. This mutex is now a per CPU mutex
-in the hotplug_pcp descriptor.
-
-To help things along, a new function in the scheduler code is created
-called migrate_me(). This function will try to migrate the current task
-off the CPU that is going down if possible. When the sync_tsk is created,
-all tasks will then try to migrate off the CPU going down. There are
-several cases that this won't work, but it helps in most cases.
-
-After the notifiers are called and if a task can't migrate off but enters
-the pinned CPU sections, it will be forced to wait on the hotplug_pcp mutex
-until the CPU down is complete. Then the scheduler will force the migration
-anyway.
-
-Also, I found that THREAD_BOUND needs to also be accounted for in the
-pinned CPU, and the migrate_disable no longer treats them specially.
-This helps fix issues with ksoftirqd and workqueue that unbind on CPU down.
-
-Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- include/linux/sched.h | 7 +
- kernel/cpu.c | 236 +++++++++++++++++++++++++++++++++++++++++---------
- kernel/sched/core.c | 78 ++++++++++++++++
- 3 files changed, 280 insertions(+), 41 deletions(-)
-
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -1342,6 +1342,10 @@ extern int task_can_attach(struct task_s
- #ifdef CONFIG_SMP
- extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
- extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
-+int migrate_me(void);
-+void tell_sched_cpu_down_begin(int cpu);
-+void tell_sched_cpu_down_done(int cpu);
-+
- #else
- static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
- {
-@@ -1352,6 +1356,9 @@ static inline int set_cpus_allowed_ptr(s
- return -EINVAL;
- return 0;
- }
-+static inline int migrate_me(void) { return 0; }
-+static inline void tell_sched_cpu_down_begin(int cpu) { }
-+static inline void tell_sched_cpu_down_done(int cpu) { }
- #endif
-
- #ifndef cpu_relax_yield
---- a/kernel/cpu.c
-+++ b/kernel/cpu.c
-@@ -205,16 +205,10 @@ static int cpu_hotplug_disabled;
-
- static struct {
- struct task_struct *active_writer;
--
- /* wait queue to wake up the active_writer */
- wait_queue_head_t wq;
--#ifdef CONFIG_PREEMPT_RT_FULL
-- /* Makes the lock keep the task's state */
-- spinlock_t lock;
--#else
- /* verifies that no writer will get active while readers are active */
- struct mutex lock;
--#endif
- /*
- * Also blocks the new readers during
- * an ongoing cpu hotplug operation.
-@@ -227,24 +221,12 @@ static struct {
- } cpu_hotplug = {
- .active_writer = NULL,
- .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
--#ifdef CONFIG_PREEMPT_RT_FULL
-- .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
--#else
- .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
--#endif
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
- #endif
- };
-
--#ifdef CONFIG_PREEMPT_RT_FULL
--# define hotplug_lock() rt_spin_lock__no_mg(&cpu_hotplug.lock)
--# define hotplug_unlock() rt_spin_unlock__no_mg(&cpu_hotplug.lock)
--#else
--# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
--# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
--#endif
--
- /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
- #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
- #define cpuhp_lock_acquire_tryread() \
-@@ -252,12 +234,42 @@ static struct {
- #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
- #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
-
-+/**
-+ * hotplug_pcp - per cpu hotplug descriptor
-+ * @unplug: set when pin_current_cpu() needs to sync tasks
-+ * @sync_tsk: the task that waits for tasks to finish pinned sections
-+ * @refcount: counter of tasks in pinned sections
-+ * @grab_lock: set when the tasks entering pinned sections should wait
-+ * @synced: notifier for @sync_tsk to tell cpu_down it's finished
-+ * @mutex: the mutex to make tasks wait (used when @grab_lock is true)
-+ * @mutex_init: zero if the mutex hasn't been initialized yet.
-+ *
-+ * Although @unplug and @sync_tsk may point to the same task, the @unplug
-+ * is used as a flag and still exists after @sync_tsk has exited and
-+ * @sync_tsk set to NULL.
-+ */
- struct hotplug_pcp {
- struct task_struct *unplug;
-+ struct task_struct *sync_tsk;
- int refcount;
-+ int grab_lock;
- struct completion synced;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ spinlock_t lock;
-+#else
-+ struct mutex mutex;
-+#endif
-+ int mutex_init;
- };
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define hotplug_lock(hp) rt_spin_lock__no_mg(&(hp)->lock)
-+# define hotplug_unlock(hp) rt_spin_unlock__no_mg(&(hp)->lock)
-+#else
-+# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
-+# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
-+#endif
-+
- static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
-
- /**
-@@ -271,18 +283,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
- void pin_current_cpu(void)
- {
- struct hotplug_pcp *hp;
-+ int force = 0;
-
- retry:
- hp = this_cpu_ptr(&hotplug_pcp);
-
-- if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
-+ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
- hp->unplug == current) {
- hp->refcount++;
- return;
- }
-- preempt_enable();
-- hotplug_lock();
-- hotplug_unlock();
-+ if (hp->grab_lock) {
-+ preempt_enable();
-+ hotplug_lock(hp);
-+ hotplug_unlock(hp);
-+ } else {
-+ preempt_enable();
-+ /*
-+ * Try to push this task off of this CPU.
-+ */
-+ if (!migrate_me()) {
-+ preempt_disable();
-+ hp = this_cpu_ptr(&hotplug_pcp);
-+ if (!hp->grab_lock) {
-+ /*
-+				 * Just let it continue, it's already pinned
-+ * or about to sleep.
-+ */
-+ force = 1;
-+ goto retry;
-+ }
-+ preempt_enable();
-+ }
-+ }
- preempt_disable();
- goto retry;
- }
-@@ -303,26 +336,84 @@ void unpin_current_cpu(void)
- wake_up_process(hp->unplug);
- }
-
--/*
-- * FIXME: Is this really correct under all circumstances ?
-- */
-+static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
-+{
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ while (hp->refcount) {
-+ schedule_preempt_disabled();
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ }
-+}
-+
- static int sync_unplug_thread(void *data)
- {
- struct hotplug_pcp *hp = data;
-
- preempt_disable();
- hp->unplug = current;
-+ wait_for_pinned_cpus(hp);
-+
-+ /*
-+ * This thread will synchronize the cpu_down() with threads
-+ * that have pinned the CPU. When the pinned CPU count reaches
-+ * zero, we inform the cpu_down code to continue to the next step.
-+ */
- set_current_state(TASK_UNINTERRUPTIBLE);
-- while (hp->refcount) {
-- schedule_preempt_disabled();
-+ preempt_enable();
-+ complete(&hp->synced);
-+
-+ /*
-+ * If all succeeds, the next step will need tasks to wait till
-+ * the CPU is offline before continuing. To do this, the grab_lock
-+ * is set and tasks going into pin_current_cpu() will block on the
-+ * mutex. But we still need to wait for those that are already in
-+ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
-+ * will kick this thread out.
-+ */
-+ while (!hp->grab_lock && !kthread_should_stop()) {
-+ schedule();
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ }
-+
-+ /* Make sure grab_lock is seen before we see a stale completion */
-+ smp_mb();
-+
-+ /*
-+ * Now just before cpu_down() enters stop machine, we need to make
-+ * sure all tasks that are in pinned CPU sections are out, and new
-+ * tasks will now grab the lock, keeping them from entering pinned
-+ * CPU sections.
-+ */
-+ if (!kthread_should_stop()) {
-+ preempt_disable();
-+ wait_for_pinned_cpus(hp);
-+ preempt_enable();
-+ complete(&hp->synced);
-+ }
-+
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ while (!kthread_should_stop()) {
-+ schedule();
- set_current_state(TASK_UNINTERRUPTIBLE);
- }
- set_current_state(TASK_RUNNING);
-- preempt_enable();
-- complete(&hp->synced);
-+
-+ /*
-+ * Force this thread off this CPU as it's going down and
-+ * we don't want any more work on this CPU.
-+ */
-+ current->flags &= ~PF_NO_SETAFFINITY;
-+ do_set_cpus_allowed(current, cpu_present_mask);
-+ migrate_me();
- return 0;
- }
-
-+static void __cpu_unplug_sync(struct hotplug_pcp *hp)
-+{
-+ wake_up_process(hp->sync_tsk);
-+ wait_for_completion(&hp->synced);
-+}
-+
- /*
- * Start the sync_unplug_thread on the target cpu and wait for it to
- * complete.
-@@ -330,23 +421,83 @@ static int sync_unplug_thread(void *data
- static int cpu_unplug_begin(unsigned int cpu)
- {
- struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
-- struct task_struct *tsk;
-+ int err;
-+
-+ /* Protected by cpu_hotplug.lock */
-+ if (!hp->mutex_init) {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ spin_lock_init(&hp->lock);
-+#else
-+ mutex_init(&hp->mutex);
-+#endif
-+ hp->mutex_init = 1;
-+ }
-+
-+ /* Inform the scheduler to migrate tasks off this CPU */
-+ tell_sched_cpu_down_begin(cpu);
-
- init_completion(&hp->synced);
-- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
-- if (IS_ERR(tsk))
-- return (PTR_ERR(tsk));
-- kthread_bind(tsk, cpu);
-- wake_up_process(tsk);
-- wait_for_completion(&hp->synced);
-+
-+ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
-+ if (IS_ERR(hp->sync_tsk)) {
-+ err = PTR_ERR(hp->sync_tsk);
-+ hp->sync_tsk = NULL;
-+ return err;
-+ }
-+ kthread_bind(hp->sync_tsk, cpu);
-+
-+ /*
-+ * Wait for tasks to get out of the pinned sections,
-+ * it's still OK if new tasks enter. Some CPU notifiers will
-+ * wait for tasks that are going to enter these sections and
-+ * we must not have them block.
-+ */
-+ __cpu_unplug_sync(hp);
-+
- return 0;
- }
-
-+static void cpu_unplug_sync(unsigned int cpu)
-+{
-+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
-+
-+ init_completion(&hp->synced);
-+	/* The completion needs to be initialized before setting grab_lock */
-+ smp_wmb();
-+
-+ /* Grab the mutex before setting grab_lock */
-+ hotplug_lock(hp);
-+ hp->grab_lock = 1;
-+
-+ /*
-+ * The CPU notifiers have been completed.
-+ * Wait for tasks to get out of pinned CPU sections and have new
-+ * tasks block until the CPU is completely down.
-+ */
-+ __cpu_unplug_sync(hp);
-+
-+ /* All done with the sync thread */
-+ kthread_stop(hp->sync_tsk);
-+ hp->sync_tsk = NULL;
-+}
-+
- static void cpu_unplug_done(unsigned int cpu)
- {
- struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
-
- hp->unplug = NULL;
-+ /* Let all tasks know cpu unplug is finished before cleaning up */
-+ smp_wmb();
-+
-+ if (hp->sync_tsk)
-+ kthread_stop(hp->sync_tsk);
-+
-+ if (hp->grab_lock) {
-+ hotplug_unlock(hp);
-+ /* protected by cpu_hotplug.lock */
-+ hp->grab_lock = 0;
-+ }
-+ tell_sched_cpu_down_done(cpu);
- }
-
- void get_online_cpus(void)
-@@ -355,9 +506,9 @@ void get_online_cpus(void)
- if (cpu_hotplug.active_writer == current)
- return;
- cpuhp_lock_acquire_read();
-- hotplug_lock();
-+ mutex_lock(&cpu_hotplug.lock);
- atomic_inc(&cpu_hotplug.refcount);
-- hotplug_unlock();
-+ mutex_unlock(&cpu_hotplug.lock);
- }
- EXPORT_SYMBOL_GPL(get_online_cpus);
-
-@@ -410,11 +561,11 @@ void cpu_hotplug_begin(void)
- cpuhp_lock_acquire();
-
- for (;;) {
-- hotplug_lock();
-+ mutex_lock(&cpu_hotplug.lock);
- prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
- if (likely(!atomic_read(&cpu_hotplug.refcount)))
- break;
-- hotplug_unlock();
-+ mutex_unlock(&cpu_hotplug.lock);
- schedule();
- }
- finish_wait(&cpu_hotplug.wq, &wait);
-@@ -423,7 +574,7 @@ void cpu_hotplug_begin(void)
- void cpu_hotplug_done(void)
- {
- cpu_hotplug.active_writer = NULL;
-- hotplug_unlock();
-+ mutex_unlock(&cpu_hotplug.lock);
- cpuhp_lock_release();
- }
-
-@@ -816,6 +967,9 @@ static int takedown_cpu(unsigned int cpu
- kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
- smpboot_park_threads(cpu);
-
-+ /* Notifiers are done. Don't let any more tasks pin this CPU. */
-+ cpu_unplug_sync(cpu);
-+
- /*
- * Prevent irq alloc/free while the dying cpu reorganizes the
- * interrupt affinities.
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -1122,6 +1122,84 @@ void do_set_cpus_allowed(struct task_str
- __do_set_cpus_allowed_tail(p, new_mask);
- }
-
-+static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
-+static DEFINE_MUTEX(sched_down_mutex);
-+static cpumask_t sched_down_cpumask;
-+
-+void tell_sched_cpu_down_begin(int cpu)
-+{
-+ mutex_lock(&sched_down_mutex);
-+ cpumask_set_cpu(cpu, &sched_down_cpumask);
-+ mutex_unlock(&sched_down_mutex);
-+}
-+
-+void tell_sched_cpu_down_done(int cpu)
-+{
-+ mutex_lock(&sched_down_mutex);
-+ cpumask_clear_cpu(cpu, &sched_down_cpumask);
-+ mutex_unlock(&sched_down_mutex);
-+}
-+
-+/**
-+ * migrate_me - try to move the current task off this cpu
-+ *
-+ * Used by the pin_current_cpu() code to try to get tasks
-+ * to move off the current CPU as it is going down.
-+ * It will only move the task if the task isn't pinned to
-+ * the CPU (with migrate_disable, affinity or NO_SETAFFINITY)
-+ * and the task has to be in a RUNNING state. Otherwise the
-+ * movement of the task will wake it up (change its state
-+ * to running) when the task did not expect it.
-+ *
-+ * Returns 1 if it succeeded in moving the current task
-+ * 0 otherwise.
-+ */
-+int migrate_me(void)
-+{
-+ struct task_struct *p = current;
-+ struct migration_arg arg;
-+ struct cpumask *cpumask;
-+ const struct cpumask *mask;
-+ unsigned int dest_cpu;
-+ struct rq_flags rf;
-+ struct rq *rq;
-+
-+ /*
-+	 * We cannot migrate tasks bound to a CPU or tasks not
-+ * running. The movement of the task will wake it up.
-+ */
-+ if (p->flags & PF_NO_SETAFFINITY || p->state)
-+ return 0;
-+
-+ mutex_lock(&sched_down_mutex);
-+ rq = task_rq_lock(p, &rf);
-+
-+ cpumask = this_cpu_ptr(&sched_cpumasks);
-+ mask = p->cpus_ptr;
-+
-+ cpumask_andnot(cpumask, mask, &sched_down_cpumask);
-+
-+ if (!cpumask_weight(cpumask)) {
-+ /* It's only on this CPU? */
-+ task_rq_unlock(rq, p, &rf);
-+ mutex_unlock(&sched_down_mutex);
-+ return 0;
-+ }
-+
-+ dest_cpu = cpumask_any_and(cpu_active_mask, cpumask);
-+
-+ arg.task = p;
-+ arg.dest_cpu = dest_cpu;
-+
-+ task_rq_unlock(rq, p, &rf);
-+
-+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
-+ tlb_migrate_finish(p->mm);
-+ mutex_unlock(&sched_down_mutex);
-+
-+ return 1;
-+}
-+
- /*
- * Change a given task's CPU affinity. Migrate the thread to a
- * proper CPU and schedule it away if the CPU it's executing on
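The migrate_me() helper above is meant to be called by a task that notices its CPU is about to be unplugged: widen the affinity first, then ask to be pushed away. A minimal caller sketch, assuming only the helpers shown in this series (the pr_warn() message and control flow are illustrative, not from any patch):

	/* About to let the current CPU go down: widen affinity first */
	current->flags &= ~PF_NO_SETAFFINITY;
	set_cpus_allowed_ptr(current, cpu_present_mask);
	if (!migrate_me())
		/* pinned, or not in TASK_RUNNING: moving would wake us */
		pr_warn("migrate_me: still on the outgoing CPU\n");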
diff --git a/patches/cpu_down_move_migrate_enable_back.patch b/patches/cpu_down_move_migrate_enable_back.patch
deleted file mode 100644
index 8fd55e52cd89..000000000000
--- a/patches/cpu_down_move_migrate_enable_back.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From: Tiejun Chen <tiejun.chen@windriver.com>
-Subject: cpu_down: move migrate_enable() back
-Date: Thu, 7 Nov 2013 10:06:07 +0800
-
-Commit 08c1ab68, "hotplug-use-migrate-disable.patch", intends to
-use migrate_enable()/migrate_disable() to replace the combination
-of preempt_enable() and preempt_disable(), but in the
-!CONFIG_PREEMPT_RT_FULL case migrate_enable()/migrate_disable()
-are still equivalent to preempt_enable()/preempt_disable(). The
-following cpu_hotplug_begin()/cpu_unplug_begin(cpu) would then call
-schedule() and trigger schedule_debug() like this:
-
-_cpu_down()
- |
- + migrate_disable() = preempt_disable()
- |
- + cpu_hotplug_begin() or cpu_unplug_begin()
- |
- + schedule()
- |
- + __schedule()
- |
- + preempt_disable();
- |
- + __schedule_bug() is true!
-
-So we should move migrate_enable() back, as in the original scheme.
-
-
-Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
----
- kernel/cpu.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/kernel/cpu.c
-+++ b/kernel/cpu.c
-@@ -1094,6 +1094,7 @@ static int __ref _cpu_down(unsigned int
- goto restore_cpus;
- }
-
-+ migrate_enable();
- cpu_hotplug_begin();
- ret = cpu_unplug_begin(cpu);
- if (ret) {
-@@ -1140,7 +1141,6 @@ static int __ref _cpu_down(unsigned int
- cpu_unplug_done(cpu);
- out_cancel:
- cpu_hotplug_done();
-- migrate_enable();
- restore_cpus:
- set_cpus_allowed_ptr(current, cpumask_org);
- free_cpumask_var(cpumask_org);
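Distilled, the bug the removed changelog describes comes down to calling a blocking primitive while migrate_disable() still maps to preempt_disable(). A sketch of the broken versus fixed ordering on !CONFIG_PREEMPT_RT_FULL (illustrative pseudocode, not a hunk from this queue):

	/* broken: preempt_count is 1 when cpu_hotplug_begin() sleeps */
	migrate_disable();		/* == preempt_disable() here */
	cpu_hotplug_begin();		/* schedule() -> schedule_debug()
					 * sees preempt_count != 0
					 * -> __schedule_bug() */

	/* fixed: re-enable migration before any blocking call */
	migrate_enable();
	cpu_hotplug_begin();		/* preempt_count is 0, safe to sleep */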
diff --git a/patches/fs-dcache-init-in_lookup_hashtable.patch b/patches/fs-dcache-init-in_lookup_hashtable.patch
index 2ff9152cd748..0d3d30c91b67 100644
--- a/patches/fs-dcache-init-in_lookup_hashtable.patch
+++ b/patches/fs-dcache-init-in_lookup_hashtable.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/dcache.c
+++ b/fs/dcache.c
-@@ -3610,6 +3610,11 @@ EXPORT_SYMBOL(d_genocide);
+@@ -3611,6 +3611,11 @@ EXPORT_SYMBOL(d_genocide);
void __init vfs_caches_init_early(void)
{
diff --git a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
index 0140bb1aec0e..caacbbf4059d 100644
--- a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
+++ b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto repeat;
}
}
-@@ -2330,7 +2342,7 @@ void d_delete(struct dentry * dentry)
+@@ -2331,7 +2343,7 @@ void d_delete(struct dentry * dentry)
if (dentry->d_lockref.count == 1) {
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
-@@ -357,7 +358,7 @@ int __mnt_want_write(struct vfsmount *m)
+@@ -358,7 +359,7 @@ int __mnt_want_write(struct vfsmount *m)
smp_mb();
while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
preempt_enable();
diff --git a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
index 554060950cae..32a09b14f9f7 100644
--- a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+++ b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/dcache.c
+++ b/fs/dcache.c
-@@ -2402,21 +2402,24 @@ static inline void end_dir_add(struct in
+@@ -2403,21 +2403,24 @@ static inline void end_dir_add(struct in
static void d_wait_lookup(struct dentry *dentry)
{
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
unsigned int hash = name->hash;
struct hlist_bl_head *b = in_lookup_hash(parent, hash);
-@@ -2525,7 +2528,7 @@ void __d_lookup_done(struct dentry *dent
+@@ -2526,7 +2529,7 @@ void __d_lookup_done(struct dentry *dent
hlist_bl_lock(b);
dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
@@ -162,7 +162,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto end_instantiate;
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
-@@ -665,7 +665,7 @@ static bool proc_sys_fill_cache(struct f
+@@ -682,7 +682,7 @@ static bool proc_sys_fill_cache(struct f
child = d_lookup(dir, &qname);
if (!child) {
diff --git a/patches/fs-namespace-preemption-fix.patch b/patches/fs-namespace-preemption-fix.patch
index 8f022a6c6d40..b9434a1f0fd0 100644
--- a/patches/fs-namespace-preemption-fix.patch
+++ b/patches/fs-namespace-preemption-fix.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/namespace.c
+++ b/fs/namespace.c
-@@ -355,8 +355,11 @@ int __mnt_want_write(struct vfsmount *m)
+@@ -356,8 +356,11 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
diff --git a/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch b/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
deleted file mode 100644
index a81e4bceaf09..000000000000
--- a/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From: Mike Galbraith <umgwanakikbuti@gmail.com>
-Date: Tue, 24 Mar 2015 08:14:49 +0100
-Subject: hotplug: Use set_cpus_allowed_ptr() in sync_unplug_thread()
-
-do_set_cpus_allowed() is not safe vs ->sched_class change.
-
-crash> bt
-PID: 11676 TASK: ffff88026f979da0 CPU: 22 COMMAND: "sync_unplug/22"
- #0 [ffff880274d25bc8] machine_kexec at ffffffff8103b41c
- #1 [ffff880274d25c18] crash_kexec at ffffffff810d881a
- #2 [ffff880274d25cd8] oops_end at ffffffff81525818
- #3 [ffff880274d25cf8] do_invalid_op at ffffffff81003096
- #4 [ffff880274d25d90] invalid_op at ffffffff8152d3de
- [exception RIP: set_cpus_allowed_rt+18]
- RIP: ffffffff8109e012 RSP: ffff880274d25e48 RFLAGS: 00010202
- RAX: ffffffff8109e000 RBX: ffff88026f979da0 RCX: ffff8802770cb6e8
- RDX: 0000000000000000 RSI: ffffffff81add700 RDI: ffff88026f979da0
- RBP: ffff880274d25e78 R8: ffffffff816112e0 R9: 0000000000000001
- R10: 0000000000000001 R11: 0000000000011940 R12: ffff88026f979da0
- R13: ffff8802770cb6d0 R14: ffff880274d25fd8 R15: 0000000000000000
- ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018
- #5 [ffff880274d25e60] do_set_cpus_allowed at ffffffff8108e65f
- #6 [ffff880274d25e80] sync_unplug_thread at ffffffff81058c08
- #7 [ffff880274d25ed8] kthread at ffffffff8107cad6
- #8 [ffff880274d25f50] ret_from_fork at ffffffff8152bbbc
-crash> task_struct ffff88026f979da0 | grep class
- sched_class = 0xffffffff816111e0 <fair_sched_class+64>,
-
-Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/cpu.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/kernel/cpu.c
-+++ b/kernel/cpu.c
-@@ -413,7 +413,7 @@ static int sync_unplug_thread(void *data
- * we don't want any more work on this CPU.
- */
- current->flags &= ~PF_NO_SETAFFINITY;
-- do_set_cpus_allowed(current, cpu_present_mask);
-+ set_cpus_allowed_ptr(current, cpu_present_mask);
- migrate_me();
- return 0;
- }
diff --git a/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch b/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
new file mode 100644
index 000000000000..eb02caf01c57
--- /dev/null
+++ b/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
@@ -0,0 +1,95 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 4 Aug 2017 18:31:00 +0200
+Subject: [PATCH] hotplug: duct-tape RT-rwlock usage for non-RT
+
+This type is only available on -RT. We need to craft something for
+non-RT. Since the only migrate_disable() user is -RT, there is no
+damage.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/cpu.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -66,7 +66,7 @@ struct cpuhp_cpu_state {
+
+ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
+
+-#ifdef CONFIG_HOTPLUG_CPU
++#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PREEMPT_RT_FULL)
+ static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \
+ __RWLOCK_RT_INITIALIZER(cpuhp_pin_lock);
+ #endif
+@@ -221,6 +221,7 @@ static int cpu_hotplug_disabled;
+ */
+ void pin_current_cpu(void)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
+ struct rt_rw_lock *cpuhp_pin;
+ unsigned int cpu;
+ int ret;
+@@ -245,6 +246,7 @@ void pin_current_cpu(void)
+ goto again;
+ }
+ current->pinned_on_cpu = cpu;
++#endif
+ }
+
+ /**
+@@ -252,6 +254,7 @@ void pin_current_cpu(void)
+ */
+ void unpin_current_cpu(void)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
+ struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock);
+
+ if (WARN_ON(current->pinned_on_cpu != smp_processor_id()))
+@@ -259,6 +262,7 @@ void unpin_current_cpu(void)
+
+ current->pinned_on_cpu = -1;
+ __read_rt_unlock(cpuhp_pin);
++#endif
+ }
+
+ DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
+@@ -679,7 +683,9 @@ static int take_cpu_down(void *_param)
+
+ static int takedown_cpu(unsigned int cpu)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
+ struct rt_rw_lock *cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, cpu);
++#endif
+ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+ int err;
+
+@@ -693,14 +699,18 @@ static int takedown_cpu(unsigned int cpu
+ */
+ irq_lock_sparse();
+
++#ifdef CONFIG_PREEMPT_RT_FULL
+ __write_rt_lock(cpuhp_pin);
++#endif
+
+ /*
+ * So now all preempt/rcu users must observe !cpu_active().
+ */
+ err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
+ if (err) {
++#ifdef CONFIG_PREEMPT_RT_FULL
+ __write_rt_unlock(cpuhp_pin);
++#endif
+ /* CPU refused to die */
+ irq_unlock_sparse();
+ /* Unpark the hotplug thread so we can rollback there */
+@@ -719,7 +729,9 @@ static int takedown_cpu(unsigned int cpu
+ wait_for_completion(&st->done);
+ BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
+ __write_rt_unlock(cpuhp_pin);
++#endif
+ /* Interrupts are moved away from the dying cpu, reenable alloc/free */
+ irq_unlock_sparse();
+
diff --git a/patches/hotplug-light-get-online-cpus.patch b/patches/hotplug-light-get-online-cpus.patch
index 37bbe43a1b50..5ddb9c5c2b06 100644
--- a/patches/hotplug-light-get-online-cpus.patch
+++ b/patches/hotplug-light-get-online-cpus.patch
@@ -12,192 +12,67 @@ tasks on the cpu which should be brought down.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/cpu.h | 5 ++
- kernel/cpu.c | 118 ++++++++++++++++++++++++++++++++++++++++++++++++++++
- kernel/sched/core.c | 4 +
- 3 files changed, 127 insertions(+)
+ include/linux/cpu.h | 5 +++++
+ kernel/cpu.c | 15 +++++++++++++++
+ kernel/sched/core.c | 4 ++++
+ 3 files changed, 24 insertions(+)
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
-@@ -109,6 +109,8 @@ extern void cpu_hotplug_disable(void);
+@@ -108,6 +108,8 @@ extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);
+extern void pin_current_cpu(void);
+extern void unpin_current_cpu(void);
- #else /* CONFIG_HOTPLUG_CPU */
+ #else /* CONFIG_HOTPLUG_CPU */
-@@ -118,6 +120,9 @@ static inline void cpu_hotplug_done(void
- #define put_online_cpus() do { } while (0)
- #define cpu_hotplug_disable() do { } while (0)
- #define cpu_hotplug_enable() do { } while (0)
-+static inline void pin_current_cpu(void) { }
-+static inline void unpin_current_cpu(void) { }
+@@ -118,6 +120,9 @@ static inline void cpus_read_unlock(void
+ static inline void lockdep_assert_cpus_held(void) { }
+ static inline void cpu_hotplug_disable(void) { }
+ static inline void cpu_hotplug_enable(void) { }
++static inline void pin_current_cpu(void) { }
++static inline void unpin_current_cpu(void) { }
+
- #endif /* CONFIG_HOTPLUG_CPU */
+ #endif /* !CONFIG_HOTPLUG_CPU */
- #ifdef CONFIG_PM_SLEEP_SMP
+ /* Wrappers which go away once all code is converted */
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -234,6 +234,100 @@ static struct {
- #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
- #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
+@@ -211,6 +211,21 @@ static int cpu_hotplug_disabled;
+
+ #ifdef CONFIG_HOTPLUG_CPU
-+struct hotplug_pcp {
-+ struct task_struct *unplug;
-+ int refcount;
-+ struct completion synced;
-+};
-+
-+static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
-+
+/**
+ * pin_current_cpu - Prevent the current cpu from being unplugged
-+ *
-+ * Lightweight version of get_online_cpus() to prevent cpu from being
-+ * unplugged when code runs in a migration disabled region.
-+ *
-+ * Must be called with preemption disabled (preempt_count = 1)!
+ */
+void pin_current_cpu(void)
+{
-+ struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);
+
-+retry:
-+ if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
-+ hp->unplug == current) {
-+ hp->refcount++;
-+ return;
-+ }
-+ preempt_enable();
-+ mutex_lock(&cpu_hotplug.lock);
-+ mutex_unlock(&cpu_hotplug.lock);
-+ preempt_disable();
-+ goto retry;
+}
+
+/**
+ * unpin_current_cpu - Allow unplug of current cpu
-+ *
-+ * Must be called with preemption or interrupts disabled!
+ */
+void unpin_current_cpu(void)
+{
-+ struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);
-+
-+ WARN_ON(hp->refcount <= 0);
-+
-+ /* This is safe. sync_unplug_thread is pinned to this cpu */
-+ if (!--hp->refcount && hp->unplug && hp->unplug != current)
-+ wake_up_process(hp->unplug);
-+}
-+
-+/*
-+ * FIXME: Is this really correct under all circumstances ?
-+ */
-+static int sync_unplug_thread(void *data)
-+{
-+ struct hotplug_pcp *hp = data;
-+
-+ preempt_disable();
-+ hp->unplug = current;
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ while (hp->refcount) {
-+ schedule_preempt_disabled();
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ }
-+ set_current_state(TASK_RUNNING);
-+ preempt_enable();
-+ complete(&hp->synced);
-+ return 0;
+}
+
-+/*
-+ * Start the sync_unplug_thread on the target cpu and wait for it to
-+ * complete.
-+ */
-+static int cpu_unplug_begin(unsigned int cpu)
-+{
-+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
-+ struct task_struct *tsk;
-+
-+ init_completion(&hp->synced);
-+ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu);
-+ if (IS_ERR(tsk))
-+ return (PTR_ERR(tsk));
-+ kthread_bind(tsk, cpu);
-+ wake_up_process(tsk);
-+ wait_for_completion(&hp->synced);
-+ return 0;
-+}
-+
-+static void cpu_unplug_done(unsigned int cpu)
-+{
-+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
-+
-+ hp->unplug = NULL;
-+}
+ DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
- void get_online_cpus(void)
- {
-@@ -776,6 +870,8 @@ static int __ref _cpu_down(unsigned int
- {
- struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
- int prev_state, ret = 0;
-+ int mycpu;
-+ cpumask_var_t cpumask;
-
- if (num_online_cpus() == 1)
- return -EBUSY;
-@@ -783,7 +879,27 @@ static int __ref _cpu_down(unsigned int
- if (!cpu_present(cpu))
- return -EINVAL;
-
-+ /* Move the downtaker off the unplug cpu */
-+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
-+ return -ENOMEM;
-+ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
-+ set_cpus_allowed_ptr(current, cpumask);
-+ free_cpumask_var(cpumask);
-+ preempt_disable();
-+ mycpu = smp_processor_id();
-+ if (mycpu == cpu) {
-+ printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
-+ preempt_enable();
-+ return -EBUSY;
-+ }
-+ preempt_enable();
-+
- cpu_hotplug_begin();
-+ ret = cpu_unplug_begin(cpu);
-+ if (ret) {
-+ printk("cpu_unplug_begin(%d) failed\n", cpu);
-+ goto out_cancel;
-+ }
-
- cpuhp_tasks_frozen = tasks_frozen;
-
-@@ -821,6 +937,8 @@ static int __ref _cpu_down(unsigned int
- }
-
- out:
-+ cpu_unplug_done(cpu);
-+out_cancel:
- cpu_hotplug_done();
- return ret;
- }
+ void cpus_read_lock(void)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7471,6 +7471,7 @@ void migrate_disable(void)
- /* get_online_cpus(); */
+@@ -7469,6 +7469,7 @@ void migrate_disable(void)
+ }
preempt_disable();
+ pin_current_cpu();
p->migrate_disable = 1;
p->cpus_ptr = cpumask_of(smp_processor_id());
-@@ -7535,13 +7536,16 @@ void migrate_enable(void)
+@@ -7533,12 +7534,15 @@ void migrate_enable(void)
arg.task = p;
arg.dest_cpu = dest_cpu;
@@ -205,12 +80,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
- /* put_online_cpus(); */
+
return;
}
}
+ unpin_current_cpu();
- /* put_online_cpus(); */
preempt_enable();
}
+ EXPORT_SYMBOL(migrate_enable);
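In caller terms the result is that a migrate-disabled region is also a hotplug-pinned region: the task stays preemptible, but its CPU cannot be taken down underneath it. A sketch of the semantics (my_counter is a made-up per-cpu variable, not from the patch):

	static DEFINE_PER_CPU(unsigned long, my_counter);

	migrate_disable();	/* pins the CPU via pin_current_cpu() */
	/*
	 * Preemptible, but this CPU cannot be unplugged while we
	 * manipulate its per-cpu data.
	 */
	this_cpu_inc(my_counter);
	migrate_enable();	/* unpin_current_cpu() allows unplug again */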
diff --git a/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch b/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
deleted file mode 100644
index 1a649d432304..000000000000
--- a/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-Subject: hotplug: sync_unplug: No "\n" in task name
-From: Yong Zhang <yong.zhang0@gmail.com>
-Date: Sun, 16 Oct 2011 18:56:43 +0800
-
-Otherwise the output will look a little odd.
-
-Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
-Link: http://lkml.kernel.org/r/1318762607-2261-2-git-send-email-yong.zhang0@gmail.com
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- kernel/cpu.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/kernel/cpu.c
-+++ b/kernel/cpu.c
-@@ -313,7 +313,7 @@ static int cpu_unplug_begin(unsigned int
- struct task_struct *tsk;
-
- init_completion(&hp->synced);
-- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu);
-+ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
- if (IS_ERR(tsk))
- return (PTR_ERR(tsk));
- kthread_bind(tsk, cpu);
diff --git a/patches/hotplug-use-migrate-disable.patch b/patches/hotplug-use-migrate-disable.patch
deleted file mode 100644
index 4cb9ba5068bf..000000000000
--- a/patches/hotplug-use-migrate-disable.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-Subject: hotplug: Use migrate disable on unplug
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Sun, 17 Jul 2011 19:35:29 +0200
-
-Migration needs to be disabled across the unplug handling to make
-sure that the unplug thread is off the unplugged cpu.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- kernel/cpu.c | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
---- a/kernel/cpu.c
-+++ b/kernel/cpu.c
-@@ -887,14 +887,13 @@ static int __ref _cpu_down(unsigned int
- cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
- set_cpus_allowed_ptr(current, cpumask);
- free_cpumask_var(cpumask);
-- preempt_disable();
-+ migrate_disable();
- mycpu = smp_processor_id();
- if (mycpu == cpu) {
- printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
-- preempt_enable();
-+ migrate_enable();
- return -EBUSY;
- }
-- preempt_enable();
-
- cpu_hotplug_begin();
- ret = cpu_unplug_begin(cpu);
-@@ -942,6 +941,7 @@ static int __ref _cpu_down(unsigned int
- cpu_unplug_done(cpu);
- out_cancel:
- cpu_hotplug_done();
-+ migrate_enable();
- return ret;
- }
-
diff --git a/patches/introduce_migrate_disable_cpu_light.patch b/patches/introduce_migrate_disable_cpu_light.patch
deleted file mode 100644
index 5eda023568c6..000000000000
--- a/patches/introduce_migrate_disable_cpu_light.patch
+++ /dev/null
@@ -1,280 +0,0 @@
-Subject: Introduce migrate_disable() + cpu_light()
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 17 Jun 2011 15:42:38 +0200
-
-Introduce migrate_disable(). The task can't be pushed to another CPU but can
-be preempted.
-
-From: Peter Zijlstra <a.p.zijlstra@chello.nl>:
-|Make migrate_disable() be a preempt_disable() for !rt kernels. This
-|allows generic code to use it but still enforces that these code
-|sections stay relatively small.
-|
-|A preemptible migrate_disable() accessible for general use would allow
-|people growing arbitrary per-cpu crap instead of clean these things
-|up.
-
-From: Steven Rostedt <rostedt@goodmis.org>
-| The migrate_disable() can cause a bit of a overhead to the RT kernel,
-| as changing the affinity is expensive to do at every lock encountered.
-| As a running task can not migrate, the actual disabling of migration
-| does not need to occur until the task is about to schedule out.
-|
-| In most cases, a task that disables migration will enable it before
-| it schedules making this change improve performance tremendously.
-
-On top of this build get/put_cpu_light(). It is similar to get_cpu():
-it uses migrate_disable() instead of preempt_disable(). That means the user
-remains on the same CPU but the function using it may be preempted and
-invoked again from another caller on the same CPU.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- include/linux/cpu.h | 3 ++
- include/linux/preempt.h | 9 ++++++
- include/linux/sched.h | 39 +++++++++++++++++++++-----
- include/linux/smp.h | 3 ++
- kernel/sched/core.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++-
- kernel/sched/debug.c | 7 ++++
- lib/smp_processor_id.c | 5 ++-
- 7 files changed, 125 insertions(+), 11 deletions(-)
-
---- a/include/linux/cpu.h
-+++ b/include/linux/cpu.h
-@@ -173,6 +173,9 @@ static inline void cpu_notifier_register
- #endif /* CONFIG_SMP */
- extern struct bus_type cpu_subsys;
-
-+static inline void pin_current_cpu(void) { }
-+static inline void unpin_current_cpu(void) { }
-+
- #ifdef CONFIG_HOTPLUG_CPU
- /* Stop CPUs going up and down. */
-
---- a/include/linux/preempt.h
-+++ b/include/linux/preempt.h
-@@ -257,11 +257,20 @@ do { \
- # define preempt_enable_rt() preempt_enable()
- # define preempt_disable_nort() barrier()
- # define preempt_enable_nort() barrier()
-+# ifdef CONFIG_SMP
-+ extern void migrate_disable(void);
-+ extern void migrate_enable(void);
-+# else /* CONFIG_SMP */
-+# define migrate_disable() barrier()
-+# define migrate_enable() barrier()
-+# endif /* CONFIG_SMP */
- #else
- # define preempt_disable_rt() barrier()
- # define preempt_enable_rt() barrier()
- # define preempt_disable_nort() preempt_disable()
- # define preempt_enable_nort() preempt_enable()
-+# define migrate_disable() preempt_disable()
-+# define migrate_enable() preempt_enable()
- #endif
-
- #ifdef CONFIG_PREEMPT_NOTIFIERS
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -1520,6 +1520,12 @@ struct task_struct {
- #endif
-
- unsigned int policy;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ int migrate_disable;
-+# ifdef CONFIG_SCHED_DEBUG
-+ int migrate_disable_atomic;
-+# endif
-+#endif
- int nr_cpus_allowed;
- cpumask_t cpus_allowed;
-
-@@ -1991,14 +1997,6 @@ static inline struct vm_struct *task_sta
- }
- #endif
-
--/* Future-safe accessor for struct task_struct's cpus_allowed. */
--#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
--
--static inline int tsk_nr_cpus_allowed(struct task_struct *p)
--{
-- return p->nr_cpus_allowed;
--}
--
- #define TNF_MIGRATED 0x01
- #define TNF_NO_GROUP 0x02
- #define TNF_SHARED 0x04
-@@ -3516,6 +3514,31 @@ static inline void set_task_cpu(struct t
-
- #endif /* CONFIG_SMP */
-
-+static inline int __migrate_disabled(struct task_struct *p)
-+{
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ return p->migrate_disable;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+/* Future-safe accessor for struct task_struct's cpus_allowed. */
-+static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
-+{
-+ if (__migrate_disabled(p))
-+ return cpumask_of(task_cpu(p));
-+
-+ return &p->cpus_allowed;
-+}
-+
-+static inline int tsk_nr_cpus_allowed(struct task_struct *p)
-+{
-+ if (__migrate_disabled(p))
-+ return 1;
-+ return p->nr_cpus_allowed;
-+}
-+
- extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
- extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
-
---- a/include/linux/smp.h
-+++ b/include/linux/smp.h
-@@ -197,6 +197,9 @@ static inline int get_boot_cpu_id(void)
- #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
- #define put_cpu() preempt_enable()
-
-+#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
-+#define put_cpu_light() migrate_enable()
-+
- /*
- * Callback to arch code if there's nosmp or maxcpus=0 on the
- * boot command line:
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -1100,6 +1100,11 @@ void do_set_cpus_allowed(struct task_str
-
- lockdep_assert_held(&p->pi_lock);
-
-+ if (__migrate_disabled(p)) {
-+ cpumask_copy(&p->cpus_allowed, new_mask);
-+ return;
-+ }
-+
- queued = task_on_rq_queued(p);
- running = task_current(rq, p);
-
-@@ -1179,7 +1184,7 @@ static int __set_cpus_allowed_ptr(struct
- }
-
- /* Can the task run on the task's current CPU? If so, we're done */
-- if (cpumask_test_cpu(task_cpu(p), new_mask))
-+ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
- goto out;
-
- dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
-@@ -3252,6 +3257,69 @@ static inline void schedule_debug(struct
- schedstat_inc(this_rq()->sched_count);
- }
-
-+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
-+
-+void migrate_disable(void)
-+{
-+ struct task_struct *p = current;
-+
-+ if (in_atomic()) {
-+#ifdef CONFIG_SCHED_DEBUG
-+ p->migrate_disable_atomic++;
-+#endif
-+ return;
-+ }
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+ WARN_ON_ONCE(p->migrate_disable_atomic);
-+#endif
-+
-+ if (p->migrate_disable) {
-+ p->migrate_disable++;
-+ return;
-+ }
-+
-+ preempt_disable();
-+ pin_current_cpu();
-+ p->migrate_disable = 1;
-+ preempt_enable();
-+}
-+EXPORT_SYMBOL(migrate_disable);
-+
-+void migrate_enable(void)
-+{
-+ struct task_struct *p = current;
-+
-+ if (in_atomic()) {
-+#ifdef CONFIG_SCHED_DEBUG
-+ p->migrate_disable_atomic--;
-+#endif
-+ return;
-+ }
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+ WARN_ON_ONCE(p->migrate_disable_atomic);
-+#endif
-+ WARN_ON_ONCE(p->migrate_disable <= 0);
-+
-+ if (p->migrate_disable > 1) {
-+ p->migrate_disable--;
-+ return;
-+ }
-+
-+ preempt_disable();
-+ /*
-+ * Clearing migrate_disable causes tsk_cpus_allowed to
-+ * show the tasks original cpu affinity.
-+ */
-+ p->migrate_disable = 0;
-+
-+ unpin_current_cpu();
-+ preempt_enable();
-+}
-+EXPORT_SYMBOL(migrate_enable);
-+#endif
-+
- /*
- * Pick up the highest-prio task:
- */
---- a/kernel/sched/debug.c
-+++ b/kernel/sched/debug.c
-@@ -558,6 +558,9 @@ void print_rt_rq(struct seq_file *m, int
- P(rt_throttled);
- PN(rt_time);
- PN(rt_runtime);
-+#ifdef CONFIG_SMP
-+ P(rt_nr_migratory);
-+#endif
-
- #undef PN
- #undef P
-@@ -953,6 +956,10 @@ void proc_sched_show_task(struct task_st
- #endif
- P(policy);
- P(prio);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ P(migrate_disable);
-+#endif
-+ P(nr_cpus_allowed);
- #undef PN_SCHEDSTAT
- #undef PN
- #undef __PN
---- a/lib/smp_processor_id.c
-+++ b/lib/smp_processor_id.c
-@@ -39,8 +39,9 @@ notrace static unsigned int check_preemp
- if (!printk_ratelimit())
- goto out_enable;
-
-- printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
-- what1, what2, preempt_count() - 1, current->comm, current->pid);
-+ printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x %08x] code: %s/%d\n",
-+ what1, what2, preempt_count() - 1, __migrate_disabled(current),
-+ current->comm, current->pid);
-
- print_symbol("caller is %s\n", (long)__builtin_return_address(0));
- dump_stack();
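The removed get_cpu_light()/put_cpu_light() pair was the drop-in replacement for get_cpu()/put_cpu() in sections that only need a stable CPU number, not non-preemptibility. A usage sketch; my_wq and my_work are hypothetical names, not from the patch:

	static struct workqueue_struct *my_wq;
	static struct work_struct my_work;

	int cpu;

	cpu = get_cpu_light();	/* migrate_disable() + smp_processor_id() */
	/* 'cpu' stays valid for us, but other tasks may preempt us */
	queue_work_on(cpu, my_wq, &my_work);
	put_cpu_light();	/* migrate_enable() */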
diff --git a/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch b/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
deleted file mode 100644
index 24f8c9a2c1ee..000000000000
--- a/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
+++ /dev/null
@@ -1,85 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 7 Jun 2013 22:37:06 +0200
-Subject: kernel/cpu: fix cpu down problem if kthread's cpu is going down
-
-If a kthread is pinned to CPUx and CPUx is going down then we get into
-trouble:
-- first the unplug thread is created
-- it will set itself to hp->unplug. As a result, every task that is
- going to take a lock, has to leave the CPU.
-- the CPU_DOWN_PREPARE notifiers are started. The worker thread will
- start a new process for the "high priority worker".
- Now kthread would like to take a lock but since it can't leave the CPU
- it will never complete its task.
-
-We could fire the unplug thread after the notifier but then the cpu is
-no longer marked "online" and the unplug thread will run on CPU0 which
-was fixed before :)
-
-So instead the unplug thread is started and kept waiting until the
-notifiers complete their work.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/cpu.c | 15 +++++++++++++--
- 1 file changed, 13 insertions(+), 2 deletions(-)
-
---- a/kernel/cpu.c
-+++ b/kernel/cpu.c
-@@ -254,6 +254,7 @@ struct hotplug_pcp {
- int refcount;
- int grab_lock;
- struct completion synced;
-+ struct completion unplug_wait;
- #ifdef CONFIG_PREEMPT_RT_FULL
- /*
- * Note, on PREEMPT_RT, the hotplug lock must save the state of
-@@ -357,6 +358,7 @@ static int sync_unplug_thread(void *data
- {
- struct hotplug_pcp *hp = data;
-
-+ wait_for_completion(&hp->unplug_wait);
- preempt_disable();
- hp->unplug = current;
- wait_for_pinned_cpus(hp);
-@@ -422,6 +424,14 @@ static void __cpu_unplug_sync(struct hot
- wait_for_completion(&hp->synced);
- }
-
-+static void __cpu_unplug_wait(unsigned int cpu)
-+{
-+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
-+
-+ complete(&hp->unplug_wait);
-+ wait_for_completion(&hp->synced);
-+}
-+
- /*
- * Start the sync_unplug_thread on the target cpu and wait for it to
- * complete.
-@@ -445,6 +455,7 @@ static int cpu_unplug_begin(unsigned int
- tell_sched_cpu_down_begin(cpu);
-
- init_completion(&hp->synced);
-+ init_completion(&hp->unplug_wait);
-
- hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
- if (IS_ERR(hp->sync_tsk)) {
-@@ -460,8 +471,7 @@ static int cpu_unplug_begin(unsigned int
- * wait for tasks that are going to enter these sections and
- * we must not have them block.
- */
-- __cpu_unplug_sync(hp);
--
-+ wake_up_process(hp->sync_tsk);
- return 0;
- }
-
-@@ -971,6 +981,7 @@ static int takedown_cpu(unsigned int cpu
- struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
- int err;
-
-+ __cpu_unplug_wait(cpu);
- /* Park the smpboot threads */
- kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
- smpboot_park_threads(cpu);
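The ordering the removed patch establishes is a two-completion handshake: the sync thread parks on unplug_wait until the notifiers have run, and the takedown path then waits on synced. Distilled to plain completion calls (a sketch of the pattern, not the patch code):

	static DECLARE_COMPLETION(unplug_wait);	/* notifiers finished */
	static DECLARE_COMPLETION(synced);	/* sync thread finished */

	static int sync_thread(void *data)
	{
		wait_for_completion(&unplug_wait); /* don't run before notifiers */
		/* ... push pinned tasks off the dying CPU ... */
		complete(&synced);
		return 0;
	}

	static void takedown_side(void)
	{
		complete(&unplug_wait);		/* notifiers done, release thread */
		wait_for_completion(&synced);	/* wait for the sync work */
	}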
diff --git a/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch b/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
deleted file mode 100644
index f84ffbd6050b..000000000000
--- a/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
+++ /dev/null
@@ -1,58 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 14 Jun 2013 17:16:35 +0200
-Subject: kernel/hotplug: restore original cpu mask on cpu down
-
-If a task which is allowed to run only on CPU X puts CPU Y down then it
-will be allowed on all CPUs but CPU Y after it comes back from the
-kernel. This patch ensures that we don't lose the initial affinity
-setting unless the CPU the task is running on is going down.
-
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/cpu.c | 13 ++++++++++++-
- 1 file changed, 12 insertions(+), 1 deletion(-)
-
---- a/kernel/cpu.c
-+++ b/kernel/cpu.c
-@@ -1065,6 +1065,7 @@ static int __ref _cpu_down(unsigned int
- int prev_state, ret = 0;
- int mycpu;
- cpumask_var_t cpumask;
-+ cpumask_var_t cpumask_org;
-
- if (num_online_cpus() == 1)
- return -EBUSY;
-@@ -1075,6 +1076,12 @@ static int __ref _cpu_down(unsigned int
- /* Move the downtaker off the unplug cpu */
- if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
- return -ENOMEM;
-+ if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
-+ free_cpumask_var(cpumask);
-+ return -ENOMEM;
-+ }
-+
-+ cpumask_copy(cpumask_org, &current->cpus_mask);
- cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
- set_cpus_allowed_ptr(current, cpumask);
- free_cpumask_var(cpumask);
-@@ -1083,7 +1090,8 @@ static int __ref _cpu_down(unsigned int
- if (mycpu == cpu) {
- printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
- migrate_enable();
-- return -EBUSY;
-+ ret = -EBUSY;
-+ goto restore_cpus;
- }
-
- cpu_hotplug_begin();
-@@ -1133,6 +1141,9 @@ static int __ref _cpu_down(unsigned int
- out_cancel:
- cpu_hotplug_done();
- migrate_enable();
-+restore_cpus:
-+ set_cpus_allowed_ptr(current, cpumask_org);
-+ free_cpumask_var(cpumask_org);
- return ret;
- }
-
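The affinity handling in the removed patch is the standard save/modify/restore pattern. As a distilled sketch using only the generic cpumask API (other_mask stands in for whatever temporary mask the caller wants; the error handling mirrors the patch):

	cpumask_var_t saved;

	if (!alloc_cpumask_var(&saved, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(saved, &current->cpus_mask);	/* remember original */

	set_cpus_allowed_ptr(current, other_mask);	/* temporary affinity */
	/* ... work that must not run on the outgoing CPU ... */

	set_cpus_allowed_ptr(current, saved);		/* restore */
	free_cpumask_var(saved);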
diff --git a/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch b/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
index 44e7ac4418db..a02027328c47 100644
--- a/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
+++ b/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -1043,6 +1043,7 @@ static void __perf_mux_hrtimer_init(stru
+@@ -1044,6 +1044,7 @@ static void __perf_mux_hrtimer_init(stru
raw_spin_lock_init(&cpuctx->hrtimer_lock);
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
timer->function = perf_mux_hrtimer_handler;
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 68c7b973cc48..02952cda4bfa 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt8
++-rt9
diff --git a/patches/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch b/patches/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch
new file mode 100644
index 000000000000..970eddfb4a8a
--- /dev/null
+++ b/patches/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch
@@ -0,0 +1,256 @@
+From 6d0b801a75aab6ba80af0ba99c8c04d0feeffcbd Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 4 Aug 2017 17:40:42 +0200
+Subject: [PATCH 1/2] locking: don't check for __LINUX_SPINLOCK_TYPES_H on -RT
+ archs
+
+Upstream uses arch_spinlock_t within spinlock_t and requests that the
+spinlock_types.h header file be included first.
+On -RT we have the rt_mutex with its raw_lock wait_lock, which needs
+the architecture's spinlock_types.h header file for its definition. However,
+we need rt_mutex first because it is used to build the spinlock_t, so
+that check does not work for us.
+Therefore I am dropping that check.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/alpha/include/asm/spinlock_types.h | 4 ----
+ arch/arm/include/asm/spinlock_types.h | 4 ----
+ arch/arm64/include/asm/spinlock_types.h | 4 ----
+ arch/blackfin/include/asm/spinlock_types.h | 4 ----
+ arch/hexagon/include/asm/spinlock_types.h | 4 ----
+ arch/ia64/include/asm/spinlock_types.h | 4 ----
+ arch/m32r/include/asm/spinlock_types.h | 4 ----
+ arch/metag/include/asm/spinlock_types.h | 4 ----
+ arch/mips/include/asm/spinlock_types.h | 4 ----
+ arch/mn10300/include/asm/spinlock_types.h | 4 ----
+ arch/powerpc/include/asm/spinlock_types.h | 4 ----
+ arch/s390/include/asm/spinlock_types.h | 4 ----
+ arch/sh/include/asm/spinlock_types.h | 4 ----
+ arch/sparc/include/asm/spinlock_types.h | 4 ----
+ arch/tile/include/asm/spinlock_types.h | 4 ----
+ arch/xtensa/include/asm/spinlock_types.h | 4 ----
+ include/linux/spinlock_types_up.h | 4 ----
+ 17 files changed, 68 deletions(-)
+
+--- a/arch/alpha/include/asm/spinlock_types.h
++++ b/arch/alpha/include/asm/spinlock_types.h
+@@ -1,10 +1,6 @@
+ #ifndef _ALPHA_SPINLOCK_TYPES_H
+ #define _ALPHA_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int lock;
+ } arch_spinlock_t;
+--- a/arch/arm/include/asm/spinlock_types.h
++++ b/arch/arm/include/asm/spinlock_types.h
+@@ -1,10 +1,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ #define TICKET_SHIFT 16
+
+ typedef struct {
+--- a/arch/arm64/include/asm/spinlock_types.h
++++ b/arch/arm64/include/asm/spinlock_types.h
+@@ -16,10 +16,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+
+-#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
+-# error "please don't include this file directly"
+-#endif
+-
+ #include <linux/types.h>
+
+ #define TICKET_SHIFT 16
+--- a/arch/blackfin/include/asm/spinlock_types.h
++++ b/arch/blackfin/include/asm/spinlock_types.h
+@@ -7,10 +7,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ #include <asm/rwlock.h>
+
+ typedef struct {
+--- a/arch/hexagon/include/asm/spinlock_types.h
++++ b/arch/hexagon/include/asm/spinlock_types.h
+@@ -21,10 +21,6 @@
+ #ifndef _ASM_SPINLOCK_TYPES_H
+ #define _ASM_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int lock;
+ } arch_spinlock_t;
+--- a/arch/ia64/include/asm/spinlock_types.h
++++ b/arch/ia64/include/asm/spinlock_types.h
+@@ -1,10 +1,6 @@
+ #ifndef _ASM_IA64_SPINLOCK_TYPES_H
+ #define _ASM_IA64_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int lock;
+ } arch_spinlock_t;
+--- a/arch/m32r/include/asm/spinlock_types.h
++++ b/arch/m32r/include/asm/spinlock_types.h
+@@ -1,10 +1,6 @@
+ #ifndef _ASM_M32R_SPINLOCK_TYPES_H
+ #define _ASM_M32R_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile int slock;
+ } arch_spinlock_t;
+--- a/arch/metag/include/asm/spinlock_types.h
++++ b/arch/metag/include/asm/spinlock_types.h
+@@ -1,10 +1,6 @@
+ #ifndef _ASM_METAG_SPINLOCK_TYPES_H
+ #define _ASM_METAG_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int lock;
+ } arch_spinlock_t;
+--- a/arch/mips/include/asm/spinlock_types.h
++++ b/arch/mips/include/asm/spinlock_types.h
+@@ -1,10 +1,6 @@
+ #ifndef _ASM_SPINLOCK_TYPES_H
+ #define _ASM_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ #include <linux/types.h>
+
+ #include <asm/byteorder.h>
+--- a/arch/mn10300/include/asm/spinlock_types.h
++++ b/arch/mn10300/include/asm/spinlock_types.h
+@@ -1,10 +1,6 @@
+ #ifndef _ASM_SPINLOCK_TYPES_H
+ #define _ASM_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct arch_spinlock {
+ unsigned int slock;
+ } arch_spinlock_t;
+--- a/arch/powerpc/include/asm/spinlock_types.h
++++ b/arch/powerpc/include/asm/spinlock_types.h
+@@ -1,10 +1,6 @@
+ #ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
+ #define _ASM_POWERPC_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int slock;
+ } arch_spinlock_t;
+--- a/arch/s390/include/asm/spinlock_types.h
++++ b/arch/s390/include/asm/spinlock_types.h
+@@ -1,10 +1,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ unsigned int lock;
+ } __attribute__ ((aligned (4))) arch_spinlock_t;
+--- a/arch/sh/include/asm/spinlock_types.h
++++ b/arch/sh/include/asm/spinlock_types.h
+@@ -1,10 +1,6 @@
+ #ifndef __ASM_SH_SPINLOCK_TYPES_H
+ #define __ASM_SH_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int lock;
+ } arch_spinlock_t;
+--- a/arch/sparc/include/asm/spinlock_types.h
++++ b/arch/sparc/include/asm/spinlock_types.h
+@@ -1,10 +1,6 @@
+ #ifndef __SPARC_SPINLOCK_TYPES_H
+ #define __SPARC_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned char lock;
+ } arch_spinlock_t;
+--- a/arch/tile/include/asm/spinlock_types.h
++++ b/arch/tile/include/asm/spinlock_types.h
+@@ -15,10 +15,6 @@
+ #ifndef _ASM_TILE_SPINLOCK_TYPES_H
+ #define _ASM_TILE_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ #ifdef __tilegx__
+
+ /* Low 15 bits are "next"; high 15 bits are "current". */
+--- a/arch/xtensa/include/asm/spinlock_types.h
++++ b/arch/xtensa/include/asm/spinlock_types.h
+@@ -1,10 +1,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int slock;
+ } arch_spinlock_t;
+--- a/include/linux/spinlock_types_up.h
++++ b/include/linux/spinlock_types_up.h
+@@ -1,10 +1,6 @@
+ #ifndef __LINUX_SPINLOCK_TYPES_UP_H
+ #define __LINUX_SPINLOCK_TYPES_UP_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ /*
+ * include/linux/spinlock_types_up.h - spinlock type definitions for UP
+ *
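The include-order conflict the changelog describes can be stated in two lines: upstream, the arch header may only be reached via linux/spinlock_types.h, while on -RT the rt_mutex inside spinlock_t needs the arch type on its own, before linux/spinlock_types.h can be assembled. A sketch of the two orderings (illustrative, not a hunk):

	/* upstream contract: define __LINUX_SPINLOCK_TYPES_H first,
	 * then pull in the arch types indirectly */
	#include <linux/spinlock_types.h>

	/* -RT bootstrap order: rtmutex needs arch_spinlock_t for its raw
	 * wait_lock before spinlock_types.h exists, so a direct include
	 * would trip the "please don't include this file directly" #error */
	#include <asm/spinlock_types.h>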
diff --git a/patches/locking-rt-rwlock--Make-reader-biased-rwlocks-selectable.patch b/patches/locking-rt-rwlock--Make-reader-biased-rwlocks-selectable.patch
new file mode 100644
index 000000000000..da122fc7519b
--- /dev/null
+++ b/patches/locking-rt-rwlock--Make-reader-biased-rwlocks-selectable.patch
@@ -0,0 +1,231 @@
+Subject: locking/rt-rwlock: Make reader biased rwlocks selectable
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 12 Jul 2017 17:04:09 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/rwlock_rt.h | 14 ++++----
+ include/linux/rwlock_types_rt.h | 36 +++++++++++---------
+ kernel/Kconfig.locks | 17 +++++++++
+ kernel/locking/rt.c | 2 +
+ kernel/locking/rwlock-rt.c | 70 ++++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 116 insertions(+), 23 deletions(-)
+
+--- a/include/linux/rwlock_rt.h
++++ b/include/linux/rwlock_rt.h
+@@ -5,13 +5,6 @@
+ #error Do not include directly. Use spinlock.h
+ #endif
+
+-#define rwlock_init(rwl) \
+-do { \
+- static struct lock_class_key __key; \
+- \
+- __rt_rwlock_init(rwl, #rwl, &__key); \
+-} while (0)
+-
+ extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
+ extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
+ extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
+@@ -101,6 +94,13 @@ static inline int __write_trylock_rt_irq
+ rt_write_unlock(lock); \
+ } while (0)
+
++#define rwlock_init(rwl) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ __rt_rwlock_init(rwl, #rwl, &__key); \
++} while (0)
++
+ /*
+ * Internal functions made global for CPU pinning
+ */
+--- a/include/linux/rwlock_types_rt.h
++++ b/include/linux/rwlock_types_rt.h
+@@ -5,6 +5,13 @@
+ #error "Do not include directly. Include spinlock_types.h instead"
+ #endif
+
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
++#else
++# define RW_DEP_MAP_INIT(lockname)
++#endif
++
++#ifndef CONFIG_RWLOCK_RT_READER_BIASED
+ /*
+ * rwlocks - rtmutex which allows single reader recursion
+ */
+@@ -16,12 +23,6 @@ typedef struct {
+ #endif
+ } rwlock_t;
+
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+-#else
+-# define RW_DEP_MAP_INIT(lockname)
+-#endif
+-
+ #define __RW_LOCK_UNLOCKED(name) \
+ { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \
+ RW_DEP_MAP_INIT(name) }
+@@ -29,8 +30,16 @@ typedef struct {
+ #define DEFINE_RWLOCK(name) \
+ rwlock_t name = __RW_LOCK_UNLOCKED(name)
+
+-#define READER_BIAS (1U << 31)
+-#define WRITER_BIAS (1U << 30)
++#else /* CONFIG_RWLOCK_RT_READER_BIASED */
++
++typedef struct rt_rw_lock rwlock_t;
++
++#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name)
++
++#define DEFINE_RWLOCK(name) \
++ rwlock_t name = __RW_LOCK_UNLOCKED(name)
++
++#endif /* !CONFIG_RWLOCK_RT_READER_BIASED */
+
+ /*
+ * A reader biased implementation primarily for CPU pinning.
+@@ -46,6 +55,9 @@ struct rt_rw_lock {
+ #endif
+ };
+
++#define READER_BIAS (1U << 31)
++#define WRITER_BIAS (1U << 30)
++
+ #define __RWLOCK_RT_INITIALIZER(name) \
+ { \
+ .readers = ATOMIC_INIT(READER_BIAS), \
+@@ -63,12 +75,4 @@ void __rwlock_biased_rt_init(struct rt_r
+ __rwlock_biased_rt_init((rwlock), #rwlock, &__key); \
+ } while (0)
+
+-int __read_rt_trylock(struct rt_rw_lock *rwlock);
+-void __read_rt_lock(struct rt_rw_lock *rwlock);
+-void __read_rt_unlock(struct rt_rw_lock *rwlock);
+-
+-void __write_rt_lock(struct rt_rw_lock *rwlock);
+-int __write_rt_trylock(struct rt_rw_lock *rwlock);
+-void __write_rt_unlock(struct rt_rw_lock *rwlock);
+-
+ #endif
+--- a/kernel/Kconfig.locks
++++ b/kernel/Kconfig.locks
+@@ -248,3 +248,20 @@ config ARCH_USE_QUEUED_RWLOCKS
+ config QUEUED_RWLOCKS
+ def_bool y if ARCH_USE_QUEUED_RWLOCKS
+ depends on SMP
++
++if PREEMPT_RT_FULL
++
++menu "RT Locking"
++
++config RWLOCK_RT_READER_BIASED
++ bool "Reader biased RWLOCK implementation for Preempt-RT"
++ def_bool n
++ help
++ This option provides an alternative RWLOCK implementation for
++ PREEMPT-RT. This new implementation is not as writer friendly as
++ the regular RT implementation or mainline. However, nothing RT
++ related should be affected. Nevertheless, here is a switch in case
++ something stalls, to allow double checking.
++endmenu
++
++endif
+--- a/kernel/locking/rt.c
++++ b/kernel/locking/rt.c
+@@ -198,6 +198,7 @@ void __lockfunc _mutex_unlock(struct mut
+ }
+ EXPORT_SYMBOL(_mutex_unlock);
+
++#ifndef CONFIG_RWLOCK_RT_READER_BIASED
+ /*
+ * rwlock_t functions
+ */
+@@ -280,6 +281,7 @@ void __rt_rwlock_init(rwlock_t *rwlock,
+ rwlock->lock.save_state = 1;
+ }
+ EXPORT_SYMBOL(__rt_rwlock_init);
++#endif
+
+ /**
+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
+--- a/kernel/locking/rwlock-rt.c
++++ b/kernel/locking/rwlock-rt.c
+@@ -244,3 +244,73 @@ void __write_rt_unlock(struct rt_rw_lock
+ raw_spin_lock_irqsave(&m->wait_lock, flags);
+ __write_unlock_common(lock, WRITER_BIAS, flags);
+ }
++
++#ifdef CONFIG_RWLOCK_RT_READER_BIASED
++
++int __lockfunc rt_read_trylock(rwlock_t *rwlock)
++{
++ int ret;
++
++ migrate_disable();
++ ret = __read_rt_trylock(rwlock);
++ if (ret)
++ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
++ else
++ migrate_enable();
++ return ret;
++}
++EXPORT_SYMBOL(rt_read_trylock);
++
++int __lockfunc rt_write_trylock(rwlock_t *rwlock)
++{
++ int ret;
++
++ migrate_disable();
++ ret = __write_rt_trylock(rwlock);
++ if (ret)
++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
++ else
++ migrate_enable();
++ return ret;
++}
++EXPORT_SYMBOL(rt_write_trylock);
++
++void __lockfunc rt_read_lock(rwlock_t *rwlock)
++{
++ migrate_disable();
++ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
++ __read_rt_lock(rwlock);
++}
++EXPORT_SYMBOL(rt_read_lock);
++
++void __lockfunc rt_write_lock(rwlock_t *rwlock)
++{
++ migrate_disable();
++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
++ __write_rt_lock(rwlock);
++}
++EXPORT_SYMBOL(rt_write_lock);
++
++void __lockfunc rt_read_unlock(rwlock_t *rwlock)
++{
++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
++ __read_rt_unlock(rwlock);
++ migrate_enable();
++}
++EXPORT_SYMBOL(rt_read_unlock);
++
++void __lockfunc rt_write_unlock(rwlock_t *rwlock)
++{
++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
++ __write_rt_unlock(rwlock);
++ migrate_enable();
++}
++EXPORT_SYMBOL(rt_write_unlock);
++
++void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
++{
++ __rwlock_biased_rt_init(rwlock, name, key);
++}
++EXPORT_SYMBOL(__rt_rwlock_init);
++
++#endif
diff --git a/patches/locking-rt-rwlock--Provide-reader-biased-rwlock-for-RT.patch b/patches/locking-rt-rwlock--Provide-reader-biased-rwlock-for-RT.patch
new file mode 100644
index 000000000000..93baeaca73c9
--- /dev/null
+++ b/patches/locking-rt-rwlock--Provide-reader-biased-rwlock-for-RT.patch
@@ -0,0 +1,338 @@
+Subject: locking/rt/rwlock: Provide reader biased rwlock for RT
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 12 Jul 2017 09:50:11 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/rwlock_rt.h | 10 +
+ include/linux/rwlock_types_rt.h | 42 ++++++
+ kernel/locking/Makefile | 2
+ kernel/locking/rwlock-rt.c | 246 ++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 299 insertions(+), 1 deletion(-)
+
+--- a/include/linux/rwlock_rt.h
++++ b/include/linux/rwlock_rt.h
+@@ -101,4 +101,14 @@ static inline int __write_trylock_rt_irq
+ rt_write_unlock(lock); \
+ } while (0)
+
++/*
++ * Internal functions made global for CPU pinning
++ */
++void __read_rt_lock(struct rt_rw_lock *lock);
++int __read_rt_trylock(struct rt_rw_lock *lock);
++void __write_rt_lock(struct rt_rw_lock *lock);
++int __write_rt_trylock(struct rt_rw_lock *lock);
++void __read_rt_unlock(struct rt_rw_lock *lock);
++void __write_rt_unlock(struct rt_rw_lock *lock);
++
+ #endif
+--- a/include/linux/rwlock_types_rt.h
++++ b/include/linux/rwlock_types_rt.h
+@@ -29,4 +29,46 @@ typedef struct {
+ #define DEFINE_RWLOCK(name) \
+ rwlock_t name = __RW_LOCK_UNLOCKED(name)
+
++#define READER_BIAS (1U << 31)
++#define WRITER_BIAS (1U << 30)
++
++/*
++ * A reader biased implementation primarily for CPU pinning.
++ *
++ * Can be selected as general replacement for the single reader RT rwlock
++ * variant
++ */
++struct rt_rw_lock {
++ struct rt_mutex rtmutex;
++ atomic_t readers;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++};
++
++#define __RWLOCK_RT_INITIALIZER(name) \
++{ \
++ .readers = ATOMIC_INIT(READER_BIAS), \
++ .rtmutex = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.rtmutex), \
++ RW_DEP_MAP_INIT(name) \
++}
++
++void __rwlock_biased_rt_init(struct rt_rw_lock *lock, const char *name,
++ struct lock_class_key *key);
++
++#define rwlock_biased_rt_init(rwlock) \
++ do { \
++ static struct lock_class_key __key; \
++ \
++ __rwlock_biased_rt_init((rwlock), #rwlock, &__key); \
++ } while (0)
++
++int __read_rt_trylock(struct rt_rw_lock *rwlock);
++void __read_rt_lock(struct rt_rw_lock *rwlock);
++void __read_rt_unlock(struct rt_rw_lock *rwlock);
++
++void __write_rt_lock(struct rt_rw_lock *rwlock);
++int __write_rt_trylock(struct rt_rw_lock *rwlock);
++void __write_rt_unlock(struct rt_rw_lock *rwlock);
++
+ #endif
+--- a/kernel/locking/Makefile
++++ b/kernel/locking/Makefile
+@@ -32,7 +32,7 @@ ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+ obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+ obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
+ endif
+-obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o rwsem-rt.o
++obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o rwsem-rt.o rwlock-rt.o
+ obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
+ obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
+ obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
+--- /dev/null
++++ b/kernel/locking/rwlock-rt.c
+@@ -0,0 +1,246 @@
++/*
++ */
++#include <linux/sched/debug.h>
++#include <linux/export.h>
++
++#include "rtmutex_common.h"
++#include <linux/rwlock_types_rt.h>
++
++/*
++ * RT-specific reader/writer locks
++ *
++ * write_lock()
++ * 1) Lock lock->rtmutex
++ * 2) Remove the reader BIAS to force readers into the slow path
++ * 3) Wait until all readers have left the critical region
++ * 4) Mark it write locked
++ *
++ * write_unlock()
++ * 1) Remove the write locked marker
++ * 2) Set the reader BIAS so readers can use the fast path again
++ * 3) Unlock lock->rtmutex to release blocked readers
++ *
++ * read_lock()
++ * 1) Try fast path acquisition (reader BIAS is set)
++ * 2) Take lock->rtmutex.wait_lock which protects the writelocked flag
++ * 3) If !writelocked, acquire it for read
++ * 4) If writelocked, block on lock->rtmutex
++ * 5) unlock lock->rtmutex, goto 1)
++ *
++ * read_unlock()
++ * 1) Try fast path release (reader count != 1)
++ * 2) Wake the writer waiting in write_lock()#3
++ *
++ * read_lock()#3 has the consequence that rw locks on RT are not writer
++ * fair, but writers, which should be avoided in RT tasks (think tasklist
++ * lock), are subject to the rtmutex priority/DL inheritance mechanism.
++ *
++ * It's possible to make the rw locks writer fair by keeping a list of
++ * active readers. A blocked writer would force all newly incoming readers
++ * to block on the rtmutex, but the rtmutex would have to be proxy locked
++ * for one reader after the other. We can't use multi-reader inheritance
++ * because there is no way to support that with
++ * SCHED_DEADLINE. Implementing the one by one reader boosting/handover
++ * mechanism is a major surgery for a very dubious value.
++ *
++ * The risk of writer starvation is there, but the pathological use cases
++ * which trigger it are not necessarily the typical RT workloads.
++ */
++
++void __rwlock_biased_rt_init(struct rt_rw_lock *lock, const char *name,
++ struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held semaphore:
++ */
++ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
++ lockdep_init_map(&lock->dep_map, name, key, 0);
++#endif
++ atomic_set(&lock->readers, READER_BIAS);
++ rt_mutex_init(&lock->rtmutex);
++ lock->rtmutex.save_state = 1;
++}
++
++int __read_rt_trylock(struct rt_rw_lock *lock)
++{
++ int r, old;
++
++ /*
++ * Increment reader count, if lock->readers < 0, i.e. READER_BIAS is
++ * set.
++ */
++ for (r = atomic_read(&lock->readers); r < 0;) {
++ old = atomic_cmpxchg(&lock->readers, r, r + 1);
++ if (likely(old == r))
++ return 1;
++ r = old;
++ }
++ return 0;
++}
++
++void __sched __read_rt_lock(struct rt_rw_lock *lock)
++{
++ struct rt_mutex *m = &lock->rtmutex;
++ struct rt_mutex_waiter waiter;
++ unsigned long flags;
++
++ if (__read_rt_trylock(lock))
++ return;
++
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++ /*
++ * Allow readers as long as the writer has not completely
++ * acquired the semaphore for write.
++ */
++ if (atomic_read(&lock->readers) != WRITER_BIAS) {
++ atomic_inc(&lock->readers);
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++ return;
++ }
++
++ /*
++ * Call into the slow lock path with the rtmutex->wait_lock
++ * held, so this can't result in the following race:
++ *
++ * Reader1 Reader2 Writer
++ * read_lock()
++ * write_lock()
++ * rtmutex_lock(m)
++ * swait()
++ * read_lock()
++ * unlock(m->wait_lock)
++ * read_unlock()
++ * swake()
++ * lock(m->wait_lock)
++ * lock->writelocked=true
++ * unlock(m->wait_lock)
++ *
++ * write_unlock()
++ * lock->writelocked=false
++ * rtmutex_unlock(m)
++ * read_lock()
++ * write_lock()
++ * rtmutex_lock(m)
++ * swait()
++ * rtmutex_lock(m)
++ *
++ * That would put Reader1 behind the writer waiting on
++ * Reader2 to call read_unlock() which might be unbound.
++ */
++ rt_mutex_init_waiter(&waiter, false);
++ rt_spin_lock_slowlock_locked(m, &waiter, flags);
++ /*
++ * The slowlock() above is guaranteed to return with the rtmutex
++ * now held, so there can't be a writer active. Increment the reader
++ * count and immediately drop the rtmutex again.
++ */
++ atomic_inc(&lock->readers);
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++ rt_spin_lock_slowunlock(m);
++
++ debug_rt_mutex_free_waiter(&waiter);
++}
++
++void __read_rt_unlock(struct rt_rw_lock *lock)
++{
++ struct rt_mutex *m = &lock->rtmutex;
++ struct task_struct *tsk;
++
++ /*
++ * sem->readers can only hit 0 when a writer is waiting for the
++ * active readers to leave the critical region.
++ */
++ if (!atomic_dec_and_test(&lock->readers))
++ return;
++
++ raw_spin_lock_irq(&m->wait_lock);
++ /*
++ * Wake the writer, i.e. the rtmutex owner. It might release the
++ * rtmutex concurrently in the fast path, but to clean up the rw
++ * lock it needs to acquire m->wait_lock. The worst that can
++ * happen is a spurious wakeup.
++ */
++ tsk = rt_mutex_owner(m);
++ if (tsk)
++ wake_up_process(tsk);
++
++ raw_spin_unlock_irq(&m->wait_lock);
++}
++
++static void __write_unlock_common(struct rt_rw_lock *lock, int bias,
++ unsigned long flags)
++{
++ struct rt_mutex *m = &lock->rtmutex;
++
++ atomic_add(READER_BIAS - bias, &lock->readers);
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++ rt_spin_lock_slowunlock(m);
++}
++
++void __sched __write_rt_lock(struct rt_rw_lock *lock)
++{
++ struct rt_mutex *m = &lock->rtmutex;
++ struct task_struct *self = current;
++ unsigned long flags;
++
++ /* Take the rtmutex as a first step */
++ __rt_spin_lock(m);
++
++ /* Force readers into slow path */
++ atomic_sub(READER_BIAS, &lock->readers);
++
++ for (;;) {
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++
++ raw_spin_lock(&self->pi_lock);
++ self->saved_state = self->state;
++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
++ raw_spin_unlock(&self->pi_lock);
++
++ /* Have all readers left the critical region? */
++ if (!atomic_read(&lock->readers)) {
++ atomic_set(&lock->readers, WRITER_BIAS);
++ raw_spin_lock(&self->pi_lock);
++ __set_current_state_no_track(self->saved_state);
++ self->saved_state = TASK_RUNNING;
++ raw_spin_unlock(&self->pi_lock);
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++ return;
++ }
++
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++
++ if (atomic_read(&lock->readers) != 0)
++ schedule();
++ }
++}
++
++int __write_rt_trylock(struct rt_rw_lock *lock)
++{
++ struct rt_mutex *m = &lock->rtmutex;
++ unsigned long flags;
++
++ if (!rt_mutex_trylock(m))
++ return 0;
++
++ atomic_sub(READER_BIAS, &lock->readers);
++
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++ if (!atomic_read(&lock->readers)) {
++ atomic_set(&lock->readers, WRITER_BIAS);
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++ return 1;
++ }
++ __write_unlock_common(lock, 0, flags);
++ return 0;
++}
++
++void __write_rt_unlock(struct rt_rw_lock *lock)
++{
++ struct rt_mutex *m = &lock->rtmutex;
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++ __write_unlock_common(lock, WRITER_BIAS, flags);
++}
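
For reference, the reader accounting above reduces to a small state machine on
lock->readers. Below is a minimal standalone model of the fast paths, assuming
an INT_MIN-style READER_BIAS; the model_* names and the simplified writer path
are illustrative only, and the rtmutex based slow paths are elided entirely:

	/* Standalone model of the lock->readers fast paths. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <limits.h>

	#define MODEL_READER_BIAS INT_MIN	/* assumption: large negative bias */

	static atomic_int readers = MODEL_READER_BIAS;

	/* Mirrors __read_rt_trylock(): readers may enter while the count is
	 * negative, i.e. while no writer has removed the bias. */
	static bool model_read_trylock(void)
	{
		int r = atomic_load(&readers);

		while (r < 0) {
			/* On failure the CAS refreshes r with the current value. */
			if (atomic_compare_exchange_weak(&readers, &r, r + 1))
				return true;
		}
		return false;
	}

	static void model_read_unlock(void)
	{
		atomic_fetch_sub(&readers, 1);
	}

	/* Simplified writer: succeeds only when no reader is active. The real
	 * __write_rt_lock() instead subtracts the bias unconditionally and
	 * then sleeps until the remaining readers drain to zero. */
	static bool model_write_trylock(void)
	{
		int expected = MODEL_READER_BIAS;

		return atomic_compare_exchange_strong(&readers, &expected, 0);
	}

	int main(void)
	{
		bool r = model_read_trylock();	/* reader enters: bias + 1 */
		bool w = model_write_trylock();	/* fails: a reader is active */

		model_read_unlock();
		return (r && !w) ? 0 : 1;
	}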
diff --git a/patches/locking-rtmutex--Make-inner-working-of-rt_spin_slow_lock---accessible.patch b/patches/locking-rtmutex--Make-inner-working-of-rt_spin_slow_lock---accessible.patch
new file mode 100644
index 000000000000..b72578e2a8a6
--- /dev/null
+++ b/patches/locking-rtmutex--Make-inner-working-of-rt_spin_slow_lock---accessible.patch
@@ -0,0 +1,112 @@
+Subject: locking/rtmutex: Make inner working of rt_spin_slow_lock() accessible
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 12 Jul 2017 10:24:49 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/locking/rtmutex.c | 40 ++++++++++++++++++++++------------------
+ kernel/locking/rtmutex_common.h | 4 ++++
+ 2 files changed, 26 insertions(+), 18 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1045,21 +1045,16 @@ static int task_blocks_on_rt_mutex(struc
+ * We store the current state under p->pi_lock in p->saved_state and
+ * the try_to_wake_up() code handles this accordingly.
+ */
+-static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
++void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock,
++ struct rt_mutex_waiter *waiter,
++ unsigned long flags)
+ {
+ struct task_struct *lock_owner, *self = current;
+- struct rt_mutex_waiter waiter, *top_waiter;
+- unsigned long flags;
++ struct rt_mutex_waiter *top_waiter;
+ int ret;
+
+- rt_mutex_init_waiter(&waiter, true);
+-
+- raw_spin_lock_irqsave(&lock->wait_lock, flags);
+-
+- if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
+- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL))
+ return;
+- }
+
+ BUG_ON(rt_mutex_owner(lock) == self);
+
+@@ -1074,12 +1069,12 @@ static void noinline __sched rt_spin_lo
+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
+ raw_spin_unlock(&self->pi_lock);
+
+- ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
++ ret = task_blocks_on_rt_mutex(lock, waiter, self, RT_MUTEX_MIN_CHAINWALK);
+ BUG_ON(ret);
+
+ for (;;) {
+ /* Try to acquire the lock again. */
+- if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
++ if (__try_to_take_rt_mutex(lock, self, waiter, STEAL_LATERAL))
+ break;
+
+ top_waiter = rt_mutex_top_waiter(lock);
+@@ -1087,9 +1082,9 @@ static void noinline __sched rt_spin_lo
+
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+- debug_rt_mutex_print_deadlock(&waiter);
++ debug_rt_mutex_print_deadlock(waiter);
+
+- if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
++ if (top_waiter != waiter || adaptive_wait(lock, lock_owner))
+ schedule();
+
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+@@ -1117,11 +1112,20 @@ static void noinline __sched rt_spin_lo
+ */
+ fixup_rt_mutex_waiters(lock);
+
+- BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
+- BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));
++ BUG_ON(rt_mutex_has_waiters(lock) && waiter == rt_mutex_top_waiter(lock));
++ BUG_ON(!RB_EMPTY_NODE(&waiter->tree_entry));
++}
+
+- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
++{
++ struct rt_mutex_waiter waiter;
++ unsigned long flags;
+
++ rt_mutex_init_waiter(&waiter, true);
++
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
++ rt_spin_lock_slowlock_locked(lock, &waiter, flags);
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ debug_rt_mutex_free_waiter(&waiter);
+ }
+
+@@ -1131,7 +1135,7 @@ static bool __sched __rt_mutex_unlock_co
+ /*
+ * Slow path to release a rt_mutex spin_lock style
+ */
+-static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
++void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+ {
+ unsigned long flags;
+ DEFINE_WAKE_Q(wake_q);
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -139,6 +139,10 @@ int __sched rt_mutex_slowlock_locked(str
+ enum rtmutex_chainwalk chwalk,
+ struct ww_acquire_ctx *ww_ctx,
+ struct rt_mutex_waiter *waiter);
++void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock,
++ struct rt_mutex_waiter *waiter,
++ unsigned long flags);
++void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock);
+
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ # include "rtmutex-debug.h"
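
The point of this split is that callers such as __read_rt_lock() in the rwlock
patch can enter the slow path with wait_lock already held, which is what closes
the race described there. A rough userspace sketch of the wrapper/"_locked"
shape, with illustrative demo_* names; the kernel variant additionally carries
the IRQ flags through and blocks via the rtmutex instead of yielding:

	#include <pthread.h>
	#include <sched.h>

	struct demo_lock {
		pthread_mutex_t wait_lock;	/* stands in for lock->wait_lock */
		int owner;			/* stands in for the rtmutex owner */
	};

	/* Inner worker: called and returns with wait_lock held, like
	 * rt_spin_lock_slowlock_locked(). Dropping wait_lock only while
	 * waiting lets the caller perform checks under wait_lock and then
	 * fall into the slow path without a window in between. */
	static void demo_slowlock_locked(struct demo_lock *l, int self)
	{
		while (l->owner != 0) {
			pthread_mutex_unlock(&l->wait_lock);
			sched_yield();		/* stand-in for schedule() */
			pthread_mutex_lock(&l->wait_lock);
		}
		l->owner = self;
	}

	/* The original entry point becomes a thin wrapper. */
	static void demo_slowlock(struct demo_lock *l, int self)
	{
		pthread_mutex_lock(&l->wait_lock);
		demo_slowlock_locked(l, self);
		pthread_mutex_unlock(&l->wait_lock);
	}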
diff --git a/patches/mm-protect-activate-switch-mm.patch b/patches/mm-protect-activate-switch-mm.patch
index 14a149684c3c..c6eb31661b31 100644
--- a/patches/mm-protect-activate-switch-mm.patch
+++ b/patches/mm-protect-activate-switch-mm.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/exec.c
+++ b/fs/exec.c
-@@ -1042,12 +1042,14 @@ static int exec_mmap(struct mm_struct *m
+@@ -1043,12 +1043,14 @@ static int exec_mmap(struct mm_struct *m
}
}
task_lock(tsk);
diff --git a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
index cc3afdd73fb8..3f670877d50f 100644
--- a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
+++ b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4962,6 +4962,7 @@ bool napi_schedule_prep(struct napi_stru
+@@ -4972,6 +4972,7 @@ bool napi_schedule_prep(struct napi_stru
}
EXPORT_SYMBOL(napi_schedule_prep);
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
-@@ -4973,6 +4974,7 @@ void __napi_schedule_irqoff(struct napi_
+@@ -4983,6 +4984,7 @@ void __napi_schedule_irqoff(struct napi_
____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
diff --git a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
index 0440e769effe..07edeb3da1ec 100644
--- a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
+++ b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
@@ -35,7 +35,7 @@ Cc: stable-rt@vger.kernel.org
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -8098,7 +8098,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -8108,7 +8108,7 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
diff --git a/patches/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch b/patches/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch
index 6abe3ed1c459..b62ce0524651 100644
--- a/patches/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch
+++ b/patches/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -5060,8 +5060,6 @@ static void busy_poll_stop(struct napi_s
+@@ -5070,8 +5070,6 @@ static void busy_poll_stop(struct napi_s
if (rc == BUSY_POLL_BUDGET)
__napi_schedule(napi);
local_bh_enable();
diff --git a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index b14654289847..d4ee5fd864cc 100644
--- a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void raise_softirq_irqoff(unsigned int nr)
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -5368,7 +5368,7 @@ static __latent_entropy void net_rx_acti
+@@ -5378,7 +5378,7 @@ static __latent_entropy void net_rx_acti
list_splice_tail(&repoll, &list);
list_splice(&list, &sd->poll_list);
if (!list_empty(&sd->poll_list))
diff --git a/patches/perf-make-swevent-hrtimer-irqsafe.patch b/patches/perf-make-swevent-hrtimer-irqsafe.patch
index 5633d77d886a..1762d1cd82bb 100644
--- a/patches/perf-make-swevent-hrtimer-irqsafe.patch
+++ b/patches/perf-make-swevent-hrtimer-irqsafe.patch
@@ -58,7 +58,7 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -8495,6 +8495,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -8487,6 +8487,7 @@ static void perf_swevent_init_hrtimer(st
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swevent_hrtimer;
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 8fbff8931252..b59fd2ade258 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -140,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1509,6 +1509,44 @@ static inline int test_tsk_need_resched(
+@@ -1513,6 +1513,44 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -282,7 +282,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2536,6 +2578,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2458,6 +2500,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -292,7 +292,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3527,6 +3572,7 @@ static void __sched notrace __schedule(b
+@@ -3449,6 +3494,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
if (likely(prev != next)) {
-@@ -3678,6 +3724,30 @@ static void __sched notrace preempt_sche
+@@ -3600,6 +3646,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -331,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -3692,7 +3762,8 @@ asmlinkage __visible void __sched notrac
+@@ -3614,7 +3684,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3719,6 +3790,9 @@ asmlinkage __visible void __sched notrac
+@@ -3641,6 +3712,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -351,7 +351,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -5548,7 +5622,9 @@ void init_idle(struct task_struct *idle,
+@@ -5470,7 +5544,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -362,15 +362,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -7523,6 +7599,7 @@ void migrate_disable(void)
- /* get_online_cpus(); */
+@@ -7443,6 +7519,7 @@ void migrate_disable(void)
+ }
preempt_disable();
+ preempt_lazy_disable();
pin_current_cpu();
p->migrate_disable = 1;
-@@ -7592,6 +7669,7 @@ void migrate_enable(void)
+@@ -7512,6 +7589,7 @@ void migrate_enable(void)
arg.dest_cpu = dest_cpu;
unpin_current_cpu();
@@ -378,10 +378,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
-@@ -7602,6 +7680,7 @@ void migrate_enable(void)
+@@ -7520,6 +7598,7 @@ void migrate_enable(void)
+ }
}
unpin_current_cpu();
- /* put_online_cpus(); */
+ preempt_lazy_enable();
preempt_enable();
}
@@ -433,7 +433,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -6231,7 +6231,7 @@ static void check_preempt_wakeup(struct
+@@ -6194,7 +6194,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -442,7 +442,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -9006,7 +9006,7 @@ static void task_fork_fair(struct task_s
+@@ -8969,7 +8969,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -451,7 +451,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -9030,7 +9030,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -8993,7 +8993,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
diff --git a/patches/ptrace-don-t-open-IRQs-in-ptrace_freeze_traced-too-e.patch b/patches/ptrace-don-t-open-IRQs-in-ptrace_freeze_traced-too-e.patch
deleted file mode 100644
index beaf1c852010..000000000000
--- a/patches/ptrace-don-t-open-IRQs-in-ptrace_freeze_traced-too-e.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 13 Jan 2016 14:09:05 +0100
-Subject: ptrace: don't open IRQs in ptrace_freeze_traced() too early
-
-In the non-RT case the spin_lock_irq() here disables interrupts as well
-as raw_spin_lock_irq(). So in the unlock case the interrupts are enabled
-too early.
-
-Reported-by: kernel test robot <ying.huang@linux.intel.com>
-Cc: stable-rt@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/ptrace.c | 6 ++++--
- 1 file changed, 4 insertions(+), 2 deletions(-)
-
---- a/kernel/ptrace.c
-+++ b/kernel/ptrace.c
-@@ -128,12 +128,14 @@ static bool ptrace_freeze_traced(struct
-
- spin_lock_irq(&task->sighand->siglock);
- if (task_is_traced(task) && !__fatal_signal_pending(task)) {
-- raw_spin_lock_irq(&task->pi_lock);
-+ unsigned long flags;
-+
-+ raw_spin_lock_irqsave(&task->pi_lock, flags);
- if (task->state & __TASK_TRACED)
- task->state = __TASK_TRACED;
- else
- task->saved_state = __TASK_TRACED;
-- raw_spin_unlock_irq(&task->pi_lock);
-+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
- ret = true;
- }
- spin_unlock_irq(&task->sighand->siglock);
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index ec32f4a999c2..9dda90ba1989 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
(task->state & TASK_NOLOAD) == 0)
-@@ -1496,6 +1492,51 @@ static inline int test_tsk_need_resched(
+@@ -1506,6 +1502,51 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
diff --git a/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch b/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch
deleted file mode 100644
index e29478f68b9d..000000000000
--- a/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From: Yong Zhang <yong.zhang0@gmail.com>
-Date: Thu, 28 Jul 2011 11:16:00 +0800
-Subject: hotplug: Reread hotplug_pcp on pin_current_cpu() retry
-
-When retry happens, it's likely that the task has been migrated to
-another cpu (except unplug failed), but it still derefernces the
-original hotplug_pcp per cpu data.
-
-Update the pointer to hotplug_pcp in the retry path, so it points to
-the current cpu.
-
-Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
-Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
-Link: http://lkml.kernel.org/r/20110728031600.GA338@windriver.com
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- kernel/cpu.c | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
---- a/kernel/cpu.c
-+++ b/kernel/cpu.c
-@@ -252,9 +252,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp
- */
- void pin_current_cpu(void)
- {
-- struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);
-+ struct hotplug_pcp *hp;
-
- retry:
-+ hp = this_cpu_ptr(&hotplug_pcp);
-+
- if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
- hp->unplug == current) {
- hp->refcount++;
diff --git a/patches/rt-locking--Consolidate-lock-functions.patch b/patches/rt-locking--Consolidate-lock-functions.patch
new file mode 100644
index 000000000000..20dd8f7123b6
--- /dev/null
+++ b/patches/rt-locking--Consolidate-lock-functions.patch
@@ -0,0 +1,178 @@
+Subject: rt/locking: Consolidate lock functions
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 28 Jul 2017 12:26:59 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/locallock.h | 22 +++-------------------
+ include/linux/spinlock_rt.h | 7 ++-----
+ kernel/locking/rt.c | 3 +++
+ kernel/locking/rtmutex.c | 36 +-----------------------------------
+ 4 files changed, 9 insertions(+), 59 deletions(-)
+
+--- a/include/linux/locallock.h
++++ b/include/linux/locallock.h
+@@ -36,26 +36,10 @@ struct local_irq_lock {
+ spin_lock_init(&per_cpu(lvar, __cpu).lock); \
+ } while (0)
+
+-/*
+- * spin_lock|trylock|unlock_local flavour that does not migrate disable
+- * used for __local_lock|trylock|unlock where get_local_var/put_local_var
+- * already takes care of the migrate_disable/enable
+- * for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
+- */
+-#ifdef CONFIG_PREEMPT_RT_FULL
+-# define spin_lock_local(lock) rt_spin_lock__no_mg(lock)
+-# define spin_trylock_local(lock) rt_spin_trylock__no_mg(lock)
+-# define spin_unlock_local(lock) rt_spin_unlock__no_mg(lock)
+-#else
+-# define spin_lock_local(lock) spin_lock(lock)
+-# define spin_trylock_local(lock) spin_trylock(lock)
+-# define spin_unlock_local(lock) spin_unlock(lock)
+-#endif
+-
+ static inline void __local_lock(struct local_irq_lock *lv)
+ {
+ if (lv->owner != current) {
+- spin_lock_local(&lv->lock);
++ spin_lock(&lv->lock);
+ LL_WARN(lv->owner);
+ LL_WARN(lv->nestcnt);
+ lv->owner = current;
+@@ -71,7 +55,7 @@ static inline void __local_lock(struct l
+
+ static inline int __local_trylock(struct local_irq_lock *lv)
+ {
+- if (lv->owner != current && spin_trylock_local(&lv->lock)) {
++ if (lv->owner != current && spin_trylock(&lv->lock)) {
+ LL_WARN(lv->owner);
+ LL_WARN(lv->nestcnt);
+ lv->owner = current;
+@@ -98,7 +82,7 @@ static inline void __local_unlock(struct
+ return;
+
+ lv->owner = NULL;
+- spin_unlock_local(&lv->lock);
++ spin_unlock(&lv->lock);
+ }
+
+ #define local_unlock(lvar) \
+--- a/include/linux/spinlock_rt.h
++++ b/include/linux/spinlock_rt.h
+@@ -18,10 +18,6 @@ do { \
+ __rt_spin_lock_init(slock, #slock, &__key); \
+ } while (0)
+
+-void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock);
+-void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock);
+-int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock);
+-
+ extern void __lockfunc rt_spin_lock(spinlock_t *lock);
+ extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
+ extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
+@@ -35,9 +31,10 @@ extern int atomic_dec_and_spin_lock(atom
+ /*
+ * lockdep-less calls, for derived types like rwlock:
+ * (for trylock they can use rt_mutex_trylock() directly.
++ * Migrate disable handling must be done at the call site.
+ */
+-extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
+ extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
++extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
+ extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
+
+ #define spin_lock(lock) rt_spin_lock(lock)
+--- a/kernel/locking/rt.c
++++ b/kernel/locking/rt.c
+@@ -239,6 +239,7 @@ EXPORT_SYMBOL(rt_read_trylock);
+
+ void __lockfunc rt_write_lock(rwlock_t *rwlock)
+ {
++ migrate_disable();
+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+ __rt_spin_lock(&rwlock->lock);
+ }
+@@ -248,9 +249,11 @@ void __lockfunc rt_read_lock(rwlock_t *r
+ {
+ struct rt_mutex *lock = &rwlock->lock;
+
++ migrate_disable();
+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+ __rt_spin_lock(lock);
+ }
++
+ EXPORT_SYMBOL(rt_read_lock);
+
+ void __lockfunc rt_write_unlock(rwlock_t *rwlock)
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1146,13 +1146,6 @@ static void noinline __sched rt_spin_lo
+ rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
+ }
+
+-void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
+-{
+- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+- spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+-}
+-EXPORT_SYMBOL(rt_spin_lock__no_mg);
+-
+ void __lockfunc rt_spin_lock(spinlock_t *lock)
+ {
+ migrate_disable();
+@@ -1163,35 +1156,19 @@ EXPORT_SYMBOL(rt_spin_lock);
+
+ void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
+ {
+- migrate_disable();
+ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
+ }
+-EXPORT_SYMBOL(__rt_spin_lock);
+-
+-void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock)
+-{
+- rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
+-}
+-EXPORT_SYMBOL(__rt_spin_lock__no_mg);
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
+ {
+ migrate_disable();
+- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+ }
+ EXPORT_SYMBOL(rt_spin_lock_nested);
+ #endif
+
+-void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock)
+-{
+- /* NOTE: we always pass in '1' for nested, for simplicity */
+- spin_release(&lock->dep_map, 1, _RET_IP_);
+- rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
+-}
+-EXPORT_SYMBOL(rt_spin_unlock__no_mg);
+-
+ void __lockfunc rt_spin_unlock(spinlock_t *lock)
+ {
+ /* NOTE: we always pass in '1' for nested, for simplicity */
+@@ -1219,17 +1196,6 @@ void __lockfunc rt_spin_unlock_wait(spin
+ }
+ EXPORT_SYMBOL(rt_spin_unlock_wait);
+
+-int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock)
+-{
+- int ret;
+-
+- ret = rt_mutex_trylock(&lock->lock);
+- if (ret)
+- spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+- return ret;
+-}
+-EXPORT_SYMBOL(rt_spin_trylock__no_mg);
+-
+ int __lockfunc rt_spin_trylock(spinlock_t *lock)
+ {
+ int ret;
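
With the __no_mg variants gone, migrate disable/enable must be handled by every
derived-type entry point itself. The subtle part is the trylock flavour, which
has to undo migrate_disable() on failure. A sketch of the call-site convention,
mirroring what the rwlock entry points in this series do (demo_rt_trylock is an
illustrative name, not a kernel function):

	int demo_rt_trylock(rwlock_t *rwlock)
	{
		int ret;

		migrate_disable();
		ret = rt_mutex_trylock(&rwlock->lock);
		if (ret)
			rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
		else
			migrate_enable();	/* failure path must back out */
		return ret;
	}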
diff --git a/patches/rt-locking--Consolidate-rwlock-variants.patch b/patches/rt-locking--Consolidate-rwlock-variants.patch
new file mode 100644
index 000000000000..2cc908b13bd0
--- /dev/null
+++ b/patches/rt-locking--Consolidate-rwlock-variants.patch
@@ -0,0 +1,262 @@
+Subject: rt/locking: Consolidate rwlock variants
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 28 Jul 2017 15:55:41 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/locking/rt.c | 85 ------------------------------------
+ kernel/locking/rwlock-rt.c | 104 ++++++++++++++++++++++++++++++++++++++++-----
+ 2 files changed, 94 insertions(+), 95 deletions(-)
+
+--- a/kernel/locking/rt.c
++++ b/kernel/locking/rt.c
+@@ -198,91 +198,6 @@ void __lockfunc _mutex_unlock(struct mut
+ }
+ EXPORT_SYMBOL(_mutex_unlock);
+
+-#ifndef CONFIG_RWLOCK_RT_READER_BIASED
+-/*
+- * rwlock_t functions
+- */
+-int __lockfunc rt_write_trylock(rwlock_t *rwlock)
+-{
+- int ret;
+-
+- migrate_disable();
+- ret = rt_mutex_trylock(&rwlock->lock);
+- if (ret)
+- rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+- else
+- migrate_enable();
+- return ret;
+-}
+-EXPORT_SYMBOL(rt_write_trylock);
+-
+-int __lockfunc rt_read_trylock(rwlock_t *rwlock)
+-{
+- struct rt_mutex *lock = &rwlock->lock;
+- int ret;
+-
+- migrate_disable();
+- ret = rt_mutex_trylock(lock);
+- if (ret)
+- rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+- else
+- migrate_enable();
+- return ret;
+-}
+-EXPORT_SYMBOL(rt_read_trylock);
+-
+-void __lockfunc rt_write_lock(rwlock_t *rwlock)
+-{
+- migrate_disable();
+- rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+- __rt_spin_lock(&rwlock->lock);
+-}
+-EXPORT_SYMBOL(rt_write_lock);
+-
+-void __lockfunc rt_read_lock(rwlock_t *rwlock)
+-{
+- struct rt_mutex *lock = &rwlock->lock;
+-
+- migrate_disable();
+- rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+- __rt_spin_lock(lock);
+-}
+-
+-EXPORT_SYMBOL(rt_read_lock);
+-
+-void __lockfunc rt_write_unlock(rwlock_t *rwlock)
+-{
+- /* NOTE: we always pass in '1' for nested, for simplicity */
+- rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+- __rt_spin_unlock(&rwlock->lock);
+- migrate_enable();
+-}
+-EXPORT_SYMBOL(rt_write_unlock);
+-
+-void __lockfunc rt_read_unlock(rwlock_t *rwlock)
+-{
+- rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+- __rt_spin_unlock(&rwlock->lock);
+- migrate_enable();
+-}
+-EXPORT_SYMBOL(rt_read_unlock);
+-
+-void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
+-{
+- rt_mutex_init(&rwlock->lock);
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+- /*
+- * Make sure we are not reinitializing a held lock:
+- */
+- debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
+- lockdep_init_map(&rwlock->dep_map, name, key, 0);
+-#endif
+- rwlock->lock.save_state = 1;
+-}
+-EXPORT_SYMBOL(__rt_rwlock_init);
+-#endif
+-
+ /**
+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
+ * @cnt: the atomic which we are to dec
+--- a/kernel/locking/rwlock-rt.c
++++ b/kernel/locking/rwlock-rt.c
+@@ -245,14 +245,100 @@ void __write_rt_unlock(struct rt_rw_lock
+ __write_unlock_common(lock, WRITER_BIAS, flags);
+ }
+
+-#ifdef CONFIG_RWLOCK_RT_READER_BIASED
++#ifndef CONFIG_RWLOCK_RT_READER_BIASED
++/* Map the single reader implementation */
++static inline int do_read_rt_trylock(rwlock_t *rwlock)
++{
++ return rt_mutex_trylock(&rwlock->lock);
++}
++
++static inline int do_write_rt_trylock(rwlock_t *rwlock)
++{
++ return rt_mutex_trylock(&rwlock->lock);
++}
++
++static inline void do_read_rt_lock(rwlock_t *rwlock)
++{
++ __rt_spin_lock(&rwlock->lock);
++}
++
++static inline void do_write_rt_lock(rwlock_t *rwlock)
++{
++ __rt_spin_lock(&rwlock->lock);
++}
++
++static inline void do_read_rt_unlock(rwlock_t *rwlock)
++{
++ __rt_spin_unlock(&rwlock->lock);
++}
+
++static inline void do_write_rt_unlock(rwlock_t *rwlock)
++{
++ __rt_spin_unlock(&rwlock->lock);
++}
++
++static inline void do_rwlock_rt_init(rwlock_t *rwlock, const char *name,
++ struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
++ lockdep_init_map(&rwlock->dep_map, name, key, 0);
++#endif
++ rt_mutex_init(&rwlock->lock);
++ rwlock->lock.save_state = 1;
++}
++
++#else
++/* Map the reader biased implementation */
++static inline int do_read_rt_trylock(rwlock_t *rwlock)
++{
++ return __read_rt_trylock(rwlock);
++}
++
++static inline int do_write_rt_trylock(rwlock_t *rwlock)
++{
++ return __write_rt_trylock(rwlock);
++}
++
++static inline void do_read_rt_lock(rwlock_t *rwlock)
++{
++ __read_rt_lock(rwlock);
++}
++
++static inline void do_write_rt_lock(rwlock_t *rwlock)
++{
++ __write_rt_lock(rwlock);
++}
++
++static inline void do_read_rt_unlock(rwlock_t *rwlock)
++{
++ __read_rt_unlock(rwlock);
++}
++
++static inline void do_write_rt_unlock(rwlock_t *rwlock)
++{
++ __write_rt_unlock(rwlock);
++}
++
++static inline void do_rwlock_rt_init(rwlock_t *rwlock, const char *name,
++ struct lock_class_key *key)
++{
++ __rwlock_biased_rt_init(rwlock, name, key);
++}
++#endif
++
++/*
++ * The common functions which get wrapped into the rwlock API.
++ */
+ int __lockfunc rt_read_trylock(rwlock_t *rwlock)
+ {
+ int ret;
+
+ migrate_disable();
+- ret = __read_rt_trylock(rwlock);
++ ret = do_read_rt_trylock(rwlock);
+ if (ret)
+ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
+ else
+@@ -266,7 +352,7 @@ int __lockfunc rt_write_trylock(rwlock_t
+ int ret;
+
+ migrate_disable();
+- ret = __write_rt_trylock(rwlock);
++ ret = do_write_rt_trylock(rwlock);
+ if (ret)
+ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+ else
+@@ -279,7 +365,7 @@ void __lockfunc rt_read_lock(rwlock_t *r
+ {
+ migrate_disable();
+ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+- __read_rt_lock(rwlock);
++ do_read_rt_lock(rwlock);
+ }
+ EXPORT_SYMBOL(rt_read_lock);
+
+@@ -287,14 +373,14 @@ void __lockfunc rt_write_lock(rwlock_t *
+ {
+ migrate_disable();
+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+- __write_rt_lock(rwlock);
++ do_write_rt_lock(rwlock);
+ }
+ EXPORT_SYMBOL(rt_write_lock);
+
+ void __lockfunc rt_read_unlock(rwlock_t *rwlock)
+ {
+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+- __read_rt_unlock(rwlock);
++ do_read_rt_unlock(rwlock);
+ migrate_enable();
+ }
+ EXPORT_SYMBOL(rt_read_unlock);
+@@ -302,15 +388,13 @@ EXPORT_SYMBOL(rt_read_unlock);
+ void __lockfunc rt_write_unlock(rwlock_t *rwlock)
+ {
+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+- __write_rt_unlock(rwlock);
++ do_write_rt_unlock(rwlock);
+ migrate_enable();
+ }
+ EXPORT_SYMBOL(rt_write_unlock);
+
+ void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
+ {
+- __rwlock_biased_rt_init(rwlock, name, key);
++ do_rwlock_rt_init(rwlock, name, key);
+ }
+ EXPORT_SYMBOL(__rt_rwlock_init);
+-
+-#endif
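
Note that the consolidated entry points annotate the read side with
rwlock_acquire_read(), so lockdep tracks reader acquisitions the same way no
matter which backend is mapped in. A miniature of the dispatch shape; the
demo_* names are made up and DEMO_BIASED stands in for
CONFIG_RWLOCK_RT_READER_BIASED:

	#ifdef DEMO_BIASED
	static inline void demo_backend_read_lock(rwlock_t *rwlock)
	{
		__read_rt_lock(rwlock);		/* multi-reader backend */
	}
	#else
	static inline void demo_backend_read_lock(rwlock_t *rwlock)
	{
		__rt_spin_lock(&rwlock->lock);	/* single-reader backend */
	}
	#endif

	void demo_read_lock(rwlock_t *rwlock)
	{
		migrate_disable();
		rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
		demo_backend_read_lock(rwlock);
	}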
diff --git a/patches/rt-locking--Simplify-rt-rwlock.patch b/patches/rt-locking--Simplify-rt-rwlock.patch
new file mode 100644
index 000000000000..9e9de5eb40ce
--- /dev/null
+++ b/patches/rt-locking--Simplify-rt-rwlock.patch
@@ -0,0 +1,104 @@
+Subject: rt/locking: Simplify rt rwlock
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 28 Jul 2017 15:05:51 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/rwlock_rt.h | 21 +++++++++++++--------
+ kernel/locking/rt.c | 23 ++---------------------
+ 2 files changed, 15 insertions(+), 29 deletions(-)
+
+--- a/include/linux/rwlock_rt.h
++++ b/include/linux/rwlock_rt.h
+@@ -9,37 +9,42 @@
+ do { \
+ static struct lock_class_key __key; \
+ \
+- rt_mutex_init(&(rwl)->lock); \
+ __rt_rwlock_init(rwl, #rwl, &__key); \
+ } while (0)
+
+ extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
+ extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
+ extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
+-extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags);
+ extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
+ extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
+ extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
+-extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
+-extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
+ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
+
+ #define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
+ #define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
+
+-#define write_trylock_irqsave(lock, flags) \
+- __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
++static inline int __write_trylock_rt_irqsave(rwlock_t *lock, unsigned long *flags)
++{
++ /* XXX ARCH_IRQ_ENABLED */
++ *flags = 0;
++ return rt_write_trylock(lock);
++}
++
++#define write_trylock_irqsave(lock, flags) \
++ __cond_lock(lock, __write_trylock_rt_irqsave(lock, &(flags)))
+
+ #define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+- flags = rt_read_lock_irqsave(lock); \
++ rt_read_lock(lock); \
++ flags = 0; \
+ } while (0)
+
+ #define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+- flags = rt_write_lock_irqsave(lock); \
++ rt_write_lock(lock); \
++ flags = 0; \
+ } while (0)
+
+ #define read_lock(lock) rt_read_lock(lock)
+--- a/kernel/locking/rt.c
++++ b/kernel/locking/rt.c
+@@ -215,13 +215,6 @@ int __lockfunc rt_write_trylock(rwlock_t
+ }
+ EXPORT_SYMBOL(rt_write_trylock);
+
+-int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
+-{
+- *flags = 0;
+- return rt_write_trylock(rwlock);
+-}
+-EXPORT_SYMBOL(rt_write_trylock_irqsave);
+-
+ int __lockfunc rt_read_trylock(rwlock_t *rwlock)
+ {
+ struct rt_mutex *lock = &rwlock->lock;
+@@ -273,22 +266,10 @@ void __lockfunc rt_read_unlock(rwlock_t
+ }
+ EXPORT_SYMBOL(rt_read_unlock);
+
+-unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
+-{
+- rt_write_lock(rwlock);
+- return 0;
+-}
+-EXPORT_SYMBOL(rt_write_lock_irqsave);
+-
+-unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
+-{
+- rt_read_lock(rwlock);
+- return 0;
+-}
+-EXPORT_SYMBOL(rt_read_lock_irqsave);
+-
+ void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
+ {
++ rt_mutex_init(&rwlock->lock);
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held lock:
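
The irqsave variants above can hand back flags == 0 because on RT these rwlocks
are sleeping locks and never actually disable interrupts; the flags value only
has to survive the round trip to the matching irqrestore. What a caller sees,
sketched with an illustrative function name:

	static void demo_caller(rwlock_t *rwlock)
	{
		unsigned long flags;

		read_lock_irqsave(rwlock, flags);	/* flags is simply set to 0 */
		/* ... critical section; stays preemptible on RT ... */
		read_unlock_irqrestore(rwlock, flags);
	}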
diff --git a/patches/rt-locking-Reenable-migration-accross-schedule.patch b/patches/rt-locking-Reenable-migration-accross-schedule.patch
deleted file mode 100644
index 7a5f17a30c8b..000000000000
--- a/patches/rt-locking-Reenable-migration-accross-schedule.patch
+++ /dev/null
@@ -1,111 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 8 Feb 2016 16:15:28 +0100
-Subject: rt/locking: Reenable migration accross schedule
-
-We currently disable migration across lock acquisition. That includes the part
-where we block on the lock and schedule out. We cannot disable migration after
-taking the lock as that would cause a possible lock inversion.
-
-But we can be smart and enable migration when we block and schedule out. That
-allows the scheduler to place the task freely at least if this is the first
-migrate disable level. For nested locking this does not help at all.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/locking/rtmutex.c | 32 ++++++++++++++++++++------------
- 1 file changed, 20 insertions(+), 12 deletions(-)
-
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -980,14 +980,19 @@ static int __try_to_take_rt_mutex(struct
- * preemptible spin_lock functions:
- */
- static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
-- void (*slowfn)(struct rt_mutex *lock))
-+ void (*slowfn)(struct rt_mutex *lock,
-+ bool mg_off),
-+ bool do_mig_dis)
- {
- might_sleep_no_state_check();
-
-+ if (do_mig_dis)
-+ migrate_disable();
-+
- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
- return;
- else
-- slowfn(lock);
-+ slowfn(lock, do_mig_dis);
- }
-
- static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
-@@ -1045,7 +1050,8 @@ static int task_blocks_on_rt_mutex(struc
- * We store the current state under p->pi_lock in p->saved_state and
- * the try_to_wake_up() code handles this accordingly.
- */
--static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
-+static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock,
-+ bool mg_off)
- {
- struct task_struct *lock_owner, *self = current;
- struct rt_mutex_waiter waiter, *top_waiter;
-@@ -1089,8 +1095,13 @@ static void noinline __sched rt_spin_lo
-
- debug_rt_mutex_print_deadlock(&waiter);
-
-- if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
-+ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) {
-+ if (mg_off)
-+ migrate_enable();
- schedule();
-+ if (mg_off)
-+ migrate_disable();
-+ }
-
- raw_spin_lock_irqsave(&lock->wait_lock, flags);
-
-@@ -1148,38 +1159,35 @@ static void noinline __sched rt_spin_lo
-
- void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
- {
-- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, false);
- spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- }
- EXPORT_SYMBOL(rt_spin_lock__no_mg);
-
- void __lockfunc rt_spin_lock(spinlock_t *lock)
- {
-- migrate_disable();
-- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
- spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- }
- EXPORT_SYMBOL(rt_spin_lock);
-
- void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
- {
-- migrate_disable();
-- rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
-+ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, true);
- }
- EXPORT_SYMBOL(__rt_spin_lock);
-
- void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock)
- {
-- rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
-+ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, false);
- }
- EXPORT_SYMBOL(__rt_spin_lock__no_mg);
-
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
- {
-- migrate_disable();
-- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
- spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
- }
- EXPORT_SYMBOL(rt_spin_lock_nested);
- #endif
diff --git a/patches/rt-rwlock--Remove-recursive-support.patch b/patches/rt-rwlock--Remove-recursive-support.patch
new file mode 100644
index 000000000000..c5451cb0c8e1
--- /dev/null
+++ b/patches/rt-rwlock--Remove-recursive-support.patch
@@ -0,0 +1,135 @@
+Subject: rt/rwlock: Remove recursive support
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 28 Jul 2017 12:35:23 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/rwlock_types_rt.h | 1
+ kernel/locking/rt.c | 56 ++++++++--------------------------------
+ 2 files changed, 12 insertions(+), 45 deletions(-)
+
+--- a/include/linux/rwlock_types_rt.h
++++ b/include/linux/rwlock_types_rt.h
+@@ -10,7 +10,6 @@
+ */
+ typedef struct {
+ struct rt_mutex lock;
+- int read_depth;
+ unsigned int break_lock;
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+--- a/kernel/locking/rt.c
++++ b/kernel/locking/rt.c
+@@ -211,46 +211,28 @@ int __lockfunc rt_write_trylock(rwlock_t
+ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+ else
+ migrate_enable();
+-
+ return ret;
+ }
+ EXPORT_SYMBOL(rt_write_trylock);
+
+ int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
+ {
+- int ret;
+-
+ *flags = 0;
+- ret = rt_write_trylock(rwlock);
+- return ret;
++ return rt_write_trylock(rwlock);
+ }
+ EXPORT_SYMBOL(rt_write_trylock_irqsave);
+
+ int __lockfunc rt_read_trylock(rwlock_t *rwlock)
+ {
+ struct rt_mutex *lock = &rwlock->lock;
+- int ret = 1;
+-
+- /*
+- * recursive read locks succeed when current owns the lock,
+- * but not when read_depth == 0 which means that the lock is
+- * write locked.
+- */
+- if (rt_mutex_owner(lock) != current) {
+- migrate_disable();
+- ret = rt_mutex_trylock(lock);
+- if (ret)
+- rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+- else
+- migrate_enable();
+-
+- } else if (!rwlock->read_depth) {
+- ret = 0;
+- }
++ int ret;
+
++ migrate_disable();
++ ret = rt_mutex_trylock(lock);
+ if (ret)
+- rwlock->read_depth++;
+-
++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
++ else
++ migrate_enable();
+ return ret;
+ }
+ EXPORT_SYMBOL(rt_read_trylock);
+@@ -266,17 +248,9 @@ void __lockfunc rt_read_lock(rwlock_t *r
+ {
+ struct rt_mutex *lock = &rwlock->lock;
+
+-
+- /*
+- * recursive read locks succeed when current owns the lock
+- */
+- if (rt_mutex_owner(lock) != current) {
+- rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+- __rt_spin_lock(lock);
+- }
+- rwlock->read_depth++;
++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
++ __rt_spin_lock(lock);
+ }
+-
+ EXPORT_SYMBOL(rt_read_lock);
+
+ void __lockfunc rt_write_unlock(rwlock_t *rwlock)
+@@ -290,19 +264,15 @@ EXPORT_SYMBOL(rt_write_unlock);
+
+ void __lockfunc rt_read_unlock(rwlock_t *rwlock)
+ {
+- /* Release the lock only when read_depth is down to 0 */
+- if (--rwlock->read_depth == 0) {
+- rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+- __rt_spin_unlock(&rwlock->lock);
+- migrate_enable();
+- }
++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
++ __rt_spin_unlock(&rwlock->lock);
++ migrate_enable();
+ }
+ EXPORT_SYMBOL(rt_read_unlock);
+
+ unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
+ {
+ rt_write_lock(rwlock);
+-
+ return 0;
+ }
+ EXPORT_SYMBOL(rt_write_lock_irqsave);
+@@ -310,7 +280,6 @@ EXPORT_SYMBOL(rt_write_lock_irqsave);
+ unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
+ {
+ rt_read_lock(rwlock);
+-
+ return 0;
+ }
+ EXPORT_SYMBOL(rt_read_lock_irqsave);
+@@ -325,7 +294,6 @@ void __rt_rwlock_init(rwlock_t *rwlock,
+ lockdep_init_map(&rwlock->dep_map, name, key, 0);
+ #endif
+ rwlock->lock.save_state = 1;
+- rwlock->read_depth = 0;
+ }
+ EXPORT_SYMBOL(__rt_rwlock_init);
+
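Dropping read_depth means a task may no longer take the same read lock
recursively: in this single-reader implementation the second rt_read_lock()
blocks on the rtmutex the task itself already owns. A sketch of the pattern
that is no longer legal (illustrative function, do not copy):

	static void demo_recursive_reader(rwlock_t *rwlock)
	{
		rt_read_lock(rwlock);
		rt_read_lock(rwlock);	/* used to succeed via read_depth;
					 * now self-deadlocks on the rtmutex */
		rt_read_unlock(rwlock);
		rt_read_unlock(rwlock);
	}
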
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index f3d7e6f1ea0b..c9a28a1683d7 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *stack;
atomic_t usage;
/* Per task flags (PF_*), defined further below: */
-@@ -1410,6 +1412,7 @@ extern struct task_struct *find_task_by_
+@@ -1420,6 +1422,7 @@ extern struct task_struct *find_task_by_
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
diff --git a/patches/series b/patches/series
index fcfffa8bc661..9f369c6e42b2 100644
--- a/patches/series
+++ b/patches/series
@@ -86,38 +86,38 @@ smp-hotplug-Move-unparking-of-percpu-threads-to-the-.patch
0017-sched-core-Enable-might_sleep-and-smp_processor_id-c.patch
# recursive get_online_cpus() invocations from smp/hotplug
-#0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch
-#0002-cpu-hotplug-Provide-lockdep_assert_cpus_held.patch
-#0003-cpu-hotplug-Provide-cpuhp_setup-remove_state-_nocall.patch
-#0004-cpu-hotplug-Add-__cpuhp_state_add_instance_cpuslocke.patch
-#0005-stop_machine-Provide-stop_machine_cpuslocked.patch
-#0006-padata-Make-padata_alloc-static.patch
-#0007-padata-Avoid-nested-calls-to-cpus_read_lock-in-pcryp.patch
-#0008-x86-mtrr-Remove-get_online_cpus-from-mtrr_save_state.patch
-#0009-cpufreq-Use-cpuhp_setup_state_nocalls_cpuslocked.patch
-#0010-KVM-PPC-Book3S-HV-Use-cpuhp_setup_state_nocalls_cpus.patch
-#0011-hwtracing-coresight-etm3x-Use-cpuhp_setup_state_noca.patch
-#0012-hwtracing-coresight-etm4x-Use-cpuhp_setup_state_noca.patch
-#0013-perf-x86-intel-cqm-Use-cpuhp_setup_state_cpuslocked.patch
-#0014-ARM-hw_breakpoint-Use-cpuhp_setup_state_cpuslocked.patch
-#0015-s390-kernel-Use-stop_machine_cpuslocked.patch
-#0016-powerpc-powernv-Use-stop_machine_cpuslocked.patch
-#0017-cpu-hotplug-Use-stop_machine_cpuslocked-in-takedown_.patch
-#0018-x86-perf-Drop-EXPORT-of-perf_check_microcode.patch
-#0019-perf-x86-intel-Drop-get_online_cpus-in-intel_snb_che.patch
-#0020-PCI-Use-cpu_hotplug_disable-instead-of-get_online_cp.patch
-#0021-PCI-Replace-the-racy-recursion-prevention.patch
-#0022-ACPI-processor-Use-cpu_hotplug_disable-instead-of-ge.patch
-#0023-perf-tracing-cpuhotplug-Fix-locking-order.patch
-#0024-jump_label-Reorder-hotplug-lock-and-jump_label_lock.patch
-#0025-kprobes-Cure-hotplug-lock-ordering-issues.patch
-#0026-arm64-Prevent-cpu-hotplug-rwsem-recursion.patch
-#0027-arm-Prevent-hotplug-rwsem-recursion.patch
-#0028-s390-Prevent-hotplug-rwsem-recursion.patch
-#0029-cpu-hotplug-Convert-hotplug-locking-to-percpu-rwsem.patch
-#0030-sched-Provide-is_percpu_thread-helper.patch
-#0031-acpi-processor-Prevent-cpu-hotplug-deadlock.patch
-#0032-cpuhotplug-Link-lock-stacks-for-hotplug-callbacks.patch
+0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch
+0002-cpu-hotplug-Provide-lockdep_assert_cpus_held.patch
+0003-cpu-hotplug-Provide-cpuhp_setup-remove_state-_nocall.patch
+0004-cpu-hotplug-Add-__cpuhp_state_add_instance_cpuslocke.patch
+0005-stop_machine-Provide-stop_machine_cpuslocked.patch
+0006-padata-Make-padata_alloc-static.patch
+0007-padata-Avoid-nested-calls-to-cpus_read_lock-in-pcryp.patch
+0008-x86-mtrr-Remove-get_online_cpus-from-mtrr_save_state.patch
+0009-cpufreq-Use-cpuhp_setup_state_nocalls_cpuslocked.patch
+0010-KVM-PPC-Book3S-HV-Use-cpuhp_setup_state_nocalls_cpus.patch
+0011-hwtracing-coresight-etm3x-Use-cpuhp_setup_state_noca.patch
+0012-hwtracing-coresight-etm4x-Use-cpuhp_setup_state_noca.patch
+0013-perf-x86-intel-cqm-Use-cpuhp_setup_state_cpuslocked.patch
+0014-ARM-hw_breakpoint-Use-cpuhp_setup_state_cpuslocked.patch
+0015-s390-kernel-Use-stop_machine_cpuslocked.patch
+0016-powerpc-powernv-Use-stop_machine_cpuslocked.patch
+0017-cpu-hotplug-Use-stop_machine_cpuslocked-in-takedown_.patch
+0018-x86-perf-Drop-EXPORT-of-perf_check_microcode.patch
+0019-perf-x86-intel-Drop-get_online_cpus-in-intel_snb_che.patch
+0020-PCI-Use-cpu_hotplug_disable-instead-of-get_online_cp.patch
+0021-PCI-Replace-the-racy-recursion-prevention.patch
+0022-ACPI-processor-Use-cpu_hotplug_disable-instead-of-ge.patch
+0023-perf-tracing-cpuhotplug-Fix-locking-order.patch
+0024-jump_label-Reorder-hotplug-lock-and-jump_label_lock.patch
+0025-kprobes-Cure-hotplug-lock-ordering-issues.patch
+0026-arm64-Prevent-cpu-hotplug-rwsem-recursion.patch
+0027-arm-Prevent-hotplug-rwsem-recursion.patch
+0028-s390-Prevent-hotplug-rwsem-recursion.patch
+0029-cpu-hotplug-Convert-hotplug-locking-to-percpu-rwsem.patch
+0030-sched-Provide-is_percpu_thread-helper.patch
+0031-acpi-processor-Prevent-cpu-hotplug-deadlock.patch
+0032-cpuhotplug-Link-lock-stacks-for-hotplug-callbacks.patch
###
# Those two should vanish soon (not use PIT during bootup)
@@ -308,7 +308,6 @@ local-irq-rt-depending-variants.patch
preempt-nort-rt-variants.patch
# local locks & migrate disable
-#introduce_migrate_disable_cpu_light.patch
futex-workaround-migrate_disable-enable-in-different.patch
rt-local-irq-lock.patch
locallock-add-local_lock_on.patch
@@ -423,10 +422,7 @@ stop-machine-raw-lock.patch
# MIGRATE DISABLE AND PER CPU
# XXX redo
hotplug-light-get-online-cpus.patch
-hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
-re-migrate_disable-race-with-cpu-hotplug-3f.patch
ftrace-migrate-disable-tracing.patch
-hotplug-use-migrate-disable.patch
# NOHZ
@@ -472,6 +468,16 @@ futex-rtmutex-Cure-RT-double-blocking-issue.patch
rwsem-rt-Lift-single-reader-restriction.patch
ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+# RWLOCK redo, fold back in later
+locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch
+rt-rwlock--Remove-recursive-support.patch
+rt-locking--Consolidate-lock-functions.patch
+rt-locking--Simplify-rt-rwlock.patch
+locking-rtmutex--Make-inner-working-of-rt_spin_slow_lock---accessible.patch
+locking-rt-rwlock--Provide-reader-biased-rwlock-for-RT.patch
+locking-rt-rwlock--Make-reader-biased-rwlocks-selectable.patch
+rt-locking--Consolidate-rwlock-variants.patch
+
# RCU
peter_zijlstra-frob-rcu.patch
rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
@@ -635,16 +641,8 @@ random-avoid-preempt_disable-ed-section.patch
char-random-don-t-print-that-the-init-is-done.patch
# HOTPLUG
-# XXX
-cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
-cpu-rt-rework-cpu-down.patch
-cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
-kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
-kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
-cpu_down_move_migrate_enable_back.patch
-hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
-#
-rt-locking-Reenable-migration-accross-schedule.patch
+cpu-hotplug--Implement-CPU-pinning.patch
+hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
# SCSCI QLA2xxx
scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
@@ -715,3 +713,4 @@ workqueue-prevent-deadlock-stall.patch
# Add RT to version
localversion.patch
+
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index 924c5080e2fc..8a507d8a08cc 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void flush_all_backlogs(void)
-@@ -4867,7 +4870,9 @@ static int process_backlog(struct napi_s
+@@ -4877,7 +4880,9 @@ static int process_backlog(struct napi_s
while (again) {
struct sk_buff *skb;
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
-@@ -4875,9 +4880,9 @@ static int process_backlog(struct napi_s
+@@ -4885,9 +4890,9 @@ static int process_backlog(struct napi_s
if (++work >= quota)
return work;
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rps_lock(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
-@@ -5318,13 +5323,21 @@ static __latent_entropy void net_rx_acti
+@@ -5328,13 +5333,21 @@ static __latent_entropy void net_rx_acti
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (;;) {
struct napi_struct *n;
-@@ -8089,6 +8102,9 @@ static int dev_cpu_dead(unsigned int old
+@@ -8099,6 +8112,9 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
}
-@@ -8392,8 +8408,9 @@ static int __init net_dev_init(void)
+@@ -8402,8 +8418,9 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index 82420d058513..82ccd5093e8f 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -4822,6 +4825,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4832,6 +4835,7 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Send pending IPI's to kick RPS processing on remote cpus. */
while (remsd) {
-@@ -4835,6 +4839,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4845,6 +4849,7 @@ static void net_rps_action_and_irq_enabl
} else
#endif
local_irq_enable();
@@ -151,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -4912,6 +4917,7 @@ void __napi_schedule(struct napi_struct
+@@ -4922,6 +4927,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -8074,6 +8080,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -8084,6 +8090,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch
index fc3edf8f21ba..639ffd2d3e94 100644
--- a/patches/workqueue-prevent-deadlock-stall.patch
+++ b/patches/workqueue-prevent-deadlock-stall.patch
@@ -43,7 +43,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3626,9 +3626,8 @@ void __noreturn do_task_dead(void)
+@@ -3548,9 +3548,8 @@ void __noreturn do_task_dead(void)
static inline void sched_submit_work(struct task_struct *tsk)
{
@@ -54,7 +54,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
/*
* If a worker went to sleep, notify and ask workqueue whether
* it wants to wake up a task to maintain concurrency.
-@@ -3636,6 +3635,10 @@ static inline void sched_submit_work(str
+@@ -3558,6 +3557,10 @@ static inline void sched_submit_work(str
if (tsk->flags & PF_WQ_WORKER)
wq_worker_sleeping(tsk);