Diffstat (limited to 'patches/0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch')
-rw-r--r--  patches/0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch | 240
1 file changed, 240 insertions(+), 0 deletions(-)
diff --git a/patches/0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch b/patches/0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch
new file mode 100644
index 000000000000..3352c259f1f6
--- /dev/null
+++ b/patches/0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch
@@ -0,0 +1,240 @@
+From 8f553c498e1772cccb39a114da4a498d22992758 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:12 +0200
+Subject: [PATCH 01/32] cpu/hotplug: Provide cpus_read|write_[un]lock()
+
+The counting 'rwsem' hackery of get|put_online_cpus() is going to be
+replaced by percpu rwsem.
+
+Rename the functions to make it clear that it's locking and not some
+refcount style interface. These new functions will be used for the
+preparatory patches which make the code ready for the percpu rwsem
+conversion.
+
+Rename all instances in the cpu hotplug code while at it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081547.080397752@linutronix.de
+---
+ include/linux/cpu.h | 32 ++++++++++++++++++--------------
+ kernel/cpu.c | 36 ++++++++++++++++++------------------
+ 2 files changed, 36 insertions(+), 32 deletions(-)
+
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index f92081234afd..055876003914 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -99,26 +99,30 @@ static inline void cpu_maps_update_done(void)
+ extern struct bus_type cpu_subsys;
+
+ #ifdef CONFIG_HOTPLUG_CPU
+-/* Stop CPUs going up and down. */
+-
+-extern void cpu_hotplug_begin(void);
+-extern void cpu_hotplug_done(void);
+-extern void get_online_cpus(void);
+-extern void put_online_cpus(void);
++extern void cpus_write_lock(void);
++extern void cpus_write_unlock(void);
++extern void cpus_read_lock(void);
++extern void cpus_read_unlock(void);
+ extern void cpu_hotplug_disable(void);
+ extern void cpu_hotplug_enable(void);
+ void clear_tasks_mm_cpumask(int cpu);
+ int cpu_down(unsigned int cpu);
+
+-#else		/* CONFIG_HOTPLUG_CPU */
++#else /* CONFIG_HOTPLUG_CPU */
+
+-static inline void cpu_hotplug_begin(void) {}
+-static inline void cpu_hotplug_done(void) {}
+-#define get_online_cpus() do { } while (0)
+-#define put_online_cpus() do { } while (0)
+-#define cpu_hotplug_disable() do { } while (0)
+-#define cpu_hotplug_enable() do { } while (0)
+-#endif /* CONFIG_HOTPLUG_CPU */
++static inline void cpus_write_lock(void) { }
++static inline void cpus_write_unlock(void) { }
++static inline void cpus_read_lock(void) { }
++static inline void cpus_read_unlock(void) { }
++static inline void cpu_hotplug_disable(void) { }
++static inline void cpu_hotplug_enable(void) { }
++#endif /* !CONFIG_HOTPLUG_CPU */
++
++/* Wrappers which go away once all code is converted */
++static inline void cpu_hotplug_begin(void) { cpus_write_lock(); }
++static inline void cpu_hotplug_done(void) { cpus_write_unlock(); }
++static inline void get_online_cpus(void) { cpus_read_lock(); }
++static inline void put_online_cpus(void) { cpus_read_unlock(); }
+
+ #ifdef CONFIG_PM_SLEEP_SMP
+ extern int freeze_secondary_cpus(int primary);
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 9ae6fbe5b5cf..d3221ae5b474 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -235,7 +235,7 @@ static struct {
+ #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
+
+
+-void get_online_cpus(void)
++void cpus_read_lock(void)
+ {
+ might_sleep();
+ if (cpu_hotplug.active_writer == current)
+@@ -245,9 +245,9 @@ void get_online_cpus(void)
+ atomic_inc(&cpu_hotplug.refcount);
+ mutex_unlock(&cpu_hotplug.lock);
+ }
+-EXPORT_SYMBOL_GPL(get_online_cpus);
++EXPORT_SYMBOL_GPL(cpus_read_lock);
+
+-void put_online_cpus(void)
++void cpus_read_unlock(void)
+ {
+ int refcount;
+
+@@ -264,7 +264,7 @@ void put_online_cpus(void)
+ cpuhp_lock_release();
+
+ }
+-EXPORT_SYMBOL_GPL(put_online_cpus);
++EXPORT_SYMBOL_GPL(cpus_read_unlock);
+
+ /*
+ * This ensures that the hotplug operation can begin only when the
+@@ -288,7 +288,7 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
+ * get_online_cpus() not an api which is called all that often.
+ *
+ */
+-void cpu_hotplug_begin(void)
++void cpus_write_lock(void)
+ {
+ DEFINE_WAIT(wait);
+
+@@ -306,7 +306,7 @@ void cpu_hotplug_begin(void)
+ finish_wait(&cpu_hotplug.wq, &wait);
+ }
+
+-void cpu_hotplug_done(void)
++void cpus_write_unlock(void)
+ {
+ cpu_hotplug.active_writer = NULL;
+ mutex_unlock(&cpu_hotplug.lock);
+@@ -773,7 +773,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+ if (!cpu_present(cpu))
+ return -EINVAL;
+
+- cpu_hotplug_begin();
++ cpus_write_lock();
+
+ cpuhp_tasks_frozen = tasks_frozen;
+
+@@ -811,7 +811,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+ }
+
+ out:
+- cpu_hotplug_done();
++ cpus_write_unlock();
+ return ret;
+ }
+
+@@ -893,7 +893,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
+ struct task_struct *idle;
+ int ret = 0;
+
+- cpu_hotplug_begin();
++ cpus_write_lock();
+
+ if (!cpu_present(cpu)) {
+ ret = -EINVAL;
+@@ -941,7 +941,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
+ target = min((int)target, CPUHP_BRINGUP_CPU);
+ ret = cpuhp_up_callbacks(cpu, st, target);
+ out:
+- cpu_hotplug_done();
++ cpus_write_unlock();
+ return ret;
+ }
+
+@@ -1424,7 +1424,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+ if (sp->multi_instance == false)
+ return -EINVAL;
+
+- get_online_cpus();
++ cpus_read_lock();
+ mutex_lock(&cpuhp_state_mutex);
+
+ if (!invoke || !sp->startup.multi)
+@@ -1453,7 +1453,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+ hlist_add_head(node, &sp->list);
+ unlock:
+ mutex_unlock(&cpuhp_state_mutex);
+- put_online_cpus();
++ cpus_read_unlock();
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
+@@ -1486,7 +1486,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
+ if (cpuhp_cb_check(state) || !name)
+ return -EINVAL;
+
+- get_online_cpus();
++ cpus_read_lock();
+ mutex_lock(&cpuhp_state_mutex);
+
+ ret = cpuhp_store_callbacks(state, name, startup, teardown,
+@@ -1522,7 +1522,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
+ }
+ out:
+ mutex_unlock(&cpuhp_state_mutex);
+- put_online_cpus();
++ cpus_read_unlock();
+ /*
+ * If the requested state is CPUHP_AP_ONLINE_DYN, return the
+ * dynamically allocated state in case of success.
+@@ -1544,7 +1544,7 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+ if (!sp->multi_instance)
+ return -EINVAL;
+
+- get_online_cpus();
++ cpus_read_lock();
+ mutex_lock(&cpuhp_state_mutex);
+
+ if (!invoke || !cpuhp_get_teardown_cb(state))
+@@ -1565,7 +1565,7 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+ remove:
+ hlist_del(node);
+ mutex_unlock(&cpuhp_state_mutex);
+- put_online_cpus();
++ cpus_read_unlock();
+
+ return 0;
+ }
+@@ -1587,7 +1587,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+
+ BUG_ON(cpuhp_cb_check(state));
+
+- get_online_cpus();
++ cpus_read_lock();
+
+ mutex_lock(&cpuhp_state_mutex);
+ if (sp->multi_instance) {
+@@ -1615,7 +1615,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+ remove:
+ cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
+ mutex_unlock(&cpuhp_state_mutex);
+- put_online_cpus();
++ cpus_read_unlock();
+ }
+ EXPORT_SYMBOL(__cpuhp_remove_state);
+
+--
+2.11.0
+
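
A minimal usage sketch of the renamed read-side API (a hypothetical caller, not part of the patch above): any code that previously bracketed a CPU walk with get_online_cpus()/put_online_cpus() can call the new names directly. demo_count_online_cpus() is an invented helper for illustration; cpus_read_lock()/cpus_read_unlock() and the legacy wrappers are exactly the interfaces introduced by this patch.

#include <linux/cpu.h>
#include <linux/cpumask.h>

/* Hypothetical example: walk the online CPUs with hotplug excluded,
 * so no CPU can come or go while we iterate. */
static unsigned int demo_count_online_cpus(void)
{
	unsigned int cpu, n = 0;

	cpus_read_lock();		/* was: get_online_cpus() */
	for_each_online_cpu(cpu)
		n++;
	cpus_read_unlock();		/* was: put_online_cpus() */

	return n;
}

The write side, cpus_write_lock()/cpus_write_unlock(), is taken only by the hotplug core itself in _cpu_up()/_cpu_down(), which is why cpu_hotplug_begin()/cpu_hotplug_done() and get_online_cpus()/put_online_cpus() survive here only as inline wrappers until all callers are converted.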