 -rw-r--r--  patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch  | 26
 -rw-r--r--  patches/futex-workaround-migrate_disable-enable-in-different.patch  | 12
 -rw-r--r--  patches/localversion.patch                                          |  2
 -rw-r--r--  patches/perf-x86-intel-Delay-memory-deallocation-until-cpu_d.patch  | 73
 -rw-r--r--  patches/pinctrl-bcm2835-Use-raw-spinlock-for-RT-compatibilit.patch  | 96
 -rw-r--r--  patches/series                                                      |  2
 6 files changed, 27 insertions(+), 184 deletions(-)
diff --git a/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
index 0f00b23dff2d..11ae91954d86 100644
--- a/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
+++ b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
@@ -9,13 +9,13 @@ OTOH, they're a lot less wasteful than an rtmutex per page.
Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- drivers/block/zram/zram_drv.c | 28 ++++++++++++++++++++++++++++
+ drivers/block/zram/zram_drv.c | 38 ++++++++++++++++++++++++++++++++++++++
drivers/block/zram/zram_drv.h | 3 +++
- 2 files changed, 31 insertions(+)
+ 2 files changed, 41 insertions(+)
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
-@@ -53,6 +53,30 @@ static size_t huge_class_size;
+@@ -53,6 +53,40 @@ static size_t huge_class_size;
static void zram_free_page(struct zram *zram, size_t index);
@@ -28,6 +28,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ spin_lock_init(&zram->table[index].lock);
+}
+
++static int zram_slot_trylock(struct zram *zram, u32 index)
++{
++ int ret;
++
++ ret = spin_trylock(&zram->table[index].lock);
++ if (ret)
++ __set_bit(ZRAM_LOCK, &zram->table[index].value);
++ return ret;
++}
++
+static void zram_slot_lock(struct zram *zram, u32 index)
+{
+ spin_lock(&zram->table[index].lock);
@@ -43,10 +53,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#else
+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { }
+
- static void zram_slot_lock(struct zram *zram, u32 index)
+ static int zram_slot_trylock(struct zram *zram, u32 index)
{
- bit_spin_lock(ZRAM_LOCK, &zram->table[index].value);
-@@ -62,6 +86,7 @@ static void zram_slot_unlock(struct zram
+ return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].value);
+@@ -67,6 +101,7 @@ static void zram_slot_unlock(struct zram
{
bit_spin_unlock(ZRAM_LOCK, &zram->table[index].value);
}
@@ -54,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline bool init_done(struct zram *zram)
{
-@@ -900,6 +925,8 @@ static DEVICE_ATTR_RO(io_stat);
+@@ -900,6 +935,8 @@ static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);
@@ -63,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void zram_meta_free(struct zram *zram, u64 disksize)
{
size_t num_pages = disksize >> PAGE_SHIFT;
-@@ -930,6 +957,7 @@ static bool zram_meta_alloc(struct zram
+@@ -930,6 +967,7 @@ static bool zram_meta_alloc(struct zram
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
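
The refreshed hunks above add a spinlock-backed zram_slot_trylock() so the RT replacement keeps the trylock semantics upstream now implements with bit_spin_trylock(). A minimal caller sketch, written as if it lived inside zram_drv.c; zram_reclaim_idle_slots() is a made-up name for illustration, only zram_free_page() and the slot-lock helpers come from the driver:

static void zram_reclaim_idle_slots(struct zram *zram, size_t num_pages)
{
	u32 index;

	for (index = 0; index < num_pages; index++) {
		/* Non-blocking: skip slots that are currently held. */
		if (!zram_slot_trylock(zram, index))
			continue;
		zram_free_page(zram, index);
		zram_slot_unlock(zram, index);
	}
}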
diff --git a/patches/futex-workaround-migrate_disable-enable-in-different.patch b/patches/futex-workaround-migrate_disable-enable-in-different.patch
index 317df4e52e6f..307166696f27 100644
--- a/patches/futex-workaround-migrate_disable-enable-in-different.patch
+++ b/patches/futex-workaround-migrate_disable-enable-in-different.patch
@@ -15,8 +15,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2855,9 +2855,18 @@ static int futex_lock_pi(u32 __user *uad
- * lock handoff sequence.
+@@ -2856,6 +2856,14 @@ static int futex_lock_pi(u32 __user *uad
+ * before __rt_mutex_start_proxy_lock() is done.
*/
raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
+ /*
@@ -28,14 +28,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ migrate_disable();
+
spin_unlock(q.lock_ptr);
+ /*
+ * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
+@@ -2864,6 +2872,7 @@ static int futex_lock_pi(u32 __user *uad
+ */
ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
+ migrate_enable();
if (ret) {
if (ret == 1)
-@@ -3004,11 +3013,21 @@ static int futex_unlock_pi(u32 __user *u
- * observed.
+@@ -3012,11 +3021,21 @@ static int futex_unlock_pi(u32 __user *u
+ * rt_waiter. Also see the WARN in wake_futex_pi().
*/
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ /*
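
Pieced together from the two futex_lock_pi() hunks above, the added pair keeps migration disabled across the window where the hash-bucket lock (a sleeping lock on RT) is dropped while the raw wait_lock is still held, and only re-enables it once the raw lock is released. Schematically (simplified; names taken from the hunks, not the literal kernel/futex.c code):

	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
	migrate_disable();		/* pin the task before the sleeping unlock */
	spin_unlock(q.lock_ptr);	/* sleeping spinlock on RT */
	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex,
					  &rt_waiter, current);
	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
	migrate_enable();		/* matching enable, once the raw lock is gone */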
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 12bd473a33f5..25e5fadbaae8 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt12
++-rt13
diff --git a/patches/perf-x86-intel-Delay-memory-deallocation-until-cpu_d.patch b/patches/perf-x86-intel-Delay-memory-deallocation-until-cpu_d.patch
deleted file mode 100644
index e8c998beac5e..000000000000
--- a/patches/perf-x86-intel-Delay-memory-deallocation-until-cpu_d.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-From: Peter Zijlstra <peterz@infradead.org>
-Date: Wed, 19 Dec 2018 16:46:44 +0100
-Subject: [PATCH] perf/x86/intel: Delay memory deallocation until cpu_dead
-
-intel_pmu_cpu_prepare() allocated memory for ->shared_regs among other
-members of struct cpu_hw_events. This memory is released in
-intel_pmu_cpu_dying() which is wrong. The counterpart of the
-intel_pmu_cpu_prepare() callback is x86_pmu_dead_cpu().
-
-Otherwise if the CPU fails on the UP path between CPUHP_PERF_X86_PREPARE
-and CPUHP_AP_PERF_X86_STARTING then it won't release the memory but
-allocate new memory on the next attempt to online the CPU (leaking the
-old memory).
-Also, if the CPU down path fails between CPUHP_AP_PERF_X86_STARTING and
-CPUHP_PERF_X86_PREPARE then the CPU will go back online but never
-allocate the memory that was released in x86_pmu_dying_cpu().
-
-Make the memory allocation/free symmetrical in regard to the CPU hotplug
-notifier by moving the deallocation to intel_pmu_cpu_dead().
-
-This started in commit
- a7e3ed1e47011 ("perf: Add support for supplementary event registers").
-
-Cc: stable@vger.kernel.org
-Reported-by: He Zhe <zhe.he@windriver.com>
-Fixes: a7e3ed1e47011 ("perf: Add support for supplementary event registers").
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-[bigeasy: patch description]
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/events/intel/core.c | 10 ++++++++--
- 1 file changed, 8 insertions(+), 2 deletions(-)
-
---- a/arch/x86/events/intel/core.c
-+++ b/arch/x86/events/intel/core.c
-@@ -3440,6 +3440,11 @@ static void free_excl_cntrs(int cpu)
-
- static void intel_pmu_cpu_dying(int cpu)
- {
-+ fini_debug_store_on_cpu(cpu);
-+}
-+
-+static void intel_pmu_cpu_dead(int cpu)
-+{
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- struct intel_shared_regs *pc;
-
-@@ -3451,8 +3456,6 @@ static void intel_pmu_cpu_dying(int cpu)
- }
-
- free_excl_cntrs(cpu);
--
-- fini_debug_store_on_cpu(cpu);
- }
-
- static void intel_pmu_sched_task(struct perf_event_context *ctx,
-@@ -3541,6 +3544,7 @@ static __initconst const struct x86_pmu
- .cpu_prepare = intel_pmu_cpu_prepare,
- .cpu_starting = intel_pmu_cpu_starting,
- .cpu_dying = intel_pmu_cpu_dying,
-+ .cpu_dead = intel_pmu_cpu_dead,
- };
-
- static struct attribute *intel_pmu_attrs[];
-@@ -3581,6 +3585,8 @@ static __initconst const struct x86_pmu
- .cpu_prepare = intel_pmu_cpu_prepare,
- .cpu_starting = intel_pmu_cpu_starting,
- .cpu_dying = intel_pmu_cpu_dying,
-+ .cpu_dead = intel_pmu_cpu_dead,
-+
- .guest_get_msrs = intel_guest_get_msrs,
- .sched_task = intel_pmu_sched_task,
- };
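
The dropped patch argues that allocation and deallocation must sit on matching hotplug states: cpu_prepare() pairs with cpu_dead(), not cpu_dying(), because a CPU can fail between CPUHP_PERF_X86_PREPARE and CPUHP_AP_PERF_X86_STARTING in either direction. A generic, self-contained sketch of that pairing, assuming made-up names (not the real arch/x86/events/intel/core.c code):

#include <linux/percpu.h>
#include <linux/slab.h>

static DEFINE_PER_CPU(void *, example_shared_regs);

/* Runs at the PREPARE step on the way up: allocate per-CPU state. */
static int example_cpu_prepare(unsigned int cpu)
{
	/* 4096 is a placeholder size for the illustration. */
	per_cpu(example_shared_regs, cpu) = kzalloc(4096, GFP_KERNEL);
	return per_cpu(example_shared_regs, cpu) ? 0 : -ENOMEM;
}

/* Counterpart on the way down: runs even when the CPU never reached
 * (or failed before) the STARTING step, so the memory cannot leak and
 * the next online attempt starts from a clean slate. */
static int example_cpu_dead(unsigned int cpu)
{
	kfree(per_cpu(example_shared_regs, cpu));
	per_cpu(example_shared_regs, cpu) = NULL;
	return 0;
}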
diff --git a/patches/pinctrl-bcm2835-Use-raw-spinlock-for-RT-compatibilit.patch b/patches/pinctrl-bcm2835-Use-raw-spinlock-for-RT-compatibilit.patch
deleted file mode 100644
index 559af22b2d4c..000000000000
--- a/patches/pinctrl-bcm2835-Use-raw-spinlock-for-RT-compatibilit.patch
+++ /dev/null
@@ -1,96 +0,0 @@
-From: Lukas Wunner <lukas@wunner.de>
-Date: Sat, 27 Oct 2018 10:15:33 +0200
-Subject: [PATCH] pinctrl: bcm2835: Use raw spinlock for RT compatibility
-
-[Upstream commit 71dfaa749f2f7c1722ebf6716d3f797a04528cba]
-
-The BCM2835 pinctrl driver acquires a spinlock in its ->irq_enable,
-->irq_disable and ->irq_set_type callbacks. Spinlocks become sleeping
-locks with CONFIG_PREEMPT_RT_FULL=y, therefore invocation of one of the
-callbacks in atomic context may cause a hard lockup if at least two GPIO
-pins in the same bank are used as interrupts. The issue doesn't occur
-with just a single interrupt pin per bank because the lock is never
-contended. I'm experiencing such lockups with GPIO 8 and 28 used as
-level-triggered interrupts, i.e. with ->irq_disable being invoked on
-reception of every IRQ.
-
-The critical section protected by the spinlock is very small (one bitop
-and one RMW of an MMIO register), hence converting to a raw spinlock
-seems a better trade-off than converting the driver to threaded IRQ
-handling (which would increase latency to handle an interrupt).
-
-Cc: Mathias Duckeck <m.duckeck@kunbus.de>
-Signed-off-by: Lukas Wunner <lukas@wunner.de>
-Acked-by: Julia Cartwright <julia@ni.com>
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/pinctrl/bcm/pinctrl-bcm2835.c | 16 ++++++++--------
- 1 file changed, 8 insertions(+), 8 deletions(-)
-
---- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
-+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
-@@ -90,7 +90,7 @@ struct bcm2835_pinctrl {
- struct gpio_chip gpio_chip;
- struct pinctrl_gpio_range gpio_range;
-
-- spinlock_t irq_lock[BCM2835_NUM_BANKS];
-+ raw_spinlock_t irq_lock[BCM2835_NUM_BANKS];
- };
-
- /* pins are just named GPIO0..GPIO53 */
-@@ -461,10 +461,10 @@ static void bcm2835_gpio_irq_enable(stru
- unsigned bank = GPIO_REG_OFFSET(gpio);
- unsigned long flags;
-
-- spin_lock_irqsave(&pc->irq_lock[bank], flags);
-+ raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
- set_bit(offset, &pc->enabled_irq_map[bank]);
- bcm2835_gpio_irq_config(pc, gpio, true);
-- spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
-+ raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
- }
-
- static void bcm2835_gpio_irq_disable(struct irq_data *data)
-@@ -476,12 +476,12 @@ static void bcm2835_gpio_irq_disable(str
- unsigned bank = GPIO_REG_OFFSET(gpio);
- unsigned long flags;
-
-- spin_lock_irqsave(&pc->irq_lock[bank], flags);
-+ raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
- bcm2835_gpio_irq_config(pc, gpio, false);
- /* Clear events that were latched prior to clearing event sources */
- bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
- clear_bit(offset, &pc->enabled_irq_map[bank]);
-- spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
-+ raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
- }
-
- static int __bcm2835_gpio_irq_set_type_disabled(struct bcm2835_pinctrl *pc,
-@@ -584,7 +584,7 @@ static int bcm2835_gpio_irq_set_type(str
- unsigned long flags;
- int ret;
-
-- spin_lock_irqsave(&pc->irq_lock[bank], flags);
-+ raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
-
- if (test_bit(offset, &pc->enabled_irq_map[bank]))
- ret = __bcm2835_gpio_irq_set_type_enabled(pc, gpio, type);
-@@ -596,7 +596,7 @@ static int bcm2835_gpio_irq_set_type(str
- else
- irq_set_handler_locked(data, handle_level_irq);
-
-- spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
-+ raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
-
- return ret;
- }
-@@ -1047,7 +1047,7 @@ static int bcm2835_pinctrl_probe(struct
- for_each_set_bit(offset, &events, 32)
- bcm2835_gpio_wr(pc, GPEDS0 + i * 4, BIT(offset));
-
-- spin_lock_init(&pc->irq_lock[i]);
-+ raw_spin_lock_init(&pc->irq_lock[i]);
- }
-
- err = gpiochip_add_data(&pc->gpio_chip, pc);
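
The dropped patch (upstream commit 71dfaa749f2f) follows the standard PREEMPT_RT rule: when an irqchip callback runs in atomic context and the critical section is only a couple of bitops plus an MMIO read-modify-write, convert the lock to a raw spinlock instead of moving the work into a threaded handler. A generic sketch of the resulting pattern (illustrative only, not the driver's actual code):

#include <linux/spinlock.h>
#include <linux/bitops.h>

static DEFINE_RAW_SPINLOCK(bank_lock);
static unsigned long enabled_irq_map;

static void example_gpio_irq_enable(unsigned int offset)
{
	unsigned long flags;

	/* raw_spinlock_t never becomes a sleeping lock on RT, so it is
	 * safe from ->irq_enable()/->irq_disable(), which may run with
	 * interrupts disabled. Keep the critical section tiny. */
	raw_spin_lock_irqsave(&bank_lock, flags);
	set_bit(offset, &enabled_irq_map);
	/* ... one MMIO read-modify-write in the real driver ... */
	raw_spin_unlock_irqrestore(&bank_lock, flags);
}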
diff --git a/patches/series b/patches/series
index 38f4776880f2..dab78135f40a 100644
--- a/patches/series
+++ b/patches/series
@@ -22,7 +22,6 @@
irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch
kthread-convert-worker-lock-to-raw-spinlock.patch
-pinctrl-bcm2835-Use-raw-spinlock-for-RT-compatibilit.patch
crypto-caam-qi-simplify-CGR-allocation-freeing.patch
sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
@@ -37,7 +36,6 @@ cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
fscache-initialize-cookie-hash-table-raw-spinlocks.patch
Drivers-hv-vmbus-include-header-for-get_irq_regs.patch
percpu-include-irqflags.h-for-raw_local_irq_save.patch
-perf-x86-intel-Delay-memory-deallocation-until-cpu_d.patch
############################################################
# Ready for posting