author      Sebastian Andrzej Siewior <bigeasy@linutronix.de>    2019-05-10 13:21:34 +0200
committer   Sebastian Andrzej Siewior <bigeasy@linutronix.de>    2019-05-10 13:21:34 +0200
commit      3204e8fa50bbba496843e316ff859d91702113aa (patch)
tree        956e694d54b222b3491207809978f3656ba9e2a3
parent      d765154e6727ba66c21f6179a5df3564696364e4 (diff)
download    linux-rt-3204e8fa50bbba496843e316ff859d91702113aa.tar.gz

[ANNOUNCE] v5.0.14-rt9    (v5.0.14-rt9-patches)
Dear RT folks!

I'm pleased to announce the v5.0.14-rt9 patch set.

Changes since v5.0.14-rt8:

  - Replace one x86 related FPU patch with what landed upstream.

  - IOMMU series by Julien Grall to avoid sleeping locks in
    non-preemptible context.

  - Fix a race in wait_for_completion(). Patched by Corey Minyard.

Known issues

  - A warning triggered in "rcu_note_context_switch" originated from
    SyS_timer_gettime(). The issue was always there, it is now visible.
    Reported by Grygorii Strashko and Daniel Wagner.

  - rcutorture is currently broken on -RT. Reported by Juri Lelli.

The delta patch against v5.0.14-rt8 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.0/incr/patch-5.0.14-rt8-rt9.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.0.14-rt9

The RT patch against v5.0.14 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patch-5.0.14-rt9.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.14-rt9.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
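For readers not following the MSI rework: the idea behind Julien Grall's series is to split the doorbell mapping into a "prepare" step, which may sleep and is done once when the MSI interrupt is allocated, and a "compose" step that only reads the cached result and can therefore run with interrupts disabled. The standalone C program below is only a sketch of that split; the kernel functions it mirrors are iommu_dma_prepare_msi() and iommu_dma_compose_msi_msg() from the patches in the diff, while the types, names and the fake IOVA calculation here are simplified stand-ins and not kernel code.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct msi_msg  { uint32_t address_lo; uint32_t address_hi; uint32_t data; };
    struct msi_page { uint64_t iova; };

    struct msi_desc {
            const void *iommu_cookie;   /* models the new msi_desc::iommu_cookie field */
    };

    /*
     * "Prepare" step: may sleep, so it runs at interrupt-allocation time
     * (preemptible context).  It creates the mapping once and caches it
     * in the descriptor.
     */
    static int prepare_msi(struct msi_desc *desc, uint64_t doorbell_pa)
    {
            struct msi_page *page = malloc(sizeof(*page));

            if (!page)
                    return -1;
            page->iova = doorbell_pa ^ 0xffff000000ULL; /* stand-in for the real IOVA */
            desc->iommu_cookie = page;
            return 0;
    }

    /*
     * "Compose" step: must not sleep; it only reads the cookie cached by
     * prepare_msi() and fills in the MSI message.
     */
    static void compose_msi_msg(const struct msi_desc *desc, struct msi_msg *msg)
    {
            const struct msi_page *page = desc->iommu_cookie;

            msg->address_hi = (uint32_t)(page->iova >> 32);
            msg->address_lo = (uint32_t)page->iova;
            msg->data = 0;
    }

    int main(void)
    {
            struct msi_desc desc = { 0 };
            struct msi_msg msg;

            if (prepare_msi(&desc, 0x8080000ULL))  /* at irq_domain_alloc() time */
                    return 1;
            compose_msi_msg(&desc, &msg);          /* at irq_compose_msi_msg() time */
            printf("doorbell IOVA: 0x%08x%08x\n", msg.address_hi, msg.address_lo);
            free((void *)desc.iommu_cookie);
            return 0;
    }

That is exactly the ordering the irqchip patches below adopt: iommu_dma_prepare_msi() is called from the *_irq_domain_alloc() callbacks, and the compose callbacks call iommu_dma_compose_msi_msg() instead of the old iommu_dma_map_msi_msg().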
-rw-r--r--  patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch |  14
-rw-r--r--  patches/0001-genirq-msi-Add-a-new-field-in-msi_desc-to-store-an-I.patch |  89
-rw-r--r--  patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch |  20
-rw-r--r--  patches/0002-iommu-dma-iommu-Split-iommu_dma_map_msi_msg-in-two-p.patch | 168
-rw-r--r--  patches/0003-irqchip-gicv2m-Don-t-map-the-MSI-page-in-gicv2m_comp.patch |  58
-rw-r--r--  patches/0004-irqchip-gic-v3-its-Don-t-map-the-MSI-page-in-its_irq.patch |  57
-rw-r--r--  patches/0005-irqchip-ls-scfg-msi-Don-t-map-the-MSI-page-in-ls_scf.patch |  56
-rw-r--r--  patches/0006-irqchip-gic-v3-mbi-Don-t-map-the-MSI-page-in-mbi_com.patch |  67
-rw-r--r--  patches/0007-iommu-dma-iommu-Remove-iommu_dma_map_msi_msg.patch |  71
-rw-r--r--  patches/0011-printk_safe-remove-printk-safe-code.patch |   4
-rw-r--r--  patches/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch |   8
-rw-r--r--  patches/0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch |   4
-rw-r--r--  patches/0028-x86-fpu-Fault-in-user-stack-if-copy_fpstate_to_sigfr.patch |  45
-rw-r--r--  patches/0029-x86-fpu-Remove-unnecessary-saving-of-FPU-registers-i.patch |  66
-rw-r--r--  patches/arm-disable-NEON-in-kernel-mode.patch |   2
-rw-r--r--  patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch |   2
-rw-r--r--  patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch |   6
-rw-r--r--  patches/fs-aio-simple-simple-work.patch |   4
-rw-r--r--  patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch |   2
-rw-r--r--  patches/ftrace-Fix-trace-header-alignment.patch |   2
-rw-r--r--  patches/ftrace-migrate-disable-tracing.patch |   4
-rw-r--r--  patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch |   2
-rw-r--r--  patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch |  34
-rw-r--r--  patches/localversion.patch |   2
-rw-r--r--  patches/mm-convert-swap-to-percpu-locked.patch |   2
-rw-r--r--  patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch |  38
-rw-r--r--  patches/preempt-lazy-support.patch |  24
-rw-r--r--  patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch |   2
-rw-r--r--  patches/sched-completion-Fix-a-lockup-in-wait_for_completion.patch |  60
-rw-r--r--  patches/sched-fair-Make-the-hrtimers-non-hard-again.patch |   2
-rw-r--r--  patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch |  16
-rw-r--r--  patches/series |  12
-rw-r--r--  patches/workqueue-use-locallock.patch |   6
-rw-r--r--  patches/workqueue-use-rcu.patch |  18
-rw-r--r--  patches/x86-kvm-require-const-tsc-for-rt.patch |   2
35 files changed, 774 insertions, 195 deletions
diff --git a/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
index 9c568f660e6f..5a224c6391d1 100644
--- a/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
+++ b/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -1112,7 +1112,7 @@ static inline void prefetch_buddy(struct
+@@ -1125,7 +1125,7 @@ static inline void prefetch_buddy(struct
}
/*
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
-@@ -1123,14 +1123,41 @@ static inline void prefetch_buddy(struct
+@@ -1136,14 +1136,41 @@ static inline void prefetch_buddy(struct
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while (count) {
struct list_head *list;
-@@ -1162,7 +1189,7 @@ static void free_pcppages_bulk(struct zo
+@@ -1175,7 +1202,7 @@ static void free_pcppages_bulk(struct zo
if (bulkfree_pcp_prepare(page))
continue;
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We are going to put the page back to the global
-@@ -1177,26 +1204,6 @@ static void free_pcppages_bulk(struct zo
+@@ -1190,26 +1217,6 @@ static void free_pcppages_bulk(struct zo
prefetch_buddy(page);
} while (--count && --batch_free && !list_empty(list));
}
@@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void free_one_page(struct zone *zone,
-@@ -2608,13 +2615,18 @@ void drain_zone_pages(struct zone *zone,
+@@ -2621,13 +2628,18 @@ void drain_zone_pages(struct zone *zone,
{
unsigned long flags;
int to_drain, batch;
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -2630,14 +2642,21 @@ static void drain_pages_zone(unsigned in
+@@ -2643,14 +2655,21 @@ static void drain_pages_zone(unsigned in
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -152,7 +152,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2865,7 +2884,10 @@ static void free_unref_page_commit(struc
+@@ -2878,7 +2897,10 @@ static void free_unref_page_commit(struc
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
diff --git a/patches/0001-genirq-msi-Add-a-new-field-in-msi_desc-to-store-an-I.patch b/patches/0001-genirq-msi-Add-a-new-field-in-msi_desc-to-store-an-I.patch
new file mode 100644
index 000000000000..dd0de9305bc8
--- /dev/null
+++ b/patches/0001-genirq-msi-Add-a-new-field-in-msi_desc-to-store-an-I.patch
@@ -0,0 +1,89 @@
+From: Julien Grall <julien.grall@arm.com>
+Date: Wed, 1 May 2019 14:58:18 +0100
+Subject: [PATCH 1/7] genirq/msi: Add a new field in msi_desc to store an IOMMU
+ cookie
+
+[Upstream commit aaebdf8d68479f78d9f72b239684f70fbb0722c6]
+
+When an MSI doorbell is located downstream of an IOMMU, it is required
+to swizzle the physical address with an appropriately-mapped IOVA for any
+device attached to one of our DMA ops domain.
+
+At the moment, the allocation of the mapping may be done when composing
+the message. However, the composing may be done in non-preemptible
+context while the allocation requires to be called from preemptible
+context.
+
+A follow-up change will split the current logic in two functions
+requiring to keep an IOMMU cookie per MSI.
+
+A new field is introduced in msi_desc to store an IOMMU cookie. As the
+cookie may not be required in some configuration, the field is protected
+under a new config CONFIG_IRQ_MSI_IOMMU.
+
+A pair of helpers has also been introduced to access the field.
+
+Signed-off-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/msi.h | 26 ++++++++++++++++++++++++++
+ kernel/irq/Kconfig | 3 +++
+ 2 files changed, 29 insertions(+)
+
+--- a/include/linux/msi.h
++++ b/include/linux/msi.h
+@@ -77,6 +77,9 @@ struct msi_desc {
+ struct device *dev;
+ struct msi_msg msg;
+ struct irq_affinity_desc *affinity;
++#ifdef CONFIG_IRQ_MSI_IOMMU
++ const void *iommu_cookie;
++#endif
+
+ union {
+ /* PCI MSI/X specific data */
+@@ -119,6 +122,29 @@ struct msi_desc {
+ #define for_each_msi_entry_safe(desc, tmp, dev) \
+ list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
+
++#ifdef CONFIG_IRQ_MSI_IOMMU
++static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
++{
++ return desc->iommu_cookie;
++}
++
++static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
++ const void *iommu_cookie)
++{
++ desc->iommu_cookie = iommu_cookie;
++}
++#else
++static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
++{
++ return NULL;
++}
++
++static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
++ const void *iommu_cookie)
++{
++}
++#endif
++
+ #ifdef CONFIG_PCI_MSI
+ #define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
+ #define for_each_pci_msi_entry(desc, pdev) \
+--- a/kernel/irq/Kconfig
++++ b/kernel/irq/Kconfig
+@@ -91,6 +91,9 @@ config GENERIC_MSI_IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_MSI_IRQ
+
++config IRQ_MSI_IOMMU
++ bool
++
+ config HANDLE_DOMAIN_IRQ
+ bool
+
diff --git a/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
index 5c332a844145..ddd757503689 100644
--- a/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
+++ b/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -1122,8 +1122,8 @@ static inline void prefetch_buddy(struct
+@@ -1135,8 +1135,8 @@ static inline void prefetch_buddy(struct
* And clear the zone's pages_scanned counter, to hold off the "all pages are
* pinned" detection logic.
*/
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
bool isolated_pageblocks;
struct page *page, *tmp;
-@@ -1138,12 +1138,27 @@ static void free_pcppages_bulk(struct zo
+@@ -1151,12 +1151,27 @@ static void free_pcppages_bulk(struct zo
*/
list_for_each_entry_safe(page, tmp, head, lru) {
int mt = get_pcppage_migratetype(page);
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__free_one_page(page, page_to_pfn(page), zone, 0, mt);
trace_mm_page_pcpu_drain(page, 0, mt);
}
-@@ -2626,7 +2641,7 @@ void drain_zone_pages(struct zone *zone,
+@@ -2639,7 +2654,7 @@ void drain_zone_pages(struct zone *zone,
local_irq_restore(flags);
if (to_drain > 0)
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -2656,7 +2671,7 @@ static void drain_pages_zone(unsigned in
+@@ -2669,7 +2684,7 @@ static void drain_pages_zone(unsigned in
local_irq_restore(flags);
if (count)
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2855,7 +2870,8 @@ static bool free_unref_page_prepare(stru
+@@ -2868,7 +2883,8 @@ static bool free_unref_page_prepare(stru
return true;
}
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
-@@ -2884,10 +2900,8 @@ static void free_unref_page_commit(struc
+@@ -2897,10 +2913,8 @@ static void free_unref_page_commit(struc
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -2898,13 +2912,17 @@ void free_unref_page(struct page *page)
+@@ -2911,13 +2925,17 @@ void free_unref_page(struct page *page)
{
unsigned long flags;
unsigned long pfn = page_to_pfn(page);
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2915,6 +2933,11 @@ void free_unref_page_list(struct list_he
+@@ -2928,6 +2946,11 @@ void free_unref_page_list(struct list_he
struct page *page, *next;
unsigned long flags, pfn;
int batch_count = 0;
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Prepare pages for freeing */
list_for_each_entry_safe(page, next, list, lru) {
-@@ -2927,10 +2950,12 @@ void free_unref_page_list(struct list_he
+@@ -2940,10 +2963,12 @@ void free_unref_page_list(struct list_he
local_irq_save(flags);
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Guard against excessive IRQ disabled times when we get
-@@ -2943,6 +2968,21 @@ void free_unref_page_list(struct list_he
+@@ -2956,6 +2981,21 @@ void free_unref_page_list(struct list_he
}
}
local_irq_restore(flags);
diff --git a/patches/0002-iommu-dma-iommu-Split-iommu_dma_map_msi_msg-in-two-p.patch b/patches/0002-iommu-dma-iommu-Split-iommu_dma_map_msi_msg-in-two-p.patch
new file mode 100644
index 000000000000..574aa66663b0
--- /dev/null
+++ b/patches/0002-iommu-dma-iommu-Split-iommu_dma_map_msi_msg-in-two-p.patch
@@ -0,0 +1,168 @@
+From: Julien Grall <julien.grall@arm.com>
+Date: Wed, 1 May 2019 14:58:19 +0100
+Subject: [PATCH 2/7] iommu/dma-iommu: Split iommu_dma_map_msi_msg() in two
+ parts
+
+[ Upstream commit ece6e6f0218b7777e650bf93728130ae6f4feb7d]
+
+On RT, iommu_dma_map_msi_msg() may be called from non-preemptible
+context. This will lead to a splat with CONFIG_DEBUG_ATOMIC_SLEEP as
+the function is using spin_lock (they can sleep on RT).
+
+iommu_dma_map_msi_msg() is used to map the MSI page in the IOMMU PT
+and update the MSI message with the IOVA.
+
+Only the part to lookup for the MSI page requires to be called in
+preemptible context. As the MSI page cannot change over the lifecycle
+of the MSI interrupt, the lookup can be cached and re-used later on.
+
+iommu_dma_map_msi_msg() is now split in two functions:
+ - iommu_dma_prepare_msi(): This function will prepare the mapping
+ in the IOMMU and store the cookie in the structure msi_desc. This
+ function should be called in preemptible context.
+ - iommu_dma_compose_msi_msg(): This function will update the MSI
+ message with the IOVA when the device is behind an IOMMU.
+
+Signed-off-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Acked-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/iommu/Kconfig | 1 +
+ drivers/iommu/dma-iommu.c | 46 +++++++++++++++++++++++++++++++++++++---------
+ include/linux/dma-iommu.h | 25 +++++++++++++++++++++++++
+ 3 files changed, 63 insertions(+), 9 deletions(-)
+
+--- a/drivers/iommu/Kconfig
++++ b/drivers/iommu/Kconfig
+@@ -93,6 +93,7 @@ config IOMMU_DMA
+ bool
+ select IOMMU_API
+ select IOMMU_IOVA
++ select IRQ_MSI_IOMMU
+ select NEED_SG_DMA_LENGTH
+
+ config FSL_PAMU
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -889,17 +889,18 @@ static struct iommu_dma_msi_page *iommu_
+ return NULL;
+ }
+
+-void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
++int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
+ {
+- struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
++ struct device *dev = msi_desc_to_dev(desc);
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct iommu_dma_cookie *cookie;
+ struct iommu_dma_msi_page *msi_page;
+- phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
+ unsigned long flags;
+
+- if (!domain || !domain->iova_cookie)
+- return;
++ if (!domain || !domain->iova_cookie) {
++ desc->iommu_cookie = NULL;
++ return 0;
++ }
+
+ cookie = domain->iova_cookie;
+
+@@ -912,7 +913,36 @@ void iommu_dma_map_msi_msg(int irq, stru
+ msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
+ spin_unlock_irqrestore(&cookie->msi_lock, flags);
+
+- if (WARN_ON(!msi_page)) {
++ msi_desc_set_iommu_cookie(desc, msi_page);
++
++ if (!msi_page)
++ return -ENOMEM;
++ return 0;
++}
++
++void iommu_dma_compose_msi_msg(struct msi_desc *desc,
++ struct msi_msg *msg)
++{
++ struct device *dev = msi_desc_to_dev(desc);
++ const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
++ const struct iommu_dma_msi_page *msi_page;
++
++ msi_page = msi_desc_get_iommu_cookie(desc);
++
++ if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
++ return;
++
++ msg->address_hi = upper_32_bits(msi_page->iova);
++ msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
++ msg->address_lo += lower_32_bits(msi_page->iova);
++}
++
++void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
++{
++ struct msi_desc *desc = irq_get_msi_desc(irq);
++ phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
++
++ if (WARN_ON(iommu_dma_prepare_msi(desc, msi_addr))) {
+ /*
+ * We're called from a void callback, so the best we can do is
+ * 'fail' by filling the message with obviously bogus values.
+@@ -923,8 +953,6 @@ void iommu_dma_map_msi_msg(int irq, stru
+ msg->address_lo = ~0U;
+ msg->data = ~0U;
+ } else {
+- msg->address_hi = upper_32_bits(msi_page->iova);
+- msg->address_lo &= cookie_msi_granule(cookie) - 1;
+- msg->address_lo += lower_32_bits(msi_page->iova);
++ iommu_dma_compose_msi_msg(desc, msg);
+ }
+ }
+--- a/include/linux/dma-iommu.h
++++ b/include/linux/dma-iommu.h
+@@ -71,12 +71,26 @@ void iommu_dma_unmap_resource(struct dev
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+
+ /* The DMA API isn't _quite_ the whole story, though... */
++/*
++ * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU device
++ *
++ * The MSI page will be stored in @desc.
++ *
++ * Return: 0 on success otherwise an error describing the failure.
++ */
++int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
++
++/* Update the MSI message if required. */
++void iommu_dma_compose_msi_msg(struct msi_desc *desc,
++ struct msi_msg *msg);
++
+ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
+ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
+
+ #else
+
+ struct iommu_domain;
++struct msi_desc;
+ struct msi_msg;
+ struct device;
+
+@@ -99,6 +113,17 @@ static inline void iommu_put_dma_cookie(
+ {
+ }
+
++static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
++ phys_addr_t msi_addr)
++{
++ return 0;
++}
++
++static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc,
++ struct msi_msg *msg)
++{
++}
++
+ static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
+ {
+ }
diff --git a/patches/0003-irqchip-gicv2m-Don-t-map-the-MSI-page-in-gicv2m_comp.patch b/patches/0003-irqchip-gicv2m-Don-t-map-the-MSI-page-in-gicv2m_comp.patch
new file mode 100644
index 000000000000..95d56ab9c790
--- /dev/null
+++ b/patches/0003-irqchip-gicv2m-Don-t-map-the-MSI-page-in-gicv2m_comp.patch
@@ -0,0 +1,58 @@
+From: Julien Grall <julien.grall@arm.com>
+Date: Wed, 1 May 2019 14:58:20 +0100
+Subject: [PATCH 3/7] irqchip/gicv2m: Don't map the MSI page in
+ gicv2m_compose_msi_msg()
+
+[Upstream commit 737be74710f30e611ee871f7b4f47975d1c6f71a ]
+
+gicv2m_compose_msi_msg() may be called from non-preemptible context.
+However, on RT, iommu_dma_map_msi_msg() requires to be called from a
+preemptible context.
+
+A recent change split iommu_dma_map_msi_msg() in two new functions:
+one that should be called in preemptible context, the other does
+not have any requirement.
+
+The GICv2m driver is reworked to avoid executing preemptible code in
+non-preemptible context. This can be achieved by preparing the MSI
+mapping when allocating the MSI interrupt.
+
+Signed-off-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/irqchip/irq-gic-v2m.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/irqchip/irq-gic-v2m.c
++++ b/drivers/irqchip/irq-gic-v2m.c
+@@ -110,7 +110,7 @@ static void gicv2m_compose_msi_msg(struc
+ if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
+ msg->data -= v2m->spi_offset;
+
+- iommu_dma_map_msi_msg(data->irq, msg);
++ iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
+ }
+
+ static struct irq_chip gicv2m_irq_chip = {
+@@ -167,6 +167,7 @@ static void gicv2m_unalloc_msi(struct v2
+ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+ {
++ msi_alloc_info_t *info = args;
+ struct v2m_data *v2m = NULL, *tmp;
+ int hwirq, offset, i, err = 0;
+
+@@ -186,6 +187,11 @@ static int gicv2m_irq_domain_alloc(struc
+
+ hwirq = v2m->spi_start + offset;
+
++ err = iommu_dma_prepare_msi(info->desc,
++ v2m->res.start + V2M_MSI_SETSPI_NS);
++ if (err)
++ return err;
++
+ for (i = 0; i < nr_irqs; i++) {
+ err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
+ if (err)
diff --git a/patches/0004-irqchip-gic-v3-its-Don-t-map-the-MSI-page-in-its_irq.patch b/patches/0004-irqchip-gic-v3-its-Don-t-map-the-MSI-page-in-its_irq.patch
new file mode 100644
index 000000000000..a66f511785e8
--- /dev/null
+++ b/patches/0004-irqchip-gic-v3-its-Don-t-map-the-MSI-page-in-its_irq.patch
@@ -0,0 +1,57 @@
+From: Julien Grall <julien.grall@arm.com>
+Date: Wed, 1 May 2019 14:58:21 +0100
+Subject: [PATCH 4/7] irqchip/gic-v3-its: Don't map the MSI page in
+ its_irq_compose_msi_msg()
+
+[ Upstream commit 35ae7df21be098848722f96f0f33bf33467436a8 ]
+
+its_irq_compose_msi_msg() may be called from non-preemptible context.
+However, on RT, iommu_dma_map_msi_msg requires to be called from a
+preemptible context.
+
+A recent change split iommu_dma_map_msi_msg() in two new functions:
+one that should be called in preemptible context, the other does
+not have any requirement.
+
+The GICv3 ITS driver is reworked to avoid executing preemptible code in
+non-preemptible context. This can be achieved by preparing the MSI
+mapping when allocating the MSI interrupt.
+
+Signed-off-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/irqchip/irq-gic-v3-its.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -1179,7 +1179,7 @@ static void its_irq_compose_msi_msg(stru
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = its_get_event_id(d);
+
+- iommu_dma_map_msi_msg(d->irq, msg);
++ iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
+ }
+
+ static int its_irq_set_irqchip_state(struct irq_data *d,
+@@ -2562,6 +2562,7 @@ static int its_irq_domain_alloc(struct i
+ {
+ msi_alloc_info_t *info = args;
+ struct its_device *its_dev = info->scratchpad[0].ptr;
++ struct its_node *its = its_dev->its;
+ irq_hw_number_t hwirq;
+ int err;
+ int i;
+@@ -2570,6 +2571,10 @@ static int its_irq_domain_alloc(struct i
+ if (err)
+ return err;
+
++ err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
++ if (err)
++ return err;
++
+ for (i = 0; i < nr_irqs; i++) {
+ err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
+ if (err)
diff --git a/patches/0005-irqchip-ls-scfg-msi-Don-t-map-the-MSI-page-in-ls_scf.patch b/patches/0005-irqchip-ls-scfg-msi-Don-t-map-the-MSI-page-in-ls_scf.patch
new file mode 100644
index 000000000000..92d9306cf9dd
--- /dev/null
+++ b/patches/0005-irqchip-ls-scfg-msi-Don-t-map-the-MSI-page-in-ls_scf.patch
@@ -0,0 +1,56 @@
+From: Julien Grall <julien.grall@arm.com>
+Date: Wed, 1 May 2019 14:58:22 +0100
+Subject: [PATCH 5/7] irqchip/ls-scfg-msi: Don't map the MSI page in
+ ls_scfg_msi_compose_msg()
+
+[ Upstream commit 2cb3b16545495ee31dc9438f88232c2cfe44a41f ]
+
+ls_scfg_msi_compose_msg() may be called from non-preemptible context.
+However, on RT, iommu_dma_map_msi_msg() requires to be called from a
+preemptible context.
+
+A recent patch split iommu_dma_map_msi_msg() in two new functions:
+one that should be called in preemptible context, the other does
+not have any requirement.
+
+The Freescale SCFG MSI driver is reworked to avoid executing preemptible
+code in non-preemptible context. This can be achieved by preparing the
+MSI mapping when allocating the MSI interrupt.
+
+Signed-off-by: Julien Grall <julien.grall@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/irqchip/irq-ls-scfg-msi.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/irqchip/irq-ls-scfg-msi.c
++++ b/drivers/irqchip/irq-ls-scfg-msi.c
+@@ -100,7 +100,7 @@ static void ls_scfg_msi_compose_msg(stru
+ msg->data |= cpumask_first(mask);
+ }
+
+- iommu_dma_map_msi_msg(data->irq, msg);
++ iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
+ }
+
+ static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
+@@ -141,6 +141,7 @@ static int ls_scfg_msi_domain_irq_alloc(
+ unsigned int nr_irqs,
+ void *args)
+ {
++ msi_alloc_info_t *info = args;
+ struct ls_scfg_msi *msi_data = domain->host_data;
+ int pos, err = 0;
+
+@@ -157,6 +158,10 @@ static int ls_scfg_msi_domain_irq_alloc(
+ if (err)
+ return err;
+
++ err = iommu_dma_prepare_msi(info->desc, msi_data->msiir_addr);
++ if (err)
++ return err;
++
+ irq_domain_set_info(domain, virq, pos,
+ &ls_scfg_msi_parent_chip, msi_data,
+ handle_simple_irq, NULL, NULL);
diff --git a/patches/0006-irqchip-gic-v3-mbi-Don-t-map-the-MSI-page-in-mbi_com.patch b/patches/0006-irqchip-gic-v3-mbi-Don-t-map-the-MSI-page-in-mbi_com.patch
new file mode 100644
index 000000000000..2be02f6f48ae
--- /dev/null
+++ b/patches/0006-irqchip-gic-v3-mbi-Don-t-map-the-MSI-page-in-mbi_com.patch
@@ -0,0 +1,67 @@
+From: Julien Grall <julien.grall@arm.com>
+Date: Wed, 1 May 2019 14:58:23 +0100
+Subject: [PATCH 6/7] irqchip/gic-v3-mbi: Don't map the MSI page in
+ mbi_compose_m{b, s}i_msg()
+
+[ Upstream commit 73103975425786ebdb6c4d2868ecf26f391fb77e ]
+
+The functions mbi_compose_m{b, s}i_msg may be called from non-preemptible
+context. However, on RT, iommu_dma_map_msi_msg() requires to be called
+from a preemptible context.
+
+A recent patch split iommu_dma_map_msi_msg in two new functions:
+one that should be called in preemptible context, the other does
+not have any requirement.
+
+The GICv3 MSI driver is reworked to avoid executing preemptible code in
+non-preemptible context. This can be achieved by preparing the MSI
+mapping when allocating the MSI interrupt.
+
+Signed-off-by: Julien Grall <julien.grall@arm.com>
+[maz: only call iommu_dma_prepare_msi once, fix commit log accordingly]
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/irqchip/irq-gic-v3-mbi.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/irqchip/irq-gic-v3-mbi.c
++++ b/drivers/irqchip/irq-gic-v3-mbi.c
+@@ -84,6 +84,7 @@ static void mbi_free_msi(struct mbi_rang
+ static int mbi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+ {
++ msi_alloc_info_t *info = args;
+ struct mbi_range *mbi = NULL;
+ int hwirq, offset, i, err = 0;
+
+@@ -104,6 +105,11 @@ static int mbi_irq_domain_alloc(struct i
+
+ hwirq = mbi->spi_start + offset;
+
++ err = iommu_dma_prepare_msi(info->desc,
++ mbi_phys_base + GICD_SETSPI_NSR);
++ if (err)
++ return err;
++
+ for (i = 0; i < nr_irqs; i++) {
+ err = mbi_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
+ if (err)
+@@ -142,7 +148,7 @@ static void mbi_compose_msi_msg(struct i
+ msg[0].address_lo = lower_32_bits(mbi_phys_base + GICD_SETSPI_NSR);
+ msg[0].data = data->parent_data->hwirq;
+
+- iommu_dma_map_msi_msg(data->irq, msg);
++ iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
+ }
+
+ #ifdef CONFIG_PCI_MSI
+@@ -202,7 +208,7 @@ static void mbi_compose_mbi_msg(struct i
+ msg[1].address_lo = lower_32_bits(mbi_phys_base + GICD_CLRSPI_NSR);
+ msg[1].data = data->parent_data->hwirq;
+
+- iommu_dma_map_msi_msg(data->irq, &msg[1]);
++ iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), &msg[1]);
+ }
+
+ /* Platform-MSI specific irqchip */
diff --git a/patches/0007-iommu-dma-iommu-Remove-iommu_dma_map_msi_msg.patch b/patches/0007-iommu-dma-iommu-Remove-iommu_dma_map_msi_msg.patch
new file mode 100644
index 000000000000..30c1bb68d34c
--- /dev/null
+++ b/patches/0007-iommu-dma-iommu-Remove-iommu_dma_map_msi_msg.patch
@@ -0,0 +1,71 @@
+From: Julien Grall <julien.grall@arm.com>
+Date: Wed, 1 May 2019 14:58:24 +0100
+Subject: [PATCH 7/7] iommu/dma-iommu: Remove iommu_dma_map_msi_msg()
+
+[ Upstream commit 16e32c3cde7763ab875b9030b443ecbc8e352d8a ]
+
+A recent change split iommu_dma_map_msi_msg() in two new functions. The
+function was still implemented to avoid modifying all the callers at
+once.
+
+Now that all the callers have been reworked, iommu_dma_map_msi_msg() can
+be removed.
+
+Signed-off-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Acked-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/iommu/dma-iommu.c | 20 --------------------
+ include/linux/dma-iommu.h | 5 -----
+ 2 files changed, 25 deletions(-)
+
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -936,23 +936,3 @@ void iommu_dma_compose_msi_msg(struct ms
+ msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
+ msg->address_lo += lower_32_bits(msi_page->iova);
+ }
+-
+-void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
+-{
+- struct msi_desc *desc = irq_get_msi_desc(irq);
+- phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
+-
+- if (WARN_ON(iommu_dma_prepare_msi(desc, msi_addr))) {
+- /*
+- * We're called from a void callback, so the best we can do is
+- * 'fail' by filling the message with obviously bogus values.
+- * Since we got this far due to an IOMMU being present, it's
+- * not like the existing address would have worked anyway...
+- */
+- msg->address_hi = ~0U;
+- msg->address_lo = ~0U;
+- msg->data = ~0U;
+- } else {
+- iommu_dma_compose_msi_msg(desc, msg);
+- }
+-}
+--- a/include/linux/dma-iommu.h
++++ b/include/linux/dma-iommu.h
+@@ -84,7 +84,6 @@ int iommu_dma_prepare_msi(struct msi_des
+ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
+ struct msi_msg *msg);
+
+-void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
+ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
+
+ #else
+@@ -124,10 +123,6 @@ static inline void iommu_dma_compose_msi
+ {
+ }
+
+-static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
+-{
+-}
+-
+ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
+ {
+ }
diff --git a/patches/0011-printk_safe-remove-printk-safe-code.patch b/patches/0011-printk_safe-remove-printk-safe-code.patch
index 390bbd607f91..2322cd4aac2d 100644
--- a/patches/0011-printk_safe-remove-printk-safe-code.patch
+++ b/patches/0011-printk_safe-remove-printk-safe-code.patch
@@ -677,7 +677,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-}
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -8366,7 +8366,6 @@ void ftrace_dump(enum ftrace_dump_mode o
+@@ -8373,7 +8373,6 @@ void ftrace_dump(enum ftrace_dump_mode o
tracing_off();
local_irq_save(flags);
@@ -685,7 +685,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Simulate the iterator */
trace_init_global_iter(&iter);
-@@ -8447,7 +8446,6 @@ void ftrace_dump(enum ftrace_dump_mode o
+@@ -8454,7 +8453,6 @@ void ftrace_dump(enum ftrace_dump_mode o
atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
}
atomic_dec(&dump_running);
diff --git a/patches/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch b/patches/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch
index f0d2b0346232..d79be7a382d9 100644
--- a/patches/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch
+++ b/patches/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -3675,15 +3675,15 @@ static void fill_xsave(u8 *dest, struct
+@@ -3680,15 +3680,15 @@ static void fill_xsave(u8 *dest, struct
*/
valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
while (valid) {
@@ -148,7 +148,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
memcpy(dest + offset, &vcpu->arch.pkru,
sizeof(vcpu->arch.pkru));
else
-@@ -3691,7 +3691,7 @@ static void fill_xsave(u8 *dest, struct
+@@ -3696,7 +3696,7 @@ static void fill_xsave(u8 *dest, struct
}
@@ -157,7 +157,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -3718,22 +3718,22 @@ static void load_xsave(struct kvm_vcpu *
+@@ -3723,22 +3723,22 @@ static void load_xsave(struct kvm_vcpu *
*/
valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
while (valid) {
@@ -186,7 +186,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -8839,11 +8839,11 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcp
+@@ -8862,11 +8862,11 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcp
if (init_event)
kvm_put_guest_fpu(vcpu);
mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
diff --git a/patches/0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch b/patches/0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch
index 7e86a2c0509f..5171ad7d9b0a 100644
--- a/patches/0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch
+++ b/patches/0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch
@@ -542,7 +542,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
update_task_stack(next_p);
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -7868,6 +7868,10 @@ static int vcpu_enter_guest(struct kvm_v
+@@ -7891,6 +7891,10 @@ static int vcpu_enter_guest(struct kvm_v
wait_lapic_expire(vcpu);
guest_enter_irqoff();
@@ -553,7 +553,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(vcpu->arch.switch_db_regs)) {
set_debugreg(0, 7);
set_debugreg(vcpu->arch.eff_db[0], 0);
-@@ -8126,22 +8130,30 @@ static int complete_emulated_mmio(struct
+@@ -8149,22 +8153,30 @@ static int complete_emulated_mmio(struct
/* Swap (qemu) user FPU context for the guest FPU context. */
static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
diff --git a/patches/0028-x86-fpu-Fault-in-user-stack-if-copy_fpstate_to_sigfr.patch b/patches/0028-x86-fpu-Fault-in-user-stack-if-copy_fpstate_to_sigfr.patch
index f76d1f3fd5c4..21435a4e2fc4 100644
--- a/patches/0028-x86-fpu-Fault-in-user-stack-if-copy_fpstate_to_sigfr.patch
+++ b/patches/0028-x86-fpu-Fault-in-user-stack-if-copy_fpstate_to_sigfr.patch
@@ -1,5 +1,5 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 29 Apr 2019 18:39:53 +0200
+Date: Thu, 2 May 2019 19:11:39 +0200
Subject: [PATCH] x86/fpu: Fault-in user stack if copy_fpstate_to_sigframe()
fails
@@ -25,42 +25,45 @@ attempt but may be retried again if the memory is swapped out due
to memory pressure. If the user memory can not be faulted-in then
get_user_pages() returns an error so we don't loop forever.
-Fault in memory via get_user_pages() so copy_fpregs_to_sigframe()
-succeeds without a fault.
+Fault in memory via get_user_pages_unlocked() so
+copy_fpregs_to_sigframe() succeeds without a fault.
Fixes: 69277c98f5eef ("x86/fpu: Always store the registers in copy_fpstate_to_sigframe()")
Reported-by: Kurt Kanzenbach <kurt.kanzenbach@linutronix.de>
Suggested-by: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
-Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jann Horn <jannh@google.com>
-Cc: Jason@zx2c4.com
-Cc: kvm ML <kvm@vger.kernel.org>
-Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "linux-mm@kvack.org" <linux-mm@kvack.org>
+Cc: Qian Cai <cai@lca.pw>
Cc: Rik van Riel <riel@surriel.com>
-Cc: rkrcmar@redhat.com
+Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86-ml <x86@kernel.org>
-Link: https://lkml.kernel.org/r/20190429163953.gqxgsc5okqxp4olv@linutronix.de
+Link: https://lkml.kernel.org/r/20190502171139.mqtegctsg35cir2e@linutronix.de
---
- arch/x86/kernel/fpu/signal.c | 25 ++++++++++++++-----------
- 1 file changed, 14 insertions(+), 11 deletions(-)
+ arch/x86/kernel/fpu/signal.c | 31 +++++++++++++++----------------
+ 1 file changed, 15 insertions(+), 16 deletions(-)
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
-@@ -158,7 +158,6 @@ static inline int copy_fpregs_to_sigfram
+@@ -157,11 +157,9 @@ static inline int copy_fpregs_to_sigfram
+ */
int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
{
- struct fpu *fpu = &current->thread.fpu;
+- struct fpu *fpu = &current->thread.fpu;
- struct xregs_state *xsave = &fpu->state.xsave;
struct task_struct *tsk = current;
int ia32_fxstate = (buf != buf_fx);
- int ret = -EFAULT;
-@@ -174,12 +173,13 @@ int copy_fpstate_to_sigframe(void __user
+- int ret = -EFAULT;
++ int ret;
+
+ ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
+ IS_ENABLED(CONFIG_IA32_EMULATION));
+@@ -174,12 +172,13 @@ int copy_fpstate_to_sigframe(void __user
sizeof(struct user_i387_ia32_struct), NULL,
(struct _fpstate_32 __user *) buf) ? -1 : 1;
@@ -76,7 +79,13 @@ Link: https://lkml.kernel.org/r/20190429163953.gqxgsc5okqxp4olv@linutronix.de
*/
if (test_thread_flag(TIF_NEED_FPU_LOAD))
__fpregs_load_activate();
-@@ -193,14 +193,17 @@ int copy_fpstate_to_sigframe(void __user
+@@ -187,20 +186,20 @@ int copy_fpstate_to_sigframe(void __user
+ pagefault_disable();
+ ret = copy_fpregs_to_sigframe(buf_fx);
+ pagefault_enable();
+- if (ret && !test_thread_flag(TIF_NEED_FPU_LOAD))
+- copy_fpregs_to_fpstate(fpu);
+- set_thread_flag(TIF_NEED_FPU_LOAD);
fpregs_unlock();
if (ret) {
@@ -94,8 +103,8 @@ Link: https://lkml.kernel.org/r/20190429163953.gqxgsc5okqxp4olv@linutronix.de
+ aligned_size = offset_in_page(buf_fx) + fpu_user_xstate_size;
+ nr_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE);
+
-+ ret = get_user_pages((unsigned long)buf_fx, nr_pages,
-+ FOLL_WRITE, NULL, NULL);
++ ret = get_user_pages_unlocked((unsigned long)buf_fx, nr_pages,
++ NULL, FOLL_WRITE);
+ if (ret == nr_pages)
+ goto retry;
+ return -EFAULT;
diff --git a/patches/0029-x86-fpu-Remove-unnecessary-saving-of-FPU-registers-i.patch b/patches/0029-x86-fpu-Remove-unnecessary-saving-of-FPU-registers-i.patch
deleted file mode 100644
index 435d1f0f5135..000000000000
--- a/patches/0029-x86-fpu-Remove-unnecessary-saving-of-FPU-registers-i.patch
+++ /dev/null
@@ -1,66 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 30 Apr 2019 10:31:26 +0200
-Subject: [PATCH] x86/fpu: Remove unnecessary saving of FPU registers in
- copy_fpstate_to_sigframe()
-
-Since commit:
-
- eeec00d73be2e ("x86/fpu: Fault-in user stack if copy_fpstate_to_sigframe() fails")
-
-there is no need to have FPU registers saved if
-copy_fpregs_to_sigframe() fails, because we retry it after we resolved
-the fault condition.
-
-Saving the registers is not wrong but it is not necessary and it forces us
-to load the FPU registers on the retry attempt.
-
-Don't save the FPU registers if copy_fpstate_to_sigframe() fails.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Cc: Andy Lutomirski <luto@kernel.org>
-Cc: Borislav Petkov <bp@alien8.de>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Fenghua Yu <fenghua.yu@intel.com>
-Cc: H. Peter Anvin <hpa@zytor.com>
-Cc: Jason@zx2c4.com
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Oleg Nesterov <oleg@redhat.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: bp@suse.de
-Cc: jannh@google.com
-Cc: kurt.kanzenbach@linutronix.de
-Cc: kvm@vger.kernel.org
-Cc: pbonzini@redhat.com
-Cc: riel@surriel.com
-Cc: rkrcmar@redhat.com
-Link: http://lkml.kernel.org/r/20190430083126.rilbb76yc27vrem5@linutronix.de
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
----
- arch/x86/kernel/fpu/signal.c | 6 +-----
- 1 file changed, 1 insertion(+), 5 deletions(-)
-
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -157,10 +157,9 @@ static inline int copy_fpregs_to_sigfram
- */
- int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
- {
-- struct fpu *fpu = &current->thread.fpu;
- struct task_struct *tsk = current;
- int ia32_fxstate = (buf != buf_fx);
-- int ret = -EFAULT;
-+ int ret;
-
- ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
- IS_ENABLED(CONFIG_IA32_EMULATION));
-@@ -187,9 +186,6 @@ int copy_fpstate_to_sigframe(void __user
- pagefault_disable();
- ret = copy_fpregs_to_sigframe(buf_fx);
- pagefault_enable();
-- if (ret && !test_thread_flag(TIF_NEED_FPU_LOAD))
-- copy_fpregs_to_fpstate(fpu);
-- set_thread_flag(TIF_NEED_FPU_LOAD);
- fpregs_unlock();
-
- if (ret) {
diff --git a/patches/arm-disable-NEON-in-kernel-mode.patch b/patches/arm-disable-NEON-in-kernel-mode.patch
index 0f24a50499bf..95c0ec92d500 100644
--- a/patches/arm-disable-NEON-in-kernel-mode.patch
+++ b/patches/arm-disable-NEON-in-kernel-mode.patch
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -2131,7 +2131,7 @@ config NEON
+@@ -2132,7 +2132,7 @@ config NEON
config KERNEL_MODE_NEON
bool "Support for NEON in kernel mode"
diff --git a/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
index b4cd2f29d00f..952a81362cd0 100644
--- a/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
+++ b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline bool init_done(struct zram *zram)
{
-@@ -1153,6 +1188,7 @@ static bool zram_meta_alloc(struct zram
+@@ -1154,6 +1189,7 @@ static bool zram_meta_alloc(struct zram
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
diff --git a/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch b/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
index 62427bbf0caf..04420f3c5e0a 100644
--- a/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
+++ b/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* dynamic per-device compression frontend */
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
-@@ -1251,6 +1251,7 @@ static int __zram_bvec_read(struct zram
+@@ -1252,6 +1252,7 @@ static int __zram_bvec_read(struct zram
unsigned long handle;
unsigned int size;
void *src, *dst;
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
zram_slot_lock(zram, index);
if (zram_test_flag(zram, index, ZRAM_WB)) {
-@@ -1281,6 +1282,7 @@ static int __zram_bvec_read(struct zram
+@@ -1282,6 +1283,7 @@ static int __zram_bvec_read(struct zram
size = zram_get_obj_size(zram, index);
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
dst = kmap_atomic(page);
-@@ -1288,14 +1290,13 @@ static int __zram_bvec_read(struct zram
+@@ -1289,14 +1291,13 @@ static int __zram_bvec_read(struct zram
kunmap_atomic(dst);
ret = 0;
} else {
diff --git a/patches/fs-aio-simple-simple-work.patch b/patches/fs-aio-simple-simple-work.patch
index fd88a910bf0f..329cbcea849a 100644
--- a/patches/fs-aio-simple-simple-work.patch
+++ b/patches/fs-aio-simple-simple-work.patch
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* signals when all in-flight requests are done
-@@ -613,9 +614,9 @@ static void free_ioctx_reqs(struct percp
+@@ -612,9 +613,9 @@ static void free_ioctx_reqs(struct percp
* and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
* now it's safe to cancel any that need to be.
*/
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct aio_kiocb *req;
spin_lock_irq(&ctx->ctx_lock);
-@@ -633,6 +634,14 @@ static void free_ioctx_users(struct perc
+@@ -632,6 +633,14 @@ static void free_ioctx_users(struct perc
percpu_ref_put(&ctx->reqs);
}
diff --git a/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch b/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
index d62fd7769841..e1b20302dcb3 100644
--- a/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
+++ b/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
-@@ -694,7 +694,7 @@ struct inode {
+@@ -697,7 +697,7 @@ struct inode {
struct block_device *i_bdev;
struct cdev *i_cdev;
char *i_link;
diff --git a/patches/ftrace-Fix-trace-header-alignment.patch b/patches/ftrace-Fix-trace-header-alignment.patch
index 1e2789e182e5..5e73d845a99f 100644
--- a/patches/ftrace-Fix-trace-header-alignment.patch
+++ b/patches/ftrace-Fix-trace-header-alignment.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -3349,17 +3349,17 @@ get_total_entries(struct trace_buffer *b
+@@ -3352,17 +3352,17 @@ get_total_entries(struct trace_buffer *b
static void print_lat_help_header(struct seq_file *m)
{
diff --git a/patches/ftrace-migrate-disable-tracing.patch b/patches/ftrace-migrate-disable-tracing.patch
index f2724572659c..6ee0798dfd8a 100644
--- a/patches/ftrace-migrate-disable-tracing.patch
+++ b/patches/ftrace-migrate-disable-tracing.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TRACE_EVENT_TYPE_MAX \
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2146,6 +2146,8 @@ tracing_generic_entry_update(struct trac
+@@ -2149,6 +2149,8 @@ tracing_generic_entry_update(struct trac
((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-@@ -3350,9 +3352,10 @@ static void print_lat_help_header(struct
+@@ -3353,9 +3355,10 @@ static void print_lat_help_header(struct
"# | / _----=> need-resched \n"
"# || / _---=> hardirq/softirq \n"
"# ||| / _--=> preempt-depth \n"
diff --git a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
index 786d62499ada..9d907bb569cc 100644
--- a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
+++ b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
@@ -26,7 +26,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -3377,10 +3377,9 @@ static bool blk_mq_poll_hybrid_sleep(str
+@@ -3378,10 +3378,9 @@ static bool blk_mq_poll_hybrid_sleep(str
kt = nsecs;
mode = HRTIMER_MODE_REL;
diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index 8ae8b1a501f0..05fed781ac8b 100644
--- a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -506,7 +506,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* We have to ensure that we have at least one bit
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -539,7 +539,7 @@ static struct rq *dl_task_offline_migrat
+@@ -538,7 +538,7 @@ static struct rq *dl_task_offline_migrat
* If we cannot preempt any rq, fall back to pick any
* online CPU:
*/
@@ -515,7 +515,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (cpu >= nr_cpu_ids) {
/*
* Failed to find any suitable CPU.
-@@ -1825,7 +1825,7 @@ static void set_curr_task_dl(struct rq *
+@@ -1824,7 +1824,7 @@ static void set_curr_task_dl(struct rq *
static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
@@ -524,7 +524,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 1;
return 0;
}
-@@ -1975,7 +1975,7 @@ static struct rq *find_lock_later_rq(str
+@@ -1974,7 +1974,7 @@ static struct rq *find_lock_later_rq(str
/* Retry if something changed. */
if (double_lock_balance(rq, later_rq)) {
if (unlikely(task_rq(task) != rq ||
@@ -553,7 +553,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
env->dst_cpu = cpu;
-@@ -5785,7 +5785,7 @@ find_idlest_group(struct sched_domain *s
+@@ -5789,7 +5789,7 @@ find_idlest_group(struct sched_domain *s
/* Skip over this group if it has no CPUs allowed */
if (!cpumask_intersects(sched_group_span(group),
@@ -562,7 +562,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
local_group = cpumask_test_cpu(this_cpu,
-@@ -5917,7 +5917,7 @@ find_idlest_group_cpu(struct sched_group
+@@ -5921,7 +5921,7 @@ find_idlest_group_cpu(struct sched_group
return cpumask_first(sched_group_span(group));
/* Traverse only the allowed CPUs */
@@ -571,7 +571,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (available_idle_cpu(i)) {
struct rq *rq = cpu_rq(i);
struct cpuidle_state *idle = idle_get_state(rq);
-@@ -5957,7 +5957,7 @@ static inline int find_idlest_cpu(struct
+@@ -5961,7 +5961,7 @@ static inline int find_idlest_cpu(struct
{
int new_cpu = cpu;
@@ -580,7 +580,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return prev_cpu;
/*
-@@ -6074,7 +6074,7 @@ static int select_idle_core(struct task_
+@@ -6078,7 +6078,7 @@ static int select_idle_core(struct task_
if (!test_idle_cores(target, false))
return -1;
@@ -589,7 +589,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_cpu_wrap(core, cpus, target) {
bool idle = true;
-@@ -6108,7 +6108,7 @@ static int select_idle_smt(struct task_s
+@@ -6112,7 +6112,7 @@ static int select_idle_smt(struct task_s
return -1;
for_each_cpu(cpu, cpu_smt_mask(target)) {
@@ -598,7 +598,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
if (available_idle_cpu(cpu))
return cpu;
-@@ -6171,7 +6171,7 @@ static int select_idle_cpu(struct task_s
+@@ -6175,7 +6175,7 @@ static int select_idle_cpu(struct task_s
for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
if (!--nr)
return -1;
@@ -607,7 +607,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
if (available_idle_cpu(cpu))
break;
-@@ -6208,7 +6208,7 @@ static int select_idle_sibling(struct ta
+@@ -6212,7 +6212,7 @@ static int select_idle_sibling(struct ta
recent_used_cpu != target &&
cpus_share_cache(recent_used_cpu, target) &&
available_idle_cpu(recent_used_cpu) &&
@@ -616,7 +616,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Replace recent_used_cpu with prev as it is a potential
* candidate for the next wake:
-@@ -6554,7 +6554,7 @@ static int find_energy_efficient_cpu(str
+@@ -6558,7 +6558,7 @@ static int find_energy_efficient_cpu(str
int max_spare_cap_cpu = -1;
for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
@@ -625,7 +625,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
/* Skip CPUs that will be overutilized. */
-@@ -6643,7 +6643,7 @@ select_task_rq_fair(struct task_struct *
+@@ -6647,7 +6647,7 @@ select_task_rq_fair(struct task_struct *
}
want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) &&
@@ -634,7 +634,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
rcu_read_lock();
-@@ -7393,14 +7393,14 @@ int can_migrate_task(struct task_struct
+@@ -7397,14 +7397,14 @@ int can_migrate_task(struct task_struct
/*
* We do not migrate tasks that are:
* 1) throttled_lb_pair, or
@@ -651,7 +651,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int cpu;
schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
-@@ -7420,7 +7420,7 @@ int can_migrate_task(struct task_struct
+@@ -7424,7 +7424,7 @@ int can_migrate_task(struct task_struct
/* Prevent to re-select dst_cpu via env's CPUs: */
for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
@@ -660,7 +660,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
env->flags |= LBF_DST_PINNED;
env->new_dst_cpu = cpu;
break;
-@@ -8017,7 +8017,7 @@ check_cpu_capacity(struct rq *rq, struct
+@@ -8021,7 +8021,7 @@ check_cpu_capacity(struct rq *rq, struct
/*
* Group imbalance indicates (and tries to solve) the problem where balancing
@@ -669,7 +669,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
* cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
-@@ -8688,7 +8688,7 @@ static struct sched_group *find_busiest_
+@@ -8692,7 +8692,7 @@ static struct sched_group *find_busiest_
/*
* If the busiest group is imbalanced the below checks don't
* work because they assume all things are equal, which typically
@@ -678,7 +678,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
if (busiest->group_type == group_imbalanced)
goto force_balance;
-@@ -9116,7 +9116,7 @@ static int load_balance(int this_cpu, st
+@@ -9120,7 +9120,7 @@ static int load_balance(int this_cpu, st
* if the curr task on busiest CPU can't be
* moved to this_cpu:
*/
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 68c7b973cc48..02952cda4bfa 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt8
++-rt9
diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch
index efe883ebdc20..77086fe6452b 100644
--- a/patches/mm-convert-swap-to-percpu-locked.patch
+++ b/patches/mm-convert-swap-to-percpu-locked.patch
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -7443,8 +7443,9 @@ void __init free_area_init(unsigned long
+@@ -7456,8 +7456,9 @@ void __init free_area_init(unsigned long
static int page_alloc_cpu_dead(unsigned int cpu)
{
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index f3d76e83d6e4..5d639f9044be 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
-@@ -295,6 +296,18 @@ EXPORT_SYMBOL(nr_node_ids);
+@@ -308,6 +309,18 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1319,10 +1332,10 @@ static void __free_pages_ok(struct page
+@@ -1332,10 +1345,10 @@ static void __free_pages_ok(struct page
return;
migratetype = get_pfnblock_migratetype(page, pfn);
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void __init __free_pages_boot_core(struct page *page, unsigned int order)
-@@ -2632,13 +2645,13 @@ void drain_zone_pages(struct zone *zone,
+@@ -2645,13 +2658,13 @@ void drain_zone_pages(struct zone *zone,
int to_drain, batch;
LIST_HEAD(dst);
@@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (to_drain > 0)
free_pcppages_bulk(zone, &dst, false);
-@@ -2660,7 +2673,7 @@ static void drain_pages_zone(unsigned in
+@@ -2673,7 +2686,7 @@ static void drain_pages_zone(unsigned in
LIST_HEAD(dst);
int count;
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -2668,7 +2681,7 @@ static void drain_pages_zone(unsigned in
+@@ -2681,7 +2694,7 @@ static void drain_pages_zone(unsigned in
if (count)
isolate_pcp_pages(count, pcp, &dst);
@@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (count)
free_pcppages_bulk(zone, &dst, false);
-@@ -2706,6 +2719,7 @@ void drain_local_pages(struct zone *zone
+@@ -2719,6 +2732,7 @@ void drain_local_pages(struct zone *zone
drain_pages(cpu);
}
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void drain_local_pages_wq(struct work_struct *work)
{
struct pcpu_drain *drain;
-@@ -2723,6 +2737,7 @@ static void drain_local_pages_wq(struct
+@@ -2736,6 +2750,7 @@ static void drain_local_pages_wq(struct
drain_local_pages(drain->zone);
preempt_enable();
}
@@ -107,7 +107,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Spill all the per-cpu pages from all CPUs back into the buddy allocator.
-@@ -2790,6 +2805,14 @@ void drain_all_pages(struct zone *zone)
+@@ -2803,6 +2818,14 @@ void drain_all_pages(struct zone *zone)
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
@@ -122,7 +122,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_cpu(cpu, &cpus_with_pcps) {
struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
-@@ -2799,6 +2822,7 @@ void drain_all_pages(struct zone *zone)
+@@ -2812,6 +2835,7 @@ void drain_all_pages(struct zone *zone)
}
for_each_cpu(cpu, &cpus_with_pcps)
flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
@@ -130,7 +130,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
mutex_unlock(&pcpu_drain_mutex);
}
-@@ -2918,9 +2942,9 @@ void free_unref_page(struct page *page)
+@@ -2931,9 +2955,9 @@ void free_unref_page(struct page *page)
if (!free_unref_page_prepare(page, pfn))
return;
@@ -142,7 +142,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!list_empty(&dst))
free_pcppages_bulk(zone, &dst, false);
}
-@@ -2947,7 +2971,7 @@ void free_unref_page_list(struct list_he
+@@ -2960,7 +2984,7 @@ void free_unref_page_list(struct list_he
set_page_private(page, pfn);
}
@@ -151,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
enum zone_type type;
-@@ -2962,12 +2986,12 @@ void free_unref_page_list(struct list_he
+@@ -2975,12 +2999,12 @@ void free_unref_page_list(struct list_he
* a large list of pages to free.
*/
if (++batch_count == SWAP_CLUSTER_MAX) {
@@ -167,7 +167,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (i = 0; i < __MAX_NR_ZONES; ) {
struct page *page;
-@@ -3118,7 +3142,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3131,7 +3155,7 @@ static struct page *rmqueue_pcplist(stru
struct page *page;
unsigned long flags;
@@ -176,7 +176,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
-@@ -3126,7 +3150,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3139,7 +3163,7 @@ static struct page *rmqueue_pcplist(stru
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
}
@@ -185,7 +185,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return page;
}
-@@ -3153,7 +3177,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3166,7 +3190,7 @@ struct page *rmqueue(struct zone *prefer
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
@@ -194,7 +194,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
page = NULL;
-@@ -3173,7 +3197,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3186,7 +3210,7 @@ struct page *rmqueue(struct zone *prefer
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
@@ -203,7 +203,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
out:
/* Separate test+clear to avoid unnecessary atomics */
-@@ -3186,7 +3210,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3199,7 +3223,7 @@ struct page *rmqueue(struct zone *prefer
return page;
failed:
@@ -212,7 +212,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -8347,7 +8371,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8360,7 +8384,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -221,7 +221,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -8356,7 +8380,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8369,7 +8393,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 9721ff961d99..b88cc5d862e9 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -405,7 +405,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(migrate_enable);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -4029,7 +4029,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -4033,7 +4033,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
@@ -414,7 +414,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
-@@ -4053,7 +4053,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -4057,7 +4057,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
return;
if (delta > ideal_runtime)
@@ -423,7 +423,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void
-@@ -4195,7 +4195,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -4199,7 +4199,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
* validating it and just reschedule.
*/
if (queued) {
@@ -432,7 +432,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
/*
-@@ -4379,7 +4379,7 @@ static void __account_cfs_rq_runtime(str
+@@ -4383,7 +4383,7 @@ static void __account_cfs_rq_runtime(str
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -441,7 +441,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static __always_inline
-@@ -5075,7 +5075,7 @@ static void hrtick_start_fair(struct rq
+@@ -5079,7 +5079,7 @@ static void hrtick_start_fair(struct rq
if (delta < 0) {
if (rq->curr == p)
@@ -450,7 +450,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -6904,7 +6904,7 @@ static void check_preempt_wakeup(struct
+@@ -6908,7 +6908,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -459,7 +459,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -10100,7 +10100,7 @@ static void task_fork_fair(struct task_s
+@@ -10104,7 +10104,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -468,7 +468,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -10124,7 +10124,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -10128,7 +10128,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
@@ -509,7 +509,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2134,6 +2134,7 @@ tracing_generic_entry_update(struct trac
+@@ -2137,6 +2137,7 @@ tracing_generic_entry_update(struct trac
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
@@ -517,7 +517,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
entry->pid = (tsk) ? tsk->pid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -2144,7 +2145,8 @@ tracing_generic_entry_update(struct trac
+@@ -2147,7 +2148,8 @@ tracing_generic_entry_update(struct trac
((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
@@ -527,7 +527,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
-@@ -3347,15 +3349,17 @@ get_total_entries(struct trace_buffer *b
+@@ -3350,15 +3352,17 @@ get_total_entries(struct trace_buffer *b
static void print_lat_help_header(struct seq_file *m)
{
@@ -554,7 +554,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -3393,15 +3397,17 @@ static void print_func_help_header_irq(s
+@@ -3396,15 +3400,17 @@ static void print_func_help_header_irq(s
tgid ? tgid_space : space);
seq_printf(m, "# %s / _----=> need-resched\n",
tgid ? tgid_space : space);
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 3b319dce8254..b98cb311cd16 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -97,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* explicit rescheduling in places that are safe. The return
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
-@@ -175,7 +175,14 @@ static bool ptrace_freeze_traced(struct
+@@ -176,7 +176,14 @@ static bool ptrace_freeze_traced(struct
spin_lock_irq(&task->sighand->siglock);
if (task_is_traced(task) && !__fatal_signal_pending(task)) {
diff --git a/patches/sched-completion-Fix-a-lockup-in-wait_for_completion.patch b/patches/sched-completion-Fix-a-lockup-in-wait_for_completion.patch
new file mode 100644
index 000000000000..593d992b9c1f
--- /dev/null
+++ b/patches/sched-completion-Fix-a-lockup-in-wait_for_completion.patch
@@ -0,0 +1,60 @@
+From: Corey Minyard <cminyard@mvista.com>
+Date: Thu, 9 May 2019 14:33:20 -0500
+Subject: [PATCH] sched/completion: Fix a lockup in wait_for_completion()
+
+Consider the following race:
+
+     T0                           T1                             T2
+ wait_for_completion()
+  do_wait_for_common()
+   __prepare_to_swait()
+    schedule()
+                            complete()
+                             x->done++ (0 -> 1)
+                             raw_spin_lock_irqsave()
+                              swake_up_locked()          wait_for_completion()
+                               wake_up_process(T0)
+                                list_del_init()
+                             raw_spin_unlock_irqrestore()
+                                                         raw_spin_lock_irq(&x->wait.lock)
+ raw_spin_lock_irq(&x->wait.lock)                        x->done != UINT_MAX, 1 -> 0
+                                                         raw_spin_unlock_irq(&x->wait.lock)
+                                                         return 1
+ while (!x->done && timeout),
+ continue loop, not enqueued
+ on &x->wait
+
+Basically, the problem is that the original wait queues used in
+completions did not remove the item from the queue in the wakeup
+function, but swake_up_locked() does.
+
+Fix it by adding the thread to the wait queue inside the do loop.
+The swait code detects that the entry is already on the list and
+does not add it a second time.
+
+Cc: stable-rt@vger.kernel.org
+Fixes: a04ff6b4ec4ee7e ("completion: Use simple wait queues")
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+[bigeasy: shorten commit message]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/completion.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sched/completion.c
++++ b/kernel/sched/completion.c
+@@ -72,12 +72,12 @@ do_wait_for_common(struct completion *x,
+ if (!x->done) {
+ DECLARE_SWAITQUEUE(wait);
+
+- __prepare_to_swait(&x->wait, &wait);
+ do {
+ if (signal_pending_state(state, current)) {
+ timeout = -ERESTARTSYS;
+ break;
+ }
++ __prepare_to_swait(&x->wait, &wait);
+ __set_current_state(state);
+ raw_spin_unlock_irq(&x->wait.lock);
+ timeout = action(timeout);
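
For anyone who does not want to open kernel/sched/completion.c, here is a
rough sketch of what the wait loop looks like once the above patch is
applied. It is abridged from the -RT variant of do_wait_for_common()
(after the completion-use-simple-wait-queues.patch conversion to swait),
with declarations and the caller's locking elided, so treat it as
illustrative rather than a standalone build unit:

    /*
     * Abridged sketch of the patched do_wait_for_common() loop: the waiter
     * re-adds itself to the swait queue on every iteration, so a concurrent
     * swake_up_locked() that already removed it from the list cannot leave
     * it sleeping while it is no longer enqueued on &x->wait.
     */
    do {
            if (signal_pending_state(state, current)) {
                    timeout = -ERESTARTSYS;
                    break;
            }
            __prepare_to_swait(&x->wait, &wait);    /* no-op if still enqueued */
            __set_current_state(state);
            raw_spin_unlock_irq(&x->wait.lock);
            timeout = action(timeout);
            raw_spin_lock_irq(&x->wait.lock);
    } while (!x->done && timeout);
    __finish_swait(&x->wait, &wait);

Repeating __prepare_to_swait() inside the loop is cheap: it only performs
the list add when the entry is not already queued, which is exactly the
property the commit message relies on.
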
diff --git a/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch b/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch
index da5d6d390ad7..8c4e74aba421 100644
--- a/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch
+++ b/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -4916,9 +4916,9 @@ void init_cfs_bandwidth(struct cfs_bandw
+@@ -4920,9 +4920,9 @@ void init_cfs_bandwidth(struct cfs_bandw
cfs_b->period = ns_to_ktime(default_cfs_period());
INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
diff --git a/patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch b/patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
index d0d447b09ec8..33deeb081980 100644
--- a/patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
+++ b/patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -4565,7 +4565,7 @@ static u64 distribute_cfs_runtime(struct
+@@ -4569,7 +4569,7 @@ static u64 distribute_cfs_runtime(struct
struct rq *rq = rq_of(cfs_rq);
struct rq_flags rf;
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!cfs_rq_throttled(cfs_rq))
goto next;
-@@ -4582,7 +4582,7 @@ static u64 distribute_cfs_runtime(struct
+@@ -4586,7 +4586,7 @@ static u64 distribute_cfs_runtime(struct
unthrottle_cfs_rq(cfs_rq);
next:
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!remaining)
break;
-@@ -4598,7 +4598,7 @@ static u64 distribute_cfs_runtime(struct
+@@ -4602,7 +4602,7 @@ static u64 distribute_cfs_runtime(struct
* period the timer is deactivated until scheduling resumes; cfs_b->idle is
* used to track this state.
*/
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
u64 runtime, runtime_expires;
int throttled;
-@@ -4640,11 +4640,11 @@ static int do_sched_cfs_period_timer(str
+@@ -4644,11 +4644,11 @@ static int do_sched_cfs_period_timer(str
while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
runtime = cfs_b->runtime;
cfs_b->distribute_running = 1;
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cfs_b->distribute_running = 0;
throttled = !list_empty(&cfs_b->throttled_cfs_rq);
-@@ -4753,17 +4753,18 @@ static __always_inline void return_cfs_r
+@@ -4757,17 +4757,18 @@ static __always_inline void return_cfs_r
static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
{
u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
}
-@@ -4774,18 +4775,18 @@ static void do_sched_cfs_slack_timer(str
+@@ -4778,18 +4779,18 @@ static void do_sched_cfs_slack_timer(str
if (runtime)
cfs_b->distribute_running = 1;
@@ -114,7 +114,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -4865,11 +4866,12 @@ static enum hrtimer_restart sched_cfs_pe
+@@ -4869,11 +4870,12 @@ static enum hrtimer_restart sched_cfs_pe
{
struct cfs_bandwidth *cfs_b =
container_of(timer, struct cfs_bandwidth, period_timer);
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for (;;) {
overrun = hrtimer_forward_now(timer, cfs_b->period);
if (!overrun)
-@@ -4897,11 +4899,11 @@ static enum hrtimer_restart sched_cfs_pe
+@@ -4901,11 +4903,11 @@ static enum hrtimer_restart sched_cfs_pe
count = 0;
}
diff --git a/patches/series b/patches/series
index 925859dc5531..5ac2f5213b35 100644
--- a/patches/series
+++ b/patches/series
@@ -27,6 +27,16 @@ kthread-convert-worker-lock-to-raw-spinlock.patch
sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
x86-ima-Check-EFI_RUNTIME_SERVICES-before-using.patch
+# Julien Grall | [PATCH v3 0/7] iommu/dma-iommu: Split iommu_dma_map_msi_msg in two parts
+# Date: Wed, 1 May 2019 14:58:17 +0100
+0001-genirq-msi-Add-a-new-field-in-msi_desc-to-store-an-I.patch
+0002-iommu-dma-iommu-Split-iommu_dma_map_msi_msg-in-two-p.patch
+0003-irqchip-gicv2m-Don-t-map-the-MSI-page-in-gicv2m_comp.patch
+0004-irqchip-gic-v3-its-Don-t-map-the-MSI-page-in-its_irq.patch
+0005-irqchip-ls-scfg-msi-Don-t-map-the-MSI-page-in-ls_scf.patch
+0006-irqchip-gic-v3-mbi-Don-t-map-the-MSI-page-in-mbi_com.patch
+0007-iommu-dma-iommu-Remove-iommu_dma_map_msi_msg.patch
+
# John's printk series
# [RFC PATCH v1 00/25] printk: new implementation
# Date: Tue, 12 Feb 2019 15:29:38 +0100
@@ -109,7 +119,6 @@ drm-i915-Don-t-disable-interrupts-independently-of-t.patch
0026-x86-fpu-Restore-FPU-register-in-copy_fpstate_to_sigf.patch
0027-x86-pkeys-add-PKRU-value-to-init_fpstate.patch
0028-x86-fpu-Fault-in-user-stack-if-copy_fpstate_to_sigfr.patch
-0029-x86-fpu-Remove-unnecessary-saving-of-FPU-registers-i.patch
############################################################
# Ready for posting
@@ -244,6 +253,7 @@ pci-switchtec-Don-t-use-completion-s-wait-queue.patch
wait.h-include-atomic.h.patch
kthread-add-a-global-worker-thread.patch
completion-use-simple-wait-queues.patch
+sched-completion-Fix-a-lockup-in-wait_for_completion.patch
fs-aio-simple-simple-work.patch
genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
psi-replace-delayed-work-with-timer-work.patch
diff --git a/patches/workqueue-use-locallock.patch b/patches/workqueue-use-locallock.patch
index e659ce977831..3301b2643865 100644
--- a/patches/workqueue-use-locallock.patch
+++ b/patches/workqueue-use-locallock.patch
@@ -145,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -3022,7 +3037,7 @@ static bool __cancel_work_timer(struct w
+@@ -3025,7 +3040,7 @@ static bool __cancel_work_timer(struct w
/* tell other tasks trying to grab @work to back off */
mark_work_canceling(work);
@@ -154,7 +154,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* This allows canceling during early boot. We know that @work
-@@ -3083,10 +3098,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+@@ -3086,10 +3101,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
@@ -167,7 +167,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
-@@ -3124,7 +3139,7 @@ static bool __cancel_work(struct work_st
+@@ -3127,7 +3142,7 @@ static bool __cancel_work(struct work_st
return false;
set_work_pool_and_clear_pending(work, get_work_pool_id(work));
diff --git a/patches/workqueue-use-rcu.patch b/patches/workqueue-use-rcu.patch
index d7ba3cc64a79..50d0b04996e4 100644
--- a/patches/workqueue-use-rcu.patch
+++ b/patches/workqueue-use-rcu.patch
@@ -207,7 +207,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return false;
}
-@@ -3364,7 +3369,7 @@ static void rcu_free_pool(struct rcu_hea
+@@ -3367,7 +3372,7 @@ static void rcu_free_pool(struct rcu_hea
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
@@ -216,7 +216,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
-@@ -4328,7 +4333,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4331,7 +4336,8 @@ bool workqueue_congested(int cpu, struct
struct pool_workqueue *pwq;
bool ret;
@@ -226,7 +226,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
-@@ -4339,7 +4345,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4342,7 +4348,8 @@ bool workqueue_congested(int cpu, struct
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works);
@@ -236,7 +236,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -4365,15 +4372,15 @@ unsigned int work_busy(struct work_struc
+@@ -4368,15 +4375,15 @@ unsigned int work_busy(struct work_struc
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
@@ -256,7 +256,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -4557,7 +4564,7 @@ void show_workqueue_state(void)
+@@ -4560,7 +4567,7 @@ void show_workqueue_state(void)
unsigned long flags;
int pi;
@@ -265,7 +265,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pr_info("Showing busy workqueues and worker pools:\n");
-@@ -4622,7 +4629,7 @@ void show_workqueue_state(void)
+@@ -4625,7 +4632,7 @@ void show_workqueue_state(void)
touch_nmi_watchdog();
}
@@ -274,7 +274,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/* used to show worker information through /proc/PID/{comm,stat,status} */
-@@ -5009,16 +5016,16 @@ bool freeze_workqueues_busy(void)
+@@ -5012,16 +5019,16 @@ bool freeze_workqueues_busy(void)
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
@@ -294,7 +294,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
-@@ -5213,7 +5220,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -5216,7 +5223,8 @@ static ssize_t wq_pool_ids_show(struct d
const char *delim = "";
int node, written = 0;
@@ -304,7 +304,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node,
-@@ -5221,7 +5229,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -5224,7 +5232,8 @@ static ssize_t wq_pool_ids_show(struct d
delim = " ";
}
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index cad282be22c5..348069ffe32d 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -6936,6 +6936,14 @@ int kvm_arch_init(void *opaque)
+@@ -6958,6 +6958,14 @@ int kvm_arch_init(void *opaque)
goto out;
}