-rw-r--r--  patches/arch-arm64-Add-lazy-preempt-support.patch            |  6
-rw-r--r--  patches/localversion.patch                                   |  2
-rw-r--r--  patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch  | 24
3 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/patches/arch-arm64-Add-lazy-preempt-support.patch b/patches/arch-arm64-Add-lazy-preempt-support.patch
index 0e9822a293d3..003f348fbbf7 100644
--- a/patches/arch-arm64-Add-lazy-preempt-support.patch
+++ b/patches/arch-arm64-Add-lazy-preempt-support.patch
@@ -21,7 +21,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -172,6 +172,7 @@ config ARM64
+@@ -173,6 +173,7 @@ config ARM64
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -115,7 +115,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
-@@ -660,9 +660,18 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKIN
+@@ -649,9 +649,18 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKIN
mrs x0, daif
orr x24, x24, x0
alternative_else_nop_endif
@@ -135,7 +135,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
+2:
#endif
- #ifdef CONFIG_ARM64_PSEUDO_NMI
+ mov x0, sp
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -925,7 +925,7 @@ asmlinkage void do_notify_resume(struct
diff --git a/patches/localversion.patch b/patches/localversion.patch
index e1f3b8d87864..340816c8febc 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt14
++-rt15
diff --git a/patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch b/patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
index 6f5b6e4d832c..749042a7279b 100644
--- a/patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
+++ b/patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
@@ -50,11 +50,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
struct mapping_area {
-+ local_lock_t lock;
- #ifdef CONFIG_ZSMALLOC_PGTABLE_MAPPING
- struct vm_struct *vm; /* vm area for mapping object that span pages */
- #else
-@@ -326,7 +342,7 @@ static void SetZsPageMovable(struct zs_p
++ local_lock_t lock;
+ char *vm_buf; /* copy buffer for objects that span pages */
+ char *vm_addr; /* address of kmap_atomic()'ed pages */
+ enum zs_mapmode vm_mm; /* mapping mode */
+@@ -322,7 +338,7 @@ static void SetZsPageMovable(struct zs_p
static int create_cache(struct zs_pool *pool)
{
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
0, 0, NULL);
if (!pool->handle_cachep)
return 1;
-@@ -350,9 +366,26 @@ static void destroy_cache(struct zs_pool
+@@ -346,9 +362,26 @@ static void destroy_cache(struct zs_pool
static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
{
-@@ -372,12 +405,18 @@ static void cache_free_zspage(struct zs_
+@@ -368,12 +401,18 @@ static void cache_free_zspage(struct zs_
static void record_obj(unsigned long handle, unsigned long obj)
{
@@ -111,7 +111,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* zpool driver */
-@@ -459,7 +498,10 @@ MODULE_ALIAS("zpool-zsmalloc");
+@@ -455,7 +494,10 @@ MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */
/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
@@ -123,7 +123,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static bool is_zspage_isolated(struct zspage *zspage)
{
-@@ -869,7 +911,13 @@ static unsigned long location_to_obj(str
+@@ -865,7 +907,13 @@ static unsigned long location_to_obj(str
static unsigned long handle_to_obj(unsigned long handle)
{
@@ -137,7 +137,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static unsigned long obj_to_head(struct page *page, void *obj)
-@@ -883,22 +931,46 @@ static unsigned long obj_to_head(struct
+@@ -879,22 +927,46 @@ static unsigned long obj_to_head(struct
static inline int testpin_tag(unsigned long handle)
{
@@ -184,7 +184,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void reset_page(struct page *page)
-@@ -1332,7 +1404,8 @@ void *zs_map_object(struct zs_pool *pool
+@@ -1278,7 +1350,8 @@ void *zs_map_object(struct zs_pool *pool
class = pool->size_class[class_idx];
off = (class->size * obj_idx) & ~PAGE_MASK;
@@ -194,7 +194,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
area->vm_mm = mm;
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
-@@ -1386,7 +1459,7 @@ void zs_unmap_object(struct zs_pool *poo
+@@ -1332,7 +1405,7 @@ void zs_unmap_object(struct zs_pool *poo
__zs_unmap_object(area, pages, off, class->size);
}
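
Editorial note, not part of the commit above: the zsmalloc refresh keeps the same PREEMPT_RT approach of replacing the implicit preempt-disable of get_cpu_var()/put_cpu_var() with a local_lock_t embedded in the per-CPU mapping_area. A minimal sketch of that pattern follows; the struct field names are taken from the hunks above, while the two helper functions and the elided copy logic are illustrative assumptions, not the actual mm/zsmalloc.c code.

#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Sketch only: a per-CPU copy buffer guarded by a local_lock_t instead of
 * get_cpu_var(), so the critical section stays preemptible on PREEMPT_RT
 * while still being serialized against other users on the same CPU. */
struct mapping_area {
	local_lock_t lock;
	char *vm_buf;	/* copy buffer for objects that span pages */
};

static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void *map_object_sketch(void)
{
	struct mapping_area *area;

	local_lock(&zs_map_area.lock);		/* was: get_cpu_var(zs_map_area) */
	area = this_cpu_ptr(&zs_map_area);
	/* ... copy the object that spans pages into area->vm_buf ... */
	return area->vm_buf;
}

static void unmap_object_sketch(void)
{
	/* ... copy any modifications back to the zspage ... */
	local_unlock(&zs_map_area.lock);	/* was: put_cpu_var(zs_map_area) */
}

On non-RT kernels local_lock() still compiles down to a preempt-disable, so the behaviour matches the old get_cpu_var() fast path; on PREEMPT_RT it becomes a per-CPU sleeping lock, which is the point of the conversion carried in this patch.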