Diffstat (limited to 'patches/mm-convert-swap-to-percpu-locked.patch')
-rw-r--r--    patches/mm-convert-swap-to-percpu-locked.patch    34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch
index c40c2d08a062..267686e4cee0 100644
--- a/patches/mm-convert-swap-to-percpu-locked.patch
+++ b/patches/mm-convert-swap-to-percpu-locked.patch
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void lru_cache_add_file(struct page *page);
--- a/mm/compaction.c
+++ b/mm/compaction.c
-@@ -1601,10 +1601,12 @@ static enum compact_result compact_zone(
+@@ -1633,10 +1633,12 @@ static enum compact_result compact_zone(
block_start_pfn(cc->migrate_pfn, cc->order);
if (cc->last_migrated_pfn < current_block_start) {
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -6787,8 +6787,9 @@ void __init free_area_init(unsigned long
+@@ -6918,8 +6918,9 @@ void __init free_area_init(unsigned long
static int page_alloc_cpu_dead(unsigned int cpu)
{
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* This path almost never happens for VM activity - pages are normally
-@@ -242,11 +245,11 @@ void rotate_reclaimable_page(struct page
+@@ -252,11 +255,11 @@ void rotate_reclaimable_page(struct page
unsigned long flags;
get_page(page);
@@ -88,7 +88,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -296,12 +299,13 @@ void activate_page(struct page *page)
+@@ -306,12 +309,13 @@ void activate_page(struct page *page)
{
page = compound_head(page);
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -104,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -328,7 +332,7 @@ void activate_page(struct page *page)
+@@ -338,7 +342,7 @@ void activate_page(struct page *page)
static void __lru_cache_activate_page(struct page *page)
{
@@ -113,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int i;
/*
-@@ -350,7 +354,7 @@ static void __lru_cache_activate_page(st
+@@ -360,7 +364,7 @@ static void __lru_cache_activate_page(st
}
}
@@ -122,7 +122,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -392,12 +396,12 @@ EXPORT_SYMBOL(mark_page_accessed);
+@@ -402,12 +406,12 @@ EXPORT_SYMBOL(mark_page_accessed);
static void __lru_cache_add(struct page *page)
{
@@ -137,7 +137,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -595,9 +599,9 @@ void lru_add_drain_cpu(int cpu)
+@@ -613,9 +617,9 @@ void lru_add_drain_cpu(int cpu)
unsigned long flags;
/* No harm done if a racing interrupt already did this */
@@ -149,7 +149,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -629,11 +633,12 @@ void deactivate_file_page(struct page *p
+@@ -647,11 +651,12 @@ void deactivate_file_page(struct page *p
return;
if (likely(get_page_unless_zero(page))) {
@@ -164,19 +164,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -648,19 +653,20 @@ void deactivate_file_page(struct page *p
- void deactivate_page(struct page *page)
+@@ -666,19 +671,20 @@ void mark_page_lazyfree(struct page *pag
{
- if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
-- struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+ if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
+ !PageSwapCache(page) && !PageUnevictable(page)) {
+- struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
+ struct pagevec *pvec = &get_locked_var(swapvec_lock,
-+ lru_deactivate_pvecs);
++ lru_lazyfree_pvecs);
get_page(page);
if (!pagevec_add(pvec, page) || PageCompound(page))
- pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
-- put_cpu_var(lru_deactivate_pvecs);
-+ put_locked_var(swapvec_lock, lru_deactivate_pvecs);
+ pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
+- put_cpu_var(lru_lazyfree_pvecs);
++ put_locked_var(swapvec_lock, lru_lazyfree_pvecs);
}
}
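
For context, a minimal sketch of the locking pattern this refresh keeps in step with the kernel: mainline protects the per-CPU pagevecs by disabling preemption through get_cpu_var()/put_cpu_var(), while the RT patch swaps those for get_locked_var()/put_locked_var() on a local lock (swapvec_lock) so the section stays preemptible. The helper names below are made up for illustration, and the sketch assumes the RT locallock API the patch relies on; it is not part of the diff above.

/*
 * Sketch only -- not part of the commit above.  queue_lazyfree_mainline()
 * and queue_lazyfree_rt() are hypothetical names; swapvec_lock and the
 * locallock helpers come from the RT patch series this file belongs to.
 */

/* Mainline: get_cpu_var() disables preemption for as long as the
 * per-CPU pagevec is being manipulated. */
static void queue_lazyfree_mainline(struct page *page)
{
        struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);

        get_page(page);
        if (!pagevec_add(pvec, page) || PageCompound(page))
                pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
        put_cpu_var(lru_lazyfree_pvecs);
}

/* RT: a per-CPU local lock serialises access instead, so the critical
 * section remains preemptible on PREEMPT_RT.  The lock is assumed to be
 * defined elsewhere in the patched mm/swap.c, e.g. as
 * static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); */
static void queue_lazyfree_rt(struct page *page)
{
        struct pagevec *pvec = &get_locked_var(swapvec_lock,
                                               lru_lazyfree_pvecs);

        get_page(page);
        if (!pagevec_add(pvec, page) || PageCompound(page))
                pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
        put_locked_var(swapvec_lock, lru_lazyfree_pvecs);
}

On non-RT builds the locallock helpers are expected to collapse back to the plain get_cpu_var()/put_cpu_var() behaviour, so the conversion is intended to change behaviour only under PREEMPT_RT.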