author    Christoph Hellwig <hch@lst.de>    2022-03-31 05:35:55 -0700
committer Matthew Wilcox (Oracle) <willy@infradead.org>    2022-04-01 13:45:52 -0400
commit    b4e089d705eef82364945abae325cd241c80e107 (patch)
tree      b16f6dd1ea098f7d49a3f8fb3bb5598825515a31 /mm
parent    dfd8b4fc76d5f7ae5663328b791c4acf222c4d39 (diff)
mm: remove the skip_page argument to read_pages
The skip_page argument to read_pages controls if rac->_index is incremented
before returning from the function. Just open code that in the callers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
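In essence the patch drops a boolean parameter whose only effect was a
post-call bump of rac->_index and open codes that increment at the two call
sites that needed it. Below is a minimal, self-contained sketch of that
refactoring pattern; struct ractl, read_pages_old/read_pages_new and the
values in main are illustrative stand-ins, not the kernel code itself.

	/*
	 * Sketch of the pattern only: a trailing flag whose sole job is a
	 * post-call side effect is removed, and the side effect is open
	 * coded at each call site that wants it.
	 */
	#include <stdio.h>

	struct ractl {
		unsigned long index;	/* stands in for rac->_index */
		unsigned long nr;	/* stands in for readahead_count() */
	};

	/* Before: the flag decides whether to bump ->index on return. */
	static void read_pages_old(struct ractl *rac, int skip_page)
	{
		if (rac->nr) {
			/* ... submit the batched pages ... */
			rac->nr = 0;
		}
		if (skip_page)
			rac->index++;
	}

	/* After: the helper only submits; callers bump ->index themselves. */
	static void read_pages_new(struct ractl *rac)
	{
		if (!rac->nr)
			return;
		/* ... submit the batched pages ... */
		rac->nr = 0;
	}

	int main(void)
	{
		struct ractl rac = { .index = 100, .nr = 4 };

		read_pages_old(&rac, 1);	/* old: index bumped inside */

		read_pages_new(&rac);		/* new: submit only ...    */
		rac.index++;			/* ... bump open coded here */

		printf("index=%lu nr=%lu\n", rac.index, rac.nr);
		return 0;
	}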
Diffstat (limited to 'mm')
-rw-r--r--	mm/readahead.c	18
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index 05207a663801..2e5c695b303d 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -142,14 +142,14 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
-static void read_pages(struct readahead_control *rac, bool skip_page)
+static void read_pages(struct readahead_control *rac)
{
const struct address_space_operations *aops = rac->mapping->a_ops;
struct page *page;
struct blk_plug plug;
if (!readahead_count(rac))
- goto out;
+ return;
blk_start_plug(&plug);
@@ -179,10 +179,6 @@ static void read_pages(struct readahead_control *rac, bool skip_page)
blk_finish_plug(&plug);
BUG_ON(readahead_count(rac));
-
-out:
- if (skip_page)
- rac->_index++;
}
/**
@@ -235,7 +231,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
* have a stable reference to this page, and it's
* not worth getting one just for that.
*/
- read_pages(ractl, true);
+ read_pages(ractl);
+ ractl->_index++;
i = ractl->_index + ractl->_nr_pages - index - 1;
continue;
}
@@ -246,7 +243,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
if (filemap_add_folio(mapping, folio, index + i,
gfp_mask) < 0) {
folio_put(folio);
- read_pages(ractl, true);
+ read_pages(ractl);
+ ractl->_index++;
i = ractl->_index + ractl->_nr_pages - index - 1;
continue;
}
@@ -260,7 +258,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
* uptodate then the caller will launch readpage again, and
* will then handle the error.
*/
- read_pages(ractl, false);
+ read_pages(ractl);
filemap_invalidate_unlock_shared(mapping);
memalloc_nofs_restore(nofs);
}
@@ -534,7 +532,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
ra->async_size += index - limit - 1;
}
- read_pages(ractl, false);
+ read_pages(ractl);
/*
* If there were already pages in the page cache, then we may have