Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c         14
-rw-r--r--  mm/page-writeback.c  18
2 files changed, 17 insertions(+), 15 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index c5c169c16bae..8426434042f4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -193,16 +193,20 @@ static void filemap_unaccount_folio(struct address_space *mapping,
/*
* At this point folio must be either written or cleaned by
* truncate. Dirty folio here signals a bug and loss of
- * unwritten data.
+ * unwritten data - on ordinary filesystems.
*
- * This fixes dirty accounting after removing the folio entirely
+ * But it's harmless on in-memory filesystems like tmpfs; and can
+ * occur when a driver which did get_user_pages() sets page dirty
+ * before putting it, while the inode is being finally evicted.
+ *
+ * Below fixes dirty accounting after removing the folio entirely
* but leaves the dirty flag set: it has no effect for truncated
* folio and anyway will be cleared before returning folio to
* buddy allocator.
*/
- if (WARN_ON_ONCE(folio_test_dirty(folio)))
- folio_account_cleaned(folio, mapping,
- inode_to_wb(mapping->host));
+ if (WARN_ON_ONCE(folio_test_dirty(folio) &&
+ mapping_can_writeback(mapping)))
+ folio_account_cleaned(folio, inode_to_wb(mapping->host));
}

/*
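The new test in filemap_unaccount_folio() keys the warning to whether the mapping takes part in writeback accounting at all. For context, mapping_can_writeback() is a one-line helper along the lines of the sketch below (paraphrased from include/linux/backing-dev.h; it is not part of this patch, and the exact form in a given tree may differ). In-memory filesystems such as tmpfs use a backing_dev_info without BDI_CAP_WRITEBACK, so the warning is skipped for them, matching the updated comment above.

/*
 * Sketch of the helper relied on above; not part of this diff.
 * tmpfs-style mappings sit on a BDI that does not advertise
 * BDI_CAP_WRITEBACK, so this returns false and the WARN_ON_ONCE()
 * above is never reached for them.
 */
static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}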
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 435c02630593..7e2da284e427 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2465,16 +2465,14 @@ static void folio_account_dirtied(struct folio *folio,
*
* Caller must hold lock_page_memcg().
*/
-void folio_account_cleaned(struct folio *folio, struct address_space *mapping,
- struct bdi_writeback *wb)
+void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
{
- if (mapping_can_writeback(mapping)) {
- long nr = folio_nr_pages(folio);
- lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
- zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
- wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
- task_io_account_cancelled_write(nr * PAGE_SIZE);
- }
+ long nr = folio_nr_pages(folio);
+
+ lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
+ zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+ wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
+ task_io_account_cancelled_write(nr * PAGE_SIZE);
}

/*
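The guard that used to live inside folio_account_cleaned() is now the callers' responsibility, as the filemap.c hunk above and the __folio_cancel_dirty() hunk below both show; the body of the helper is otherwise unchanged. For readers unfamiliar with the counters involved, here is the post-patch function again with explanatory comments added (the comments are editorial gloss, not part of the patch):

void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
{
	long nr = folio_nr_pages(folio);	/* a large folio uncounts all of its pages */

	/* per-node and per-memcg count of dirty page-cache pages */
	lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
	/* per-zone count of pages with a write still pending */
	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
	/* per-bdi_writeback count of reclaimable (dirty) pages */
	wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
	/* task I/O accounting: credit write bytes that will never be issued */
	task_io_account_cancelled_write(nr * PAGE_SIZE);
}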
@@ -2683,7 +2681,7 @@ void __folio_cancel_dirty(struct folio *folio)
wb = unlocked_inode_to_wb_begin(inode, &cookie);

if (folio_test_clear_dirty(folio))
- folio_account_cleaned(folio, mapping, wb);
+ folio_account_cleaned(folio, wb);

unlocked_inode_to_wb_end(inode, &cookie);
folio_memcg_unlock(folio);
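The final hunk shows only the call site; for orientation, __folio_cancel_dirty() is shaped roughly as below after this change (a reconstruction for illustration, not a verbatim copy of the tree). The whole accounting branch is already gated on mapping_can_writeback(mapping), which is what makes the dropped mapping argument redundant here:

void __folio_cancel_dirty(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	if (mapping_can_writeback(mapping)) {
		struct inode *inode = mapping->host;
		struct bdi_writeback *wb;
		struct wb_lock_cookie cookie = {};

		folio_memcg_lock(folio);
		wb = unlocked_inode_to_wb_begin(inode, &cookie);

		/* the caller-side check above replaces the one removed from the helper */
		if (folio_test_clear_dirty(folio))
			folio_account_cleaned(folio, wb);

		unlocked_inode_to_wb_end(inode, &cookie);
		folio_memcg_unlock(folio);
	} else {
		folio_clear_dirty(folio);
	}
}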