From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sun, 12 Jun 2022 16:27:31 +0200
Subject: [PATCH 3/4] fs/dcache: Use __d_lookup_unhash() in __d_add/move()

__d_add() and __d_move() invoke __d_lookup_done() from within a
preemption-disabled region. This violates the PREEMPT_RT constraints, as
the wake-up acquires wait_queue_head::lock, which is a "sleeping" spinlock
on RT.
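
Roughly, the problematic nesting on a PREEMPT_RT kernel is (illustrative
call chain, not an exact trace):

  __d_add() / __d_move()
    start_dir_add()                      <- opens the preemption-disabled region
    __d_lookup_done(dentry)
      wake_up_all(...)
        spin_lock_irqsave(&wq_head->lock, ...)   <- sleeping lock on RT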

As preparation for solving this completely, invoke __d_lookup_unhash()
from __d_add/move() and handle the wake-up there.
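
For reference, __d_lookup_unhash() was added by the previous patch in this
series: it unhashes the dentry from the in-lookup hash and hands the wait
queue head back to the caller. Simplified, it looks roughly like this
(details abridged):

  /* Caller holds dentry->d_lock; returns the wait queue head to wake. */
  static wait_queue_head_t *__d_lookup_unhash(struct dentry *dentry)
  {
          wait_queue_head_t *d_wait;
          struct hlist_bl_head *b;

          lockdep_assert_held(&dentry->d_lock);

          b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash);
          hlist_bl_lock(b);
          dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
          __hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
          d_wait = dentry->d_wait;
          dentry->d_wait = NULL;
          hlist_bl_unlock(b);

          INIT_HLIST_NODE(&dentry->d_u.d_alias);
          INIT_LIST_HEAD(&dentry->d_lru);
          return d_wait;
  }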

This allows moving the spin_lock/unlock(dentry::d_lock) pair into
__d_lookup_done(), which debloats the d_lookup_done() inline.
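
With the lock/unlock pair moved, the out-of-line helper and the inline end
up as follows (assembled from the hunks below):

  void __d_lookup_done(struct dentry *dentry)
  {
          spin_lock(&dentry->d_lock);
          wake_up_all(__d_lookup_unhash(dentry));
          spin_unlock(&dentry->d_lock);
  }

  static inline void d_lookup_done(struct dentry *dentry)
  {
          if (unlikely(d_in_lookup(dentry)))
                  __d_lookup_done(dentry);
  }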

No functional change. Moving the wake-up out of the preemption-disabled
region on RT will be handled in a subsequent change.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lkml.kernel.org/r/20220613140712.77932-4-bigeasy@linutronix.de
---
 fs/dcache.c            |    6 ++++--
 include/linux/dcache.h |    7 ++-----
 2 files changed, 6 insertions(+), 7 deletions(-)

--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2737,7 +2737,9 @@ static wait_queue_head_t *__d_lookup_unh
 
 void __d_lookup_done(struct dentry *dentry)
 {
+	spin_lock(&dentry->d_lock);
 	wake_up_all(__d_lookup_unhash(dentry));
+	spin_unlock(&dentry->d_lock);
 }
 EXPORT_SYMBOL(__d_lookup_done);
 
@@ -2751,7 +2753,7 @@ static inline void __d_add(struct dentry
 	if (unlikely(d_in_lookup(dentry))) {
 		dir = dentry->d_parent->d_inode;
 		n = start_dir_add(dir);
-		__d_lookup_done(dentry);
+		wake_up_all(__d_lookup_unhash(dentry));
 	}
 	if (inode) {
 		unsigned add_flags = d_flags_for_inode(inode);
@@ -2940,7 +2942,7 @@ static void __d_move(struct dentry *dent
 	if (unlikely(d_in_lookup(target))) {
 		dir = target->d_parent->d_inode;
 		n = start_dir_add(dir);
-		__d_lookup_done(target);
+		wake_up_all(__d_lookup_unhash(target));
 	}
 
 	write_seqcount_begin(&dentry->d_seq);
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -349,7 +349,7 @@ static inline void dont_mount(struct den
 	spin_unlock(&dentry->d_lock);
 }
 
-extern void __d_lookup_done(struct dentry *);
+extern void __d_lookup_done(struct dentry *dentry);
 
 static inline int d_in_lookup(const struct dentry *dentry)
 {
@@ -358,11 +358,8 @@ static inline int d_in_lookup(const stru
 
 static inline void d_lookup_done(struct dentry *dentry)
 {
-	if (unlikely(d_in_lookup(dentry))) {
-		spin_lock(&dentry->d_lock);
+	if (unlikely(d_in_lookup(dentry)))
 		__d_lookup_done(dentry);
-		spin_unlock(&dentry->d_lock);
-	}
 }
 
 extern void dput(struct dentry *);