author      Thomas Gleixner <tglx@linutronix.de>               2009-07-03 08:44:56 -0500
committer   Luis Claudio R. Goncalves <lgoncalv@redhat.com>    2021-10-11 07:51:08 -0300
commit      77c21ffb6a43fb3ea2b264472e01d1fbe3b114dd
tree        042720cf70b8a114dafbcd1e43611628990a5f13
parent      295065a67c243c284d86eabb2f4777bc3488db7c
download    linux-rt-77c21ffb6a43fb3ea2b264472e01d1fbe3b114dd.tar.gz
signals: Allow rt tasks to cache one sigqueue struct
To avoid allocation, allow rt tasks to cache one sigqueue struct in the
task struct.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
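
The caching scheme the diff below introduces is a single-slot, lock-free cache: put_task_cache() parks a freed sigqueue in task_struct::sigqueue_cache with cmpxchg() (only when the slot is empty), and get_task_cache() claims it back the same way on the next allocation. The following is a minimal userspace sketch of that pattern, for illustration only: the *_demo types, the main() harness, and the GCC __atomic builtins are stand-ins for the kernel's cmpxchg() on task_struct::sigqueue_cache and are not part of the patch.

/*
 * Minimal sketch of the single-slot cache used by
 * get_task_cache()/put_task_cache() in the patch below.
 * All names ending in _demo are illustrative stand-ins, not kernel types.
 */
#include <stdio.h>
#include <stdlib.h>

struct sigqueue_demo { int sig; };                 /* stand-in for struct sigqueue */
struct task_demo { struct sigqueue_demo *cache; }; /* stand-in for task_struct::sigqueue_cache */

/* Take the cached entry, if any; NULL when the slot is empty or a race was lost. */
static struct sigqueue_demo *get_task_cache(struct task_demo *t)
{
	struct sigqueue_demo *q = __atomic_load_n(&t->cache, __ATOMIC_RELAXED);

	/* mirrors: cmpxchg(&t->sigqueue_cache, q, NULL) != q  ->  return NULL */
	if (!q || !__atomic_compare_exchange_n(&t->cache, &q, NULL, 0,
					       __ATOMIC_ACQ_REL, __ATOMIC_RELAXED))
		return NULL;
	return q;
}

/* Stash one entry; returns 0 on success, 1 when the slot is already occupied. */
static int put_task_cache(struct task_demo *t, struct sigqueue_demo *q)
{
	struct sigqueue_demo *expected = NULL;

	/* mirrors: cmpxchg(&t->sigqueue_cache, NULL, q) == NULL  ->  0 */
	return __atomic_compare_exchange_n(&t->cache, &expected, q, 0,
					   __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) ? 0 : 1;
}

int main(void)
{
	struct task_demo task = { .cache = NULL };
	struct sigqueue_demo *q = malloc(sizeof(*q));

	/* free path: park the entry in the one-slot cache instead of freeing it */
	if (put_task_cache(&task, q))
		free(q);			/* slot already full -> real free */

	/* alloc path: try the cache first, fall back to the allocator */
	q = get_task_cache(&task);
	printf("allocation served from cache: %s\n", q ? "yes" : "no");
	if (!q)
		q = malloc(sizeof(*q));

	free(q);
	return 0;
}

The kernel variant additionally restricts the free-side caching to rt_prio() tasks (see sigqueue_free_current() in the diff), so only real-time tasks keep a spare sigqueue parked in their task struct.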
-rw-r--r--   include/linux/sched.h  |  1
-rw-r--r--   include/linux/signal.h |  1
-rw-r--r--   kernel/exit.c          |  2
-rw-r--r--   kernel/fork.c          |  1
-rw-r--r--   kernel/signal.c        | 81
5 files changed, 76 insertions, 10 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bb28896ffd6a..b71161f2c2bc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1701,6 +1701,7 @@ struct task_struct {
 /* signal handlers */
 	struct signal_struct *signal;
 	struct sighand_struct *sighand;
+	struct sigqueue *sigqueue_cache;
 
 	sigset_t blocked, real_blocked;
 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
diff --git a/include/linux/signal.h b/include/linux/signal.h
index ffa58ff53e22..0d7faf67a464 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -250,6 +250,7 @@ static inline void init_sigpending(struct sigpending *sig)
 }
 
 extern void flush_sigqueue(struct sigpending *queue);
+extern void flush_task_sigqueue(struct task_struct *tsk);
 
 /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
 static inline int valid_signal(unsigned long sig)
diff --git a/kernel/exit.c b/kernel/exit.c
index 8716f0780fe3..89e38e5929a9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -143,7 +143,7 @@ static void __exit_signal(struct task_struct *tsk)
 	 * Do this under ->siglock, we can race with another thread
 	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
 	 */
-	flush_sigqueue(&tsk->pending);
+	flush_task_sigqueue(tsk);
 	tsk->sighand = NULL;
 	spin_unlock(&sighand->siglock);
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 02ac2a1e2101..a434cbefcf7f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1582,6 +1582,7 @@ static __latent_entropy struct task_struct *copy_process(
 	spin_lock_init(&p->alloc_lock);
 
 	init_sigpending(&p->pending);
+	p->sigqueue_cache = NULL;
 
 	p->utime = p->stime = p->gtime = 0;
 	p->utimescaled = p->stimescaled = 0;
diff --git a/kernel/signal.c b/kernel/signal.c
index d2edbc3f2bc5..09c0f647ce17 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -14,6 +14,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/sched.h>
+#include <linux/sched/rt.h>
 #include <linux/fs.h>
 #include <linux/tty.h>
 #include <linux/binfmts.h>
@@ -363,13 +364,30 @@ static bool task_participate_group_stop(struct task_struct *task)
 	return false;
 }
 
+static inline struct sigqueue *get_task_cache(struct task_struct *t)
+{
+	struct sigqueue *q = t->sigqueue_cache;
+
+	if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
+		return NULL;
+	return q;
+}
+
+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
+{
+	if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
+		return 0;
+	return 1;
+}
+
 /*
  * allocate a new signal queue record
  * - this may be called without locks if and only if t == current, otherwise an
  *   appropriate lock must be held to stop the target task from exiting
  */
 static struct sigqueue *
-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
+__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
+		    int override_rlimit, int fromslab)
 {
 	struct sigqueue *q = NULL;
 	struct user_struct *user;
@@ -390,8 +408,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
 	get_uid(user);
 	rcu_read_unlock();
 
-	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
-		q = kmem_cache_alloc(sigqueue_cachep, flags);
+	if (override_rlimit ||
+	    atomic_read(&user->sigpending) <=
+			task_rlimit(t, RLIMIT_SIGPENDING)) {
+		if (!fromslab)
+			q = get_task_cache(t);
+		if (!q)
+			q = kmem_cache_alloc(sigqueue_cachep, flags);
 	} else {
 		print_dropped_signal(sig);
 	}
@@ -408,6 +431,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
 	return q;
 }
 
+static struct sigqueue *
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
+		 int override_rlimit)
+{
+	return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
+}
+
 static void __sigqueue_free(struct sigqueue *q)
 {
 	if (q->flags & SIGQUEUE_PREALLOC)
@@ -417,6 +447,21 @@ static void __sigqueue_free(struct sigqueue *q)
 	kmem_cache_free(sigqueue_cachep, q);
 }
 
+static void sigqueue_free_current(struct sigqueue *q)
+{
+	struct user_struct *up;
+
+	if (q->flags & SIGQUEUE_PREALLOC)
+		return;
+
+	up = q->user;
+	if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
+		atomic_dec(&up->sigpending);
+		free_uid(up);
+	} else
+		__sigqueue_free(q);
+}
+
 void flush_sigqueue(struct sigpending *queue)
 {
 	struct sigqueue *q;
@@ -430,6 +475,21 @@ void flush_sigqueue(struct sigpending *queue)
 }
 
 /*
+ * Called from __exit_signal. Flush tsk->pending and
+ * tsk->sigqueue_cache
+ */
+void flush_task_sigqueue(struct task_struct *tsk)
+{
+	struct sigqueue *q;
+
+	flush_sigqueue(&tsk->pending);
+
+	q = get_task_cache(tsk);
+	if (q)
+		kmem_cache_free(sigqueue_cachep, q);
+}
+
+/*
  * Flush all pending signals for this kthread.
  */
 void flush_signals(struct task_struct *t)
@@ -548,7 +608,7 @@ still_pending:
 			(info->si_code == SI_TIMER) &&
 			(info->si_sys_private);
 
-		__sigqueue_free(first);
+		sigqueue_free_current(first);
 	} else {
 		/*
 		 * Ok, it wasn't in the queue. This must be
@@ -584,6 +644,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 	bool resched_timer = false;
 	int signr;
 
+	WARN_ON_ONCE(tsk != current);
+
 	/* We only dequeue private signals from ourselves, we don't let
 	 * signalfd steal them
 	 */
@@ -1555,7 +1617,8 @@ EXPORT_SYMBOL(kill_pid);
  */
 struct sigqueue *sigqueue_alloc(void)
 {
-	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
+	/* Preallocated sigqueue objects always from the slabcache ! */
+	struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
 
 	if (q)
 		q->flags |= SIGQUEUE_PREALLOC;
@@ -2395,7 +2458,7 @@ relock:
 }
 
 /**
- * signal_delivered - 
+ * signal_delivered -
  * @ksig:		kernel signal struct
  * @stepping:		nonzero if debugger single-step or block-step in use
  *
@@ -3339,7 +3402,7 @@ int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
  */
 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
 {
-	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t)); 
+	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
 }
 
 #endif
@@ -3464,7 +3527,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
 	if (!ret && oact) {
 		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
-		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), 
+		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
 		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
 		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
@@ -3642,7 +3705,7 @@ SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
 		return -EFAULT;
 	return sigsuspend(&newset);
 }
- 
+
 #ifdef CONFIG_COMPAT
 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
 {