author:    Peter Zijlstra <a.p.zijlstra@chello.nl>  2011-09-13 15:09:40 +0200
committer: Steven Rostedt <rostedt@goodmis.org>     2015-01-16 18:13:45 -0500
commit:    6e7e10c72b685ed0303a59813e77ba5bdc915785
tree:      319687a52edbb3efbda79f5c52fb406fca8ad2e0 /ipc/sem.c
parent:    c72f3b89bc58a4b7d3d829197ceae1bff23cfd5c
ipc/sem: Rework semaphore wakeups
Current SysV semaphores have a convoluted wakeup scheme that involves
keeping preemption disabled over a potentially O(n^2) loop while other
CPUs busy-wait on the result.
Kill this and simply wake the task directly from under the sem_lock.
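In other words, the RT path in the patch below reduces to a direct
wakeup under the semaphore lock. A minimal sketch of that pattern
(field names taken from the patch hunks; the helper name here is
hypothetical, not from the kernel):

	/* Wake the sleeper immediately instead of batching wakeups in a
	 * preemption-disabled loop.  The reference bracket keeps the task
	 * alive: once q->status is written, the sleeper may observe it
	 * and exit before wake_up_process() runs. */
	static void wake_direct(struct sem_queue *q, int error)
	{
		struct task_struct *p = q->sleeper;

		get_task_struct(p);
		q->status = error;
		wake_up_process(p);
		put_task_struct(p);
	}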
This was discovered by a migrate_disable() debug feature that
disallows the interleaved (non-nested) sequence:

spin_lock();
preempt_disable();
spin_unlock();
preempt_enable();
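For illustration, a short sketch of the two orderings (the lock
variable is hypothetical, not from the patch): the debug feature
objects to critical sections that overlap without nesting, as in the
sequence above, whereas the nested counterpart closes each scope
inside the one around it:

	/* Interleaved: preemption is still disabled when the lock drops. */
	spin_lock(&some_lock);
	preempt_disable();
	spin_unlock(&some_lock);
	preempt_enable();

	/* Nested: each critical section ends inside the enclosing one. */
	spin_lock(&some_lock);
	preempt_disable();
	preempt_enable();
	spin_unlock(&some_lock);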
Cc: Manfred Spraul <manfred@colorfullife.com>
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1315994224.5040.1.camel@twins
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'ipc/sem.c')
 ipc/sem.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/ipc/sem.c b/ipc/sem.c
index db9d241af133..6a804dab7f5f 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -607,7 +607,7 @@ static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
 		curr = sma->sem_base + sop->sem_num;
 		sem_op = sop->sem_op;
 		result = curr->semval;
-
+
 		if (!sem_op && result)
 			goto would_block;
 
@@ -634,7 +634,7 @@ static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
 			un->semadj[sop->sem_num] -= sop->sem_op;
 		sop--;
 	}
-
+
 	return 0;
 
 out_of_range:
@@ -666,6 +666,13 @@ undo:
 static void wake_up_sem_queue_prepare(struct list_head *pt,
 				struct sem_queue *q, int error)
 {
+#ifdef CONFIG_PREEMPT_RT_BASE
+	struct task_struct *p = q->sleeper;
+	get_task_struct(p);
+	q->status = error;
+	wake_up_process(p);
+	put_task_struct(p);
+#else
 	if (list_empty(pt)) {
 		/*
 		 * Hold preempt off so that we don't get preempted and have the
@@ -677,6 +684,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
 	q->pid = error;
 
 	list_add_tail(&q->list, pt);
+#endif
 }
 
 /**
@@ -690,6 +698,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
  */
 static void wake_up_sem_queue_do(struct list_head *pt)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	struct sem_queue *q, *t;
 	int did_something;
 
@@ -702,6 +711,7 @@ static void wake_up_sem_queue_do(struct list_head *pt)
 	}
 	if (did_something)
 		preempt_enable();
+#endif
 }
 
 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
@@ -1161,7 +1171,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
 		err = security_sem_semctl(NULL, cmd);
 		if (err)
 			return err;
-
+
 		memset(&seminfo,0,sizeof(seminfo));
 		seminfo.semmni = ns->sc_semmni;
 		seminfo.semmns = ns->sc_semmns;
@@ -1875,7 +1885,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 	/* We need to sleep on this operation, so we put the current
 	 * task into the pending queue and go to sleep.
 	 */
-
+
 	queue.sops = sops;
 	queue.nsops = nsops;
 	queue.undo = un;
@@ -2010,7 +2020,7 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
 			return error;
 		atomic_inc(&undo_list->refcnt);
 		tsk->sysvsem.undo_list = undo_list;
-	} else
+	} else
 		tsk->sysvsem.undo_list = NULL;
 
 	return 0;
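For context, a sketch of how the two helpers pair up at a call site
(paraphrased, not a hunk from this patch; sem_lock()/sem_unlock()
stand in for whatever locking the caller actually does): on non-RT,
wakeups are collected on a local list while the lock is held and
performed after it is dropped, while on PREEMPT_RT the prepare step
wakes the task immediately and the list stays empty:

	struct list_head tasks;

	INIT_LIST_HEAD(&tasks);

	sem_lock(sma);				/* hypothetical caller locking */
	wake_up_sem_queue_prepare(&tasks, q, error);
	sem_unlock(sma);

	/* Non-RT: wakes every queued task, then preempt_enable().
	 * RT: compiled out; the list was never populated. */
	wake_up_sem_queue_do(&tasks);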