diff options
author | Ingo Molnar <mingo@elte.hu> | 2009-07-03 08:30:12 -0500 |
---|---|---|
committer | Steven Rostedt <rostedt@goodmis.org> | 2016-07-14 21:52:30 -0400 |
commit | c92d73d4c735726a47ad4a1a5c32cc56d89c6535 (patch) | |
tree | 55066f49e83d14974e732b911219cb7abfaee883 | |
parent | 812100ba19e15fa52397dc4d0008b9b40ef140df (diff) | |
download | linux-rt-c92d73d4c735726a47ad4a1a5c32cc56d89c6535.tar.gz |
ipc: Make the ipc code -rt aware
RT serializes the code with the (rt)spinlock but keeps preemption
enabled. Some parts of the code need to be atomic nevertheless.
Protect it with preempt_disable/enable_rt pairs.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r-- | ipc/mqueue.c | 5 | ||||
-rw-r--r-- | ipc/msg.c | 16 |
2 files changed, 20 insertions, 1 deletion
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 9699d3f7989c..3c93aa1f6357 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -920,12 +920,17 @@ static inline void pipelined_send(struct mqueue_inode_info *info, struct msg_msg *message, struct ext_wait_queue *receiver) { + /* + * Keep them in one critical section for PREEMPT_RT: + */ + preempt_disable_rt(); receiver->msg = message; list_del(&receiver->list); receiver->state = STATE_PENDING; wake_up_process(receiver->task); smp_wmb(); receiver->state = STATE_READY; + preempt_enable_rt(); } /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() diff --git a/ipc/msg.c b/ipc/msg.c index 4a036c619607..9801c5fe5455 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -253,6 +253,12 @@ static void expunge_all(struct msg_queue *msq, int res) struct msg_receiver *msr, *t; list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { + /* + * Make sure that the wakeup doesnt preempt + * this CPU prematurely. (on PREEMPT_RT) + */ + preempt_disable_rt(); + msr->r_msg = NULL; /* initialize expunge ordering */ wake_up_process(msr->r_tsk); /* @@ -263,6 +269,8 @@ static void expunge_all(struct msg_queue *msq, int res) */ smp_mb(); msr->r_msg = ERR_PTR(res); + + preempt_enable_rt(); } } @@ -641,6 +649,11 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) if (testmsg(msg, msr->r_msgtype, msr->r_mode) && !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) { + /* + * Make sure that the wakeup doesnt preempt + * this CPU prematurely. (on PREEMPT_RT) + */ + preempt_disable_rt(); list_del(&msr->r_list); if (msr->r_maxsize < msg->m_ts) { @@ -662,12 +675,13 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) */ smp_mb(); msr->r_msg = msg; + preempt_enable_rt(); return 1; } + preempt_enable_rt(); } } - return 0; } |