From a1261f54611ec4ad6a7ab7080f86747e3ac3685b Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Sun, 13 Nov 2005 16:06:55 -0800
Subject: [PATCH] m68k: introduce task_thread_info

New helper: task_thread_info(task).  On platforms that have thread_info
allocated separately (i.e. in the default case) it simply returns
task->thread_info.  m68k wants (and for good reasons) to embed its
thread_info into task_struct, so it will (in a later patch) have a
task_thread_info() of its own.  For now we just add a macro for the
generic case and convert the existing instances of its body in the core
kernel to uses of the new macro.  Obviously safe - all normal
architectures get the same preprocessor output they used to get.

Signed-off-by: Al Viro
Signed-off-by: Roman Zippel
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/exit.c  | 2 +-
 kernel/fork.c  | 4 ++--
 kernel/sched.c | 6 +++---
 3 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'kernel')

diff --git a/kernel/exit.c b/kernel/exit.c
index 452a1d116178..ee515683b92d 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -859,7 +859,7 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (group_dead && tsk->signal->leader)
 		disassociate_ctty(1);
 
-	module_put(tsk->thread_info->exec_domain->module);
+	module_put(task_thread_info(tsk)->exec_domain->module);
 	if (tsk->binfmt)
 		module_put(tsk->binfmt->module);
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 158710d22566..7ef352ce347b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -919,7 +919,7 @@ static task_t *copy_process(unsigned long clone_flags,
 	if (nr_threads >= max_threads)
 		goto bad_fork_cleanup_count;
 
-	if (!try_module_get(p->thread_info->exec_domain->module))
+	if (!try_module_get(task_thread_info(p)->exec_domain->module))
 		goto bad_fork_cleanup_count;
 
 	if (p->binfmt && !try_module_get(p->binfmt->module))
@@ -1180,7 +1180,7 @@ bad_fork_cleanup:
 	if (p->binfmt)
 		module_put(p->binfmt->module);
 bad_fork_cleanup_put_domain:
-	module_put(p->thread_info->exec_domain->module);
+	module_put(task_thread_info(p)->exec_domain->module);
 bad_fork_cleanup_count:
 	put_group_info(p->group_info);
 	atomic_dec(&p->user->processes);
diff --git a/kernel/sched.c b/kernel/sched.c
index b6506671b2be..831f7e9d8f1c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1437,7 +1437,7 @@ void fastcall sched_fork(task_t *p, int clone_flags)
 #endif
 #ifdef CONFIG_PREEMPT
 	/* Want to start with kernel preemption disabled. */
-	p->thread_info->preempt_count = 1;
+	task_thread_info(p)->preempt_count = 1;
 #endif
 	/*
 	 * Share the timeslice between parent and child, thus the
@@ -4410,9 +4410,9 @@ void __devinit init_idle(task_t *idle, int cpu)
 
 	/* Set the preempt count _outside_ the spinlocks! */
 #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-	idle->thread_info->preempt_count = (idle->lock_depth >= 0);
+	task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
 #else
-	idle->thread_info->preempt_count = 0;
+	task_thread_info(idle)->preempt_count = 0;
 #endif
 }
-- 
cgit v1.2.1
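
For reference, the generic-case helper that the converted call sites rely on is
essentially a trivial field access.  The sketch below reflects what the commit
message describes ("simply returns task->thread_info" via "a macro for generic
case"); the exact header location and spelling in the tree are not shown in
this excerpt and should be treated as assumptions:

	/*
	 * Generic case (sketch): thread_info is allocated separately and the
	 * task_struct merely points at it, so the helper is a plain
	 * dereference.  m68k, which embeds thread_info inside task_struct,
	 * will later provide its own definition instead.
	 */
	#define task_thread_info(task)	((task)->thread_info)

With that definition, a converted use such as
task_thread_info(p)->preempt_count = 1 expands back to
p->thread_info->preempt_count = 1 on every architecture that keeps the default
layout, which is why the conversion above changes nothing at the preprocessor
level.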