diff options
 include/linux/sched.h   |  1 +
 include/linux/uaccess.h | 32 +++-----------------------------
 kernel/fork.c           |  1 +
 mm/memory.c             | 30 ++++++++++++++++++++++++++++++
 4 files changed, 35 insertions(+), 29 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h index 91c0ab3544f0..88df56d44d2f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1463,6 +1463,7 @@ struct task_struct { /* mutex deadlock detection */ struct mutex_waiter *blocked_on; #endif + int pagefault_disabled; #ifdef CONFIG_TRACE_IRQFLAGS unsigned int irq_events; unsigned long hardirq_enable_ip; diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index ecd3319dac33..9414a1b48f5c 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -6,36 +6,10 @@ /* * These routines enable/disable the pagefault handler in that - * it will not take any locks and go straight to the fixup table. - * - * They have great resemblance to the preempt_disable/enable calls - * and in fact they are identical; this is because currently there is - * no other way to make the pagefault handlers do this. So we do - * disable preemption but we don't necessarily care about that. + * it will not take any MM locks and go straight to the fixup table. */ -static inline void pagefault_disable(void) -{ - preempt_count_inc(); - /* - * make sure to have issued the store before a pagefault - * can hit. - */ - barrier(); -} - -static inline void pagefault_enable(void) -{ -#ifndef CONFIG_PREEMPT - /* - * make sure to issue those last loads/stores before enabling - * the pagefault handler again. 
- */ - barrier(); - preempt_count_dec(); -#else - preempt_enable(); -#endif -} +extern void pagefault_disable(void); +extern void pagefault_enable(void); #ifndef ARCH_HAS_NOCACHE_UACCESS diff --git a/kernel/fork.c b/kernel/fork.c index 5099883025a5..f4d669c788c5 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1344,6 +1344,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->hardirq_context = 0; p->softirq_context = 0; #endif + p->pagefault_disabled = 0; #ifdef CONFIG_LOCKDEP p->lockdep_depth = 0; /* no locks held yet */ p->curr_chain_key = 0; diff --git a/mm/memory.c b/mm/memory.c index 90fb265b32b6..922073f7d1c5 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3244,6 +3244,36 @@ unlock: return 0; } +void pagefault_disable(void) +{ + preempt_count_inc(); + current->pagefault_disabled++; + /* + * make sure to have issued the store before a pagefault + * can hit. + */ + barrier(); +} +EXPORT_SYMBOL(pagefault_disable); + +void pagefault_enable(void) +{ +#ifndef CONFIG_PREEMPT + /* + * make sure to issue those last loads/stores before enabling + * the pagefault handler again. + */ + barrier(); + current->pagefault_disabled--; + preempt_count_dec(); +#else + barrier(); + current->pagefault_disabled--; + preempt_enable(); +#endif +} +EXPORT_SYMBOL(pagefault_enable); + /* * By the time we get here, we already hold the mm semaphore * |