author     Linus Torvalds <torvalds@linux-foundation.org>  2008-02-04 09:16:03 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-02-04 09:16:03 -0800
commit     d2fc0bacd5c438cb459fdf531eff00ab18422a00
tree       d0ea52e4d2ad2fac12e19eaf6891c6af98353cfc /include
parent     93890b71a34f9490673a6edd56b61c2124215e46
parent     795d45b22c079946332bf3825afefe5a981a97b6
download   linux-next-d2fc0bacd5c438cb459fdf531eff00ab18422a00.tar.gz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (78 commits)
x86: fix RTC lockdep warning: potential hardirq recursion
x86: cpa, micro-optimization
x86: cpa, clean up code flow
x86: cpa, eliminate CPA_ enum
x86: cpa, cleanups
x86: implement gbpages support in change_page_attr()
x86: support gbpages in pagetable dump
x86: add gbpages support to lookup_address
x86: add pgtable accessor functions for gbpages
x86: add PUD_PAGE_SIZE
x86: add feature macros for the gbpages cpuid bit
x86: switch direct mapping setup over to set_pte
x86: fix page-present check in cpa_flush_range
x86: remove cpa warning
x86: remove now unused clear_kernel_mapping
x86: switch pci-gart over to using set_memory_np() instead of clear_kernel_mapping()
x86: cpa selftest, skip non present entries
x86: CPA fix pagetable split
x86: rename LARGE_PAGE_SIZE to PMD_PAGE_SIZE
x86: cpa, fix lookup_address
...
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/rtc.h               | 11
-rw-r--r--  include/asm-generic/tlb.h               |  1
-rw-r--r--  include/asm-x86/asm.h                   |  7
-rw-r--r--  include/asm-x86/bugs.h                  |  2
-rw-r--r--  include/asm-x86/cpufeature.h            | 14
-rw-r--r--  include/asm-x86/efi.h                   |  4
-rw-r--r--  include/asm-x86/futex.h                 | 23
-rw-r--r--  include/asm-x86/highmem.h               |  4
-rw-r--r--  include/asm-x86/hw_irq_32.h             |  2
-rw-r--r--  include/asm-x86/i387.h                  | 16
-rw-r--r--  include/asm-x86/io_32.h                 | 25
-rw-r--r--  include/asm-x86/mach-numaq/mach_apic.h  |  2
-rw-r--r--  include/asm-x86/msr.h                   | 10
-rw-r--r--  include/asm-x86/page.h                  |  4
-rw-r--r--  include/asm-x86/page_64.h               |  3
-rw-r--r--  include/asm-x86/pgalloc_32.h            |  6
-rw-r--r--  include/asm-x86/pgtable-3level.h        | 26
-rw-r--r--  include/asm-x86/pgtable.h               |  4
-rw-r--r--  include/asm-x86/pgtable_32.h            |  2
-rw-r--r--  include/asm-x86/pgtable_64.h            |  7
-rw-r--r--  include/asm-x86/string_32.h             |  8
-rw-r--r--  include/asm-x86/system.h                | 23
-rw-r--r--  include/asm-x86/uaccess_32.h            | 18
-rw-r--r--  include/asm-x86/uaccess_64.h            | 10
-rw-r--r--  include/asm-x86/vm86.h                  |  1
25 files changed, 98 insertions, 135 deletions
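
Much of the churn in the diff below is mechanical: asm.h gains an _ASM_EXTABLE(from, to) helper that emits the __ex_table entry (the .section directive, _ASM_ALIGN and the _ASM_PTR fault/fixup address pair), and futex.h, i387.h, msr.h, system.h and the uaccess headers drop their open-coded copies of that fragment in favour of it. As a rough sketch of the resulting inline-asm pattern (the macro name here is invented for illustration; the real conversions are in the hunks that follow):

/* Illustrative sketch only, not the kernel's __get_user_asm(). */
#define my_get_user_asm(x, addr, err)                                  \
        asm volatile("1:        movl %2,%1\n"                           \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        movl %3,%0\n"                           \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b,3b)                                \
                     : "=r" (err), "=r" (x)                             \
                     : "m" (*(addr)), "i" (-EFAULT), "0" (err))

Label 1: marks the load that may fault on a bad user pointer and label 3: is the fixup that stores -EFAULT and resumes at 2:; _ASM_EXTABLE(1b,3b) records that pairing in __ex_table so the page-fault handler can redirect execution to the fixup.
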
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h index d3238f1f70a6..dd1bed860e64 100644 --- a/include/asm-generic/rtc.h +++ b/include/asm-generic/rtc.h @@ -35,10 +35,11 @@ static inline unsigned char rtc_is_updating(void) { unsigned char uip; + unsigned long flags; - spin_lock_irq(&rtc_lock); + spin_lock_irqsave(&rtc_lock, flags); uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP); - spin_unlock_irq(&rtc_lock); + spin_unlock_irqrestore(&rtc_lock, flags); return uip; } @@ -46,6 +47,8 @@ static inline unsigned int get_rtc_time(struct rtc_time *time) { unsigned long uip_watchdog = jiffies; unsigned char ctrl; + unsigned long flags; + #ifdef CONFIG_MACH_DECSTATION unsigned int real_year; #endif @@ -72,7 +75,7 @@ static inline unsigned int get_rtc_time(struct rtc_time *time) * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated * by the RTC when initially set to a non-zero value. */ - spin_lock_irq(&rtc_lock); + spin_lock_irqsave(&rtc_lock, flags); time->tm_sec = CMOS_READ(RTC_SECONDS); time->tm_min = CMOS_READ(RTC_MINUTES); time->tm_hour = CMOS_READ(RTC_HOURS); @@ -83,7 +86,7 @@ static inline unsigned int get_rtc_time(struct rtc_time *time) real_year = CMOS_READ(RTC_DEC_YEAR); #endif ctrl = CMOS_READ(RTC_CONTROL); - spin_unlock_irq(&rtc_lock); + spin_unlock_irqrestore(&rtc_lock, flags); if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 75f2bfab614f..f490e43a90b9 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -14,7 +14,6 @@ #define _ASM_GENERIC__TLB_H #include <linux/swap.h> -#include <linux/quicklist.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h index 1a6980a60fc6..90dec0c23646 100644 --- a/include/asm-x86/asm.h +++ b/include/asm-x86/asm.h @@ -29,4 +29,11 @@ #endif /* CONFIG_X86_32 */ +/* Exception table entry */ +# define _ASM_EXTABLE(from,to) \ + " .section __ex_table,\"a\"\n" \ + _ASM_ALIGN "\n" \ + _ASM_PTR #from "," #to "\n" \ + " .previous\n" + #endif /* _ASM_X86_ASM_H */ diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h index 3fcc30dc0731..021cbdd5f258 100644 --- a/include/asm-x86/bugs.h +++ b/include/asm-x86/bugs.h @@ -2,6 +2,6 @@ #define _ASM_X86_BUGS_H extern void check_bugs(void); -extern int ppro_with_ram_bug(void); +int ppro_with_ram_bug(void); #endif /* _ASM_X86_BUGS_H */ diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index 3fb7dfa7fc91..065e92966c7c 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h @@ -4,9 +4,6 @@ #ifndef _ASM_X86_CPUFEATURE_H #define _ASM_X86_CPUFEATURE_H -#ifndef __ASSEMBLY__ -#include <linux/bitops.h> -#endif #include <asm/required-features.h> #define NCAPINTS 8 /* N 32-bit words worth of info */ @@ -49,6 +46,7 @@ #define X86_FEATURE_MP (1*32+19) /* MP Capable. */ #define X86_FEATURE_NX (1*32+20) /* Execute Disable */ #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ +#define X86_FEATURE_GBPAGES (1*32+26) /* GB pages */ #define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! 
extensions */ @@ -115,6 +113,13 @@ */ #define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ +#if defined(__KERNEL__) && !defined(__ASSEMBLY__) + +#include <linux/bitops.h> + +extern const char * const x86_cap_flags[NCAPINTS*32]; +extern const char * const x86_power_flags[32]; + #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && \ ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ @@ -175,6 +180,7 @@ #define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) #define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) +#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) # define cpu_has_invlpg 1 @@ -204,4 +210,6 @@ #endif /* CONFIG_X86_64 */ +#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ + #endif /* _ASM_X86_CPUFEATURE_H */ diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h index 9c68a1f098d8..ea9734b74aca 100644 --- a/include/asm-x86/efi.h +++ b/include/asm-x86/efi.h @@ -33,7 +33,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...); #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ efi_call_virt(f, a1, a2, a3, a4, a5, a6) -#define efi_ioremap(addr, size) ioremap(addr, size) +#define efi_ioremap(addr, size) ioremap_cache(addr, size) #else /* !CONFIG_X86_32 */ @@ -86,7 +86,7 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3, efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) -extern void *efi_ioremap(unsigned long offset, unsigned long size); +extern void *efi_ioremap(unsigned long addr, unsigned long size); #endif /* CONFIG_X86_32 */ diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h index 9d919264923a..cd9f894dd2d7 100644 --- a/include/asm-x86/futex.h +++ b/include/asm-x86/futex.h @@ -17,11 +17,8 @@ "2: .section .fixup,\"ax\"\n \ 3: mov %3, %1\n \ jmp 2b\n \ - .previous\n \ - .section __ex_table,\"a\"\n \ - .align 8\n" \ - _ASM_PTR "1b,3b\n \ - .previous" \ + .previous\n" \ + _ASM_EXTABLE(1b,3b) \ : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ : "i" (-EFAULT), "0" (oparg), "1" (0)) @@ -35,11 +32,9 @@ 3: .section .fixup,\"ax\"\n \ 4: mov %5, %1\n \ jmp 3b\n \ - .previous\n \ - .section __ex_table,\"a\"\n \ - .align 8\n" \ - _ASM_PTR "1b,4b,2b,4b\n \ - .previous" \ + .previous\n" \ + _ASM_EXTABLE(1b,4b) \ + _ASM_EXTABLE(2b,4b) \ : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \ "=&r" (tem) \ : "r" (oparg), "i" (-EFAULT), "1" (0)) @@ -111,18 +106,12 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) return -EFAULT; __asm__ __volatile__( - "1: lock; cmpxchgl %3, %1 \n" "2: .section .fixup, \"ax\" \n" "3: mov %2, %0 \n" " jmp 2b \n" " .previous \n" - - " .section __ex_table, \"a\" \n" - " .align 8 \n" - _ASM_PTR " 1b,3b \n" - " .previous \n" - + _ASM_EXTABLE(1b,3b) : "=a" (oldval), "+m" (*uaddr) : "i" (-EFAULT), "r" (newval), "0" (oldval) : "memory" diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h index 13cdcd66fff2..c25cfcaab589 100644 --- a/include/asm-x86/highmem.h +++ b/include/asm-x86/highmem.h @@ -63,8 +63,8 @@ extern pte_t *pkmap_page_table; #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) -extern void * FASTCALL(kmap_high(struct page *page)); -extern void FASTCALL(kunmap_high(struct page *page)); +extern void *kmap_high(struct page *page); +extern void kunmap_high(struct page *page); void *kmap(struct page *page); void kunmap(struct page 
*page); diff --git a/include/asm-x86/hw_irq_32.h b/include/asm-x86/hw_irq_32.h index 6d65fbb6358b..ea88054e03f3 100644 --- a/include/asm-x86/hw_irq_32.h +++ b/include/asm-x86/hw_irq_32.h @@ -47,7 +47,7 @@ void enable_8259A_irq(unsigned int irq); int i8259A_irq_pending(unsigned int irq); void make_8259A_irq(unsigned int irq); void init_8259A(int aeoi); -void FASTCALL(send_IPI_self(int vector)); +void send_IPI_self(int vector); void init_VISWS_APIC_irqs(void); void setup_IO_APIC(void); void disable_IO_APIC(void); diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h index ba8105ca822b..6b1895ccd6b7 100644 --- a/include/asm-x86/i387.h +++ b/include/asm-x86/i387.h @@ -13,6 +13,7 @@ #include <linux/sched.h> #include <linux/kernel_stat.h> #include <linux/regset.h> +#include <asm/asm.h> #include <asm/processor.h> #include <asm/sigcontext.h> #include <asm/user.h> @@ -41,10 +42,7 @@ static inline void tolerant_fwait(void) { asm volatile("1: fwait\n" "2:\n" - " .section __ex_table,\"a\"\n" - " .align 8\n" - " .quad 1b,2b\n" - " .previous\n"); + _ASM_EXTABLE(1b,2b)); } static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) @@ -57,10 +55,7 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) "3: movl $-1,%[err]\n" " jmp 2b\n" ".previous\n" - ".section __ex_table,\"a\"\n" - " .align 8\n" - " .quad 1b,3b\n" - ".previous" + _ASM_EXTABLE(1b,3b) : [err] "=r" (err) #if 0 /* See comment in __save_init_fpu() below. */ : [fx] "r" (fx), "m" (*fx), "0" (0)); @@ -99,10 +94,7 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) "3: movl $-1,%[err]\n" " jmp 2b\n" ".previous\n" - ".section __ex_table,\"a\"\n" - " .align 8\n" - " .quad 1b,3b\n" - ".previous" + _ASM_EXTABLE(1b,3b) : [err] "=r" (err), "=m" (*fx) #if 0 /* See comment in __fxsave_clear() below. 
*/ : [fx] "r" (fx), "0" (0)); diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h index 586d7aa54ceb..58d2c45cd0b1 100644 --- a/include/asm-x86/io_32.h +++ b/include/asm-x86/io_32.h @@ -275,29 +275,6 @@ static inline void slow_down_io(void) { #endif -#ifdef CONFIG_X86_NUMAQ -extern void *xquad_portio; /* Where the IO area was mapped */ -#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) -#define __BUILDIO(bwl,bw,type) \ -static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \ - if (xquad_portio) \ - write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \ - else \ - out##bwl##_local(value, port); \ -} \ -static inline void out##bwl(unsigned type value, int port) { \ - out##bwl##_quad(value, port, 0); \ -} \ -static inline unsigned type in##bwl##_quad(int port, int quad) { \ - if (xquad_portio) \ - return read##bwl(XQUAD_PORT_ADDR(port, quad)); \ - else \ - return in##bwl##_local(port); \ -} \ -static inline unsigned type in##bwl(int port) { \ - return in##bwl##_quad(port, 0); \ -} -#else #define __BUILDIO(bwl,bw,type) \ static inline void out##bwl(unsigned type value, int port) { \ out##bwl##_local(value, port); \ @@ -305,8 +282,6 @@ static inline void out##bwl(unsigned type value, int port) { \ static inline unsigned type in##bwl(int port) { \ return in##bwl##_local(port); \ } -#endif - #define BUILDIO(bwl,bw,type) \ static inline void out##bwl##_local(unsigned type value, int port) { \ diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/mach-numaq/mach_apic.h index 17e183bd39c1..3b637fac890b 100644 --- a/include/asm-x86/mach-numaq/mach_apic.h +++ b/include/asm-x86/mach-numaq/mach_apic.h @@ -109,6 +109,8 @@ static inline int mpc_apic_id(struct mpc_config_processor *m, return logical_apicid; } +extern void *xquad_portio; + static inline void setup_portio_remap(void) { int num_quads = num_online_nodes(); diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h index 204a8a30fecf..3ca29ebebbb1 100644 --- a/include/asm-x86/msr.h +++ b/include/asm-x86/msr.h @@ -57,10 +57,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr, ".section .fixup,\"ax\"\n\t" "3: mov %3,%0 ; jmp 1b\n\t" ".previous\n\t" - ".section __ex_table,\"a\"\n" - _ASM_ALIGN "\n\t" - _ASM_PTR " 2b,3b\n\t" - ".previous" + _ASM_EXTABLE(2b,3b) : "=r" (*err), EAX_EDX_RET(val, low, high) : "c" (msr), "i" (-EFAULT)); return EAX_EDX_VAL(val, low, high); @@ -81,10 +78,7 @@ static inline int native_write_msr_safe(unsigned int msr, ".section .fixup,\"ax\"\n\t" "3: mov %4,%0 ; jmp 1b\n\t" ".previous\n\t" - ".section __ex_table,\"a\"\n" - _ASM_ALIGN "\n\t" - _ASM_PTR " 2b,3b\n\t" - ".previous" + _ASM_EXTABLE(2b,3b) : "=a" (err) : "c" (msr), "0" (low), "d" (high), "i" (-EFAULT)); diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h index c8b30efeed85..1cb7c51bc296 100644 --- a/include/asm-x86/page.h +++ b/include/asm-x86/page.h @@ -13,8 +13,8 @@ #define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK) #define PTE_MASK (_AT(long, PHYSICAL_PAGE_MASK)) -#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT) -#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) +#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) +#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) #define HPAGE_SHIFT PMD_SHIFT #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h index c1ac42d8707f..dcf0c0746075 100644 --- a/include/asm-x86/page_64.h +++ b/include/asm-x86/page_64.h @@ -23,6 +23,9 @@ #define MCE_STACK 5 #define 
N_EXCEPTION_STACKS 5 /* hw limit: 7 */ +#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) +#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) + #define __PAGE_OFFSET _AC(0xffff810000000000, UL) #define __PHYSICAL_START CONFIG_PHYSICAL_START diff --git a/include/asm-x86/pgalloc_32.h b/include/asm-x86/pgalloc_32.h index 7641e7b5d931..6c21ef951dab 100644 --- a/include/asm-x86/pgalloc_32.h +++ b/include/asm-x86/pgalloc_32.h @@ -80,8 +80,10 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT)); /* - * Pentium-II erratum A13: in PAE mode we explicitly have to flush - * the TLB via cr3 if the top-level pgd is changed... + * According to Intel App note "TLBs, Paging-Structure Caches, + * and Their Invalidation", April 2007, document 317080-001, + * section 8.1: in PAE mode we explicitly have to flush the + * TLB via cr3 if the top-level pgd is changed... */ if (mm == current->active_mm) write_cr3(read_cr3()); diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h index a195c3e757b9..1d763eec740f 100644 --- a/include/asm-x86/pgtable-3level.h +++ b/include/asm-x86/pgtable-3level.h @@ -93,26 +93,22 @@ static inline void native_pmd_clear(pmd_t *pmd) static inline void pud_clear(pud_t *pudp) { + unsigned long pgd; + set_pud(pudp, __pud(0)); /* - * In principle we need to do a cr3 reload here to make sure - * the processor recognizes the changed pgd. In practice, all - * the places where pud_clear() gets called are followed by - * full tlb flushes anyway, so we can defer the cost here. - * - * Specifically: - * - * mm/memory.c:free_pmd_range() - immediately after the - * pud_clear() it does a pmd_free_tlb(). We change the - * mmu_gather structure to do a full tlb flush (which has the - * effect of reloading cr3) when the pagetable free is - * complete. + * According to Intel App note "TLBs, Paging-Structure Caches, + * and Their Invalidation", April 2007, document 317080-001, + * section 8.1: in PAE mode we explicitly have to flush the + * TLB via cr3 if the top-level pgd is changed... * - * arch/x86/mm/hugetlbpage.c:huge_pmd_unshare() - the call to - * this is followed by a flush_tlb_range, which on x86 does a - * full tlb flush. + * Make sure the pud entry we're updating is within the + * current pgd to avoid unnecessary TLB flushes. 
*/ + pgd = read_cr3(); + if (__pa(pudp) >= pgd && __pa(pudp) < (pgd + sizeof(pgd_t)*PTRS_PER_PGD)) + write_cr3(pgd); } #define pud_page(pud) \ diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index cd2524f07452..44c0a4f1b1eb 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h @@ -13,10 +13,12 @@ #define _PAGE_BIT_DIRTY 6 #define _PAGE_BIT_FILE 6 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ +#define _PAGE_BIT_PAT 7 /* on 4KB pages */ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ #define _PAGE_BIT_UNUSED1 9 /* available for programmer */ #define _PAGE_BIT_UNUSED2 10 #define _PAGE_BIT_UNUSED3 11 +#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ /* @@ -36,6 +38,8 @@ #define _PAGE_UNUSED1 (_AC(1, L)<<_PAGE_BIT_UNUSED1) #define _PAGE_UNUSED2 (_AC(1, L)<<_PAGE_BIT_UNUSED2) #define _PAGE_UNUSED3 (_AC(1, L)<<_PAGE_BIT_UNUSED3) +#define _PAGE_PAT (_AC(1, L)<<_PAGE_BIT_PAT) +#define _PAGE_PAT_LARGE (_AC(1, L)<<_PAGE_BIT_PAT_LARGE) #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define _PAGE_NX (_AC(1, ULL) << _PAGE_BIT_NX) diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h index 21e70fbf1dae..935630d17304 100644 --- a/include/asm-x86/pgtable_32.h +++ b/include/asm-x86/pgtable_32.h @@ -148,6 +148,8 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) */ #define pgd_offset_k(address) pgd_offset(&init_mm, address) +static inline int pud_large(pud_t pud) { return 0; } + /* * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] * diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index 6e615a103c2f..bd4740a60f29 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h @@ -21,7 +21,6 @@ extern pgd_t init_level4_pgt[]; #define swapper_pg_dir init_level4_pgt extern void paging_init(void); -extern void clear_kernel_mapping(unsigned long addr, unsigned long size); #endif /* !__ASSEMBLY__ */ @@ -199,6 +198,12 @@ static inline unsigned long pmd_bad(pmd_t pmd) #define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address)) #define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT) +static inline int pud_large(pud_t pte) +{ + return (pud_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) == + (_PAGE_PSE|_PAGE_PRESENT); +} + /* PMD - Level 2 access */ #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK)) #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h index 55bfa308f900..c5d13a86dea7 100644 --- a/include/asm-x86/string_32.h +++ b/include/asm-x86/string_32.h @@ -213,14 +213,14 @@ static __always_inline void * __constant_c_and_count_memset(void * s, unsigned l case 0: return s; case 1: - *(unsigned char *)s = pattern; + *(unsigned char *)s = pattern & 0xff; return s; case 2: - *(unsigned short *)s = pattern; + *(unsigned short *)s = pattern & 0xffff; return s; case 3: - *(unsigned short *)s = pattern; - *(2+(unsigned char *)s) = pattern; + *(unsigned short *)s = pattern & 0xffff; + *(2+(unsigned char *)s) = pattern & 0xff; return s; case 4: *(unsigned long *)s = pattern; diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h index ee32ef9367f4..9cff02ffe6c2 100644 --- a/include/asm-x86/system.h +++ b/include/asm-x86/system.h @@ -20,8 +20,8 @@ #ifdef CONFIG_X86_32 struct task_struct; /* one of the stranger aspects of C forward declarations */ -extern struct 
task_struct *FASTCALL(__switch_to(struct task_struct *prev, - struct task_struct *next)); +struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *next); /* * Saving eflags is important. It switches not only IOPL between tasks, @@ -130,10 +130,7 @@ extern void load_gs_index(unsigned); "movl %k1, %%" #seg "\n\t" \ "jmp 2b\n" \ ".previous\n" \ - ".section __ex_table,\"a\"\n\t" \ - _ASM_ALIGN "\n\t" \ - _ASM_PTR " 1b,3b\n" \ - ".previous" \ + _ASM_EXTABLE(1b,3b) \ : :"r" (value), "r" (0)) @@ -214,12 +211,10 @@ static inline unsigned long native_read_cr4_safe(void) /* This could fault if %cr4 does not exist. In x86_64, a cr4 always * exists, so it will never fail. */ #ifdef CONFIG_X86_32 - asm volatile("1: mov %%cr4, %0 \n" - "2: \n" - ".section __ex_table,\"a\" \n" - ".long 1b,2b \n" - ".previous \n" - : "=r" (val), "=m" (__force_order) : "0" (0)); + asm volatile("1: mov %%cr4, %0\n" + "2:\n" + _ASM_EXTABLE(1b,2b) + : "=r" (val), "=m" (__force_order) : "0" (0)); #else val = native_read_cr4(); #endif @@ -276,9 +271,9 @@ static inline void native_wbinvd(void) #endif /* __KERNEL__ */ -static inline void clflush(void *__p) +static inline void clflush(volatile void *__p) { - asm volatile("clflush %0" : "+m" (*(char __force *)__p)); + asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); } #define nop() __asm__ __volatile__ ("nop") diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h index d2a4f7be9c2c..fcc570ec4fee 100644 --- a/include/asm-x86/uaccess_32.h +++ b/include/asm-x86/uaccess_32.h @@ -8,6 +8,7 @@ #include <linux/thread_info.h> #include <linux/prefetch.h> #include <linux/string.h> +#include <asm/asm.h> #include <asm/page.h> #define VERIFY_READ 0 @@ -287,11 +288,8 @@ extern void __put_user_8(void); "4: movl %3,%0\n" \ " jmp 3b\n" \ ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 1b,4b\n" \ - " .long 2b,4b\n" \ - ".previous" \ + _ASM_EXTABLE(1b,4b) \ + _ASM_EXTABLE(2b,4b) \ : "=r"(err) \ : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err)) @@ -338,10 +336,7 @@ struct __large_struct { unsigned long buf[100]; }; "3: movl %3,%0\n" \ " jmp 2b\n" \ ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 1b,3b\n" \ - ".previous" \ + _ASM_EXTABLE(1b,3b) \ : "=r"(err) \ : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err)) @@ -378,10 +373,7 @@ do { \ " xor"itype" %"rtype"1,%"rtype"1\n" \ " jmp 2b\n" \ ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 1b,3b\n" \ - ".previous" \ + _ASM_EXTABLE(1b,3b) \ : "=r"(err), ltype (x) \ : "m"(__m(addr)), "i"(errret), "0"(err)) diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h index 31d794702719..b87eb4ba8f9d 100644 --- a/include/asm-x86/uaccess_64.h +++ b/include/asm-x86/uaccess_64.h @@ -181,10 +181,7 @@ struct __large_struct { unsigned long buf[100]; }; "3: mov %3,%0\n" \ " jmp 2b\n" \ ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 8\n" \ - " .quad 1b,3b\n" \ - ".previous" \ + _ASM_EXTABLE(1b,3b) \ : "=r"(err) \ : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err)) @@ -226,10 +223,7 @@ do { \ " xor"itype" %"rtype"1,%"rtype"1\n" \ " jmp 2b\n" \ ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 8\n" \ - " .quad 1b,3b\n" \ - ".previous" \ + _ASM_EXTABLE(1b,3b) \ : "=r"(err), ltype (x) \ : "m"(__m(addr)), "i"(errno), "0"(err)) diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h index a5edf517b992..c92fe4af52e8 100644 --- a/include/asm-x86/vm86.h +++ b/include/asm-x86/vm86.h @@ -195,6 
+195,7 @@ struct kernel_vm86_struct { void handle_vm86_fault(struct kernel_vm86_regs *, long); int handle_vm86_trap(struct kernel_vm86_regs *, long, int); +struct pt_regs *save_v86_state(struct kernel_vm86_regs *); struct task_struct; void release_vm86_irqs(struct task_struct *); |
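
The other recurring theme in this merge is 1GB-page (gbpages) support: cpufeature.h grows X86_FEATURE_GBPAGES and cpu_has_gbpages, page_64.h adds PUD_PAGE_SIZE/PUD_PAGE_MASK, and pgtable_64.h gains a pud_large() accessor (with a stub returning 0 in pgtable_32.h) so page-table walkers can treat a PUD entry as a leaf. The gbpages-aware lookup_address() itself lives in arch/x86/mm and is outside this include-only diff; the fragment below is only an illustrative walk, with a simplified return convention, showing where pud_large() fits.

/*
 * Illustrative sketch, not the kernel's lookup_address() (which also
 * reports the mapping level): walk the kernel page tables and stop
 * early when a 1GB or 2MB/4MB mapping is reached.
 */
static pte_t *walk_kernel_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        if (pud_large(*pud))            /* 1GB page: the pud is the leaf */
                return (pte_t *)pud;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;
        if (pmd_large(*pmd))            /* 2MB (or 4MB) page: the pmd is the leaf */
                return (pte_t *)pmd;

        return pte_offset_kernel(pmd, address);
}
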