Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                  |   1
-rw-r--r--  arch/x86/boot/compressed/eboot.c  |  26
-rw-r--r--  arch/x86/ia32/sys_ia32.c          |   5
-rw-r--r--  arch/x86/include/asm/efi.h        |   9
-rw-r--r--  arch/x86/include/asm/sys_ia32.h   |   1
-rw-r--r--  arch/x86/kernel/head.c            |  53
-rw-r--r--  arch/x86/kernel/head_64.S         |   4
-rw-r--r--  arch/x86/kernel/kprobes/core.c    |   8
-rw-r--r--  arch/x86/kernel/nmi.c             |   1
-rw-r--r--  arch/x86/kernel/pvclock.c         |   2
-rw-r--r--  arch/x86/kernel/setup.c           |  16
-rw-r--r--  arch/x86/kvm/mmu.c                |  26
-rw-r--r--  arch/x86/mm/numa.c                |  11
-rw-r--r--  arch/x86/mm/srat.c                | 125
-rw-r--r--  arch/x86/platform/efi/efi.c       |   7
-rw-r--r--  arch/x86/syscalls/syscall_32.tbl  |   6
16 files changed, 97 insertions(+), 204 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6a9383370311..a4f24f5b1218 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -112,6 +112,7 @@ config X86
 	select GENERIC_STRNLEN_USER
 	select HAVE_CONTEXT_TRACKING if X86_64
 	select HAVE_IRQ_TIME_ACCOUNTING
+	select HAVE_VIRT_TO_BUS
 	select MODULES_USE_ELF_REL if X86_32
 	select MODULES_USE_ELF_RELA if X86_64
 	select CLONE_BACKWARDS if X86_32
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index f8fa41190c35..c205035a6b96 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -19,23 +19,28 @@
 
 static efi_system_table_t *sys_table;
 
+static void efi_char16_printk(efi_char16_t *str)
+{
+	struct efi_simple_text_output_protocol *out;
+
+	out = (struct efi_simple_text_output_protocol *)sys_table->con_out;
+	efi_call_phys2(out->output_string, out, str);
+}
+
 static void efi_printk(char *str)
 {
 	char *s8;
 
 	for (s8 = str; *s8; s8++) {
-		struct efi_simple_text_output_protocol *out;
 		efi_char16_t ch[2] = { 0 };
 
 		ch[0] = *s8;
-		out = (struct efi_simple_text_output_protocol *)sys_table->con_out;
-
 		if (*s8 == '\n') {
 			efi_char16_t nl[2] = { '\r', 0 };
-			efi_call_phys2(out->output_string, out, nl);
+			efi_char16_printk(nl);
 		}
 
-		efi_call_phys2(out->output_string, out, ch);
+		efi_char16_printk(ch);
 	}
 }
 
@@ -709,7 +714,12 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
 		if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16))
 			break;
 
-		*p++ = *str++;
+		if (*str == '/') {
+			*p++ = '\\';
+			*str++;
+		} else {
+			*p++ = *str++;
+		}
 	}
 
 	*p = '\0';
@@ -737,7 +747,9 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
 	status = efi_call_phys5(fh->open, fh, &h, filename_16,
 				EFI_FILE_MODE_READ, (u64)0);
 	if (status != EFI_SUCCESS) {
-		efi_printk("Failed to open initrd file\n");
+		efi_printk("Failed to open initrd file: ");
+		efi_char16_printk(filename_16);
+		efi_printk("\n");
 		goto close_handles;
 	}
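The eboot.c hunk above converts a kernel-style initrd path to an EFI file path while widening each byte to UCS-2, since EFI uses '\' as the separator. (The committed `*str++;` statement only advances the pointer; the dereferenced value is discarded.) Below is a standalone C model of that loop; `to_efi_path` and the use of uint16_t for efi_char16_t are this sketch's assumptions, not kernel API.

    /* Standalone model of the separator fixup in handle_ramdisks(). */
    #include <stdint.h>
    #include <stdio.h>

    static void to_efi_path(const char *str, uint16_t *out, size_t n)
    {
        uint16_t *p = out;

        while (*str && (size_t)(p - out) < n - 1) {
            if (*str == '/') {
                *p++ = '\\';                    /* EFI paths use backslashes */
                str++;
            } else {
                *p++ = (uint16_t)*str++;        /* widen ASCII to UCS-2 */
            }
        }
        *p = 0;                                 /* UCS-2 NUL terminator */
    }

    int main(void)
    {
        uint16_t buf[32];

        to_efi_path("/boot/initrd.img", buf, sizeof(buf) / sizeof(buf[0]));
        for (uint16_t *p = buf; *p; p++)
            putchar((char)*p);                  /* prints \boot\initrd.img */
        putchar('\n');
        return 0;
    }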
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 592f5a9a9c0e..ad7a20cbc699 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -218,11 +218,6 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
  * Some system calls that need sign extended arguments. This could be
  * done by a generic wrapper.
  */
-long sys32_lseek(unsigned int fd, int offset, unsigned int whence)
-{
-	return sys_lseek(fd, offset, whence);
-}
-
 long sys32_kill(int pid, int sig)
 {
 	return sys_kill(pid, sig);
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 28677c55113f..60c89f30c727 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -102,7 +102,14 @@ extern void efi_call_phys_epilog(void);
 extern void efi_unmap_memmap(void);
 extern void efi_memory_uc(u64 addr, unsigned long size);
 
-#ifndef CONFIG_EFI
+#ifdef CONFIG_EFI
+
+static inline bool efi_is_native(void)
+{
+	return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
+}
+
+#else
 /*
  * IF EFI is not configured, have the EFI calls return -ENOSYS.
  */
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 0218d917f509..8459efc39686 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -43,7 +43,6 @@ asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32);
 asmlinkage long sys32_personality(unsigned long);
 asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32);
 
-long sys32_lseek(unsigned int, int, unsigned int);
 long sys32_kill(int, int);
 long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int);
 long sys32_vm86_warning(void);
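sys32_lseek existed only to sign-extend the 32-bit offset before calling the native sys_lseek, which the generic compat_sys_lseek (wired into the syscall table at the end of this diff) already does; the new compat_sys_truncate/compat_sys_ftruncate entries exist for the same reason. A runnable illustration of why zero-extension would be wrong:

    /* Sign- vs zero-extension of a 32-bit syscall argument. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t reg = 0xfffffffcu;          /* 32-bit register image of -4 */
        uint64_t wrong = reg;                /* zero-extended */
        int64_t right = (int32_t)reg;        /* sign-extended */

        /* lseek(fd, -4, SEEK_CUR) must not become a ~4GiB forward seek */
        printf("zero-extended: %llu\n", (unsigned long long)wrong);
        printf("sign-extended: %lld\n", (long long)right);
        return 0;
    }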
diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
index 48d9d4ea1020..992f442ca155 100644
--- a/arch/x86/kernel/head.c
+++ b/arch/x86/kernel/head.c
@@ -5,8 +5,6 @@
 #include <asm/setup.h>
 #include <asm/bios_ebda.h>
 
-#define BIOS_LOWMEM_KILOBYTES 0x413
-
 /*
  * The BIOS places the EBDA/XBDA at the top of conventional
  * memory, and usually decreases the reported amount of
@@ -16,17 +14,30 @@
  * chipset: reserve a page before VGA to prevent PCI prefetch
  * into it (errata #56). Usually the page is reserved anyways,
  * unless you have no PS/2 mouse plugged in.
+ *
+ * This functions is deliberately very conservative.  Losing
+ * memory in the bottom megabyte is rarely a problem, as long
+ * as we have enough memory to install the trampoline.  Using
+ * memory that is in use by the BIOS or by some DMA device
+ * the BIOS didn't shut down *is* a big problem.
  */
+
+#define BIOS_LOWMEM_KILOBYTES	0x413
+#define LOWMEM_CAP		0x9f000U	/* Absolute maximum */
+#define INSANE_CUTOFF		0x20000U	/* Less than this = insane */
+
 void __init reserve_ebda_region(void)
 {
 	unsigned int lowmem, ebda_addr;
 
-	/* To determine the position of the EBDA and the */
-	/* end of conventional memory, we need to look at */
-	/* the BIOS data area. In a paravirtual environment */
-	/* that area is absent. We'll just have to assume */
-	/* that the paravirt case can handle memory setup */
-	/* correctly, without our help. */
+	/*
+	 * To determine the position of the EBDA and the
+	 * end of conventional memory, we need to look at
+	 * the BIOS data area. In a paravirtual environment
+	 * that area is absent. We'll just have to assume
+	 * that the paravirt case can handle memory setup
+	 * correctly, without our help.
+	 */
 	if (paravirt_enabled())
 		return;
 
@@ -37,19 +48,23 @@ void __init reserve_ebda_region(void)
 	/* start of EBDA area */
 	ebda_addr = get_bios_ebda();
 
-	/* Fixup: bios puts an EBDA in the top 64K segment */
-	/* of conventional memory, but does not adjust lowmem. */
-	if ((lowmem - ebda_addr) <= 0x10000)
-		lowmem = ebda_addr;
+	/*
+	 * Note: some old Dells seem to need 4k EBDA without
+	 * reporting so, so just consider the memory above 0x9f000
+	 * to be off limits (bugzilla 2990).
+	 */
+
+	/* If the EBDA address is below 128K, assume it is bogus */
+	if (ebda_addr < INSANE_CUTOFF)
+		ebda_addr = LOWMEM_CAP;
 
-	/* Fixup: bios does not report an EBDA at all. */
-	/* Some old Dells seem to need 4k anyhow (bugzilla 2990) */
-	if ((ebda_addr == 0) && (lowmem >= 0x9f000))
-		lowmem = 0x9f000;
+	/* If lowmem is less than 128K, assume it is bogus */
+	if (lowmem < INSANE_CUTOFF)
+		lowmem = LOWMEM_CAP;
 
-	/* Paranoia: should never happen, but... */
-	if ((lowmem == 0) || (lowmem >= 0x100000))
-		lowmem = 0x9f000;
+	/* Use the lower of the lowmem and EBDA markers as the cutoff */
+	lowmem = min(lowmem, ebda_addr);
+	lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
 
 	/* reserve all memory between lowmem and the 1MB mark */
 	memblock_reserve(lowmem, 0x100000 - lowmem);
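The rewritten reserve_ebda_region() stops trusting implausible BIOS values outright: anything below 128K is treated as corrupt, and the reservation cutoff can never exceed 0x9f000. A userspace model of just the clamping arithmetic (the helper name is this sketch's invention; only the constants mirror the patch):

    /* Model of the lowmem/EBDA clamping in reserve_ebda_region(). */
    #include <stdio.h>

    #define LOWMEM_CAP    0x9f000u  /* absolute maximum */
    #define INSANE_CUTOFF 0x20000u  /* below this = bogus */

    static unsigned int lowmem_cutoff(unsigned int lowmem, unsigned int ebda_addr)
    {
        if (ebda_addr < INSANE_CUTOFF)
            ebda_addr = LOWMEM_CAP;  /* distrust a too-low EBDA pointer */
        if (lowmem < INSANE_CUTOFF)
            lowmem = LOWMEM_CAP;     /* distrust a too-low BDA lowmem value */

        lowmem = lowmem < ebda_addr ? lowmem : ebda_addr;   /* lower marker wins */
        return lowmem < LOWMEM_CAP ? lowmem : LOWMEM_CAP;   /* absolute cap */
    }

    int main(void)
    {
        /* An unreported EBDA (0) no longer collapses the reservation base */
        printf("%#x\n", lowmem_cutoff(0x9fc00, 0));  /* prints 0x9f000 */
        return 0;
    }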
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index b7de3b25adb5..6859e9626442 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -48,7 +48,7 @@ L3_START_KERNEL = pud_index(__START_KERNEL_map)
 	.globl startup_64
 startup_64:
 	/*
-	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
+	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
 	 * and someone has loaded an identity mapped page table
 	 * for us.  These identity mapped page tables map all of the
 	 * kernel pages and possibly all of memory.
@@ -159,7 +159,7 @@ startup_64:
 	jmp 1f
 ENTRY(secondary_startup_64)
 	/*
-	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
+	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
 	 * and someone has loaded a mapped page table.
 	 *
 	 * %rsi holds a physical pointer to real_mode_data.
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index e124554598ee..3f06e6149981 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -652,7 +652,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 	kprobe_opcode_t *correct_ret_addr = NULL;
@@ -682,7 +682,7 @@
 	 * will be the real return address, and all the rest will
 	 * point to kretprobe_trampoline.
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -701,7 +701,7 @@
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 
 	correct_ret_addr = ri->ret_addr;
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -728,7 +728,7 @@
 
 	kretprobe_hash_unlock(current, &flags);
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
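The kprobes hunk is part of the tree-wide hlist API change that dropped the separate cursor argument from hlist_for_each_entry_safe(): the entry pointer now carries the cursor itself, and only the "tmp" lookahead survives so the current node may be freed mid-walk. A minimal standalone model of that safe-iteration pattern (a plain singly linked list, not the kernel's hlist):

    /* Why "safe" iteration needs a lookahead pointer. */
    #include <stdlib.h>

    struct node {
        struct node *next;
        int val;
    };

    /* tmp caches pos->next before the body runs, so the body may free pos */
    #define list_for_each_safe(pos, tmp, head) \
        for (pos = (head); pos && (tmp = pos->next, 1); pos = tmp)

    int main(void)
    {
        struct node *head = NULL, *pos, *tmp;

        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->val = i;
            n->next = head;
            head = n;
        }
        list_for_each_safe(pos, tmp, head)
            free(pos);  /* legal: the cursor has already moved to tmp */
        return 0;
    }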
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index f84f5c57de35..60308053fdb2 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -509,3 +509,4 @@ void local_touch_nmi(void)
 {
 	__this_cpu_write(last_nmi_rip, 0);
 }
+EXPORT_SYMBOL_GPL(local_touch_nmi);
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 85c39590c1a4..2cb9470ea85b 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -185,7 +185,7 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
 
 	for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
 		__set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
-			     __pa_symbol(i) + (idx*PAGE_SIZE),
+			     __pa(i) + (idx*PAGE_SIZE),
 			     PAGE_KERNEL_VVAR);
 	}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9c857f05cef0..84d32855f65c 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1056,15 +1056,6 @@ void __init setup_arch(char **cmdline_p)
 	setup_bios_corruption_check();
 #endif
 
-	/*
-	 * In the memory hotplug case, the kernel needs info from SRAT to
-	 * determine which memory is hotpluggable before allocating memory
-	 * using memblock.
-	 */
-	acpi_boot_table_init();
-	early_acpi_boot_init();
-	early_parse_srat();
-
 #ifdef CONFIG_X86_32
 	printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
 			(max_pfn_mapped<<PAGE_SHIFT) - 1);
@@ -1110,6 +1101,10 @@ void __init setup_arch(char **cmdline_p)
 	/*
 	 * Parse the ACPI tables for possible boot-time SMP configuration.
 	 */
+	acpi_boot_table_init();
+
+	early_acpi_boot_init();
+
 	initmem_init();
 	memblock_find_dma_reserve();
 
@@ -1196,8 +1191,7 @@
 	 * mismatched firmware/kernel archtectures since there is no
 	 * support for runtime services.
 	 */
-	if (efi_enabled(EFI_BOOT) &&
-	    IS_ENABLED(CONFIG_X86_64) != efi_enabled(EFI_64BIT)) {
+	if (efi_enabled(EFI_BOOT) && !efi_is_native()) {
 		pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
 		efi_unmap_memmap();
 	}
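With efi_is_native() moved into asm/efi.h, the mismatch test in setup_arch() reduces to one readable call. A trivial model of the predicate; the parameters stand in for IS_ENABLED(CONFIG_X86_64) and efi_enabled(EFI_64BIT):

    /* Mixed-mode EFI detection: firmware must match the kernel's word size. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool efi_is_native_model(bool kernel_64bit, bool firmware_64bit)
    {
        return kernel_64bit == firmware_64bit;
    }

    int main(void)
    {
        /* 64-bit kernel booted from 32-bit firmware: runtime services unusable */
        printf("%s\n", efi_is_native_model(true, false) ? "native" : "mixed");
        return 0;
    }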
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4ed3edbe06bd..956ca358108a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1644,13 +1644,13 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list);
 
-#define for_each_gfn_sp(kvm, sp, gfn, pos)				\
-	hlist_for_each_entry(sp, pos,					\
+#define for_each_gfn_sp(kvm, sp, gfn)					\
+	hlist_for_each_entry(sp,					\
 	  &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
 		if ((sp)->gfn != (gfn)) {} else
 
-#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)		\
-	hlist_for_each_entry(sp, pos,					\
+#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn)			\
+	hlist_for_each_entry(sp,					\
 	  &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
 		if ((sp)->gfn != (gfn) || (sp)->role.direct ||		\
 			(sp)->role.invalid) {} else
 
@@ -1706,11 +1706,10 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	bool flush = false;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (!s->unsync)
 			continue;
 
@@ -1848,7 +1847,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	union kvm_mmu_page_role role;
 	unsigned quadrant;
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	bool need_sync = false;
 
 	role = vcpu->arch.mmu.base_role;
@@ -1863,7 +1861,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
+	for_each_gfn_sp(vcpu->kvm, sp, gfn) {
 		if (!need_sync && sp->unsync)
 			need_sync = true;
 
@@ -2151,14 +2149,13 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	int r;
 
 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
 	r = 0;
 	spin_lock(&kvm->mmu_lock);
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
+	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
 			 sp->role.word);
 		r = 1;
@@ -2288,9 +2285,8 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 static void kvm_unsync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (s->unsync)
 			continue;
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
@@ -2302,10 +2298,9 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 				  bool can_unsync)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 	bool need_unsync = false;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (!can_unsync)
 			return 1;
 
@@ -3933,7 +3928,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	union kvm_mmu_page_role mask = { .word = 0 };
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	int npte;
@@ -3964,7 +3958,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
 	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		      detect_write_flooding(sp)) {
 			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
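The for_each_gfn_sp()/for_each_gfn_indirect_valid_sp() wrappers end in an `if (!match) {} else` tail so the caller's braced body runs only for matching hash-bucket entries while break/continue still apply to the underlying loop; the hunk merely drops the now-redundant cursor argument. A self-contained model of the idiom (the macro and names are invented for illustration):

    /* The "if (!match) {} else" filtered-iterator idiom. */
    #include <stdio.h>

    #define for_each_even(arr, n, i, v)                         \
        for (i = 0; i < (n) && ((v) = (arr)[i], 1); i++)        \
            if ((v) % 2 != 0) {} else

    int main(void)
    {
        int nums[] = { 1, 2, 3, 4, 5, 6 };
        int i, v;

        for_each_even(nums, 6, i, v)
            printf("%d\n", v);  /* prints 2, 4, 6 */
        return 0;
    }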
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index ff3633c794c6..72fe01e9e414 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -212,9 +212,10 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
 	 * Allocate node data.  Try node-local memory and then any node.
 	 * Never allocate in DMA zone.
 	 */
-	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+	nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
 	if (!nd_pa) {
-		pr_err("Cannot find %zu bytes in any node\n", nd_size);
+		pr_err("Cannot find %zu bytes in node %d\n",
+		       nd_size, nid);
 		return;
 	}
 	nd = __va(nd_pa);
@@ -559,12 +560,10 @@ static int __init numa_init(int (*init_func)(void))
 	for (i = 0; i < MAX_LOCAL_APIC; i++)
 		set_apicid_to_node(i, NUMA_NO_NODE);
 
-	/*
-	 * Do not clear numa_nodes_parsed or zero numa_meminfo here, because
-	 * SRAT was parsed earlier in early_parse_srat().
-	 */
+	nodes_clear(numa_nodes_parsed);
 	nodes_clear(node_possible_map);
 	nodes_clear(node_online_map);
+	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
 	WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
 	numa_reset_distance();
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 79836d01f789..cdd0da9dd530 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -141,126 +141,11 @@ static inline int save_add_info(void) {return 1;}
 static inline int save_add_info(void) {return 0;}
 #endif
 
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-static void __init
-handle_movablemem(int node, u64 start, u64 end, u32 hotpluggable)
-{
-	int overlap, i;
-	unsigned long start_pfn, end_pfn;
-
-	start_pfn = PFN_DOWN(start);
-	end_pfn = PFN_UP(end);
-
-	/*
-	 * For movablemem_map=acpi:
-	 *
-	 * SRAT:		|_____| |_____| |_________| |_________| ......
-	 * node id:		0       1       1           2
-	 * hotpluggable:	n       y       y           n
-	 * movablemem_map:	        |_____| |_________|
-	 *
-	 * Using movablemem_map, we can prevent memblock from allocating memory
-	 * on ZONE_MOVABLE at boot time.
-	 *
-	 * Before parsing SRAT, memblock has already reserve some memory ranges
-	 * for other purposes, such as for kernel image. We cannot prevent
-	 * kernel from using these memory, so we need to exclude these memory
-	 * even if it is hotpluggable.
-	 * Furthermore, to ensure the kernel has enough memory to boot, we make
-	 * all the memory on the node which the kernel resides in
-	 * un-hotpluggable.
-	 */
-	if (hotpluggable && movablemem_map.acpi) {
-		/* Exclude ranges reserved by memblock. */
-		struct memblock_type *rgn = &memblock.reserved;
-
-		for (i = 0; i < rgn->cnt; i++) {
-			if (end <= rgn->regions[i].base ||
-			    start >= rgn->regions[i].base +
-			    rgn->regions[i].size)
-				continue;
-
-			/*
-			 * If the memory range overlaps the memory reserved by
-			 * memblock, then the kernel resides in this node.
-			 */
-			node_set(node, movablemem_map.numa_nodes_kernel);
-
-			goto out;
-		}
-
-		/*
-		 * If the kernel resides in this node, then the whole node
-		 * should not be hotpluggable.
-		 */
-		if (node_isset(node, movablemem_map.numa_nodes_kernel))
-			goto out;
-
-		insert_movablemem_map(start_pfn, end_pfn);
-
-		/*
-		 * numa_nodes_hotplug nodemask represents which nodes are put
-		 * into movablemem_map.map[].
-		 */
-		node_set(node, movablemem_map.numa_nodes_hotplug);
-		goto out;
-	}
-
-	/*
-	 * For movablemem_map=nn[KMG]@ss[KMG]:
-	 *
-	 * SRAT:		|_____| |_____| |_________| |_________| ......
-	 * node id:		0       1       1           2
-	 * user specified:	        |__|    |___|
-	 * movablemem_map:	        |___| |_________|   |______| ......
-	 *
-	 * Using movablemem_map, we can prevent memblock from allocating memory
-	 * on ZONE_MOVABLE at boot time.
-	 *
-	 * NOTE: In this case, SRAT info will be ingored.
-	 */
-	overlap = movablemem_map_overlap(start_pfn, end_pfn);
-	if (overlap >= 0) {
-		/*
-		 * If part of this range is in movablemem_map, we need to
-		 * add the range after it to extend the range to the end
-		 * of the node, because from the min address specified to
-		 * the end of the node will be ZONE_MOVABLE.
-		 */
-		start_pfn = max(start_pfn,
-				movablemem_map.map[overlap].start_pfn);
-		insert_movablemem_map(start_pfn, end_pfn);
-
-		/*
-		 * Set the nodemask, so that if the address range on one node
-		 * is not continuse, we can add the subsequent ranges on the
-		 * same node into movablemem_map.
-		 */
-		node_set(node, movablemem_map.numa_nodes_hotplug);
-	} else {
-		if (node_isset(node, movablemem_map.numa_nodes_hotplug))
-			/*
-			 * Insert the range if we already have movable ranges
-			 * on the same node.
-			 */
-			insert_movablemem_map(start_pfn, end_pfn);
-	}
-out:
-	return;
-}
-#else		/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-static inline void
-handle_movablemem(int node, u64 start, u64 end, u32 hotpluggable)
-{
-}
-#endif		/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-
 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
 int __init
 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
 	u64 start, end;
-	u32 hotpluggable;
 	int node, pxm;
 
 	if (srat_disabled())
@@ -269,8 +154,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 		goto out_err_bad_srat;
 	if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
 		goto out_err;
-	hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
-	if (hotpluggable && !save_add_info())
+	if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
 		goto out_err;
 
 	start = ma->base_address;
@@ -290,12 +174,9 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 
 	node_set(node, numa_nodes_parsed);
 
-	printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx] %s\n",
+	printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
 	       node, pxm,
-	       (unsigned long long) start, (unsigned long long) end - 1,
-	       hotpluggable ? "Hot Pluggable": "");
-
-	handle_movablemem(node, start, end, hotpluggable);
+	       (unsigned long long) start, (unsigned long long) end - 1);
 
 	return 0;
 out_err_bad_srat:
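With the movablemem_map handling reverted, acpi_numa_memory_affinity_init() is back to a plain flag check on each SRAT entry. A model of that test; the constants mirror the ACPI SRAT memory-affinity flag bits but are local to this sketch:

    /* SRAT memory-affinity entries: skip disabled ones, and skip
     * hot-pluggable ones unless hotplug-add info is wanted. */
    #include <stdbool.h>
    #include <stdint.h>

    #define SRAT_MEM_ENABLED        (1u << 0)
    #define SRAT_MEM_HOT_PLUGGABLE  (1u << 1)

    static bool srat_entry_usable(uint32_t flags, bool save_add_info)
    {
        if (!(flags & SRAT_MEM_ENABLED))
            return false;                   /* disabled entry: ignore */
        if ((flags & SRAT_MEM_HOT_PLUGGABLE) && !save_add_info)
            return false;                   /* hotplug info not wanted */
        return true;
    }

    int main(void)
    {
        return !srat_entry_usable(SRAT_MEM_ENABLED, false);  /* usable: exit 0 */
    }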
"Hot Pluggable": ""); - - handle_movablemem(node, start, end, hotpluggable); + (unsigned long long) start, (unsigned long long) end - 1); return 0; out_err_bad_srat: diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 2f81db40d7ca..5f2ecaf3f9d8 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -69,11 +69,6 @@ struct efi_memory_map memmap; static struct efi efi_phys __initdata; static efi_system_table_t efi_systab __initdata; -static inline bool efi_is_native(void) -{ - return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT); -} - unsigned long x86_efi_facility; /* @@ -85,7 +80,7 @@ int efi_enabled(int facility) } EXPORT_SYMBOL(efi_enabled); -static bool disable_runtime = false; +static bool __initdata disable_runtime = false; static int __init setup_noefi(char *arg) { disable_runtime = true; diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl index f2fe78ff22cc..e6d55f0064df 100644 --- a/arch/x86/syscalls/syscall_32.tbl +++ b/arch/x86/syscalls/syscall_32.tbl @@ -25,7 +25,7 @@ 16 i386 lchown sys_lchown16 17 i386 break 18 i386 oldstat sys_stat -19 i386 lseek sys_lseek sys32_lseek +19 i386 lseek sys_lseek compat_sys_lseek 20 i386 getpid sys_getpid 21 i386 mount sys_mount compat_sys_mount 22 i386 umount sys_oldumount @@ -98,8 +98,8 @@ 89 i386 readdir sys_old_readdir compat_sys_old_readdir 90 i386 mmap sys_old_mmap sys32_mmap 91 i386 munmap sys_munmap -92 i386 truncate sys_truncate -93 i386 ftruncate sys_ftruncate +92 i386 truncate sys_truncate compat_sys_truncate +93 i386 ftruncate sys_ftruncate compat_sys_ftruncate 94 i386 fchmod sys_fchmod 95 i386 fchown sys_fchown16 96 i386 getpriority sys_getpriority |