Diffstat (limited to 'arch/s390/boot/startup.c')
-rw-r--r--  arch/s390/boot/startup.c  129
1 file changed, 73 insertions(+), 56 deletions(-)
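
The core of this patch is the switch from the old mem_detect/safe_addr bookkeeping to the physmem_info reservation API (physmem_reserve(), physmem_free(), physmem_alloc_top_down(), get_physmem_reserved()). As a rough sketch of the reserve -> check -> free -> re-allocate-top-down -> move pattern that the reworked rescue_initrd() in the diff below follows, here is a self-contained user-space mock. The physmem_* helpers in the sketch are hypothetical stand-ins reduced to a single RR_INITRD slot, not the kernel implementations, and the addresses are made up for illustration.

/*
 * Illustrative sketch only (not part of this patch): mocks the reservation
 * flow of the reworked rescue_initrd(min, max). The physmem_* helpers are
 * simplified stand-ins for the in-kernel boot-time API.
 */
#include <stdio.h>

static unsigned long rr_addr, rr_size;                  /* mock RR_INITRD slot */
static const unsigned long usable_limit = 0x20000000UL; /* mock usable RAM top */

static int get_physmem_reserved(unsigned long *addr, unsigned long *size)
{
	*addr = rr_addr;
	*size = rr_size;
	return rr_size != 0;
}

static void physmem_free(void)
{
	rr_size = 0;
}

static unsigned long physmem_alloc_top_down(unsigned long size)
{
	/* hand out the highest block that still fits below the usable limit */
	rr_addr = usable_limit - size;
	rr_size = size;
	return rr_addr;
}

/* same shape as the reworked rescue_initrd() in the diff below */
static void rescue_initrd(unsigned long min, unsigned long max)
{
	unsigned long old_addr, addr, size;

	if (!get_physmem_reserved(&addr, &size))
		return;
	if (addr >= min && addr + size <= max)
		return;                 /* initrd already lies in a safe range */
	old_addr = addr;
	physmem_free();
	addr = physmem_alloc_top_down(size);
	/* the real code does: memmove((void *)addr, (void *)old_addr, size); */
	printf("initrd would move from 0x%lx to 0x%lx (%lu bytes)\n",
	       old_addr, addr, size);
}

int main(void)
{
	rr_addr = 0x100000;                     /* pretend the initrd sits at 1 MiB */
	rr_size = 0x200000;                     /* and is 2 MiB large */
	rescue_initrd(0x800000, usable_limit);  /* decompressor needs low memory */
	return 0;
}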
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 11413f0baabc..64bd7ac3e35d 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -12,7 +12,7 @@
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"
@@ -21,7 +21,6 @@ unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
-unsigned long __bootdata(__amode31_base);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
@@ -29,8 +28,6 @@ unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata(ident_map_size);
-int __bootdata(is_full_image) = 1;
-struct initrd_data __bootdata(initrd_data);
u64 __bootdata_preserved(stfle_fac_list[16]);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
@@ -76,17 +73,20 @@ unsigned long mem_safe_offset(void)
}
#endif
-static unsigned long rescue_initrd(unsigned long safe_addr)
+static void rescue_initrd(unsigned long min, unsigned long max)
{
+ unsigned long old_addr, addr, size;
+
if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
- return safe_addr;
- if (!initrd_data.start || !initrd_data.size)
- return safe_addr;
- if (initrd_data.start < safe_addr) {
- memmove((void *)safe_addr, (void *)initrd_data.start, initrd_data.size);
- initrd_data.start = safe_addr;
- }
- return initrd_data.start + initrd_data.size;
+ return;
+ if (!get_physmem_reserved(RR_INITRD, &addr, &size))
+ return;
+ if (addr >= min && addr + size <= max)
+ return;
+ old_addr = addr;
+ physmem_free(RR_INITRD);
+ addr = physmem_alloc_top_down(RR_INITRD, size, 0);
+ memmove((void *)addr, (void *)old_addr, size);
}
static void copy_bootdata(void)
@@ -140,7 +140,7 @@ static void handle_relocs(unsigned long offset)
*
* Consider the following factors:
* 1. max_physmem_end - end of physical memory online or standby.
- * Always <= end of the last online memory block (get_mem_detect_end()).
+ * Always >= end of the last online memory range (get_physmem_online_end()).
* 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
* kernel is able to support.
* 3. "mem=" kernel command line option which limits physical memory usage.
@@ -160,10 +160,10 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
#ifdef CONFIG_CRASH_DUMP
if (oldmem_data.start) {
- kaslr_enabled = 0;
+ __kaslr_enabled = 0;
ident_map_size = min(ident_map_size, oldmem_data.size);
} else if (ipl_block_valid && is_ipl_block_dump()) {
- kaslr_enabled = 0;
+ __kaslr_enabled = 0;
if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
ident_map_size = min(ident_map_size, hsa_size);
}
@@ -235,9 +235,9 @@ static unsigned long setup_kernel_memory_layout(void)
/*
* This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
*/
-static void clear_bss_section(void)
+static void clear_bss_section(unsigned long vmlinux_lma)
{
- memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
+ memset((void *)vmlinux_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}
/*
@@ -256,7 +256,6 @@ static void setup_vmalloc_size(void)
static void offset_vmlinux_info(unsigned long offset)
{
- vmlinux.default_lma += offset;
*(unsigned long *)(&vmlinux.entry) += offset;
vmlinux.bootdata_off += offset;
vmlinux.bootdata_preserved_off += offset;
@@ -266,60 +265,83 @@ static void offset_vmlinux_info(unsigned long offset)
vmlinux.init_mm_off += offset;
vmlinux.swapper_pg_dir_off += offset;
vmlinux.invalid_pg_dir_off += offset;
-}
-
-static unsigned long reserve_amode31(unsigned long safe_addr)
-{
- __amode31_base = PAGE_ALIGN(safe_addr);
- return __amode31_base + vmlinux.amode31_size;
+#ifdef CONFIG_KASAN
+ vmlinux.kasan_early_shadow_page_off += offset;
+ vmlinux.kasan_early_shadow_pte_off += offset;
+ vmlinux.kasan_early_shadow_pmd_off += offset;
+ vmlinux.kasan_early_shadow_pud_off += offset;
+ vmlinux.kasan_early_shadow_p4d_off += offset;
+#endif
}
void startup_kernel(void)
{
unsigned long max_physmem_end;
- unsigned long random_lma;
- unsigned long safe_addr;
+ unsigned long vmlinux_lma = 0;
+ unsigned long amode31_lma = 0;
unsigned long asce_limit;
+ unsigned long safe_addr;
void *img;
psw_t psw;
- initrd_data.start = parmarea.initrd_start;
- initrd_data.size = parmarea.initrd_size;
+ setup_lpp();
+ safe_addr = mem_safe_offset();
+ /*
+ * reserve decompressor memory together with decompression heap, buffer and
+ * memory which might be occupied by uncompressed kernel at default 1Mb
+ * position (if KASLR is off or failed).
+ */
+ physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
+ physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
oldmem_data.start = parmarea.oldmem_base;
oldmem_data.size = parmarea.oldmem_size;
- setup_lpp();
store_ipl_parmblock();
- safe_addr = mem_safe_offset();
- safe_addr = reserve_amode31(safe_addr);
- safe_addr = read_ipl_report(safe_addr);
+ read_ipl_report();
uv_query_info();
- safe_addr = rescue_initrd(safe_addr);
sclp_early_read_info();
setup_boot_command_line();
parse_boot_command_line();
detect_facilities();
sanitize_prot_virt_host();
- max_physmem_end = detect_memory(&safe_addr);
+ max_physmem_end = detect_max_physmem_end();
setup_ident_map_size(max_physmem_end);
setup_vmalloc_size();
asce_limit = setup_kernel_memory_layout();
- mem_detect_set_usable_limit(ident_map_size);
-
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
- random_lma = get_random_base(safe_addr);
- if (random_lma) {
- __kaslr_offset = random_lma - vmlinux.default_lma;
- img = (void *)vmlinux.default_lma;
+ /* got final ident_map_size, physmem allocations could be performed now */
+ physmem_set_usable_limit(ident_map_size);
+ detect_physmem_online_ranges(max_physmem_end);
+ save_ipl_cert_comp_list();
+ rescue_initrd(safe_addr, ident_map_size);
+
+ if (kaslr_enabled()) {
+ vmlinux_lma = randomize_within_range(vmlinux.image_size + vmlinux.bss_size,
+ THREAD_SIZE, vmlinux.default_lma,
+ ident_map_size);
+ if (vmlinux_lma) {
+ __kaslr_offset = vmlinux_lma - vmlinux.default_lma;
offset_vmlinux_info(__kaslr_offset);
}
}
+ vmlinux_lma = vmlinux_lma ?: vmlinux.default_lma;
+ physmem_reserve(RR_VMLINUX, vmlinux_lma, vmlinux.image_size + vmlinux.bss_size);
if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
img = decompress_kernel();
- memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
- } else if (__kaslr_offset)
- memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);
+ memmove((void *)vmlinux_lma, img, vmlinux.image_size);
+ } else if (__kaslr_offset) {
+ img = (void *)vmlinux.default_lma;
+ memmove((void *)vmlinux_lma, img, vmlinux.image_size);
+ memset(img, 0, vmlinux.image_size);
+ }
+
+ /* vmlinux decompression is done, shrink reserved low memory */
+ physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);
+ if (kaslr_enabled())
+ amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G);
+ amode31_lma = amode31_lma ?: vmlinux.default_lma - vmlinux.amode31_size;
+ physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);
/*
* The order of the following operations is important:
@@ -334,21 +356,16 @@ void startup_kernel(void)
* - copy_bootdata() must follow setup_vmem() to propagate changes to
* bootdata made by setup_vmem()
*/
- clear_bss_section();
+ clear_bss_section(vmlinux_lma);
handle_relocs(__kaslr_offset);
setup_vmem(asce_limit);
copy_bootdata();
- if (__kaslr_offset) {
- /*
- * Save KASLR offset for early dumps, before vmcore_info is set.
- * Mark as uneven to distinguish from real vmcore_info pointer.
- */
- S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
- /* Clear non-relocated kernel */
- if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
- memset(img, 0, vmlinux.image_size);
- }
+ /*
+ * Save KASLR offset for early dumps, before vmcore_info is set.
+ * Mark as uneven to distinguish from real vmcore_info pointer.
+ */
+ S390_lowcore.vmcore_info = __kaslr_offset ? __kaslr_offset | 0x1UL : 0;
/*
* Jump to the decompressed kernel entry point and switch DAT mode on.