Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/boot/compressed/head.S        | 11
-rw-r--r-- | arch/s390/configs/default_defconfig     |  2
-rw-r--r-- | arch/s390/configs/gcov_defconfig        |  2
-rw-r--r-- | arch/s390/configs/performance_defconfig |  2
-rw-r--r-- | arch/s390/crypto/crc32-vx.c             |  6
-rw-r--r-- | arch/s390/defconfig                     |  2
-rw-r--r-- | arch/s390/include/asm/facilities_src.h  | 24
-rw-r--r-- | arch/s390/include/asm/kvm_host.h        |  2
-rw-r--r-- | arch/s390/kernel/asm-offsets.c          |  1
-rw-r--r-- | arch/s390/kernel/head.S                 |  4
-rw-r--r-- | arch/s390/kvm/gaccess.c                 | 37
-rw-r--r-- | arch/s390/kvm/guestdbg.c                | 59
-rw-r--r-- | arch/s390/kvm/intercept.c               |  1
-rw-r--r-- | arch/s390/kvm/interrupt.c               | 98
-rw-r--r-- | arch/s390/kvm/kvm-s390.c                | 70
-rw-r--r-- | arch/s390/kvm/kvm-s390.h                | 14
-rw-r--r-- | arch/s390/kvm/priv.c                    | 21
-rw-r--r-- | arch/s390/lib/string.c                  | 16
-rw-r--r-- | arch/s390/mm/pageattr.c                 |  2
19 files changed, 240 insertions, 134 deletions
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
index f86a4eef28a9..28c4f96a2d9c 100644
--- a/arch/s390/boot/compressed/head.S
+++ b/arch/s390/boot/compressed/head.S
@@ -21,16 +21,21 @@ ENTRY(startup_continue)
 	lg	%r15,.Lstack-.LPG1(%r13)
 	aghi	%r15,-160
 	brasl	%r14,decompress_kernel
-	# setup registers for memory mover & branch to target
+	# Set up registers for memory mover. We move the decompressed image to
+	# 0x11000, starting at offset 0x11000 in the decompressed image so
+	# that code living at 0x11000 in the image will end up at 0x11000 in
+	# memory.
 	lgr	%r4,%r2
 	lg	%r2,.Loffset-.LPG1(%r13)
 	la	%r4,0(%r2,%r4)
 	lg	%r3,.Lmvsize-.LPG1(%r13)
 	lgr	%r5,%r3
-	# move the memory mover someplace safe
+	# Move the memory mover someplace safe so it doesn't overwrite itself.
 	la	%r1,0x200
 	mvc	0(mover_end-mover,%r1),mover-.LPG1(%r13)
-	# decompress image is started at 0x11000
+	# When the memory mover is done we pass control to
+	# arch/s390/kernel/head64.S:startup_continue which lives at 0x11000 in
+	# the decompressed image.
 	lgr	%r6,%r2
 	br	%r1
 mover:
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 889ea3450210..26e0c7f08814 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -678,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 1bcfd764910a..24879dab47bc 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -616,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 13ff090139c8..a5c1e5f2a0ca 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -615,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c
index 577ae1d4ae89..2bad9d837029 100644
--- a/arch/s390/crypto/crc32-vx.c
+++ b/arch/s390/crypto/crc32-vx.c
@@ -51,6 +51,9 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
 	struct kernel_fpu vxstate;					\
 	unsigned long prealign, aligned, remaining;			\
 									\
+	if (datalen < VX_MIN_LEN + VX_ALIGN_MASK)			\
+		return ___crc32_sw(crc, data, datalen);			\
+									\
 	if ((unsigned long)data & VX_ALIGN_MASK) {			\
 		prealign = VX_ALIGNMENT -				\
 			  ((unsigned long)data & VX_ALIGN_MASK);	\
@@ -59,9 +62,6 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
 		data = (void *)((unsigned long)data + prealign);	\
 	}								\
 									\
-	if (datalen < VX_MIN_LEN)					\
-		return ___crc32_sw(crc, data, datalen);			\
-									\
 	aligned = datalen & ~VX_ALIGN_MASK;				\
 	remaining = datalen & VX_ALIGN_MASK;				\
 									\
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index ccccebeeaaf6..73610f2e3b4f 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -234,7 +234,7 @@ CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m
 # CONFIG_XZ_DEC_X86 is not set
 # CONFIG_XZ_DEC_POWERPC is not set
diff --git a/arch/s390/include/asm/facilities_src.h b/arch/s390/include/asm/facilities_src.h
index 4917728e5828..3b758f66e48b 100644
--- a/arch/s390/include/asm/facilities_src.h
+++ b/arch/s390/include/asm/facilities_src.h
@@ -55,4 +55,28 @@ static struct facility_def facility_defs[] = {
 			-1 /* END */
 		}
 	},
+	{
+		.name = "FACILITIES_KVM",
+		.bits = (int[]){
+			0,  /* N3 instructions */
+			1,  /* z/Arch mode installed */
+			2,  /* z/Arch mode active */
+			3,  /* DAT-enhancement */
+			4,  /* idte segment table */
+			5,  /* idte region table */
+			6,  /* ASN-and-LX reuse */
+			7,  /* stfle */
+			8,  /* enhanced-DAT 1 */
+			9,  /* sense-running-status */
+			10, /* conditional sske */
+			13, /* ipte-range */
+			14, /* nonquiescing key-setting */
+			73, /* transactional execution */
+			75, /* access-exception-fetch/store indication */
+			76, /* msa extension 3 */
+			77, /* msa extension 4 */
+			78, /* enhanced-DAT 2 */
+			-1 /* END */
+		}
+	},
 };
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 2bbe675c96b9..a41faf34b034 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -28,7 +28,7 @@
 
 #define KVM_S390_BSCA_CPU_SLOTS 64
 #define KVM_S390_ESCA_CPU_SLOTS 248
-#define KVM_MAX_VCPUS KVM_S390_ESCA_CPU_SLOTS
+#define KVM_MAX_VCPUS 255
 #define KVM_USER_MEM_SLOTS 32
 
 /*
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 1f95cc1faeb7..f3df9e0a5dec 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -125,6 +125,7 @@ int main(void)
 	OFFSET(__LC_STFL_FAC_LIST, lowcore, stfl_fac_list);
 	OFFSET(__LC_STFLE_FAC_LIST, lowcore, stfle_fac_list);
 	OFFSET(__LC_MCCK_CODE, lowcore, mcck_interruption_code);
+	OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
 	OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
 	OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr);
 	OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 56e4d8234ef2..4431905f8cfa 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -309,7 +309,9 @@ ENTRY(startup_kdump)
 	l	%r15,.Lstack-.LPG0(%r13)
 	ahi	%r15,-STACK_FRAME_OVERHEAD
 	brasl	%r14,verify_facilities
-	/* Continue with startup code in head64.S */
+# For uncompressed images, continue in
+# arch/s390/kernel/head64.S. For compressed images, continue in
+# arch/s390/boot/compressed/head.S.
 	jg	startup_continue
 
 .Lstack:
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 54200208bf24..4aa8a7e2a1da 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -495,6 +495,18 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
 	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
 
 	switch (code) {
+	case PGM_PROTECTION:
+		switch (prot) {
+		case PROT_TYPE_ALC:
+			tec->b60 = 1;
+			/* FALL THROUGH */
+		case PROT_TYPE_DAT:
+			tec->b61 = 1;
+			break;
+		default: /* LA and KEYC set b61 to 0, other params undefined */
+			return code;
+		}
+		/* FALL THROUGH */
 	case PGM_ASCE_TYPE:
 	case PGM_PAGE_TRANSLATION:
 	case PGM_REGION_FIRST_TRANS:
@@ -504,8 +516,7 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
 		/*
 		 * op_access_id only applies to MOVE_PAGE -> set bit 61
 		 * exc_access_id has to be set to 0 for some instructions. Both
-		 * cases have to be handled by the caller. We can always store
-		 * exc_access_id, as it is undefined for non-ar cases.
+		 * cases have to be handled by the caller.
 		 */
 		tec->addr = gva >> PAGE_SHIFT;
 		tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
@@ -516,25 +527,13 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
 	case PGM_ASTE_VALIDITY:
 	case PGM_ASTE_SEQUENCE:
 	case PGM_EXTENDED_AUTHORITY:
+		/*
+		 * We can always store exc_access_id, as it is
+		 * undefined for non-ar cases. It is undefined for
+		 * most DAT protection exceptions.
+		 */
 		pgm->exc_access_id = ar;
 		break;
-	case PGM_PROTECTION:
-		switch (prot) {
-		case PROT_TYPE_ALC:
-			tec->b60 = 1;
-			/* FALL THROUGH */
-		case PROT_TYPE_DAT:
-			tec->b61 = 1;
-			tec->addr = gva >> PAGE_SHIFT;
-			tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
-			tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
-			/* exc_access_id is undefined for most cases */
-			pgm->exc_access_id = ar;
-			break;
-		default: /* LA and KEYC set b61 to 0, other params undefined */
-			break;
-		}
-		break;
 	}
 	return code;
 }
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index 31a05330d11c..d7c6a7f53ced 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -206,7 +206,7 @@ static int __import_wp_info(struct kvm_vcpu *vcpu,
 int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
 			    struct kvm_guest_debug *dbg)
 {
-	int ret = 0, nr_wp = 0, nr_bp = 0, i, size;
+	int ret = 0, nr_wp = 0, nr_bp = 0, i;
 	struct kvm_hw_breakpoint *bp_data = NULL;
 	struct kvm_hw_wp_info_arch *wp_info = NULL;
 	struct kvm_hw_bp_info_arch *bp_info = NULL;
@@ -216,17 +216,10 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
 	else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
 		return -EINVAL;
 
-	size = dbg->arch.nr_hw_bp * sizeof(struct kvm_hw_breakpoint);
-	bp_data = kmalloc(size, GFP_KERNEL);
-	if (!bp_data) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
-	if (copy_from_user(bp_data, dbg->arch.hw_bp, size)) {
-		ret = -EFAULT;
-		goto error;
-	}
+	bp_data = memdup_user(dbg->arch.hw_bp,
+			      sizeof(*bp_data) * dbg->arch.nr_hw_bp);
+	if (IS_ERR(bp_data))
+		return PTR_ERR(bp_data);
 
 	for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
 		switch (bp_data[i].type) {
@@ -241,17 +234,19 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
 		}
 	}
 
-	size = nr_wp * sizeof(struct kvm_hw_wp_info_arch);
-	if (size > 0) {
-		wp_info = kmalloc(size, GFP_KERNEL);
+	if (nr_wp > 0) {
+		wp_info = kmalloc_array(nr_wp,
+					sizeof(*wp_info),
+					GFP_KERNEL);
 		if (!wp_info) {
 			ret = -ENOMEM;
 			goto error;
 		}
 	}
-	size = nr_bp * sizeof(struct kvm_hw_bp_info_arch);
-	if (size > 0) {
-		bp_info = kmalloc(size, GFP_KERNEL);
+	if (nr_bp > 0) {
+		bp_info = kmalloc_array(nr_bp,
+					sizeof(*bp_info),
+					GFP_KERNEL);
 		if (!bp_info) {
 			ret = -ENOMEM;
 			goto error;
@@ -382,14 +377,20 @@ void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
 	vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
 }
 
+#define PER_CODE_MASK		(PER_EVENT_MASK >> 24)
+#define PER_CODE_BRANCH		(PER_EVENT_BRANCH >> 24)
+#define PER_CODE_IFETCH		(PER_EVENT_IFETCH >> 24)
+#define PER_CODE_STORE		(PER_EVENT_STORE >> 24)
+#define PER_CODE_STORE_REAL	(PER_EVENT_STORE_REAL >> 24)
+
 #define per_bp_event(code) \
-		(code & (PER_EVENT_IFETCH | PER_EVENT_BRANCH))
+		(code & (PER_CODE_IFETCH | PER_CODE_BRANCH))
 #define per_write_wp_event(code) \
-		(code & (PER_EVENT_STORE | PER_EVENT_STORE_REAL))
+		(code & (PER_CODE_STORE | PER_CODE_STORE_REAL))
 
 static int debug_exit_required(struct kvm_vcpu *vcpu)
 {
-	u32 perc = (vcpu->arch.sie_block->perc << 24);
+	u8 perc = vcpu->arch.sie_block->perc;
 	struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
 	struct kvm_hw_wp_info_arch *wp_info = NULL;
 	struct kvm_hw_bp_info_arch *bp_info = NULL;
@@ -444,7 +445,7 @@ int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
 	const u8 ilen = kvm_s390_get_ilen(vcpu);
 	struct kvm_s390_pgm_info pgm_info = {
 		.code = PGM_PER,
-		.per_code = PER_EVENT_IFETCH >> 24,
+		.per_code = PER_CODE_IFETCH,
 		.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
 	};
 
@@ -458,33 +459,33 @@ int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
 
 static void filter_guest_per_event(struct kvm_vcpu *vcpu)
 {
-	u32 perc = vcpu->arch.sie_block->perc << 24;
+	const u8 perc = vcpu->arch.sie_block->perc;
 	u64 peraddr = vcpu->arch.sie_block->peraddr;
 	u64 addr = vcpu->arch.sie_block->gpsw.addr;
 	u64 cr9 = vcpu->arch.sie_block->gcr[9];
 	u64 cr10 = vcpu->arch.sie_block->gcr[10];
 	u64 cr11 = vcpu->arch.sie_block->gcr[11];
 	/* filter all events, demanded by the guest */
-	u32 guest_perc = perc & cr9 & PER_EVENT_MASK;
+	u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;
 
 	if (!guest_per_enabled(vcpu))
 		guest_perc = 0;
 
 	/* filter "successful-branching" events */
-	if (guest_perc & PER_EVENT_BRANCH &&
+	if (guest_perc & PER_CODE_BRANCH &&
 	    cr9 & PER_CONTROL_BRANCH_ADDRESS &&
 	    !in_addr_range(addr, cr10, cr11))
-		guest_perc &= ~PER_EVENT_BRANCH;
+		guest_perc &= ~PER_CODE_BRANCH;
 
 	/* filter "instruction-fetching" events */
-	if (guest_perc & PER_EVENT_IFETCH &&
+	if (guest_perc & PER_CODE_IFETCH &&
 	    !in_addr_range(peraddr, cr10, cr11))
-		guest_perc &= ~PER_EVENT_IFETCH;
+		guest_perc &= ~PER_CODE_IFETCH;
 
 	/* All other PER events will be given to the guest */
 	/* TODO: Check altered address/address space */
 
-	vcpu->arch.sie_block->perc = guest_perc >> 24;
+	vcpu->arch.sie_block->perc = guest_perc;
 
 	if (!guest_perc)
 		vcpu->arch.sie_block->iprcc &= ~PGM_PER;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index dfd0ca2638fa..1cab8a177d0e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -29,6 +29,7 @@ static const intercept_handler_t instruction_handlers[256] = {
 	[0x01] = kvm_s390_handle_01,
 	[0x82] = kvm_s390_handle_lpsw,
 	[0x83] = kvm_s390_handle_diag,
+	[0xaa] = kvm_s390_handle_aa,
 	[0xae] = kvm_s390_handle_sigp,
 	[0xb2] = kvm_s390_handle_b2,
 	[0xb6] = kvm_s390_handle_stctl,
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 24524c0f3ef8..be4db07f70d3 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -24,6 +24,8 @@
 #include <asm/sclp.h>
 #include <asm/isc.h>
 #include <asm/gmap.h>
+#include <asm/switch_to.h>
+#include <asm/nmi.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
#include "trace-s390.h" @@ -40,6 +42,7 @@ static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id) if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND)) return 0; + BUG_ON(!kvm_s390_use_sca_entries()); read_lock(&vcpu->kvm->arch.sca_lock); if (vcpu->kvm->arch.use_esca) { struct esca_block *sca = vcpu->kvm->arch.sca; @@ -68,6 +71,7 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id) { int expect, rc; + BUG_ON(!kvm_s390_use_sca_entries()); read_lock(&vcpu->kvm->arch.sca_lock); if (vcpu->kvm->arch.use_esca) { struct esca_block *sca = vcpu->kvm->arch.sca; @@ -109,6 +113,8 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu) struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; int rc, expect; + if (!kvm_s390_use_sca_entries()) + return; atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags); read_lock(&vcpu->kvm->arch.sca_lock); if (vcpu->kvm->arch.use_esca) { @@ -400,12 +406,78 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu) return rc ? -EFAULT : 0; } +static int __write_machine_check(struct kvm_vcpu *vcpu, + struct kvm_s390_mchk_info *mchk) +{ + unsigned long ext_sa_addr; + freg_t fprs[NUM_FPRS]; + union mci mci; + int rc; + + mci.val = mchk->mcic; + /* take care of lazy register loading via vcpu load/put */ + save_fpu_regs(); + save_access_regs(vcpu->run->s.regs.acrs); + + /* Extended save area */ + rc = read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, &ext_sa_addr, + sizeof(unsigned long)); + /* Only bits 0-53 are used for address formation */ + ext_sa_addr &= ~0x3ffUL; + if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) { + if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs, + 512)) + mci.vr = 0; + } else { + mci.vr = 0; + } + + /* General interruption information */ + rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID); + rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE); + + /* Register-save areas */ + if (MACHINE_HAS_VX) { + convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs); + rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128); + } else { + rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, + vcpu->run->s.regs.fprs, 128); + } + rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA, + vcpu->run->s.regs.gprs, 128); + rc |= put_guest_lc(vcpu, current->thread.fpu.fpc, + (u32 __user *) __LC_FP_CREG_SAVE_AREA); + rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr, + (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA); + rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu), + (u64 __user *) __LC_CPU_TIMER_SAVE_AREA); + rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8, + (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA); + rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA, + &vcpu->run->s.regs.acrs, 64); + rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA, + &vcpu->arch.sie_block->gcr, 128); + + /* Extended interruption information */ + rc |= put_guest_lc(vcpu, mchk->ext_damage_code, + (u32 __user *) __LC_EXT_DAMAGE_CODE); + rc |= put_guest_lc(vcpu, mchk->failing_storage_address, + (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); + rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout, + sizeof(mchk->fixed_logout)); + return rc ? 
-EFAULT : 0; +} + static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) { struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_mchk_info mchk = {}; - unsigned long adtl_status_addr; int deliver = 0; int rc = 0; @@ -446,29 +518,9 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK, mchk.cr14, mchk.mcic); - - rc = kvm_s390_vcpu_store_status(vcpu, - KVM_S390_STORE_STATUS_PREFIXED); - rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, - &adtl_status_addr, - sizeof(unsigned long)); - rc |= kvm_s390_vcpu_store_adtl_status(vcpu, - adtl_status_addr); - rc |= put_guest_lc(vcpu, mchk.mcic, - (u64 __user *) __LC_MCCK_CODE); - rc |= put_guest_lc(vcpu, mchk.failing_storage_address, - (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); - rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, - &mchk.fixed_logout, - sizeof(mchk.fixed_logout)); - rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, - &vcpu->arch.sie_block->gpsw, - sizeof(psw_t)); - rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, - &vcpu->arch.sie_block->gpsw, - sizeof(psw_t)); + rc = __write_machine_check(vcpu, &mchk); } - return rc ? -EFAULT : 0; + return rc; } static int __must_check __deliver_restart(struct kvm_vcpu *vcpu) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f142215ed30d..ac6c056df4b9 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -132,10 +132,7 @@ module_param(nested, int, S_IRUGO); MODULE_PARM_DESC(nested, "Nested virtualization support"); /* upper facilities limit for kvm */ -unsigned long kvm_s390_fac_list_mask[16] = { - 0xffe6000000000000UL, - 0x005e000000000000UL, -}; +unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM }; unsigned long kvm_s390_fac_list_mask_size(void) { @@ -376,7 +373,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_NR_VCPUS: case KVM_CAP_MAX_VCPUS: r = KVM_S390_BSCA_CPU_SLOTS; - if (sclp.has_esca && sclp.has_64bscao) + if (!kvm_s390_use_sca_entries()) + r = KVM_MAX_VCPUS; + else if (sclp.has_esca && sclp.has_64bscao) r = KVM_S390_ESCA_CPU_SLOTS; break; case KVM_CAP_NR_MEMSLOTS: @@ -1553,6 +1552,8 @@ static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu) static void sca_del_vcpu(struct kvm_vcpu *vcpu) { + if (!kvm_s390_use_sca_entries()) + return; read_lock(&vcpu->kvm->arch.sca_lock); if (vcpu->kvm->arch.use_esca) { struct esca_block *sca = vcpu->kvm->arch.sca; @@ -1570,6 +1571,13 @@ static void sca_del_vcpu(struct kvm_vcpu *vcpu) static void sca_add_vcpu(struct kvm_vcpu *vcpu) { + if (!kvm_s390_use_sca_entries()) { + struct bsca_block *sca = vcpu->kvm->arch.sca; + + /* we still need the basic sca for the ipte control */ + vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); + vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; + } read_lock(&vcpu->kvm->arch.sca_lock); if (vcpu->kvm->arch.use_esca) { struct esca_block *sca = vcpu->kvm->arch.sca; @@ -1650,6 +1658,11 @@ static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id) { int rc; + if (!kvm_s390_use_sca_entries()) { + if (id < KVM_MAX_VCPUS) + return true; + return false; + } if (id < KVM_S390_BSCA_CPU_SLOTS) return true; if (!sclp.has_esca || !sclp.has_64bscao) @@ -1938,8 +1951,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->eca |= 1; if (sclp.has_sigpif) vcpu->arch.sie_block->eca |= 0x10000000U; - if (test_kvm_facility(vcpu->kvm, 64)) - vcpu->arch.sie_block->ecb3 |= 
0x01; if (test_kvm_facility(vcpu->kvm, 129)) { vcpu->arch.sie_block->eca |= 0x00020000; vcpu->arch.sie_block->ecd |= 0x20000000; @@ -2694,6 +2705,19 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) kvm_clear_async_pf_completion_queue(vcpu); } + /* + * If userspace sets the riccb (e.g. after migration) to a valid state, + * we should enable RI here instead of doing the lazy enablement. + */ + if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) && + test_kvm_facility(vcpu->kvm, 64)) { + struct runtime_instr_cb *riccb = + (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; + + if (riccb->valid) + vcpu->arch.sie_block->ecb3 |= 0x01; + } + kvm_run->kvm_dirty_regs = 0; } @@ -2837,38 +2861,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) return kvm_s390_store_status_unloaded(vcpu, addr); } -/* - * store additional status at address - */ -int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu, - unsigned long gpa) -{ - /* Only bits 0-53 are used for address formation */ - if (!(gpa & ~0x3ff)) - return 0; - - return write_guest_abs(vcpu, gpa & ~0x3ff, - (void *)&vcpu->run->s.regs.vrs, 512); -} - -int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr) -{ - if (!test_kvm_facility(vcpu->kvm, 129)) - return 0; - - /* - * The guest VXRS are in the host VXRs due to the lazy - * copying in vcpu load/put. We can simply call save_fpu_regs() - * to save the current register state because we are in the - * middle of a load/put cycle. - * - * Let's update our copies before we save it into the save area. - */ - save_fpu_regs(); - - return kvm_s390_store_adtl_status_unloaded(vcpu, addr); -} - static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) { kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index b8432862a817..3a4e97f1a9e6 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -20,6 +20,7 @@ #include <linux/kvm_host.h> #include <asm/facility.h> #include <asm/processor.h> +#include <asm/sclp.h> typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu); @@ -245,6 +246,7 @@ static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu) /* implemented in priv.c */ int is_valid_psw(psw_t *psw); +int kvm_s390_handle_aa(struct kvm_vcpu *vcpu); int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); int kvm_s390_handle_e5(struct kvm_vcpu *vcpu); int kvm_s390_handle_01(struct kvm_vcpu *vcpu); @@ -273,10 +275,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu); void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod); long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); -int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu, - unsigned long addr); int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); -int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu); @@ -389,4 +388,13 @@ static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm) return &sca->ipte_control; } +static inline int kvm_s390_use_sca_entries(void) +{ + /* + * Without SIGP interpretation, only SRS interpretation (if available) + * might use the entries. 
By not setting the entries and keeping them + * invalid, hardware will not access them but intercept. + */ + return sclp.has_sigpif; +} #endif diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 46160388e996..e18435355c16 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -32,6 +32,24 @@ #include "kvm-s390.h" #include "trace.h" +static int handle_ri(struct kvm_vcpu *vcpu) +{ + if (test_kvm_facility(vcpu->kvm, 64)) { + vcpu->arch.sie_block->ecb3 |= 0x01; + kvm_s390_retry_instr(vcpu); + return 0; + } else + return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); +} + +int kvm_s390_handle_aa(struct kvm_vcpu *vcpu) +{ + if ((vcpu->arch.sie_block->ipa & 0xf) <= 4) + return handle_ri(vcpu); + else + return -EOPNOTSUPP; +} + /* Handle SCK (SET CLOCK) interception */ static int handle_set_clock(struct kvm_vcpu *vcpu) { @@ -1093,6 +1111,9 @@ static int handle_stctg(struct kvm_vcpu *vcpu) static const intercept_handler_t eb_handlers[256] = { [0x2f] = handle_lctlg, [0x25] = handle_stctg, + [0x60] = handle_ri, + [0x61] = handle_ri, + [0x62] = handle_ri, }; int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c index e390bbb16443..48352bffbc92 100644 --- a/arch/s390/lib/string.c +++ b/arch/s390/lib/string.c @@ -237,11 +237,10 @@ char * strrchr(const char * s, int c) EXPORT_SYMBOL(strrchr); static inline int clcle(const char *s1, unsigned long l1, - const char *s2, unsigned long l2, - int *diff) + const char *s2, unsigned long l2) { register unsigned long r2 asm("2") = (unsigned long) s1; - register unsigned long r3 asm("3") = (unsigned long) l2; + register unsigned long r3 asm("3") = (unsigned long) l1; register unsigned long r4 asm("4") = (unsigned long) s2; register unsigned long r5 asm("5") = (unsigned long) l2; int cc; @@ -252,7 +251,6 @@ static inline int clcle(const char *s1, unsigned long l1, " srl %0,28" : "=&d" (cc), "+a" (r2), "+a" (r3), "+a" (r4), "+a" (r5) : : "cc"); - *diff = *(char *)r2 - *(char *)r4; return cc; } @@ -270,9 +268,9 @@ char * strstr(const char * s1,const char * s2) return (char *) s1; l1 = __strend(s1) - s1; while (l1-- >= l2) { - int cc, dummy; + int cc; - cc = clcle(s1, l1, s2, l2, &dummy); + cc = clcle(s1, l2, s2, l2); if (!cc) return (char *) s1; s1++; @@ -313,11 +311,11 @@ EXPORT_SYMBOL(memchr); */ int memcmp(const void *cs, const void *ct, size_t n) { - int ret, diff; + int ret; - ret = clcle(cs, n, ct, n, &diff); + ret = clcle(cs, n, ct, n); if (ret) - ret = diff; + ret = ret == 1 ? -1 : 1; return ret; } EXPORT_SYMBOL(memcmp); diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index 7104ffb5a67f..af7cf28cf97e 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c @@ -252,6 +252,8 @@ static int change_page_attr(unsigned long addr, unsigned long end, int rc = -EINVAL; pgd_t *pgdp; + if (addr == end) + return 0; if (end >= MODULES_END) return -EINVAL; mutex_lock(&cpa_mutex); |
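The guestdbg.c hunks above replace an open-coded kmalloc() plus copy_from_user() pair with memdup_user(), which allocates and copies in one step and reports failure through an ERR_PTR. A minimal sketch of that idiom follows, using a hypothetical structure and helper name rather than the KVM types from the patch:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Hypothetical stand-in for struct kvm_hw_breakpoint. */
struct demo_bp {
	unsigned long addr;
	int type;
};

static int demo_import(const struct demo_bp __user *ubuf, unsigned int count)
{
	struct demo_bp *bps;

	/*
	 * memdup_user() kmallocs the buffer and copies it from user space;
	 * on failure it returns an ERR_PTR (never NULL), so the result is
	 * checked with IS_ERR()/PTR_ERR() instead of a NULL test.
	 */
	bps = memdup_user(ubuf, sizeof(*bps) * count);
	if (IS_ERR(bps))
		return PTR_ERR(bps);

	/* ... validate and use bps[0..count-1] ... */

	kfree(bps);
	return 0;
}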
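The lib/string.c hunks change memcmp() to derive its sign from the CLCLE condition code alone (0: operands equal, 1: first operand low, 2: first operand high) instead of re-reading bytes after the comparison. A portable sketch of that mapping, with a plain C loop standing in for the CLCLE instruction; the demo_* names are illustrative and not part of the kernel:

#include <stddef.h>

/*
 * Stand-in for clcle() with equal operand lengths, as memcmp() uses it:
 * return a CLCLE-style condition code.
 */
static int demo_clcle(const unsigned char *s1, const unsigned char *s2,
		      size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (s1[i] != s2[i])
			return s1[i] < s2[i] ? 1 : 2;
	}
	return 0;
}

/* Same return-value mapping as the patched memcmp(): cc 1 -> -1, cc 2 -> 1. */
static int demo_memcmp(const void *cs, const void *ct, size_t n)
{
	int ret = demo_clcle(cs, ct, n);

	if (ret)
		ret = ret == 1 ? -1 : 1;
	return ret;
}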