From e65f30e0cb29694c4241bd9c96ea9413938fcec5 Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Thu, 4 Feb 2016 10:24:52 +0100 Subject: s390: hypfs: Move diag implementation and data definitions Diag 204 data and function definitions currently live in the hypfs files. As KVM will be a consumer of this data, we need to make it publicly available and move it to the appropriate diag.{c,h} files. __attribute__ ((packed)) occurences were replaced with __packed for all moved structs. Signed-off-by: Janosch Frank Reviewed-by: David Hildenbrand Acked-by: Michael Holzheu Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/diag.h | 127 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 127 insertions(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h index 5fac921c1c42..f72744f14e31 100644 --- a/arch/s390/include/asm/diag.h +++ b/arch/s390/include/asm/diag.h @@ -78,4 +78,131 @@ struct diag210 { extern int diag210(struct diag210 *addr); +/* bit is set in flags, when physical cpu info is included in diag 204 data */ +#define DIAG204_LPAR_PHYS_FLG 0x80 +#define DIAG204_LPAR_NAME_LEN 8 /* lpar name len in diag 204 data */ +#define DIAG204_CPU_NAME_LEN 16 /* type name len of cpus in diag224 name table */ + +/* diag 204 subcodes */ +enum diag204_sc { + DIAG204_SUBC_STIB4 = 4, + DIAG204_SUBC_RSI = 5, + DIAG204_SUBC_STIB6 = 6, + DIAG204_SUBC_STIB7 = 7 +}; + +/* The two available diag 204 data formats */ +enum diag204_format { + DIAG204_INFO_SIMPLE = 0, + DIAG204_INFO_EXT = 0x00010000 +}; + +struct diag204_info_blk_hdr { + __u8 npar; + __u8 flags; + __u16 tslice; + __u16 phys_cpus; + __u16 this_part; + __u64 curtod; +} __packed; + +struct diag204_x_info_blk_hdr { + __u8 npar; + __u8 flags; + __u16 tslice; + __u16 phys_cpus; + __u16 this_part; + __u64 curtod1; + __u64 curtod2; + char reserved[40]; +} __packed; + +struct diag204_part_hdr { + __u8 pn; + __u8 cpus; + char reserved[6]; + char part_name[DIAG204_LPAR_NAME_LEN]; +} __packed; + +struct diag204_x_part_hdr { + __u8 pn; + __u8 cpus; + __u8 rcpus; + __u8 pflag; + __u32 mlu; + char part_name[DIAG204_LPAR_NAME_LEN]; + char lpc_name[8]; + char os_name[8]; + __u64 online_cs; + __u64 online_es; + __u8 upid; + char reserved1[3]; + __u32 group_mlu; + char group_name[8]; + char reserved2[32]; +} __packed; + +struct diag204_cpu_info { + __u16 cpu_addr; + char reserved1[2]; + __u8 ctidx; + __u8 cflag; + __u16 weight; + __u64 acc_time; + __u64 lp_time; +} __packed; + +struct diag204_x_cpu_info { + __u16 cpu_addr; + char reserved1[2]; + __u8 ctidx; + __u8 cflag; + __u16 weight; + __u64 acc_time; + __u64 lp_time; + __u16 min_weight; + __u16 cur_weight; + __u16 max_weight; + char reseved2[2]; + __u64 online_time; + __u64 wait_time; + __u32 pma_weight; + __u32 polar_weight; + char reserved3[40]; +} __packed; + +struct diag204_phys_hdr { + char reserved1[1]; + __u8 cpus; + char reserved2[6]; + char mgm_name[8]; +} __packed; + +struct diag204_x_phys_hdr { + char reserved1[1]; + __u8 cpus; + char reserved2[6]; + char mgm_name[8]; + char reserved3[80]; +} __packed; + +struct diag204_phys_cpu { + __u16 cpu_addr; + char reserved1[2]; + __u8 ctidx; + char reserved2[3]; + __u64 mgm_time; + char reserved3[8]; +} __packed; + +struct diag204_x_phys_cpu { + __u16 cpu_addr; + char reserved1[2]; + __u8 ctidx; + char reserved2[3]; + __u64 mgm_time; + char reserved3[80]; +} __packed; + +int diag204(unsigned long subcode, unsigned long size, void *addr); #endif /* _ASM_S390_DIAG_H */ -- cgit v1.2.1 From 
e435dc31398e63b992639cf62024d959219db191 Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Mon, 8 Feb 2016 13:36:22 +0100 Subject: s390: Make cpc_name accessible sclp_ocf.c is the only way to get the cpc name, as it registers the sole event handler for the ocf event. By creating a new global function that copies that name, we make it accessible to the world which longs to retrieve it. Additionally we now also store the cpc name as EBCDIC, so we don't have to convert it to and from ASCII if it is requested in native encoding. Signed-off-by: Janosch Frank Reviewed-by: David Hildenbrand Acked-by: Heiko Carstens Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/sclp.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index e4f6f73afe2f..49736a0d4e0e 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -101,5 +101,6 @@ int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count); int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count); void sclp_early_detect(void); void _sclp_print_early(const char *); +void sclp_ocf_cpc_name_copy(char *dst); #endif /* _ASM_S390_SCLP_H */ -- cgit v1.2.1 From 022bd2d11cc51f62e873a09bcae8016b10950194 Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Fri, 12 Feb 2016 12:52:49 +0100 Subject: s390: Make diag224 public Diag204's cpu structures only contain the cpu type by means of an index in the diag224 name table. Hence, to be able to use diag204 in any meaningful way, we also need a usable diag224 interface. Signed-off-by: Janosch Frank Reviewed-by: Christian Borntraeger Reviewed-by: David Hildenbrand Acked-by: Heiko Carstens Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/diag.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h index f72744f14e31..197e303a76e9 100644 --- a/arch/s390/include/asm/diag.h +++ b/arch/s390/include/asm/diag.h @@ -205,4 +205,5 @@ struct diag204_x_phys_cpu { } __packed; int diag204(unsigned long subcode, unsigned long size, void *addr); +int diag224(void *ptr); #endif /* _ASM_S390_DIAG_H */ -- cgit v1.2.1 From a011eeb2a3d6cd778eb63bea0bf149ebbe658ab5 Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Mon, 9 May 2016 14:14:01 +0200 Subject: KVM: s390: Add operation exception interception handler This commit introduces code that handles operation exception interceptions. With this handler we can emulate instructions by using illegal opcodes. 
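A rough sketch of such a handler follows; this is a hedged illustration, not the in-tree implementation, and the function name plus the read_guest_instr() helper are assumptions:

static int handle_operexc(struct kvm_vcpu *vcpu)
{
	__u16 opcode;

	vcpu->stat.exit_operation_exception++;	/* counter added below */

	/* fetch the first two bytes of the intercepted instruction (assumed helper) */
	if (read_guest_instr(vcpu, &opcode, sizeof(opcode)))
		return -EFAULT;

	/* dispatch emulation for known "illegal" opcodes here ... */

	/* nothing to emulate: reflect the operation exception to the guest */
	return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}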
Signed-off-by: Janosch Frank Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 37b9017c6a96..093ea14109e2 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -255,6 +255,7 @@ struct kvm_vcpu_stat { u32 instruction_stctg; u32 exit_program_interruption; u32 exit_instr_and_program; + u32 exit_operation_exception; u32 deliver_external_call; u32 deliver_emergency_signal; u32 deliver_service_signal; -- cgit v1.2.1 From a2d57b35c0226102b1f2ffdc2f719fcc30c99bf5 Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Mon, 23 May 2016 15:09:19 +0200 Subject: KVM: s390: Extend diag 204 fields The new store hypervisor information instruction, which we are going to introduce, needs previously unused fields in diag 204 structures. Signed-off-by: Janosch Frank Acked-by: Heiko Carstens Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/diag.h | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h index 197e303a76e9..f4000cdb6921 100644 --- a/arch/s390/include/asm/diag.h +++ b/arch/s390/include/asm/diag.h @@ -97,6 +97,11 @@ enum diag204_format { DIAG204_INFO_EXT = 0x00010000 }; +enum diag204_cpu_flags { + DIAG204_CPU_ONLINE = 0x20, + DIAG204_CPU_CAPPED = 0x40, +}; + struct diag204_info_blk_hdr { __u8 npar; __u8 flags; @@ -136,10 +141,13 @@ struct diag204_x_part_hdr { __u64 online_cs; __u64 online_es; __u8 upid; - char reserved1[3]; + __u8 reserved:3; + __u8 mtid:5; + char reserved1[2]; __u32 group_mlu; char group_name[8]; - char reserved2[32]; + char hardware_group_name[8]; + char reserved2[24]; } __packed; struct diag204_cpu_info { @@ -168,7 +176,9 @@ struct diag204_x_cpu_info { __u64 wait_time; __u32 pma_weight; __u32 polar_weight; - char reserved3[40]; + __u32 cpu_type_cap; + __u32 group_cpu_type_cap; + char reserved3[32]; } __packed; struct diag204_phys_hdr { @@ -199,7 +209,8 @@ struct diag204_x_phys_cpu { __u16 cpu_addr; char reserved1[2]; __u8 ctidx; - char reserved2[3]; + char reserved2[1]; + __u16 weight; __u64 mgm_time; char reserved3[80]; } __packed; -- cgit v1.2.1 From 95ca2cb57985b07f5b136405f80a5106f5b06641 Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Mon, 23 May 2016 15:11:58 +0200 Subject: KVM: s390: Add sthyi emulation Store Hypervisor Information is an emulated z/VM instruction that provides a guest with basic information about the layers it is running on. This includes information about the cpu configuration of both the machine and the lpar, as well as their names, machine model and machine type. This information enables an application to determine the maximum capacity of CPs and IFLs available to software. The instruction is available whenever the facility bit 74 is set, otherwise executing it results in an operation exception. It is important to check the validity flags in the sections before using data from any structure member. It is not guaranteed that all members will be valid on all machines / machine configurations. 
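A minimal sketch of that facility gating (hedged; handle_sthyi() is an assumed emulation entry point, the counter is the one added below):

static int sthyi_intercept(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_sthyi++;

	/* without facility 74 the instruction must fail with an operation exception */
	if (!test_kvm_facility(vcpu->kvm, 74))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	return handle_sthyi(vcpu);	/* build and write the response block */
}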
Signed-off-by: Janosch Frank Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/diag.h | 10 ++++++++++ arch/s390/include/asm/kvm_host.h | 2 ++ arch/s390/include/uapi/asm/sie.h | 1 + 3 files changed, 13 insertions(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h index f4000cdb6921..82211998ccf7 100644 --- a/arch/s390/include/asm/diag.h +++ b/arch/s390/include/asm/diag.h @@ -215,6 +215,16 @@ struct diag204_x_phys_cpu { char reserved3[80]; } __packed; +struct diag204_x_part_block { + struct diag204_x_part_hdr hdr; + struct diag204_x_cpu_info cpus[]; +} __packed; + +struct diag204_x_phys_block { + struct diag204_x_phys_hdr hdr; + struct diag204_x_phys_cpu cpus[]; +} __packed; + int diag204(unsigned long subcode, unsigned long size, void *addr); int diag224(void *ptr); #endif /* _ASM_S390_DIAG_H */ diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 093ea14109e2..7233b1c49964 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -154,6 +154,7 @@ struct kvm_s390_sie_block { #define LCTL_CR14 0x0002 __u16 lctl; /* 0x0044 */ __s16 icpua; /* 0x0046 */ +#define ICTL_OPEREXC 0x80000000 #define ICTL_PINT 0x20000000 #define ICTL_LPSW 0x00400000 #define ICTL_STCTL 0x00040000 @@ -279,6 +280,7 @@ struct kvm_vcpu_stat { u32 instruction_stfl; u32 instruction_tprot; u32 instruction_essa; + u32 instruction_sthyi; u32 instruction_sigp_sense; u32 instruction_sigp_sense_running; u32 instruction_sigp_external_call; diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h index 8fb5d4a6dd25..3ac634368939 100644 --- a/arch/s390/include/uapi/asm/sie.h +++ b/arch/s390/include/uapi/asm/sie.h @@ -140,6 +140,7 @@ exit_code_ipa0(0xB2, 0x4c, "TAR"), \ exit_code_ipa0(0xB2, 0x50, "CSP"), \ exit_code_ipa0(0xB2, 0x54, "MVPG"), \ + exit_code_ipa0(0xB2, 0x56, "STHYI"), \ exit_code_ipa0(0xB2, 0x58, "BSG"), \ exit_code_ipa0(0xB2, 0x5a, "BSA"), \ exit_code_ipa0(0xB2, 0x5f, "CHSC"), \ -- cgit v1.2.1 From 7d0a5e62411a9223512c6af2e4c08a2d7c00fa2e Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Tue, 10 May 2016 15:03:42 +0200 Subject: KVM: s390: Limit sthyi execution Store hypervisor information is a valid instruction not only in supervisor state but also in problem state, i.e. the guest's userspace. Its execution is not only computational and memory intensive, but also has to get hold of the ipte lock to write to the guest's memory. This lock is not intended to be held often and long, especially not from the untrusted guest userspace. Therefore we apply rate limiting of sthyi executions per VM. 
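A hedged sketch of such a per-VM throttle using the sthyi_limit field added below; the interval, burst and busy handling are illustrative assumptions:

#include <linux/ratelimit.h>

/* once per VM, e.g. during VM creation */
ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 1);

/* in the emulation path */
static int sthyi_rate_limited(struct kvm_vcpu *vcpu)
{
	if (!__ratelimit(&vcpu->kvm->arch.sthyi_limit)) {
		kvm_s390_set_psw_cc(vcpu, 3);	/* report "busy", let the guest retry */
		return 1;
	}
	return 0;
}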
Signed-off-by: Janosch Frank Acked-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 7233b1c49964..bcc20dc91ea8 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -652,6 +652,7 @@ struct kvm_arch{ wait_queue_head_t ipte_wq; int ipte_lock_count; struct mutex ipte_mutex; + struct ratelimit_state sthyi_limit; spinlock_t start_stop_lock; struct sie_page2 *sie_page2; struct kvm_s390_cpu_model model; -- cgit v1.2.1 From 15c9705f0c8af2d19dede9866aec364746b269ef Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 19 Mar 2015 17:36:43 +0100 Subject: KVM: s390: interface to query and configure cpu features For now, we only have an interface to query and configure facilities indicated via STFL(E). However, we also have features indicated via SCLP, that have to be indicated to the guest by user space and usually require KVM support. This patch allows user space to query and configure available cpu features for the guest. Please note that disabling a feature doesn't necessarily mean that it is completely disabled (e.g. ESOP is mostly handled by the SIE). We will try our best to disable it. Most features (e.g. SCLP) can't directly be forwarded, as most of them need in addition to hardware support, support in KVM. As we later on want to turn these features in KVM explicitly on/off (to simulate different behavior), we have to filter all features provided by the hardware and make them configurable. Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 2 ++ arch/s390/include/uapi/asm/kvm.h | 8 ++++++++ 2 files changed, 10 insertions(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index bcc20dc91ea8..b2a83a0ce42c 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -658,6 +658,8 @@ struct kvm_arch{ struct kvm_s390_cpu_model model; struct kvm_s390_crypto crypto; u64 epoch; + /* subset of available cpu features enabled by user space */ + DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS); }; #define KVM_HVA_ERR_BAD (-1UL) diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 3b8e99ef9d58..a8559f265e26 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -93,6 +93,14 @@ struct kvm_s390_vm_cpu_machine { __u64 fac_list[256]; }; +#define KVM_S390_VM_CPU_PROCESSOR_FEAT 2 +#define KVM_S390_VM_CPU_MACHINE_FEAT 3 + +#define KVM_S390_VM_CPU_FEAT_NR_BITS 1024 +struct kvm_s390_vm_cpu_feat { + __u64 feat[16]; +}; + /* kvm attributes for crypto */ #define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0 #define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1 -- cgit v1.2.1 From 22be5a133169e855097936438417ab1b672ad43f Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 21 Jan 2016 13:22:54 +0100 Subject: KVM: s390: forward ESOP if available ESOP guarantees that during a protection exception, bit 61 of real location 168-175 will only be set to 1 if it was because of ALCP or DATP. If the exception is due to LAP or KCP, the bit will always be set to 0. The old SOP definition allowed bit 61 to be unpredictable in case of LAP or KCP in some conditions. So ESOP replaces this unpredictability by a guarantee. 
Therefore, we can directly forward ESOP if it is available on our machine. We don't have to do anything when ESOP is disabled - the guest will simply expect unpredictable values. Our guest access functions are already handling ESOP properly. Please note that future functionality in KVM will require knowledge about ESOP being enabled for a guest or not. Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/uapi/asm/kvm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index a8559f265e26..789c4e27e294 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -97,6 +97,7 @@ struct kvm_s390_vm_cpu_machine { #define KVM_S390_VM_CPU_MACHINE_FEAT 3 #define KVM_S390_VM_CPU_FEAT_NR_BITS 1024 +#define KVM_S390_VM_CPU_FEAT_ESOP 0 struct kvm_s390_vm_cpu_feat { __u64 feat[16]; }; -- cgit v1.2.1 From 1afd43e0fbba4a92effc22977e3a7e64213ee860 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 18 May 2016 15:59:06 +0200 Subject: s390/crypto: allow to query all known cpacf functions KVM will have to query these functions, let's add at least the query capabilities. PCKMO has RRE format, as bit 16-31 are ignored, we can still use the existing function. As PCKMO won't touch the cc, let's force it to 0 upfront. Signed-off-by: David Hildenbrand Acked-by: Ingo Tuchscherer Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/cpacf.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h index 1a82cf26ee11..d28621de8e0b 100644 --- a/arch/s390/include/asm/cpacf.h +++ b/arch/s390/include/asm/cpacf.h @@ -20,6 +20,9 @@ #define CPACF_KMC 0xb92f /* MSA */ #define CPACF_KIMD 0xb93e /* MSA */ #define CPACF_KLMD 0xb93f /* MSA */ +#define CPACF_PCKMO 0xb928 /* MSA3 */ +#define CPACF_KMF 0xb92a /* MSA4 */ +#define CPACF_KMO 0xb92b /* MSA4 */ #define CPACF_PCC 0xb92c /* MSA4 */ #define CPACF_KMCTR 0xb92d /* MSA4 */ #define CPACF_PPNO 0xb93c /* MSA5 */ @@ -136,6 +139,7 @@ static inline void __cpacf_query(unsigned int opcode, unsigned char *status) register unsigned long r1 asm("1") = (unsigned long) status; asm volatile( + " spm 0\n" /* pckmo doesn't change the cc */ /* Parameter registers are ignored, but may not be 0 */ "0: .insn rrf,%[opc] << 16,2,2,2,0\n" " brc 1,0b\n" /* handle partial completion */ @@ -157,6 +161,12 @@ static inline int cpacf_query(unsigned int opcode, unsigned int func) if (!test_facility(17)) /* check for MSA */ return 0; break; + case CPACF_PCKMO: + if (!test_facility(76)) /* check for MSA3 */ + return 0; + break; + case CPACF_KMF: + case CPACF_KMO: case CPACF_PCC: case CPACF_KMCTR: if (!test_facility(77)) /* check for MSA4 */ -- cgit v1.2.1 From 0a763c780b7cb830c250d00ead975778ab948f40 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 18 May 2016 16:03:47 +0200 Subject: KVM: s390: interface to query and configure cpu subfunctions We have certain instructions that indicate available subfunctions via a query subfunction (crypto functions and ptff), or via a test bit function (plo). By exposing these "subfunction blocks" to user space, we allow user space to 1) query available subfunctions and make sure subfunctions won't get lost during migration - e.g. 
properly indicate them via a CPU model 2) change the subfunctions to be reported to the guest (even adding unavailable ones) This mechanism works just like the way we indicate the stfl(e) list to user space. This way, user space could even emulate some subfunctions in QEMU in the future. If this is ever applicable, we have to make sure later on, that unsupported subfunctions result in an intercept to QEMU. Please note that support to indicate them to the guest is still missing and requires hardware support. Usually, the IBC takes already care of these subfunctions for migration safety. QEMU should make sure to always set these bits properly according to the machine generation to be emulated. Available subfunctions are only valid in combination with STFLE bits retrieved via KVM_S390_VM_CPU_MACHINE and enabled via KVM_S390_VM_CPU_PROCESSOR. If the applicable bits are available, the indicated subfunctions are guaranteed to be correct. Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/uapi/asm/kvm.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 789c4e27e294..f0818d70d73d 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -102,6 +102,26 @@ struct kvm_s390_vm_cpu_feat { __u64 feat[16]; }; +#define KVM_S390_VM_CPU_PROCESSOR_SUBFUNC 4 +#define KVM_S390_VM_CPU_MACHINE_SUBFUNC 5 +/* for "test bit" instructions MSB 0 bit ordering, for "query" raw blocks */ +struct kvm_s390_vm_cpu_subfunc { + __u8 plo[32]; /* always */ + __u8 ptff[16]; /* with TOD-clock steering */ + __u8 kmac[16]; /* with MSA */ + __u8 kmc[16]; /* with MSA */ + __u8 km[16]; /* with MSA */ + __u8 kimd[16]; /* with MSA */ + __u8 klmd[16]; /* with MSA */ + __u8 pckmo[16]; /* with MSA3 */ + __u8 kmctr[16]; /* with MSA4 */ + __u8 kmf[16]; /* with MSA4 */ + __u8 kmo[16]; /* with MSA4 */ + __u8 pcc[16]; /* with MSA4 */ + __u8 ppno[16]; /* with MSA5 */ + __u8 reserved[1824]; +}; + /* kvm attributes for crypto */ #define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0 #define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1 -- cgit v1.2.1 From 4013ade3fb2fefa021827d675d8bc1d51f4aef93 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 24 Nov 2015 12:49:43 +0100 Subject: s390/sclp: detect 64-bit-SCAO facility Let's correctly detect that facility, so we can correctly handle its abscence later on. Reviewed-by: Christian Borntraeger Acked-by: Martin Schwidefsky Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/sclp.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 49736a0d4e0e..521400086e65 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -59,6 +59,7 @@ struct sclp_info { unsigned char has_hvs : 1; unsigned char has_esca : 1; unsigned char has_sief2 : 1; + unsigned char has_64bscao : 1; unsigned int ibc; unsigned int mtid; unsigned int mtid_cp; -- cgit v1.2.1 From b9e28897e6e9f82585ecf6ea45942866ece7d167 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 24 Nov 2015 12:51:52 +0100 Subject: s390/sclp: detect guest-PER enhancement Let's detect that facility, so we can correctly handle its abscence. 
Reviewed-by: Christian Borntraeger Acked-by: Martin Schwidefsky Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/sclp.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 521400086e65..076f6318b6fa 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -33,7 +33,8 @@ struct sclp_core_entry { u8 : 4; u8 sief2 : 1; u8 : 3; - u8 : 3; + u8 : 2; + u8 gpere : 1; u8 siif : 1; u8 sigpif : 1; u8 : 3; @@ -60,6 +61,7 @@ struct sclp_info { unsigned char has_esca : 1; unsigned char has_sief2 : 1; unsigned char has_64bscao : 1; + unsigned char has_gpere : 1; unsigned int ibc; unsigned int mtid; unsigned int mtid_cp; -- cgit v1.2.1 From 09be9cb92bb9e799bdbfd3834595bd6b4703b40b Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 24 Nov 2015 12:55:35 +0100 Subject: s390/sclp: detect cmma Let's detect the Collaborative-memory-management-interpretation facility, aka CMM assist, so we can correctly enable cmma later. Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/sclp.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 076f6318b6fa..fa40ac8056f5 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -62,6 +62,7 @@ struct sclp_info { unsigned char has_sief2 : 1; unsigned char has_64bscao : 1; unsigned char has_gpere : 1; + unsigned char has_cmma : 1; unsigned int ibc; unsigned int mtid; unsigned int mtid_cp; -- cgit v1.2.1 From 5236c751da5e6ccfda4e5d53690a37dfb456997b Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 24 Nov 2015 12:53:46 +0100 Subject: s390/sclp: detect guest-storage-limit-suppression Let's detect that facility. Reviewed-by: Christian Borntraeger Acked-by: Martin Schwidefsky Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/sclp.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index fa40ac8056f5..e1450dd9d932 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -63,6 +63,7 @@ struct sclp_info { unsigned char has_64bscao : 1; unsigned char has_gpere : 1; unsigned char has_cmma : 1; + unsigned char has_gsls : 1; unsigned int ibc; unsigned int mtid; unsigned int mtid_cp; -- cgit v1.2.1 From efed110446226c725268a1f980806d799990a979 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 16 Apr 2015 12:32:41 +0200 Subject: KVM: s390: handle missing guest-storage-limit-suppression If guest-storage-limit-suppression is not available, we would for now have a valid guest address space with size 0. So let's simply set the origin to 0 and the limit to hamax. 
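A hedged sketch of that fallback, using the mso/msl fields added below (the surrounding shadow-SIE setup code and the scb variable are assumptions):

if (!sclp.has_gsls) {
	scb->mso = 0;		/* guest storage origin */
	scb->msl = sclp.hamax;	/* guest storage limit: highest addressable address */
}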
Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index b2a83a0ce42c..9eed5c18a61c 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -186,7 +186,9 @@ struct kvm_s390_sie_block { __u32 scaol; /* 0x0064 */ __u8 reserved68[4]; /* 0x0068 */ __u32 todpr; /* 0x006c */ - __u8 reserved70[32]; /* 0x0070 */ + __u8 reserved70[16]; /* 0x0070 */ + __u64 mso; /* 0x0080 */ + __u64 msl; /* 0x0088 */ psw_t gpsw; /* 0x0090 */ __u64 gg14; /* 0x00a0 */ __u64 gg15; /* 0x00a8 */ -- cgit v1.2.1 From 72cd82b9e9d075713367ad840c2a9b52b4cd447d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 24 Nov 2015 12:59:03 +0100 Subject: s390/sclp: detect intervention bypass facility Let's detect if we have the intervention bypass facility installed. Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/sclp.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index e1450dd9d932..ef1f427ad4d1 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -38,7 +38,11 @@ struct sclp_core_entry { u8 siif : 1; u8 sigpif : 1; u8 : 3; - u8 reserved2[10]; + u8 reserved2[3]; + u8 : 2; + u8 ib : 1; + u8 : 5; + u8 reserved3[6]; u8 type; u8 reserved1; } __attribute__((packed)); @@ -64,6 +68,7 @@ struct sclp_info { unsigned char has_gpere : 1; unsigned char has_cmma : 1; unsigned char has_gsls : 1; + unsigned char has_ib : 1; unsigned int ibc; unsigned int mtid; unsigned int mtid_cp; -- cgit v1.2.1 From 4a5c3e08271216891ce1b5315cec3dcadbd01cd4 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 24 Nov 2015 13:00:23 +0100 Subject: s390/sclp: detect conditional-external-interception facility Let's detect if we have that facility. Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/sclp.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index ef1f427ad4d1..c91ad198a59c 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -41,7 +41,8 @@ struct sclp_core_entry { u8 reserved2[3]; u8 : 2; u8 ib : 1; - u8 : 5; + u8 cei : 1; + u8 : 4; u8 reserved3[6]; u8 type; u8 reserved1; @@ -69,6 +70,7 @@ struct sclp_info { unsigned char has_cmma : 1; unsigned char has_gsls : 1; unsigned char has_ib : 1; + unsigned char has_cei : 1; unsigned int ibc; unsigned int mtid; unsigned int mtid_cp; -- cgit v1.2.1 From a0eb55e6318f1bcfe93b01f0944622f14a6b2977 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 24 Nov 2015 13:02:25 +0100 Subject: s390/sclp: detect PFMF interpretation facility Let's detect that facility. 
Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/sclp.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index c91ad198a59c..bdb7f22d9ad4 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -71,6 +71,7 @@ struct sclp_info { unsigned char has_gsls : 1; unsigned char has_ib : 1; unsigned char has_cei : 1; + unsigned char has_pfmfi : 1; unsigned int ibc; unsigned int mtid; unsigned int mtid_cp; -- cgit v1.2.1 From 9c375490fc812ebdf3259ea2566c271d00544fc2 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 24 Nov 2015 13:02:52 +0100 Subject: s390/sclp: detect interlock-and-broadcast-suppression facility Let's detect that facility. Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/sclp.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index bdb7f22d9ad4..99a0150d07b9 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -72,6 +72,7 @@ struct sclp_info { unsigned char has_ib : 1; unsigned char has_cei : 1; unsigned char has_pfmfi : 1; + unsigned char has_ibs : 1; unsigned int ibc; unsigned int mtid; unsigned int mtid_cp; -- cgit v1.2.1 From c427c42cd612719e8fb8b5891cc9761e7770024e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 10 May 2016 13:51:54 +0200 Subject: s390/mm: don't drop errors in get_guest_storage_key Commit 1e133ab296f3 ("s390/mm: split arch/s390/mm/pgtable.c") changed the return value of get_guest_storage_key to an unsigned char, resulting in -EFAULT getting interpreted as a valid storage key. Cc: stable@vger.kernel.org # 4.6+ Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/pgtable.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 18d2beb89340..42b968a85863 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -893,7 +893,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep); bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address); int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, unsigned char key, bool nq); -unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr); +unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr); /* * Certain architectures need to do special things when PTEs -- cgit v1.2.1 From 154c8c19c35b6da94a623cb793458e203572083d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 9 May 2016 11:22:34 +0200 Subject: s390/mm: return key via pointer in get_guest_storage_key Let's just split returning the key and reporting errors. This makes calling code easier and avoids bugs as happened already. 
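A caller sketch with the split signature (hedged; the error value is only an example):

unsigned char key;
int rc;

rc = get_guest_storage_key(current->mm, addr, &key);
if (rc)
	return rc;	/* e.g. -EFAULT, now reported separately from the key */
/* ... use key ... */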
Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/pgtable.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 42b968a85863..91f0e7b79821 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -893,7 +893,8 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep); bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address); int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, unsigned char key, bool nq); -unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr); +int get_guest_storage_key(struct mm_struct *mm, unsigned long addr, + unsigned char *key); /* * Certain architectures need to do special things when PTEs -- cgit v1.2.1 From 1824c723ac90f9870ebafae4b3b3e5f4b82ffeef Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 10 May 2016 09:43:11 +0200 Subject: KVM: s390: pfmf: support conditional-sske facility We already indicate that facility but don't implement it in our pfmf interception handler. Let's add a new storage key handling function for conditionally setting the guest storage key. As we will reuse this function later on, let's directly implement returning the old key via parameter and indicating if any change happened via rc. Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/pgtable.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 91f0e7b79821..2f6702e27db9 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -893,6 +893,9 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep); bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address); int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, unsigned char key, bool nq); +int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr, + unsigned char key, unsigned char *oldkey, + bool nq, bool mr, bool mc); int get_guest_storage_key(struct mm_struct *mm, unsigned long addr, unsigned char *key); -- cgit v1.2.1 From 238614515287c9400727e4cd7aa958649dcbf05f Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 24 Nov 2015 12:56:43 +0100 Subject: s390/sclp: detect storage-key facility Let's correctly detect that facility. 
Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/sclp.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 99a0150d07b9..2ad9c204b1a2 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -32,7 +32,8 @@ struct sclp_core_entry { u8 reserved0; u8 : 4; u8 sief2 : 1; - u8 : 3; + u8 skey : 1; + u8 : 2; u8 : 2; u8 gpere : 1; u8 siif : 1; @@ -73,6 +74,7 @@ struct sclp_info { unsigned char has_cei : 1; unsigned char has_pfmfi : 1; unsigned char has_ibs : 1; + unsigned char has_skey : 1; unsigned int ibc; unsigned int mtid; unsigned int mtid_cp; -- cgit v1.2.1 From a7e19ab55ffdd82f1a8d12694b9a0c0beeef534c Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 10 May 2016 09:50:21 +0200 Subject: KVM: s390: handle missing storage-key facility Without the storage-key facility, SIE won't interpret SSKE, ISKE and RRBE for us. So let's add proper interception handlers that will be called if lazy sske cannot be enabled. Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/page.h | 7 ++++--- arch/s390/include/asm/pgtable.h | 1 + 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 53eacbd4f09b..f874e7d51c19 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -109,13 +109,14 @@ static inline unsigned char page_get_storage_key(unsigned long addr) static inline int page_reset_referenced(unsigned long addr) { - unsigned int ipm; + int cc; asm volatile( " rrbe 0,%1\n" " ipm %0\n" - : "=d" (ipm) : "a" (addr) : "cc"); - return !!(ipm & 0x20000000); + " srl %0,28\n" + : "=d" (cc) : "a" (addr) : "cc"); + return cc; } /* Bits int the storage key */ diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 2f6702e27db9..9951e7e59756 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -896,6 +896,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr, unsigned char key, unsigned char *oldkey, bool nq, bool mr, bool mc); +int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr); int get_guest_storage_key(struct mm_struct *mm, unsigned long addr, unsigned char *key); -- cgit v1.2.1 From 414d3b07496604a4372466a6b474ca24291a143c Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 8 Mar 2016 11:52:54 +0100 Subject: s390/kvm: page table invalidation notifier Pass an address range to the page table invalidation notifier for KVM. This allows to notify changes that affect a larger virtual memory area, e.g. for 1MB pages. 
Reviewed-by: David Hildenbrand Signed-off-by: Martin Schwidefsky Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/gmap.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index d054c1b07a3c..bc0eadf9ed8e 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -39,7 +39,8 @@ struct gmap { */ struct gmap_notifier { struct list_head list; - void (*notifier_call)(struct gmap *gmap, unsigned long gaddr); + void (*notifier_call)(struct gmap *gmap, unsigned long start, + unsigned long end); }; struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit); -- cgit v1.2.1 From 8ecb1a59d6c6674bc98e4eee0c2482490748e21a Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 8 Mar 2016 11:54:14 +0100 Subject: s390/mm: use RCU for gmap notifier list and the per-mm gmap list The gmap notifier list and the gmap list in the mm_struct change rarely. Use RCU to optimize the reader of these lists. Reviewed-by: David Hildenbrand Signed-off-by: Martin Schwidefsky Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/gmap.h | 1 + arch/s390/include/asm/mmu.h | 11 +++++++---- arch/s390/include/asm/mmu_context.h | 3 ++- 3 files changed, 10 insertions(+), 5 deletions(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index bc0eadf9ed8e..2cf49624af99 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -39,6 +39,7 @@ struct gmap { */ struct gmap_notifier { struct list_head list; + struct rcu_head rcu; void (*notifier_call)(struct gmap *gmap, unsigned long start, unsigned long end); }; diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index 081b2ad99d73..b941528cc49e 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h @@ -8,8 +8,9 @@ typedef struct { cpumask_t cpu_attach_mask; atomic_t attach_count; unsigned int flush_mm; - spinlock_t list_lock; + spinlock_t pgtable_lock; struct list_head pgtable_list; + spinlock_t gmap_lock; struct list_head gmap_list; unsigned long asce; unsigned long asce_limit; @@ -22,9 +23,11 @@ typedef struct { unsigned int use_skey:1; } mm_context_t; -#define INIT_MM_CONTEXT(name) \ - .context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \ - .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \ +#define INIT_MM_CONTEXT(name) \ + .context.pgtable_lock = \ + __SPIN_LOCK_UNLOCKED(name.context.pgtable_lock), \ + .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \ + .context.gmap_lock = __SPIN_LOCK_UNLOCKED(name.context.gmap_lock), \ .context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list), static inline int tprot(unsigned long addr) diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index c837b79b455d..3ce3854b7a41 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -15,8 +15,9 @@ static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { - spin_lock_init(&mm->context.list_lock); + spin_lock_init(&mm->context.pgtable_lock); INIT_LIST_HEAD(&mm->context.pgtable_list); + spin_lock_init(&mm->context.gmap_lock); INIT_LIST_HEAD(&mm->context.gmap_list); cpumask_clear(&mm->context.cpu_attach_mask); atomic_set(&mm->context.attach_count, 0); -- cgit v1.2.1 From b2d73b2a0ad1c758cb0c1acb01a911744b845942 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 8 Mar 
2016 11:54:42 +0100 Subject: s390/mm: extended gmap pte notifier The current gmap pte notifier forces a pte into to a read-write state. If the pte is invalidated the gmap notifier is called to inform KVM that the mapping will go away. Extend this approach to allow read-write, read-only and no-access as possible target states and call the pte notifier for any change to the pte. This mechanism is used to temporarily set specific access rights for a pte without doing the heavy work of a true mprotect call. Reviewed-by: David Hildenbrand Signed-off-by: Martin Schwidefsky Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/gmap.h | 9 ++++++--- arch/s390/include/asm/pgtable.h | 2 ++ 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index 2cf49624af99..6897a0919446 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -59,8 +59,11 @@ void gmap_discard(struct gmap *, unsigned long from, unsigned long to); void __gmap_zap(struct gmap *, unsigned long gaddr); void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr); -void gmap_register_ipte_notifier(struct gmap_notifier *); -void gmap_unregister_ipte_notifier(struct gmap_notifier *); -int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len); +void gmap_register_pte_notifier(struct gmap_notifier *); +void gmap_unregister_pte_notifier(struct gmap_notifier *); +void gmap_pte_notify(struct mm_struct *, unsigned long addr, pte_t *); + +int gmap_mprotect_notify(struct gmap *, unsigned long start, + unsigned long len, int prot); #endif /* _ASM_S390_GMAP_H */ diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 9951e7e59756..35dde6afffcf 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -886,6 +886,8 @@ void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry); void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep); void ptep_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep); +int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr, + pte_t *ptep, int prot); void ptep_zap_unused(struct mm_struct *mm, unsigned long addr, pte_t *ptep , int reset); void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep); -- cgit v1.2.1 From 6ea427bbbd4078297bb1dbd6c5cb83f3f48aac46 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 8 Mar 2016 11:55:04 +0100 Subject: s390/mm: add reference counter to gmap structure Let's use a reference counter mechanism to control the lifetime of gmap structures. This will be needed for further changes related to gmap shadows. 
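A short usage sketch for the new reference counting, following the interface declared below:

struct gmap *g;

g = gmap_get(gmap);	/* take a reference while the gmap is in use */
/* ... work with g ... */
gmap_put(g);		/* drop the reference; the last put frees the gmap */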
Reviewed-by: David Hildenbrand Signed-off-by: Martin Schwidefsky Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/gmap.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index 6897a0919446..e69853ce55da 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -15,6 +15,7 @@ * @guest_to_host: radix tree with guest to host address translation * @host_to_guest: radix tree with pointer to segment table entries * @guest_table_lock: spinlock to protect all entries in the guest page table + * @ref_count: reference counter for the gmap structure * @table: pointer to the page directory * @asce: address space control element for gmap page table * @pfault_enabled: defines if pfaults are applicable for the guest @@ -26,6 +27,7 @@ struct gmap { struct radix_tree_root guest_to_host; struct radix_tree_root host_to_guest; spinlock_t guest_table_lock; + atomic_t ref_count; unsigned long *table; unsigned long asce; unsigned long asce_end; @@ -44,8 +46,11 @@ struct gmap_notifier { unsigned long end); }; -struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit); -void gmap_free(struct gmap *gmap); +struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit); +void gmap_remove(struct gmap *gmap); +struct gmap *gmap_get(struct gmap *gmap); +void gmap_put(struct gmap *gmap); + void gmap_enable(struct gmap *gmap); void gmap_disable(struct gmap *gmap); int gmap_map_segment(struct gmap *gmap, unsigned long from, -- cgit v1.2.1 From 4be130a08420d6918d80c1067f8078f425eb98df Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 8 Mar 2016 12:12:18 +0100 Subject: s390/mm: add shadow gmap support For a nested KVM guest the outer KVM host needs to create shadow page tables for the nested guest. This patch adds the basic support to the guest address space (gmap) code. For each guest address space the inner KVM host creates, the first outer KVM host needs to create shadow page tables. The address space is identified by the ASCE loaded into the control register 1 at the time the inner SIE instruction for the second nested KVM guest is executed. The outer KVM host creates the shadow tables starting with the table identified by the ASCE on a on-demand basis. The outer KVM host will get repeated faults for all the shadow tables needed to run the second KVM guest. While a shadow page table for the second KVM guest is active the access to the origin region, segment and page tables needs to be restricted for the first KVM guest. For region and segment and page tables the first KVM guest may read the memory, but write attempt has to lead to an unshadow. This is done using the page invalid and read-only bits in the page table of the first KVM guest. If the first guest re-accesses one of the origin pages of a shadow, it gets a fault and the affected parts of the shadow page table hierarchy needs to be removed again. PGSTE tables don't have to be shadowed, as all interpretation assist can't deal with the invalid bits in the shadow pte being set differently than the original ones provided by the first KVM guest. Many bug fixes and improvements by David Hildenbrand. 
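A hedged sketch of the on-demand flow described above, using the interfaces declared below (fault handling and variable names are assumptions):

struct gmap *sg;
int rc;

sg = gmap_shadow(parent_gmap, guest3_asce);	/* one shadow per nested ASCE */
if (!sg)
	return -ENOMEM;

/* a fault on a missing shadow region-second table would be resolved with: */
rc = gmap_shadow_r2t(sg, fault_address, guest_r2t_origin);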
Reviewed-by: David Hildenbrand Signed-off-by: Martin Schwidefsky Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/gmap.h | 52 ++++++++++++++++++++++++++++++++++++++- arch/s390/include/asm/pgalloc.h | 2 ++ arch/s390/include/asm/pgtable.h | 10 ++++++-- arch/s390/include/asm/processor.h | 1 + 4 files changed, 62 insertions(+), 3 deletions(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index e69853ce55da..58e65ee5b2d2 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -10,6 +10,7 @@ /** * struct gmap_struct - guest address space + * @list: list head for the mm->context gmap list * @crst_list: list of all crst tables used in the guest address space * @mm: pointer to the parent mm_struct * @guest_to_host: radix tree with guest to host address translation @@ -19,6 +20,13 @@ * @table: pointer to the page directory * @asce: address space control element for gmap page table * @pfault_enabled: defines if pfaults are applicable for the guest + * @host_to_rmap: radix tree with gmap_rmap lists + * @children: list of shadow gmap structures + * @pt_list: list of all page tables used in the shadow guest address space + * @shadow_lock: spinlock to protect the shadow gmap list + * @parent: pointer to the parent gmap for shadow guest address spaces + * @orig_asce: ASCE for which the shadow page table has been created + * @removed: flag to indicate if a shadow guest address space has been removed */ struct gmap { struct list_head list; @@ -33,8 +41,32 @@ struct gmap { unsigned long asce_end; void *private; bool pfault_enabled; + /* Additional data for shadow guest address spaces */ + struct radix_tree_root host_to_rmap; + struct list_head children; + struct list_head pt_list; + spinlock_t shadow_lock; + struct gmap *parent; + unsigned long orig_asce; + bool removed; }; +/** + * struct gmap_rmap - reverse mapping for shadow page table entries + * @next: pointer to next rmap in the list + * @raddr: virtual rmap address in the shadow guest address space + */ +struct gmap_rmap { + struct gmap_rmap *next; + unsigned long raddr; +}; + +#define gmap_for_each_rmap(pos, head) \ + for (pos = (head); pos; pos = pos->next) + +#define gmap_for_each_rmap_safe(pos, n, head) \ + for (pos = (head); n = pos ? 
pos->next : NULL, pos; pos = n) + /** * struct gmap_notifier - notify function block for page invalidation * @notifier_call: address of callback function @@ -46,6 +78,11 @@ struct gmap_notifier { unsigned long end); }; +static inline int gmap_is_shadow(struct gmap *gmap) +{ + return !!gmap->parent; +} + struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit); void gmap_remove(struct gmap *gmap); struct gmap *gmap_get(struct gmap *gmap); @@ -64,9 +101,22 @@ void gmap_discard(struct gmap *, unsigned long from, unsigned long to); void __gmap_zap(struct gmap *, unsigned long gaddr); void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr); +int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val); + +struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce); +int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t); +int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t); +int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt); +int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt); +int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, + unsigned long *pgt, int *dat_protection); +int gmap_shadow_page(struct gmap *sg, unsigned long saddr, + unsigned long paddr, int write); + void gmap_register_pte_notifier(struct gmap_notifier *); void gmap_unregister_pte_notifier(struct gmap_notifier *); -void gmap_pte_notify(struct mm_struct *, unsigned long addr, pte_t *); +void gmap_pte_notify(struct mm_struct *, unsigned long addr, pte_t *, + unsigned long bits); int gmap_mprotect_notify(struct gmap *, unsigned long start, unsigned long len, int prot); diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index da34cb6b1f3b..f4eb9843eed4 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -19,8 +19,10 @@ unsigned long *crst_table_alloc(struct mm_struct *); void crst_table_free(struct mm_struct *, unsigned long *); unsigned long *page_table_alloc(struct mm_struct *); +struct page *page_table_alloc_pgste(struct mm_struct *mm); void page_table_free(struct mm_struct *, unsigned long *); void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long); +void page_table_free_pgste(struct page *page); extern int page_table_allocate_pgste; static inline void clear_table(unsigned long *s, unsigned long val, size_t n) diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 35dde6afffcf..a6e7fc8f5b49 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -256,6 +256,7 @@ static inline int is_module_addr(void *addr) /* Bits in the region table entry */ #define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */ #define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */ +#define _REGION_ENTRY_OFFSET 0xc0 /* region table offset */ #define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */ #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */ #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */ @@ -327,6 +328,7 @@ static inline int is_module_addr(void *addr) #define PGSTE_GC_BIT 0x0002000000000000UL #define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */ #define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */ +#define PGSTE_VSIE_BIT 0x0000200000000000UL /* ref'd in a shadow table */ /* Guest Page State used for virtualization */ #define 
_PGSTE_GPS_ZERO 0x0000000080000000UL @@ -885,12 +887,16 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma, void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry); void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep); -void ptep_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep); +void ptep_notify(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long bits); int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr, - pte_t *ptep, int prot); + pte_t *ptep, int prot, unsigned long bit); void ptep_zap_unused(struct mm_struct *mm, unsigned long addr, pte_t *ptep , int reset); void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep); +int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr, + pte_t *sptep, pte_t *tptep, int write); +void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep); bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address); int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 9d4d311d7e52..94c80b6d031d 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -109,6 +109,7 @@ struct thread_struct { unsigned long ksp; /* kernel stack pointer */ mm_segment_t mm_segment; unsigned long gmap_addr; /* address of last gmap fault. */ + unsigned int gmap_write_flag; /* gmap fault write indication */ unsigned int gmap_pfault; /* signal of a pending guest pfault */ struct per_regs per_user; /* User specified PER registers */ struct per_event per_event; /* Cause of the last PER trap */ -- cgit v1.2.1 From a9d23e71d7716e394a772686bfd994f4e181b235 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 8 Mar 2016 12:21:41 +0100 Subject: s390/mm: shadow pages with real guest requested protection We really want to avoid manually handling protection for nested virtualization. By shadowing pages with the protection the guest asked us for, the SIE can handle most protection-related actions for us (e.g. special handling for MVPG) and we can directly forward protection exceptions to the guest. PTEs will now always be shadowed with the correct _PAGE_PROTECT flag. Unshadowing will take care of any guest changes to the parent PTE and any host changes to the host PTE. If the host PTE doesn't have the fitting access rights or is not available, we have to fix it up. 
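A hedged sketch of that fix-up loop (error codes and the resolve_host_fault() helper are assumptions, not the in-tree code):

do {
	rc = gmap_shadow_page(sg, saddr, pte);	/* pte carries the guest's protection */
	if (rc == -EAGAIN) {
		/* host PTE not present or lacking rights: fix up the host
		 * mapping first, then retry the shadowing */
		if (resolve_host_fault(sg->mm, saddr))
			return -EFAULT;
	}
} while (rc == -EAGAIN);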
Acked-by: Martin Schwidefsky Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/gmap.h | 3 +-- arch/s390/include/asm/pgtable.h | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index 58e65ee5b2d2..4a47055f58d7 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -110,8 +110,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt); int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt); int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, unsigned long *pgt, int *dat_protection); -int gmap_shadow_page(struct gmap *sg, unsigned long saddr, - unsigned long paddr, int write); +int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte); void gmap_register_pte_notifier(struct gmap_notifier *); diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index a6e7fc8f5b49..c7ebba483f09 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -895,7 +895,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr, pte_t *ptep , int reset); void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep); int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr, - pte_t *sptep, pte_t *tptep, int write); + pte_t *sptep, pte_t *tptep, pte_t pte); void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep); bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address); -- cgit v1.2.1 From 0f7f84891516dc1ff7500fae12143710d2d9d11f Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 8 Mar 2016 12:30:46 +0100 Subject: s390/mm: fix races on gmap_shadow creation Before any thread is allowed to use a gmap_shadow, it has to be fully initialized. However, for invalidation to work properly, we have to register the new gmap_shadow before we protect the parent gmap table. Because locking is tricky, and we have to avoid duplicate gmaps, let's introduce an initialized field that signals to other threads whether that gmap_shadow can already be used or whether they have to retry. Let's properly return errors using ERR_PTR() instead of simply returning NULL, so a caller can properly react to the error.
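A caller sketch for the new error convention (hedged):

struct gmap *sg;

sg = gmap_shadow(parent, asce);
if (IS_ERR(sg))
	return PTR_ERR(sg);	/* a real error code instead of a bare NULL check */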
Acked-by: Martin Schwidefsky Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/gmap.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index 4a47055f58d7..54a2487efce4 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -27,6 +27,7 @@ * @parent: pointer to the parent gmap for shadow guest address spaces * @orig_asce: ASCE for which the shadow page table has been created * @removed: flag to indicate if a shadow guest address space has been removed + * @initialized: flag to indicate if a shadow guest address space can be used */ struct gmap { struct list_head list; @@ -49,6 +50,7 @@ struct gmap { struct gmap *parent; unsigned long orig_asce; bool removed; + bool initialized; }; /** -- cgit v1.2.1 From 5b062bd4940f81e0bd26b0d75f56d7abebf0309f Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 8 Mar 2016 12:17:40 +0100 Subject: s390/mm: prepare for EDAT1/EDAT2 support in gmap shadow In preparation for EDAT1/EDAT2 support for gmap shadows, we have to store the requested edat level in the gmap shadow. The edat level used during shadow translation is a property of the gmap shadow. Depending on that level, the gmap shadow will look differently for the same guest tables. We have to store it internally in order to support it later. Acked-by: Martin Schwidefsky Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/gmap.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index 54a2487efce4..2ab397c8ca09 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -26,6 +26,7 @@ * @shadow_lock: spinlock to protect the shadow gmap list * @parent: pointer to the parent gmap for shadow guest address spaces * @orig_asce: ASCE for which the shadow page table has been created + * @edat_level: edat level to be used for the shadow translation * @removed: flag to indicate if a shadow guest address space has been removed * @initialized: flag to indicate if a shadow guest address space can be used */ @@ -49,6 +50,7 @@ struct gmap { spinlock_t shadow_lock; struct gmap *parent; unsigned long orig_asce; + int edat_level; bool removed; bool initialized; }; @@ -105,7 +107,8 @@ void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr) int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val); -struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce); +struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, + int edat_level); int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t); int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t); int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt); -- cgit v1.2.1 From fd8d4e3ab6993e194287a59c4d3a6a43da86b8dc Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 18 Apr 2016 13:24:52 +0200 Subject: s390/mm: support EDAT1 for gmap shadows If the guest is enabled for EDAT1, we can easily create shadows for guest2 -> guest3 provided tables that make use of EDAT1. If guest2 references a 1MB page, this memory looks consecutive for guest2, but it might not be so for us. Therefore we have to create fake page tables. We can easily add that to our existing infrastructure. 
The invalidation mechanism will make sure that fake page tables are removed when the parent table (sgt table entry) is changed. As EDAT1 also introduced protection on all page table levels, we also have to shadow these correctly. We don't have to care about: - ACCF-Validity Control in STE - Access-Control Bits in STE - Fetch-Protection Bit in STE - Common-Segment Bit in STE As all bits might be dropped and there is no guarantee that they are active ("unpredictable whether the CPU uses these bits", "may be used"). Without using EDAT1 in the shadow ourselves (STE-format control == 0), simply shadowing these bits would not be enough. They would be ignored. Please note that we are using the "fake" flag to make this look consistent with further changes (EDAT2, real-space designation support) and don't let the shadow functions handle fc=1 STEs. In the future, with huge pages in the host, gmap_shadow_pgt() could simply try to map a huge host page if "fake" is set to one and indicate via return value that no lower fake tables / shadow ptes are required. Acked-by: Martin Schwidefsky Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/gmap.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index 2ab397c8ca09..c8ba5a197b1d 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -112,9 +112,10 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t); int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t); int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt); -int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt); +int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt, + int fake); int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, - unsigned long *pgt, int *dat_protection); + unsigned long *pgt, int *dat_protection, int *fake); int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte); void gmap_register_pte_notifier(struct gmap_notifier *); -- cgit v1.2.1 From 18b89809881834cecd2977e6048a30c4c8f140fe Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 18 Apr 2016 13:42:05 +0200 Subject: s390/mm: support EDAT2 for gmap shadows If the guest is enabled for EDAT2, we can easily create shadows for guest2 -> guest3 provided tables that make use of EDAT2. If guest2 references a 2GB page, this memory looks consecutive for guest2, but it does not have to be so for us. Therefore we have to create fake segment and page tables. This works just like EDAT1 support, so page tables are removed when the parent table (r3t table entry) is changed. We don't have to care about: - ACCF-Validity Control in RTTE - Access-Control Bits in RTTE - Fetch-Protection Bit in RTTE - Common-Region Bit in RTTE Just like for EDAT1, all bits might be dropped and there is no guarantee that they are active.
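As a hedged sketch of how a consumer might act on the new "fake" flag (the helper and the offset arithmetic are assumptions; only the gmap_shadow_pgt_lookup() prototype above is from the patch): with fake == 1, pgt is not a guest-2 page-table origin but the origin of the 1 MB frame, so the guest-3 pte is fabricated instead of being read from guest memory.

static int shadow_pte_source(struct gmap *sg, unsigned long saddr,
                             unsigned long *pteval, int *dat_protection)
{
        unsigned long pgt;
        int fake, rc;

        rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, dat_protection, &fake);
        if (rc)
                return rc;              /* no shadow page table yet */
        if (fake) {
                /* pgt is the 1 MB frame origin: fabricate the guest-3 pte */
                *pteval = pgt + (saddr & 0xff000UL);
                return 0;
        }
        /* otherwise the real guest-3 pte would have to be read from guest-2
         * memory at pgt + 8 * page index (not shown here) */
        return 1;
}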
Acked-by: Martin Schwidefsky Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/gmap.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index c8ba5a197b1d..2e4c3b222a96 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -111,7 +111,8 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int edat_level); int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t); int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t); -int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt); +int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt, + int fake); int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt, int fake); int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, -- cgit v1.2.1 From 3218f7094b6b583f4f01bffcf84572c6beacdcc2 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 18 Apr 2016 16:22:24 +0200 Subject: s390/mm: support real-space for gmap shadows We can easily support real-space designation just like EDAT1 and EDAT2. So guest2 can provide for guest3 an asce with the real-space control being set. We simply have to allocate the biggest page table possible and fake all levels. There is no protection to consider. If we exceed guest memory, vsie code will inject an addressing exception (via program intercept). In the future, we could limit the fake table level to the gmap page table. As the top level page table can never go away, such gmap shadows will never get unshadowed, we'll have to come up with another way to limit the number of kept gmap shadows. Acked-by: Martin Schwidefsky Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/gmap.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index 2e4c3b222a96..752cf47a81ab 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -109,8 +109,10 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val); struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int edat_level); -int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t); -int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t); +int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t, + int fake); +int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t, + int fake); int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt, int fake); int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt, -- cgit v1.2.1 From 4a49443924731823da2e9b3ae9311b74a34e7ed8 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 8 Mar 2016 12:31:52 +0100 Subject: s390/mm: remember the int code for the last gmap fault For nested virtualization, we want to know if we are handling a protection exception, because these can directly be forwarded to the guest without additional checks. 
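A small sketch of how the new field might be filled and consumed (the helper names are assumptions; only the gmap_addr and gmap_int_code members of thread_struct are real); 0x04 is the program-interruption code of a protection exception:

static inline void record_gmap_fault(struct pt_regs *regs, unsigned long gaddr)
{
        current->thread.gmap_addr = gaddr;
        current->thread.gmap_int_code = regs->int_code & 0xffff;
}

static inline bool gmap_fault_was_protection(void)
{
        /* a protection exception can be forwarded to the guest as-is */
        return current->thread.gmap_int_code == 0x04;
}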
Acked-by: Martin Schwidefsky Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/processor.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 94c80b6d031d..8c2922f540f9 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -110,6 +110,7 @@ struct thread_struct { mm_segment_t mm_segment; unsigned long gmap_addr; /* address of last gmap fault. */ unsigned int gmap_write_flag; /* gmap fault write indication */ + unsigned int gmap_int_code; /* int code of last gmap fault */ unsigned int gmap_pfault; /* signal of a pending guest pfault */ struct per_regs per_user; /* User specified PER registers */ struct per_event per_event; /* Cause of the last PER trap */ -- cgit v1.2.1 From 5b6c963bcef5c3a857e3f8ba84aa9380069fc95f Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 27 May 2016 18:57:33 +0200 Subject: s390/mm: allow to check if a gmap shadow is valid It will be very helpful to have a mechanism to check without any locks if a given gmap shadow is still valid and matches the given properties. Acked-by: Martin Schwidefsky Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/gmap.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index 752cf47a81ab..c67fb854705e 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -109,6 +109,7 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val); struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int edat_level); +int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level); int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t, int fake); int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t, -- cgit v1.2.1 From 37d9df98b71afdf3baf41ee5451b6206c13328c6 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 11 Mar 2015 16:47:33 +0100 Subject: KVM: s390: backup the currently enabled gmap when scheduled out Nested virtualization will have to enable own gmaps. Current code would enable the wrong gmap whenever scheduled out and back in, therefore resulting in the wrong gmap being enabled. This patch reenables the last enabled gmap, therefore avoiding having to touch vcpu->arch.gmap when enabling a different gmap. 
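A sketch of the save/restore idea (the two function names are placeholders; gmap_enable(), gmap_disable(), gmap_get_enabled() and the enabled_gmap backup field are the pieces actually added or used by these patches):

static void vcpu_put_gmap(struct kvm_vcpu *vcpu)
{
        /* remember what was active - it may be a vsie shadow gmap rather
         * than vcpu->arch.gmap */
        vcpu->arch.enabled_gmap = gmap_get_enabled();
        gmap_disable(vcpu->arch.enabled_gmap);
}

static void vcpu_load_gmap(struct kvm_vcpu *vcpu)
{
        /* re-enable exactly the gmap that was in use when we were scheduled out */
        gmap_enable(vcpu->arch.enabled_gmap);
}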
Acked-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/gmap.h | 1 + arch/s390/include/asm/kvm_host.h | 2 ++ 2 files changed, 3 insertions(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index c67fb854705e..741ddba0bf11 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -94,6 +94,7 @@ void gmap_put(struct gmap *gmap); void gmap_enable(struct gmap *gmap); void gmap_disable(struct gmap *gmap); +struct gmap *gmap_get_enabled(void); int gmap_map_segment(struct gmap *gmap, unsigned long from, unsigned long to, unsigned long len); int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len); diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 9eed5c18a61c..96bef30e2e33 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -551,6 +551,8 @@ struct kvm_vcpu_arch { struct hrtimer ckc_timer; struct kvm_s390_pgm_info pgm; struct gmap *gmap; + /* backup location for the currently enabled gmap when scheduled out */ + struct gmap *enabled_gmap; struct kvm_guestdbg_info_arch guestdbg; unsigned long pfault_token; unsigned long pfault_select; -- cgit v1.2.1 From 3d84683bd737e397ae200e881a3230e469c59ad6 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 16 Nov 2015 10:45:03 +0100 Subject: s390: introduce page_to_virt() and pfn_to_virt() Acked-by: Heiko Carstens Acked-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/page.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index f874e7d51c19..b5edff32e547 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -147,6 +147,8 @@ static inline int devmem_is_allowed(unsigned long pfn) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) +#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) +#define page_to_virt(page) pfn_to_virt(page_to_pfn(page)) #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -- cgit v1.2.1 From a3508fbe9dc6dd3bece0c7bf889cc085a011738c Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 8 Jul 2015 13:19:48 +0200 Subject: KVM: s390: vsie: initial support for nested virtualization This patch adds basic support for nested virtualization on s390x, called VSIE (virtual SIE) and allows it to be used by the guest if the necessary facilities are supported by the hardware and enabled for the guest. In order to make this work, we have to shadow the sie control block provided by guest 2. In order to gain some performance, we have to reuse the same shadow blocks as good as possible. For now, we allow as many shadow blocks as we have VCPUs (that way, every VCPU can run the VSIE concurrently). We have to watch out for the prefix getting unmapped out of our shadow gmap and properly get the VCPU out of VSIE in that case, to fault the prefix pages back in. We use the PROG_REQUEST bit for that purpose. This patch is based on an initial prototype by Tobias Elpelt. 
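A greatly simplified sketch of the shadow-SCB cache this introduces (pinning, validity handling and eviction are omitted; the helper name and the radix-tree key are assumptions, struct kvm_s390_vsie is from the patch):

static struct page *find_vsie_page(struct kvm *kvm, unsigned long scb_gpa)
{
        struct kvm_s390_vsie *vsie = &kvm->arch.vsie;
        struct page *page;

        mutex_lock(&vsie->mutex);
        /* reuse a shadow SCB already set up for this guest-2 control block;
         * a miss would allocate a new page - bounded by KVM_MAX_VCPUS, so
         * every VCPU can run the vsie concurrently - and insert it here */
        page = radix_tree_lookup(&vsie->addr_to_page, scb_gpa >> 9);
        mutex_unlock(&vsie->mutex);
        return page;
}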
Acked-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 15 ++++++++++++++- arch/s390/include/uapi/asm/kvm.h | 1 + 2 files changed, 15 insertions(+), 1 deletion(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 96bef30e2e33..255609c86901 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -145,7 +145,7 @@ struct kvm_s390_sie_block { __u64 cputm; /* 0x0028 */ __u64 ckc; /* 0x0030 */ __u64 epoch; /* 0x0038 */ - __u8 reserved40[4]; /* 0x0040 */ + __u32 svcc; /* 0x0040 */ #define LCTL_CR0 0x8000 #define LCTL_CR6 0x0200 #define LCTL_CR9 0x0040 @@ -167,6 +167,9 @@ struct kvm_s390_sie_block { #define ICPT_INST 0x04 #define ICPT_PROGI 0x08 #define ICPT_INSTPROGI 0x0C +#define ICPT_EXTINT 0x14 +#define ICPT_VALIDITY 0x20 +#define ICPT_STOP 0x28 #define ICPT_OPEREXC 0x2C #define ICPT_PARTEXEC 0x38 #define ICPT_IOINST 0x40 @@ -281,6 +284,7 @@ struct kvm_vcpu_stat { u32 instruction_stsi; u32 instruction_stfl; u32 instruction_tprot; + u32 instruction_sie; u32 instruction_essa; u32 instruction_sthyi; u32 instruction_sigp_sense; @@ -637,6 +641,14 @@ struct sie_page2 { u8 reserved900[0x1000 - 0x900]; /* 0x0900 */ } __packed; +struct kvm_s390_vsie { + struct mutex mutex; + struct radix_tree_root addr_to_page; + int page_count; + int next; + struct page *pages[KVM_MAX_VCPUS]; +}; + struct kvm_arch{ void *sca; int use_esca; @@ -661,6 +673,7 @@ struct kvm_arch{ struct sie_page2 *sie_page2; struct kvm_s390_cpu_model model; struct kvm_s390_crypto crypto; + struct kvm_s390_vsie vsie; u64 epoch; /* subset of available cpu features enabled by user space */ DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS); diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index f0818d70d73d..62423b1931c0 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -98,6 +98,7 @@ struct kvm_s390_vm_cpu_machine { #define KVM_S390_VM_CPU_FEAT_NR_BITS 1024 #define KVM_S390_VM_CPU_FEAT_ESOP 0 +#define KVM_S390_VM_CPU_FEAT_SIEF2 1 struct kvm_s390_vm_cpu_feat { __u64 feat[16]; }; -- cgit v1.2.1 From c9bc1eabe5ee49f1be68550cc0bd907b55d9da8d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 25 Nov 2015 11:08:32 +0100 Subject: KVM: s390: vsie: support vector facility (SIMD) As soon as guest 2 is allowed to use the vector facility (indicated via STFLE), it can also enable it for guest 3. We have to take care of the satellite block that might be used when not relying on lazy vector copying (not the case for KVM).
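An illustrative sketch of the forwarding pattern (the ECA bit value and the helper availability are assumptions; only the gvrd field added below is from the patch, and STFLE facility 129 is the vector facility): the shadow control block lets guest 3 use vectors only if guest 2 itself may, and a real implementation would additionally have to translate and pin the guest-2 satellite block before exposing it.

static void shadow_vector_controls(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_sie_block *scb_o,
                                   struct kvm_s390_sie_block *scb_s)
{
        scb_s->eca &= ~0x00020000U;             /* assumed ECA vector-enable bit */
        scb_s->gvrd = 0;
        if (!test_kvm_facility(vcpu->kvm, 129)) /* vector facility for guest 2? */
                return;
        scb_s->eca |= scb_o->eca & 0x00020000U;
        /* scb_o->gvrd (the satellite block) would be pinned and shadowed here */
}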
Acked-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 255609c86901..190ad63291fb 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -229,7 +229,7 @@ struct kvm_s390_sie_block { __u8 reserved1e6[2]; /* 0x01e6 */ __u64 itdba; /* 0x01e8 */ __u64 riccbd; /* 0x01f0 */ - __u8 reserved1f8[8]; /* 0x01f8 */ + __u64 gvrd; /* 0x01f8 */ } __attribute__((packed)); struct kvm_s390_itdb { -- cgit v1.2.1 From 19c439b564b05939b83876a687bd48389d0aebb5 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 25 Nov 2015 11:02:26 +0100 Subject: KVM: s390: vsie: support 64-bit-SCAO Let's provide the 64-bit-SCAO facility to guest 2, so he can set up a SCA for guest 3 that has a 64 bit address. Please note that we already require the 64 bit SCAO for our vsie implementation, in order to forward the SCA directly (by pinning the page). Acked-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/uapi/asm/kvm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 62423b1931c0..c5e4537e96d9 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -99,6 +99,7 @@ struct kvm_s390_vm_cpu_machine { #define KVM_S390_VM_CPU_FEAT_NR_BITS 1024 #define KVM_S390_VM_CPU_FEAT_ESOP 0 #define KVM_S390_VM_CPU_FEAT_SIEF2 1 +#define KVM_S390_VM_CPU_FEAT_64BSCAO 2 struct kvm_s390_vm_cpu_feat { __u64 feat[16]; }; -- cgit v1.2.1 From 0615a326e066b580cf26d16a092ea54997dd6cbb Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 25 Nov 2015 09:59:49 +0100 Subject: KVM: s390: vsie: support shared IPTE-interlock facility As we forward the whole SCA provided by guest 2, we can directly forward SIIF if available. Acked-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/uapi/asm/kvm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index c5e4537e96d9..1d2e820f763d 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -100,6 +100,7 @@ struct kvm_s390_vm_cpu_machine { #define KVM_S390_VM_CPU_FEAT_ESOP 0 #define KVM_S390_VM_CPU_FEAT_SIEF2 1 #define KVM_S390_VM_CPU_FEAT_64BSCAO 2 +#define KVM_S390_VM_CPU_FEAT_SIIF 3 struct kvm_s390_vm_cpu_feat { __u64 feat[16]; }; -- cgit v1.2.1 From 77d18f6d47fbeaaceb15df9ab928757d5bb96ec6 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 24 Nov 2015 16:32:35 +0100 Subject: KVM: s390: vsie: support guest-PER-enhancement We can easily forward the guest-PER-enhancement facility to guest 2 if available. 
Acked-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/uapi/asm/kvm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 1d2e820f763d..98526ac114bf 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -101,6 +101,7 @@ struct kvm_s390_vm_cpu_machine { #define KVM_S390_VM_CPU_FEAT_SIEF2 1 #define KVM_S390_VM_CPU_FEAT_64BSCAO 2 #define KVM_S390_VM_CPU_FEAT_SIIF 3 +#define KVM_S390_VM_CPU_FEAT_GPERE 4 struct kvm_s390_vm_cpu_feat { __u64 feat[16]; }; -- cgit v1.2.1 From a1b7b9b286c0157748922526ecb353e550209833 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 24 Nov 2015 16:41:33 +0100 Subject: KVM: s390: vsie: support guest-storage-limit-suppression We can easily forward guest-storage-limit-suppression if available. One thing to care about is keeping the prefix properly mapped when gsls is toggled on/off or the mso changes in between. Therefore we had better remap the prefix on any mso change, just like we already do when the prefix changes. Acked-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/uapi/asm/kvm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 98526ac114bf..9ed07479714f 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -102,6 +102,7 @@ struct kvm_s390_vm_cpu_machine { #define KVM_S390_VM_CPU_FEAT_64BSCAO 2 #define KVM_S390_VM_CPU_FEAT_SIIF 3 #define KVM_S390_VM_CPU_FEAT_GPERE 4 +#define KVM_S390_VM_CPU_FEAT_GSLS 5 struct kvm_s390_vm_cpu_feat { __u64 feat[16]; }; -- cgit v1.2.1 From 5630a8e82b1ee4d13daa500c045603c5b4801fd9 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 24 Nov 2015 16:53:51 +0100 Subject: KVM: s390: vsie: support intervention-bypass We can easily enable intervention bypass for guest 2, so it can use it for guest 3. Acked-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/uapi/asm/kvm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 9ed07479714f..347f4f61b656 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -103,6 +103,7 @@ struct kvm_s390_vm_cpu_machine { #define KVM_S390_VM_CPU_FEAT_SIIF 3 #define KVM_S390_VM_CPU_FEAT_GPERE 4 #define KVM_S390_VM_CPU_FEAT_GSLS 5 +#define KVM_S390_VM_CPU_FEAT_IB 6 struct kvm_s390_vm_cpu_feat { __u64 feat[16]; }; -- cgit v1.2.1 From 13ee3f678b1117d7511a2c5e10549f7c37f4cadf Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 24 Nov 2015 16:54:37 +0100 Subject: KVM: s390: vsie: support conditional-external-interception We can easily enable cei for guest 2, so it can use it for guest 3.
Acked-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/uapi/asm/kvm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 347f4f61b656..7630dd70ed57 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -104,6 +104,7 @@ struct kvm_s390_vm_cpu_machine { #define KVM_S390_VM_CPU_FEAT_GPERE 4 #define KVM_S390_VM_CPU_FEAT_GSLS 5 #define KVM_S390_VM_CPU_FEAT_IB 6 +#define KVM_S390_VM_CPU_FEAT_CEI 7 struct kvm_s390_vm_cpu_feat { __u64 feat[16]; }; -- cgit v1.2.1 From 7fd7f39daa3da822122124730437c4f37e4d82de Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 24 Nov 2015 16:56:23 +0100 Subject: KVM: s390: vsie: support IBS interpretation We can easily enable ibs for guest 2, so he can use it for guest 3. Acked-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/uapi/asm/kvm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 7630dd70ed57..c128567d1cd3 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -105,6 +105,7 @@ struct kvm_s390_vm_cpu_machine { #define KVM_S390_VM_CPU_FEAT_GSLS 5 #define KVM_S390_VM_CPU_FEAT_IB 6 #define KVM_S390_VM_CPU_FEAT_CEI 7 +#define KVM_S390_VM_CPU_FEAT_IBS 8 struct kvm_s390_vm_cpu_feat { __u64 feat[16]; }; -- cgit v1.2.1 From adbf16985c387851fd3454ca34893705dbde7f98 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 27 May 2016 22:03:52 +0200 Subject: KVM: s390: vsie: speed up VCPU irq delivery when handling vsie Whenever we want to wake up a VCPU (e.g. when injecting an IRQ), we have to kick it out of vsie, so the request will be handled faster. Acked-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 190ad63291fb..946fc86202fd 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -549,6 +549,8 @@ struct kvm_guestdbg_info_arch { struct kvm_vcpu_arch { struct kvm_s390_sie_block *sie_block; + /* if vsie is active, currently executed shadow sie control block */ + struct kvm_s390_sie_block *vsie_block; unsigned int host_acrs[NUM_ACRS]; struct fpu host_fpregs; struct kvm_s390_local_interrupt local_int; -- cgit v1.2.1 From 5d3876a8bf4607b72cbe754278d19c68990b57a8 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 13 Apr 2016 17:06:50 +0200 Subject: KVM: s390: vsie: add indication for future features We have certain SIE features that we cannot support for now. Let's add these features, so user space can directly prepare to enable them, so we don't have to update yet another component. In addition, add a comment block, telling why it is for now not possible to forward/enable these features. 
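A hedged user-space sketch of probing such a feature bit once the structure has been read through the KVM cpu-model attribute interface (the ioctl plumbing and the helper name are assumptions; struct kvm_s390_vm_cpu_feat and the KVM_S390_VM_CPU_FEAT_* numbers come from the header):

static int s390_vm_cpu_feat_test(const struct kvm_s390_vm_cpu_feat *feat,
                                 unsigned int nr)
{
        /* bitmap semantics: bit nr lives in 64-bit word nr/64, at bit nr%64 */
        return (feat->feat[nr / 64] >> (nr & 63)) & 1;
}

/* e.g.: if (s390_vm_cpu_feat_test(&machine_feat, KVM_S390_VM_CPU_FEAT_SIGPIF)) ... */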
Acked-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/uapi/asm/kvm.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index c128567d1cd3..a2ffec4139ad 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -106,6 +106,10 @@ struct kvm_s390_vm_cpu_machine { #define KVM_S390_VM_CPU_FEAT_IB 6 #define KVM_S390_VM_CPU_FEAT_CEI 7 #define KVM_S390_VM_CPU_FEAT_IBS 8 +#define KVM_S390_VM_CPU_FEAT_SKEY 9 +#define KVM_S390_VM_CPU_FEAT_CMMA 10 +#define KVM_S390_VM_CPU_FEAT_PFMFI 11 +#define KVM_S390_VM_CPU_FEAT_SIGPIF 12 struct kvm_s390_vm_cpu_feat { __u64 feat[16]; }; -- cgit v1.2.1 From 6502a34cfd6695929086187f63fe670cc3050e68 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 21 Jun 2016 14:19:51 +0200 Subject: KVM: s390: allow user space to handle instr 0x0000 We will use illegal instruction 0x0000 for handling 2 byte sw breakpoints from user space. As it can be enabled dynamically via a capability, let's move setting of ICTL_OPEREXC to the post creation step, so we avoid any races when enabling that capability just while adding new cpus. Acked-by: Janosch Frank Reviewed-by: Cornelia Huck Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 946fc86202fd..183b01727de4 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -43,6 +43,7 @@ /* s390-specific vcpu->requests bit members */ #define KVM_REQ_ENABLE_IBS 8 #define KVM_REQ_DISABLE_IBS 9 +#define KVM_REQ_ICPT_OPEREXC 10 #define SIGP_CTRL_C 0x80 #define SIGP_CTRL_SCN_MASK 0x3f @@ -666,6 +667,7 @@ struct kvm_arch{ int user_cpu_state_ctrl; int user_sigp; int user_stsi; + int user_instr0; struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; wait_queue_head_t ipte_wq; int ipte_lock_count; -- cgit v1.2.1
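A hedged user-space sketch of how this is meant to be switched on; the capability name KVM_CAP_S390_USER_INSTR0 is how the rest of this series exposes the feature and is not visible in the hunk above, so treat it as an assumption here:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_user_instr0(int vm_fd)
{
        struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_INSTR0 };

        /* VM-wide capability; enable it before the VCPUs start running so the
         * ICTL_OPEREXC setup in the post-creation step cannot race */
        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}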