Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/kernel/early.c     3
-rw-r--r--  arch/s390/kernel/time.c     38
-rw-r--r--  arch/s390/kvm/interrupt.c    2
-rw-r--r--  arch/s390/kvm/sigp.c         7
4 files changed, 18 insertions, 32 deletions
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 8d15314381e0..cae14c499511 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -208,6 +208,9 @@ static noinline __init void detect_machine_type(void)
 		machine_flags |= MACHINE_FLAG_KVM;
 	else
 		machine_flags |= MACHINE_FLAG_VM;
+
+	/* Store machine flags for setting up lowcore early */
+	S390_lowcore.machine_flags = machine_flags;
 }
 
 static __init void early_pgm_check_handler(void)
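
Note on the early.c hunk: detect_machine_type() now mirrors the accumulated machine_flags into the boot CPU's lowcore, so code that runs before the regular lowcore setup can already tell whether the kernel is a z/VM or KVM guest. A minimal sketch of such an early consumer, assuming only the MACHINE_FLAG_* bits visible in this diff (the function and variable names below are illustrative, not part of the patch):

/*
 * Illustrative sketch: read the hypervisor type straight from the
 * lowcore copy stored by detect_machine_type(). Names are hypothetical.
 */
static int __initdata running_under_kvm;

static void __init early_check_hypervisor(void)
{
	if (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
		running_under_kvm = 1;
}
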
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index d4c8e9c47c81..6bff1a1d9060 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -182,12 +182,14 @@ static void timing_alert_interrupt(__u16 code)
 static void etr_reset(void);
 static void stp_reset(void);
 
-unsigned long read_persistent_clock(void)
+void read_persistent_clock(struct timespec *ts)
 {
-	struct timespec ts;
+	tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts);
+}
 
-	tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, &ts);
-	return ts.tv_sec;
+void read_boot_clock(struct timespec *ts)
+{
+	tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, ts);
 }
 
 static cycle_t read_tod_clock(struct clocksource *cs)
@@ -205,6 +207,10 @@ static struct clocksource clocksource_tod = {
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+struct clocksource * __init clocksource_default_clock(void)
+{
+	return &clocksource_tod;
+}
 
 void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
 {
@@ -242,10 +248,6 @@ void update_vsyscall_tz(void)
  */
 void __init time_init(void)
 {
-	struct timespec ts;
-	unsigned long flags;
-	cycle_t now;
-
 	/* Reset time synchronization interfaces. */
 	etr_reset();
 	stp_reset();
@@ -261,26 +263,6 @@ void __init time_init(void)
 	if (clocksource_register(&clocksource_tod) != 0)
 		panic("Could not register TOD clock source");
 
-	/*
-	 * The TOD clock is an accurate clock. The xtime should be
-	 * initialized in a way that the difference between TOD and
-	 * xtime is reasonably small. Too bad that timekeeping_init
-	 * sets xtime.tv_nsec to zero. In addition the clock source
-	 * change from the jiffies clock source to the TOD clock
-	 * source add another error of up to 1/HZ second. The same
-	 * function sets wall_to_monotonic to a value that is too
-	 * small for /proc/uptime to be accurate.
-	 * Reset xtime and wall_to_monotonic to sane values.
-	 */
-	write_seqlock_irqsave(&xtime_lock, flags);
-	now = get_clock();
-	tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime);
-	clocksource_tod.cycle_last = now;
-	clocksource_tod.raw_time = xtime;
-	tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts);
-	set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec);
-	write_sequnlock_irqrestore(&xtime_lock, flags);
-
 	/* Enable TOD clock interrupts on the boot cpu. */
 	init_cpu_timer();
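
Note on the time.c hunks: read_persistent_clock() now fills a struct timespec instead of returning whole seconds, read_boot_clock() reports the TOD value captured at IPL (sched_clock_base_cc), and clocksource_default_clock() hands the TOD clocksource to the generic code. Because the core timekeeping code can initialize xtime and wall_to_monotonic with full resolution from these hooks, the manual fix-up under xtime_lock in time_init() is no longer needed. A rough sketch of the call pattern on the generic side, assuming the timekeeping interfaces of this kernel generation (this is not the actual kernel/time/timekeeping.c code):

/*
 * Rough sketch of the generic consumer: the architecture hooks return
 * full timespec values, so boot-time initialization keeps sub-second
 * accuracy without an architecture-specific correction afterwards.
 */
static void __init timekeeping_init_sketch(void)
{
	struct timespec now, boot;

	read_persistent_clock(&now);	/* current TOD-derived wall time */
	read_boot_clock(&boot);		/* wall time at IPL/boot */

	/* ... seed xtime from 'now' and derive wall_to_monotonic from 'boot' ... */
}
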
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index f04f5301b1b4..4d613415c435 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -386,7 +386,7 @@ no_timer:
 	}
 	__unset_cpu_idle(vcpu);
 	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&vcpu->wq, &wait);
+	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
 	spin_unlock_bh(&vcpu->arch.local_int.lock);
 	spin_unlock(&vcpu->arch.local_int.float_int->lock);
 	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
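
Note on the interrupt.c hunk: kvm_s390_handle_wait() adds its wait queue entry to vcpu->arch.local_int.wq, so it must be removed from that same queue; removing it from vcpu->wq left a stale entry behind. A simplified sketch of the pairing this change restores, with the locking and the real wait condition omitted (interrupt_pending() below is a hypothetical placeholder, not a kernel function):

/*
 * Simplified sketch: add_wait_queue() and remove_wait_queue() must
 * operate on the same wait queue head. Locking omitted for brevity.
 */
static void wait_sketch(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
	while (!interrupt_pending(vcpu)) {	/* hypothetical condition */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);	/* same queue */
}
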
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 36678835034d..0ef81d6776e9 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -169,7 +169,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 			     unsigned long *reg)
 {
 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
-	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_local_interrupt *li = NULL;
 	struct kvm_s390_interrupt_info *inti;
 	int rc;
 	u8 tmp;
@@ -189,9 +189,10 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 		return 2; /* busy */
 
 	spin_lock(&fi->lock);
-	li = fi->local_int[cpu_addr];
+	if (cpu_addr < KVM_MAX_VCPUS)
+		li = fi->local_int[cpu_addr];
 
-	if ((cpu_addr >= KVM_MAX_VCPUS) || (li == NULL)) {
+	if (li == NULL) {
 		rc = 1; /* incorrect state */
 		*reg &= SIGP_STAT_INCORRECT_STATE;
 		kfree(inti);
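
Note on the sigp.c hunk: __sigp_set_prefix() previously indexed fi->local_int[] with the guest-supplied cpu_addr before checking it against KVM_MAX_VCPUS, an out-of-bounds read for bad addresses. With li initialized to NULL and the bounds check done before the lookup, an invalid address now falls through to the existing "incorrect state" path. A self-contained sketch of the check-before-index pattern (the names and the array here are illustrative, not the kernel's):

/*
 * Self-contained sketch of the ordering the fix enforces: validate the
 * index first, only then dereference the table. Illustrative names.
 */
#include <stddef.h>

#define MAX_VCPUS 64

struct local_int;				/* opaque for this sketch */
extern struct local_int *local_int_table[MAX_VCPUS];

static struct local_int *lookup_local_int(unsigned int cpu_addr)
{
	struct local_int *li = NULL;

	if (cpu_addr < MAX_VCPUS)		/* bounds check before indexing */
		li = local_int_table[cpu_addr];

	return li;				/* NULL means "incorrect state" */
}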