diff options
author | Lennart Poettering <lennart@poettering.net> | 2023-04-18 10:38:15 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-04-18 10:38:15 +0200 |
commit | 766c30a3b5883029d48084ef9304dfa6c23574d8 (patch) | |
tree | 3a88f170c30b3a5c6816bee62f92068f2b276bfb /src | |
parent | ee0e6e476e61d4baa2a18e241d212753e75003bf (diff) | |
parent | 2a3ae5fae09a48387d40768faf0ba5a837dc0513 (diff) | |
download | systemd-766c30a3b5883029d48084ef9304dfa6c23574d8.tar.gz |
Merge pull request #27256 from medhefgo/boot-rdtsc
boot: Improve timer frequency detection
Diffstat (limited to 'src')
-rw-r--r-- | src/boot/efi/ticks.c | 85 |
1 files changed, 57 insertions, 28 deletions
diff --git a/src/boot/efi/ticks.c b/src/boot/efi/ticks.c index 13972528cd..f902b83a99 100644 --- a/src/boot/efi/ticks.c +++ b/src/boot/efi/ticks.c @@ -4,59 +4,91 @@ #include "util.h" #include "vmm.h" -#ifdef __x86_64__ -static uint64_t ticks_read(void) { - uint64_t a, d; +#if defined(__i386__) || defined(__x86_64__) +# include <cpuid.h> +static uint64_t ticks_read_arch(void) { /* The TSC might or might not be virtualized in VMs (and thus might not be accurate or start at zero * at boot), depending on hypervisor and CPU functionality. If it's not virtualized it's not useful * for keeping time, hence don't attempt to use it. */ if (in_hypervisor()) return 0; - __asm__ volatile ("rdtsc" : "=a" (a), "=d" (d)); - return (d << 32) | a; + return __builtin_ia32_rdtsc(); } -#elif defined(__i386__) -static uint64_t ticks_read(void) { - uint64_t val; - if (in_hypervisor()) +static uint64_t ticks_freq_arch(void) { + /* Detect TSC frequency from CPUID information if available. */ + + unsigned max_leaf, ebx, ecx, edx; + if (__get_cpuid(0, &max_leaf, &ebx, &ecx, &edx) == 0) return 0; - __asm__ volatile ("rdtsc" : "=A" (val)); - return val; + /* Leaf 0x15 is Intel only. */ + if (max_leaf < 0x15 || ebx != signature_INTEL_ebx || ecx != signature_INTEL_ecx || + edx != signature_INTEL_edx) + return 0; + + unsigned denominator, numerator, crystal_hz; + __cpuid(0x15, denominator, numerator, crystal_hz, edx); + if (denominator == 0 || numerator == 0) + return 0; + + uint64_t freq = crystal_hz; + if (crystal_hz == 0) { + /* If the crystal frequency is not available, try to deduce it from + * the processor frequency leaf if available. 
*/ + if (max_leaf < 0x16) + return 0; + + unsigned core_mhz; + __cpuid(0x16, core_mhz, ebx, ecx, edx); + freq = core_mhz * 1000ULL * 1000ULL * denominator / numerator; + } + + return freq * numerator / denominator; } + #elif defined(__aarch64__) -static uint64_t ticks_read(void) { + +static uint64_t ticks_read_arch(void) { uint64_t val; asm volatile("mrs %0, cntvct_el0" : "=r"(val)); return val; } -#else -static uint64_t ticks_read(void) { - return 0; -} -#endif -#if defined(__aarch64__) -static uint64_t ticks_freq(void) { +static uint64_t ticks_freq_arch(void) { uint64_t freq; asm volatile("mrs %0, cntfrq_el0" : "=r"(freq)); return freq; } + #else -/* count TSC ticks during a millisecond delay */ + +static uint64_t ticks_read_arch(void) { + return 0; +} + +static uint64_t ticks_freq_arch(void) { + return 0; +} + +#endif + static uint64_t ticks_freq(void) { - uint64_t ticks_start, ticks_end; static uint64_t cache = 0; if (cache != 0) return cache; - ticks_start = ticks_read(); + cache = ticks_freq_arch(); + if (cache != 0) + return cache; + + /* As a fallback, count ticks during a millisecond delay. */ + uint64_t ticks_start = ticks_read_arch(); BS->Stall(1000); - ticks_end = ticks_read(); + uint64_t ticks_end = ticks_read_arch(); if (ticks_end < ticks_start) /* Check for an overflow (which is not that unlikely, given on some * archs the value is 32bit) */ @@ -65,16 +97,13 @@ static uint64_t ticks_freq(void) { cache = (ticks_end - ticks_start) * 1000UL; return cache; } -#endif uint64_t time_usec(void) { - uint64_t ticks, freq; - - ticks = ticks_read(); + uint64_t ticks = ticks_read_arch(); if (ticks == 0) return 0; - freq = ticks_freq(); + uint64_t freq = ticks_freq(); if (freq == 0) return 0; |