diff --git a/sys/x86/x86/tsc.c b/sys/x86/x86/tsc.c
index 2a6c81d..a30424e 100644
--- a/sys/x86/x86/tsc.c
+++ b/sys/x86/x86/tsc.c
@@ -57,7 +57,8 @@ int	tsc_perf_stat;
 static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;
 
 SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
-    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");
+    &tsc_is_invariant, 0,
+    "Indicates whether the TSC is ACPI P-, C- and T-state invariant");
 TUNABLE_INT("kern.timecounter.invariant_tsc", &tsc_is_invariant);
 
 #ifdef SMP
@@ -272,9 +273,7 @@ probe_tsc_freq(void)
 
 	switch (cpu_vendor_id) {
 	case CPU_VENDOR_AMD:
-		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
-		    (vm_guest == VM_GUEST_NO &&
-		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
+		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0)
 			tsc_is_invariant = 1;
 		if (cpu_feature & CPUID_SSE2) {
 			tsc_timecounter.tc_get_timecount =
@@ -282,12 +281,7 @@ probe_tsc_freq(void)
 		}
 		break;
 	case CPU_VENDOR_INTEL:
-		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
-		    (vm_guest == VM_GUEST_NO &&
-		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
-		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
-		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
-		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
+		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0)
 			tsc_is_invariant = 1;
 		if (cpu_feature & CPUID_SSE2) {
 			tsc_timecounter.tc_get_timecount =
@@ -554,20 +548,6 @@ init_TSC_tc(void)
 	}
 
 	/*
-	 * We cannot use the TSC if it stops incrementing in deep sleep.
-	 * Currently only Intel CPUs are known for this problem unless
-	 * the invariant TSC bit is set.
-	 */
-	if (cpu_can_deep_sleep && cpu_vendor_id == CPU_VENDOR_INTEL &&
-	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
-		tsc_timecounter.tc_quality = -1000;
-		tsc_timecounter.tc_flags |= TC_FLAGS_C3STOP;
-		if (bootverbose)
-			printf("TSC timecounter disabled: C3 enabled.\n");
-		goto init;
-	}
-
-	/*
 	 * We can not use the TSC in SMP mode unless the TSCs on all CPUs
 	 * are synchronized.  If the user is sure that the system has
 	 * synchronized TSCs, set kern.timecounter.smp_tsc tunable to a
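
With the family/model heuristics removed, the patch relies solely on the architectural
invariant-TSC bit carried in amd_pminfo, i.e. CPUID leaf 0x80000007 (Advanced Power
Management), EDX bit 8, which FreeBSD defines as AMDPM_TSC_INVARIANT (0x00000100) in
<machine/specialreg.h>. Below is a minimal userland sketch of that probe, assuming a
GCC/Clang toolchain on x86; it is illustrative only and not part of the patch.

/*
 * Userland sketch: query CPUID leaf 0x80000007 and test EDX bit 8,
 * the same bit the kernel's amd_pminfo check above keys on.
 */
#include <stdio.h>
#include <cpuid.h>

#define AMDPM_TSC_INVARIANT	0x00000100	/* value from <machine/specialreg.h> */

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if the requested leaf is unsupported. */
	if (__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx) == 0) {
		printf("CPUID leaf 0x80000007 not supported\n");
		return (1);
	}
	printf("TSC is %sinvariant\n",
	    (edx & AMDPM_TSC_INVARIANT) != 0 ? "" : "not ");
	return (0);
}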