@@ -57,7 +57,8 @@ int tsc_perf_stat;
 static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;
 
 SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
-    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");
+    &tsc_is_invariant, 0,
+    "Indicates whether the TSC is ACPI P-, C- and T-state invariant");
 TUNABLE_INT("kern.timecounter.invariant_tsc", &tsc_is_invariant);
 
 #ifdef SMP
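The hunk above only rewords and rewraps the sysctl description; kern.timecounter.invariant_tsc remains a read-only knob backed by tsc_is_invariant, and the TUNABLE_INT line lets an administrator assert an invariant TSC from /boot/loader.conf on hardware the probe does not recognize. As a small illustration (not part of the patch), the OID can be read from userland with sysctlbyname(3):

/*
 * Illustration only, not part of the patch: read the
 * kern.timecounter.invariant_tsc OID declared by the SYSCTL_INT above.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int invariant;
	size_t len = sizeof(invariant);

	if (sysctlbyname("kern.timecounter.invariant_tsc", &invariant, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (EXIT_FAILURE);
	}
	printf("TSC is %sinvariant\n", invariant != 0 ? "" : "not ");
	return (EXIT_SUCCESS);
}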
@@ -272,9 +273,7 @@ probe_tsc_freq(void)
 
 	switch (cpu_vendor_id) {
 	case CPU_VENDOR_AMD:
-		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
-		    (vm_guest == VM_GUEST_NO &&
-		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
+		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0)
 			tsc_is_invariant = 1;
 		if (cpu_feature & CPUID_SSE2) {
 			tsc_timecounter.tc_get_timecount =
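Before this hunk the AMD case also treated any bare-metal part of family 0x10 or newer as having an invariant TSC even without the CPUID bit; afterwards only AMDPM_TSC_INVARIANT counts. To make the removed comparison concrete, here is a hedged sketch of how a display family such as 0x10 is derived from the raw cpu_id signature; cpuid_family() below is a hypothetical stand-in for the kernel's CPUID_TO_FAMILY() macro from <machine/specialreg.h>, not the macro itself:

/*
 * Hedged sketch: compute the display family from a raw CPUID signature
 * the conventional way (the extended family field only applies when the
 * base family is 0xf).  cpuid_family() is a stand-in, not the kernel macro.
 */
#include <stdio.h>

static unsigned int
cpuid_family(unsigned int cpu_id)
{
	unsigned int base = (cpu_id >> 8) & 0xf;
	unsigned int ext = (cpu_id >> 20) & 0xff;

	return (base == 0xf ? base + ext : base);
}

int
main(void)
{
	/* 0x00100f42: an example AMD family 0x10 signature (base 0xf + ext 0x1). */
	printf("family 0x%x\n", cpuid_family(0x00100f42));
	return (0);
}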
@@ -282,12 +281,7 @@ probe_tsc_freq(void)
 		}
 		break;
 	case CPU_VENDOR_INTEL:
-		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
-		    (vm_guest == VM_GUEST_NO &&
-		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
-		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
-		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
-		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
+		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0)
 			tsc_is_invariant = 1;
 		if (cpu_feature & CPUID_SSE2) {
 			tsc_timecounter.tc_get_timecount =
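With this hunk the Intel case, like the AMD case above, drops its family/model table and trusts only amd_pminfo & AMDPM_TSC_INVARIANT. amd_pminfo caches EDX of CPUID leaf 0x80000007, where bit 8 advertises an invariant TSC on both vendors. A hedged, userland-only sketch of the same check, using the compiler's <cpuid.h> helper rather than the kernel's do_cpuid():

/*
 * Hedged sketch: query CPUID leaf 0x80000007 and test EDX bit 8, the
 * invariant-TSC flag that the kernel caches in amd_pminfo and tests
 * with AMDPM_TSC_INVARIANT.
 */
#include <cpuid.h>
#include <stdio.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 0x80000007 not available\n");
		return (1);
	}
	printf("invariant TSC: %s\n", (edx & (1u << 8)) != 0 ? "yes" : "no");
	return (0);
}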
@@ -554,20 +548,6 @@ init_TSC_tc(void)
 	}
 
 	/*
-	 * We cannot use the TSC if it stops incrementing in deep sleep.
-	 * Currently only Intel CPUs are known for this problem unless
-	 * the invariant TSC bit is set.
-	 */
-	if (cpu_can_deep_sleep && cpu_vendor_id == CPU_VENDOR_INTEL &&
-	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
-		tsc_timecounter.tc_quality = -1000;
-		tsc_timecounter.tc_flags |= TC_FLAGS_C3STOP;
-		if (bootverbose)
-			printf("TSC timecounter disabled: C3 enabled.\n");
-		goto init;
-	}
-
-	/*
 	 * We can not use the TSC in SMP mode unless the TSCs on all CPUs
 	 * are synchronized. If the user is sure that the system has
 	 * synchronized TSCs, set kern.timecounter.smp_tsc tunable to a
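The block deleted in this hunk demoted the TSC on Intel CPUs that can enter deep sleep without the invariant-TSC bit: a negative tc_quality keeps a timecounter out of automatic selection, and TC_FLAGS_C3STOP records that it stops counting in C3. As an illustration (not part of the patch), the resulting ranking and the counter currently in use can be inspected from userland through the kern.timecounter.choice and kern.timecounter.hardware sysctls:

/*
 * Illustration only: print the registered timecounters with their
 * quality values and the one currently selected.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>

static void
print_string_sysctl(const char *name)
{
	char buf[1024];
	size_t len = sizeof(buf);

	if (sysctlbyname(name, buf, &len, NULL, 0) == -1) {
		perror(name);
		return;
	}
	printf("%s: %.*s\n", name, (int)len, buf);
}

int
main(void)
{
	print_string_sysctl("kern.timecounter.choice");
	print_string_sysctl("kern.timecounter.hardware");
	return (0);
}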