
b/sys/amd64/amd64/initcpu.c (+18 lines)
@@ -255,6 +255,7 @@ initializecpu(void)
 {
 	uint64_t msr;
 	uint32_t cr4;
+	u_int r[4];
 
 	cr4 = rcr4();
 	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
@@ -317,6 +318,23 @@ initializecpu(void)
 	if ((amd_feature & AMDID_RDTSCP) != 0 ||
 	    (cpu_stdext_feature2 & CPUID_STDEXT2_RDPID) != 0)
 		wrmsr(MSR_TSC_AUX, cpu_auxmsr());
+
+	if (cpu_high >= 0x1a) {
+		cpuid_count(0x1a, 0, r);
+		if ((r[0] & 0xff000000) == 0x20000000)
+			PCPU_SET(small, 1);
+	}
+}
+
+void
+invlpgXX(vm_offset_t va)
+{
+	if (PCPU_GET(small)) {
+		struct invpcid_descr d = { 0 };
+		invpcid(&d, INVPCID_CTXGLOB);
+	} else {
+		invlpg(va);
+	}
 }
 
 void
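For reference, the initializecpu() hunk above keys off CPUID leaf 0x1A (Hybrid Information): EAX bits 31:24 report the core type, and the value 0x20 identifies an Intel Atom (E-) core, which is what the (r[0] & 0xff000000) == 0x20000000 test matches before setting the per-CPU pc_small flag. The standalone sketch below, assuming a GCC/Clang toolchain that provides <cpuid.h>, performs the same check from userland; it is illustrative only and not part of this patch. Pinning it to a particular CPU (e.g. with cpuset(1)) shows which cores of a hybrid part report the Atom core type.

#include <cpuid.h>
#include <stdio.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x1A is only meaningful if the CPU enumerates it at all. */
	if (__get_cpuid_max(0, NULL) < 0x1a ||
	    !__get_cpuid_count(0x1a, 0, &eax, &ebx, &ecx, &edx)) {
		printf("CPUID leaf 0x1A not supported\n");
		return (0);
	}
	/* Core type is EAX[31:24]; 0x20 means Intel Atom (E-core). */
	if ((eax & 0xff000000) == 0x20000000)
		printf("this CPU reports the Atom (E-core) core type\n");
	else
		printf("core type field is 0x%02x (not Atom)\n", eax >> 24);
	return (0);
}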
b/sys/amd64/amd64/mp_machdep.c (-2 / +8 lines)
@@ -864,7 +864,10 @@ invlpg_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
 	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
 #endif /* COUNT_IPIS */
 
-	invlpg(smp_tlb_addr1);
+	if (smp_tlb_pmap == kernel_pmap)
+		invlpgXX(smp_tlb_addr1);
+	else
+		invlpg(smp_tlb_addr1);
 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
 	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
@@ -935,7 +938,10 @@ invlrng_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
 
 	addr = smp_tlb_addr1;
 	do {
-		invlpg(addr);
+		if (smp_tlb_pmap == kernel_pmap)
+			invlpgXX(addr);
+		else
+			invlpg(addr);
 		addr += PAGE_SIZE;
 	} while (addr < smp_tlb_addr2);
 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
b/sys/amd64/amd64/pmap.c (-6 / +6 lines)
@@ -2797,7 +2797,7 @@ pmap_update_pde_invalidate(pmap_t pmap, vm_offset_t va, pd_entry_t newpde)
 
 	if ((newpde & PG_PS) == 0)
 		/* Demotion: flush a specific 2MB page mapping. */
-		invlpg(va);
+		invlpgXX(va);
 	else if ((newpde & PG_G) == 0)
 		/*
 		 * Promotion: flush every 4KB page mapping from the TLB
@@ -3136,7 +3136,7 @@ pmap_invalidate_page_curcpu_cb(pmap_t pmap, vm_offset_t va,
     vm_offset_t addr2 __unused)
 {
 	if (pmap == kernel_pmap) {
-		invlpg(va);
+		invlpgXX(va);
 	} else if (pmap == PCPU_GET(curpmap)) {
 		invlpg(va);
 		pmap_invalidate_page_cb(pmap, va);
@@ -3228,7 +3228,7 @@ pmap_invalidate_range_curcpu_cb(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 
 	if (pmap == kernel_pmap) {
 		for (addr = sva; addr < eva; addr += PAGE_SIZE)
-			invlpg(addr);
+			invlpgXX(addr);
 	} else if (pmap == PCPU_GET(curpmap)) {
 		for (addr = sva; addr < eva; addr += PAGE_SIZE)
 			invlpg(addr);
@@ -7651,7 +7651,7 @@ pmap_kenter_temporary(vm_paddr_t pa, int i)
 
 	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
 	pmap_kenter(va, pa);
-	invlpg(va);
+	invlpgXX(va);
 	return ((void *)crashdumpmap);
 }
 
@@ -10354,7 +10354,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
 				    page[i]->md.pat_mode, 0);
 				pte_store(pte, paddr | X86_PG_RW | X86_PG_V |
 				    cache_bits);
-				invlpg(vaddr[i]);
+				invlpgXX(vaddr[i]);
 			}
 		}
 	}
@@ -10403,7 +10403,7 @@ pmap_quick_remove_page(vm_offset_t addr)
 	if (addr != qframe)
 		return;
 	pte_store(vtopte(qframe), 0);
-	invlpg(qframe);
+	invlpgXX(qframe);
 	mtx_unlock_spin(&qframe_mtx);
 }
 
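All of the pmap.c hunks above apply the same rule by hand: invalidations of kernel (global) mappings go through invlpgXX(), while per-process pmaps keep using plain invlpg(). A hypothetical wrapper along the following lines (the name and placement are mine, not the patch's; it assumes the usual pmap.c headers) expresses that decision in one place:

static __inline void
pmap_invlpg_example(pmap_t pmap, vm_offset_t va)
{

	if (pmap == kernel_pmap) {
		/* Kernel mappings are global; take the INVPCID path on "small" cores. */
		invlpgXX(va);
	} else {
		/* Per-process mappings keep the ordinary single-page INVLPG. */
		invlpg(va);
	}
}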
b/sys/amd64/include/md_var.h (+1 lines)
@@ -99,6 +99,7 @@ void get_fpcontext(struct thread *td, struct __mcontext *mcp,
 int	set_fpcontext(struct thread *td, struct __mcontext *mcp,
 	    char *xfpustate, size_t xfpustate_len);
 
+void invlpgXX(vm_offset_t va);
 #endif /* !_MACHINE_MD_VAR_H_ */
 
 #endif /* __i386__ */
b/sys/amd64/include/pcpu.h (-1 / +2 lines)
@@ -99,7 +99,8 @@ _Static_assert(sizeof(struct monitorbuf) == 128, "2x cache line");
 	uint32_t pc_smp_tlb_gen;					\
 	u_int	pc_smp_tlb_op;						\
 	uint64_t pc_ucr3_load_mask;					\
-	char	__pad[2916]		/* pad to UMA_PCPU_ALLOC_SIZE */
+	u_int	pc_small;						\
+	char	__pad[2912]		/* pad to UMA_PCPU_ALLOC_SIZE */
 
 #define	PC_DBREG_CMD_NONE	0
 #define	PC_DBREG_CMD_LOAD	1
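The pcpu.h hunk keeps the amd64 PCPU_MD_FIELDS block the same overall size: the new u_int pc_small member is 4 bytes, so the trailing pad that rounds struct pcpu up to UMA_PCPU_ALLOC_SIZE shrinks by the same amount, from 2916 to 2912 bytes. A minimal compile-time check of that arithmetic (illustrative only, not part of the patch) would be:

_Static_assert(sizeof(unsigned int) == 4, "u_int pc_small occupies 4 bytes");
_Static_assert(2912 + sizeof(unsigned int) == 2916,
    "__pad shrinks by exactly the size of the new pc_small field");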
