View | Details | Raw Unified | Return to bug 179282
Collapse All | Expand All

(-)b/sys/amd64/amd64/exception.S (+6 lines)
Lines 42-47 Link Here
42
#include <machine/asmacros.h>
42
#include <machine/asmacros.h>
43
#include <machine/psl.h>
43
#include <machine/psl.h>
44
#include <machine/trap.h>
44
#include <machine/trap.h>
45
#include <machine/smap_instr.h>
45
#include <machine/specialreg.h>
46
#include <machine/specialreg.h>
46
47
47
#include "assym.s"
48
#include "assym.s"
Lines 196-201 alltraps_pushregs_no_rdi: Link Here
196
	movq	%r15,TF_R15(%rsp)
197
	movq	%r15,TF_R15(%rsp)
197
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
198
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
198
	cld
199
	cld
200
	CLAC
199
	FAKE_MCOUNT(TF_RIP(%rsp))
201
	FAKE_MCOUNT(TF_RIP(%rsp))
200
#ifdef KDTRACE_HOOKS
202
#ifdef KDTRACE_HOOKS
201
	/*
203
	/*
Lines 276-281 IDTVEC(dblfault) Link Here
276
	movw	%ds,TF_DS(%rsp)
278
	movw	%ds,TF_DS(%rsp)
277
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
279
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
278
	cld
280
	cld
281
	CLAC
279
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
282
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
280
	jz	1f			/* already running with kernel GS.base */
283
	jz	1f			/* already running with kernel GS.base */
281
	swapgs
284
	swapgs
Lines 379-384 IDTVEC(fast_syscall) Link Here
379
	movq	%r15,TF_R15(%rsp)	/* C preserved */
382
	movq	%r15,TF_R15(%rsp)	/* C preserved */
380
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
383
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
381
	cld
384
	cld
385
	CLAC
382
	FAKE_MCOUNT(TF_RIP(%rsp))
386
	FAKE_MCOUNT(TF_RIP(%rsp))
383
	movq	PCPU(CURTHREAD),%rdi
387
	movq	PCPU(CURTHREAD),%rdi
384
	movq	%rsp,TD_FRAME(%rdi)
388
	movq	%rsp,TD_FRAME(%rdi)
Lines 449-454 IDTVEC(fast_syscall32) Link Here
449
 */
453
 */
450
454
451
IDTVEC(nmi)
455
IDTVEC(nmi)
456
	CLAC
452
	subq	$TF_RIP,%rsp
457
	subq	$TF_RIP,%rsp
453
	movl	$(T_NMI),TF_TRAPNO(%rsp)
458
	movl	$(T_NMI),TF_TRAPNO(%rsp)
454
	movq	$0,TF_ADDR(%rsp)
459
	movq	$0,TF_ADDR(%rsp)
Lines 533-538 nmi_calltrap: Link Here
533
538
534
	shrq	$3,%rcx		/* trap frame size in long words */
539
	shrq	$3,%rcx		/* trap frame size in long words */
535
	cld
540
	cld
541
	CLAC
536
	rep
542
	rep
537
	movsq			/* copy trapframe */
543
	movsq			/* copy trapframe */
538
544
(-)b/sys/amd64/amd64/identcpu.c (-3 / +25 lines)
Lines 391-402 printcpuinfo(void) Link Here
391
				       /* RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */
391
				       /* RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */
392
				       "\001GSFSBASE"
392
				       "\001GSFSBASE"
393
				       "\002TSCADJ"
393
				       "\002TSCADJ"
394
				       "\003<b2>"
394
				       /* Bit Manipulation Instructions */
395
				       /* Bit Manipulation Instructions */
395
				       "\004BMI1"
396
				       "\004BMI1"
396
				       /* Hardware Lock Elision */
397
				       /* Hardware Lock Elision */
397
				       "\005HLE"
398
				       "\005HLE"
398
				       /* Advanced Vector Instructions 2 */
399
				       /* Advanced Vector Instructions 2 */
399
				       "\006AVX2"
400
				       "\006AVX2"
401
				       "\007<b6>"
400
				       /* Supervisor Mode Execution Prot. */
402
				       /* Supervisor Mode Execution Prot. */
401
				       "\010SMEP"
403
				       "\010SMEP"
402
				       /* Bit Manipulation Instructions */
404
				       /* Bit Manipulation Instructions */
Lines 406-417 printcpuinfo(void) Link Here
406
				       "\013INVPCID"
408
				       "\013INVPCID"
407
				       /* Restricted Transactional Memory */
409
				       /* Restricted Transactional Memory */
408
				       "\014RTM"
410
				       "\014RTM"
411
				       "\015<b12>"
412
				       "\016<b13>"
413
				       "\017<b14>"
414
				       "\020<b15>"
415
				       "\021<b16>"
416
				       "\022<b17>"
409
				       /* Enhanced NRBG */
417
				       /* Enhanced NRBG */
410
				       "\022RDSEED"
418
				       "\023RDSEED"
411
				       /* ADCX + ADOX */
419
				       /* ADCX + ADOX */
412
				       "\023ADX"
420
				       "\024ADX"
413
				       /* Supervisor Mode Access Prevention */
421
				       /* Supervisor Mode Access Prevention */
414
				       "\024SMAP"
422
				       "\025SMAP"
423
				       "\026<b21>"
424
				       "\027<b22>"
425
				       "\030<b23>"
426
				       "\031<b24>"
427
				       "\032<b25>"
428
				       "\033<b26>"
429
				       "\034<b27>"
430
				       "\035<b28>"
431
				       "\036<b29>"
432
				       "\037<b30>"
433
				       "\040<b31>"
415
				       );
434
				       );
416
			}
435
			}
417
436
Lines 545-550 identify_cpu(void) Link Here
545
		if (cpu_feature2 & CPUID2_HV) {
564
		if (cpu_feature2 & CPUID2_HV) {
546
			cpu_stdext_disable = CPUID_STDEXT_FSGSBASE |
565
			cpu_stdext_disable = CPUID_STDEXT_FSGSBASE |
547
			    CPUID_STDEXT_SMEP;
566
			    CPUID_STDEXT_SMEP;
567
#ifdef INTEL_SMAP
568
			cpu_stdext_disable |= CPUID_STDEXT_SMAP;
569
#endif
548
		} else
570
		} else
549
			cpu_stdext_disable = 0;
571
			cpu_stdext_disable = 0;
550
		TUNABLE_INT_FETCH("hw.cpu_stdext_disable", &cpu_stdext_disable);
572
		TUNABLE_INT_FETCH("hw.cpu_stdext_disable", &cpu_stdext_disable);
(-)b/sys/amd64/amd64/initcpu.c (-4 / +8 lines)
Lines 165-177 initializecpu(void) Link Here
165
		cr4 |= CR4_FSGSBASE;
165
		cr4 |= CR4_FSGSBASE;
166
166
167
	/*
167
	/*
168
	 * Postpone enabling the SMEP on the boot CPU until the page
168
	 * Postpone enabling the SMEP and the SMAP on the boot CPU until
169
	 * tables are switched from the boot loader identity mapping
169
	 * the page tables are switched from the boot loader identity
170
	 * to the kernel tables.  The boot loader enables the U bit in
170
	 * mapping to the kernel tables.
171
	 * its tables.
171
	 * The boot loader enables the U bit in its tables.
172
	 */
172
	 */
173
	if (!IS_BSP() && (cpu_stdext_feature & CPUID_STDEXT_SMEP))
173
	if (!IS_BSP() && (cpu_stdext_feature & CPUID_STDEXT_SMEP))
174
		cr4 |= CR4_SMEP;
174
		cr4 |= CR4_SMEP;
175
#ifdef INTEL_SMAP
176
	if (!IS_BSP() && (cpu_stdext_feature & CPUID_STDEXT_SMAP))
177
		cr4 |= CR4_SMAP;
178
#endif
175
	load_cr4(cr4);
179
	load_cr4(cr4);
176
	if ((amd_feature & AMDID_NX) != 0) {
180
	if ((amd_feature & AMDID_NX) != 0) {
177
		msr = rdmsr(MSR_EFER) | EFER_NXE;
181
		msr = rdmsr(MSR_EFER) | EFER_NXE;
(-)b/sys/amd64/amd64/pmap.c (+13 lines)
Lines 98-103 __FBSDID("$FreeBSD$"); Link Here
98
 *	and to when physical maps must be made correct.
98
 *	and to when physical maps must be made correct.
99
 */
99
 */
100
100
101
#include "opt_cpu.h"
101
#include "opt_pmap.h"
102
#include "opt_pmap.h"
102
#include "opt_vm.h"
103
#include "opt_vm.h"
103
104
Lines 665-670 pmap_bootstrap(vm_paddr_t *firstaddr) Link Here
665
	if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
666
	if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
666
		load_cr4(rcr4() | CR4_SMEP);
667
		load_cr4(rcr4() | CR4_SMEP);
667
668
669
	if (cpu_stdext_feature & CPUID_STDEXT_SMAP)
670
#ifdef INTEL_SMAP
671
		load_cr4(rcr4() | CR4_SMAP);
672
	else
673
		panic("The kernel compiled with \"options INTEL_SMAP\","
674
			       	"but your CPU doesn't support SMAP!\n");
675
#else
676
		printf("Your CPU has support for SMAP security feature. "
677
			"You should recompile the kernel with "
678
			"\"options INTEL_SMAP\" to use them.\n");
679
#endif
680
668
	/*
681
	/*
669
	 * Initialize the kernel pmap (which is statically allocated).
682
	 * Initialize the kernel pmap (which is statically allocated).
670
	 */
683
	 */
(-)b/sys/amd64/amd64/support.S (+48 lines)
Lines 35-40 Link Here
35
#include <machine/asmacros.h>
35
#include <machine/asmacros.h>
36
#include <machine/intr_machdep.h>
36
#include <machine/intr_machdep.h>
37
#include <machine/pmap.h>
37
#include <machine/pmap.h>
38
#include <machine/smap_instr.h>
38
39
39
#include "assym.s"
40
#include "assym.s"
40
41
Lines 244-255 ENTRY(copyout) Link Here
244
245
245
	shrq	$3,%rcx
246
	shrq	$3,%rcx
246
	cld
247
	cld
248
	STAC
247
	rep
249
	rep
248
	movsq
250
	movsq
251
	CLAC
249
	movb	%dl,%cl
252
	movb	%dl,%cl
250
	andb	$7,%cl
253
	andb	$7,%cl
254
	STAC
251
	rep
255
	rep
252
	movsb
256
	movsb
257
	CLAC
253
258
254
done_copyout:
259
done_copyout:
255
	xorl	%eax,%eax
260
	xorl	%eax,%eax
Lines 258-263 done_copyout: Link Here
258
	ret
263
	ret
259
264
260
	ALIGN_TEXT
265
	ALIGN_TEXT
266
/*
267
 * note:
268
 * When SMAP is enabled, the EFLAGS.AC bit is cleared before control reaches
269
 * the fault handler.
270
 */ 
261
copyout_fault:
271
copyout_fault:
262
	movq	PCPU(CURPCB),%rdx
272
	movq	PCPU(CURPCB),%rdx
263
	movq	$0,PCB_ONFAULT(%rdx)
273
	movq	$0,PCB_ONFAULT(%rdx)
Lines 290-301 ENTRY(copyin) Link Here
290
	movb	%cl,%al
300
	movb	%cl,%al
291
	shrq	$3,%rcx				/* copy longword-wise */
301
	shrq	$3,%rcx				/* copy longword-wise */
292
	cld
302
	cld
303
	STAC
293
	rep
304
	rep
294
	movsq
305
	movsq
306
	CLAC
295
	movb	%al,%cl
307
	movb	%al,%cl
296
	andb	$7,%cl				/* copy remaining bytes */
308
	andb	$7,%cl				/* copy remaining bytes */
309
	STAC
297
	rep
310
	rep
298
	movsb
311
	movsb
312
	CLAC
299
313
300
done_copyin:
314
done_copyin:
301
	xorl	%eax,%eax
315
	xorl	%eax,%eax
Lines 304-309 done_copyin: Link Here
304
	ret
318
	ret
305
319
306
	ALIGN_TEXT
320
	ALIGN_TEXT
321
/*
322
 * note:
323
 * When SMAP is enabled, the EFLAGS.AC bit is cleared before control reaches
324
 * the fault handler.
325
 */ 
307
copyin_fault:
326
copyin_fault:
308
	movq	PCPU(CURPCB),%rdx
327
	movq	PCPU(CURPCB),%rdx
309
	movq	$0,PCB_ONFAULT(%rdx)
328
	movq	$0,PCB_ONFAULT(%rdx)
Lines 324-333 ENTRY(casuword32) Link Here
324
	ja	fusufault
343
	ja	fusufault
325
344
326
	movl	%esi,%eax			/* old */
345
	movl	%esi,%eax			/* old */
346
	STAC
327
#ifdef SMP
347
#ifdef SMP
328
	lock
348
	lock
329
#endif
349
#endif
330
	cmpxchgl %edx,(%rdi)			/* new = %edx */
350
	cmpxchgl %edx,(%rdi)			/* new = %edx */
351
	CLAC
331
352
332
	/*
353
	/*
333
	 * The old value is in %eax.  If the store succeeded it will be the
354
	 * The old value is in %eax.  If the store succeeded it will be the
Lines 353-362 ENTRY(casuword) Link Here
353
	ja	fusufault
374
	ja	fusufault
354
375
355
	movq	%rsi,%rax			/* old */
376
	movq	%rsi,%rax			/* old */
377
	STAC
356
#ifdef SMP
378
#ifdef SMP
357
	lock
379
	lock
358
#endif
380
#endif
359
	cmpxchgq %rdx,(%rdi)			/* new = %rdx */
381
	cmpxchgq %rdx,(%rdi)			/* new = %rdx */
382
	CLAC
360
383
361
	/*
384
	/*
362
	 * The old value is in %eax.  If the store succeeded it will be the
385
	 * The old value is in %eax.  If the store succeeded it will be the
Lines 385-391 ENTRY(fuword) Link Here
385
	cmpq	%rax,%rdi			/* verify address is valid */
408
	cmpq	%rax,%rdi			/* verify address is valid */
386
	ja	fusufault
409
	ja	fusufault
387
410
411
	STAC
388
	movq	(%rdi),%rax
412
	movq	(%rdi),%rax
413
	CLAC
389
	movq	$0,PCB_ONFAULT(%rcx)
414
	movq	$0,PCB_ONFAULT(%rcx)
390
	ret
415
	ret
391
END(fuword64)	
416
END(fuword64)	
Lines 399-405 ENTRY(fuword32) Link Here
399
	cmpq	%rax,%rdi			/* verify address is valid */
424
	cmpq	%rax,%rdi			/* verify address is valid */
400
	ja	fusufault
425
	ja	fusufault
401
426
427
	STAC
402
	movl	(%rdi),%eax
428
	movl	(%rdi),%eax
429
	CLAC
403
	movq	$0,PCB_ONFAULT(%rcx)
430
	movq	$0,PCB_ONFAULT(%rcx)
404
	ret
431
	ret
405
END(fuword32)
432
END(fuword32)
Lines 426-432 ENTRY(fuword16) Link Here
426
	cmpq	%rax,%rdi
453
	cmpq	%rax,%rdi
427
	ja	fusufault
454
	ja	fusufault
428
455
456
	STAC
429
	movzwl	(%rdi),%eax
457
	movzwl	(%rdi),%eax
458
	CLAC
430
	movq	$0,PCB_ONFAULT(%rcx)
459
	movq	$0,PCB_ONFAULT(%rcx)
431
	ret
460
	ret
432
END(fuword16)
461
END(fuword16)
Lines 439-450 ENTRY(fubyte) Link Here
439
	cmpq	%rax,%rdi
468
	cmpq	%rax,%rdi
440
	ja	fusufault
469
	ja	fusufault
441
470
471
	STAC
442
	movzbl	(%rdi),%eax
472
	movzbl	(%rdi),%eax
473
	CLAC
443
	movq	$0,PCB_ONFAULT(%rcx)
474
	movq	$0,PCB_ONFAULT(%rcx)
444
	ret
475
	ret
445
END(fubyte)
476
END(fubyte)
446
477
447
	ALIGN_TEXT
478
	ALIGN_TEXT
479
/*
480
 * note:
481
 * When SMAP is enabled, the EFLAGS.AC bit is cleared before control reaches
482
 * the fault handler.
483
 */ 
448
fusufault:
484
fusufault:
449
	movq	PCPU(CURPCB),%rcx
485
	movq	PCPU(CURPCB),%rcx
450
	xorl	%eax,%eax
486
	xorl	%eax,%eax
Lines 466-472 ENTRY(suword) Link Here
466
	cmpq	%rax,%rdi			/* verify address validity */
502
	cmpq	%rax,%rdi			/* verify address validity */
467
	ja	fusufault
503
	ja	fusufault
468
504
505
	STAC
469
	movq	%rsi,(%rdi)
506
	movq	%rsi,(%rdi)
507
	CLAC
470
	xorl	%eax,%eax
508
	xorl	%eax,%eax
471
	movq	PCPU(CURPCB),%rcx
509
	movq	PCPU(CURPCB),%rcx
472
	movq	%rax,PCB_ONFAULT(%rcx)
510
	movq	%rax,PCB_ONFAULT(%rcx)
Lines 482-488 ENTRY(suword32) Link Here
482
	cmpq	%rax,%rdi			/* verify address validity */
520
	cmpq	%rax,%rdi			/* verify address validity */
483
	ja	fusufault
521
	ja	fusufault
484
522
523
	STAC
485
	movl	%esi,(%rdi)
524
	movl	%esi,(%rdi)
525
	CLAC
486
	xorl	%eax,%eax
526
	xorl	%eax,%eax
487
	movq	PCPU(CURPCB),%rcx
527
	movq	PCPU(CURPCB),%rcx
488
	movq	%rax,PCB_ONFAULT(%rcx)
528
	movq	%rax,PCB_ONFAULT(%rcx)
Lines 497-503 ENTRY(suword16) Link Here
497
	cmpq	%rax,%rdi			/* verify address validity */
537
	cmpq	%rax,%rdi			/* verify address validity */
498
	ja	fusufault
538
	ja	fusufault
499
539
540
	STAC
500
	movw	%si,(%rdi)
541
	movw	%si,(%rdi)
542
	CLAC
501
	xorl	%eax,%eax
543
	xorl	%eax,%eax
502
	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
544
	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
503
	movq	%rax,PCB_ONFAULT(%rcx)
545
	movq	%rax,PCB_ONFAULT(%rcx)
Lines 513-519 ENTRY(subyte) Link Here
513
	ja	fusufault
555
	ja	fusufault
514
556
515
	movl	%esi,%eax
557
	movl	%esi,%eax
558
	STAC
516
	movb	%al,(%rdi)
559
	movb	%al,(%rdi)
560
	CLAC
517
	xorl	%eax,%eax
561
	xorl	%eax,%eax
518
	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
562
	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
519
	movq	%rax,PCB_ONFAULT(%rcx)
563
	movq	%rax,PCB_ONFAULT(%rcx)
Lines 555-561 ENTRY(copyinstr) Link Here
555
	decq	%rdx
599
	decq	%rdx
556
	jz	3f
600
	jz	3f
557
601
602
	STAC
558
	lodsb
603
	lodsb
604
	CLAC
559
	stosb
605
	stosb
560
	orb	%al,%al
606
	orb	%al,%al
561
	jnz	2b
607
	jnz	2b
Lines 584-590 cpystrflt_x: Link Here
584
	testq	%r9,%r9
630
	testq	%r9,%r9
585
	jz	1f
631
	jz	1f
586
	subq	%rdx,%r8
632
	subq	%rdx,%r8
633
	STAC
587
	movq	%r8,(%r9)
634
	movq	%r8,(%r9)
635
	CLAC
588
1:
636
1:
589
	ret
637
	ret
590
END(copyinstr)
638
END(copyinstr)
(-)b/sys/amd64/amd64/trap.c (+24 lines)
Lines 127-132 void dblfault_handler(struct trapframe *frame); Link Here
127
127
128
static int trap_pfault(struct trapframe *, int);
128
static int trap_pfault(struct trapframe *, int);
129
static void trap_fatal(struct trapframe *, vm_offset_t);
129
static void trap_fatal(struct trapframe *, vm_offset_t);
130
#ifdef INTEL_SMAP
131
static bool smap_access_violation(struct trapframe *, int usermode);
132
#endif
130
133
131
#define MAX_TRAP_MSG		33
134
#define MAX_TRAP_MSG		33
132
static char *trap_msg[] = {
135
static char *trap_msg[] = {
Lines 718-723 trap_pfault(frame, usermode) Link Here
718
721
719
		map = &vm->vm_map;
722
		map = &vm->vm_map;
720
723
724
#ifdef INTEL_SMAP
725
		if (__predict_false(smap_access_violation(frame, usermode))) {
726
			trap_fatal(frame, eva);
727
			return (-1);
728
		}
729
#endif
730
721
		/*
731
		/*
722
		 * When accessing a usermode address, kernel must be
732
		 * When accessing a usermode address, kernel must be
723
		 * ready to accept the page fault, and provide a
733
		 * ready to accept the page fault, and provide a
Lines 874-879 trap_fatal(frame, eva) Link Here
874
		panic("unknown/reserved trap");
884
		panic("unknown/reserved trap");
875
}
885
}
876
886
887
#ifdef INTEL_SMAP
888
static bool
889
smap_access_violation(struct trapframe *frame, int usermode)
890
{
891
	if ((cpu_stdext_feature & CPUID_STDEXT_SMAP) == 0)
892
		return (false);
893
894
	if (usermode || (frame->tf_rflags & PSL_AC) != 0)
895
		return (false);
896
897
	return (true);
898
}
899
#endif
900
877
/*
901
/*
878
 * Double fault handler. Called when a fault occurs while writing
902
 * Double fault handler. Called when a fault occurs while writing
879
 * a frame for a trap/exception onto the stack. This usually occurs
903
 * a frame for a trap/exception onto the stack. This usually occurs
(-)b/sys/amd64/ia32/ia32_exception.S (+1 lines)
Lines 68-73 IDTVEC(int0x80_syscall) Link Here
68
	movq	%r15,TF_R15(%rsp)
68
	movq	%r15,TF_R15(%rsp)
69
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
69
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
70
	cld
70
	cld
71
	CLAC
71
	FAKE_MCOUNT(TF_RIP(%rsp))
72
	FAKE_MCOUNT(TF_RIP(%rsp))
72
	movq	%rsp, %rdi
73
	movq	%rsp, %rdi
73
	call	ia32_syscall
74
	call	ia32_syscall
(-)b/sys/amd64/include/asmacros.h (-1 / +2 lines)
Lines 167-173 Link Here
167
	movw	%es,TF_ES(%rsp) ;					\
167
	movw	%es,TF_ES(%rsp) ;					\
168
	movw	%ds,TF_DS(%rsp) ;					\
168
	movw	%ds,TF_DS(%rsp) ;					\
169
	movl	$TF_HASSEGS,TF_FLAGS(%rsp) ;				\
169
	movl	$TF_HASSEGS,TF_FLAGS(%rsp) ;				\
170
	cld
170
	cld ;								\
171
	CLAC
171
172
172
#define POP_FRAME							\
173
#define POP_FRAME							\
173
	movq	TF_RDI(%rsp),%rdi ;					\
174
	movq	TF_RDI(%rsp),%rdi ;					\
(-)b/sys/amd64/include/cpufunc.h (+27 lines)
Lines 39-48 Link Here
39
#ifndef _MACHINE_CPUFUNC_H_
39
#ifndef _MACHINE_CPUFUNC_H_
40
#define	_MACHINE_CPUFUNC_H_
40
#define	_MACHINE_CPUFUNC_H_
41
41
42
#include "opt_cpu.h"
43
42
#ifndef _SYS_CDEFS_H_
44
#ifndef _SYS_CDEFS_H_
43
#error this file needs sys/cdefs.h as a prerequisite
45
#error this file needs sys/cdefs.h as a prerequisite
44
#endif
46
#endif
45
47
48
#ifdef INTEL_SMAP
49
#include <machine/smap_instr.h>
50
#endif
51
46
struct region_descriptor;
52
struct region_descriptor;
47
53
48
#define readb(va)	(*(volatile uint8_t *) (va))
54
#define readb(va)	(*(volatile uint8_t *) (va))
Lines 711-721 intr_restore(register_t rflags) Link Here
711
	write_rflags(rflags);
717
	write_rflags(rflags);
712
}
718
}
713
719
720
/*
721
 * Intel SMAP related functions (clac and stac)
722
 */
723
static __inline void
724
clac(void)
725
{
726
#ifdef INTEL_SMAP
727
	__asm __volatile(__STRING(CLAC) : : : "memory");
728
#endif
729
}
730
731
static __inline void
732
stac(void)
733
{
734
#ifdef INTEL_SMAP
735
	__asm __volatile(__STRING(STAC) : : : "memory");
736
#endif
737
}
738
714
#else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */
739
#else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */
715
740
716
int	breakpoint(void);
741
int	breakpoint(void);
717
u_int	bsfl(u_int mask);
742
u_int	bsfl(u_int mask);
718
u_int	bsrl(u_int mask);
743
u_int	bsrl(u_int mask);
744
void	clac(void);
719
void	clflush(u_long addr);
745
void	clflush(u_long addr);
720
void	clts(void);
746
void	clts(void);
721
void	cpuid_count(u_int ax, u_int cx, u_int *p);
747
void	cpuid_count(u_int ax, u_int cx, u_int *p);
Lines 775-780 uint64_t rdtsc(void); Link Here
775
u_long	read_rflags(void);
801
u_long	read_rflags(void);
776
u_int	rfs(void);
802
u_int	rfs(void);
777
u_int	rgs(void);
803
u_int	rgs(void);
804
void	stac(void);
778
void	wbinvd(void);
805
void	wbinvd(void);
779
void	write_rflags(u_int rf);
806
void	write_rflags(u_int rf);
780
void	wrmsr(u_int msr, uint64_t newval);
807
void	wrmsr(u_int msr, uint64_t newval);
(-)b/sys/amd64/include/smap_instr.h (+14 lines)
Added Link Here
1
#ifndef	__SMAP_INSTRUCTION_H
2
#define	__SMAP_INSTRUCTION_H
3
4
#include "opt_cpu.h"
5
6
#ifdef INTEL_SMAP
7
#define	CLAC	.byte 0x0f,0x01,0xca
8
#define	STAC	.byte 0x0f,0x01,0xcb
9
#else
10
#define	CLAC
11
#define	STAC
12
#endif
13
14
#endif	/* __SMAP_INSTRUCTION_H */
(-)b/sys/conf/NOTES (+4 lines)
Lines 2963-2965 options RCTL Link Here
2963
options 	BROOKTREE_ALLOC_PAGES=(217*4+1)
2963
options 	BROOKTREE_ALLOC_PAGES=(217*4+1)
2964
options 	MAXFILES=999
2964
options 	MAXFILES=999
2965
2965
2966
# Intel SMAP
2967
# This option is supported on Haswell and newer CPUs (June 2013 and later) and
2968
# makes the kernel unbootable on older CPUs.
2969
options 	INTEL_SMAP	# Intel's hardware version of PaX UDEREF
(-)b/sys/conf/options.amd64 (+3 lines)
Lines 72-74 ISCI_LOGGING opt_isci.h Link Here
72
# hw random number generators for random(4)
72
# hw random number generators for random(4)
73
PADLOCK_RNG		opt_cpu.h
73
PADLOCK_RNG		opt_cpu.h
74
RDRAND_RNG		opt_cpu.h
74
RDRAND_RNG		opt_cpu.h
75
76
# Intel Supervisor Mode Access Prevention (SMAP)
77
INTEL_SMAP		opt_cpu.h
(-)b/sys/x86/include/psl.h (-1 / +1 lines)
Lines 52-58 Link Here
52
#define	PSL_NT		0x00004000	/* nested task bit */
52
#define	PSL_NT		0x00004000	/* nested task bit */
53
#define	PSL_RF		0x00010000	/* resume flag bit */
53
#define	PSL_RF		0x00010000	/* resume flag bit */
54
#define	PSL_VM		0x00020000	/* virtual 8086 mode bit */
54
#define	PSL_VM		0x00020000	/* virtual 8086 mode bit */
55
#define	PSL_AC		0x00040000	/* alignment checking */
55
#define	PSL_AC		0x00040000	/* alignment checking or SMAP status */
56
#define	PSL_VIF		0x00080000	/* virtual interrupt enable */
56
#define	PSL_VIF		0x00080000	/* virtual interrupt enable */
57
#define	PSL_VIP		0x00100000	/* virtual interrupt pending */
57
#define	PSL_VIP		0x00100000	/* virtual interrupt pending */
58
#define	PSL_ID		0x00200000	/* identification bit */
58
#define	PSL_ID		0x00200000	/* identification bit */
(-)b/sys/x86/include/specialreg.h (-1 / +1 lines)
Lines 73-78 Link Here
73
#define	CR4_PCIDE 0x00020000	/* Enable Context ID */
73
#define	CR4_PCIDE 0x00020000	/* Enable Context ID */
74
#define	CR4_XSAVE 0x00040000	/* XSETBV/XGETBV */
74
#define	CR4_XSAVE 0x00040000	/* XSETBV/XGETBV */
75
#define	CR4_SMEP 0x00100000	/* Supervisor-Mode Execution Prevention */
75
#define	CR4_SMEP 0x00100000	/* Supervisor-Mode Execution Prevention */
76
#define	CR4_SMAP 0x00200000	/* Supervisor-Mode Access Prevention */
76
77
77
/*
78
/*
78
 * Bits in AMD64 special registers.  EFER is 64 bits wide.
79
 * Bits in AMD64 special registers.  EFER is 64 bits wide.
79
- 

Return to bug 179282