And then rebuild and install a new kernel:
KK=`sysctl -n kern.osversion | cut -d# -f1`
cd /usr/src/sys/arch/`machine`/compile/$KK
make obj
make config
make
make install
+	if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
+		int family = ci->ci_family;
+		int model = ci->ci_model;
+
+		swapgs_vuln = 1;
+		if (family == 0x6 &&
+		    (model == 0x37 || model == 0x4a || model == 0x4c ||
+		     model == 0x4d || model == 0x5a || model == 0x5d ||
+		     model == 0x6e || model == 0x65 || model == 0x75)) {
+			/* Silvermont, Airmont */
+			swapgs_vuln = 0;
+		} else if (family == 0x6 && (model == 0x85 || model == 0x57)) {
+			/* KnightsLanding */
+			swapgs_vuln = 0;
+		}
+	}
+
 	s = splhigh();
 	if (!cpu_meltdown)
 		codepatch_nop(CPTAG_MELTDOWN_NOP);
-	else if (pmap_use_pcid) {
-		extern long _pcid_set_reuse;
-		DPRINTF("%s: codepatching PCID use", __func__);
-		codepatch_replace(CPTAG_PCID_SET_REUSE, &_pcid_set_reuse,
-		    PCID_SET_REUSE_SIZE);
+	else {
+		extern long alltraps_kern_meltdown;
+
+		/* eliminate conditional branch in alltraps */
+		codepatch_jmp(CPTAG_MELTDOWN_ALLTRAPS, &alltraps_kern_meltdown);
+
+		/* enable reuse of PCID for U-K page tables */
+		if (pmap_use_pcid) {
+			extern long _pcid_set_reuse;
+			DPRINTF("%s: codepatching PCID use", __func__);
+			codepatch_replace(CPTAG_PCID_SET_REUSE,
+			    &_pcid_set_reuse, PCID_SET_REUSE_SIZE);
+		}
+	}
+
+	/*
+	 * CVE-2019-1125: if the CPU has SMAP and it's not vulnerable to
+	 * Meltdown, then it's protected both from speculatively mis-skipping
+	 * the swapgs during interrupts of userspace and from speculatively
+	 * mis-taking a swapgs during interrupts while already in the kernel
+	 * as the speculative path will fault from SMAP. Warning: enabling
+	 * WRGSBASE would break this 'protection'.
+	 *
+	 * Otherwise, if the CPU's swapgs can't be speculated over and it
+	 * _is_ vulnerable to Meltdown then the %cr3 change will serialize
+	 * user->kern transitions, but we still need to mitigate the
+	 * already-in-kernel cases.
+	 */
+	if (!cpu_meltdown && (ci->ci_feature_sefflags_ebx & SEFF0EBX_SMAP)) {
+		codepatch_nop(CPTAG_FENCE_SWAPGS_MIS_TAKEN);
+		codepatch_nop(CPTAG_FENCE_NO_SAFE_SMAP);
+	} else if (!swapgs_vuln && cpu_meltdown) {
+		codepatch_nop(CPTAG_FENCE_SWAPGS_MIS_TAKEN);
 	}
 	splx(s);
 }
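The model list in the swapgs_vuln test above exempts Intel's Silvermont/Airmont Atom cores and the Knights Landing/Knights Mill parts. The following is an illustration only, not part of the patch: a userland sketch that decodes CPUID leaf 1 and applies the same family/model list. The helper name, the use of gcc/clang <cpuid.h>, and the omission of the "GenuineIntel" vendor check are assumptions of the sketch; the kernel itself reads the already-decoded ci->ci_family and ci->ci_model fields.

#include <stdio.h>
#include <cpuid.h>

static int
intel_swapgs_vuln(void)
{
	unsigned int eax, ebx, ecx, edx, family, model;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;		/* no CPUID: assume vulnerable */

	family = (eax >> 8) & 0xf;
	model = (eax >> 4) & 0xf;
	if (family == 0x6 || family == 0xf)
		model += ((eax >> 16) & 0xf) << 4;	/* extended model */
	if (family == 0xf)
		family += (eax >> 20) & 0xff;		/* extended family */

	if (family == 0x6) {
		switch (model) {
		case 0x37: case 0x4a: case 0x4c: case 0x4d: case 0x5a:
		case 0x5d: case 0x6e: case 0x65: case 0x75:
			return 0;	/* Silvermont, Airmont */
		case 0x85: case 0x57:
			return 0;	/* Knights Landing/Mill */
		}
	}
	return 1;
}

int
main(void)
{
	printf("swapgs fence wanted: %s\n", intel_swapgs_vuln() ? "yes" : "no");
	return 0;
}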
Index: sys/arch/amd64/amd64/locore.S
===================================================================
RCS file: /var/cvs/src/sys/arch/amd64/amd64/locore.S,v
retrieving revision 1.116.2.1
diff -u -p -r1.116.2.1 locore.S
--- sys/arch/amd64/amd64/locore.S 28 May 2019 14:23:21 -0000 1.116.2.1
+++ sys/arch/amd64/amd64/locore.S 7 Aug 2019 20:43:49 -0000
@@ -563,7 +563,7 @@ XUsyscall_meltdown:
 	 * (thank you, Intel), at which point we'll continue at the
 	 * "movq CPUVAR(KERN_RSP),%rax" after Xsyscall below.
 	 * In case the CPU speculates past the mov to cr3, we put a
-	 * retpoline-style pause-jmp-to-pause loop.
+	 * retpoline-style pause-lfence-jmp-to-pause loop.
 	 */
 	swapgs
 	movq	%rax,CPUVAR(SCRATCH)
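The comment fix above matters because the loop after the %cr3 write is a speculation trap: it is never reached architecturally, but a CPU that speculates past the cr3 load must land somewhere harmless, and adding lfence gives it the same shape as a retpoline capture loop. A hedged C rendering of the idea follows (illustration only; the kernel keeps this in assembly, and the function name is made up for the sketch):

/*
 * Sketch of the speculation trap described above.  Architecturally
 * unreachable; a CPU speculating past the %cr3 write spins here, and
 * the LFENCE keeps the speculative path from running further ahead.
 */
static inline void
speculation_trap(void)
{
	for (;;)
		__asm volatile("pause; lfence" ::: "memory");
}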
Index: sys/arch/amd64/amd64/vector.S
===================================================================
RCS file: /var/cvs/src/sys/arch/amd64/amd64/vector.S,v
retrieving revision 1.79
diff -u -p -r1.79 vector.S
--- sys/arch/amd64/amd64/vector.S 20 Jan 2019 00:53:08 -0000 1.79
+++ sys/arch/amd64/amd64/vector.S 7 Aug 2019 20:43:49 -0000
@@ -141,6 +141,7 @@ calltrap_specstk: # special stack path
 	.text
 	.globl	INTRENTRY_LABEL(calltrap_specstk)
 INTRENTRY_LABEL(calltrap_specstk):
+	lfence			# block speculation through jz above
 	cld
 	SMAP_CLAC
 	movq	%rsp,%rdi
@@ -183,6 +184,7 @@ IDTVEC(trap03)
 	.text
 	.global	INTRENTRY_LABEL(trap03)
 INTRENTRY_LABEL(trap03):
+	FENCE_NO_SAFE_SMAP
 	INTR_ENTRY_KERN
 	INTR_SAVE_MOST_GPRS_NO_ADJ
 	sti
@@ -313,7 +315,8 @@ IDTVEC(trap0d)
 .Lhandle_doreti:
 	/* iretq faulted; resume in a stub that acts like we got a #GP */
 	leaq	.Lhandle_doreti_resume(%rip),%rcx
-1:	movq	%rcx,24(%rsp)		/* over %r[cd]x and err to %rip */
+1:	lfence		/* block speculation through conditionals above */
+	movq	%rcx,24(%rsp)		/* over %r[cd]x and err to %rip */
 	popq	%rcx
 	popq	%rdx
 	addq	$8,%rsp		/* pop the err code */
@@ -388,12 +391,13 @@ KUTEXT_PAGE_START
 	 * the kernel page tables (thank you, Intel) will make us
 	 * continue at the "movq CPUVAR(KERN_RSP),%rax" after alltraps
 	 * below. In case the CPU speculates past the mov to cr3,
-	 * we put a retpoline-style pause-jmp-to-pause loop.
+	 * we put a retpoline-style pause-lfence-jmp-to-pause loop.
 	 */
 Xalltraps:
 	swapgs
 	movq	%rax,CPUVAR(SCRATCH)
 	movq	CPUVAR(KERN_CR3),%rax
+	.byte	0x66, 0x90	/* space for FENCE_SWAPGS_MIS_TAKEN below */
 	movq	%rax,%cr3
 0:	pause
 	lfence
@@ -403,9 +407,12 @@ KUTEXT_PAGE_END
 KTEXT_PAGE_START
 	.align	NBPG, 0xcc
 GENTRY(alltraps)
+	CODEPATCH_START
 	testb	$SEL_RPL,24(%rsp)
 	je	alltraps_kern
 	swapgs
+	CODEPATCH_END(CPTAG_MELTDOWN_ALLTRAPS)
+	FENCE_SWAPGS_MIS_TAKEN
 	movq	%rax,CPUVAR(SCRATCH)
 	.space	(0b - Xalltraps) - (. - alltraps), 0x90
@@ -428,9 +435,15 @@ END(alltraps)
 
 /*
  * Traps from supervisor mode (kernel)
+ * If we're not mitigating Meltdown, then there's a conditional branch
+ * above and we may need a fence to mitigate CVE-2019-1125. If we're
+ * doing Meltdown mitigation there's just an unconditional branch and
+ * we can skip the fence.
  */
 	_ALIGN_TRAPS
 GENTRY(alltraps_kern)
+	FENCE_NO_SAFE_SMAP
+GENTRY(alltraps_kern_meltdown)
 	INTR_ENTRY_KERN
 	INTR_SAVE_MOST_GPRS_NO_ADJ
 	sti
@@ -467,6 +480,7 @@ spl_lowered:
 	.popsection
 #endif /* DIAGNOSTIC */
 END(alltraps_kern)
+END(alltraps_kern_meltdown)
 KTEXT_PAGE_END
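Taken together, the cpu.c and vector.S pieces leave each new fence in place only when it is still needed. The small program below is an illustration written for this note, not kernel code: the variable names stand in for cpu_meltdown, the SMAP feature bit and swapgs_vuln, and the two branches mirror the codepatch_nop() calls added above, printing whether CPTAG_FENCE_SWAPGS_MIS_TAKEN and CPTAG_FENCE_NO_SAFE_SMAP keep their lfence or get patched to NOPs at boot.

#include <stdio.h>

int
main(void)
{
	int meltdown, smap, swapgs_vuln;

	for (meltdown = 0; meltdown <= 1; meltdown++)
	for (smap = 0; smap <= 1; smap++)
	for (swapgs_vuln = 0; swapgs_vuln <= 1; swapgs_vuln++) {
		/* both fences start out active (lfence left in the text) */
		int mis_taken = 1, no_safe_smap = 1;

		if (!meltdown && smap) {
			/* SMAP covers both speculative cases */
			mis_taken = 0;
			no_safe_smap = 0;
		} else if (!swapgs_vuln && meltdown) {
			/* see the CVE-2019-1125 comment in cpu.c above */
			mis_taken = 0;
		}

		printf("meltdown=%d smap=%d swapgs_vuln=%d -> "
		    "MIS_TAKEN=%s NO_SAFE_SMAP=%s\n",
		    meltdown, smap, swapgs_vuln,
		    mis_taken ? "lfence" : "nop",
		    no_safe_smap ? "lfence" : "nop");
	}
	return 0;
}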