FreeBSD Bugzilla – Attachment 210690 Details for
Bug 242137
Dell R440 loses time on FreeBSD-12-STABLE following r352517, exceeding the NTP threshold of 500 PPM to correct the error.
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
svnlite_diff_stable_12__r352513_r352519.patch
svnlite_diff_stable_12__r352513_r352519.patch (text/plain), 8.93 KB, created by
Vinícius Zavam
on 2020-01-13 10:38:21 UTC
(
hide
)
Description:
svnlite_diff_stable_12__r352513_r352519.patch
Filename:
MIME Type:
Creator:
Vinícius Zavam
Created:
2020-01-13 10:38:21 UTC
Size:
8.93 KB
patch
obsolete
>Index: sys/amd64/amd64/pmap.c >=================================================================== >--- sys/amd64/amd64/pmap.c (revision 352513) >+++ sys/amd64/amd64/pmap.c (revision 352519) >@@ -5214,8 +5214,7 @@ > pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot) > { > pd_entry_t newpde, oldpde; >- vm_offset_t eva, va; >- vm_page_t m; >+ vm_page_t m, mt; > boolean_t anychanged; > pt_entry_t PG_G, PG_M, PG_RW; > >@@ -5229,15 +5228,15 @@ > anychanged = FALSE; > retry: > oldpde = newpde = *pde; >- if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) == >- (PG_MANAGED | PG_M | PG_RW)) { >- eva = sva + NBPDR; >- for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); >- va < eva; va += PAGE_SIZE, m++) >- vm_page_dirty(m); >+ if ((prot & VM_PROT_WRITE) == 0) { >+ if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) == >+ (PG_MANAGED | PG_M | PG_RW)) { >+ m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); >+ for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) >+ vm_page_dirty(mt); >+ } >+ newpde &= ~(PG_RW | PG_M); > } >- if ((prot & VM_PROT_WRITE) == 0) >- newpde &= ~(PG_RW | PG_M); > if ((prot & VM_PROT_EXECUTE) == 0) > newpde |= pg_nx; > if (newpde != oldpde) { >@@ -7600,7 +7599,7 @@ > pmap_t pmap; > pv_entry_t next_pv, pv; > pd_entry_t oldpde, *pde; >- pt_entry_t oldpte, *pte, PG_M, PG_RW, PG_V; >+ pt_entry_t *pte, PG_M, PG_RW; > struct rwlock *lock; > vm_offset_t va; > int md_gen, pvh_gen; >@@ -7636,33 +7635,23 @@ > } > } > PG_M = pmap_modified_bit(pmap); >- PG_V = pmap_valid_bit(pmap); > PG_RW = pmap_rw_bit(pmap); > va = pv->pv_va; > pde = pmap_pde(pmap, va); > oldpde = *pde; >- if ((oldpde & PG_RW) != 0) { >- if (pmap_demote_pde_locked(pmap, pde, va, &lock)) { >- if ((oldpde & PG_W) == 0) { >- /* >- * Write protect the mapping to a >- * single page so that a subsequent >- * write access may repromote. 
>- */ >- va += VM_PAGE_TO_PHYS(m) - (oldpde & >- PG_PS_FRAME); >- pte = pmap_pde_to_pte(pde, va); >- oldpte = *pte; >- if ((oldpte & PG_V) != 0) { >- while (!atomic_cmpset_long(pte, >- oldpte, >- oldpte & ~(PG_M | PG_RW))) >- oldpte = *pte; >- vm_page_dirty(m); >- pmap_invalidate_page(pmap, va); >- } >- } >- } >+ /* If oldpde has PG_RW set, then it also has PG_M set. */ >+ if ((oldpde & PG_RW) != 0 && >+ pmap_demote_pde_locked(pmap, pde, va, &lock) && >+ (oldpde & PG_W) == 0) { >+ /* >+ * Write protect the mapping to a single page so that >+ * a subsequent write access may repromote. >+ */ >+ va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME); >+ pte = pmap_pde_to_pte(pde, va); >+ atomic_clear_long(pte, PG_M | PG_RW); >+ vm_page_dirty(m); >+ pmap_invalidate_page(pmap, va); > } > PMAP_UNLOCK(pmap); > } >Index: sys/arm64/arm64/pmap.c >=================================================================== >--- sys/arm64/arm64/pmap.c (revision 352513) >+++ sys/arm64/arm64/pmap.c (revision 352519) >@@ -5008,28 +5008,22 @@ > va = pv->pv_va; > l2 = pmap_l2(pmap, va); > oldl2 = pmap_load(l2); >- if ((oldl2 & ATTR_SW_DBM) != 0) { >- if (pmap_demote_l2_locked(pmap, l2, va, &lock)) { >- if ((oldl2 & ATTR_SW_WIRED) == 0) { >- /* >- * Write protect the mapping to a >- * single page so that a subsequent >- * write access may repromote. >- */ >- va += VM_PAGE_TO_PHYS(m) - >- (oldl2 & ~ATTR_MASK); >- l3 = pmap_l2_to_l3(l2, va); >- oldl3 = pmap_load(l3); >- if (pmap_l3_valid(oldl3)) { >- while (!atomic_fcmpset_long(l3, >- &oldl3, (oldl3 & ~ATTR_SW_DBM) | >- ATTR_AP(ATTR_AP_RO))) >- cpu_spinwait(); >- vm_page_dirty(m); >- pmap_invalidate_page(pmap, va); >- } >- } >- } >+ /* If oldl2 has ATTR_SW_DBM set, then it is also dirty. */ >+ if ((oldl2 & ATTR_SW_DBM) != 0 && >+ pmap_demote_l2_locked(pmap, l2, va, &lock) && >+ (oldl2 & ATTR_SW_WIRED) == 0) { >+ /* >+ * Write protect the mapping to a single page so that >+ * a subsequent write access may repromote. 
>+ */ >+ va += VM_PAGE_TO_PHYS(m) - (oldl2 & ~ATTR_MASK); >+ l3 = pmap_l2_to_l3(l2, va); >+ oldl3 = pmap_load(l3); >+ while (!atomic_fcmpset_long(l3, &oldl3, >+ (oldl3 & ~ATTR_SW_DBM) | ATTR_AP(ATTR_AP_RO))) >+ cpu_spinwait(); >+ vm_page_dirty(m); >+ pmap_invalidate_page(pmap, va); > } > PMAP_UNLOCK(pmap); > } >Index: sys/i386/i386/pmap.c >=================================================================== >--- sys/i386/i386/pmap.c (revision 352513) >+++ sys/i386/i386/pmap.c (revision 352519) >@@ -3332,8 +3332,7 @@ > pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot) > { > pd_entry_t newpde, oldpde; >- vm_offset_t eva, va; >- vm_page_t m; >+ vm_page_t m, mt; > boolean_t anychanged; > > PMAP_LOCK_ASSERT(pmap, MA_OWNED); >@@ -3342,15 +3341,15 @@ > anychanged = FALSE; > retry: > oldpde = newpde = *pde; >- if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) == >- (PG_MANAGED | PG_M | PG_RW)) { >- eva = sva + NBPDR; >- for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); >- va < eva; va += PAGE_SIZE, m++) >- vm_page_dirty(m); >+ if ((prot & VM_PROT_WRITE) == 0) { >+ if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) == >+ (PG_MANAGED | PG_M | PG_RW)) { >+ m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); >+ for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) >+ vm_page_dirty(mt); >+ } >+ newpde &= ~(PG_RW | PG_M); > } >- if ((prot & VM_PROT_WRITE) == 0) >- newpde &= ~(PG_RW | PG_M); > #if defined(PAE) || defined(PAE_TABLES) > if ((prot & VM_PROT_EXECUTE) == 0) > newpde |= pg_nx; >@@ -5367,7 +5366,7 @@ > pv_entry_t next_pv, pv; > pmap_t pmap; > pd_entry_t oldpde, *pde; >- pt_entry_t oldpte, *pte; >+ pt_entry_t *pte; > vm_offset_t va; > > KASSERT((m->oflags & VPO_UNMANAGED) == 0, >@@ -5394,33 +5393,24 @@ > PMAP_LOCK(pmap); > pde = pmap_pde(pmap, va); > oldpde = *pde; >- if ((oldpde & PG_RW) != 0) { >- if (pmap_demote_pde(pmap, pde, va)) { >- if ((oldpde & PG_W) == 0) { >- /* >- * Write protect the mapping to a >- * single page so that a subsequent >- * write access 
may repromote. >- */ >- va += VM_PAGE_TO_PHYS(m) - (oldpde & >- PG_PS_FRAME); >- pte = pmap_pte_quick(pmap, va); >- oldpte = *pte; >- if ((oldpte & PG_V) != 0) { >- /* >- * Regardless of whether a pte is 32 or 64 bits >- * in size, PG_RW and PG_M are among the least >- * significant 32 bits. >- */ >- while (!atomic_cmpset_int((u_int *)pte, >- oldpte, >- oldpte & ~(PG_M | PG_RW))) >- oldpte = *pte; >- vm_page_dirty(m); >- pmap_invalidate_page(pmap, va); >- } >- } >- } >+ /* If oldpde has PG_RW set, then it also has PG_M set. */ >+ if ((oldpde & PG_RW) != 0 && >+ pmap_demote_pde(pmap, pde, va) && >+ (oldpde & PG_W) == 0) { >+ /* >+ * Write protect the mapping to a single page so that >+ * a subsequent write access may repromote. >+ */ >+ va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME); >+ pte = pmap_pte_quick(pmap, va); >+ /* >+ * Regardless of whether a pte is 32 or 64 bits >+ * in size, PG_RW and PG_M are among the least >+ * significant 32 bits. >+ */ >+ atomic_clear_int((u_int *)pte, PG_M | PG_RW); >+ vm_page_dirty(m); >+ pmap_invalidate_page(pmap, va); > } > PMAP_UNLOCK(pmap); > } >Index: sys/riscv/riscv/pmap.c >=================================================================== >--- sys/riscv/riscv/pmap.c (revision 352513) >+++ sys/riscv/riscv/pmap.c (revision 352519) >@@ -4104,7 +4104,7 @@ > pmap_t pmap; > pv_entry_t next_pv, pv; > pd_entry_t *l2, oldl2; >- pt_entry_t *l3, oldl3; >+ pt_entry_t *l3; > vm_offset_t va; > int md_gen, pvh_gen; > >@@ -4142,28 +4142,19 @@ > va = pv->pv_va; > l2 = pmap_l2(pmap, va); > oldl2 = pmap_load(l2); >- if ((oldl2 & PTE_W) != 0) { >- if (pmap_demote_l2_locked(pmap, l2, va, &lock)) { >- if ((oldl2 & PTE_SW_WIRED) == 0) { >- /* >- * Write protect the mapping to a >- * single page so that a subsequent >- * write access may repromote. 
>- */ >- va += VM_PAGE_TO_PHYS(m) - >- PTE_TO_PHYS(oldl2); >- l3 = pmap_l2_to_l3(l2, va); >- oldl3 = pmap_load(l3); >- if ((oldl3 & PTE_V) != 0) { >- while (!atomic_fcmpset_long(l3, >- &oldl3, oldl3 & ~(PTE_D | >- PTE_W))) >- cpu_spinwait(); >- vm_page_dirty(m); >- pmap_invalidate_page(pmap, va); >- } >- } >- } >+ /* If oldl2 has PTE_W set, then it also has PTE_D set. */ >+ if ((oldl2 & PTE_W) != 0 && >+ pmap_demote_l2_locked(pmap, l2, va, &lock) && >+ (oldl2 & PTE_SW_WIRED) == 0) { >+ /* >+ * Write protect the mapping to a single page so that >+ * a subsequent write access may repromote. >+ */ >+ va += VM_PAGE_TO_PHYS(m) - PTE_TO_PHYS(oldl2); >+ l3 = pmap_l2_to_l3(l2, va); >+ pmap_clear_bits(l3, PTE_D | PTE_W); >+ vm_page_dirty(m); >+ pmap_invalidate_page(pmap, va); > } > PMAP_UNLOCK(pmap); > } >Index: . >=================================================================== >--- . (revision 352513) >+++ . (revision 352519) > >Property changes on: . >___________________________________________________________________ >Modified: svn:mergeinfo >## -0,0 +0,1 ## > Merged /head:r349526,350335
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 242137
:
209404
|
209405
|
209450
|
209680
|
209689
|
210496
|
210689
| 210690 |
210691
|
210708