FreeBSD Bugzilla – Attachment 169071 Details for Bug 208580: [EXP-RUN] ASLR check
Description: aslr.9.patch
Filename: 1.patch
MIME Type: text/plain
Creator: Konstantin Belousov
Created: 2016-04-07 10:43:55 UTC
Size: 22.87 KB
Flags: patch, obsolete
>diff --git a/sys/amd64/amd64/elf_machdep.c b/sys/amd64/amd64/elf_machdep.c >index ca07adc..c854ecd 100644 >--- a/sys/amd64/amd64/elf_machdep.c >+++ b/sys/amd64/amd64/elf_machdep.c >@@ -72,7 +72,8 @@ struct sysentvec elf64_freebsd_sysvec = { > .sv_setregs = exec_setregs, > .sv_fixlimit = NULL, > .sv_maxssiz = NULL, >- .sv_flags = SV_ABI_FREEBSD | SV_LP64 | SV_SHP | SV_TIMEKEEP, >+ .sv_flags = SV_ABI_FREEBSD | SV_LP64 | SV_SHP | SV_TIMEKEEP | >+ SV_ASLR, > .sv_set_syscall_retval = cpu_set_syscall_retval, > .sv_fetch_syscall_args = cpu_fetch_syscall_args, > .sv_syscallnames = syscallnames, >diff --git a/sys/arm/arm/elf_machdep.c b/sys/arm/arm/elf_machdep.c >index 2ec659a..2fa835a 100644 >--- a/sys/arm/arm/elf_machdep.c >+++ b/sys/arm/arm/elf_machdep.c >@@ -76,7 +76,7 @@ struct sysentvec elf32_freebsd_sysvec = { > .sv_maxssiz = NULL, > .sv_flags = > #if __ARM_ARCH >= 6 >- SV_SHP | SV_TIMEKEEP | >+ SV_SHP | SV_TIMEKEEP | SV_ASLR | > #endif > SV_ABI_FREEBSD | SV_ILP32, > .sv_set_syscall_retval = cpu_set_syscall_retval, >diff --git a/sys/compat/freebsd32/freebsd32_misc.c b/sys/compat/freebsd32/freebsd32_misc.c >index 598bdc5..1291a0a 100644 >--- a/sys/compat/freebsd32/freebsd32_misc.c >+++ b/sys/compat/freebsd32/freebsd32_misc.c >@@ -3039,6 +3039,7 @@ freebsd32_procctl(struct thread *td, struct freebsd32_procctl_args *uap) > switch (uap->com) { > case PROC_SPROTECT: > case PROC_TRACE_CTL: >+ case PROC_ASLR_CTL: > error = copyin(PTRIN(uap->data), &flags, sizeof(flags)); > if (error != 0) > return (error); >@@ -3068,6 +3069,7 @@ freebsd32_procctl(struct thread *td, struct freebsd32_procctl_args *uap) > data = &x.rk; > break; > case PROC_TRACE_STATUS: >+ case PROC_ASLR_STATUS: > data = &flags; > break; > default: >@@ -3086,6 +3088,7 @@ freebsd32_procctl(struct thread *td, struct freebsd32_procctl_args *uap) > error = error1; > break; > case PROC_TRACE_STATUS: >+ case PROC_ASLR_STATUS: > if (error == 0) > error = copyout(&flags, uap->data, sizeof(flags)); > break; >diff --git a/sys/compat/ia32/ia32_sysvec.c b/sys/compat/ia32/ia32_sysvec.c >index f201570..abf26a8 100644 >--- a/sys/compat/ia32/ia32_sysvec.c >+++ b/sys/compat/ia32/ia32_sysvec.c >@@ -120,11 +120,9 @@ struct sysentvec ia32_freebsd_sysvec = { > .sv_setregs = ia32_setregs, > .sv_fixlimit = ia32_fixlimit, > .sv_maxssiz = &ia32_maxssiz, >- .sv_flags = SV_ABI_FREEBSD | SV_IA32 | SV_ILP32 | >+ .sv_flags = SV_ABI_FREEBSD | SV_IA32 | SV_ILP32 > #ifdef __amd64__ >- SV_SHP | SV_TIMEKEEP >-#else >- 0 >+ | SV_SHP | SV_TIMEKEEP | SV_ASLR > #endif > , > .sv_set_syscall_retval = ia32_set_syscall_retval, >diff --git a/sys/i386/i386/elf_machdep.c b/sys/i386/i386/elf_machdep.c >index 3c76ab2..3fa2d09 100644 >--- a/sys/i386/i386/elf_machdep.c >+++ b/sys/i386/i386/elf_machdep.c >@@ -79,7 +79,7 @@ struct sysentvec elf32_freebsd_sysvec = { > .sv_fixlimit = NULL, > .sv_maxssiz = NULL, > .sv_flags = SV_ABI_FREEBSD | SV_IA32 | SV_ILP32 | SV_SHP | >- SV_TIMEKEEP, >+ SV_TIMEKEEP | SV_ASLR, > .sv_set_syscall_retval = cpu_set_syscall_retval, > .sv_fetch_syscall_args = cpu_fetch_syscall_args, > .sv_syscallnames = syscallnames, >diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c >index 0bed714..59a65e0 100644 >--- a/sys/kern/imgact_elf.c >+++ b/sys/kern/imgact_elf.c >@@ -137,6 +137,21 @@ SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0, > #endif > #endif > >+static int __elfN(aslr_enabled) = 1; >+SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, >+ aslr_enabled, CTLFLAG_RWTUN, &__elfN(aslr_enabled), 0, >+ 
__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable aslr"); >+ >+static int __elfN(pie_aslr_enabled) = 1; >+SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, >+ pie_aslr_enabled, CTLFLAG_RWTUN, &__elfN(pie_aslr_enabled), 0, >+ __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable aslr for PIE binaries"); >+ >+static int __elfN(aslr_care_sbrk) = 0; >+SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, >+ aslr_care_sbrk, CTLFLAG_RW, &__elfN(aslr_care_sbrk), 0, >+ __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used"); >+ > static Elf_Brandinfo *elf_brand_list[MAX_BRANDS]; > > #define trunc_page_ps(va, ps) ((va) & ~(ps - 1)) >@@ -424,13 +439,14 @@ __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, > } > > static int >-__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, >- vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow) >+__elfN(map_insert)(struct image_params *imgp, vm_map_t map, vm_object_t object, >+ vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot, >+ int cow) > { > struct sf_buf *sf; > vm_offset_t off; > vm_size_t sz; >- int error, rv; >+ int error, locked, rv; > > if (start != trunc_page(start)) { > rv = __elfN(map_partial)(map, object, offset, start, >@@ -453,10 +469,11 @@ __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, > * The mapping is not page aligned. This means we have > * to copy the data. Sigh. > */ >- rv = vm_map_find(map, NULL, 0, &start, end - start, 0, >- VMFS_NO_SPACE, prot | VM_PROT_WRITE, VM_PROT_ALL, >- 0); >- if (rv) >+ vm_map_lock(map); >+ rv = vm_map_insert(map, NULL, 0, start, end, >+ prot | VM_PROT_WRITE, VM_PROT_ALL, 0); >+ vm_map_unlock(map); >+ if (rv != KERN_SUCCESS) > return (rv); > if (object == NULL) > return (KERN_SUCCESS); >@@ -471,9 +488,8 @@ __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, > error = copyout((caddr_t)sf_buf_kva(sf) + off, > (caddr_t)start, sz); > vm_imgact_unmap_page(sf); >- if (error) { >+ if (error != 0) > return (KERN_FAILURE); >- } > offset += sz; > } > rv = KERN_SUCCESS; >@@ -483,8 +499,12 @@ __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, > rv = vm_map_insert(map, object, offset, start, end, > prot, VM_PROT_ALL, cow); > vm_map_unlock(map); >- if (rv != KERN_SUCCESS) >+ if (rv != KERN_SUCCESS) { >+ locked = VOP_ISLOCKED(imgp->vp); >+ VOP_UNLOCK(imgp->vp, 0); > vm_object_deallocate(object); >+ vn_lock(imgp->vp, locked | LK_RETRY); >+ } > } > return (rv); > } else { >@@ -541,7 +561,7 @@ __elfN(load_section)(struct image_params *imgp, vm_offset_t offset, > cow = MAP_COPY_ON_WRITE | MAP_PREFAULT | > (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP); > >- rv = __elfN(map_insert)(map, >+ rv = __elfN(map_insert)(imgp, map, > object, > file_addr, /* file offset */ > map_addr, /* virtual start */ >@@ -571,8 +591,8 @@ __elfN(load_section)(struct image_params *imgp, vm_offset_t offset, > > /* This had damn well better be true! 
*/ > if (map_len != 0) { >- rv = __elfN(map_insert)(map, NULL, 0, map_addr, map_addr + >- map_len, VM_PROT_ALL, 0); >+ rv = __elfN(map_insert)(imgp, map, NULL, 0, map_addr, >+ map_addr + map_len, VM_PROT_ALL, 0); > if (rv != KERN_SUCCESS) { > return (EINVAL); > } >@@ -747,6 +767,24 @@ fail: > return (error); > } > >+static u_long >+__CONCAT(rnd_, __elfN(base))(u_long base, u_long minv, u_long maxv, >+ u_int align) >+{ >+ u_long rbase, res; >+ >+ arc4rand(&rbase, sizeof(rbase), 0); >+ res = base + rbase % (maxv - minv); >+ res &= ~((u_long)align - 1); >+ KASSERT(res >= base, >+ ("res %#lx < base %#lx, minv %#lx maxv %#lx rbase %#lx", >+ res, base, minv, maxv, rbase)); >+ KASSERT(res < maxv, >+ ("res %#lx > maxv %#lx, minv %#lx base %#lx rbase %#lx", >+ res, maxv, minv, base, rbase)); >+ return (res); >+} >+ > static int > __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) > { >@@ -755,6 +793,7 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) > const Elf_Phdr *phdr; > Elf_Auxargs *elf_auxargs; > struct vmspace *vmspace; >+ vm_map_t map; > const char *err_str, *newinterp; > char *interp, *interp_buf, *path; > Elf_Brandinfo *brand_info; >@@ -762,6 +801,7 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) > vm_prot_t prot; > u_long text_size, data_size, total_size, text_addr, data_addr; > u_long seg_size, seg_addr, addr, baddr, et_dyn_addr, entry, proghdr; >+ u_long maxalign, mapsz, maxv; > int32_t osrel; > int error, i, n, interp_name_len, have_interp; > >@@ -803,12 +843,17 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) > err_str = newinterp = NULL; > interp = interp_buf = NULL; > td = curthread; >+ maxalign = PAGE_SIZE; >+ mapsz = 0; > > for (i = 0; i < hdr->e_phnum; i++) { > switch (phdr[i].p_type) { > case PT_LOAD: > if (n == 0) > baddr = phdr[i].p_vaddr; >+ if (phdr[i].p_align > maxalign) >+ maxalign = phdr[i].p_align; >+ mapsz += phdr[i].p_memsz; > n++; > break; > case PT_INTERP: >@@ -862,6 +907,8 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) > error = ENOEXEC; > goto ret; > } >+ sv = brand_info->sysvec; >+ et_dyn_addr = 0; > if (hdr->e_type == ET_DYN) { > if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) { > uprintf("Cannot execute shared object\n"); >@@ -872,13 +919,17 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) > * Honour the base load address from the dso if it is > * non-zero for some reason. 
> */ >- if (baddr == 0) >- et_dyn_addr = ET_DYN_LOAD_ADDR; >- else >- et_dyn_addr = 0; >- } else >- et_dyn_addr = 0; >- sv = brand_info->sysvec; >+ if (baddr == 0) { >+ if ((sv->sv_flags & SV_ASLR) == 0) >+ et_dyn_addr = ET_DYN_LOAD_ADDR; >+ else if ((__elfN(pie_aslr_enabled) && >+ (imgp->proc->p_flag2 & P2_ASLR_DISABLE) == 0) || >+ (imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0) >+ et_dyn_addr = 1; >+ else >+ et_dyn_addr = ET_DYN_LOAD_ADDR; >+ } >+ } > if (interp != NULL && brand_info->interp_newpath != NULL) > newinterp = brand_info->interp_newpath; > >@@ -897,6 +948,37 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) > > error = exec_new_vmspace(imgp, sv); > imgp->proc->p_sysent = sv; >+ vmspace = imgp->proc->p_vmspace; >+ map = &vmspace->vm_map; >+ >+ if ((sv->sv_flags & SV_ASLR) == 0 || >+ (imgp->proc->p_flag2 & P2_ASLR_DISABLE) != 0) { >+ KASSERT(et_dyn_addr != 1, ("et_dyn_addr == 1 and !ASLR")); >+ } else if ((imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0 || >+ (__elfN(aslr_enabled) && hdr->e_type != ET_DYN) || >+ et_dyn_addr == 1) { >+ vm_map_lock(map); >+ map->flags |= MAP_ASLR; >+ /* >+ * If user does not care about sbrk, utilize the bss >+ * grow region for mappings as well. We can select >+ * the base for the image anywere and still not suffer >+ * from the fragmentation. >+ */ >+ if (!__elfN(aslr_care_sbrk) || >+ (imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0) >+ map->flags |= MAP_ASLR_IGNSTART; >+ vm_map_unlock(map); >+ } >+ maxv = vm_map_max(map) - lim_max(td, RLIMIT_STACK); >+ if (et_dyn_addr == 1) { >+ KASSERT((map->flags & MAP_ASLR) != 0, >+ ("et_dyn_addr but !MAP_ASLR")); >+ et_dyn_addr = __CONCAT(rnd_, __elfN(base))(vm_map_min(map), >+ vm_map_min(map) + mapsz + lim_max(td, RLIMIT_DATA), >+ /* reserve half of the address space to interpreter */ >+ maxv / 2, 1UL << flsl(maxalign)); >+ } > > vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); > if (error != 0) >@@ -989,7 +1071,6 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) > goto ret; > } > >- vmspace = imgp->proc->p_vmspace; > vmspace->vm_tsize = text_size >> PAGE_SHIFT; > vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr; > vmspace->vm_dsize = data_size >> PAGE_SHIFT; >@@ -1010,6 +1091,11 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) > if (interp != NULL) { > have_interp = FALSE; > VOP_UNLOCK(imgp->vp, 0); >+ if ((map->flags & MAP_ASLR) != 0) { >+ addr = __CONCAT(rnd_, __elfN(base))(addr, addr, >+ /* Assume that interpeter fits into 1/4 of AS */ >+ (maxv + addr) / 2, PAGE_SIZE); >+ } > if (brand_info->emul_path != NULL && > brand_info->emul_path[0] != '\0') { > path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); >diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c >index 94e139e..2aa1614 100644 >--- a/sys/kern/kern_fork.c >+++ b/sys/kern/kern_fork.c >@@ -497,7 +497,8 @@ do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread * > * Increase reference counts on shared objects. 
> */ > p2->p_flag = P_INMEM; >- p2->p_flag2 = p1->p_flag2 & (P2_NOTRACE | P2_NOTRACE_EXEC); >+ p2->p_flag2 = p1->p_flag2 & (P2_NOTRACE | P2_NOTRACE_EXEC | >+ P2_ASLR_ENABLE | P2_ASLR_DISABLE | P2_ASLR_IGNSTART); > p2->p_swtick = ticks; > if (p1->p_flag & P_PROFIL) > startprofclock(p2); >diff --git a/sys/kern/kern_procctl.c b/sys/kern/kern_procctl.c >index 8ef72901..9247ccc 100644 >--- a/sys/kern/kern_procctl.c >+++ b/sys/kern/kern_procctl.c >@@ -1,6 +1,6 @@ > /*- > * Copyright (c) 2014 John Baldwin >- * Copyright (c) 2014 The FreeBSD Foundation >+ * Copyright (c) 2014-2016 The FreeBSD Foundation > * > * Portions of this software were developed by Konstantin Belousov > * under sponsorship from the FreeBSD Foundation. >@@ -43,6 +43,10 @@ __FBSDID("$FreeBSD$"); > #include <sys/sysproto.h> > #include <sys/wait.h> > >+#include <vm/vm.h> >+#include <vm/pmap.h> >+#include <vm/vm_map.h> >+ > static int > protect_setchild(struct thread *td, struct proc *p, int flags) > { >@@ -336,6 +340,52 @@ trace_status(struct thread *td, struct proc *p, int *data) > return (0); > } > >+static int >+aslr_ctl(struct thread *td, struct proc *p, int state) >+{ >+ >+ PROC_LOCK_ASSERT(p, MA_OWNED); >+ >+ switch (state) { >+ case PROC_ASLR_FORCE_ENABLE: >+ p->p_flag2 &= ~P2_ASLR_DISABLE; >+ p->p_flag2 |= P2_ASLR_ENABLE; >+ break; >+ case PROC_ASLR_FORCE_DISABLE: >+ p->p_flag2 |= P2_ASLR_DISABLE; >+ p->p_flag2 &= ~P2_ASLR_ENABLE; >+ break; >+ case PROC_ASLR_NOFORCE: >+ p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE); >+ break; >+ default: >+ return (EINVAL); >+ } >+ return (0); >+} >+ >+static int >+aslr_status(struct thread *td, struct proc *p, int *data) >+{ >+ int d; >+ >+ switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) { >+ case 0: >+ d = PROC_ASLR_NOFORCE; >+ break; >+ case P2_ASLR_ENABLE: >+ d = PROC_ASLR_FORCE_ENABLE; >+ break; >+ case P2_ASLR_DISABLE: >+ d = PROC_ASLR_FORCE_DISABLE; >+ break; >+ } >+ if ((p->p_vmspace->vm_map.flags & MAP_ASLR) != 0) >+ d |= PROC_ASLR_ACTIVE; >+ *data = d; >+ return (0); >+} >+ > #ifndef _SYS_SYSPROTO_H_ > struct procctl_args { > idtype_t idtype; >@@ -359,6 +409,7 @@ sys_procctl(struct thread *td, struct procctl_args *uap) > switch (uap->com) { > case PROC_SPROTECT: > case PROC_TRACE_CTL: >+ case PROC_ASLR_CTL: > error = copyin(uap->data, &flags, sizeof(flags)); > if (error != 0) > return (error); >@@ -386,6 +437,7 @@ sys_procctl(struct thread *td, struct procctl_args *uap) > data = &x.rk; > break; > case PROC_TRACE_STATUS: >+ case PROC_ASLR_STATUS: > data = &flags; > break; > default: >@@ -403,6 +455,7 @@ sys_procctl(struct thread *td, struct procctl_args *uap) > error = error1; > break; > case PROC_TRACE_STATUS: >+ case PROC_ASLR_STATUS: > if (error == 0) > error = copyout(&flags, uap->data, sizeof(flags)); > break; >@@ -432,6 +485,10 @@ kern_procctl_single(struct thread *td, struct proc *p, int com, void *data) > return (trace_ctl(td, p, *(int *)data)); > case PROC_TRACE_STATUS: > return (trace_status(td, p, data)); >+ case PROC_ASLR_CTL: >+ return (aslr_ctl(td, p, *(int *)data)); >+ case PROC_ASLR_STATUS: >+ return (aslr_status(td, p, data)); > default: > return (EINVAL); > } >@@ -452,6 +509,8 @@ kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data) > case PROC_REAP_GETPIDS: > case PROC_REAP_KILL: > case PROC_TRACE_STATUS: >+ case PROC_ASLR_CTL: >+ case PROC_ASLR_STATUS: > if (idtype != P_PID) > return (EINVAL); > } >@@ -471,6 +530,8 @@ kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data) > tree_locked = true; > 
break; > case PROC_TRACE_STATUS: >+ case PROC_ASLR_CTL: >+ case PROC_ASLR_STATUS: > tree_locked = false; > break; > default: >diff --git a/sys/sys/proc.h b/sys/sys/proc.h >index 2d1769e..583b9b39 100644 >--- a/sys/sys/proc.h >+++ b/sys/sys/proc.h >@@ -702,6 +702,9 @@ struct proc { > #define P2_NOTRACE_EXEC 0x00000004 /* Keep P2_NOPTRACE on exec(2). */ > #define P2_AST_SU 0x00000008 /* Handles SU ast for kthreads. */ > #define P2_LWP_EVENTS 0x00000010 /* Report LWP events via ptrace(2). */ >+#define P2_ASLR_ENABLE 0x00000020 /* Force enable ASLR. */ >+#define P2_ASLR_DISABLE 0x00000040 /* Force disable ASLR. */ >+#define P2_ASLR_IGNSTART 0x00000080 /* Enable ASLR to consume sbrk area. */ > > /* Flags protected by proctree_lock, kept in p_treeflags. */ > #define P_TREE_ORPHANED 0x00000001 /* Reparented, on orphan list */ >diff --git a/sys/sys/procctl.h b/sys/sys/procctl.h >index 75dbf53..4a62669 100644 >--- a/sys/sys/procctl.h >+++ b/sys/sys/procctl.h >@@ -43,6 +43,8 @@ > #define PROC_REAP_KILL 6 /* kill descendants */ > #define PROC_TRACE_CTL 7 /* en/dis ptrace and coredumps */ > #define PROC_TRACE_STATUS 8 /* query tracing status */ >+#define PROC_ASLR_CTL 9 /* en/dis ASLR */ >+#define PROC_ASLR_STATUS 10 /* query ASLR status */ > > /* Operations for PROC_SPROTECT (passed in integer arg). */ > #define PPROT_OP(x) ((x) & 0xf) >@@ -102,6 +104,11 @@ struct procctl_reaper_kill { > #define PROC_TRACE_CTL_DISABLE 2 > #define PROC_TRACE_CTL_DISABLE_EXEC 3 > >+#define PROC_ASLR_FORCE_ENABLE 1 >+#define PROC_ASLR_FORCE_DISABLE 2 >+#define PROC_ASLR_NOFORCE 3 >+#define PROC_ASLR_ACTIVE 0x80000000 >+ > #ifndef _KERNEL > __BEGIN_DECLS > int procctl(idtype_t, id_t, int, void *); >diff --git a/sys/sys/sysent.h b/sys/sys/sysent.h >index a79ff04..c9bd29a 100644 >--- a/sys/sys/sysent.h >+++ b/sys/sys/sysent.h >@@ -138,7 +138,8 @@ struct sysentvec { > #define SV_AOUT 0x008000 /* a.out executable. */ > #define SV_SHP 0x010000 /* Shared page. */ > #define SV_CAPSICUM 0x020000 /* Force cap_enter() on startup. */ >-#define SV_TIMEKEEP 0x040000 >+#define SV_TIMEKEEP 0x040000 /* Shared page timehands. */ >+#define SV_ASLR 0x080000 /* ASLR allowed. */ > > #define SV_ABI_MASK 0xff > #define SV_PROC_FLAG(p, x) ((p)->p_sysent->sv_flags & (x)) >diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c >index 5e22b3e..246e196 100644 >--- a/sys/vm/vm_map.c >+++ b/sys/vm/vm_map.c >@@ -1470,6 +1470,20 @@ vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset, > return (result); > } > >+static const int aslr_pages_rnd_64[2] = {0x1000, 0x10}; >+static const int aslr_pages_rnd_32[2] = {0x100, 0x4}; >+ >+static int aslr_sloppiness = 5; >+SYSCTL_INT(_vm, OID_AUTO, aslr_sloppiness, CTLFLAG_RW, &aslr_sloppiness, 0, >+ ""); >+ >+static int aslr_collapse_anon = 1; >+SYSCTL_INT(_vm, OID_AUTO, aslr_collapse_anon, CTLFLAG_RW, >+ &aslr_collapse_anon, 0, >+ ""); >+ >+#define MAP_32BIT_MAX_ADDR ((vm_offset_t)1 << 31) >+ > /* > * vm_map_find finds an unallocated region in the target address > * map with the given length. 
The search is defined to be >@@ -1485,8 +1499,11 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, > vm_size_t length, vm_offset_t max_addr, int find_space, > vm_prot_t prot, vm_prot_t max, int cow) > { >- vm_offset_t alignment, initial_addr, start; >- int result; >+ vm_map_entry_t prev_entry; >+ vm_offset_t alignment, addr_save, start, start1, rand_max, re; >+ const int *aslr_pages_rnd; >+ int result, do_aslr, pidx; >+ bool en_aslr, anon; > > KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 || > object == NULL, >@@ -1499,21 +1516,86 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, > alignment = (vm_offset_t)1 << (find_space >> 8); > } else > alignment = 0; >- initial_addr = *addr; >+ do_aslr = (map->flags & MAP_ASLR) != 0 ? aslr_sloppiness : 0; >+ en_aslr = do_aslr != 0; >+ anon = object == NULL && (cow & (MAP_INHERIT_SHARE | >+ MAP_STACK_GROWS_UP | MAP_STACK_GROWS_DOWN)) == 0 && >+ prot != PROT_NONE && aslr_collapse_anon; >+ addr_save = *addr; >+ if (en_aslr) { >+ if (vm_map_max(map) > MAP_32BIT_MAX_ADDR && >+ (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR)) >+ aslr_pages_rnd = aslr_pages_rnd_64; >+ else >+ aslr_pages_rnd = aslr_pages_rnd_32; >+ if (find_space != VMFS_NO_SPACE && (map->flags & >+ MAP_ASLR_IGNSTART) != 0) { >+ start = anon ? map->anon_loc : vm_map_min(map); >+ } else { >+ start = anon && *addr == 0 ? map->anon_loc : addr_save; >+ } >+ } else { >+ start = addr_save; >+ } >+ start1 = start; /* for again_any_space restart */ > again: >- start = initial_addr; >+ if (en_aslr && (do_aslr == 0 || (anon && >+ do_aslr == aslr_sloppiness - 1))) { >+ /* >+ * We are either at the last aslr iteration, or anon >+ * coalescing failed on the first try. Retry with >+ * free run. >+ */ >+ if ((map->flags & MAP_ASLR_IGNSTART) != 0) >+ start = vm_map_min(map); >+ else >+ start = addr_save; >+ } >+again_any_space: > vm_map_lock(map); > do { > if (find_space != VMFS_NO_SPACE) { > if (vm_map_findspace(map, start, length, addr) || > (max_addr != 0 && *addr + length > max_addr)) { > vm_map_unlock(map); >+ if (do_aslr > 0) { >+ do_aslr--; >+ goto again; >+ } > if (find_space == VMFS_OPTIMAL_SPACE) { > find_space = VMFS_ANY_SPACE; >- goto again; >+ start = start1; >+ goto again_any_space; > } > return (KERN_NO_SPACE); > } >+ /* >+ * The R step for ASLR. But skip it if we are >+ * trying to coalesce anon memory request. >+ */ >+ if (do_aslr > 0 && >+ !(anon && do_aslr == aslr_sloppiness)) { >+ vm_map_lookup_entry(map, *addr, &prev_entry); >+ if (MAXPAGESIZES > 1 && pagesizes[1] != 0 && >+ (find_space == VMFS_SUPER_SPACE || >+ find_space == VMFS_OPTIMAL_SPACE)) >+ pidx = 1; >+ else >+ pidx = 0; >+ re = prev_entry->next == &map->header ? >+ map->max_offset : prev_entry->next->start; >+ rand_max = ((max_addr != 0 && re > max_addr) ? 
>+ max_addr : re) - *addr - length; >+ rand_max /= pagesizes[pidx]; >+ if (rand_max < aslr_pages_rnd[pidx]) { >+ vm_map_unlock(map); >+ start = re; >+ do_aslr--; >+ goto again; >+ } >+ *addr += (arc4random() % rand_max) * >+ pagesizes[pidx]; >+ } > switch (find_space) { > case VMFS_SUPER_SPACE: > case VMFS_OPTIMAL_SPACE: >@@ -1529,7 +1611,6 @@ again: > } > break; > } >- > start = *addr; > } > if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) { >@@ -1539,8 +1620,15 @@ again: > result = vm_map_insert(map, object, offset, start, > start + length, prot, max, cow); > } >+ if (result != KERN_SUCCESS && do_aslr > 0) { >+ vm_map_unlock(map); >+ do_aslr--; >+ goto again; >+ } > } while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE && > find_space != VMFS_ANY_SPACE); >+ if (result == KERN_SUCCESS && anon) >+ map->anon_loc = *addr + length; > vm_map_unlock(map); > return (result); > } >@@ -3049,6 +3137,9 @@ vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) > > pmap_remove(map->pmap, entry->start, entry->end); > >+ if (entry->end == map->anon_loc) >+ map->anon_loc = entry->prev->end; >+ > /* > * Delete the entry only after removing all pmap > * entries pointing to its pages. (Otherwise, its >diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h >index 2c0a4ad..ffc9fd8 100644 >--- a/sys/vm/vm_map.h >+++ b/sys/vm/vm_map.h >@@ -190,6 +190,7 @@ struct vm_map { > pmap_t pmap; /* (c) Physical map */ > #define min_offset header.start /* (c) */ > #define max_offset header.end /* (c) */ >+ vm_offset_t anon_loc; > int busy; > }; > >@@ -198,6 +199,8 @@ struct vm_map { > */ > #define MAP_WIREFUTURE 0x01 /* wire all future pages */ > #define MAP_BUSY_WAKEUP 0x02 >+#define MAP_ASLR 0x04 /* enabled ASLR */ >+#define MAP_ASLR_IGNSTART 0x08 > > #ifdef _KERNEL > static __inline vm_offset_t
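The sys/sys/procctl.h and sys/kern/kern_procctl.c hunks above add a per-process control interface (PROC_ASLR_CTL / PROC_ASLR_STATUS) on top of the ELF image-activator changes. The sketch below is not part of the attachment; it only illustrates, assuming a kernel and <sys/procctl.h> built with this patch, how the new procctl(2) commands could be exercised from userland.

/* aslr_demo.c -- illustrative sketch only, not part of aslr.9.patch. */
#include <sys/procctl.h>
#include <sys/wait.h>		/* P_PID */
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int ctl, status;

	/* Query the force state and whether the current image was randomized. */
	if (procctl(P_PID, getpid(), PROC_ASLR_STATUS, &status) == -1)
		err(1, "PROC_ASLR_STATUS");
	printf("force state: %d, ASLR %s for this image\n",
	    status & ~PROC_ASLR_ACTIVE,
	    (status & PROC_ASLR_ACTIVE) != 0 ? "active" : "inactive");

	/* Force-disable randomization for images exec(2)ed afterwards. */
	ctl = PROC_ASLR_FORCE_DISABLE;
	if (procctl(P_PID, getpid(), PROC_ASLR_CTL, &ctl) == -1)
		err(1, "PROC_ASLR_CTL");
	return (0);
}

As implemented in the imgact_elf.c hunk, the per-process P2_ASLR_ENABLE/P2_ASLR_DISABLE state set via procctl(2) overrides the kern.elf64.aslr_enabled / kern.elf32.aslr_enabled and pie_aslr_enabled defaults at exec time, is inherited across fork(2) through the kern_fork.c change, and PROC_ASLR_ACTIVE reports whether MAP_ASLR is set on the process's vm_map.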
Attachments on bug 208580: 169039 | 169071 | 171693 | 201539