Lines 137-142
SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,

 #endif
 #endif

+static int __elfN(aslr_enabled) = 1;
+SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
+    aslr_enabled, CTLFLAG_RWTUN, &__elfN(aslr_enabled), 0,
+    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable aslr");
+
+static int __elfN(pie_aslr_enabled) = 1;
+SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
+    pie_aslr_enabled, CTLFLAG_RWTUN, &__elfN(pie_aslr_enabled), 0,
+    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable aslr for PIE binaries");
+
+static int __elfN(aslr_care_sbrk) = 0;
+SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
+    aslr_care_sbrk, CTLFLAG_RW, &__elfN(aslr_care_sbrk), 0,
+    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used");
+
 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

 #define trunc_page_ps(va, ps)   ((va) & ~(ps - 1))
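
A note on the macro plumbing above: imgact_elf.c is compiled once per ELF word size, so each knob materializes twice, under kern.elf64.* and kern.elf32.*. The demo below is illustration only, not part of the patch; the __elfN()/__CONCAT()/__XSTRING() definitions are copied in the spirit of sys/sys/imgact_elf.h and sys/cdefs.h, with the word size pinned to 64:

    /* Standalone demo of the name expansion (cc -o demo demo.c). */
    #include <stdio.h>

    #define __CONCAT1(x, y)  x ## y
    #define __CONCAT(x, y)   __CONCAT1(x, y)
    #define __STRING(x)      #x
    #define __XSTRING(x)     __STRING(x)
    #define __ELF_WORD_SIZE  64
    #define __elfN(x) __CONCAT(__CONCAT(__CONCAT(elf, __ELF_WORD_SIZE), _), x)

    static int __elfN(aslr_enabled) = 1;   /* expands to elf64_aslr_enabled */

    int
    main(void)
    {
        /* Prints "ELF64: enable aslr = 1", mirroring the sysctl string. */
        printf("%s: enable aslr = %d\n",
            __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)), elf64_aslr_enabled);
        return (0);
    }

Since the first two knobs are CTLFLAG_RWTUN, they can be set as loader tunables or at run time, e.g. sysctl kern.elf64.aslr_enabled=0.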
Lines 453-462
__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,

         * The mapping is not page aligned. This means we have
         * to copy the data. Sigh.
         */
-       rv = vm_map_find(map, NULL, 0, &start, end - start, 0,
-           VMFS_NO_SPACE, prot | VM_PROT_WRITE, VM_PROT_ALL,
-           0);
-       if (rv)
+       vm_map_lock(map);
+       rv = vm_map_insert(map, NULL, 0, start, end,
+           prot | VM_PROT_WRITE, VM_PROT_ALL, 0);
+       vm_map_unlock(map);
+       if (rv != KERN_SUCCESS)
                return (rv);
        if (object == NULL)
                return (KERN_SUCCESS);
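
Functionally, vm_map_find() with VMFS_NO_SPACE already mapped at the requested address; switching to vm_map_insert() under an explicit lock states that intent literally, keeps the path clear of vm_map_find()'s placement machinery, and tightens the error check to KERN_SUCCESS. A loose userland illustration of the "map exactly [start, end) or fail" contract, assuming FreeBSD's MAP_EXCL flag and a hypothetical address (not the kernel path):

    #include <sys/mman.h>
    #include <err.h>
    #include <stdio.h>

    int
    main(void)
    {
        void *start = (void *)0x200000000UL;    /* hypothetical address */
        size_t len = 0x10000;

        /* Map exactly here or fail outright, like vm_map_insert(). */
        if (mmap(start, len, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_EXCL, -1, 0) ==
            MAP_FAILED)
            err(1, "first fixed mapping");
        printf("mapped %p exactly\n", start);

        /* A second exclusive claim on the same range must fail. */
        if (mmap(start, len, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_EXCL, -1, 0) ==
            MAP_FAILED)
            warn("second fixed mapping fails, as expected");
        return (0);
    }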
Lines 471-479
__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,

                        error = copyout((caddr_t)sf_buf_kva(sf) + off,
                            (caddr_t)start, sz);
                        vm_imgact_unmap_page(sf);
-                       if (error) {
+                       if (error != 0)
                                return (KERN_FAILURE);
-                       }
                        offset += sz;
                }
                rv = KERN_SUCCESS;
Lines 747-752
fail:

        return (error);
 }

+static u_long
+__CONCAT(rnd_, __elfN(base))(u_long base, u_long minv, u_long maxv,
+    u_int align)
+{
+       u_long rbase, res;
+
+       arc4rand(&rbase, sizeof(rbase), 0);
+       res = base + rbase % (maxv - minv);
+       res &= ~(u_long)(align - 1);
+       return (res);
+}
+
 static int
 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
 {
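
rnd_base() draws a roughly uniform value in [base, base + (maxv - minv)) and then truncates it down to the power-of-two alignment, which can pull the result just below base; minv only sets the window width relative to maxv. Note the u_long cast added to the mask above: without it, the complement of a u_int would zero-extend and wipe bits 32-63 of a 64-bit address. A userland sketch of the same arithmetic, with arc4random_buf(3) standing in for arc4rand(9) (illustration only):

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long
    rnd_base(unsigned long base, unsigned long minv, unsigned long maxv,
        unsigned int align)
    {
        unsigned long rbase, res;

        arc4random_buf(&rbase, sizeof(rbase));
        res = base + rbase % (maxv - minv);
        res &= ~(unsigned long)(align - 1);    /* the cast discussed above */
        return (res);
    }

    int
    main(void)
    {
        unsigned long align = 1UL << 21;    /* e.g. a 2 MB superpage */
        unsigned long base = 0x10000, minv = 0x30000, maxv = 1UL << 34, r;
        int i;

        for (i = 0; i < 1000000; i++) {
            r = rnd_base(base, minv, maxv, align);
            assert(r >= (base & ~(align - 1)));
            assert(r < base + (maxv - minv));
            assert((r & (align - 1)) == 0);
        }
        printf("all draws inside the window and aligned\n");
        return (0);
    }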
Lines 755-760
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)

        const Elf_Phdr *phdr;
        Elf_Auxargs *elf_auxargs;
        struct vmspace *vmspace;
+       vm_map_t map;
        const char *err_str, *newinterp;
        char *interp, *interp_buf, *path;
        Elf_Brandinfo *brand_info;
Lines 762-767
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)

        vm_prot_t prot;
        u_long text_size, data_size, total_size, text_addr, data_addr;
        u_long seg_size, seg_addr, addr, baddr, et_dyn_addr, entry, proghdr;
+       u_long maxalign, mapsz, maxv;
        int32_t osrel;
        int error, i, n, interp_name_len, have_interp;

Lines 803-814
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)

        err_str = newinterp = NULL;
        interp = interp_buf = NULL;
        td = curthread;
+       maxalign = PAGE_SIZE;
+       mapsz = 0;

        for (i = 0; i < hdr->e_phnum; i++) {
                switch (phdr[i].p_type) {
                case PT_LOAD:
                        if (n == 0)
                                baddr = phdr[i].p_vaddr;
+                       if (phdr[i].p_align > maxalign)
+                               maxalign = phdr[i].p_align;
+                       mapsz += phdr[i].p_memsz;
                        n++;
                        break;
                case PT_INTERP:
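
The PT_LOAD pass now also records the largest segment alignment, which the randomized base must respect (via the 1UL << flsl(maxalign) argument later), and sums p_memsz so the placement code knows how much address space the image needs. A hypothetical userland equivalent over an ELF64 file, using only <elf.h> (illustration, minimal error handling):

    #include <elf.h>
    #include <err.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(int argc, char **argv)
    {
        Elf64_Ehdr eh;
        Elf64_Phdr ph;
        uint64_t maxalign = 4096, mapsz = 0;    /* PAGE_SIZE floor */
        int fd, i;

        if (argc != 2)
            errx(1, "usage: %s elf64-file", argv[0]);
        if ((fd = open(argv[1], O_RDONLY)) == -1)
            err(1, "open");
        if (read(fd, &eh, sizeof(eh)) != sizeof(eh))
            err(1, "short ehdr");
        for (i = 0; i < eh.e_phnum; i++) {
            if (pread(fd, &ph, sizeof(ph),
                eh.e_phoff + (off_t)i * eh.e_phentsize) != sizeof(ph))
                err(1, "short phdr");
            if (ph.p_type != PT_LOAD)
                continue;
            if (ph.p_align > maxalign)
                maxalign = ph.p_align;
            mapsz += ph.p_memsz;
        }
        printf("maxalign 0x%jx mapsz 0x%jx\n",
            (uintmax_t)maxalign, (uintmax_t)mapsz);
        return (0);
    }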
Lines 862-867
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)

                error = ENOEXEC;
                goto ret;
        }
+       sv = brand_info->sysvec;
+       et_dyn_addr = 0;
        if (hdr->e_type == ET_DYN) {
                if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) {
                        uprintf("Cannot execute shared object\n");
Lines 872-884
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)

                 * Honour the base load address from the dso if it is
                 * non-zero for some reason.
                 */
-               if (baddr == 0)
-                       et_dyn_addr = ET_DYN_LOAD_ADDR;
-               else
-                       et_dyn_addr = 0;
-       } else
-               et_dyn_addr = 0;
-       sv = brand_info->sysvec;
+               if (baddr == 0) {
+                       if ((sv->sv_flags & SV_ASLR) == 0)
+                               et_dyn_addr = ET_DYN_LOAD_ADDR;
+                       else if ((__elfN(pie_aslr_enabled) &&
+                           (imgp->proc->p_flag2 & P2_ASLR_DISABLE) == 0) ||
+                           (imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0)
+                               et_dyn_addr = 1;
+                       else
+                               et_dyn_addr = ET_DYN_LOAD_ADDR;
+               }
+       }
        if (interp != NULL && brand_info->interp_newpath != NULL)
                newinterp = brand_info->interp_newpath;

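
Two things make this hunk read better than it diffs. First, sv = brand_info->sysvec was hoisted into the previous hunk precisely so sv->sv_flags is available here. Second, et_dyn_addr == 1 serves as a sentinel for "randomize once the new vmspace exists"; real load addresses are page aligned, so 1 can never collide with one. The decision order, restated as a hypothetical standalone function (the P2_* bits are the per-process overrides, presumably set via procctl(2) elsewhere in the series; the ET_DYN_LOAD_ADDR value is illustrative):

    #include <stdbool.h>

    #define ET_DYN_LOAD_ADDR 0x400000UL    /* illustrative; per-arch in reality */

    /* Returns the et_dyn_addr seed for a PIE (ET_DYN, baddr == 0) image. */
    static unsigned long
    pie_base_policy(bool sv_aslr, bool pie_knob, bool p2_enable, bool p2_disable)
    {
        if (!sv_aslr)                       /* ABI lacks SV_ASLR support */
            return (ET_DYN_LOAD_ADDR);
        if ((pie_knob && !p2_disable) || p2_enable)
            return (1);                     /* sentinel: randomize later */
        return (ET_DYN_LOAD_ADDR);          /* fixed legacy base */
    }

Note the precedence: an explicit P2_ASLR_ENABLE wins even when the kern.elfN.pie_aslr_enabled knob is off, and P2_ASLR_DISABLE suppresses only the knob, not the explicit enable.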
Lines 897-902
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)


        error = exec_new_vmspace(imgp, sv);
        imgp->proc->p_sysent = sv;
+
+       vmspace = imgp->proc->p_vmspace;
+       map = &vmspace->vm_map;
+
+       if ((sv->sv_flags & SV_ASLR) == 0 ||
+           (imgp->proc->p_flag2 & P2_ASLR_DISABLE) != 0) {
+               KASSERT(et_dyn_addr != 1, ("et_dyn_addr == 1 and !ASLR"));
+       } else if ((imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0 ||
+           (__elfN(aslr_enabled) && hdr->e_type != ET_DYN) ||
+           et_dyn_addr == 1) {
+               vm_map_lock(map);
+               map->flags |= MAP_ASLR;
+               /*
+                * If the user does not care about sbrk, utilize the bss
+                * grow region for mappings as well.  We can select
+                * the base for the image anywhere and still not suffer
+                * from fragmentation.
+                */
+               if (!__elfN(aslr_care_sbrk) ||
+                   (imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0)
+                       map->flags |= MAP_ASLR_IGNSTART;
+               vm_map_unlock(map);
+       }
+       maxv = vm_map_max(map) - lim_max(td, RLIMIT_STACK);
+       if (et_dyn_addr == 1) {
+               KASSERT((map->flags & MAP_ASLR) != 0,
+                   ("et_dyn_addr but !MAP_ASLR"));
+               et_dyn_addr = __CONCAT(rnd_, __elfN(base))(vm_map_min(map),
+                   vm_map_min(map) + mapsz + lim_max(td, RLIMIT_DATA),
+                   /* reserve half of the address space to interpreter */
+                   maxv / 2, 1UL << flsl(maxalign));
+       }

        vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
        if (error != 0)
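
The budget arithmetic deserves a gloss. MAP_ASLR marks the map so later placement decisions randomize; MAP_ASLR_IGNSTART additionally lets them use the gap normally reserved for sbrk growth, per the comment. For the base itself, maxv keeps the stack's hard rlimit free at the top of the map, minv reserves room at the bottom for the image (mapsz) plus the brk region (RLIMIT_DATA), and the cap at maxv / 2 holds the upper half back for the interpreter; 1UL << flsl(maxalign) is a power of two no smaller than maxalign. A toy computation of the resulting window, with illustrative amd64-ish numbers (the real values come from the map and the rlimits at exec time):

    #include <stdio.h>

    int
    main(void)
    {
        unsigned long vm_min = 0x200000UL;          /* illustrative */
        unsigned long vm_max = 0x7fffffffffffUL;    /* illustrative */
        unsigned long stack_lim = 512UL << 20;      /* RLIMIT_STACK hard */
        unsigned long data_lim = 32UL << 30;        /* RLIMIT_DATA hard */
        unsigned long mapsz = 8UL << 20;            /* image memsz total */
        unsigned long maxv, minv;

        maxv = vm_max - stack_lim;
        minv = vm_min + mapsz + data_lim;
        /* rnd_base(vm_min, minv, maxv / 2, align) draws from this span. */
        printf("window: %#lx bytes above vm_min\n", maxv / 2 - minv);
        return (0);
    }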
Lines 989-995
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)

                goto ret;
        }

-       vmspace = imgp->proc->p_vmspace;
        vmspace->vm_tsize = text_size >> PAGE_SHIFT;
        vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
        vmspace->vm_dsize = data_size >> PAGE_SHIFT;
Lines 1010-1015
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)

        if (interp != NULL) {
                have_interp = FALSE;
                VOP_UNLOCK(imgp->vp, 0);
+               if ((map->flags & MAP_ASLR) != 0) {
+                       addr = __CONCAT(rnd_, __elfN(base))(addr, addr,
+                           maxv, PAGE_SIZE);
+               }
                if (brand_info->emul_path != NULL &&
                    brand_info->emul_path[0] != '\0') {
                        path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
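
Here addr holds the interpreter's default base, and with base == minv the helper degenerates to a page-aligned draw from [addr, maxv): the upper half of the address space deliberately left vacant when the main image was placed. A minimal sketch of that degenerate case (illustration only; PAGE_SIZE hardcoded):

    #include <stdlib.h>

    #define PAGE_SIZE 4096UL    /* illustrative */

    /* Pick a page-aligned interpreter base in [addr, maxv); addr is
     * assumed page aligned, as it is at this point in the imgact. */
    static unsigned long
    interp_base(unsigned long addr, unsigned long maxv)
    {
        unsigned long rbase;

        arc4random_buf(&rbase, sizeof(rbase));
        return ((addr + rbase % (maxv - addr)) & ~(PAGE_SIZE - 1));
    }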