@@ -1,9 +1,11 @@
 /*-
  * Copyright (c) 2013 The FreeBSD Foundation
+ * Copyright 2021 David Sebek <dasebek@gmail.com>
  * All rights reserved.
  *
  * This software was developed by Benno Rice under sponsorship from
  * the FreeBSD Foundation.
+ *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -31,34 +33,53 @@
 #include <machine/asmacros.h>
 
 	.text
-	.globl	amd64_tramp
+	.globl	amd64_tramp_inline
 
 /*
- * void amd64_tramp(uint64_t stack, void *copy_finish, uint64_t kernend,
- *     uint64_t modulep, uint64_t pagetable, uint64_t entry)
+ * void amd64_tramp_inline(uint64_t stack %rdi, uint64_t kernend %rsi,
+ *     uint64_t modulep %rdx, uint64_t pagetable %rcx, uint64_t entry %r8,
+ *     uint64_t copy_dst %r9, uint64_t copy_src 8(%rsp),
+ *     uint64_t copy_src_end 16(%rsp))
 */
-amd64_tramp:
+amd64_tramp_inline:
 	cli			/* Make sure we don't get interrupted. */
-	movq	%rdi,%rsp	/* Switch to our temporary stack. */
 
-	movq	%rdx,%r12	/* Stash the kernel values for later. */
-	movq	%rcx,%r13
-	movq	%r8,%r14
-	movq	%r9,%r15
+	/*
+	 * Copy the kernel from the staging area to the expected location
+	 * in memory. The following code is equivalent to the efi_copy_finish
+	 * function that amd64_tramp used to call. Inlining this code avoids
+	 * a scenario when the system froze because efi_copy_finish
+	 * overwrote its own code that just happened to be located somewhere
+	 * in the destination range.
+	 *
+	 * while (copy_src < copy_src_end) *copy_dst++ = *copy_src++;
+	 */
+	movq	8(%rsp), %rax	/* rax = copy_src */
+	movq	16(%rsp), %r10	/* r10 = copy_src_end */
+	cmpq	%r10, %rax
+	jnb	copy_done
+	subq	%rax, %r9	/* r9 = copy_dst - copy_src */
+loop:
+	movq	(%rax), %r11
+	movq	%r11, (%rax,%r9)
+	addq	$8, %rax
+	cmpq	%rax, %r10
+	ja	loop
+copy_done:
 
-	callq	*%rsi		/* Call copy_finish so we're all ready to go. */
+	movq	%rdi,%rsp	/* Switch to our temporary stack. */
 
-	pushq	%r12		/* Push kernend. */
-	salq	$32,%r13	/* Shift modulep and push it. */
-	pushq	%r13
-	pushq	%r15		/* Push the entry address. */
-	movq	%r14,%cr3	/* Switch page tables. */
+	pushq	%rsi		/* Push kernend. */
+	salq	$32,%rdx	/* Shift modulep and push it. */
+	pushq	%rdx
+	pushq	%r8		/* Push the entry address. */
+	movq	%rcx,%cr3	/* Switch page tables. */
 	ret			/* "Return" to kernel entry. */
 
 	ALIGN_TEXT
-amd64_tramp_end:
+amd64_tramp_inline_end:
 
 	.data
-	.globl	amd64_tramp_size
-amd64_tramp_size:
-	.long	amd64_tramp_end-amd64_tramp
+	.globl	amd64_tramp_inline_size
+amd64_tramp_inline_size:
+	.long	amd64_tramp_inline_end-amd64_tramp_inline
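
For readers tracing the new argument handling: the register and stack slots named in the updated comment follow the SysV AMD64 calling convention, so the first six integer arguments arrive in %rdi, %rsi, %rdx, %rcx, %r8, and %r9, and the seventh and eighth sit at 8(%rsp) and 16(%rsp) once the call instruction has pushed its return address. Below is a minimal C sketch of the contract and of the inlined copy loop; the prototype is inferred from the comment block in the patch, and copy_kernel_sketch is a hypothetical name used only for illustration, not a function in the loader.

#include <stdint.h>

/*
 * Prototype inferred from the comment block in the patch. Per the
 * SysV AMD64 ABI, stack through copy_dst land in %rdi, %rsi, %rdx,
 * %rcx, %r8, and %r9; copy_src and copy_src_end are read from
 * 8(%rsp) and 16(%rsp).
 */
void amd64_tramp_inline(uint64_t stack, uint64_t kernend, uint64_t modulep,
    uint64_t pagetable, uint64_t entry, uint64_t copy_dst,
    uint64_t copy_src, uint64_t copy_src_end);

/*
 * C equivalent of the inlined copy loop (a hypothetical helper, for
 * illustration only). The assembly copies 8 bytes per iteration and
 * keeps only the constant offset copy_dst - copy_src in %r9, indexing
 * both buffers off the advancing source pointer in %rax.
 */
static void
copy_kernel_sketch(uint64_t copy_dst, uint64_t copy_src,
    uint64_t copy_src_end)
{
	uint64_t *dst = (uint64_t *)copy_dst;
	const uint64_t *src = (const uint64_t *)copy_src;
	const uint64_t *end = (const uint64_t *)copy_src_end;

	while (src < end)
		*dst++ = *src++;
}

Note that the loop uses only registers and relative branches, so the trampoline has no dependence on where it runs from; that is the point of inlining it, since the old efi_copy_finish call could land in loader code that the copy itself was about to overwrite.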