@@ -56,6 +56,10 @@
 #include <machine/segments.h>
 #endif
 
+#ifdef __amd64__
+#include <machine/fpu.h>
+#endif
+
 #include <dev/usb/usb.h>
 
 #include <compat/ndis/pe_var.h>
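On amd64 the new conditional include pulls in <machine/fpu.h>, which declares the fpu_kern_alloc_ctx()/fpu_kern_free_ctx() and fpu_kern_enter()/fpu_kern_leave() interfaces used by the FPU context management added below.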
@@ -66,6 +70,19 @@
 #include <compat/ndis/hal_var.h>
 #include <compat/ndis/usbd_var.h>
 
+#ifdef __amd64__
+struct fpu_cc_ent {
+	struct fpu_kern_ctx *ctx;
+	LIST_ENTRY(fpu_cc_ent) entries;
+};
+static LIST_HEAD(fpu_ctx_free, fpu_cc_ent) fpu_free_head =
+    LIST_HEAD_INITIALIZER(fpu_free_head);
+static LIST_HEAD(fpu_ctx_busy, fpu_cc_ent) fpu_busy_head =
+    LIST_HEAD_INITIALIZER(fpu_busy_head);
+static struct mtx fpu_free_mtx;
+static struct mtx fpu_busy_mtx;
+#endif
+
 static struct mtx drvdb_mtx;
 static STAILQ_HEAD(drvdb, drvdb_ent) drvdb_head;
 
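These declarations introduce a small pool of kernel FPU contexts: fpu_free_head caches contexts available for reuse, and fpu_busy_head tracks contexts currently lent to an in-flight call into Windows driver code, each list guarded by its own mutex. Pooling avoids allocating and freeing an fpu_kern_ctx on every call.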
@@ -96,6 +113,13 @@
 	mtx_init(&drvdb_mtx, "Windows driver DB lock",
 	    "Windows internal lock", MTX_DEF);
 
+#ifdef __amd64__
+	LIST_INIT(&fpu_free_head);
+	LIST_INIT(&fpu_busy_head);
+	mtx_init(&fpu_free_mtx, "free fpu context list lock", NULL, MTX_DEF);
+	mtx_init(&fpu_busy_mtx, "busy fpu context list lock", NULL, MTX_DEF);
+#endif
+
 	/*
 	 * PCI and pccard devices don't need to use IRPs to
 	 * interact with their bus drivers (usually), so our
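The pool's list heads and mutexes are initialized here during library setup, next to the existing Windows driver database lock.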
@@ -130,6 +154,9 @@
 windrv_libfini(void)
 {
 	struct drvdb_ent *d;
+#ifdef __amd64__
+	struct fpu_cc_ent *ent;
+#endif
 
 	mtx_lock(&drvdb_mtx);
 	while(STAILQ_FIRST(&drvdb_head) != NULL) {
@@ -148,6 +175,18 @@
 	smp_rendezvous(NULL, x86_oldldt, NULL, NULL);
 	ExFreePool(my_tids);
 #endif
+#ifdef __amd64__
+	while ((ent = LIST_FIRST(&fpu_free_head)) != NULL) {
+		LIST_REMOVE(ent, entries);
+		fpu_kern_free_ctx(ent->ctx);
+		free(ent, M_DEVBUF);
+	}
+	mtx_destroy(&fpu_free_mtx);
+
+	ent = LIST_FIRST(&fpu_busy_head);
+	KASSERT(ent == NULL, ("busy fpu context list is not empty"));
+	mtx_destroy(&fpu_busy_mtx);
+#endif
 	return (0);
 }
 
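On teardown the free list is drained and each cached context is released with fpu_kern_free_ctx(), while a KASSERT verifies that the busy list is empty: an entry remaining there would mean a call into a Windows driver was still in flight (or an entry was leaked) at module unload.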
@@ -613,6 +652,148 @@
 
 	return (0);
 }
+
+static struct fpu_cc_ent *
+request_fpu_cc_ent(void)
+{
+	struct fpu_cc_ent *ent;
+
+	mtx_lock(&fpu_free_mtx);
+	if ((ent = LIST_FIRST(&fpu_free_head)) != NULL) {
+		LIST_REMOVE(ent, entries);
+		mtx_unlock(&fpu_free_mtx);
+		mtx_lock(&fpu_busy_mtx);
+		LIST_INSERT_HEAD(&fpu_busy_head, ent, entries);
+		mtx_unlock(&fpu_busy_mtx);
+		return (ent);
+	}
+	mtx_unlock(&fpu_free_mtx);
+
+	if ((ent = malloc(sizeof(struct fpu_cc_ent), M_DEVBUF, M_NOWAIT |
+	    M_ZERO)) != NULL) {
+		ent->ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL |
+		    FPU_KERN_NOWAIT);
+		if (ent->ctx != NULL) {
+			mtx_lock(&fpu_busy_mtx);
+			LIST_INSERT_HEAD(&fpu_busy_head, ent, entries);
+			mtx_unlock(&fpu_busy_mtx);
+		} else {
+			free(ent, M_DEVBUF);
+			ent = NULL;
+		}
+	}
+
+	return (ent);
+}
+
+static void
+release_fpu_cc_ent(struct fpu_cc_ent *ent)
+{
+	mtx_lock(&fpu_busy_mtx);
+	LIST_REMOVE(ent, entries);
+	mtx_unlock(&fpu_busy_mtx);
+	mtx_lock(&fpu_free_mtx);
+	LIST_INSERT_HEAD(&fpu_free_head, ent, entries);
+	mtx_unlock(&fpu_free_mtx);
+}
+
+uint64_t
+_x86_64_call1(void *fn, uint64_t a)
+{
+	struct fpu_cc_ent *ent;
+	uint64_t ret;
+
+	if ((ent = request_fpu_cc_ent()) == NULL)
+		return (ENOMEM);
+	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
+	ret = x86_64_call1(fn, a);
+	fpu_kern_leave(curthread, ent->ctx);
+	release_fpu_cc_ent(ent);
+
+	return (ret);
+}
+
+uint64_t
+_x86_64_call2(void *fn, uint64_t a, uint64_t b)
+{
+	struct fpu_cc_ent *ent;
+	uint64_t ret;
+
+	if ((ent = request_fpu_cc_ent()) == NULL)
+		return (ENOMEM);
+	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
+	ret = x86_64_call2(fn, a, b);
+	fpu_kern_leave(curthread, ent->ctx);
+	release_fpu_cc_ent(ent);
+
+	return (ret);
+}
+
+uint64_t
+_x86_64_call3(void *fn, uint64_t a, uint64_t b, uint64_t c)
+{
+	struct fpu_cc_ent *ent;
+	uint64_t ret;
+
+	if ((ent = request_fpu_cc_ent()) == NULL)
+		return (ENOMEM);
+	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
+	ret = x86_64_call3(fn, a, b, c);
+	fpu_kern_leave(curthread, ent->ctx);
+	release_fpu_cc_ent(ent);
+
+	return (ret);
+}
+
+uint64_t
+_x86_64_call4(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d)
+{
+	struct fpu_cc_ent *ent;
+	uint64_t ret;
+
+	if ((ent = request_fpu_cc_ent()) == NULL)
+		return (ENOMEM);
+	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
+	ret = x86_64_call4(fn, a, b, c, d);
+	fpu_kern_leave(curthread, ent->ctx);
+	release_fpu_cc_ent(ent);
+
+	return (ret);
+}
+
+uint64_t
+_x86_64_call5(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d,
+    uint64_t e)
+{
+	struct fpu_cc_ent *ent;
+	uint64_t ret;
+
+	if ((ent = request_fpu_cc_ent()) == NULL)
+		return (ENOMEM);
+	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
+	ret = x86_64_call5(fn, a, b, c, d, e);
+	fpu_kern_leave(curthread, ent->ctx);
+	release_fpu_cc_ent(ent);
+
+	return (ret);
+}
+
+uint64_t
+_x86_64_call6(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d,
+    uint64_t e, uint64_t f)
+{
+	struct fpu_cc_ent *ent;
+	uint64_t ret;
+
+	if ((ent = request_fpu_cc_ent()) == NULL)
+		return (ENOMEM);
+	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
+	ret = x86_64_call6(fn, a, b, c, d, e, f);
+	fpu_kern_leave(curthread, ent->ctx);
+	release_fpu_cc_ent(ent);
+
+	return (ret);
+}
 #endif /* __amd64__ */
 
 
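Each _x86_64_callN() trampoline follows the same pattern: borrow a context from the pool, bracket the raw assembly call (x86_64_callN) with fpu_kern_enter()/fpu_kern_leave() so the Windows code can safely execute FPU/SSE instructions, then return the context to the pool. A minimal caller-side sketch follows; the WIN_CALL2 macro name and the variables in the example are illustrative assumptions, not part of this patch (the real wrapper macros live elsewhere in the NDIS compatibility headers).

	/*
	 * Illustrative sketch only -- not part of the patch.  WIN_CALL2 is
	 * a hypothetical wrapper; the real macro names are defined
	 * elsewhere in the NDIS compatibility headers.
	 */
	#define	WIN_CALL2(fn, a, b)					\
		_x86_64_call2((void *)(fn), (uint64_t)(a), (uint64_t)(b))

	/* Call a driver's two-argument entry point with FPU state saved. */
	uint64_t status;
	status = WIN_CALL2(entry_fn, drv_obj, reg_path);

Note that when the pool cannot supply a context the trampolines return ENOMEM through the same uint64_t channel as the wrapped function's own return value; the M_NOWAIT/FPU_KERN_NOWAIT allocation policy keeps this path usable from contexts that must not sleep.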