FreeBSD Bugzilla – Attachment 159741 Details for Bug 194952: uart(4) needs BayTrail support for Minnowboard Max
Description: [patch] Proposed fix for BayTrail as low-level console
Filename:    console.diff
MIME Type:   text/plain
Creator:     Marcel Moolenaar
Created:     2015-08-10 22:06:53 UTC
Size:        19.43 KB
Flags:       patch, obsolete
>Index: sys/amd64/amd64/machdep.c >=================================================================== >--- sys/amd64/amd64/machdep.c (revision 286461) >+++ sys/amd64/amd64/machdep.c (working copy) >@@ -1625,38 +1625,6 @@ > MODINFO_METADATA | MODINFOMD_EFI_MAP) != NULL) > vty_set_preferred(VTY_VT); > >- /* >- * Initialize the console before we print anything out. >- */ >- cninit(); >- >-#ifdef DEV_ISA >-#ifdef DEV_ATPIC >- elcr_probe(); >- atpic_startup(); >-#else >- /* Reset and mask the atpics and leave them shut down. */ >- atpic_reset(); >- >- /* >- * Point the ICU spurious interrupt vectors at the APIC spurious >- * interrupt handler. >- */ >- setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0); >- setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0); >-#endif >-#else >-#error "have you forgotten the isa device?"; >-#endif >- >- kdb_init(); >- >-#ifdef KDB >- if (boothowto & RB_KDB) >- kdb_enter(KDB_WHY_BOOTFLAGS, >- "Boot flags requested debugger"); >-#endif >- > identify_cpu(); /* Final stage of CPU initialization */ > initializecpu(); /* Initialize CPU registers */ > initializecpucache(); >@@ -1693,6 +1661,35 @@ > > /* now running on new page tables, configured,and u/iom is accessible */ > >+ cninit(); >+ >+#ifdef DEV_ISA >+#ifdef DEV_ATPIC >+ elcr_probe(); >+ atpic_startup(); >+#else >+ /* Reset and mask the atpics and leave them shut down. */ >+ atpic_reset(); >+ >+ /* >+ * Point the ICU spurious interrupt vectors at the APIC spurious >+ * interrupt handler. >+ */ >+ setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0); >+ setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0); >+#endif >+#else >+#error "have you forgotten the isa device?"; >+#endif >+ >+ kdb_init(); >+ >+#ifdef KDB >+ if (boothowto & RB_KDB) >+ kdb_enter(KDB_WHY_BOOTFLAGS, >+ "Boot flags requested debugger"); >+#endif >+ > msgbufinit(msgbufp, msgbufsize); > fpuinit(); > >Index: sys/amd64/amd64/pmap.c >=================================================================== >--- sys/amd64/amd64/pmap.c (revision 286461) >+++ sys/amd64/amd64/pmap.c (working copy) >@@ -363,6 +363,18 @@ > static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */ > static int ndmpdpphys; /* number of DMPDPphys pages */ > >+/* >+ * pmap_mapdev support pre initialization (i.e. 
console) >+ */ >+#define PMAP_PREINIT_MAPPING_COUNT 8 >+static struct pmap_preinit_mapping { >+ vm_paddr_t pa; >+ vm_offset_t va; >+ vm_size_t sz; >+ int mode; >+} pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT]; >+static int pmap_initialized; >+ > static struct rwlock_padalign pvh_global_lock; > > /* >@@ -1016,6 +1028,7 @@ > void > pmap_init(void) > { >+ struct pmap_preinit_mapping *ppim; > vm_page_t mpte; > vm_size_t s; > int i, pv_npg; >@@ -1083,6 +1096,17 @@ > M_WAITOK | M_ZERO); > for (i = 0; i < pv_npg; i++) > TAILQ_INIT(&pv_table[i].pv_list); >+ >+ pmap_initialized = 1; >+ if (!bootverbose) >+ return; >+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { >+ ppim = pmap_preinit_mapping + i; >+ if (ppim->va == 0) >+ continue; >+ printf("PPIM %u: PA=%#lx, VA=%#lx, size=%#lx, mode=%#x\n", i, >+ ppim->pa, ppim->va, ppim->sz, ppim->mode); >+ } > } > > static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0, >@@ -6105,24 +6129,54 @@ > void * > pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) > { >+ struct pmap_preinit_mapping *ppim; > vm_offset_t va, offset; > vm_size_t tmpsize; >+ int i; > >- /* >- * If the specified range of physical addresses fits within the direct >- * map window, use the direct map. >- */ >- if (pa < dmaplimit && pa + size < dmaplimit) { >- va = PHYS_TO_DMAP(pa); >- if (!pmap_change_attr(va, size, mode)) >- return ((void *)va); >- } > offset = pa & PAGE_MASK; > size = round_page(offset + size); >- va = kva_alloc(size); >- if (!va) >- panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); > pa = trunc_page(pa); >+ >+ if (!pmap_initialized) { >+ va = 0; >+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { >+ ppim = pmap_preinit_mapping + i; >+ if (ppim->va == 0) { >+ ppim->pa = pa; >+ ppim->sz = size; >+ ppim->mode = mode; >+ ppim->va = virtual_avail; >+ virtual_avail += size; >+ va = ppim->va; >+ break; >+ } >+ } >+ if (va == 0) >+ panic("%s: too many preinit mappings", __func__); >+ } else { >+ /* >+ * If we have a preinit mapping, re-use it. >+ */ >+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { >+ ppim = pmap_preinit_mapping + i; >+ if (ppim->pa == pa && ppim->sz == size && >+ ppim->mode == mode) >+ return ((void *)(ppim->va + offset)); >+ } >+ /* >+ * If the specified range of physical addresses fits within >+ * the direct map window, use the direct map. 
>+ */ >+ if (pa < dmaplimit && pa + size < dmaplimit) { >+ va = PHYS_TO_DMAP(pa); >+ if (!pmap_change_attr(va, size, mode)) >+ return ((void *)(va + offset)); >+ } >+ va = kva_alloc(size); >+ if (va == 0) >+ panic("%s: Couldn't allocate KVA", __func__); >+ } > for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) > pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); > pmap_invalidate_range(kernel_pmap, va, va + tmpsize); >@@ -6147,15 +6201,32 @@ > void > pmap_unmapdev(vm_offset_t va, vm_size_t size) > { >- vm_offset_t base, offset; >+ struct pmap_preinit_mapping *ppim; >+ vm_offset_t offset; >+ int i; > > /* If we gave a direct map region in pmap_mapdev, do nothing */ > if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) > return; >- base = trunc_page(va); > offset = va & PAGE_MASK; > size = round_page(offset + size); >- kva_free(base, size); >+ va = trunc_page(va); >+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { >+ ppim = pmap_preinit_mapping + i; >+ if (ppim->va == va && ppim->sz == size) { >+ if (pmap_initialized) >+ return; >+ ppim->pa = 0; >+ ppim->va = 0; >+ ppim->sz = 0; >+ ppim->mode = 0; >+ if (va + size == virtual_avail) >+ virtual_avail = va; >+ return; >+ } >+ } >+ if (pmap_initialized) >+ kva_free(va, size); > } > > /* >Index: sys/conf/files.amd64 >=================================================================== >--- sys/conf/files.amd64 (revision 286461) >+++ sys/conf/files.amd64 (working copy) >@@ -592,6 +592,7 @@ > x86/isa/orm.c optional isa > x86/pci/pci_bus.c optional pci > x86/pci/qpi.c optional pci >+x86/x86/bus_machdep.c standard > x86/x86/busdma_bounce.c standard > x86/x86/busdma_machdep.c standard > x86/x86/cpu_machdep.c standard >Index: sys/conf/files.i386 >=================================================================== >--- sys/conf/files.i386 (revision 286461) >+++ sys/conf/files.i386 (working copy) >@@ -586,6 +586,7 @@ > x86/isa/orm.c optional isa > x86/pci/pci_bus.c optional pci > x86/pci/qpi.c optional pci >+x86/x86/bus_machdep.c standard > x86/x86/busdma_bounce.c standard > x86/x86/busdma_machdep.c standard > x86/x86/cpu_machdep.c standard >Index: sys/dev/vt/hw/efifb/efifb.c >=================================================================== >--- sys/dev/vt/hw/efifb/efifb.c (revision 286461) >+++ sys/dev/vt/hw/efifb/efifb.c (working copy) >@@ -51,7 +51,6 @@ > > static vd_init_t vt_efifb_init; > static vd_probe_t vt_efifb_probe; >-static void vt_efifb_remap(void *efifb_data); > > static struct vt_driver vt_efifb_driver = { > .vd_name = "efifb", >@@ -71,8 +70,6 @@ > static struct fb_info local_info; > VT_DRIVER_DECLARE(vt_efifb, vt_efifb_driver); > >-SYSINIT(efifb_remap, SI_SUB_KMEM, SI_ORDER_ANY, vt_efifb_remap, &local_info); >- > static int > vt_efifb_probe(struct vt_device *vd) > { >@@ -137,12 +134,8 @@ > > info->fb_size = info->fb_height * info->fb_stride; > info->fb_pbase = efifb->fb_addr; >- /* >- * Use the direct map as a crutch until pmap is available. Once pmap >- * is online, the framebuffer will be remapped by vt_efifb_remap() >- * using pmap_mapdev_attr(). >- */ >- info->fb_vbase = PHYS_TO_DMAP(efifb->fb_addr); >+ info->fb_vbase = (intptr_t)pmap_mapdev_attr(info->fb_pbase, >+ info->fb_size, VM_MEMATTR_WRITE_COMBINING); > > /* Get pixel storage size. */ > info->fb_bpp = info->fb_stride / info->fb_width * 8; >@@ -158,21 +151,3 @@ > > return (CN_INTERNAL); > } >- >-static void >-vt_efifb_remap(void *xinfo) >-{ >- struct fb_info *info = xinfo; >- >- if (info->fb_pbase == 0) >- return; >- >- /* >- * Remap as write-combining. 
This massively improves performance and >- * happens very early in kernel initialization, when everything is >- * still single-threaded and interrupts are off, so replacing the >- * mapping address is safe. >- */ >- info->fb_vbase = (intptr_t)pmap_mapdev_attr(info->fb_pbase, >- info->fb_size, VM_MEMATTR_WRITE_COMBINING); >-} >Index: sys/dev/vt/hw/vga/vt_vga.c >=================================================================== >--- sys/dev/vt/hw/vga/vt_vga.c (revision 286461) >+++ sys/dev/vt/hw/vga/vt_vga.c (working copy) >@@ -46,13 +46,6 @@ > > #include <machine/bus.h> > >-#if defined(__amd64__) || defined(__i386__) >-#include <vm/vm.h> >-#include <vm/pmap.h> >-#include <machine/pmap.h> >-#include <machine/vmparam.h> >-#endif /* __amd64__ || __i386__ */ >- > struct vga_softc { > bus_space_tag_t vga_fb_tag; > bus_space_handle_t vga_fb_handle; >@@ -1228,13 +1221,16 @@ > > #if defined(__amd64__) || defined(__i386__) > sc->vga_fb_tag = X86_BUS_SPACE_MEM; >- sc->vga_fb_handle = KERNBASE + VGA_MEM_BASE; > sc->vga_reg_tag = X86_BUS_SPACE_IO; >- sc->vga_reg_handle = VGA_REG_BASE; > #else > # error "Architecture not yet supported!" > #endif > >+ bus_space_map(sc->vga_fb_tag, VGA_MEM_BASE, VGA_MEM_SIZE, 0, >+ &sc->vga_fb_handle); >+ bus_space_map(sc->vga_reg_tag, VGA_REG_BASE, VGA_REG_SIZE, 0, >+ &sc->vga_reg_handle); >+ > TUNABLE_INT_FETCH("hw.vga.textmode", &textmode); > if (textmode) { > vd->vd_flags |= VDF_TEXTMODE; >Index: sys/i386/i386/machdep.c >=================================================================== >--- sys/i386/i386/machdep.c (revision 286461) >+++ sys/i386/i386/machdep.c (working copy) >@@ -2612,6 +2612,40 @@ > */ > clock_init(); > >+ finishidentcpu(); /* Final stage of CPU initialization */ >+ setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, >+ GSEL(GCODE_SEL, SEL_KPL)); >+ setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, >+ GSEL(GCODE_SEL, SEL_KPL)); >+ initializecpu(); /* Initialize CPU registers */ >+ initializecpucache(); >+ >+ /* pointer to selector slot for %fs/%gs */ >+ PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd); >+ >+ dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 = >+ dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)]; >+ dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 = >+ dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL); >+#if defined(PAE) || defined(PAE_TABLES) >+ dblfault_tss.tss_cr3 = (int)IdlePDPT; >+#else >+ dblfault_tss.tss_cr3 = (int)IdlePTD; >+#endif >+ dblfault_tss.tss_eip = (int)dblfault_handler; >+ dblfault_tss.tss_eflags = PSL_KERNEL; >+ dblfault_tss.tss_ds = dblfault_tss.tss_es = >+ dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL); >+ dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL); >+ dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL); >+ dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL); >+ >+ vm86_initialize(); >+ getmemsize(first); >+ init_param2(physmem); >+ >+ /* now running on new page tables, configured,and u/iom is accessible */ >+ > /* > * Initialize the console before we print anything out. 
> */ >@@ -2652,40 +2686,6 @@ > kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger"); > #endif > >- finishidentcpu(); /* Final stage of CPU initialization */ >- setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, >- GSEL(GCODE_SEL, SEL_KPL)); >- setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, >- GSEL(GCODE_SEL, SEL_KPL)); >- initializecpu(); /* Initialize CPU registers */ >- initializecpucache(); >- >- /* pointer to selector slot for %fs/%gs */ >- PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd); >- >- dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 = >- dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)]; >- dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 = >- dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL); >-#if defined(PAE) || defined(PAE_TABLES) >- dblfault_tss.tss_cr3 = (int)IdlePDPT; >-#else >- dblfault_tss.tss_cr3 = (int)IdlePTD; >-#endif >- dblfault_tss.tss_eip = (int)dblfault_handler; >- dblfault_tss.tss_eflags = PSL_KERNEL; >- dblfault_tss.tss_ds = dblfault_tss.tss_es = >- dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL); >- dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL); >- dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL); >- dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL); >- >- vm86_initialize(); >- getmemsize(first); >- init_param2(physmem); >- >- /* now running on new page tables, configured,and u/iom is accessible */ >- > msgbufinit(msgbufp, msgbufsize); > #ifdef DEV_NPX > npxinit(true); >Index: sys/i386/i386/pmap.c >=================================================================== >--- sys/i386/i386/pmap.c (revision 286461) >+++ sys/i386/i386/pmap.c (working copy) >@@ -228,6 +228,18 @@ > #define PAT_INDEX_SIZE 8 > static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */ > >+/* >+ * pmap_mapdev support pre initialization (i.e. console) >+ */ >+#define PMAP_PREINIT_MAPPING_COUNT 8 >+static struct pmap_preinit_mapping { >+ vm_paddr_t pa; >+ vm_offset_t va; >+ vm_size_t sz; >+ int mode; >+} pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT]; >+static int pmap_initialized; >+ > static struct rwlock_padalign pvh_global_lock; > > /* >@@ -744,6 +756,7 @@ > void > pmap_init(void) > { >+ struct pmap_preinit_mapping *ppim; > vm_page_t mpte; > vm_size_t s; > int i, pv_npg; >@@ -827,6 +840,17 @@ > UMA_ZONE_VM | UMA_ZONE_NOFREE); > uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf); > #endif >+ >+ pmap_initialized = 1; >+ if (!bootverbose) >+ return; >+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { >+ ppim = pmap_preinit_mapping + i; >+ if (ppim->va == 0) >+ continue; >+ printf("PPIM %u: PA=%#jx, VA=%#x, size=%#x, mode=%#x\n", i, >+ (uintmax_t)ppim->pa, ppim->va, ppim->sz, ppim->mode); >+ } > } > > >@@ -5071,8 +5095,10 @@ > void * > pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) > { >+ struct pmap_preinit_mapping *ppim; > vm_offset_t va, offset; > vm_size_t tmpsize; >+ int i; > > offset = pa & PAGE_MASK; > size = round_page(offset + size); >@@ -5080,11 +5106,36 @@ > > if (pa < KERNLOAD && pa + size <= KERNLOAD) > va = KERNBASE + pa; >- else >+ else if (!pmap_initialized) { >+ va = 0; >+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { >+ ppim = pmap_preinit_mapping + i; >+ if (ppim->va == 0) { >+ ppim->pa = pa; >+ ppim->sz = size; >+ ppim->mode = mode; >+ ppim->va = virtual_avail; >+ virtual_avail += size; >+ va = ppim->va; >+ break; >+ } >+ } >+ if (va == 0) >+ panic("%s: too many preinit mappings", __func__); >+ } else { >+ /* >+ * If we have a preinit mapping, re-use it. 
>+ */ >+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { >+ ppim = pmap_preinit_mapping + i; >+ if (ppim->pa == pa && ppim->sz == size && >+ ppim->mode == mode) >+ return ((void *)(ppim->va + offset)); >+ } > va = kva_alloc(size); >- if (!va) >- panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); >- >+ if (va == 0) >+ panic("%s: Couldn't allocate KVA", __func__); >+ } > for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) > pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); > pmap_invalidate_range(kernel_pmap, va, va + tmpsize); >@@ -5109,14 +5160,31 @@ > void > pmap_unmapdev(vm_offset_t va, vm_size_t size) > { >- vm_offset_t base, offset; >+ struct pmap_preinit_mapping *ppim; >+ vm_offset_t offset; >+ int i; > > if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD) > return; >- base = trunc_page(va); > offset = va & PAGE_MASK; > size = round_page(offset + size); >- kva_free(base, size); >+ va = trunc_page(va); >+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { >+ ppim = pmap_preinit_mapping + i; >+ if (ppim->va == va && ppim->sz == size) { >+ if (pmap_initialized) >+ return; >+ ppim->pa = 0; >+ ppim->va = 0; >+ ppim->sz = 0; >+ ppim->mode = 0; >+ if (va + size == virtual_avail) >+ virtual_avail = va; >+ return; >+ } >+ } >+ if (pmap_initialized) >+ kva_free(va, size); > } > > /* >Index: sys/x86/include/bus.h >=================================================================== >--- sys/x86/include/bus.h (revision 286461) >+++ sys/x86/include/bus.h (working copy) >@@ -130,33 +130,16 @@ > * Map a region of device bus space into CPU virtual address space. > */ > >-static __inline int bus_space_map(bus_space_tag_t t, bus_addr_t addr, >- bus_size_t size, int flags, >- bus_space_handle_t *bshp); >+int bus_space_map(bus_space_tag_t tag, bus_addr_t addr, bus_size_t size, >+ int flags, bus_space_handle_t *bshp); > >-static __inline int >-bus_space_map(bus_space_tag_t t __unused, bus_addr_t addr, >- bus_size_t size __unused, int flags __unused, >- bus_space_handle_t *bshp) >-{ >- >- *bshp = addr; >- return (0); >-} >- > /* > * Unmap a region of device bus space. > */ > >-static __inline void bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, >- bus_size_t size); >+void bus_space_unmap(bus_space_tag_t tag, bus_space_handle_t bsh, >+ bus_size_t size); > >-static __inline void >-bus_space_unmap(bus_space_tag_t t __unused, bus_space_handle_t bsh __unused, >- bus_size_t size __unused) >-{ >-} >- > /* > * Get a new handle for a subregion of an already-mapped area of bus space. > */ >Index: sys/x86/x86/bus_machdep.c >=================================================================== >--- sys/x86/x86/bus_machdep.c (revision 0) >+++ sys/x86/x86/bus_machdep.c (working copy) >@@ -0,0 +1,59 @@ >+/*- >+ * Copyright (c) 2015 Marcel Moolenaar >+ * All rights reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. 
>+ * >+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR >+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES >+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. >+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, >+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT >+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, >+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY >+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF >+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >+ */ >+ >+#include <sys/cdefs.h> >+__FBSDID("$FreeBSD$"); >+ >+#include <sys/param.h> >+#include <sys/systm.h> >+#include <x86/bus.h> >+ >+#include <vm/vm.h> >+#include <vm/pmap.h> >+ >+/* >+ * Implementation of bus_space_map(), which effectively is a thin >+ * wrapper around pmap_mapdev() for memory mapped I/O space. It's >+ * implemented here and not in <x86/bus.h> to avoid pollution. >+ */ >+int >+bus_space_map(bus_space_tag_t tag, bus_addr_t addr, bus_size_t size, >+ int flags __unused, bus_space_handle_t *bshp) >+{ >+ >+ *bshp = (tag == X86_BUS_SPACE_MEM) >+ ? (uintptr_t)pmap_mapdev(addr, size) >+ : addr; >+ return (0); >+} >+ >+void >+bus_space_unmap(bus_space_tag_t tag, bus_space_handle_t bsh, bus_size_t size) >+{ >+ >+ if (tag == X86_BUS_SPACE_MEM) >+ pmap_unmapdev(bsh, size); >+} > >Property changes on: sys/x86/x86/bus_machdep.c >___________________________________________________________________ >Added: svn:eol-style >## -0,0 +1 ## >+native >\ No newline at end of property >Added: svn:keywords >## -0,0 +1 ## >+FreeBSD=%H >\ No newline at end of property >Added: svn:mime-type >## -0,0 +1 ## >+text/plain >\ No newline at end of property