
(-)sys/amd64/amd64/machdep.c (-32 / +29 lines)
Lines 1625-1662
 	    MODINFO_METADATA | MODINFOMD_EFI_MAP) != NULL)
 		vty_set_preferred(VTY_VT);
 
-	/*
-	 * Initialize the console before we print anything out.
-	 */
-	cninit();
-
-#ifdef DEV_ISA
-#ifdef DEV_ATPIC
-	elcr_probe();
-	atpic_startup();
-#else
-	/* Reset and mask the atpics and leave them shut down. */
-	atpic_reset();
-
-	/*
-	 * Point the ICU spurious interrupt vectors at the APIC spurious
-	 * interrupt handler.
-	 */
-	setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
-	setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
-#endif
-#else
-#error "have you forgotten the isa device?";
-#endif
-
-	kdb_init();
-
-#ifdef KDB
-	if (boothowto & RB_KDB)
-		kdb_enter(KDB_WHY_BOOTFLAGS,
-		    "Boot flags requested debugger");
-#endif
-
 	identify_cpu();		/* Final stage of CPU initialization */
 	initializecpu();	/* Initialize CPU registers */
 	initializecpucache();
Lines 1693-1698
 
 	/* now running on new page tables, configured,and u/iom is accessible */
 
+	cninit();
+
+#ifdef DEV_ISA
+#ifdef DEV_ATPIC
+	elcr_probe();
+	atpic_startup();
+#else
+	/* Reset and mask the atpics and leave them shut down. */
+	atpic_reset();
+
+	/*
+	 * Point the ICU spurious interrupt vectors at the APIC spurious
+	 * interrupt handler.
+	 */
+	setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
+	setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
+#endif
+#else
+#error "have you forgotten the isa device?";
+#endif
+
+	kdb_init();
+
+#ifdef KDB
+	if (boothowto & RB_KDB)
+		kdb_enter(KDB_WHY_BOOTFLAGS,
+		    "Boot flags requested debugger");
+#endif
+
 	msgbufinit(msgbufp, msgbufsize);
 	fpuinit();
 
(-)sys/amd64/amd64/pmap.c (-15 / +86 lines)
Lines 363-368
 static u_int64_t	DMPDPphys;	/* phys addr of direct mapped level 3 */
 static int		ndmpdpphys;	/* number of DMPDPphys pages */
 
+/*
+ * pmap_mapdev support pre initialization (i.e. console)
+ */
+#define	PMAP_PREINIT_MAPPING_COUNT	8
+static struct pmap_preinit_mapping {
+	vm_paddr_t	pa;
+	vm_offset_t	va;
+	vm_size_t	sz;
+	int		mode;
+} pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
+static int pmap_initialized;
+
 static struct rwlock_padalign pvh_global_lock;
 
 /*
Lines 1016-1021
 void
 pmap_init(void)
 {
+	struct pmap_preinit_mapping *ppim;
 	vm_page_t mpte;
 	vm_size_t s;
 	int i, pv_npg;
1083
	    M_WAITOK | M_ZERO);
1096
	    M_WAITOK | M_ZERO);
1084
	for (i = 0; i < pv_npg; i++)
1097
	for (i = 0; i < pv_npg; i++)
1085
		TAILQ_INIT(&pv_table[i].pv_list);
1098
		TAILQ_INIT(&pv_table[i].pv_list);
1099
1100
	pmap_initialized = 1;
1101
	if (!bootverbose)
1102
		return;
1103
	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
1104
		ppim = pmap_preinit_mapping + i;
1105
		if (ppim->va == 0)
1106
			continue;
1107
		printf("PPIM %u: PA=%#lx, VA=%#lx, size=%#lx, mode=%#x\n", i,
1108
		    ppim->pa, ppim->va, ppim->sz, ppim->mode);
1109
	}
1086
}
1110
}
1087
1111
1088
static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
1112
static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
Lines 6105-6128
 void *
 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 {
+	struct pmap_preinit_mapping *ppim;
 	vm_offset_t va, offset;
 	vm_size_t tmpsize;
+	int i;
 
-	/*
-	 * If the specified range of physical addresses fits within the direct
-	 * map window, use the direct map.
-	 */
-	if (pa < dmaplimit && pa + size < dmaplimit) {
-		va = PHYS_TO_DMAP(pa);
-		if (!pmap_change_attr(va, size, mode))
-			return ((void *)va);
-	}
 	offset = pa & PAGE_MASK;
 	size = round_page(offset + size);
-	va = kva_alloc(size);
-	if (!va)
-		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 	pa = trunc_page(pa);
+
+	if (!pmap_initialized) {
+		va = 0;
+		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+			ppim = pmap_preinit_mapping + i;
+			if (ppim->va == 0) {
+				ppim->pa = pa;
+				ppim->sz = size;
+				ppim->mode = mode;
+				ppim->va = virtual_avail;
+				virtual_avail += size;
+				va = ppim->va;
+				break;
+			}
+		}
+		if (va == 0)
+			panic("%s: too many preinit mappings", __func__);
+	} else {
+		/*
+		 * If we have a preinit mapping, re-use it.
+		 */
+		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+			ppim = pmap_preinit_mapping + i;
+			if (ppim->pa == pa && ppim->sz == size &&
+			    ppim->mode == mode)
+				return ((void *)(ppim->va + offset));
+		}
+		/*
+		 * If the specified range of physical addresses fits within
+		 * the direct map window, use the direct map.
+		 */
+		if (pa < dmaplimit && pa + size < dmaplimit) {
+			va = PHYS_TO_DMAP(pa);
+			if (!pmap_change_attr(va, size, mode))
+				return ((void *)(va + offset));
+		}
+		va = kva_alloc(size);
+		if (va == 0)
+			panic("%s: Couldn't allocate KVA", __func__);
+	}
 	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
6147
void
6201
void
6148
pmap_unmapdev(vm_offset_t va, vm_size_t size)
6202
pmap_unmapdev(vm_offset_t va, vm_size_t size)
6149
{
6203
{
6150
	vm_offset_t base, offset;
6204
	struct pmap_preinit_mapping *ppim;
6205
	vm_offset_t offset;
6206
	int i;
6151
6207
6152
	/* If we gave a direct map region in pmap_mapdev, do nothing */
6208
	/* If we gave a direct map region in pmap_mapdev, do nothing */
6153
	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
6209
	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
6154
		return;
6210
		return;
6155
	base = trunc_page(va);
6156
	offset = va & PAGE_MASK;
6211
	offset = va & PAGE_MASK;
6157
	size = round_page(offset + size);
6212
	size = round_page(offset + size);
6158
	kva_free(base, size);
6213
	va = trunc_page(va);
6214
	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
6215
		ppim = pmap_preinit_mapping + i;
6216
		if (ppim->va == va && ppim->sz == size) {
6217
			if (pmap_initialized)
6218
				return;
6219
			ppim->pa = 0;
6220
			ppim->va = 0;
6221
			ppim->sz = 0;
6222
			ppim->mode = 0;
6223
			if (va + size == virtual_avail)
6224
				virtual_avail = va;
6225
			return;
6226
		}
6227
	}
6228
	if (pmap_initialized)
6229
		kva_free(va, size);
6159
}
6230
}
6160
6231
6161
/*
6232
/*
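
Note: the pmap_preinit_mapping[] table above is the heart of this change. Before pmap_init() runs, pmap_mapdev_attr() hands out KVA straight from virtual_avail and records the (pa, size, mode) triple in a free slot; after pmap_init(), a request with a matching triple gets the recorded VA back instead of fresh KVA. A minimal sketch of how an early console driver would observe this behavior (early_attach(), late_attach(), FB_PA and FB_SZ are hypothetical names for illustration, not part of the patch):

/* Sketch only: calls pmap_mapdev_attr() before and after pmap_init(). */
#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#define	FB_PA	0xfd000000UL		/* hypothetical framebuffer base */
#define	FB_SZ	(8 * 1024 * 1024)	/* hypothetical framebuffer size */

static void *fb_early, *fb_late;

static void
early_attach(void)
{

	/*
	 * Pre-pmap_init(): KVA comes from virtual_avail and the request
	 * is recorded in a free pmap_preinit_mapping[] slot.
	 */
	fb_early = pmap_mapdev_attr(FB_PA, FB_SZ, VM_MEMATTR_WRITE_COMBINING);
}

static void
late_attach(void)
{

	/*
	 * Post-pmap_init(): the same (pa, size, mode) triple matches the
	 * recorded slot, so the preinit VA is returned, not new KVA.
	 */
	fb_late = pmap_mapdev_attr(FB_PA, FB_SZ, VM_MEMATTR_WRITE_COMBINING);
	KASSERT(fb_late == fb_early, ("preinit mapping was not reused"));
}
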
(-)sys/conf/files.amd64 (+1 lines)
Lines 592-597
 x86/isa/orm.c			optional	isa
 x86/pci/pci_bus.c		optional	pci
 x86/pci/qpi.c			optional	pci
+x86/x86/bus_machdep.c		standard
 x86/x86/busdma_bounce.c		standard
 x86/x86/busdma_machdep.c	standard
 x86/x86/cpu_machdep.c		standard
(-)sys/conf/files.i386 (+1 lines)
Lines 586-591
 x86/isa/orm.c			optional isa
 x86/pci/pci_bus.c		optional pci
 x86/pci/qpi.c			optional pci
+x86/x86/bus_machdep.c		standard
 x86/x86/busdma_bounce.c		standard
 x86/x86/busdma_machdep.c	standard
 x86/x86/cpu_machdep.c		standard
(-)sys/dev/vt/hw/efifb/efifb.c (-27 / +2 lines)
Lines 51-57
 
 static vd_init_t vt_efifb_init;
 static vd_probe_t vt_efifb_probe;
-static void vt_efifb_remap(void *efifb_data);
 
 static struct vt_driver vt_efifb_driver = {
 	.vd_name = "efifb",
Lines 71-78
 static struct fb_info local_info;
 VT_DRIVER_DECLARE(vt_efifb, vt_efifb_driver);
 
-SYSINIT(efifb_remap, SI_SUB_KMEM, SI_ORDER_ANY, vt_efifb_remap, &local_info);
-
 static int
 vt_efifb_probe(struct vt_device *vd)
 {
Lines 137-148
 
 	info->fb_size = info->fb_height * info->fb_stride;
 	info->fb_pbase = efifb->fb_addr;
-	/*
-	 * Use the direct map as a crutch until pmap is available. Once pmap
-	 * is online, the framebuffer will be remapped by vt_efifb_remap()
-	 * using pmap_mapdev_attr().
-	 */
-	info->fb_vbase = PHYS_TO_DMAP(efifb->fb_addr);
+	info->fb_vbase = (intptr_t)pmap_mapdev_attr(info->fb_pbase,
+	    info->fb_size, VM_MEMATTR_WRITE_COMBINING);
 
 	/* Get pixel storage size. */
 	info->fb_bpp = info->fb_stride / info->fb_width * 8;
Lines 158-178
 
 	return (CN_INTERNAL);
 }
-
-static void
-vt_efifb_remap(void *xinfo)
-{
-	struct fb_info *info = xinfo;
-
-	if (info->fb_pbase == 0)
-		return;
-
-	/*
-	 * Remap as write-combining. This massively improves performance and
-	 * happens very early in kernel initialization, when everything is
-	 * still single-threaded and interrupts are off, so replacing the
-	 * mapping address is safe.
-	 */
-	info->fb_vbase = (intptr_t)pmap_mapdev_attr(info->fb_pbase,
-	    info->fb_size, VM_MEMATTR_WRITE_COMBINING);
-}
(-)sys/dev/vt/hw/vga/vt_vga.c (-9 / +5 lines)
Lines 46-58
 
 #include <machine/bus.h>
 
-#if defined(__amd64__) || defined(__i386__)
-#include <vm/vm.h>
-#include <vm/pmap.h>
-#include <machine/pmap.h>
-#include <machine/vmparam.h>
-#endif /* __amd64__ || __i386__ */
-
 struct vga_softc {
 	bus_space_tag_t		 vga_fb_tag;
 	bus_space_handle_t	 vga_fb_handle;
Lines 1228-1240
 
 #if defined(__amd64__) || defined(__i386__)
 	sc->vga_fb_tag = X86_BUS_SPACE_MEM;
-	sc->vga_fb_handle = KERNBASE + VGA_MEM_BASE;
 	sc->vga_reg_tag = X86_BUS_SPACE_IO;
-	sc->vga_reg_handle = VGA_REG_BASE;
 #else
 # error "Architecture not yet supported!"
 #endif
 
+	bus_space_map(sc->vga_fb_tag, VGA_MEM_BASE, VGA_MEM_SIZE, 0,
+	    &sc->vga_fb_handle);
+	bus_space_map(sc->vga_reg_tag, VGA_REG_BASE, VGA_REG_SIZE, 0,
+	    &sc->vga_reg_handle);
+
 	TUNABLE_INT_FETCH("hw.vga.textmode", &textmode);
 	if (textmode) {
 		vd->vd_flags |= VDF_TEXTMODE;
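
Note: the two bus_space_map() calls added above discard the function's return value. That is harmless with the x86 implementation introduced by this patch, which always returns 0, but a defensive caller could check it anyway. A minimal sketch under that assumption (vga_map_resources() is a hypothetical helper, not part of the patch):

/* Sketch only: checked variant of the two mappings added above. */
static int
vga_map_resources(struct vga_softc *sc)
{

	if (bus_space_map(sc->vga_fb_tag, VGA_MEM_BASE, VGA_MEM_SIZE, 0,
	    &sc->vga_fb_handle) != 0)
		return (ENXIO);
	if (bus_space_map(sc->vga_reg_tag, VGA_REG_BASE, VGA_REG_SIZE, 0,
	    &sc->vga_reg_handle) != 0) {
		/* Undo the framebuffer mapping on partial failure. */
		bus_space_unmap(sc->vga_fb_tag, sc->vga_fb_handle,
		    VGA_MEM_SIZE);
		return (ENXIO);
	}
	return (0);
}
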
(-)sys/i386/i386/machdep.c (-34 / +34 lines)
Lines 2612-2617
 	 */
 	clock_init();
 
+	finishidentcpu();	/* Final stage of CPU initialization */
+	setidt(IDT_UD, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL,
+	    GSEL(GCODE_SEL, SEL_KPL));
+	setidt(IDT_GP, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL,
+	    GSEL(GCODE_SEL, SEL_KPL));
+	initializecpu();	/* Initialize CPU registers */
+	initializecpucache();
+
+	/* pointer to selector slot for %fs/%gs */
+	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
+
+	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
+	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
+	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
+	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
+#if defined(PAE) || defined(PAE_TABLES)
+	dblfault_tss.tss_cr3 = (int)IdlePDPT;
+#else
+	dblfault_tss.tss_cr3 = (int)IdlePTD;
+#endif
+	dblfault_tss.tss_eip = (int)dblfault_handler;
+	dblfault_tss.tss_eflags = PSL_KERNEL;
+	dblfault_tss.tss_ds = dblfault_tss.tss_es =
+	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
+	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
+	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
+	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
+
+	vm86_initialize();
+	getmemsize(first);
+	init_param2(physmem);
+
+	/* now running on new page tables, configured,and u/iom is accessible */
+
 	/*
 	 * Initialize the console before we print anything out.
 	 */
Lines 2652-2691
 		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
 #endif
 
-	finishidentcpu();	/* Final stage of CPU initialization */
-	setidt(IDT_UD, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL,
-	    GSEL(GCODE_SEL, SEL_KPL));
-	setidt(IDT_GP, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL,
-	    GSEL(GCODE_SEL, SEL_KPL));
-	initializecpu();	/* Initialize CPU registers */
-	initializecpucache();
-
-	/* pointer to selector slot for %fs/%gs */
-	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
-
-	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
-	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
-	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
-	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
-#if defined(PAE) || defined(PAE_TABLES)
-	dblfault_tss.tss_cr3 = (int)IdlePDPT;
-#else
-	dblfault_tss.tss_cr3 = (int)IdlePTD;
-#endif
-	dblfault_tss.tss_eip = (int)dblfault_handler;
-	dblfault_tss.tss_eflags = PSL_KERNEL;
-	dblfault_tss.tss_ds = dblfault_tss.tss_es =
-	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
-	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
-	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
-	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
-
-	vm86_initialize();
-	getmemsize(first);
-	init_param2(physmem);
-
-	/* now running on new page tables, configured,and u/iom is accessible */
-
 	msgbufinit(msgbufp, msgbufsize);
 #ifdef DEV_NPX
 	npxinit(true);
(-)sys/i386/i386/pmap.c (-7 / +75 lines)
Lines 228-233
 #define	PAT_INDEX_SIZE	8
 static int pat_index[PAT_INDEX_SIZE];	/* cache mode to PAT index conversion */
 
+/*
+ * pmap_mapdev support pre initialization (i.e. console)
+ */
+#define	PMAP_PREINIT_MAPPING_COUNT	8
+static struct pmap_preinit_mapping {
+	vm_paddr_t	pa;
+	vm_offset_t	va;
+	vm_size_t	sz;
+	int		mode;
+} pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
+static int pmap_initialized;
+
 static struct rwlock_padalign pvh_global_lock;
 
 /*
Lines 744-749
 void
 pmap_init(void)
 {
+	struct pmap_preinit_mapping *ppim;
 	vm_page_t mpte;
 	vm_size_t s;
 	int i, pv_npg;
827
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
840
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
828
	uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
841
	uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
829
#endif
842
#endif
843
844
	pmap_initialized = 1;
845
	if (!bootverbose)
846
		return;
847
	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
848
		ppim = pmap_preinit_mapping + i;
849
		if (ppim->va == 0)
850
			continue;
851
		printf("PPIM %u: PA=%#jx, VA=%#x, size=%#x, mode=%#x\n", i,
852
		    (uintmax_t)ppim->pa, ppim->va, ppim->sz, ppim->mode);
853
	}
830
}
854
}
831
855
832
856
Lines 5071-5078
 void *
 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 {
+	struct pmap_preinit_mapping *ppim;
 	vm_offset_t va, offset;
 	vm_size_t tmpsize;
+	int i;
 
 	offset = pa & PAGE_MASK;
 	size = round_page(offset + size);
Lines 5080-5090
 
 	if (pa < KERNLOAD && pa + size <= KERNLOAD)
 		va = KERNBASE + pa;
-	else
+	else if (!pmap_initialized) {
+		va = 0;
+		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+			ppim = pmap_preinit_mapping + i;
+			if (ppim->va == 0) {
+				ppim->pa = pa;
+				ppim->sz = size;
+				ppim->mode = mode;
+				ppim->va = virtual_avail;
+				virtual_avail += size;
+				va = ppim->va;
+				break;
+			}
+		}
+		if (va == 0)
+			panic("%s: too many preinit mappings", __func__);
+	} else {
+		/*
+		 * If we have a preinit mapping, re-use it.
+		 */
+		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+			ppim = pmap_preinit_mapping + i;
+			if (ppim->pa == pa && ppim->sz == size &&
+			    ppim->mode == mode)
+				return ((void *)(ppim->va + offset));
+		}
 		va = kva_alloc(size);
-	if (!va)
-		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
-
+		if (va == 0)
+			panic("%s: Couldn't allocate KVA", __func__);
+	}
 	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
Lines 5109-5122
 void
 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 {
-	vm_offset_t base, offset;
+	struct pmap_preinit_mapping *ppim;
+	vm_offset_t offset;
+	int i;
 
 	if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD)
 		return;
-	base = trunc_page(va);
 	offset = va & PAGE_MASK;
 	size = round_page(offset + size);
-	kva_free(base, size);
+	va = trunc_page(va);
+	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+		ppim = pmap_preinit_mapping + i;
+		if (ppim->va == va && ppim->sz == size) {
+			if (pmap_initialized)
+				return;
+			ppim->pa = 0;
+			ppim->va = 0;
+			ppim->sz = 0;
+			ppim->mode = 0;
+			if (va + size == virtual_avail)
+				virtual_avail = va;
+			return;
+		}
+	}
+	if (pmap_initialized)
+		kva_free(va, size);
 }
 
 /*
(-)sys/x86/include/bus.h (-21 / +4 lines)
Lines 130-162
  * Map a region of device bus space into CPU virtual address space.
  */
 
-static __inline int bus_space_map(bus_space_tag_t t, bus_addr_t addr,
-				  bus_size_t size, int flags,
-				  bus_space_handle_t *bshp);
+int bus_space_map(bus_space_tag_t tag, bus_addr_t addr, bus_size_t size,
+    int flags, bus_space_handle_t *bshp);
 
-static __inline int
-bus_space_map(bus_space_tag_t t __unused, bus_addr_t addr,
-	      bus_size_t size __unused, int flags __unused,
-	      bus_space_handle_t *bshp)
-{
-
-	*bshp = addr;
-	return (0);
-}
-
 /*
  * Unmap a region of device bus space.
  */
 
-static __inline void bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh,
-				     bus_size_t size);
+void bus_space_unmap(bus_space_tag_t tag, bus_space_handle_t bsh,
+    bus_size_t size);
 
-static __inline void
-bus_space_unmap(bus_space_tag_t t __unused, bus_space_handle_t bsh __unused,
-		bus_size_t size __unused)
-{
-}
-
 /*
  * Get a new handle for a subregion of an already-mapped area of bus space.
  */
(-)sys/x86/x86/bus_machdep.c (+59 lines)
Line 0
+/*-
+ * Copyright (c) 2015 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <x86/bus.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+/*
+ * Implementation of bus_space_map(), which effectively is a thin
+ * wrapper around pmap_mapdev() for memory mapped I/O space. It's
+ * implemented here and not in <x86/bus.h> to avoid pollution.
+ */
+int
+bus_space_map(bus_space_tag_t tag, bus_addr_t addr, bus_size_t size,
+    int flags __unused, bus_space_handle_t *bshp)
+{
+
+	*bshp = (tag == X86_BUS_SPACE_MEM)
+	    ? (uintptr_t)pmap_mapdev(addr, size)
+	    : addr;
+	return (0);
+}
+
+void
+bus_space_unmap(bus_space_tag_t tag, bus_space_handle_t bsh, bus_size_t size)
+{
+
+	if (tag == X86_BUS_SPACE_MEM)
+		pmap_unmapdev(bsh, size);
+}
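
Note: with this file, handle semantics depend on the tag. For X86_BUS_SPACE_IO the handle is the port address passed through unchanged, while for X86_BUS_SPACE_MEM it is KVA obtained from pmap_mapdev(), which (thanks to the preinit table added in pmap.c) now also works before pmap_init(). A minimal sketch of that contract, using VGA ranges as example addresses (bus_space_map_contract() is a hypothetical illustration, not part of the patch):

/* Sketch only: illustrates the tag-dependent handle contract. */
static void
bus_space_map_contract(void)
{
	bus_space_handle_t ioh, memh;

	/* I/O port tag: the handle is simply the port number. */
	bus_space_map(X86_BUS_SPACE_IO, 0x3c0, 0x10, 0, &ioh);
	KASSERT(ioh == 0x3c0, ("I/O handles pass through unchanged"));

	/*
	 * Memory tag: the handle is KVA from pmap_mapdev(), not the
	 * physical address itself.
	 */
	bus_space_map(X86_BUS_SPACE_MEM, 0xa0000, 0x20000, 0, &memh);
	/* ... access via bus_space_read_1(X86_BUS_SPACE_MEM, memh, 0) ... */
	bus_space_unmap(X86_BUS_SPACE_MEM, memh, 0x20000);
}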
