Patch for FreeBSD bug 236922.

(-)sys/conf/files (+3 lines)
Lines 3246-3251
 dev/virtio/virtio_bus_if.m		optional	virtio
 dev/virtio/virtio_if.m			optional	virtio
 dev/virtio/pci/virtio_pci.c		optional	virtio_pci
+dev/virtio/pci/virtio_pci_if.m      optional    virtio_pci
+dev/virtio/pci/virtio_pci_legacy.c  optional    virtio_pci
+dev/virtio/pci/virtio_pci_modern.c  optional    virtio_pci
 dev/virtio/mmio/virtio_mmio.c		optional	virtio_mmio
 dev/virtio/mmio/virtio_mmio_if.m	optional	virtio_mmio
 dev/virtio/network/if_vtnet.c		optional	vtnet
(-)sys/modules/virtio/pci/Makefile (+2 lines)
Lines 27-32
 
 KMOD=	virtio_pci
 SRCS=	virtio_pci.c
+SRCS+=  virtio_pci_legacy.c virtio_pci_modern.c
+SRCS+=  virtio_pci_if.c virtio_pci_if.h
 SRCS+=	virtio_bus_if.h virtio_if.h
 SRCS+=	bus_if.h device_if.h pci_if.h
 
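
Note: with the PCI transport split into legacy and modern halves, VirtIO device
drivers attach to the new vtpcil (legacy) and vtpcim (modern) buses instead of
the old single virtio_pci bus, which is why each driver below gains a second
DRIVER_MODULE line. A minimal sketch of the registration pattern ("vtfoo" is a
hypothetical driver name, not part of the patch):

/* Register against both PCI transports; whichever bus probes the device wins. */
DRIVER_MODULE(virtio_foo, vtpcil, vtfoo_driver, vtfoo_devclass, 0, 0);
DRIVER_MODULE(virtio_foo, vtpcim, vtfoo_driver, vtfoo_devclass, 0, 0);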
(-)sys/dev/virtio/balloon/virtio_balloon.c (-15 / +50 lines)
Lines 1-4
 /*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
  * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
  * All rights reserved.
  *
Lines 78-83
 static struct virtio_feature_desc vtballoon_feature_desc[] = {
 	{ VIRTIO_BALLOON_F_MUST_TELL_HOST,	"MustTellHost"	},
 	{ VIRTIO_BALLOON_F_STATS_VQ,		"StatsVq"	},
+	{ VIRTIO_BALLOON_F_DEFLATE_ON_OOM,	"DeflateOnOOM"	},
 
 	{ 0, NULL }
 };
Lines 87-93
 static int	vtballoon_detach(device_t);
 static int	vtballoon_config_change(device_t);
 
-static void	vtballoon_negotiate_features(struct vtballoon_softc *);
+static int	vtballoon_negotiate_features(struct vtballoon_softc *);
+static int	vtballoon_setup_features(struct vtballoon_softc *);
 static int	vtballoon_alloc_virtqueues(struct vtballoon_softc *);
 
 static void	vtballoon_vq_intr(void *);
Lines 107-116
 
 static int	vtballoon_sleep(struct vtballoon_softc *);
 static void	vtballoon_thread(void *);
-static void	vtballoon_add_sysctl(struct vtballoon_softc *);
+static void	vtballoon_setup_sysctl(struct vtballoon_softc *);
 
+#define vtballoon_modern(_sc) \
+    (((_sc)->vtballoon_features & VIRTIO_F_VERSION_1) != 0)
+
 /* Features desired/implemented by this driver. */
-#define VTBALLOON_FEATURES		0
+#define VTBALLOON_FEATURES		VIRTIO_BALLOON_F_MUST_TELL_HOST
 
 /* Timeout between retries when the balloon needs inflating. */
 #define VTBALLOON_LOWMEM_TIMEOUT	hz
Lines 151-158
 };
 static devclass_t vtballoon_devclass;
 
-DRIVER_MODULE(virtio_balloon, virtio_pci, vtballoon_driver,
+DRIVER_MODULE(virtio_balloon, vtpcil, vtballoon_driver,
     vtballoon_devclass, 0, 0);
+DRIVER_MODULE(virtio_balloon, vtpcim, vtballoon_driver,
+    vtballoon_devclass, 0, 0);
 MODULE_VERSION(virtio_balloon, 1);
 MODULE_DEPEND(virtio_balloon, virtio, 1, 1, 1);
 
Lines 176-189
 
 	sc = device_get_softc(dev);
 	sc->vtballoon_dev = dev;
+	virtio_set_feature_desc(dev, vtballoon_feature_desc);
 
 	VTBALLOON_LOCK_INIT(sc, device_get_nameunit(dev));
 	TAILQ_INIT(&sc->vtballoon_pages);
 
-	vtballoon_add_sysctl(sc);
+	vtballoon_setup_sysctl(sc);
 
-	virtio_set_feature_desc(dev, vtballoon_feature_desc);
-	vtballoon_negotiate_features(sc);
+	error = vtballoon_setup_features(sc);
+	if (error) {
+		device_printf(dev, "cannot setup features\n");
+		goto fail;
+	}
 
 	sc->vtballoon_page_frames = malloc(VTBALLOON_PAGES_PER_REQUEST *
 	    sizeof(uint32_t), M_DEVBUF, M_NOWAIT | M_ZERO);
Lines 269-286
 	return (1);
 }
 
-static void
+static int
 vtballoon_negotiate_features(struct vtballoon_softc *sc)
 {
 	device_t dev;
 	uint64_t features;
 
 	dev = sc->vtballoon_dev;
-	features = virtio_negotiate_features(dev, VTBALLOON_FEATURES);
-	sc->vtballoon_features = features;
+	features = VTBALLOON_FEATURES;
+
+	sc->vtballoon_features = virtio_negotiate_features(dev, features);
+	return (virtio_finalize_features(dev));
 }
 
 static int
+vtballoon_setup_features(struct vtballoon_softc *sc)
+{
+	int error;
+
+	error = vtballoon_negotiate_features(sc);
+	if (error)
+		return (error);
+
+	return (0);
+}
+
+static int
 vtballoon_alloc_virtqueues(struct vtballoon_softc *sc)
 {
 	device_t dev;
Lines 438-444
 {
 	vm_page_t m;
 
-	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
+	m = vm_page_alloc(NULL, 0,
+	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP);
 	if (m != NULL)
 		sc->vtballoon_current_npages++;
 
Lines 461-476
 	desired = virtio_read_dev_config_4(sc->vtballoon_dev,
 	    offsetof(struct virtio_balloon_config, num_pages));
 
-	return (le32toh(desired));
+	if (vtballoon_modern(sc))
+		return (desired);
+	else
+		return (le32toh(desired));
 }
 
 static void
 vtballoon_update_size(struct vtballoon_softc *sc)
 {
+	uint32_t npages;
 
+	npages = sc->vtballoon_current_npages;
+	if (!vtballoon_modern(sc))
+		npages = htole32(npages);
+
 	virtio_write_dev_config_4(sc->vtballoon_dev,
-	    offsetof(struct virtio_balloon_config, actual),
-	    htole32(sc->vtballoon_current_npages));
+	    offsetof(struct virtio_balloon_config, actual), npages);
 }
 
 static int
Lines 542-548
 }
 
 static void
-vtballoon_add_sysctl(struct vtballoon_softc *sc)
+vtballoon_setup_sysctl(struct vtballoon_softc *sc)
 {
 	device_t dev;
 	struct sysctl_ctx_list *ctx;
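
Note: the negotiate/setup helpers change from void to int throughout this patch
because V1 devices add a finalize step (the FEATURES_OK status handshake of the
VirtIO 1.0 spec) that the device may reject. A minimal sketch of the flow this
patch converges on, for a hypothetical "vtfoo" driver (both virtio_* calls are
from the patch itself):

static int
vtfoo_setup_features(struct vtfoo_softc *sc)
{
	device_t dev = sc->vtfoo_dev;

	/* Offer our feature set; the transport masks it with the host's. */
	sc->vtfoo_features = virtio_negotiate_features(dev, VTFOO_FEATURES);

	/* New with V1: commit the negotiated set; this step can fail. */
	return (virtio_finalize_features(dev));
}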
(-)sys/dev/virtio/balloon/virtio_balloon.h (-1 / +29 lines)
Lines 1-4
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * This header is BSD licensed so anyone can use the definitions to implement
  * compatible drivers/servers.
  *
Lines 34-39
 /* Feature bits. */
 #define VIRTIO_BALLOON_F_MUST_TELL_HOST	0x1 /* Tell before reclaiming pages */
 #define VIRTIO_BALLOON_F_STATS_VQ	0x2 /* Memory stats virtqueue */
+#define VIRTIO_BALLOON_F_DEFLATE_ON_OOM	0x4 /* Deflate balloon on OOM */
 
 /* Size of a PFN in the balloon interface. */
 #define VIRTIO_BALLOON_PFN_SHIFT 12
Lines 52-59
 #define VIRTIO_BALLOON_S_MINFLT   3   /* Number of minor faults */
 #define VIRTIO_BALLOON_S_MEMFREE  4   /* Total amount of free memory */
 #define VIRTIO_BALLOON_S_MEMTOT   5   /* Total amount of memory */
-#define VIRTIO_BALLOON_S_NR       6
+#define VIRTIO_BALLOON_S_AVAIL    6   /* Available memory as in /proc */
+#define VIRTIO_BALLOON_S_CACHES   7   /* Disk caches */
+#define VIRTIO_BALLOON_S_NR       8
 
+/*
+ * Memory statistics structure.
+ * Driver fills an array of these structures and passes to device.
+ *
+ * NOTE: fields are laid out in a way that would make compiler add padding
+ * between and after fields, so we have to use compiler-specific attributes to
+ * pack it, to disable this padding. This also often causes compiler to
+ * generate suboptimal code.
+ *
+ * We maintain this statistics structure format for backwards compatibility,
+ * but don't follow this example.
+ *
+ * If implementing a similar structure, do something like the below instead:
+ *     struct virtio_balloon_stat {
+ *         __virtio16 tag;
+ *         __u8 reserved[6];
+ *         __virtio64 val;
+ *     };
+ *
+ * In other words, add explicit reserved fields to align field and
+ * structure boundaries at field size, avoiding compiler padding
+ * without the packed attribute.
+ */
 struct virtio_balloon_stat {
 	uint16_t tag;
 	uint64_t val;
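
Note: the new comment above is worth illustrating. On common 64-bit ABIs the
compiler pads the 2-byte tag so the 8-byte val lands on its natural alignment,
which is why the wire format only matches when the structure is packed (or the
padding is made explicit). A standalone sketch, not part of the patch, assuming
a typical LP64 ABI:

#include <stdint.h>

struct stat_padded {		/* what the compiler does by default */
	uint16_t tag;		/* 6 bytes of hidden padding follow */
	uint64_t val;
};

struct stat_explicit {		/* the layout the comment recommends */
	uint16_t tag;
	uint8_t  reserved[6];	/* the padding, made explicit */
	uint64_t val;
};

_Static_assert(sizeof(struct stat_padded) == 16, "padded by compiler");
_Static_assert(sizeof(struct stat_explicit) == 16, "same size, no surprises");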
(-)sys/dev/virtio/block/virtio_blk.c (-63 / +96 lines)
Lines 1-4
 /*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
  * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
  * All rights reserved.
  *
Lines 74-84
 	uint64_t		 vtblk_features;
 	uint32_t		 vtblk_flags;
 #define VTBLK_FLAG_INDIRECT	0x0001
-#define VTBLK_FLAG_READONLY	0x0002
-#define VTBLK_FLAG_DETACH	0x0004
-#define VTBLK_FLAG_SUSPEND	0x0008
-#define VTBLK_FLAG_BARRIER	0x0010
-#define VTBLK_FLAG_WC_CONFIG	0x0020
+#define VTBLK_FLAG_DETACH	0x0002
+#define VTBLK_FLAG_SUSPEND	0x0004
+#define VTBLK_FLAG_BARRIER	0x0008
+#define VTBLK_FLAG_WCE_CONFIG	0x0010
 
 	struct virtqueue	*vtblk_vq;
 	struct sglist		*vtblk_sglist;
Lines 107-115
 	{ VIRTIO_BLK_F_RO,		"ReadOnly"	},
 	{ VIRTIO_BLK_F_BLK_SIZE,	"BlockSize"	},
 	{ VIRTIO_BLK_F_SCSI,		"SCSICmds"	},
-	{ VIRTIO_BLK_F_WCE,		"WriteCache"	},
+	{ VIRTIO_BLK_F_FLUSH,		"FlushCmd"	},
 	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology"	},
 	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE"	},
+	{ VIRTIO_BLK_F_MQ,		"Multiqueue"	},
 
 	{ 0, NULL }
 };
Lines 131-138
 static int	vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
 static void	vtblk_strategy(struct bio *);
 
-static void	vtblk_negotiate_features(struct vtblk_softc *);
-static void	vtblk_setup_features(struct vtblk_softc *);
+static int	vtblk_negotiate_features(struct vtblk_softc *);
+static int	vtblk_setup_features(struct vtblk_softc *);
 static int	vtblk_maximum_segments(struct vtblk_softc *,
 		    struct virtio_blk_config *);
 static int	vtblk_alloc_virtqueue(struct vtblk_softc *);
Lines 191-196
 static void	vtblk_setup_sysctl(struct vtblk_softc *);
 static int	vtblk_tunable_int(struct vtblk_softc *, const char *, int);
 
+#define vtblk_modern(_sc) (((_sc)->vtblk_features & VIRTIO_F_VERSION_1) != 0)
+#define vtblk_htog16(_sc, _val)	virtio_htog16(vtblk_modern(_sc), _val)
+#define vtblk_htog32(_sc, _val)	virtio_htog32(vtblk_modern(_sc), _val)
+#define vtblk_htog64(_sc, _val)	virtio_htog64(vtblk_modern(_sc), _val)
+#define vtblk_gtoh16(_sc, _val)	virtio_gtoh16(vtblk_modern(_sc), _val)
+#define vtblk_gtoh32(_sc, _val)	virtio_gtoh32(vtblk_modern(_sc), _val)
+#define vtblk_gtoh64(_sc, _val)	virtio_gtoh64(vtblk_modern(_sc), _val)
+
 /* Tunables. */
 static int vtblk_no_ident = 0;
 TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
Lines 198-215
 TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);
 
 /* Features desired/implemented by this driver. */
-#define VTBLK_FEATURES \
-    (VIRTIO_BLK_F_BARRIER		| \
-     VIRTIO_BLK_F_SIZE_MAX		| \
+#define VTBLK_COMMON_FEATURES \
+    (VIRTIO_BLK_F_SIZE_MAX		| \
      VIRTIO_BLK_F_SEG_MAX		| \
      VIRTIO_BLK_F_GEOMETRY		| \
      VIRTIO_BLK_F_RO			| \
      VIRTIO_BLK_F_BLK_SIZE		| \
-     VIRTIO_BLK_F_WCE			| \
+     VIRTIO_BLK_F_FLUSH			| \
      VIRTIO_BLK_F_TOPOLOGY		| \
      VIRTIO_BLK_F_CONFIG_WCE		| \
      VIRTIO_RING_F_INDIRECT_DESC)
 
+#define VTBLK_MODERN_FEATURES 	(VTBLK_COMMON_FEATURES)
+#define VTBLK_LEGACY_FEATURES	(VIRTIO_BLK_F_BARRIER | VTBLK_COMMON_FEATURES)
+
 #define VTBLK_MTX(_sc)		&(_sc)->vtblk_mtx
 #define VTBLK_LOCK_INIT(_sc, _name) \
 				mtx_init(VTBLK_MTX((_sc)), (_name), \
Lines 254-261
 
 DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
     vtblk_modevent, 0);
-DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
+DRIVER_MODULE(virtio_blk, vtpcil, vtblk_driver, vtblk_devclass,
     vtblk_modevent, 0);
+DRIVER_MODULE(virtio_blk, vtpcim, vtblk_driver, vtblk_devclass,
+    vtblk_modevent, 0);
 MODULE_VERSION(virtio_blk, 1);
 MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);
 
Lines 299-308
 	struct virtio_blk_config blkcfg;
 	int error;
 
-	virtio_set_feature_desc(dev, vtblk_feature_desc);
-
 	sc = device_get_softc(dev);
 	sc->vtblk_dev = dev;
+	virtio_set_feature_desc(dev, vtblk_feature_desc);
+
 	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
 	bioq_init(&sc->vtblk_bioq);
 	TAILQ_INIT(&sc->vtblk_dump_queue);
Lines 310-317
 	TAILQ_INIT(&sc->vtblk_req_ready);
 
 	vtblk_setup_sysctl(sc);
-	vtblk_setup_features(sc);
 
+	error = vtblk_setup_features(sc);
+	if (error) {
+		device_printf(dev, "cannot setup features\n");
+		goto fail;
+	}
+
 	vtblk_read_config(sc, &blkcfg);
 
 	/*
Lines 539-554
 		return;
 	}
 
-	/*
-	 * Fail any write if RO. Unfortunately, there does not seem to
-	 * be a better way to report our readonly'ness to GEOM above.
-	 */
-	if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
-	    (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
-		vtblk_bio_done(sc, bp, EROFS);
-		return;
-	}
-
 	VTBLK_LOCK(sc);
 
 	if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
Lines 563-597
 	VTBLK_UNLOCK(sc);
 }
 
-static void
+static int
 vtblk_negotiate_features(struct vtblk_softc *sc)
 {
 	device_t dev;
 	uint64_t features;
 
 	dev = sc->vtblk_dev;
-	features = VTBLK_FEATURES;
+	features = virtio_bus_is_modern(dev) ? VTBLK_MODERN_FEATURES :
+	    VTBLK_LEGACY_FEATURES;
 
 	sc->vtblk_features = virtio_negotiate_features(dev, features);
+	return (virtio_finalize_features(dev));
 }
 
-static void
+static int
 vtblk_setup_features(struct vtblk_softc *sc)
 {
 	device_t dev;
+	int error;
 
 	dev = sc->vtblk_dev;
 
-	vtblk_negotiate_features(sc);
+	error = vtblk_negotiate_features(sc);
+	if (error)
+		return (error);
 
 	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
 		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
-	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
-		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
-	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
-		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
 	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
-		sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
+		sc->vtblk_flags |= VTBLK_FLAG_WCE_CONFIG;
+	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
+		sc->vtblk_flags |= VTBLK_FLAG_BARRIER; /* Legacy. */
+
+	return (0);
 }
 
 static int
Lines 670-684
 	dp->d_name = VTBLK_DISK_NAME;
 	dp->d_unit = device_get_unit(dev);
 	dp->d_drv1 = sc;
-	dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO |
-	    DISKFLAG_DIRECT_COMPLETION;
+	dp->d_flags = DISKFLAG_UNMAPPED_BIO | DISKFLAG_DIRECT_COMPLETION;
 	dp->d_hba_vendor = virtio_get_vendor(dev);
 	dp->d_hba_device = virtio_get_device(dev);
 	dp->d_hba_subvendor = virtio_get_subvendor(dev);
 	dp->d_hba_subdevice = virtio_get_subdevice(dev);
 
-	if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
+	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
+		dp->d_flags |= DISKFLAG_WRITE_PROTECT;
+	else {
+		if (virtio_with_feature(dev, VIRTIO_BLK_F_FLUSH))
+			dp->d_flags |= DISKFLAG_CANFLUSHCACHE;
 		dp->d_dump = vtblk_dump;
+	}
 
 	/* Capacity is always in 512-byte units. */
 	dp->d_mediasize = blkcfg->capacity * 512;
Lines 862-887
 	bp = bioq_takefirst(bioq);
 	req->vbr_bp = bp;
 	req->vbr_ack = -1;
-	req->vbr_hdr.ioprio = 1;
+	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
 
 	switch (bp->bio_cmd) {
 	case BIO_FLUSH:
-		req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
+		req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_FLUSH);
+		req->vbr_hdr.sector = 0;
 		break;
 	case BIO_READ:
-		req->vbr_hdr.type = VIRTIO_BLK_T_IN;
-		req->vbr_hdr.sector = bp->bio_offset / 512;
+		req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_IN);
+		req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / 512);
 		break;
 	case BIO_WRITE:
-		req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
-		req->vbr_hdr.sector = bp->bio_offset / 512;
+		req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_OUT);
+		req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / 512);
 		break;
 	default:
 		panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
 	}
 
 	if (bp->bio_flags & BIO_ORDERED)
-		req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;
+		req->vbr_hdr.type |= vtblk_gtoh32(sc, VIRTIO_BLK_T_BARRIER);
 
 	return (req);
 }
Lines 912-918
 			if (!virtqueue_empty(vq))
 				return (EBUSY);
 			ordered = 1;
-			req->vbr_hdr.type &= ~VIRTIO_BLK_T_BARRIER;
+			req->vbr_hdr.type &= vtblk_gtoh32(sc,
+				~VIRTIO_BLK_T_BARRIER);
 		}
 	}
 
Lines 1016-1030
 static void
 vtblk_drain(struct vtblk_softc *sc)
 {
-	struct bio_queue queue;
 	struct bio_queue_head *bioq;
 	struct vtblk_request *req;
 	struct bio *bp;
 
 	bioq = &sc->vtblk_bioq;
-	TAILQ_INIT(&queue);
 
 	if (sc->vtblk_vq != NULL) {
+		struct bio_queue queue;
+
+		TAILQ_INIT(&queue);
 		vtblk_queue_completed(sc, &queue);
 		vtblk_done_completed(sc, &queue);
 
Lines 1115-1124
 	/* Read the configuration if the feature was negotiated. */
 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
+	    geometry.cylinders, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
+	    geometry.heads, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
+	    geometry.sectors, blkcfg);
 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
+	    topology.physical_block_exp, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
+	    topology.alignment_offset, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
+	    topology.min_io_size, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
+	    topology.opt_io_size, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, wce, blkcfg);
 }
 
 #undef VTBLK_GET_CONFIG
Lines 1142-1149
 		return;
 
 	req->vbr_ack = -1;
-	req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
-	req->vbr_hdr.ioprio = 1;
+	req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_GET_ID);
+	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
 	req->vbr_hdr.sector = 0;
 
 	req->vbr_bp = &buf;
Lines 1274-1282
 
 	req = &sc->vtblk_dump_request;
 	req->vbr_ack = -1;
-	req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
-	req->vbr_hdr.ioprio = 1;
-	req->vbr_hdr.sector = offset / 512;
+	req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_OUT);
+	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
+	req->vbr_hdr.sector = vtblk_gtoh64(sc, offset / 512);
 
 	req->vbr_bp = &buf;
 	g_reset_bio(&buf);
Lines 1296-1303
 
 	req = &sc->vtblk_dump_request;
 	req->vbr_ack = -1;
-	req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
-	req->vbr_hdr.ioprio = 1;
+	req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_FLUSH);
+	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
 	req->vbr_hdr.sector = 0;
 
 	req->vbr_bp = &buf;
Lines 1325-1331
 
 	/* Set either writeback (1) or writethrough (0) mode. */
 	virtio_write_dev_config_1(sc->vtblk_dev,
-	    offsetof(struct virtio_blk_config, writeback), wc);
+	    offsetof(struct virtio_blk_config, wce), wc);
 }
 
 static int
Lines 1334-1348
 {
 	int wc;
 
-	if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
+	if (sc->vtblk_flags & VTBLK_FLAG_WCE_CONFIG) {
 		wc = vtblk_tunable_int(sc, "writecache_mode",
 		    vtblk_writecache_mode);
 		if (wc >= 0 && wc < VTBLK_CACHE_MAX)
 			vtblk_set_write_cache(sc, wc);
 		else
-			wc = blkcfg->writeback;
+			wc = blkcfg->wce;
 	} else
-		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);
+		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_FLUSH);
 
 	return (wc);
 }
Lines 1359-1365
 	error = sysctl_handle_int(oidp, &wc, 0, req);
 	if (error || req->newptr == NULL)
 		return (error);
-	if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
+	if ((sc->vtblk_flags & VTBLK_FLAG_WCE_CONFIG) == 0)
 		return (EPERM);
 	if (wc < 0 || wc >= VTBLK_CACHE_MAX)
 		return (EINVAL);
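
Note: the vtblk_gtoh*/vtblk_htog* wrappers above centralize the byte-order
rule of the two device generations: legacy devices use guest-native
endianness, while modern (V1) devices are always little-endian. A plausible
sketch of what the underlying virtio_gtoh32()/virtio_htog32() helpers do (the
real ones live in the shared VirtIO headers; this is illustrative only, using
the <sys/endian.h> conversions):

static inline uint32_t
virtio_gtoh32_sketch(bool modern, uint32_t val)
{
	/* Guest-to-host(device): convert to LE only for modern devices. */
	return (modern ? htole32(val) : val);
}

static inline uint32_t
virtio_htog32_sketch(bool modern, uint32_t val)
{
	/* Host(device)-to-guest: the inverse conversion. */
	return (modern ? le32toh(val) : val);
}

Both are no-ops on little-endian machines; the swap only matters for modern
devices on big-endian guests.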
(-)sys/dev/virtio/block/virtio_blk.h (-5 / +24 lines)
Lines 1-4
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * This header is BSD licensed so anyone can use the definitions to implement
  * compatible drivers/servers.
  *
Lines 32-48
 #define _VIRTIO_BLK_H
 
 /* Feature bits */
-#define VIRTIO_BLK_F_BARRIER	0x0001	/* Does host support barriers? */
 #define VIRTIO_BLK_F_SIZE_MAX	0x0002	/* Indicates maximum segment size */
 #define VIRTIO_BLK_F_SEG_MAX	0x0004	/* Indicates maximum # of segments */
 #define VIRTIO_BLK_F_GEOMETRY	0x0010	/* Legacy geometry available  */
 #define VIRTIO_BLK_F_RO		0x0020	/* Disk is read-only */
 #define VIRTIO_BLK_F_BLK_SIZE	0x0040	/* Block size of disk is available*/
-#define VIRTIO_BLK_F_SCSI	0x0080	/* Supports scsi command passthru */
-#define VIRTIO_BLK_F_WCE	0x0200	/* Writeback mode enabled after reset */
+#define VIRTIO_BLK_F_FLUSH	0x0200	/* Flush command supported */
 #define VIRTIO_BLK_F_TOPOLOGY	0x0400	/* Topology information is available */
 #define VIRTIO_BLK_F_CONFIG_WCE 0x0800	/* Writeback mode available in config */
+#define VIRTIO_BLK_F_MQ 	0x1000 	/* Support more than one vq */
 
+/* Legacy feature bits */
+#define VIRTIO_BLK_F_BARRIER	0x0001	/* Does host support barriers? */
+#define VIRTIO_BLK_F_SCSI	0x0080	/* Supports scsi command passthru */
+
+/* Old (deprecated) name for VIRTIO_BLK_F_FLUSH. */
+#define VIRTIO_BLK_F_WCE VIRTIO_BLK_F_FLUSH
 #define VIRTIO_BLK_ID_BYTES	20	/* ID string length */
 
 struct virtio_blk_config {
Lines 64-78
 
 	/* Topology of the device (if VIRTIO_BLK_F_TOPOLOGY) */
 	struct virtio_blk_topology {
+		/* exponent for physical block per logical block. */
 		uint8_t physical_block_exp;
+		/* alignment offset in logical blocks. */
 		uint8_t alignment_offset;
+		/* minimum I/O size without performance penalty in logical
+		 * blocks. */
 		uint16_t min_io_size;
+		/* optimal sustained I/O size in logical blocks. */
 		uint32_t opt_io_size;
 	} topology;
 
 	/* Writeback mode (if VIRTIO_BLK_F_CONFIG_WCE) */
-	uint8_t writeback;
+	uint8_t wce;
+	uint8_t unused;
 
+	/* Number of vqs, only available when VIRTIO_BLK_F_MQ is set */
+	uint16_t num_queues;
 } __packed;
 
 /*
Lines 105-111
 /* ID string length */
 #define VIRTIO_BLK_ID_BYTES	20
 
-/* This is the first element of the read scatter-gather list. */
+/*
+ * This comes first in the read scatter-gather list.
+ * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated,
+ * this is the first element of the read scatter-gather list.
+ */
 struct virtio_blk_outhdr {
 	/* VIRTIO_BLK_T* */
 	uint32_t type;
(-)sys/dev/virtio/console/virtio_console.c (-26 / +142 lines)
Lines 30-35
 __FBSDID("$FreeBSD: releng/11.3/sys/dev/virtio/console/virtio_console.c 298955 2016-05-03 03:41:25Z pfg $");
 
 #include <sys/param.h>
+#include <sys/ctype.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/malloc.h>
Lines 58-71
 
 #define VTCON_MAX_PORTS 32
 #define VTCON_TTY_PREFIX "V"
+#define VTCON_TTY_ALIAS_PREFIX "vtcon"
 #define VTCON_BULK_BUFSZ 128
+#define VTCON_CTRL_BUFSZ 128
 
 /*
- * The buffer cannot cross more than one page boundary due to the
+ * The buffers cannot cross more than one page boundary due to the
  * size of the sglist segment array used.
  */
 CTASSERT(VTCON_BULK_BUFSZ <= PAGE_SIZE);
+CTASSERT(VTCON_CTRL_BUFSZ <= PAGE_SIZE);
 
+CTASSERT(sizeof(struct virtio_console_config) <= VTCON_CTRL_BUFSZ);
+
 struct vtcon_softc;
 struct vtcon_softc_port;
 
Lines 80-85
 	int				 vtcport_flags;
 #define VTCON_PORT_FLAG_GONE	0x01
 #define VTCON_PORT_FLAG_CONSOLE	0x02
+#define VTCON_PORT_FLAG_ALIAS	0x04
 
 #if defined(KDB)
 	int				 vtcport_alt_break_state;
Lines 151-158
 static int	 vtcon_detach(device_t);
 static int	 vtcon_config_change(device_t);
 
-static void	 vtcon_setup_features(struct vtcon_softc *);
-static void	 vtcon_negotiate_features(struct vtcon_softc *);
+static int	 vtcon_setup_features(struct vtcon_softc *);
+static int	 vtcon_negotiate_features(struct vtcon_softc *);
 static int	 vtcon_alloc_scports(struct vtcon_softc *);
 static int	 vtcon_alloc_virtqueues(struct vtcon_softc *);
 static void	 vtcon_read_config(struct vtcon_softc *,
Lines 176-183
 static void	 vtcon_ctrl_port_remove_event(struct vtcon_softc *, int);
 static void	 vtcon_ctrl_port_console_event(struct vtcon_softc *, int);
 static void	 vtcon_ctrl_port_open_event(struct vtcon_softc *, int);
+static void	 vtcon_ctrl_port_name_event(struct vtcon_softc *, int,
+		     const char *, size_t);
 static void	 vtcon_ctrl_process_event(struct vtcon_softc *,
-		     struct virtio_console_control *);
+		     struct virtio_console_control *, void *, size_t);
 static void	 vtcon_ctrl_task_cb(void *, int);
 static void	 vtcon_ctrl_event_intr(void *);
 static void	 vtcon_ctrl_poll(struct vtcon_softc *,
Lines 191-196
 static int	 vtcon_port_populate(struct vtcon_port *);
 static void	 vtcon_port_destroy(struct vtcon_port *);
 static int	 vtcon_port_create(struct vtcon_softc *, int);
+static void	 vtcon_port_dev_alias(struct vtcon_port *, const char *,
+		     size_t);
 static void	 vtcon_port_drain_bufs(struct virtqueue *);
 static void	 vtcon_port_drain(struct vtcon_port *);
 static void	 vtcon_port_teardown(struct vtcon_port *);
Lines 216-221
 static void	 vtcon_enable_interrupts(struct vtcon_softc *);
 static void	 vtcon_disable_interrupts(struct vtcon_softc *);
 
+#define vtcon_modern(_sc) (((_sc)->vtcon_features & VIRTIO_F_VERSION_1) != 0)
+#define vtcon_htog16(_sc, _val)	virtio_htog16(vtcon_modern(_sc), _val)
+#define vtcon_htog32(_sc, _val)	virtio_htog32(vtcon_modern(_sc), _val)
+#define vtcon_htog64(_sc, _val)	virtio_htog64(vtcon_modern(_sc), _val)
+#define vtcon_gtoh16(_sc, _val)	virtio_gtoh16(vtcon_modern(_sc), _val)
+#define vtcon_gtoh32(_sc, _val)	virtio_gtoh32(vtcon_modern(_sc), _val)
+#define vtcon_gtoh64(_sc, _val)	virtio_gtoh64(vtcon_modern(_sc), _val)
+
 static int	 vtcon_pending_free;
 
 static struct ttydevsw vtcon_tty_class = {
Lines 245-252
 };
 static devclass_t vtcon_devclass;
 
-DRIVER_MODULE(virtio_console, virtio_pci, vtcon_driver, vtcon_devclass,
+DRIVER_MODULE(virtio_console, vtpcil, vtcon_driver, vtcon_devclass,
     vtcon_modevent, 0);
+DRIVER_MODULE(virtio_console, vtpcim, vtcon_driver, vtcon_devclass,
+    vtcon_modevent, 0);
 MODULE_VERSION(virtio_console, 1);
 MODULE_DEPEND(virtio_console, virtio, 1, 1, 1);
 
Lines 312-323
 
 	sc = device_get_softc(dev);
 	sc->vtcon_dev = dev;
+	virtio_set_feature_desc(dev, vtcon_feature_desc);
 
 	mtx_init(&sc->vtcon_mtx, "vtconmtx", NULL, MTX_DEF);
 	mtx_init(&sc->vtcon_ctrl_tx_mtx, "vtconctrlmtx", NULL, MTX_DEF);
 
-	virtio_set_feature_desc(dev, vtcon_feature_desc);
-	vtcon_setup_features(sc);
+	error = vtcon_setup_features(sc);
+	if (error) {
+		device_printf(dev, "cannot setup features\n");
+		goto fail;
+	}
 
 	vtcon_read_config(sc, &concfg);
 	vtcon_determine_max_ports(sc, &concfg);
Lines 409-415
 	return (0);
 }
 
-static void
+static int
 vtcon_negotiate_features(struct vtcon_softc *sc)
 {
 	device_t dev;
Lines 419-439
 	features = VTCON_FEATURES;
 
 	sc->vtcon_features = virtio_negotiate_features(dev, features);
+	return (virtio_finalize_features(dev));
 }
 
-static void
+static int
 vtcon_setup_features(struct vtcon_softc *sc)
 {
 	device_t dev;
+	int error;
 
 	dev = sc->vtcon_dev;
 
-	vtcon_negotiate_features(sc);
+	error = vtcon_negotiate_features(sc);
+	if (error)
+		return (error);
 
 	if (virtio_with_feature(dev, VIRTIO_CONSOLE_F_SIZE))
 		sc->vtcon_flags |= VTCON_FLAG_SIZE;
 	if (virtio_with_feature(dev, VIRTIO_CONSOLE_F_MULTIPORT))
 		sc->vtcon_flags |= VTCON_FLAG_MULTIPORT;
+
+	return (0);
 }
 
 #define VTCON_GET_CONFIG(_dev, _feature, _field, _cfg)			\
Lines 597-604
 	vq = sc->vtcon_ctrl_rxvq;
 
 	sglist_init(&sg, 2, segs);
-	error = sglist_append(&sg, control,
-	    sizeof(struct virtio_console_control));
+	error = sglist_append(&sg, control, VTCON_CTRL_BUFSZ);
 	KASSERT(error == 0, ("%s: error %d adding control to sglist",
 	    __func__, error));
 
Lines 611-618
 	struct virtio_console_control *control;
 	int error;
 
-	control = malloc(sizeof(struct virtio_console_control), M_DEVBUF,
-	    M_ZERO | M_NOWAIT);
+	control = malloc(VTCON_CTRL_BUFSZ, M_DEVBUF, M_ZERO | M_NOWAIT);
 	if (control == NULL)
 		return (ENOMEM);
 
Lines 629-635
 {
 	int error;
 
-	bzero(control, sizeof(struct virtio_console_control));
+	bzero(control, VTCON_CTRL_BUFSZ);
 
 	error = vtcon_ctrl_event_enqueue(sc, control);
 	KASSERT(error == 0,
Lines 796-816
 }
 
 static void
+vtcon_ctrl_port_name_event(struct vtcon_softc *sc, int id, const char *name,
+    size_t len)
+{
+	device_t dev;
+	struct vtcon_softc_port *scport;
+	struct vtcon_port *port;
+
+	dev = sc->vtcon_dev;
+	scport = &sc->vtcon_ports[id];
+
+	/*
+	 * The VirtIO specification says the NUL terminator is not included in
+	 * the length, but QEMU includes it. Adjust the length if needed.
+	 */
+	if (name == NULL || len == 0)
+		return;
+	if (name[len - 1] == '\0') {
+		len--;
+		if (len == 0)
+			return;
+	}
+
+	VTCON_LOCK(sc);
+	port = scport->vcsp_port;
+	if (port == NULL) {
+		VTCON_UNLOCK(sc);
+		device_printf(dev, "%s: name port %d, but does not exist\n",
+		    __func__, id);
+		return;
+	}
+
+	VTCON_PORT_LOCK(port);
+	VTCON_UNLOCK(sc);
+	vtcon_port_dev_alias(port, name, len);
+	VTCON_PORT_UNLOCK(port);
+}
+
+static void
 vtcon_ctrl_process_event(struct vtcon_softc *sc,
-    struct virtio_console_control *control)
+    struct virtio_console_control *control, void *data, size_t data_len)
 {
 	device_t dev;
-	int id;
+	uint32_t id;
+	uint16_t event;
 
 	dev = sc->vtcon_dev;
-	id = control->id;
+	id = vtcon_htog32(sc, control->id);
+	event = vtcon_htog16(sc, control->event);
 
-	if (id < 0 || id >= sc->vtcon_max_ports) {
-		device_printf(dev, "%s: invalid port ID %d\n", __func__, id);
+	if (id >= sc->vtcon_max_ports) {
+		device_printf(dev, "%s: event %d invalid port ID %d\n",
+		    __func__, event, id);
 		return;
 	}
 
-	switch (control->event) {
+	switch (event) {
 	case VIRTIO_CONSOLE_PORT_ADD:
 		vtcon_ctrl_port_add_event(sc, id);
 		break;
Lines 831-836
 		break;
 
 	case VIRTIO_CONSOLE_PORT_NAME:
+		vtcon_ctrl_port_name_event(sc, id, (const char *)data, data_len);
 		break;
 	}
 }
Lines 841-847
 	struct vtcon_softc *sc;
 	struct virtqueue *vq;
 	struct virtio_console_control *control;
+	void *data;
+	size_t data_len;
 	int detached;
+	uint32_t len;
 
 	sc = xsc;
 	vq = sc->vtcon_ctrl_rxvq;
Lines 849-860
 	VTCON_LOCK(sc);
 
 	while ((detached = (sc->vtcon_flags & VTCON_FLAG_DETACHED)) == 0) {
-		control = virtqueue_dequeue(vq, NULL);
+		control = virtqueue_dequeue(vq, &len);
 		if (control == NULL)
 			break;
 
+		if (len > sizeof(struct virtio_console_control)) {
+			data = (void *) &control[1];
+			data_len = len - sizeof(struct virtio_console_control);
+		} else {
+			data = NULL;
+			data_len = 0;
+		}
+
 		VTCON_UNLOCK(sc);
-		vtcon_ctrl_process_event(sc, control);
+		vtcon_ctrl_process_event(sc, control, data, data_len);
 		VTCON_LOCK(sc);
 		vtcon_ctrl_event_requeue(sc, control);
 	}
Lines 926-934
 	if ((sc->vtcon_flags & VTCON_FLAG_MULTIPORT) == 0)
 		return;
 
-	control.id = portid;
-	control.event = event;
-	control.value = value;
+	control.id = vtcon_gtoh32(sc, portid);
+	control.event = vtcon_gtoh16(sc, event);
+	control.value = vtcon_gtoh16(sc, value);
 
 	vtcon_ctrl_poll(sc, &control);
 }
Lines 1090-1095
 	    device_get_unit(dev), id);
 
 	return (0);
+}
+
+static void
+vtcon_port_dev_alias(struct vtcon_port *port, const char *name, size_t len)
+{
+	struct vtcon_softc *sc;
+	struct cdev *pdev;
+	struct tty *tp;
+	int i, error;
+
+	sc = port->vtcport_sc;
+	tp = port->vtcport_tty;
+
+	if (port->vtcport_flags & VTCON_PORT_FLAG_ALIAS)
+		return;
+
+	/* Port name is UTF-8, but we can only handle ASCII. */
+	for (i = 0; i < len; i++) {
+		if (!isascii(name[i]))
+			return;
+	}
+
+	/*
+	 * Port name may not conform to the devfs requirements so we cannot use
+	 * tty_makealias() because the MAKEDEV_CHECKNAME flag must be specified.
+	 */
+	error = make_dev_alias_p(MAKEDEV_NOWAIT | MAKEDEV_CHECKNAME, &pdev,
+	    tp->t_dev, "%s/%*s", VTCON_TTY_ALIAS_PREFIX, (int)len, name);
+	if (error) {
+		device_printf(sc->vtcon_dev,
+		    "%s: cannot make dev alias (%s/%*s) error %d\n", __func__,
+		    VTCON_TTY_ALIAS_PREFIX, (int)len, name, error);
+	} else
+		port->vtcport_flags |= VTCON_PORT_FLAG_ALIAS;
 }
 
 static void
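
Note: the control receive buffers grow from sizeof(struct
virtio_console_control) to VTCON_CTRL_BUFSZ because some events, notably
VIRTIO_CONSOLE_PORT_NAME, carry a payload immediately after the fixed header.
A standalone sketch of the header/payload split performed in
vtcon_ctrl_task_cb() above (structure layout per the VirtIO spec):

#include <stddef.h>
#include <stdint.h>

struct virtio_console_control {
	uint32_t id;		/* port number */
	uint16_t event;		/* VIRTIO_CONSOLE_PORT_* */
	uint16_t value;
};

/* Split a dequeued buffer of 'len' bytes into header and optional payload. */
static void
parse_control(struct virtio_console_control *control, uint32_t len,
    const char **data, size_t *data_len)
{
	if (len > sizeof(*control)) {
		*data = (const char *)&control[1];	/* payload follows header */
		*data_len = len - sizeof(*control);
	} else {
		*data = NULL;
		*data_len = 0;
	}
}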
(-)sys/dev/virtio/mmio/virtio_mmio.c (-3 / +18 lines)
Lines 426-431
 	case VIRTIO_IVAR_VENDOR:
 		*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID);
 		break;
+	case VIRTIO_IVAR_SUBVENDOR:
+	case VIRTIO_IVAR_MODERN:
+		*result = 0;
+		break;
 	default:
 		return (ENOENT);
 	}
Lines 512-518
 	if (sc->vtmmio_vqs == NULL)
 		return (ENOMEM);
 
-	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE, 1 << PAGE_SHIFT);
+	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
+	    (1 << PAGE_SHIFT));
 
 	for (idx = 0; idx < nvqs; idx++) {
 		vqx = &sc->vtmmio_vqs[idx];
Lines 537-546
 		    VIRTIO_MMIO_VRING_ALIGN);
 #if 0
 		device_printf(dev, "virtqueue paddr 0x%08lx\n",
-				(uint64_t)virtqueue_paddr(vq));
+		    (uint64_t)virtqueue_paddr(vq));
 #endif
 		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN,
-			virtqueue_paddr(vq) >> PAGE_SHIFT);
+		    virtqueue_paddr(vq) >> PAGE_SHIFT);
 
 		vqx->vtv_vq = *info->vqai_vq = vq;
 		vqx->vtv_no_intr = info->vqai_intr == NULL;
Lines 592-597
 
 	vtmmio_negotiate_features(dev, features);
 
+	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
+	    (1 << PAGE_SHIFT));
+
 	for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
 		error = vtmmio_reinit_virtqueue(sc, idx);
 		if (error)
Lines 766-771
 	if (error)
 		return (error);
 
+	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NUM, size);
+	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_ALIGN,
+	    VIRTIO_MMIO_VRING_ALIGN);
+#if 0
+	device_printf(sc->dev, "virtqueue paddr 0x%08lx\n",
+	    (uint64_t)virtqueue_paddr(vq));
+#endif
 	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN,
 	    virtqueue_paddr(vq) >> PAGE_SHIFT);
 
Lines 1-4 Link Here
1
/*-
1
/*-
2
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3
 *
2
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
4
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
3
 * All rights reserved.
5
 * All rights reserved.
4
 *
6
 *
Lines 67-88 Link Here
67
#include <netinet6/ip6_var.h>
69
#include <netinet6/ip6_var.h>
68
#include <netinet/udp.h>
70
#include <netinet/udp.h>
69
#include <netinet/tcp.h>
71
#include <netinet/tcp.h>
72
#include <netinet/tcp_lro.h>
70
73
71
#include <machine/bus.h>
74
#include <machine/bus.h>
72
#include <machine/resource.h>
75
#include <machine/resource.h>
73
#include <sys/bus.h>
76
#include <sys/bus.h>
74
#include <sys/rman.h>
77
#include <sys/rman.h>
75
78
79
#include <sys/queue.h>
80
76
#include <dev/virtio/virtio.h>
81
#include <dev/virtio/virtio.h>
77
#include <dev/virtio/virtqueue.h>
82
#include <dev/virtio/virtqueue.h>
78
#include <dev/virtio/network/virtio_net.h>
83
#include <dev/virtio/network/virtio_net.h>
79
#include <dev/virtio/network/if_vtnetvar.h>
84
#include <dev/virtio/network/if_vtnetvar.h>
80
81
#include "virtio_if.h"
85
#include "virtio_if.h"
82
86
83
#include "opt_inet.h"
87
#include "opt_inet.h"
84
#include "opt_inet6.h"
88
#include "opt_inet6.h"
85
89
90
#if defined(INET) || defined(INET6)
91
#include <machine/in_cksum.h>
92
#endif
93
86
static int	vtnet_modevent(module_t, int, void *);
94
static int	vtnet_modevent(module_t, int, void *);
87
95
88
static int	vtnet_probe(device_t);
96
static int	vtnet_probe(device_t);
Lines 94-101 Link Here
94
static int	vtnet_attach_completed(device_t);
102
static int	vtnet_attach_completed(device_t);
95
static int	vtnet_config_change(device_t);
103
static int	vtnet_config_change(device_t);
96
104
97
static void	vtnet_negotiate_features(struct vtnet_softc *);
105
static int	vtnet_negotiate_features(struct vtnet_softc *);
98
static void	vtnet_setup_features(struct vtnet_softc *);
106
static int	vtnet_setup_features(struct vtnet_softc *);
99
static int	vtnet_init_rxq(struct vtnet_softc *, int);
107
static int	vtnet_init_rxq(struct vtnet_softc *, int);
100
static int	vtnet_init_txq(struct vtnet_softc *, int);
108
static int	vtnet_init_txq(struct vtnet_softc *, int);
101
static int	vtnet_alloc_rxtx_queues(struct vtnet_softc *);
109
static int	vtnet_alloc_rxtx_queues(struct vtnet_softc *);
Lines 103-110 Link Here
103
static int	vtnet_alloc_rx_filters(struct vtnet_softc *);
111
static int	vtnet_alloc_rx_filters(struct vtnet_softc *);
104
static void	vtnet_free_rx_filters(struct vtnet_softc *);
112
static void	vtnet_free_rx_filters(struct vtnet_softc *);
105
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
113
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
114
static int	vtnet_alloc_interface(struct vtnet_softc *);
106
static int	vtnet_setup_interface(struct vtnet_softc *);
115
static int	vtnet_setup_interface(struct vtnet_softc *);
107
static int	vtnet_change_mtu(struct vtnet_softc *, int);
116
static int	vtnet_ioctl_mtu(struct vtnet_softc *, int);
117
static int	vtnet_ioctl_ifflags(struct vtnet_softc *);
118
static int	vtnet_ioctl_multi(struct vtnet_softc *);
119
static int	vtnet_ioctl_ifcap(struct vtnet_softc *, struct ifreq *);
108
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t);
120
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t);
109
static uint64_t	vtnet_get_counter(struct ifnet *, ift_counter);
121
static uint64_t	vtnet_get_counter(struct ifnet *, ift_counter);
110
122
Lines 112-122 Link Here
112
static void	vtnet_rxq_free_mbufs(struct vtnet_rxq *);
124
static void	vtnet_rxq_free_mbufs(struct vtnet_rxq *);
113
static struct mbuf *
125
static struct mbuf *
114
		vtnet_rx_alloc_buf(struct vtnet_softc *, int , struct mbuf **);
126
		vtnet_rx_alloc_buf(struct vtnet_softc *, int , struct mbuf **);
115
static int	vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *,
127
static int	vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *,
116
		    struct mbuf *, int);
128
		    struct mbuf *, int);
117
static int	vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
129
static int	vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
118
static int	vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
130
static int	vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
119
static int	vtnet_rxq_new_buf(struct vtnet_rxq *);
131
static int	vtnet_rxq_new_buf(struct vtnet_rxq *);
132
static int	vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *,
133
		     uint16_t, int, struct virtio_net_hdr *);
134
static int	vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
135
		     uint16_t, int, struct virtio_net_hdr *);
120
static int	vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
136
static int	vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
121
		     struct virtio_net_hdr *);
137
		     struct virtio_net_hdr *);
122
static void	vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
138
static void	vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
Lines 128-133 Link Here
128
static void	vtnet_rx_vq_intr(void *);
144
static void	vtnet_rx_vq_intr(void *);
129
static void	vtnet_rxq_tq_intr(void *, int);
145
static void	vtnet_rxq_tq_intr(void *, int);
130
146
147
static int	vtnet_txq_intr_threshold(struct vtnet_txq *);
131
static int	vtnet_txq_below_threshold(struct vtnet_txq *);
148
static int	vtnet_txq_below_threshold(struct vtnet_txq *);
132
static int	vtnet_txq_notify(struct vtnet_txq *);
149
static int	vtnet_txq_notify(struct vtnet_txq *);
133
static void	vtnet_txq_free_mbufs(struct vtnet_txq *);
150
static void	vtnet_txq_free_mbufs(struct vtnet_txq *);
Lines 140-146 Link Here
140
		    struct virtio_net_hdr *);
157
		    struct virtio_net_hdr *);
141
static int	vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
158
static int	vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
142
		    struct vtnet_tx_header *);
159
		    struct vtnet_tx_header *);
143
static int	vtnet_txq_encap(struct vtnet_txq *, struct mbuf **);
160
static int	vtnet_txq_encap(struct vtnet_txq *, struct mbuf **, int);
144
#ifdef VTNET_LEGACY_TX
161
#ifdef VTNET_LEGACY_TX
145
static void	vtnet_start_locked(struct vtnet_txq *, struct ifnet *);
162
static void	vtnet_start_locked(struct vtnet_txq *, struct ifnet *);
146
static void	vtnet_start(struct ifnet *);
163
static void	vtnet_start(struct ifnet *);
Lines 177-182 Link Here
177
static int	vtnet_init_tx_queues(struct vtnet_softc *);
194
static int	vtnet_init_tx_queues(struct vtnet_softc *);
178
static int	vtnet_init_rxtx_queues(struct vtnet_softc *);
195
static int	vtnet_init_rxtx_queues(struct vtnet_softc *);
179
static void	vtnet_set_active_vq_pairs(struct vtnet_softc *);
196
static void	vtnet_set_active_vq_pairs(struct vtnet_softc *);
197
static void	vtnet_update_rx_offloads(struct vtnet_softc *);
180
static int	vtnet_reinit(struct vtnet_softc *);
198
static int	vtnet_reinit(struct vtnet_softc *);
181
static void	vtnet_init_locked(struct vtnet_softc *);
199
static void	vtnet_init_locked(struct vtnet_softc *);
182
static void	vtnet_init(void *);
200
static void	vtnet_init(void *);
Lines 185-195 Link Here
185
static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
203
static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
186
		    struct sglist *, int, int);
204
		    struct sglist *, int, int);
187
static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
205
static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
206
static int	vtnet_ctrl_guest_offloads(struct vtnet_softc *, uint64_t);
188
static int	vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
207
static int	vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
189
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
208
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, uint8_t, int);
190
static int	vtnet_set_promisc(struct vtnet_softc *, int);
209
static int	vtnet_set_promisc(struct vtnet_softc *, int);
191
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
210
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
192
static void	vtnet_attach_disable_promisc(struct vtnet_softc *);
193
static void	vtnet_rx_filter(struct vtnet_softc *);
211
static void	vtnet_rx_filter(struct vtnet_softc *);
194
static void	vtnet_rx_filter_mac(struct vtnet_softc *);
212
static void	vtnet_rx_filter_mac(struct vtnet_softc *);
195
static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
213
static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
Lines 198-218 Link Here
198
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
216
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
199
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
217
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
200
218
219
static void	vtnet_update_speed_duplex(struct vtnet_softc *);
201
static int	vtnet_is_link_up(struct vtnet_softc *);
220
static int	vtnet_is_link_up(struct vtnet_softc *);
202
static void	vtnet_update_link_status(struct vtnet_softc *);
221
static void	vtnet_update_link_status(struct vtnet_softc *);
203
static int	vtnet_ifmedia_upd(struct ifnet *);
222
static int	vtnet_ifmedia_upd(struct ifnet *);
204
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
223
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
205
static void	vtnet_get_hwaddr(struct vtnet_softc *);
224
static void	vtnet_get_macaddr(struct vtnet_softc *);
206
static void	vtnet_set_hwaddr(struct vtnet_softc *);
225
static void	vtnet_set_macaddr(struct vtnet_softc *);
226
static void	vtnet_attached_set_macaddr(struct vtnet_softc *);
207
static void	vtnet_vlan_tag_remove(struct mbuf *);
227
static void	vtnet_vlan_tag_remove(struct mbuf *);
208
static void	vtnet_set_rx_process_limit(struct vtnet_softc *);
228
static void	vtnet_set_rx_process_limit(struct vtnet_softc *);
209
static void	vtnet_set_tx_intr_threshold(struct vtnet_softc *);
210
229
211
static void	vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
230
static void	vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
212
		    struct sysctl_oid_list *, struct vtnet_rxq *);
231
		    struct sysctl_oid_list *, struct vtnet_rxq *);
213
static void	vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
232
static void	vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
214
		    struct sysctl_oid_list *, struct vtnet_txq *);
233
		    struct sysctl_oid_list *, struct vtnet_txq *);
215
static void	vtnet_setup_queue_sysctl(struct vtnet_softc *);
234
static void	vtnet_setup_queue_sysctl(struct vtnet_softc *);
235
static void	vtnet_load_tunables(struct vtnet_softc *);
216
static void	vtnet_setup_sysctl(struct vtnet_softc *);
236
static void	vtnet_setup_sysctl(struct vtnet_softc *);
217
237
218
static int	vtnet_rxq_enable_intr(struct vtnet_rxq *);
238
static int	vtnet_rxq_enable_intr(struct vtnet_rxq *);
Lines 229-285 Link Here
229
static int	vtnet_tunable_int(struct vtnet_softc *, const char *, int);
249
static int	vtnet_tunable_int(struct vtnet_softc *, const char *, int);
230
250
231
/* Tunables. */
251
/* Tunables. */
232
static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VNET driver parameters");
252
#define vtnet_htog16(_sc, _val)	virtio_htog16(vtnet_modern(_sc), _val)
253
#define vtnet_htog32(_sc, _val)	virtio_htog32(vtnet_modern(_sc), _val)
254
#define vtnet_htog64(_sc, _val)	virtio_htog64(vtnet_modern(_sc), _val)
255
#define vtnet_gtoh16(_sc, _val)	virtio_gtoh16(vtnet_modern(_sc), _val)
256
#define vtnet_gtoh32(_sc, _val)	virtio_gtoh32(vtnet_modern(_sc), _val)
257
#define vtnet_gtoh64(_sc, _val)	virtio_gtoh64(vtnet_modern(_sc), _val)
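These macros paper over the byte-order split between device types: modern (VIRTIO_F_VERSION_1) devices are little-endian by specification, while legacy devices use the guest's native byte order, so no swap is needed. A minimal standalone sketch of what the underlying virtio_htog16()/virtio_gtoh16() helpers presumably reduce to (the _sketch names and bodies below are illustrative assumptions, not the kernel definitions):

	#include <stdbool.h>
	#include <stdint.h>
	#include <sys/endian.h>	/* le16toh()/htole16() on FreeBSD */

	/* Host ("device") to guest byte order: only modern devices swap. */
	static inline uint16_t
	virtio_htog16_sketch(bool modern, uint16_t val)
	{
		return (modern ? le16toh(val) : val);
	}

	/* Guest to host ("device") byte order: the inverse direction. */
	static inline uint16_t
	virtio_gtoh16_sketch(bool modern, uint16_t val)
	{
		return (modern ? htole16(val) : val);
	}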
258
259
static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VirtIO Net driver");
260
233
static int vtnet_csum_disable = 0;
261
static int vtnet_csum_disable = 0;
234
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
235
SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
262
SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
236
    &vtnet_csum_disable, 0, "Disables receive and send checksum offload");
263
    &vtnet_csum_disable, 0, "Disables receive and send checksum offload");
264
265
static int vtnet_fixup_needs_csum = 0;
266
SYSCTL_INT(_hw_vtnet, OID_AUTO, fixup_needs_csum, CTLFLAG_RDTUN,
267
    &vtnet_fixup_needs_csum, 0,
268
    "Calculate valid checksum for NEEDS_CSUM packets");
269
237
static int vtnet_tso_disable = 0;
270
static int vtnet_tso_disable = 0;
238
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
271
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN,
239
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN, &vtnet_tso_disable,
272
    &vtnet_tso_disable, 0, "Disables TSO");
240
    0, "Disables TCP Segmentation Offload");
273
241
static int vtnet_lro_disable = 0;
274
static int vtnet_lro_disable = 0;
242
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
275
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN,
243
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN, &vtnet_lro_disable,
276
    &vtnet_lro_disable, 0, "Disables hardware LRO");
244
    0, "Disables TCP Large Receive Offload");
277
245
static int vtnet_mq_disable = 0;
278
static int vtnet_mq_disable = 0;
246
TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable);
279
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN,
247
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN, &vtnet_mq_disable,
280
    &vtnet_mq_disable, 0, "Disables multiqueue support");
248
    0, "Disables Multi Queue support");
281
249
static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
282
static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
250
TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs);
251
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
283
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
252
    &vtnet_mq_max_pairs, 0, "Sets the maximum number of Multi Queue pairs");
284
    &vtnet_mq_max_pairs, 0, "Maximum number of multiqueue pairs");
253
static int vtnet_rx_process_limit = 512;
285
254
TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit);
286
static int vtnet_tso_maxlen = IP_MAXPACKET;
287
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
288
    &vtnet_tso_maxlen, 0, "TSO burst limit");
289
290
static int vtnet_rx_process_limit = 1024;
255
SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
291
SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
256
    &vtnet_rx_process_limit, 0,
292
    &vtnet_rx_process_limit, 0,
257
    "Limits the number RX segments processed in a single pass");
293
    "Number of RX segments processed in one pass");
258
294
295
static int vtnet_lro_entry_count = 128;
296
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
297
    &vtnet_lro_entry_count, 0, "Software LRO entry count");
298
299
/* Enable sorted LRO, and the depth of the mbuf queue. */
300
static int vtnet_lro_mbufq_depth = 0;
301
SYSCTL_UINT(_hw_vtnet, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
302
    &vtnet_lro_mbufq_depth, 0, "Depth of software LRO mbuf queue");
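All of these knobs are CTLFLAG_RDTUN, so they are boot-time tunables rather than runtime sysctls. A hypothetical /boot/loader.conf fragment (values illustrative only):

	hw.vtnet.tso_disable=1
	hw.vtnet.lro_entry_count=256
	hw.vtnet.rx_process_limit=512

vtnet_tunable_int() additionally checks a per-device variant of each knob, of the form hw.vtnet.<unit>.<name> (for example hw.vtnet.0.tso_disable), which overrides the global value for that unit.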
303
259
static uma_zone_t vtnet_tx_header_zone;
304
static uma_zone_t vtnet_tx_header_zone;
260
305
261
static struct virtio_feature_desc vtnet_feature_desc[] = {
306
static struct virtio_feature_desc vtnet_feature_desc[] = {
262
	{ VIRTIO_NET_F_CSUM,		"TxChecksum"	},
307
	{ VIRTIO_NET_F_CSUM,			"TxChecksum"		},
263
	{ VIRTIO_NET_F_GUEST_CSUM,	"RxChecksum"	},
308
	{ VIRTIO_NET_F_GUEST_CSUM,		"RxChecksum"		},
264
	{ VIRTIO_NET_F_MAC,		"MacAddress"	},
309
	{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,	"CtrlRxOffloads"	},
265
	{ VIRTIO_NET_F_GSO,		"TxAllGSO"	},
310
	{ VIRTIO_NET_F_MAC,			"MAC"			},
266
	{ VIRTIO_NET_F_GUEST_TSO4,	"RxTSOv4"	},
311
	{ VIRTIO_NET_F_GSO,			"TxGSO"			},
267
	{ VIRTIO_NET_F_GUEST_TSO6,	"RxTSOv6"	},
312
	{ VIRTIO_NET_F_GUEST_TSO4,		"RxLROv4"		},
268
	{ VIRTIO_NET_F_GUEST_ECN,	"RxECN"		},
313
	{ VIRTIO_NET_F_GUEST_TSO6,		"RxLROv6"		},
269
	{ VIRTIO_NET_F_GUEST_UFO,	"RxUFO"		},
314
	{ VIRTIO_NET_F_GUEST_ECN,		"RxLROECN"		},
270
	{ VIRTIO_NET_F_HOST_TSO4,	"TxTSOv4"	},
315
	{ VIRTIO_NET_F_GUEST_UFO,		"RxUFO"			},
271
	{ VIRTIO_NET_F_HOST_TSO6,	"TxTSOv6"	},
316
	{ VIRTIO_NET_F_HOST_TSO4,		"TxTSOv4"		},
272
	{ VIRTIO_NET_F_HOST_ECN,	"TxTSOECN"	},
317
	{ VIRTIO_NET_F_HOST_TSO6,		"TxTSOv6"		},
273
	{ VIRTIO_NET_F_HOST_UFO,	"TxUFO"		},
318
	{ VIRTIO_NET_F_HOST_ECN,		"TxTSOECN"		},
274
	{ VIRTIO_NET_F_MRG_RXBUF,	"MrgRxBuf"	},
319
	{ VIRTIO_NET_F_HOST_UFO,		"TxUFO"			},
275
	{ VIRTIO_NET_F_STATUS,		"Status"	},
320
	{ VIRTIO_NET_F_MRG_RXBUF,		"MrgRxBuf"		},
276
	{ VIRTIO_NET_F_CTRL_VQ,		"ControlVq"	},
321
	{ VIRTIO_NET_F_STATUS,			"Status"		},
277
	{ VIRTIO_NET_F_CTRL_RX,		"RxMode"	},
322
	{ VIRTIO_NET_F_CTRL_VQ,			"CtrlVq"		},
278
	{ VIRTIO_NET_F_CTRL_VLAN,	"VLanFilter"	},
323
	{ VIRTIO_NET_F_CTRL_RX,			"CtrlRxMode"		},
279
	{ VIRTIO_NET_F_CTRL_RX_EXTRA,	"RxModeExtra"	},
324
	{ VIRTIO_NET_F_CTRL_VLAN,		"CtrlVLANFilter"	},
280
	{ VIRTIO_NET_F_GUEST_ANNOUNCE,	"GuestAnnounce"	},
325
	{ VIRTIO_NET_F_CTRL_RX_EXTRA,		"CtrlRxModeExtra"	},
281
	{ VIRTIO_NET_F_MQ,		"Multiqueue"	},
326
	{ VIRTIO_NET_F_GUEST_ANNOUNCE,		"GuestAnnounce"		},
282
	{ VIRTIO_NET_F_CTRL_MAC_ADDR,	"SetMacAddress"	},
327
	{ VIRTIO_NET_F_MQ,			"Multiqueue"		},
328
	{ VIRTIO_NET_F_CTRL_MAC_ADDR,		"CtrlMacAddr"		},
329
	{ VIRTIO_NET_F_SPEED_DUPLEX,		"SpeedDuplex"		},
283
330
284
	{ 0, NULL }
331
	{ 0, NULL }
285
};
332
};
Lines 305-320 Link Here
305
#endif /* DEV_NETMAP */
352
#endif /* DEV_NETMAP */
306
353
307
static driver_t vtnet_driver = {
354
static driver_t vtnet_driver = {
308
	"vtnet",
355
	.name = "vtnet",
309
	vtnet_methods,
356
	.methods = vtnet_methods,
310
	sizeof(struct vtnet_softc)
357
	.size = sizeof(struct vtnet_softc)
311
};
358
};
312
static devclass_t vtnet_devclass;
359
static devclass_t vtnet_devclass;
313
360
314
DRIVER_MODULE(vtnet, virtio_mmio, vtnet_driver, vtnet_devclass,
361
DRIVER_MODULE(vtnet, virtio_mmio, vtnet_driver, vtnet_devclass,
315
    vtnet_modevent, 0);
362
    vtnet_modevent, 0);
316
DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
363
DRIVER_MODULE(vtnet, vtpcil, vtnet_driver, vtnet_devclass, vtnet_modevent, 0);
317
    vtnet_modevent, 0);
364
DRIVER_MODULE(vtnet, vtpcim, vtnet_driver, vtnet_devclass, vtnet_modevent, 0);
318
MODULE_VERSION(vtnet, 1);
365
MODULE_VERSION(vtnet, 1);
319
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
366
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
320
#ifdef DEV_NETMAP
367
#ifdef DEV_NETMAP
Lines 361-367 Link Here
361
	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
408
	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
362
		return (ENXIO);
409
		return (ENXIO);
363
410
364
	device_set_desc(dev, "VirtIO Networking Adapter");
411
	device_set_desc(dev, "VirtIO Network Adapter");
365
412
366
	return (BUS_PROBE_DEFAULT);
413
	return (BUS_PROBE_DEFAULT);
367
}
414
}
Lines 380-389 Link Here
380
427
381
	VTNET_CORE_LOCK_INIT(sc);
428
	VTNET_CORE_LOCK_INIT(sc);
382
	callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
429
	callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
430
	vtnet_load_tunables(sc);
383
431
432
	error = vtnet_alloc_interface(sc);
433
	if (error) {
434
		device_printf(dev, "cannot allocate interface\n");
435
		goto fail;
436
	}
437
384
	vtnet_setup_sysctl(sc);
438
	vtnet_setup_sysctl(sc);
385
	vtnet_setup_features(sc);
386
439
440
	error = vtnet_setup_features(sc);
441
	if (error) {
442
		device_printf(dev, "cannot setup features\n");
443
		goto fail;
444
	}
445
387
	error = vtnet_alloc_rx_filters(sc);
446
	error = vtnet_alloc_rx_filters(sc);
388
	if (error) {
447
	if (error) {
389
		device_printf(dev, "cannot allocate Rx filters\n");
448
		device_printf(dev, "cannot allocate Rx filters\n");
Lines 410-416 Link Here
410
469
411
	error = virtio_setup_intr(dev, INTR_TYPE_NET);
470
	error = virtio_setup_intr(dev, INTR_TYPE_NET);
412
	if (error) {
471
	if (error) {
413
		device_printf(dev, "cannot setup virtqueue interrupts\n");
472
		device_printf(dev, "cannot setup interrupts\n");
414
		/* BMV: This will crash if during boot! */
473
		/* BMV: This will crash if during boot! */
415
		ether_ifdetach(sc->vtnet_ifp);
474
		ether_ifdetach(sc->vtnet_ifp);
416
		goto fail;
475
		goto fail;
Lines 518-524 Link Here
518
static int
577
static int
519
vtnet_shutdown(device_t dev)
578
vtnet_shutdown(device_t dev)
520
{
579
{
521
522
	/*
580
	/*
523
	 * Suspend already does all of what we need to
581
	 * Suspend already does all of what we need to
524
	 * do here; we just never expect to be resumed.
582
	 * do here; we just never expect to be resumed.
Lines 529-537 Link Here
529
static int
587
static int
530
vtnet_attach_completed(device_t dev)
588
vtnet_attach_completed(device_t dev)
531
{
589
{
590
	struct vtnet_softc *sc;
532
591
533
	vtnet_attach_disable_promisc(device_get_softc(dev));
592
	sc = device_get_softc(dev);
534
593
594
	VTNET_CORE_LOCK(sc);
595
	vtnet_attached_set_macaddr(sc);
596
	VTNET_CORE_UNLOCK(sc);
597
535
	return (0);
598
	return (0);
536
}
599
}
537
600
Lines 551-587 Link Here
551
	return (0);
614
	return (0);
552
}
615
}
553
616
554
static void
617
static int
555
vtnet_negotiate_features(struct vtnet_softc *sc)
618
vtnet_negotiate_features(struct vtnet_softc *sc)
556
{
619
{
557
	device_t dev;
620
	device_t dev;
558
	uint64_t mask, features;
621
	uint64_t features, negotiated_features;
622
	int no_csum;
559
623
560
	dev = sc->vtnet_dev;
624
	dev = sc->vtnet_dev;
561
	mask = 0;
625
	features = virtio_bus_is_modern(dev) ? VTNET_MODERN_FEATURES :
626
	    VTNET_LEGACY_FEATURES;
562
627
563
	/*
628
	/*
564
	 * TSO and LRO are only available when their corresponding checksum
629
	 * TSO and LRO are only available when their corresponding checksum
565
	 * offload feature is also negotiated.
630
	 * offload feature is also negotiated.
566
	 */
631
	 */
567
	if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) {
632
	no_csum = vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable);
568
		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
633
	if (no_csum)
569
		mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES;
634
		features &= ~(VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM);
570
	}
635
	if (no_csum || vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
571
	if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
636
		features &= ~VTNET_TSO_FEATURES;
572
		mask |= VTNET_TSO_FEATURES;
637
	if (no_csum || vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
573
	if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
638
		features &= ~VTNET_LRO_FEATURES;
574
		mask |= VTNET_LRO_FEATURES;
639
575
#ifndef VTNET_LEGACY_TX
640
#ifndef VTNET_LEGACY_TX
576
	if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
641
	if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
577
		mask |= VIRTIO_NET_F_MQ;
642
		features &= ~VIRTIO_NET_F_MQ;
578
#else
643
#else
579
	mask |= VIRTIO_NET_F_MQ;
644
	features &= ~VIRTIO_NET_F_MQ;
580
#endif
645
#endif
581
646
582
	features = VTNET_FEATURES & ~mask;
647
	negotiated_features = virtio_negotiate_features(dev, features);
583
	sc->vtnet_features = virtio_negotiate_features(dev, features);
584
648
649
	if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
650
		uint16_t mtu;
651
652
		mtu = virtio_read_dev_config_2(dev,
653
		    offsetof(struct virtio_net_config, mtu));
654
		if (mtu < VTNET_MIN_MTU /* || mtu > VTNET_MAX_MTU */) {
655
			device_printf(dev, "Invalid MTU value: %d. "
656
			    "MTU feature disabled.\n", mtu);
657
			features &= ~VIRTIO_NET_F_MTU;
658
			negotiated_features =
659
			    virtio_negotiate_features(dev, features);
660
		}
661
	}
662
663
	if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
664
		uint16_t npairs;
665
666
		npairs = virtio_read_dev_config_2(dev,
667
		    offsetof(struct virtio_net_config, max_virtqueue_pairs));
668
		if (npairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
669
		    npairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
670
			device_printf(dev, "Invalid max_virtqueue_pairs value: "
671
			    "%d. Multiqueue feature disabled.\n", npairs);
672
			features &= ~VIRTIO_NET_F_MQ;
673
			negotiated_features =
674
			    virtio_negotiate_features(dev, features);
675
		}
676
	}
677
585
	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
678
	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
586
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
679
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
587
		/*
680
		/*
Lines 595-620 Link Here
595
		 */
688
		 */
596
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
689
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
597
			device_printf(dev,
690
			device_printf(dev,
598
			    "LRO disabled due to both mergeable buffers and "
691
			    "Host LRO disabled since both mergeable buffers "
599
			    "indirect descriptors not negotiated\n");
692
			    "and indirect descriptors were not negotiated\n");
600
601
			features &= ~VTNET_LRO_FEATURES;
693
			features &= ~VTNET_LRO_FEATURES;
602
			sc->vtnet_features =
694
			negotiated_features =
603
			    virtio_negotiate_features(dev, features);
695
			    virtio_negotiate_features(dev, features);
604
		} else
696
		} else
605
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
697
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
606
	}
698
	}
699
700
	sc->vtnet_features = negotiated_features;
701
	sc->vtnet_negotiated_features = negotiated_features;
702
703
	return (virtio_finalize_features(dev));
607
}
704
}
608
705
609
static void
706
static int
610
vtnet_setup_features(struct vtnet_softc *sc)
707
vtnet_setup_features(struct vtnet_softc *sc)
611
{
708
{
612
	device_t dev;
709
	device_t dev;
710
	int error;
613
711
614
	dev = sc->vtnet_dev;
712
	dev = sc->vtnet_dev;
615
713
616
	vtnet_negotiate_features(sc);
714
	error = vtnet_negotiate_features(sc);
715
	if (error)
716
		return (error);
617
717
718
	if (virtio_with_feature(dev, VIRTIO_F_VERSION_1))
719
		sc->vtnet_flags |= VTNET_FLAG_MODERN;
618
	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
720
	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
619
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
721
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
620
	if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
722
	if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
Lines 625-650 Link Here
625
		sc->vtnet_flags |= VTNET_FLAG_MAC;
727
		sc->vtnet_flags |= VTNET_FLAG_MAC;
626
	}
728
	}
627
729
730
	if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
731
		sc->vtnet_max_mtu = virtio_read_dev_config_2(dev,
732
		    offsetof(struct virtio_net_config, mtu));
733
	} else
734
		sc->vtnet_max_mtu = VTNET_MAX_MTU;
735
628
	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
736
	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
629
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
737
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
630
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
738
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
739
	} else if (vtnet_modern(sc)) {
740
		/* This is identical to the mergeable header. */
741
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_v1);
631
	} else
742
	} else
632
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
743
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
633
744
634
	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
745
	if (vtnet_modern(sc) || sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
635
		sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;
746
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_INLINE;
636
	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
747
	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
637
		sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS;
748
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_LRO_NOMRG;
638
	else
749
	else
639
		sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS;
750
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_SEPARATE;
640
751
752
	/*
753
	 * Favor "hardware" LRO if negotiated, but support software LRO as
754
	 * a fallback; enabling both usually adds little benefit and can hurt.
755
	 */
756
	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) == 0 &&
757
	    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6) == 0)
758
		sc->vtnet_flags |= VTNET_FLAG_SW_LRO;
759
641
	if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
760
	if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
642
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
761
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
643
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
762
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
644
		sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
763
		sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MAX;
645
	else
764
	else
646
		sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;
765
		sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MIN;
647
766
767
	sc->vtnet_req_vq_pairs = 1;
768
	sc->vtnet_max_vq_pairs = 1;
769
648
	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
770
	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
649
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
771
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
650
772
Lines 654-688 Link Here
654
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
776
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
655
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
777
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
656
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
778
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
779
780
		if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
781
			sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
782
			    offsetof(struct virtio_net_config,
783
			    max_virtqueue_pairs));
784
		}
657
	}
785
	}
658
786
659
	if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) &&
660
	    sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
661
		sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
662
		    offsetof(struct virtio_net_config, max_virtqueue_pairs));
663
	} else
664
		sc->vtnet_max_vq_pairs = 1;
665
666
	if (sc->vtnet_max_vq_pairs > 1) {
787
	if (sc->vtnet_max_vq_pairs > 1) {
788
		int req;
789
667
		/*
790
		/*
668
		 * Limit the maximum number of queue pairs to the lower of
791
		 * Limit the maximum number of requested queue pairs to the
669
		 * the number of CPUs and the configured maximum.
792
		 * number of CPUs and the configured maximum.
670
		 * The actual number of queues that get used may be less.
671
		 */
793
		 */
672
		int max;
794
		req = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
673
795
		if (req < 0)
674
		max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
796
			req = 1;
675
		if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN) {
797
		if (req == 0)
676
			if (max > mp_ncpus)
798
			req = mp_ncpus;
677
				max = mp_ncpus;
799
		if (req > sc->vtnet_max_vq_pairs)
678
			if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
800
			req = sc->vtnet_max_vq_pairs;
679
				max = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX;
801
		if (req > mp_ncpus)
680
			if (max > 1) {
802
			req = mp_ncpus;
681
				sc->vtnet_requested_vq_pairs = max;
803
		if (req > 1) {
682
				sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
804
			sc->vtnet_req_vq_pairs = req;
683
			}
805
			sc->vtnet_flags |= VTNET_FLAG_MQ;
684
		}
806
		}
685
	}
807
	}
808
809
	return (0);
686
}
810
}
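The hw.vtnet.mq_max_pairs handling above treats 0 as a request for one queue pair per CPU and clamps the result to both the device limit and the CPU count. A sketch of just that clamping, with a worked value (the helper name is hypothetical):

	/* Illustrative reduction of the queue-pair clamping above. */
	static int
	clamp_vq_pairs(int tunable, int ncpus, int device_max)
	{
		int req;

		req = tunable;
		if (req < 0)
			req = 1;
		if (req == 0)
			req = ncpus;	/* 0 means one pair per CPU */
		if (req > device_max)
			req = device_max;
		if (req > ncpus)
			req = ncpus;
		return (req);	/* e.g. clamp_vq_pairs(0, 8, 4) == 4 */
	}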
687
811
688
static int
812
static int
Lines 703-708 Link Here
703
	if (rxq->vtnrx_sg == NULL)
827
	if (rxq->vtnrx_sg == NULL)
704
		return (ENOMEM);
828
		return (ENOMEM);
705
829
830
#if defined(INET) || defined(INET6)
831
	if (vtnet_software_lro(sc)) {
832
		if (tcp_lro_init_args(&rxq->vtnrx_lro, sc->vtnet_ifp,
833
		    sc->vtnet_lro_entry_count, sc->vtnet_lro_mbufq_depth) != 0)
834
			return (ENOMEM);
835
	}
836
#endif
837
706
	TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
838
	TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
707
	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
839
	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
708
	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);
840
	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);
Lines 768-773 Link Here
768
			return (error);
900
			return (error);
769
	}
901
	}
770
902
903
	vtnet_set_rx_process_limit(sc);
771
	vtnet_setup_queue_sysctl(sc);
904
	vtnet_setup_queue_sysctl(sc);
772
905
773
	return (0);
906
	return (0);
Lines 780-785 Link Here
780
	rxq->vtnrx_sc = NULL;
913
	rxq->vtnrx_sc = NULL;
781
	rxq->vtnrx_id = -1;
914
	rxq->vtnrx_id = -1;
782
915
916
#if defined(INET) || defined(INET6)
917
	tcp_lro_free(&rxq->vtnrx_lro);
918
#endif
919
783
	if (rxq->vtnrx_sg != NULL) {
920
	if (rxq->vtnrx_sg != NULL) {
784
		sglist_free(rxq->vtnrx_sg);
921
		sglist_free(rxq->vtnrx_sg);
785
		rxq->vtnrx_sg = NULL;
922
		rxq->vtnrx_sg = NULL;
Lines 888-915 Link Here
888
	if (info == NULL)
1025
	if (info == NULL)
889
		return (ENOMEM);
1026
		return (ENOMEM);
890
1027
891
	for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) {
1028
	for (i = 0, idx = 0; i < sc->vtnet_req_vq_pairs; i++, idx += 2) {
892
		rxq = &sc->vtnet_rxqs[i];
1029
		rxq = &sc->vtnet_rxqs[i];
893
		VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
1030
		VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
894
		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
1031
		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
895
		    "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id);
1032
		    "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
896
1033
897
		txq = &sc->vtnet_txqs[i];
1034
		txq = &sc->vtnet_txqs[i];
898
		VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
1035
		VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
899
		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
1036
		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
900
		    "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id);
1037
		    "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
901
	}
1038
	}
902
1039
1040
	/* These queues will not be used so allocate the minimum resources. */
1041
	for (/**/; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
1042
		rxq = &sc->vtnet_rxqs[i];
1043
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, rxq, &rxq->vtnrx_vq,
1044
		    "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
1045
1046
		txq = &sc->vtnet_txqs[i];
1047
		VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL, txq, &txq->vtntx_vq,
1048
		    "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
1049
	}
1050
903
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
1051
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
904
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
1052
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
905
		    &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
1053
		    &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
906
	}
1054
	}
907
1055
908
	/*
1056
	/*
909
	 * Enable interrupt binding if this is multiqueue. This only matters
1057
	 * TODO: Enable interrupt binding if this is multiqueue. This will
910
	 * when per-vq MSIX is available.
1058
	 * only matter when per-virtqueue MSIX is available.
911
	 */
1059
	 */
912
	if (sc->vtnet_flags & VTNET_FLAG_MULTIQ)
1060
	if (sc->vtnet_flags & VTNET_FLAG_MQ)
913
		flags |= 0;
1061
		flags |= 0;
914
1062
915
	error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
1063
	error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
Lines 919-941 Link Here
919
}
1067
}
920
1068
921
static int
1069
static int
922
vtnet_setup_interface(struct vtnet_softc *sc)
1070
vtnet_alloc_interface(struct vtnet_softc *sc)
923
{
1071
{
924
	device_t dev;
1072
	device_t dev;
925
	struct ifnet *ifp;
1073
	struct ifnet *ifp;
926
1074
927
	dev = sc->vtnet_dev;
1075
	dev = sc->vtnet_dev;
928
1076
929
	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
1077
	ifp = if_alloc(IFT_ETHER);
930
	if (ifp == NULL) {
1078
	if (ifp == NULL)
931
		device_printf(dev, "cannot allocate ifnet structure\n");
1079
		return (ENOMEM);
932
		return (ENOSPC);
933
	}
934
1080
935
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1081
	sc->vtnet_ifp = ifp;
936
	ifp->if_baudrate = IF_Gbps(10);	/* Approx. */
937
	ifp->if_softc = sc;
1082
	ifp->if_softc = sc;
1083
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1084
1085
	return (0);
1086
}
1087
1088
static int
1089
vtnet_setup_interface(struct vtnet_softc *sc)
1090
{
1091
	device_t dev;
1092
	struct ifnet *ifp;
1093
1094
	dev = sc->vtnet_dev;
1095
	ifp = sc->vtnet_ifp;
1096
938
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1097
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1098
	ifp->if_baudrate = IF_Gbps(10);
939
	ifp->if_init = vtnet_init;
1099
	ifp->if_init = vtnet_init;
940
	ifp->if_ioctl = vtnet_ioctl;
1100
	ifp->if_ioctl = vtnet_ioctl;
941
	ifp->if_get_counter = vtnet_get_counter;
1101
	ifp->if_get_counter = vtnet_get_counter;
Lines 950-1000 Link Here
950
	IFQ_SET_READY(&ifp->if_snd);
1110
	IFQ_SET_READY(&ifp->if_snd);
951
#endif
1111
#endif
952
1112
953
	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
1113
	vtnet_get_macaddr(sc);
954
	    vtnet_ifmedia_sts);
955
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
956
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);
957
1114
958
	/* Read (or generate) the MAC address for the adapter. */
959
	vtnet_get_hwaddr(sc);
960
961
	ether_ifattach(ifp, sc->vtnet_hwaddr);
962
963
	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
1115
	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
964
		ifp->if_capabilities |= IFCAP_LINKSTATE;
1116
		ifp->if_capabilities |= IFCAP_LINKSTATE;
965
1117
966
	/* Tell the upper layer(s) we support long frames. */
1118
	ifmedia_init(&sc->vtnet_media, 0, vtnet_ifmedia_upd, vtnet_ifmedia_sts);
967
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1119
	ifmedia_add(&sc->vtnet_media, IFM_ETHER | IFM_AUTO, 0, NULL);
968
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
1120
	ifmedia_set(&sc->vtnet_media, IFM_ETHER | IFM_AUTO);
969
1121
970
	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
1122
	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
1123
		int gso;
1124
971
		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
1125
		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
972
1126
973
		if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) {
1127
		gso = virtio_with_feature(dev, VIRTIO_NET_F_GSO);
974
			ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
1128
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
1129
			ifp->if_capabilities |= IFCAP_TSO4;
1130
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
1131
			ifp->if_capabilities |= IFCAP_TSO6;
1132
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
975
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
1133
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
976
		} else {
977
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
978
				ifp->if_capabilities |= IFCAP_TSO4;
979
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
980
				ifp->if_capabilities |= IFCAP_TSO6;
981
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
982
				sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
983
		}
984
1134
985
		if (ifp->if_capabilities & IFCAP_TSO)
1135
		if (ifp->if_capabilities & (IFCAP_TSO4 | IFCAP_TSO6)) {
1136
			int tso_maxlen;
1137
986
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1138
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1139
1140
			tso_maxlen = vtnet_tunable_int(sc, "tso_maxlen",
1141
			    vtnet_tso_maxlen);
1142
			ifp->if_hw_tsomax = tso_maxlen -
1143
			    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1144
			ifp->if_hw_tsomaxsegcount = sc->vtnet_tx_nsegs - 1;
1145
			ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
1146
		}
987
	}
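With the default hw.vtnet.tso_maxlen of IP_MAXPACKET (65535), the above works out to if_hw_tsomax = 65535 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) = 65535 - 18 = 65517 bytes per TSO burst, spread over at most vtnet_tx_nsegs - 1 segments of up to PAGE_SIZE each.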
1147
	}
988
1148
989
	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
1149
	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
990
		ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
1150
		ifp->if_capabilities |= IFCAP_RXCSUM;
1151
#ifdef notyet
1152
		/* BMV: Rx checksums not distinguished between IPv4 and IPv6. */
1153
		ifp->if_capabilities |= IFCAP_RXCSUM_IPV6;
1154
#endif
991
1155
992
		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
1156
		if (vtnet_tunable_int(sc, "fixup_needs_csum",
993
		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
1157
		    vtnet_fixup_needs_csum) != 0)
994
			ifp->if_capabilities |= IFCAP_LRO;
1158
			sc->vtnet_flags |= VTNET_FLAG_FIXUP_NEEDS_CSUM;
1159
1160
		/* Support either "hardware" or software LRO. */
1161
		ifp->if_capabilities |= IFCAP_LRO;
995
	}
1162
	}
996
1163
997
	if (ifp->if_capabilities & IFCAP_HWCSUM) {
1164
	if (ifp->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6)) {
998
		/*
1165
		/*
999
		 * VirtIO does not support VLAN tagging, but we can fake
1166
		 * VirtIO does not support VLAN tagging, but we can fake
1000
		 * it by inserting and removing the 802.1Q header during
1167
		 * it by inserting and removing the 802.1Q header during
Lines 1005-1015 Link Here
1005
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
1172
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
1006
	}
1173
	}
1007
1174
1008
	ifp->if_capenable = ifp->if_capabilities;
1175
	if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO)
1176
		ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1177
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1009
1178
1010
	/*
1179
	/*
1011
	 * Capabilities after here are not enabled by default.
1180
	 * Capabilities after here are not enabled by default.
1012
	 */
1181
	 */
1182
	ifp->if_capenable = ifp->if_capabilities;
1013
1183
1014
	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
1184
	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
1015
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1185
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
Lines 1020-1076 Link Here
1020
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
1190
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
1021
	}
1191
	}
1022
1192
1023
	vtnet_set_rx_process_limit(sc);
1193
	ether_ifattach(ifp, sc->vtnet_hwaddr);
1024
	vtnet_set_tx_intr_threshold(sc);
1025
1194
1195
	/* Tell the upper layer(s) we support long frames. */
1196
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1197
1026
	return (0);
1198
	return (0);
1027
}
1199
}
1028
1200
1029
static int
1201
static int
1030
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
1202
vtnet_rx_cluster_size(struct vtnet_softc *sc, int mtu)
1031
{
1203
{
1204
	int framesz;
1205
1206
	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
1207
		return (MJUMPAGESIZE);
1208
	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
1209
		return (MCLBYTES);
1210
1211
	/*
1212
	 * Try to scale the receive mbuf cluster size from the MTU. Without
1213
	 * the GUEST_TSO[46] features, the VirtIO specification says the
1214
	 * driver must only be able to receive ~1500 byte frames. But if
1215
	 * jumbo frames can be transmitted then try to receive jumbo.
1216
	 *
1217
	 * BMV: Not quite true when F_MTU is negotiated!
1218
	 */
1219
	if (vtnet_modern(sc)) {
1220
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr_v1));
1221
		framesz = sizeof(struct virtio_net_hdr_v1);
1222
	} else
1223
		framesz = sizeof(struct vtnet_rx_header);
1224
	framesz += sizeof(struct ether_vlan_header) + mtu;
1225
1226
	if (framesz <= MCLBYTES)
1227
		return (MCLBYTES);
1228
	else if (framesz <= MJUMPAGESIZE)
1229
		return (MJUMPAGESIZE);
1230
	else if (framesz <= MJUM9BYTES)
1231
		return (MJUM9BYTES);
1232
1233
	/* Sane default; avoid 16KB clusters. */
1234
	return (MCLBYTES);
1235
}
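As a worked example, assuming 4 KB pages: on a modern device at the default 1500 byte MTU, framesz = 12 (virtio_net_hdr_v1) + 18 (ether_vlan_header) + 1500 = 1530, which fits MCLBYTES (2048). At a 9000 byte MTU the sum is 9030, which exceeds MJUMPAGESIZE (4096) and falls through to MJUM9BYTES (9216).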
1236
1237
static int
1238
vtnet_ioctl_mtu(struct vtnet_softc *sc, int mtu)
1239
{
1032
	struct ifnet *ifp;
1240
	struct ifnet *ifp;
1033
	int frame_size, clsize;
1241
	int clustersz;
1034
1242
1035
	ifp = sc->vtnet_ifp;
1243
	ifp = sc->vtnet_ifp;
1244
	VTNET_CORE_LOCK_ASSERT(sc);
1036
1245
1037
	if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU)
1246
	if (ifp->if_mtu == mtu)
1247
		return (0);
1248
	else if (mtu < ETHERMIN || mtu > sc->vtnet_max_mtu)
1038
		return (EINVAL);
1249
		return (EINVAL);
1039
1250
1040
	frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) +
1251
	ifp->if_mtu = mtu;
1041
	    new_mtu;
1252
	clustersz = vtnet_rx_cluster_size(sc, mtu);
1042
1253
1043
	/*
1254
	if (clustersz != sc->vtnet_rx_clustersz &&
1044
	 * Based on the new MTU (and hence frame size) determine which
1255
	    ifp->if_drv_flags & IFF_DRV_RUNNING) {
1045
	 * cluster size is most appropriate for the receive queues.
1256
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1046
	 */
1257
		vtnet_init_locked(sc);
1047
	if (frame_size <= MCLBYTES) {
1258
	}
1048
		clsize = MCLBYTES;
1049
	} else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1050
		/* Avoid going past 9K jumbos. */
1051
		if (frame_size > MJUM9BYTES)
1052
			return (EINVAL);
1053
		clsize = MJUM9BYTES;
1054
	} else
1055
		clsize = MJUMPAGESIZE;
1056
1259
1057
	ifp->if_mtu = new_mtu;
1260
	return (0);
1058
	sc->vtnet_rx_new_clsize = clsize;
1261
}
1059
1262
1060
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1263
static int
1061
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1264
vtnet_ioctl_ifflags(struct vtnet_softc *sc)
1265
{
1266
	struct ifnet *ifp;
1267
	int drv_running;
1268
1269
	ifp = sc->vtnet_ifp;
1270
	drv_running = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1271
1272
	VTNET_CORE_LOCK_ASSERT(sc);
1273
1274
	if ((ifp->if_flags & IFF_UP) == 0) {
1275
		if (drv_running)
1276
			vtnet_stop(sc);
1277
		goto out;
1278
	}
1279
1280
	if (!drv_running) {
1062
		vtnet_init_locked(sc);
1281
		vtnet_init_locked(sc);
1282
		goto out;
1063
	}
1283
	}
1064
1284
1285
	if ((ifp->if_flags ^ sc->vtnet_if_flags) &
1286
	    (IFF_PROMISC | IFF_ALLMULTI)) {
1287
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
1288
			return (ENOTSUP);
1289
		vtnet_rx_filter(sc);
1290
	}
1291
1292
out:
1293
	sc->vtnet_if_flags = ifp->if_flags;
1065
	return (0);
1294
	return (0);
1066
}
1295
}
1067
1296
1068
static int
1297
static int
1298
vtnet_ioctl_multi(struct vtnet_softc *sc)
1299
{
1300
	struct ifnet *ifp;
1301
1302
	ifp = sc->vtnet_ifp;
1303
1304
	VTNET_CORE_LOCK_ASSERT(sc);
1305
1306
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX &&
1307
	    ifp->if_drv_flags & IFF_DRV_RUNNING)
1308
		vtnet_rx_filter_mac(sc);
1309
1310
	return (0);
1311
}
1312
1313
static int
1314
vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr)
1315
{
1316
	struct ifnet *ifp;
1317
	int mask, reinit, update;
1318
1319
	ifp = sc->vtnet_ifp;
1320
	mask = (ifr->ifr_reqcap & ifp->if_capabilities) ^ ifp->if_capenable;
1321
	reinit = update = 0;
1322
1323
	VTNET_CORE_LOCK_ASSERT(sc);
1324
1325
	if (mask & IFCAP_TXCSUM)
1326
		ifp->if_capenable ^= IFCAP_TXCSUM;
1327
	if (mask & IFCAP_TXCSUM_IPV6)
1328
		ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1329
	if (mask & IFCAP_TSO4)
1330
		ifp->if_capenable ^= IFCAP_TSO4;
1331
	if (mask & IFCAP_TSO6)
1332
		ifp->if_capenable ^= IFCAP_TSO6;
1333
1334
	if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) {
1335
		/*
1336
		 * These Rx features require the negotiated features to
1337
		 * be updated. Avoid a full reinit if possible.
1338
		 */
1339
		if (sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
1340
			update = 1;
1341
		else
1342
			reinit = 1;
1343
1344
		/* BMV: Avoid needless renegotiation for just software LRO. */
1345
		if ((mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) ==
1346
		    IFCAP_LRO && vtnet_software_lro(sc))
1347
			reinit = update = 0;
1348
1349
		if (mask & IFCAP_RXCSUM)
1350
			ifp->if_capenable ^= IFCAP_RXCSUM;
1351
		if (mask & IFCAP_RXCSUM_IPV6)
1352
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1353
		if (mask & IFCAP_LRO)
1354
			ifp->if_capenable ^= IFCAP_LRO;
1355
1356
		/*
1357
		 * VirtIO does not distinguish between IPv4 and IPv6 checksums
1358
		 * so treat them as a pair. Guest TSO (LRO) requires receive
1359
		 * checksums.
1360
		 */
1361
		if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
1362
			ifp->if_capenable |= IFCAP_RXCSUM;
1363
#ifdef notyet
1364
			ifp->if_capenable |= IFCAP_RXCSUM_IPV6;
1365
#endif
1366
		} else
1367
			ifp->if_capenable &=
1368
			    ~(IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO);
1369
	}
1370
1371
	if (mask & IFCAP_VLAN_HWFILTER) {
1372
		/* These Rx features require renegotiation. */
1373
		reinit = 1;
1374
1375
		if (mask & IFCAP_VLAN_HWFILTER)
1376
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1377
	}
1378
1379
	if (mask & IFCAP_VLAN_HWTSO)
1380
		ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1381
	if (mask & IFCAP_VLAN_HWTAGGING)
1382
		ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1383
1384
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1385
		if (reinit) {
1386
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1387
			vtnet_init_locked(sc);
1388
		} else if (update)
1389
			vtnet_update_rx_offloads(sc);
1390
	}
1391
1392
	return (0);
1393
}
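For example, toggling receive checksums with ifconfig vtnet0 -rxcsum lands here with IFCAP_RXCSUM set in mask: if VIRTIO_NET_F_CTRL_GUEST_OFFLOADS was negotiated the change is applied with a control-queue command via vtnet_update_rx_offloads(), otherwise the interface is torn down and reinitialized. Toggling only IFCAP_LRO while software LRO is in use takes neither path.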
1394
1395
static int
1069
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1396
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1070
{
1397
{
1071
	struct vtnet_softc *sc;
1398
	struct vtnet_softc *sc;
1072
	struct ifreq *ifr;
1399
	struct ifreq *ifr;
1073
	int reinit, mask, error;
1400
	int error;
1074
1401
1075
	sc = ifp->if_softc;
1402
	sc = ifp->if_softc;
1076
	ifr = (struct ifreq *) data;
1403
	ifr = (struct ifreq *) data;
Lines 1078-1122 Link Here
1078
1405
1079
	switch (cmd) {
1406
	switch (cmd) {
1080
	case SIOCSIFMTU:
1407
	case SIOCSIFMTU:
1081
		if (ifp->if_mtu != ifr->ifr_mtu) {
1408
		VTNET_CORE_LOCK(sc);
1082
			VTNET_CORE_LOCK(sc);
1409
		error = vtnet_ioctl_mtu(sc, ifr->ifr_mtu);
1083
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
1410
		VTNET_CORE_UNLOCK(sc);
1084
			VTNET_CORE_UNLOCK(sc);
1085
		}
1086
		break;
1411
		break;
1087
1412
1088
	case SIOCSIFFLAGS:
1413
	case SIOCSIFFLAGS:
1089
		VTNET_CORE_LOCK(sc);
1414
		VTNET_CORE_LOCK(sc);
1090
		if ((ifp->if_flags & IFF_UP) == 0) {
1415
		error = vtnet_ioctl_ifflags(sc);
1091
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1092
				vtnet_stop(sc);
1093
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1094
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
1095
			    (IFF_PROMISC | IFF_ALLMULTI)) {
1096
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
1097
					vtnet_rx_filter(sc);
1098
				else {
1099
					ifp->if_flags |= IFF_PROMISC;
1100
					if ((ifp->if_flags ^ sc->vtnet_if_flags)
1101
					    & IFF_ALLMULTI)
1102
						error = ENOTSUP;
1103
				}
1104
			}
1105
		} else
1106
			vtnet_init_locked(sc);
1107
1108
		if (error == 0)
1109
			sc->vtnet_if_flags = ifp->if_flags;
1110
		VTNET_CORE_UNLOCK(sc);
1416
		VTNET_CORE_UNLOCK(sc);
1111
		break;
1417
		break;
1112
1418
1113
	case SIOCADDMULTI:
1419
	case SIOCADDMULTI:
1114
	case SIOCDELMULTI:
1420
	case SIOCDELMULTI:
1115
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
1116
			break;
1117
		VTNET_CORE_LOCK(sc);
1421
		VTNET_CORE_LOCK(sc);
1118
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1422
		error = vtnet_ioctl_multi(sc);
1119
			vtnet_rx_filter_mac(sc);
1120
		VTNET_CORE_UNLOCK(sc);
1423
		VTNET_CORE_UNLOCK(sc);
1121
		break;
1424
		break;
1122
1425
Lines 1127-1172 Link Here
1127
1430
1128
	case SIOCSIFCAP:
1431
	case SIOCSIFCAP:
1129
		VTNET_CORE_LOCK(sc);
1432
		VTNET_CORE_LOCK(sc);
1130
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1433
		error = vtnet_ioctl_ifcap(sc, ifr);
1131
1132
		if (mask & IFCAP_TXCSUM)
1133
			ifp->if_capenable ^= IFCAP_TXCSUM;
1134
		if (mask & IFCAP_TXCSUM_IPV6)
1135
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1136
		if (mask & IFCAP_TSO4)
1137
			ifp->if_capenable ^= IFCAP_TSO4;
1138
		if (mask & IFCAP_TSO6)
1139
			ifp->if_capenable ^= IFCAP_TSO6;
1140
1141
		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
1142
		    IFCAP_VLAN_HWFILTER)) {
1143
			/* These Rx features require us to renegotiate. */
1144
			reinit = 1;
1145
1146
			if (mask & IFCAP_RXCSUM)
1147
				ifp->if_capenable ^= IFCAP_RXCSUM;
1148
			if (mask & IFCAP_RXCSUM_IPV6)
1149
				ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1150
			if (mask & IFCAP_LRO)
1151
				ifp->if_capenable ^= IFCAP_LRO;
1152
			if (mask & IFCAP_VLAN_HWFILTER)
1153
				ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1154
		} else
1155
			reinit = 0;
1156
1157
		if (mask & IFCAP_VLAN_HWTSO)
1158
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1159
		if (mask & IFCAP_VLAN_HWTAGGING)
1160
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1161
1162
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1163
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1164
			vtnet_init_locked(sc);
1165
		}
1166
1167
		VTNET_CORE_UNLOCK(sc);
1434
		VTNET_CORE_UNLOCK(sc);
1168
		VLAN_CAPABILITIES(ifp);
1435
		VLAN_CAPABILITIES(ifp);
1169
1170
		break;
1436
		break;
1171
1437
1172
	default:
1438
	default:
Lines 1185-1196 Link Here
1185
	struct virtqueue *vq;
1451
	struct virtqueue *vq;
1186
	int nbufs, error;
1452
	int nbufs, error;
1187
1453
1188
#ifdef DEV_NETMAP
1189
	error = vtnet_netmap_rxq_populate(rxq);
1190
	if (error >= 0)
1191
		return (error);
1192
#endif  /* DEV_NETMAP */
1193
1194
	vq = rxq->vtnrx_vq;
1454
	vq = rxq->vtnrx_vq;
1195
	error = ENOSPC;
1455
	error = ENOSPC;
1196
1456
Lines 1220-1239 Link Here
1220
	struct virtqueue *vq;
1480
	struct virtqueue *vq;
1221
	struct mbuf *m;
1481
	struct mbuf *m;
1222
	int last;
1482
	int last;
1223
#ifdef DEV_NETMAP
1224
	int netmap_bufs = vtnet_netmap_queue_on(rxq->vtnrx_sc, NR_RX,
1225
						rxq->vtnrx_id);
1226
#else  /* !DEV_NETMAP */
1227
	int netmap_bufs = 0;
1228
#endif /* !DEV_NETMAP */
1229
1483
1230
	vq = rxq->vtnrx_vq;
1484
	vq = rxq->vtnrx_vq;
1231
	last = 0;
1485
	last = 0;
1232
1486
1233
	while ((m = virtqueue_drain(vq, &last)) != NULL) {
1487
	while ((m = virtqueue_drain(vq, &last)) != NULL)
1234
		if (!netmap_bufs)
1488
		m_freem(m);
1235
			m_freem(m);
1236
	}
1237
1489
1238
	KASSERT(virtqueue_empty(vq),
1490
	KASSERT(virtqueue_empty(vq),
1239
	    ("%s: mbufs remaining in rx queue %p", __func__, rxq));
1491
	    ("%s: mbufs remaining in rx queue %p", __func__, rxq));
Lines 1243-1299 Link Here
1243
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
1495
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
1244
{
1496
{
1245
	struct mbuf *m_head, *m_tail, *m;
1497
	struct mbuf *m_head, *m_tail, *m;
1246
	int i, clsize;
1498
	int i, size;
1247
1499
1248
	clsize = sc->vtnet_rx_clsize;
1500
	m_head = NULL;
1501
	size = sc->vtnet_rx_clustersz;
1249
1502
1250
	KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1503
	KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1251
	    ("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs));
1504
	    ("%s: mbuf %d chain requested without LRO_NOMRG", __func__, nbufs));
1252
1505
1253
	m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize);
1506
	for (i = 0; i < nbufs; i++) {
1254
	if (m_head == NULL)
1507
		m = m_getjcl(M_NOWAIT, MT_DATA, i == 0 ? M_PKTHDR : 0, size);
1255
		goto fail;
1508
		if (m == NULL) {
1509
			sc->vtnet_stats.mbuf_alloc_failed++;
1510
			m_freem(m_head);
1511
			return (NULL);
1512
		}
1256
1513
1257
	m_head->m_len = clsize;
1514
		m->m_len = size;
1258
	m_tail = m_head;
1515
		if (m_head != NULL) {
1259
1516
			m_tail->m_next = m;
1260
	/* Allocate the rest of the chain. */
1517
			m_tail = m;
1261
	for (i = 1; i < nbufs; i++) {
1518
		} else
1262
		m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize);
1519
			m_head = m_tail = m;
1263
		if (m == NULL)
1264
			goto fail;
1265
1266
		m->m_len = clsize;
1267
		m_tail->m_next = m;
1268
		m_tail = m;
1269
	}
1520
	}
1270
1521
1271
	if (m_tailp != NULL)
1522
	if (m_tailp != NULL)
1272
		*m_tailp = m_tail;
1523
		*m_tailp = m_tail;
1273
1524
1274
	return (m_head);
1525
	return (m_head);
1275
1276
fail:
1277
	sc->vtnet_stats.mbuf_alloc_failed++;
1278
	m_freem(m_head);
1279
1280
	return (NULL);
1281
}
1526
}
1282
1527
1283
/*
1528
/*
1284
 * Slow path for when LRO without mergeable buffers is negotiated.
1529
 * Slow path for when LRO without mergeable buffers is negotiated.
1285
 */
1530
 */
1286
static int
1531
static int
1287
vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
1532
vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
1288
    int len0)
1533
    int len0)
1289
{
1534
{
1290
	struct vtnet_softc *sc;
1535
	struct vtnet_softc *sc;
1291
	struct mbuf *m, *m_prev;
1536
	struct mbuf *m, *m_prev, *m_new, *m_tail;
1292
	struct mbuf *m_new, *m_tail;
1537
	int len, clustersz, nreplace, error;
1293
	int len, clsize, nreplace, error;
1294
1538
1295
	sc = rxq->vtnrx_sc;
1539
	sc = rxq->vtnrx_sc;
1296
	clsize = sc->vtnet_rx_clsize;
1540
	clustersz = sc->vtnet_rx_clustersz;
1297
1541
1298
	m_prev = NULL;
1542
	m_prev = NULL;
1299
	m_tail = NULL;
1543
	m_tail = NULL;
Lines 1303-1327 Link Here
1303
	len = len0;
1547
	len = len0;
1304
1548
1305
	/*
1549
	/*
1306
	 * Since these mbuf chains are so large, we avoid allocating an
1550
	 * Since these mbuf chains are so large, avoid allocating a complete
1307
	 * entire replacement chain if possible. When the received frame
1551
	 * replacement when the received frame did not consume the entire
1308
	 * did not consume the entire chain, the unused mbufs are moved
1552
	 * chain. Unused mbufs are moved to the tail of the replacement mbuf.
1309
	 * to the replacement chain.
1310
	 */
1553
	 */
1311
	while (len > 0) {
1554
	while (len > 0) {
1312
		/*
1313
		 * Something is seriously wrong if we received a frame
1314
		 * larger than the chain. Drop it.
1315
		 */
1316
		if (m == NULL) {
1555
		if (m == NULL) {
1317
			sc->vtnet_stats.rx_frame_too_large++;
1556
			sc->vtnet_stats.rx_frame_too_large++;
1318
			return (EMSGSIZE);
1557
			return (EMSGSIZE);
1319
		}
1558
		}
1320
1559
1321
		/* We always allocate the same cluster size. */
1560
		/*
1322
		KASSERT(m->m_len == clsize,
1561
		 * Every mbuf should have the expected cluster size sincethat
1323
		    ("%s: mbuf size %d is not the cluster size %d",
1562
		 * is also used to allocate the replacements.
1324
		    __func__, m->m_len, clsize));
1563
		 */
1564
		KASSERT(m->m_len == clustersz,
1565
		    ("%s: mbuf size %d not expected cluster size %d", __func__,
1566
		    m->m_len, clustersz));
1325
1567
1326
		m->m_len = MIN(m->m_len, len);
1568
		m->m_len = MIN(m->m_len, len);
1327
		len -= m->m_len;
1569
		len -= m->m_len;
Lines 1331-1349 Link Here
1331
		nreplace++;
1573
		nreplace++;
1332
	}
1574
	}
1333
1575
1334
	KASSERT(nreplace <= sc->vtnet_rx_nmbufs,
1576
	KASSERT(nreplace > 0 && nreplace <= sc->vtnet_rx_nmbufs,
1335
	    ("%s: too many replacement mbufs %d max %d", __func__, nreplace,
1577
	    ("%s: invalid replacement mbuf count %d max %d", __func__,
1336
	    sc->vtnet_rx_nmbufs));
1578
	    nreplace, sc->vtnet_rx_nmbufs));
1337
1579
1338
	m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
1580
	m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
1339
	if (m_new == NULL) {
1581
	if (m_new == NULL) {
1340
		m_prev->m_len = clsize;
1582
		m_prev->m_len = clustersz;
1341
		return (ENOBUFS);
1583
		return (ENOBUFS);
1342
	}
1584
	}
1343
1585
1344
	/*
1586
	/*
1345
	 * Move any unused mbufs from the received chain onto the end
1587
	 * Move any unused mbufs from the received mbuf chain onto the
1346
	 * of the new chain.
1588
	 * end of the replacement chain.
1347
	 */
1589
	 */
1348
	if (m_prev->m_next != NULL) {
1590
	if (m_prev->m_next != NULL) {
1349
		m_tail->m_next = m_prev->m_next;
1591
		m_tail->m_next = m_prev->m_next;
Lines 1353-1373 Link Here
1353
	error = vtnet_rxq_enqueue_buf(rxq, m_new);
1595
	error = vtnet_rxq_enqueue_buf(rxq, m_new);
1354
	if (error) {
1596
	if (error) {
1355
		/*
1597
		/*
1356
		 * BAD! We could not enqueue the replacement mbuf chain. We
1598
		 * The replacement is suppose to be an copy of the one
1357
		 * must restore the m0 chain to the original state if it was
1599
		 * dequeued so this is a very unexpected error.
1358
		 * modified so we can subsequently discard it.
1359
		 *
1600
		 *
1360
		 * NOTE: The replacement is suppose to be an identical copy
1601
		 * Restore the m0 chain to the original state if it was
1361
		 * to the one just dequeued so this is an unexpected error.
1602
		 * modified so we can then discard it.
1362
		 */
1603
		 */
1363
		sc->vtnet_stats.rx_enq_replacement_failed++;
1364
1365
		if (m_tail->m_next != NULL) {
1604
		if (m_tail->m_next != NULL) {
1366
			m_prev->m_next = m_tail->m_next;
1605
			m_prev->m_next = m_tail->m_next;
1367
			m_tail->m_next = NULL;
1606
			m_tail->m_next = NULL;
1368
		}
1607
		}
1369
1608
		m_prev->m_len = clustersz;
1370
		m_prev->m_len = clsize;
1609
		sc->vtnet_stats.rx_enq_replacement_failed++;
1371
		m_freem(m_new);
1610
		m_freem(m_new);
1372
	}
1611
	}
1373
1612
Lines 1383-1413 Link Here
1383
1622
1384
	sc = rxq->vtnrx_sc;
1623
	sc = rxq->vtnrx_sc;
1385
1624
1386
	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
1625
	if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
1387
	    ("%s: chained mbuf without LRO_NOMRG", __func__));
1626
		return (vtnet_rxq_replace_lro_nomrg_buf(rxq, m, len));
1388
1627
1389
	if (m->m_next == NULL) {
1628
	MPASS(m->m_next == NULL);
1390
		/* Fast-path for the common case of just one mbuf. */
1629
	if (m->m_len < len)
1391
		if (m->m_len < len)
1630
		return (EMSGSIZE);
1392
			return (EINVAL);
1393
1631
1394
		m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
1632
	m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
1395
		if (m_new == NULL)
1633
	if (m_new == NULL)
1396
			return (ENOBUFS);
1634
		return (ENOBUFS);
1397
1635
1398
		error = vtnet_rxq_enqueue_buf(rxq, m_new);
1636
	error = vtnet_rxq_enqueue_buf(rxq, m_new);
1399
		if (error) {
1637
	if (error) {
1400
			/*
1638
		sc->vtnet_stats.rx_enq_replacement_failed++;
1401
			 * The new mbuf is suppose to be an identical
1639
		m_freem(m_new);
1402
			 * copy of the one just dequeued so this is an
1403
			 * unexpected error.
1404
			 */
1405
			m_freem(m_new);
1406
			sc->vtnet_stats.rx_enq_replacement_failed++;
1407
		} else
1408
			m->m_len = len;
1409
	} else
1640
	} else
1410
		error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len);
1641
		m->m_len = len;
1411
1642
1412
	return (error);
1643
	return (error);
1413
}
1644
}
Lines 1417-1455 Link Here
1417
{
1648
{
1418
	struct vtnet_softc *sc;
1649
	struct vtnet_softc *sc;
1419
	struct sglist *sg;
1650
	struct sglist *sg;
1420
	struct vtnet_rx_header *rxhdr;
1651
	int header_inlined, error;
1421
	uint8_t *mdata;
1422
	int offset, error;
1423
1652
1424
	sc = rxq->vtnrx_sc;
1653
	sc = rxq->vtnrx_sc;
1425
	sg = rxq->vtnrx_sg;
1654
	sg = rxq->vtnrx_sg;
1426
	mdata = mtod(m, uint8_t *);
1427
1655
1656
	KASSERT(m->m_next == NULL || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1657
	    ("%s: mbuf chain without LRO_NOMRG", __func__));
1428
	VTNET_RXQ_LOCK_ASSERT(rxq);
1658
	VTNET_RXQ_LOCK_ASSERT(rxq);
1429
	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
1430
	    ("%s: chained mbuf without LRO_NOMRG", __func__));
1431
	KASSERT(m->m_len == sc->vtnet_rx_clsize,
1432
	    ("%s: unexpected cluster size %d/%d", __func__, m->m_len,
1433
	     sc->vtnet_rx_clsize));
1434
1659
1435
	sglist_reset(sg);
1660
	sglist_reset(sg);
1436
	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1661
	header_inlined = vtnet_modern(sc) ||
1662
	    (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) != 0; /* TODO: ANY_LAYOUT */
1663
1664
	if (header_inlined)
1665
		error = sglist_append_mbuf(sg, m);
1666
	else {
1667
		struct vtnet_rx_header *rxhdr =
1668
		    mtod(m, struct vtnet_rx_header *);
1437
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
1669
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
1438
		rxhdr = (struct vtnet_rx_header *) mdata;
1439
		sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
1440
		offset = sizeof(struct vtnet_rx_header);
1441
	} else
1442
		offset = 0;
1443
1670
1444
	sglist_append(sg, mdata + offset, m->m_len - offset);
1671
		/* Append the header and remaining mbuf data. */
1445
	if (m->m_next != NULL) {
1672
		error = sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
1446
		error = sglist_append_mbuf(sg, m->m_next);
1673
		if (error)
1447
		MPASS(error == 0);
1674
			return (error);
1675
		error = sglist_append(sg, &rxhdr[1],
1676
		    m->m_len - sizeof(struct vtnet_rx_header));
1677
		if (error)
1678
			return (error);
1679
1680
		if (m->m_next != NULL)
1681
			error = sglist_append_mbuf(sg, m->m_next);
1448
	}
1682
	}
1449
1683
1450
	error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg);
1684
	if (error)
1685
		return (error);
1451
1686
1452
	return (error);
1687
	return (virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg));
1453
}
1688
}
1454
1689
1455
static int
1690
static int
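The rewritten enqueue path builds either one gather segment (header inlined at the front of the cluster, as with modern and mergeable layouts) or a separate header segment followed by the data. A self-contained sketch of the two shapes, using POSIX struct iovec as a stand-in for the driver's sglist:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Hypothetical 12-byte virtio-net header stand-in. */
struct net_hdr {
	unsigned char bytes[12];
};

/* Layout used when the header is NOT inlined: header + pad + data. */
struct rx_buf {
	struct net_hdr hdr;
	unsigned char pad[4];
	unsigned char data[2048];
};

/*
 * Modern/mergeable devices read the header from the start of the data
 * buffer (one segment); legacy layouts give it its own segment.
 */
static int
build_sg(struct iovec *iov, struct rx_buf *buf, int header_inlined)
{
	if (header_inlined) {
		iov[0].iov_base = buf->data;
		iov[0].iov_len = sizeof(buf->data);
		return (1);
	}
	iov[0].iov_base = &buf->hdr;
	iov[0].iov_len = sizeof(buf->hdr);
	iov[1].iov_base = buf->data;
	iov[1].iov_len = sizeof(buf->data);
	return (2);
}

int
main(void)
{
	struct rx_buf buf;
	struct iovec iov[2];

	memset(&buf, 0, sizeof(buf));
	printf("inlined: %d segment(s)\n", build_sg(iov, &buf, 1));
	printf("legacy:  %d segment(s)\n", build_sg(iov, &buf, 0));
	return (0);
}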
Lines 1472-1525
1472
	return (error);
1707
	return (error);
1473
}
1708
}
1474
1709
1475
/*
1476
 * Use the checksum offset in the VirtIO header to set the
1477
 * correct CSUM_* flags.
1478
 */
1479
static int
1710
static int
1480
vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m,
1711
vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t etype,
1481
    uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
1712
    int hoff, struct virtio_net_hdr *hdr)
1482
{
1713
{
1483
	struct vtnet_softc *sc;
1714
	struct vtnet_softc *sc;
1484
#if defined(INET) || defined(INET6)
1715
	int error;
1485
	int offset = hdr->csum_start + hdr->csum_offset;
1486
#endif
1487
1716
1488
	sc = rxq->vtnrx_sc;
1717
	sc = rxq->vtnrx_sc;
1489
1718
1490
	/* Only do a basic sanity check on the offset. */
1719
	/*
1491
	switch (eth_type) {
1720
	 * NEEDS_CSUM corresponds to Linux's CHECKSUM_PARTIAL, but FreeBSD does
1492
#if defined(INET)
1721
	 * not have an analogous CSUM flag. The checksum has been validated,
1493
	case ETHERTYPE_IP:
1722
	 * but is incomplete (TCP/UDP pseudo header).
1494
		if (__predict_false(offset < ip_start + sizeof(struct ip)))
1723
	 *
1495
			return (1);
1724
	 * The packet is likely from another VM on the same host that itself
1496
		break;
1725
	 * performed checksum offloading so Tx/Rx is basically a memcpy and
1497
#endif
1726
	 * the checksum has little value.
1498
#if defined(INET6)
1727
	 *
1499
	case ETHERTYPE_IPV6:
1728
	 * Default to receiving the packet as-is for performance reasons, but
1500
		if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
1729
	 * this can cause issues if the packet is to be forwarded because it
1501
			return (1);
1730
	 * does not contain a valid checksum. This patch may be helpful:
1502
		break;
1731
	 * https://reviews.freebsd.org/D6611. In the meantime, have the driver
1503
#endif
1732
	 * compute the checksum if requested.
1504
	default:
1733
	 *
1505
		sc->vtnet_stats.rx_csum_bad_ethtype++;
1734
	 * BMV: Need to add a CSUM_PARTIAL flag?
1506
		return (1);
1735
	 */
1736
	if ((sc->vtnet_flags & VTNET_FLAG_FIXUP_NEEDS_CSUM) == 0) {
1737
		error = vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr);
1738
		return (error);
1507
	}
1739
	}
1508
1740
1509
	/*
1741
	/*
1510
	 * Use the offset to determine the appropriate CSUM_* flags. This is
1742
	 * Compute the checksum in the driver so the packet will contain a
1511
	 * a bit dirty, but we can get by with it since the checksum offsets
1743
	 * valid checksum. The checksum is at csum_offset from csum_start.
1512
	 * happen to be different. We assume the host host does not do IPv4
1513
	 * header checksum offloading.
1514
	 */
1744
	 */
1515
	switch (hdr->csum_offset) {
1745
	switch (etype) {
1516
	case offsetof(struct udphdr, uh_sum):
1746
#if defined(INET) || defined(INET6)
1517
	case offsetof(struct tcphdr, th_sum):
1747
	case ETHERTYPE_IP:
1748
	case ETHERTYPE_IPV6: {
1749
		int csum_off, csum_end;
1750
		uint16_t csum;
1751
1752
		csum_off = hdr->csum_start + hdr->csum_offset;
1753
		csum_end = csum_off + sizeof(uint16_t);
1754
1755
		/* Assume checksum will be in the first mbuf. */
1756
		if (m->m_len < csum_end || m->m_pkthdr.len < csum_end)
1757
			return (1);
1758
1759
		/*
1760
		 * Like in_delayed_cksum()/in6_delayed_cksum(), compute the
1761
		 * checksum and write it at the specified offset. We could
1762
		 * try to verify the packet: csum_start should probably
1763
		 * correspond to the start of the TCP/UDP header.
1764
		 *
1765
		 * BMV: Need to properly handle UDP with zero checksum. Is
1766
		 * the IPv4 header checksum implicitly validated?
1767
		 */
1768
		csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start);
1769
		*(uint16_t *)(mtodo(m, csum_off)) = csum;
1518
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1770
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1519
		m->m_pkthdr.csum_data = 0xFFFF;
1771
		m->m_pkthdr.csum_data = 0xFFFF;
1520
		break;
1772
		break;
1773
	}
1774
#endif
1521
	default:
1775
	default:
1522
		sc->vtnet_stats.rx_csum_bad_offset++;
1776
		sc->vtnet_stats.rx_csum_bad_ethtype++;
1523
		return (1);
1777
		return (1);
1524
	}
1778
	}
1525
1779
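The fixup path fills in the 16-bit ones-complement checksum that NEEDS_CSUM leaves incomplete. A portable sketch of the RFC 1071 sum that in_cksum_skip() computes, illustrative only (the kernel routine operates on mbuf chains, not flat buffers):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Ones-complement sum over data[skip..len), carries folded in. */
static uint16_t
cksum_skip(const uint8_t *data, size_t len, size_t skip)
{
	uint32_t sum = 0;
	size_t i;

	for (i = skip; i + 1 < len; i += 2)
		sum += (uint32_t)(data[i] << 8 | data[i + 1]);
	if (i < len)				/* odd trailing byte */
		sum += (uint32_t)(data[i] << 8);
	while (sum > 0xffff)			/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int
main(void)
{
	uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x1c, 0x00, 0x00 };

	printf("checksum: 0x%04x\n", cksum_skip(pkt, sizeof(pkt), 0));
	return (0);
}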
Lines 1527-1590
1527
}
1781
}
1528
1782
1529
static int
1783
static int
1530
vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m,
1784
vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m,
1531
    uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
1785
    uint16_t etype, int hoff, struct virtio_net_hdr *hdr)
1532
{
1786
{
1533
	struct vtnet_softc *sc;
1787
	struct vtnet_softc *sc;
1534
	int offset, proto;
1788
	int protocol;
1535
1789
1536
	sc = rxq->vtnrx_sc;
1790
	sc = rxq->vtnrx_sc;
1537
1791
1538
	switch (eth_type) {
1792
	switch (etype) {
1539
#if defined(INET)
1793
#if defined(INET)
1540
	case ETHERTYPE_IP: {
1794
	case ETHERTYPE_IP:
1541
		struct ip *ip;
1795
		if (__predict_false(m->m_len < hoff + sizeof(struct ip)))
1542
		if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
1796
			protocol = IPPROTO_DONE;
1543
			return (1);
1797
		else {
1544
		ip = (struct ip *)(m->m_data + ip_start);
1798
			struct ip *ip = (struct ip *)(m->m_data + hoff);
1545
		proto = ip->ip_p;
1799
			protocol = ip->ip_p;
1546
		offset = ip_start + (ip->ip_hl << 2);
1800
		}
1547
		break;
1801
		break;
1548
	}
1549
#endif
1802
#endif
1550
#if defined(INET6)
1803
#if defined(INET6)
1551
	case ETHERTYPE_IPV6:
1804
	case ETHERTYPE_IPV6:
1552
		if (__predict_false(m->m_len < ip_start +
1805
		if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr))
1553
		    sizeof(struct ip6_hdr)))
1806
		    || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0)
1554
			return (1);
1807
			protocol = IPPROTO_DONE;
1555
		offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
1556
		if (__predict_false(offset < 0))
1557
			return (1);
1558
		break;
1808
		break;
1559
#endif
1809
#endif
1560
	default:
1810
	default:
1561
		sc->vtnet_stats.rx_csum_bad_ethtype++;
1811
		protocol = IPPROTO_DONE;
1562
		return (1);
1812
		break;
1563
	}
1813
	}
1564
1814
1565
	switch (proto) {
1815
	switch (protocol) {
1566
	case IPPROTO_TCP:
1816
	case IPPROTO_TCP:
1567
		if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
1568
			return (1);
1569
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1570
		m->m_pkthdr.csum_data = 0xFFFF;
1571
		break;
1572
	case IPPROTO_UDP:
1817
	case IPPROTO_UDP:
1573
		if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
1574
			return (1);
1575
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1818
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1576
		m->m_pkthdr.csum_data = 0xFFFF;
1819
		m->m_pkthdr.csum_data = 0xFFFF;
1577
		break;
1820
		break;
1578
	default:
1821
	default:
1579
		/*
1822
		/*
1580
		 * For the remaining protocols, FreeBSD does not support
1823
		 * FreeBSD does not support checksum offloading of this
1581
		 * checksum offloading, so the checksum will be recomputed.
1824
		 * protocol. Let the stack re-verify the checksum later
1825
		 * if the protocol is supported.
1582
		 */
1826
		 */
1583
#if 0
1827
#if 0
1584
		if_printf(sc->vtnet_ifp, "cksum offload of unsupported "
1828
		if_printf(sc->vtnet_ifp,
1585
		    "protocol eth_type=%#x proto=%d csum_start=%d "
1829
		    "%s: checksum offload of unsupported protocol "
1586
		    "csum_offset=%d\n", __func__, eth_type, proto,
1830
		    "etype=%#x protocol=%d csum_start=%d csum_offset=%d\n",
1587
		    hdr->csum_start, hdr->csum_offset);
1831
		    __func__, etype, protocol, hdr->csum_start,
1832
		    hdr->csum_offset);
1588
#endif
1833
#endif
1589
		break;
1834
		break;
1590
	}
1835
	}
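A compact sketch of the DATA_VALID policy above: only protocols the stack understands are marked verified, everything else is left for the stack to re-check. The flag and protocol values here are stand-ins, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define CSUM_DATA_VALID	0x0001
#define CSUM_PSEUDO_HDR	0x0002
#define PROTO_TCP	6
#define PROTO_UDP	17

/*
 * Mark the checksum verified when the host said DATA_VALID and the
 * parsed L4 protocol is supported; otherwise pass through untouched.
 */
static void
set_rx_csum_flags(int protocol, uint32_t *csum_flags, uint16_t *csum_data)
{
	switch (protocol) {
	case PROTO_TCP:
	case PROTO_UDP:
		*csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		*csum_data = 0xFFFF;
		break;
	default:
		break;		/* not an error; stack re-verifies */
	}
}

int
main(void)
{
	uint32_t flags = 0;
	uint16_t data = 0;

	set_rx_csum_flags(PROTO_TCP, &flags, &data);
	printf("flags=%#x data=%#x\n", flags, data);
	return (0);
}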
Lines 1592-1632
1592
	return (0);
1837
	return (0);
1593
}
1838
}
1594
1839
1595
/*
1596
 * Set the appropriate CSUM_* flags. Unfortunately, the information
1597
 * provided is not directly useful to us. The VirtIO header gives the
1598
 * offset of the checksum, which is all Linux needs, but this is not
1599
 * how FreeBSD does things. We are forced to peek inside the packet
1600
 * a bit.
1601
 *
1602
 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
1603
 * could accept the offsets and let the stack figure it out.
1604
 */
1605
static int
1840
static int
1606
vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
1841
vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
1607
    struct virtio_net_hdr *hdr)
1842
    struct virtio_net_hdr *hdr)
1608
{
1843
{
1609
	struct ether_header *eh;
1844
	const struct ether_header *eh;
1610
	struct ether_vlan_header *evh;
1845
	int hoff;
1611
	uint16_t eth_type;
1846
	uint16_t etype;
1612
	int offset, error;
1613
1847
1614
	eh = mtod(m, struct ether_header *);
1848
	eh = mtod(m, const struct ether_header *);
1615
	eth_type = ntohs(eh->ether_type);
1849
	etype = ntohs(eh->ether_type);
1616
	if (eth_type == ETHERTYPE_VLAN) {
1850
	if (etype == ETHERTYPE_VLAN) {
1617
		/* BMV: We should handle nested VLAN tags too. */
1851
		/* TODO BMV: Handle QinQ. */
1618
		evh = mtod(m, struct ether_vlan_header *);
1852
		const struct ether_vlan_header *evh =
1619
		eth_type = ntohs(evh->evl_proto);
1853
		    mtod(m, const struct ether_vlan_header *);
1620
		offset = sizeof(struct ether_vlan_header);
1854
		etype = ntohs(evh->evl_proto);
1855
		hoff = sizeof(struct ether_vlan_header);
1621
	} else
1856
	} else
1622
		offset = sizeof(struct ether_header);
1857
		hoff = sizeof(struct ether_header);
1623
1858
1624
	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1859
	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1625
		error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr);
1860
		return (vtnet_rxq_csum_needs_csum(rxq, m, etype, hoff, hdr));
1626
	else
1861
	else /* VIRTIO_NET_HDR_F_DATA_VALID */
1627
		error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr);
1862
		return (vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr));
1628
1629
	return (error);
1630
}
1863
}
1631
1864
1632
static void
1865
static void
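The header parse above peels at most one 802.1Q tag (QinQ is left as a TODO). A self-contained sketch of that etype/offset computation on a raw frame buffer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETHERTYPE_VLAN	0x8100
#define ETH_HLEN	14
#define ETH_VLAN_HLEN	18

/* Read a 16-bit big-endian field. */
static uint16_t
get_be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

/*
 * Determine the payload ethertype and the L3 header offset, skipping a
 * single VLAN tag if present.
 */
static uint16_t
parse_etype(const uint8_t *frame, int *hoff)
{
	uint16_t etype = get_be16(frame + 12);

	if (etype == ETHERTYPE_VLAN) {
		etype = get_be16(frame + 16);
		*hoff = ETH_VLAN_HLEN;
	} else
		*hoff = ETH_HLEN;
	return (etype);
}

int
main(void)
{
	uint8_t frame[32];
	int hoff;

	memset(frame, 0, sizeof(frame));
	frame[12] = 0x81; frame[13] = 0x00;	/* 802.1Q tag */
	frame[16] = 0x08; frame[17] = 0x00;	/* inner IPv4 */
	printf("etype=%#x hoff=%d\n", parse_etype(frame, &hoff), hoff);
	return (0);
}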
Lines 1661-1674
1661
{
1894
{
1662
	struct vtnet_softc *sc;
1895
	struct vtnet_softc *sc;
1663
	struct virtqueue *vq;
1896
	struct virtqueue *vq;
1664
	struct mbuf *m, *m_tail;
1897
	struct mbuf *m_tail;
1665
	int len;
1666
1898
1667
	sc = rxq->vtnrx_sc;
1899
	sc = rxq->vtnrx_sc;
1668
	vq = rxq->vtnrx_vq;
1900
	vq = rxq->vtnrx_vq;
1669
	m_tail = m_head;
1901
	m_tail = m_head;
1670
1902
1671
	while (--nbufs > 0) {
1903
	while (--nbufs > 0) {
1904
		struct mbuf *m;
1905
		int len;
1906
1672
		m = virtqueue_dequeue(vq, &len);
1907
		m = virtqueue_dequeue(vq, &len);
1673
		if (m == NULL) {
1908
		if (m == NULL) {
1674
			rxq->vtnrx_stats.vrxs_ierrors++;
1909
			rxq->vtnrx_stats.vrxs_ierrors++;
Lines 1703-1721
1703
	return (1);
1938
	return (1);
1704
}
1939
}
1705
1940
1941
#if defined(INET) || defined(INET6)
1942
static int
1943
vtnet_lro_rx(struct vtnet_rxq *rxq, struct mbuf *m)
1944
{
1945
	struct lro_ctrl *lro;
1946
1947
	lro = &rxq->vtnrx_lro;
1948
1949
	if (lro->lro_mbuf_max != 0) {
1950
		tcp_lro_queue_mbuf(lro, m);
1951
		return (0);
1952
	}
1953
1954
	return (tcp_lro_rx(lro, m, 0));
1955
}
1956
#endif
1957
1706
static void
1958
static void
1707
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
1959
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
1708
    struct virtio_net_hdr *hdr)
1960
    struct virtio_net_hdr *hdr)
1709
{
1961
{
1710
	struct vtnet_softc *sc;
1962
	struct vtnet_softc *sc;
1711
	struct ifnet *ifp;
1963
	struct ifnet *ifp;
1712
	struct ether_header *eh;
1713
1964
1714
	sc = rxq->vtnrx_sc;
1965
	sc = rxq->vtnrx_sc;
1715
	ifp = sc->vtnet_ifp;
1966
	ifp = sc->vtnet_ifp;
1716
1967
1717
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1968
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1718
		eh = mtod(m, struct ether_header *);
1969
		struct ether_header *eh = mtod(m, struct ether_header *);
1719
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1970
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1720
			vtnet_vlan_tag_remove(m);
1971
			vtnet_vlan_tag_remove(m);
1721
			/*
1972
			/*
Lines 1730-1754
1730
	m->m_pkthdr.flowid = rxq->vtnrx_id;
1981
	m->m_pkthdr.flowid = rxq->vtnrx_id;
1731
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1982
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1732
1983
1733
	/*
1984
	if (hdr->flags &
1734
	 * BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum
1985
	    (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) {
1735
	 * distinction that Linux does. Need to reevaluate if performing
1736
	 * offloading for the NEEDS_CSUM case is really appropriate.
1737
	 */
1738
	if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM |
1739
	    VIRTIO_NET_HDR_F_DATA_VALID)) {
1740
		if (vtnet_rxq_csum(rxq, m, hdr) == 0)
1986
		if (vtnet_rxq_csum(rxq, m, hdr) == 0)
1741
			rxq->vtnrx_stats.vrxs_csum++;
1987
			rxq->vtnrx_stats.vrxs_csum++;
1742
		else
1988
		else
1743
			rxq->vtnrx_stats.vrxs_csum_failed++;
1989
			rxq->vtnrx_stats.vrxs_csum_failed++;
1744
	}
1990
	}
1745
1991
1992
	if (hdr->gso_size != 0) {
1993
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1994
		case VIRTIO_NET_HDR_GSO_TCPV4:
1995
		case VIRTIO_NET_HDR_GSO_TCPV6:
1996
//			not available in 11.x mbuf
1997
//			m->m_pkthdr.lro_nsegs =
1998
//			    howmany(m->m_pkthdr.len, hdr->gso_size);
1999
			rxq->vtnrx_stats.vrxs_host_lro++;
2000
			break;
2001
		}
2002
	}
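The commented-out lro_nsegs computation divides the aggregated frame length by the host's gso_size, rounding up. The same arithmetic, using the rounding-up division of FreeBSD's howmany() macro:

#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

int
main(void)
{
	int pktlen = 45000, gso_size = 1448;

	/* Number of segments the host coalesced into this large frame. */
	printf("lro_nsegs = %d\n", howmany(pktlen, gso_size));
	return (0);
}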
2003
1746
	rxq->vtnrx_stats.vrxs_ipackets++;
2004
	rxq->vtnrx_stats.vrxs_ipackets++;
1747
	rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;
2005
	rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;
1748
2006
1749
	VTNET_RXQ_UNLOCK(rxq);
2007
#if defined(INET) || defined(INET6)
2008
	if (vtnet_software_lro(sc) && ifp->if_capenable & IFCAP_LRO) {
2009
		if (vtnet_lro_rx(rxq, m) == 0)
2010
			return;
2011
	}
2012
#endif
2013
1750
	(*ifp->if_input)(ifp, m);
2014
	(*ifp->if_input)(ifp, m);
1751
	VTNET_RXQ_LOCK(rxq);
1752
}
2015
}
1753
2016
1754
static int
2017
static int
Lines 1758-1777
1758
	struct vtnet_softc *sc;
2021
	struct vtnet_softc *sc;
1759
	struct ifnet *ifp;
2022
	struct ifnet *ifp;
1760
	struct virtqueue *vq;
2023
	struct virtqueue *vq;
1761
	struct mbuf *m;
2024
	int deq, count;
1762
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
1763
	int len, deq, nbufs, adjsz, count;
1764
2025
1765
	sc = rxq->vtnrx_sc;
2026
	sc = rxq->vtnrx_sc;
1766
	vq = rxq->vtnrx_vq;
2027
	vq = rxq->vtnrx_vq;
1767
	ifp = sc->vtnet_ifp;
2028
	ifp = sc->vtnet_ifp;
1768
	hdr = &lhdr;
1769
	deq = 0;
2029
	deq = 0;
1770
	count = sc->vtnet_rx_process_limit;
2030
	count = sc->vtnet_rx_process_limit;
1771
2031
1772
	VTNET_RXQ_LOCK_ASSERT(rxq);
2032
	VTNET_RXQ_LOCK_ASSERT(rxq);
1773
2033
2034
#ifdef DEV_NETMAP
2035
	if (netmap_rx_irq(ifp, 0, &deq))
2036
		return (0);
2037
#endif
2038
1774
	while (count-- > 0) {
2039
	while (count-- > 0) {
2040
		struct mbuf *m;
2041
		int len, nbufs, adjsz;
2042
1775
		m = virtqueue_dequeue(vq, &len);
2043
		m = virtqueue_dequeue(vq, &len);
1776
		if (m == NULL)
2044
		if (m == NULL)
1777
			break;
2045
			break;
Lines 1783-1800
1783
			continue;
2051
			continue;
1784
		}
2052
		}
1785
2053
1786
		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
2054
		if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) {
2055
			struct virtio_net_hdr_mrg_rxbuf *mhdr =
2056
			    mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
2057
			nbufs = vtnet_htog16(sc, mhdr->num_buffers);
2058
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2059
		} else if (vtnet_modern(sc)) {
2060
			nbufs = 1; /* num_buffers is always 1 */
2061
			adjsz = sizeof(struct virtio_net_hdr_v1);
2062
		} else {
1787
			nbufs = 1;
2063
			nbufs = 1;
1788
			adjsz = sizeof(struct vtnet_rx_header);
2064
			adjsz = sizeof(struct vtnet_rx_header);
1789
			/*
2065
			/*
1790
			 * Account for our pad inserted between the header
2066
			 * Account for our gap between the header and start of
1791
			 * and the actual start of the frame.
2067
			 * data to keep the segments separated.
1792
			 */
2068
			 */
1793
			len += VTNET_RX_HEADER_PAD;
2069
			len += VTNET_RX_HEADER_PAD;
1794
		} else {
1795
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
1796
			nbufs = mhdr->num_buffers;
1797
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1798
		}
2070
		}
1799
2071
1800
		if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
2072
		if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
Lines 1816-1841
1816
		}
2088
		}
1817
2089
1818
		/*
2090
		/*
1819
		 * Save copy of header before we strip it. For both mergeable
2091
		 * Save an endian swapped version of the header prior to it
1820
		 * and non-mergeable, the header is at the beginning of the
2092
		 * being stripped. The header is always at the start of the
1821
		 * mbuf data. We no longer need num_buffers, so always use a
2093
		 * mbuf data. num_buffers was already saved (and not needed)
1822
		 * regular header.
2094
		 * so use the standard header.
1823
		 *
1824
		 * BMV: Is this memcpy() expensive? We know the mbuf data is
1825
		 * still valid even after the m_adj().
1826
		 */
2095
		 */
1827
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
2096
		hdr = mtod(m, struct virtio_net_hdr *);
2097
		lhdr.flags = hdr->flags;
2098
		lhdr.gso_type = hdr->gso_type;
2099
		lhdr.hdr_len = vtnet_htog16(sc, hdr->hdr_len);
2100
		lhdr.gso_size = vtnet_htog16(sc, hdr->gso_size);
2101
		lhdr.csum_start = vtnet_htog16(sc, hdr->csum_start);
2102
		lhdr.csum_offset = vtnet_htog16(sc, hdr->csum_offset);
1828
		m_adj(m, adjsz);
2103
		m_adj(m, adjsz);
1829
2104
1830
		vtnet_rxq_input(rxq, m, hdr);
2105
		vtnet_rxq_input(rxq, m, &lhdr);
1831
1832
		/* Must recheck after dropping the Rx lock. */
1833
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1834
			break;
1835
	}
2106
	}
1836
2107
1837
	if (deq > 0)
2108
	if (deq > 0) {
2109
#if defined(INET) || defined(INET6)
2110
		tcp_lro_flush_all(&rxq->vtnrx_lro);
2111
#endif
1838
		virtqueue_notify(vq);
2112
		virtqueue_notify(vq);
2113
	}
1839
2114
1840
	return (count > 0 ? 0 : EAGAIN);
2115
	return (count > 0 ? 0 : EAGAIN);
1841
}
2116
}
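The endian-swapped header copy above goes through vtnet_htog16() because a VIRTIO_F_VERSION_1 (modern) device is always little-endian, while a legacy device uses guest-native order. A portable sketch of such an accessor, assuming only the field width (not the driver's actual macros):

#include <stdint.h>
#include <stdio.h>

/*
 * Decode a 16-bit device-ring field: reassemble from little-endian
 * byte order for modern devices, pass through for legacy ones.
 */
static uint16_t
htog16(int modern, uint16_t devval)
{
	if (modern) {
		const uint8_t *p = (const uint8_t *)&devval;

		return (uint16_t)(p[0] | p[1] << 8);
	}
	return (devval);
}

int
main(void)
{
	uint16_t raw = 0x3412;

	/* Identical on a little-endian host; swapped on a big-endian one. */
	printf("modern: %#x legacy: %#x\n", htog16(1, raw), htog16(0, raw));
	return (0);
}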
Lines 1864-1874
1864
		return;
2139
		return;
1865
	}
2140
	}
1866
2141
1867
#ifdef DEV_NETMAP
1868
	if (netmap_rx_irq(ifp, rxq->vtnrx_id, &more) != NM_IRQ_PASS)
1869
		return;
1870
#endif /* DEV_NETMAP */
1871
1872
	VTNET_RXQ_LOCK(rxq);
2142
	VTNET_RXQ_LOCK(rxq);
1873
2143
1874
again:
2144
again:
Lines 1888-1895
1888
		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
2158
		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
1889
			goto again;
2159
			goto again;
1890
2160
1891
		VTNET_RXQ_UNLOCK(rxq);
1892
		rxq->vtnrx_stats.vrxs_rescheduled++;
2161
		rxq->vtnrx_stats.vrxs_rescheduled++;
2162
		VTNET_RXQ_UNLOCK(rxq);
1893
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
2163
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
1894
	} else
2164
	} else
1895
		VTNET_RXQ_UNLOCK(rxq);
2165
		VTNET_RXQ_UNLOCK(rxq);
Lines 1919-1940
1919
		if (!more)
2189
		if (!more)
1920
			vtnet_rxq_disable_intr(rxq);
2190
			vtnet_rxq_disable_intr(rxq);
1921
		rxq->vtnrx_stats.vrxs_rescheduled++;
2191
		rxq->vtnrx_stats.vrxs_rescheduled++;
2192
		VTNET_RXQ_UNLOCK(rxq);
1922
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
2193
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
1923
	}
2194
	} else
2195
		VTNET_RXQ_UNLOCK(rxq);
2196
}
1924
2197
1925
	VTNET_RXQ_UNLOCK(rxq);
2198
static int
2199
vtnet_txq_intr_threshold(struct vtnet_txq *txq)
2200
{
2201
	struct vtnet_softc *sc;
2202
	int threshold;
2203
2204
	sc = txq->vtntx_sc;
2205
2206
	/*
2207
	 * The Tx interrupt is disabled until the queue free count falls
2208
	 * below our threshold. Completed frames are drained from the Tx
2209
	 * virtqueue before transmitting new frames and in the watchdog
2210
	 * callout, so the frequency of Tx interrupts is greatly reduced,
2211
	 * at the cost of not freeing mbufs as quickly as they otherwise
2212
	 * would be.
2213
	 */
2214
	threshold = virtqueue_size(txq->vtntx_vq) / 4;
2215
2216
	/*
2217
	 * Without indirect descriptors, leave enough room for the most
2218
	 * segments we handle.
2219
	 */
2220
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
2221
	    threshold < sc->vtnet_tx_nsegs)
2222
		threshold = sc->vtnet_tx_nsegs;
2223
2224
	return (threshold);
1926
}
2225
}
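vtnet_txq_intr_threshold() cuts Tx interrupts by waiting until a quarter of the ring is outstanding, with a floor of one worst-case frame's segment count when indirect descriptors are unavailable. The same policy as a standalone sketch:

#include <stdio.h>

static int
tx_intr_threshold(int vq_size, int has_indirect, int tx_nsegs)
{
	int threshold = vq_size / 4;

	/* Without indirect descriptors, keep room for one full frame. */
	if (!has_indirect && threshold < tx_nsegs)
		threshold = tx_nsegs;
	return (threshold);
}

int
main(void)
{
	printf("%d\n", tx_intr_threshold(256, 1, 35));	/* 64 */
	printf("%d\n", tx_intr_threshold(64, 0, 35));	/* 35 */
	return (0);
}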
1927
2226
1928
static int
2227
static int
1929
vtnet_txq_below_threshold(struct vtnet_txq *txq)
2228
vtnet_txq_below_threshold(struct vtnet_txq *txq)
1930
{
2229
{
1931
	struct vtnet_softc *sc;
1932
	struct virtqueue *vq;
2230
	struct virtqueue *vq;
1933
2231
1934
	sc = txq->vtntx_sc;
1935
	vq = txq->vtntx_vq;
2232
	vq = txq->vtntx_vq;
1936
2233
1937
	return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh);
2234
	return (virtqueue_nfree(vq) <= txq->vtntx_intr_threshold);
1938
}
2235
}
1939
2236
1940
static int
2237
static int
Lines 1969-1989
1969
	struct virtqueue *vq;
2266
	struct virtqueue *vq;
1970
	struct vtnet_tx_header *txhdr;
2267
	struct vtnet_tx_header *txhdr;
1971
	int last;
2268
	int last;
1972
#ifdef DEV_NETMAP
1973
	int netmap_bufs = vtnet_netmap_queue_on(txq->vtntx_sc, NR_TX,
1974
						txq->vtntx_id);
1975
#else  /* !DEV_NETMAP */
1976
	int netmap_bufs = 0;
1977
#endif /* !DEV_NETMAP */
1978
2269
1979
	vq = txq->vtntx_vq;
2270
	vq = txq->vtntx_vq;
1980
	last = 0;
2271
	last = 0;
1981
2272
1982
	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
2273
	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
1983
		if (!netmap_bufs) {
2274
		m_freem(txhdr->vth_mbuf);
1984
			m_freem(txhdr->vth_mbuf);
2275
		uma_zfree(vtnet_tx_header_zone, txhdr);
1985
			uma_zfree(vtnet_tx_header_zone, txhdr);
1986
		}
1987
	}
2276
	}
1988
2277
1989
	KASSERT(virtqueue_empty(vq),
2278
	KASSERT(virtqueue_empty(vq),
Lines 1991-2002
1991
}
2280
}
1992
2281
1993
/*
2282
/*
1994
 * BMV: Much of this can go away once we finally have offsets in
2283
 * BMV: This can go away once we finally have offsets in the mbuf header.
1995
 * the mbuf packet header. Bug andre@.
1996
 */
2284
 */
1997
static int
2285
static int
1998
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m,
2286
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, int *etype,
1999
    int *etype, int *proto, int *start)
2287
    int *proto, int *start)
2000
{
2288
{
2001
	struct vtnet_softc *sc;
2289
	struct vtnet_softc *sc;
2002
	struct ether_vlan_header *evh;
2290
	struct ether_vlan_header *evh;
Lines 2040-2046 Link Here
2040
		break;
2328
		break;
2041
#endif
2329
#endif
2042
	default:
2330
	default:
2043
		sc->vtnet_stats.tx_csum_bad_ethtype++;
2331
		sc->vtnet_stats.tx_csum_unknown_ethtype++;
2044
		return (EINVAL);
2332
		return (EINVAL);
2045
	}
2333
	}
2046
2334
Lines 2048-2054
2048
}
2336
}
2049
2337
2050
static int
2338
static int
2051
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
2339
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int flags,
2052
    int offset, struct virtio_net_hdr *hdr)
2340
    int offset, struct virtio_net_hdr *hdr)
2053
{
2341
{
2054
	static struct timeval lastecn;
2342
	static struct timeval lastecn;
Lines 2064-2079
2064
	} else
2352
	} else
2065
		tcp = (struct tcphdr *)(m->m_data + offset);
2353
		tcp = (struct tcphdr *)(m->m_data + offset);
2066
2354
2067
	hdr->hdr_len = offset + (tcp->th_off << 2);
2355
	hdr->hdr_len = vtnet_gtoh16(sc, offset + (tcp->th_off << 2));
2068
	hdr->gso_size = m->m_pkthdr.tso_segsz;
2356
	hdr->gso_size = vtnet_gtoh16(sc, m->m_pkthdr.tso_segsz);
2069
	hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
2357
	hdr->gso_type = (flags & CSUM_IP_TSO) ?
2070
	    VIRTIO_NET_HDR_GSO_TCPV6;
2358
	    VIRTIO_NET_HDR_GSO_TCPV4 : VIRTIO_NET_HDR_GSO_TCPV6;
2071
2359
2072
	if (tcp->th_flags & TH_CWR) {
2360
	if (__predict_false(tcp->th_flags & TH_CWR)) {
2073
		/*
2361
		/*
2074
		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
2362
		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In
2075
		 * ECN support is not on a per-interface basis, but globally via
2363
		 * FreeBSD, ECN support is not on a per-interface basis,
2076
		 * the net.inet.tcp.ecn.enable sysctl knob. The default is off.
2364
		 * but globally via the net.inet.tcp.ecn.enable sysctl
2365
		 * knob. The default is off.
2077
		 */
2366
		 */
2078
		if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
2367
		if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
2079
			if (ppsratecheck(&lastecn, &curecn, 1))
2368
			if (ppsratecheck(&lastecn, &curecn, 1))
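The TSO fields in the hunk above are computed from the parsed headers: hdr_len spans L2+L3 plus a TCP header of th_off 32-bit words, and gso_size is the mbuf's tso_segsz. A sketch of that arithmetic, with byte-order conversion omitted:

#include <stdint.h>
#include <stdio.h>

struct tso_hdr {
	uint16_t hdr_len;
	uint16_t gso_size;
};

static void
fill_tso(struct tso_hdr *hdr, int l4_offset, uint8_t th_off,
    uint16_t tso_segsz)
{
	/* th_off counts 32-bit words, hence the shift by 2. */
	hdr->hdr_len = (uint16_t)(l4_offset + (th_off << 2));
	hdr->gso_size = tso_segsz;
}

int
main(void)
{
	struct tso_hdr hdr;

	/* Ethernet(14) + IPv4(20) = 34; th_off = 5 words = 20 bytes. */
	fill_tso(&hdr, 34, 5, 1448);
	printf("hdr_len=%u gso_size=%u\n", hdr.hdr_len, hdr.gso_size);
	return (0);
}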
Lines 2103-2132
2103
	if (error)
2392
	if (error)
2104
		goto drop;
2393
		goto drop;
2105
2394
2106
	if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) ||
2395
	if (flags & (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6)) {
2107
	    (etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) {
2396
		/* Sanity check the parsed mbuf matches the offload flags. */
2108
		/*
2397
		if (__predict_false((flags & VTNET_CSUM_OFFLOAD &&
2109
		 * We could compare the IP protocol vs the CSUM_ flag too,
2398
		    etype != ETHERTYPE_IP) || (flags & VTNET_CSUM_OFFLOAD_IPV6
2110
		 * but that really should not be necessary.
2399
		    && etype != ETHERTYPE_IPV6))) {
2111
		 */
2400
			sc->vtnet_stats.tx_csum_proto_mismatch++;
2401
			goto drop;
2402
		}
2403
2112
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
2404
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
2113
		hdr->csum_start = csum_start;
2405
		hdr->csum_start = vtnet_gtoh16(sc, csum_start);
2114
		hdr->csum_offset = m->m_pkthdr.csum_data;
2406
		hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data);
2115
		txq->vtntx_stats.vtxs_csum++;
2407
		txq->vtntx_stats.vtxs_csum++;
2116
	}
2408
	}
2117
2409
2118
	if (flags & CSUM_TSO) {
2410
	if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
2411
		/*
2412
		 * Sanity check the parsed mbuf IP protocol is TCP, and
2413
		 * VirtIO TSO requires the checksum offloading above.
2414
		 */
2119
		if (__predict_false(proto != IPPROTO_TCP)) {
2415
		if (__predict_false(proto != IPPROTO_TCP)) {
2120
			/* Likely failed to correctly parse the mbuf. */
2121
			sc->vtnet_stats.tx_tso_not_tcp++;
2416
			sc->vtnet_stats.tx_tso_not_tcp++;
2122
			goto drop;
2417
			goto drop;
2418
		} else if (__predict_false((hdr->flags &
2419
		    VIRTIO_NET_HDR_F_NEEDS_CSUM) == 0)) {
2420
			sc->vtnet_stats.tx_tso_without_csum++;
2421
			goto drop;
2123
		}
2422
		}
2124
2423
2125
		KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
2424
		error = vtnet_txq_offload_tso(txq, m, flags, csum_start, hdr);
2126
		    ("%s: mbuf %p TSO without checksum offload %#x",
2127
		    __func__, m, flags));
2128
2129
		error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
2130
		if (error)
2425
		if (error)
2131
			goto drop;
2426
			goto drop;
2132
	}
2427
	}
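For NEEDS_CSUM transmits, csum_start/csum_offset tell the device where to finish the partial checksum: sum from csum_start onward and store the 16-bit result at csum_start + csum_offset. A sketch of the values for TCP over IPv4 (16 is the well-known offset of th_sum within the TCP header):

#include <stdint.h>
#include <stdio.h>

struct csum_req {
	uint16_t csum_start;
	uint16_t csum_offset;
};

int
main(void)
{
	struct csum_req req;
	int eth_ip_len = 14 + 20;	/* L2 + IPv4 header */
	int th_sum_off = 16;		/* offset of th_sum in the TCP header */

	req.csum_start = (uint16_t)eth_ip_len;
	req.csum_offset = (uint16_t)th_sum_off;
	printf("sum from %u, store at %u\n",
	    req.csum_start, req.csum_start + req.csum_offset);
	return (0);
}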
Lines 2155-2162
2155
2450
2156
	sglist_reset(sg);
2451
	sglist_reset(sg);
2157
	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
2452
	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
2158
	KASSERT(error == 0 && sg->sg_nseg == 1,
2453
	if (error != 0 || sg->sg_nseg != 1) {
2159
	    ("%s: error %d adding header to sglist", __func__, error));
2454
		KASSERT(0, ("%s: cannot add header to sglist error %d nseg %d",
2455
		    __func__, error, sg->sg_nseg));
2456
		goto fail;
2457
	}
2160
2458
2161
	error = sglist_append_mbuf(sg, m);
2459
	error = sglist_append_mbuf(sg, m);
2162
	if (error) {
2460
	if (error) {
Lines 2186-2192
2186
}
2484
}
2187
2485
2188
static int
2486
static int
2189
vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head)
2487
vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags)
2190
{
2488
{
2191
	struct vtnet_tx_header *txhdr;
2489
	struct vtnet_tx_header *txhdr;
2192
	struct virtio_net_hdr *hdr;
2490
	struct virtio_net_hdr *hdr;
Lines 2196-2202
2196
	m = *m_head;
2494
	m = *m_head;
2197
	M_ASSERTPKTHDR(m);
2495
	M_ASSERTPKTHDR(m);
2198
2496
2199
	txhdr = uma_zalloc(vtnet_tx_header_zone, M_NOWAIT | M_ZERO);
2497
	txhdr = uma_zalloc(vtnet_tx_header_zone, flags | M_ZERO);
2200
	if (txhdr == NULL) {
2498
	if (txhdr == NULL) {
2201
		m_freem(m);
2499
		m_freem(m);
2202
		*m_head = NULL;
2500
		*m_head = NULL;
Lines 2204-2212
2204
	}
2502
	}
2205
2503
2206
	/*
2504
	/*
2207
	 * Always use the non-mergeable header, regardless if the feature
2505
	 * Always use the non-mergeable header, regardless of whether mergeable headers
2208
	 * was negotiated. For transmit, num_buffers is always zero. The
2506
	 * were negotiated, because for transmit num_buffers is always zero.
2209
	 * vtnet_hdr_size is used to enqueue the correct header size.
2507
	 * The vtnet_hdr_size is used to enqueue the right header size segment.
2210
	 */
2508
	 */
2211
	hdr = &txhdr->vth_uhdr.hdr;
2509
	hdr = &txhdr->vth_uhdr.hdr;
2212
2510
Lines 2228-2238
2228
	}
2526
	}
2229
2527
2230
	error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
2528
	error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
2231
	if (error == 0)
2232
		return (0);
2233
2234
fail:
2529
fail:
2235
	uma_zfree(vtnet_tx_header_zone, txhdr);
2530
	if (error)
2531
		uma_zfree(vtnet_tx_header_zone, txhdr);
2236
2532
2237
	return (error);
2533
	return (error);
2238
}
2534
}
Lines 2270-2276
2270
		if (m0 == NULL)
2566
		if (m0 == NULL)
2271
			break;
2567
			break;
2272
2568
2273
		if (vtnet_txq_encap(txq, &m0) != 0) {
2569
		if (vtnet_txq_encap(txq, &m0, M_NOWAIT) != 0) {
2274
			if (m0 != NULL)
2570
			if (m0 != NULL)
2275
				IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2571
				IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2276
			break;
2572
			break;
Lines 2347-2353
2347
			break;
2643
			break;
2348
		}
2644
		}
2349
2645
2350
		if (vtnet_txq_encap(txq, &m) != 0) {
2646
		if (vtnet_txq_encap(txq, &m, M_NOWAIT) != 0) {
2351
			if (m != NULL)
2647
			if (m != NULL)
2352
				drbr_putback(ifp, br, m);
2648
				drbr_putback(ifp, br, m);
2353
			else
2649
			else
Lines 2381-2387
2381
	sc = ifp->if_softc;
2677
	sc = ifp->if_softc;
2382
	npairs = sc->vtnet_act_vq_pairs;
2678
	npairs = sc->vtnet_act_vq_pairs;
2383
2679
2384
	/* check if flowid is set */
2385
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
2680
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
2386
		i = m->m_pkthdr.flowid % npairs;
2681
		i = m->m_pkthdr.flowid % npairs;
2387
	else
2682
	else
Lines 2471-2476
2471
	deq = 0;
2766
	deq = 0;
2472
	VTNET_TXQ_LOCK_ASSERT(txq);
2767
	VTNET_TXQ_LOCK_ASSERT(txq);
2473
2768
2769
#ifdef DEV_NETMAP
2770
	if (netmap_tx_irq(txq->vtntx_sc->vtnet_ifp, txq->vtntx_id)) {
2771
		virtqueue_disable_intr(vq); // XXX luigi
2772
		return (0); // XXX or 1 ?
2773
	}
2774
#endif
2775
2474
	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
2776
	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
2475
		m = txhdr->vth_mbuf;
2777
		m = txhdr->vth_mbuf;
2476
		deq++;
2778
		deq++;
Lines 2512-2522
2512
		return;
2814
		return;
2513
	}
2815
	}
2514
2816
2515
#ifdef DEV_NETMAP
2516
	if (netmap_tx_irq(ifp, txq->vtntx_id) != NM_IRQ_PASS)
2517
		return;
2518
#endif /* DEV_NETMAP */
2519
2520
	VTNET_TXQ_LOCK(txq);
2817
	VTNET_TXQ_LOCK(txq);
2521
2818
2522
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2819
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
Lines 2703-2709
2703
	 * Most drivers just ignore the return value - it only fails
3000
	 * Most drivers just ignore the return value - it only fails
2704
	 * with ENOMEM so an error is not likely.
3001
	 * with ENOMEM so an error is not likely.
2705
	 */
3002
	 */
2706
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
3003
	for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
2707
		rxq = &sc->vtnet_rxqs[i];
3004
		rxq = &sc->vtnet_rxqs[i];
2708
		error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
3005
		error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
2709
		    "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
3006
		    "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
Lines 2733-2739
2733
		rxq = &sc->vtnet_rxqs[i];
3030
		rxq = &sc->vtnet_rxqs[i];
2734
		if (rxq->vtnrx_tq != NULL) {
3031
		if (rxq->vtnrx_tq != NULL) {
2735
			taskqueue_free(rxq->vtnrx_tq);
3032
			taskqueue_free(rxq->vtnrx_tq);
2736
			rxq->vtnrx_tq = NULL;
3033
			rxq->vtnrx_tq = NULL;
2737
		}
3034
		}
2738
3035
2739
		txq = &sc->vtnet_txqs[i];
3036
		txq = &sc->vtnet_txqs[i];
Lines 2773-2779
2773
	struct vtnet_txq *txq;
3070
	struct vtnet_txq *txq;
2774
	int i;
3071
	int i;
2775
3072
2776
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3073
#ifdef DEV_NETMAP
3074
	if (nm_native_on(NA(sc->vtnet_ifp)))
3075
		return;
3076
#endif
3077
3078
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2777
		rxq = &sc->vtnet_rxqs[i];
3079
		rxq = &sc->vtnet_rxqs[i];
2778
		vtnet_rxq_free_mbufs(rxq);
3080
		vtnet_rxq_free_mbufs(rxq);
2779
3081
Lines 2789-2799
2789
	struct vtnet_txq *txq;
3091
	struct vtnet_txq *txq;
2790
	int i;
3092
	int i;
2791
3093
3094
	VTNET_CORE_LOCK_ASSERT(sc);
3095
2792
	/*
3096
	/*
2793
	 * Lock and unlock the per-queue mutex so we know the stop
3097
	 * Lock and unlock the per-queue mutex so we know the stop
2794
	 * state is visible. Doing only the active queues should be
3098
	 * state is visible. Doing only the active queues should be
2795
	 * sufficient, but it does not cost much extra to do all the
3099
	 * sufficient, but it does not cost much extra to do all the
2796
	 * queues. Note we hold the core mutex here too.
3100
	 * queues.
2797
	 */
3101
	 */
2798
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
3102
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2799
		rxq = &sc->vtnet_rxqs[i];
3103
		rxq = &sc->vtnet_rxqs[i];
Lines 2832-2839
2832
	virtio_stop(dev);
3136
	virtio_stop(dev);
2833
	vtnet_stop_rendezvous(sc);
3137
	vtnet_stop_rendezvous(sc);
2834
3138
2835
	/* Free any mbufs left in the virtqueues. */
2836
	vtnet_drain_rxtx_queues(sc);
3139
	vtnet_drain_rxtx_queues(sc);
3140
	sc->vtnet_act_vq_pairs = 1;
2837
}
3141
}
2838
3142
2839
static int
3143
static int
Lines 2842-2892
2842
	device_t dev;
3146
	device_t dev;
2843
	struct ifnet *ifp;
3147
	struct ifnet *ifp;
2844
	uint64_t features;
3148
	uint64_t features;
2845
	int mask, error;
3149
	int error;
2846
3150
2847
	dev = sc->vtnet_dev;
3151
	dev = sc->vtnet_dev;
2848
	ifp = sc->vtnet_ifp;
3152
	ifp = sc->vtnet_ifp;
2849
	features = sc->vtnet_features;
3153
	features = sc->vtnet_negotiated_features;
2850
3154
2851
	mask = 0;
2852
#if defined(INET)
2853
	mask |= IFCAP_RXCSUM;
2854
#endif
2855
#if defined (INET6)
2856
	mask |= IFCAP_RXCSUM_IPV6;
2857
#endif
2858
2859
	/*
3155
	/*
2860
	 * Re-negotiate with the host, removing any disabled receive
3156
	 * Re-negotiate with the host, removing any disabled receive
2861
	 * features. Transmit features are disabled only on our side
3157
	 * features. Transmit features are disabled only on our side
2862
	 * via if_capenable and if_hwassist.
3158
	 * via if_capenable and if_hwassist.
2863
	 */
3159
	 */
2864
3160
2865
	if (ifp->if_capabilities & mask) {
3161
	if ((ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0)
2866
		/*
3162
		features &= ~(VIRTIO_NET_F_GUEST_CSUM | VTNET_LRO_FEATURES);
2867
		 * We require both IPv4 and IPv6 offloading to be enabled
2868
		 * in order to negotiated it: VirtIO does not distinguish
2869
		 * between the two.
2870
		 */
2871
		if ((ifp->if_capenable & mask) != mask)
2872
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
2873
	}
2874
3163
2875
	if (ifp->if_capabilities & IFCAP_LRO) {
3164
	if ((ifp->if_capenable & IFCAP_LRO) == 0)
2876
		if ((ifp->if_capenable & IFCAP_LRO) == 0)
3165
		features &= ~VTNET_LRO_FEATURES;
2877
			features &= ~VTNET_LRO_FEATURES;
2878
	}
2879
3166
2880
	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
3167
	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
2881
		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3168
		features &= ~VIRTIO_NET_F_CTRL_VLAN;
2882
			features &= ~VIRTIO_NET_F_CTRL_VLAN;
2883
	}
2884
3169
2885
	error = virtio_reinit(dev, features);
3170
	error = virtio_reinit(dev, features);
2886
	if (error)
3171
	if (error) {
2887
		device_printf(dev, "virtio reinit error %d\n", error);
3172
		device_printf(dev, "virtio reinit error %d\n", error);
3173
		return (error);
3174
	}
2888
3175
2889
	return (error);
3176
	sc->vtnet_features = features;
3177
	virtio_reinit_complete(dev);
3178
3179
	return (0);
2890
}
3180
}
2891
3181
2892
static void
3182
static void
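The reinit path above strips receive features whose interface capabilities were switched off before renegotiating with the host. A sketch of that masking policy with stand-in bit values (not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>

#define IFCAP_RXCSUM		0x0001
#define IFCAP_RXCSUM_IPV6	0x0002
#define IFCAP_LRO		0x0004
#define F_GUEST_CSUM		(1ULL << 0)
#define F_LRO_FEATURES		(1ULL << 1)

/*
 * Start from the features negotiated at attach and clear any receive
 * feature whose corresponding capability is now disabled.
 */
static uint64_t
mask_features(uint64_t negotiated, uint32_t capenable)
{
	uint64_t features = negotiated;

	if ((capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0)
		features &= ~(F_GUEST_CSUM | F_LRO_FEATURES);
	if ((capenable & IFCAP_LRO) == 0)
		features &= ~F_LRO_FEATURES;
	return (features);
}

int
main(void)
{
	uint64_t f = mask_features(F_GUEST_CSUM | F_LRO_FEATURES,
	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);

	printf("features=%#llx\n", (unsigned long long)f); /* LRO stripped */
	return (0);
}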
Lines 2897-2905
2897
	ifp = sc->vtnet_ifp;
3187
	ifp = sc->vtnet_ifp;
2898
3188
2899
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
3189
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
2900
		/* Restore promiscuous and all-multicast modes. */
2901
		vtnet_rx_filter(sc);
3190
		vtnet_rx_filter(sc);
2902
		/* Restore filtered MAC addresses. */
2903
		vtnet_rx_filter_mac(sc);
3191
		vtnet_rx_filter_mac(sc);
2904
	}
3192
	}
2905
3193
Lines 2911-2942
2911
vtnet_init_rx_queues(struct vtnet_softc *sc)
3199
vtnet_init_rx_queues(struct vtnet_softc *sc)
2912
{
3200
{
2913
	device_t dev;
3201
	device_t dev;
3202
	struct ifnet *ifp;
2914
	struct vtnet_rxq *rxq;
3203
	struct vtnet_rxq *rxq;
2915
	int i, clsize, error;
3204
	int i, clustersz, error;
2916
3205
2917
	dev = sc->vtnet_dev;
3206
	dev = sc->vtnet_dev;
3207
	ifp = sc->vtnet_ifp;
2918
3208
2919
	/*
3209
	clustersz = vtnet_rx_cluster_size(sc, ifp->if_mtu);
2920
	 * Use the new cluster size if one has been set (via a MTU
3210
	sc->vtnet_rx_clustersz = clustersz;
2921
	 * change). Otherwise, use the standard 2K clusters.
3211
2922
	 *
3212
	if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) {
2923
	 * BMV: It might make sense to use page sized clusters as
3213
		sc->vtnet_rx_nmbufs = howmany(sizeof(struct vtnet_rx_header) +
2924
	 * the default (depending on the features negotiated).
3214
		    VTNET_MAX_RX_SIZE, clustersz);
2925
	 */
3215
		KASSERT(sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
2926
	if (sc->vtnet_rx_new_clsize != 0) {
3216
		    ("%s: too many rx mbufs %d for %d segments", __func__,
2927
		clsize = sc->vtnet_rx_new_clsize;
3217
		    sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
2928
		sc->vtnet_rx_new_clsize = 0;
2929
	} else
3218
	} else
2930
		clsize = MCLBYTES;
3219
		sc->vtnet_rx_nmbufs = 1;
2931
3220
2932
	sc->vtnet_rx_clsize = clsize;
3221
#ifdef DEV_NETMAP
2933
	sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize);
3222
	if (vtnet_netmap_init_rx_buffers(sc))
3223
		return (0);
3224
#endif
2934
3225
2935
	KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS ||
2936
	    sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
2937
	    ("%s: too many rx mbufs %d for %d segments", __func__,
2938
	    sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
2939
2940
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3226
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2941
		rxq = &sc->vtnet_rxqs[i];
3227
		rxq = &sc->vtnet_rxqs[i];
2942
3228
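Without mergeable buffers, the LRO_NOMRG case must chain enough fixed-size clusters to hold the largest accepted frame, so the buffer count is a rounding-up division. A sketch with assumed sizes (the real constants live in the driver headers):

#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

/* Assumed values for illustration only. */
#define RX_HEADER_SIZE	16	/* driver Rx header ahead of the frame */
#define MAX_RX_SIZE	65550	/* largest LRO frame accepted */

int
main(void)
{
	int clustersz = 2048;

	printf("nmbufs = %d\n",
	    howmany(RX_HEADER_SIZE + MAX_RX_SIZE, clustersz));
	return (0);
}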
Lines 2946-2953
2946
		VTNET_RXQ_UNLOCK(rxq);
3232
		VTNET_RXQ_UNLOCK(rxq);
2947
3233
2948
		if (error) {
3234
		if (error) {
2949
			device_printf(dev,
3235
			device_printf(dev, "cannot populate Rx queue %d\n", i);
2950
			    "cannot allocate mbufs for Rx queue %d\n", i);
2951
			return (error);
3236
			return (error);
2952
		}
3237
		}
2953
	}
3238
	}
Lines 2964-2969
2964
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3249
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2965
		txq = &sc->vtnet_txqs[i];
3250
		txq = &sc->vtnet_txqs[i];
2966
		txq->vtntx_watchdog = 0;
3251
		txq->vtntx_watchdog = 0;
3252
		txq->vtntx_intr_threshold = vtnet_txq_intr_threshold(txq);
2967
	}
3253
	}
2968
3254
2969
	return (0);
3255
	return (0);
Lines 2993-3028
2993
3279
2994
	dev = sc->vtnet_dev;
3280
	dev = sc->vtnet_dev;
2995
3281
2996
	if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) {
3282
	if ((sc->vtnet_flags & VTNET_FLAG_MQ) == 0) {
2997
		sc->vtnet_act_vq_pairs = 1;
3283
		sc->vtnet_act_vq_pairs = 1;
2998
		return;
3284
		return;
2999
	}
3285
	}
3000
3286
3001
	npairs = sc->vtnet_requested_vq_pairs;
3287
	npairs = sc->vtnet_req_vq_pairs;
3002
3288
3003
	if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
3289
	if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
3004
		device_printf(dev,
3290
		device_printf(dev, "cannot set active queue pairs to %d, "
3005
		    "cannot set active queue pairs to %d\n", npairs);
3291
		    "falling back to 1 queue pair\n", npairs);
3006
		npairs = 1;
3292
		npairs = 1;
3007
	}
3293
	}
3008
3294
3009
	sc->vtnet_act_vq_pairs = npairs;
3295
	sc->vtnet_act_vq_pairs = npairs;
3010
}
3296
}
3011
3297
3298
static void
3299
vtnet_update_rx_offloads(struct vtnet_softc *sc)
3300
{
3301
	struct ifnet *ifp;
3302
	uint64_t features;
3303
	int error;
3304
3305
	ifp = sc->vtnet_ifp;
3306
	features = sc->vtnet_features;
3307
3308
	VTNET_CORE_LOCK_ASSERT(sc);
3309
3310
	if (ifp->if_capabilities & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
3311
		if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
3312
			features |= VIRTIO_NET_F_GUEST_CSUM;
3313
		else
3314
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
3315
	}
3316
3317
	if (ifp->if_capabilities & IFCAP_LRO && !vtnet_software_lro(sc)) {
3318
		if (ifp->if_capenable & IFCAP_LRO)
3319
			features |= VTNET_LRO_FEATURES;
3320
		else
3321
			features &= ~VTNET_LRO_FEATURES;
3322
	}
3323
3324
	error = vtnet_ctrl_guest_offloads(sc,
3325
	    features & (VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 |
3326
		        VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN  |
3327
			VIRTIO_NET_F_GUEST_UFO));
3328
	if (error) {
3329
		device_printf(sc->vtnet_dev,
3330
		    "%s: cannot update Rx features\n", __func__);
3331
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3332
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3333
			vtnet_init_locked(sc);
3334
		}
3335
	} else
3336
		sc->vtnet_features = features;
3337
}
3338
3012
static int
3339
static int
3013
vtnet_reinit(struct vtnet_softc *sc)
3340
vtnet_reinit(struct vtnet_softc *sc)
3014
{
3341
{
3342
	device_t dev;
3015
	struct ifnet *ifp;
3343
	struct ifnet *ifp;
3016
	int error;
3344
	int error;
3017
3345
3346
	dev = sc->vtnet_dev;
3018
	ifp = sc->vtnet_ifp;
3347
	ifp = sc->vtnet_ifp;
3019
3348
3020
	/* Use the current MAC address. */
3021
	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
3349
	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
3022
	vtnet_set_hwaddr(sc);
3023
3350
3351
	error = vtnet_virtio_reinit(sc);
3352
	if (error)
3353
		return (error);
3354
3355
	vtnet_set_macaddr(sc);
3024
	vtnet_set_active_vq_pairs(sc);
3356
	vtnet_set_active_vq_pairs(sc);
3025
3357
3358
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
3359
		vtnet_init_rx_filters(sc);
3360
3026
	ifp->if_hwassist = 0;
3361
	ifp->if_hwassist = 0;
3027
	if (ifp->if_capenable & IFCAP_TXCSUM)
3362
	if (ifp->if_capenable & IFCAP_TXCSUM)
3028
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
3363
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
Lines 3033-3048 Link Here
3033
	if (ifp->if_capenable & IFCAP_TSO6)
3368
	if (ifp->if_capenable & IFCAP_TSO6)
3034
		ifp->if_hwassist |= CSUM_IP6_TSO;
3369
		ifp->if_hwassist |= CSUM_IP6_TSO;
3035
3370
3036
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
3037
		vtnet_init_rx_filters(sc);
3038
3039
	error = vtnet_init_rxtx_queues(sc);
3371
	error = vtnet_init_rxtx_queues(sc);
3040
	if (error)
3372
	if (error)
3041
		return (error);
3373
		return (error);
3042
3374
3043
	vtnet_enable_interrupts(sc);
3044
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3045
3046
	return (0);
3375
	return (0);
3047
}
3376
}
3048
3377
Lines 3062-3083
3062
3391
3063
	vtnet_stop(sc);
3392
	vtnet_stop(sc);
3064
3393
3065
	/* Reinitialize with the host. */
3394
	if (vtnet_reinit(sc) != 0) {
3066
	if (vtnet_virtio_reinit(sc) != 0)
3395
		vtnet_stop(sc);
3067
		goto fail;
3396
		return;
3397
	}
3068
3398
3069
	if (vtnet_reinit(sc) != 0)
3399
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3070
		goto fail;
3071
3072
	virtio_reinit_complete(dev);
3073
3074
	vtnet_update_link_status(sc);
3400
	vtnet_update_link_status(sc);
3401
	vtnet_enable_interrupts(sc);
3075
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
3402
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
3076
3077
	return;
3078
3079
fail:
3080
	vtnet_stop(sc);
3081
}
3403
}
3082
3404
3083
static void
3405
static void
Lines 3087-3092
3087
3409
3088
	sc = xsc;
3410
	sc = xsc;
3089
3411
3412
#ifdef DEV_NETMAP
3413
	if (!NA(sc->vtnet_ifp)) {
3414
		D("try to attach again");
3415
		vtnet_netmap_attach(sc);
3416
	}
3417
#endif
3418
3090
	VTNET_CORE_LOCK(sc);
3419
	VTNET_CORE_LOCK(sc);
3091
	vtnet_init_locked(sc);
3420
	vtnet_init_locked(sc);
3092
	VTNET_CORE_UNLOCK(sc);
3421
	VTNET_CORE_UNLOCK(sc);
Lines 3095-3110
3095
static void
3424
static void
3096
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
3425
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
3097
{
3426
{
3098
	struct virtqueue *vq;
3099
3427
3100
	vq = sc->vtnet_ctrl_vq;
3101
3102
	/*
3428
	/*
3103
	 * The control virtqueue is only polled and therefore it should
3429
	 * The control virtqueue is only polled and therefore it should
3104
	 * already be empty.
3430
	 * already be empty.
3105
	 */
3431
	 */
3106
	KASSERT(virtqueue_empty(vq),
3432
	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
3107
	    ("%s: ctrl vq %p not empty", __func__, vq));
3433
	    ("%s: ctrl vq %p not empty", __func__, sc->vtnet_ctrl_vq));
3108
}
3434
}
3109
3435
3110
static void
3436
static void
Lines 3115-3161
3115
3441
3116
	vq = sc->vtnet_ctrl_vq;
3442
	vq = sc->vtnet_ctrl_vq;
3117
3443
3444
	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ);
3118
	VTNET_CORE_LOCK_ASSERT(sc);
3445
	VTNET_CORE_LOCK_ASSERT(sc);
3119
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
3120
	    ("%s: CTRL_VQ feature not negotiated", __func__));
3121
3446
3122
	if (!virtqueue_empty(vq))
3447
	if (!virtqueue_empty(vq))
3123
		return;
3448
		return;
3124
	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
3125
		return;
3126
3449
3127
	/*
3450
	/*
3128
	 * Poll for the response, but the command is likely already
3451
	 * Poll for the response, but the command is likely completed before
3129
	 * done when we return from the notify.
3452
	 * returning from the notify.
3130
	 */
3453
	 */
3131
	virtqueue_notify(vq);
3454
	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) == 0) {
3132
	virtqueue_poll(vq, NULL);
3455
		virtqueue_notify(vq);
3456
		virtqueue_poll(vq, NULL);
3457
	}
3133
}
3458
}
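The reordered control path only notifies and polls once the command was actually enqueued. The shape of that polled exchange, with toy queue operations standing in for the virtqueue API (a sketch, not the driver's code):

#include <stdio.h>

static int
queue_enqueue(int *slot, int cmd)
{
	if (*slot != 0)
		return (-1);	/* queue full */
	*slot = cmd;
	return (0);
}

static void
queue_notify(int *slot)
{
	printf("notify: cmd %d submitted\n", *slot);
}

static int
queue_poll(int *slot)
{
	int done = *slot;

	*slot = 0;		/* device consumed the command */
	return (done);
}

int
main(void)
{
	int slot = 0;

	/* Notify and poll only after a successful enqueue. */
	if (queue_enqueue(&slot, 42) == 0) {
		queue_notify(&slot);
		printf("completed cmd %d\n", queue_poll(&slot));
	}
	return (0);
}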
3134
3459
3135
static int
3460
static int
3136
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
3461
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
3137
{
3462
{
3138
	struct virtio_net_ctrl_hdr hdr __aligned(2);
3139
	struct sglist_seg segs[3];
3463
	struct sglist_seg segs[3];
3140
	struct sglist sg;
3464
	struct sglist sg;
3141
	uint8_t ack;
3465
	struct {
3466
		struct virtio_net_ctrl_hdr hdr __aligned(2);
3467
		uint8_t pad1;
3468
		uint8_t addr[ETHER_ADDR_LEN] __aligned(8);
3469
		uint8_t pad2;
3470
		uint8_t ack;
3471
	} s;
3142
	int error;
3472
	int error;
3143
3473
3144
	hdr.class = VIRTIO_NET_CTRL_MAC;
3474
	error = 0;
3145
	hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
3475
	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_MAC);
3146
	ack = VIRTIO_NET_ERR;
3147
3476
3148
	sglist_init(&sg, 3, segs);
3477
	s.hdr.class = VIRTIO_NET_CTRL_MAC;
3478
	s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
3479
	bcopy(hwaddr, &s.addr[0], ETHER_ADDR_LEN);
3480
	s.ack = VIRTIO_NET_ERR;
3481
3482
	sglist_init(&sg, nitems(segs), segs);
3483
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3484
	error |= sglist_append(&sg, &s.addr[0], ETHER_ADDR_LEN);
3485
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3486
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3487
3488
	if (error == 0)
3489
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3490
3491
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3492
}
3493
3494
static int
3495
vtnet_ctrl_guest_offloads(struct vtnet_softc *sc, uint64_t offloads)
3496
{
3497
	struct sglist_seg segs[3];
3498
	struct sglist sg;
3499
	struct {
3500
		struct virtio_net_ctrl_hdr hdr __aligned(2);
3501
		uint8_t pad1;
3502
		uint64_t offloads __aligned(8);
3503
		uint8_t pad2;
3504
		uint8_t ack;
3505
	} s;
3506
	int error;
3507
3149
	error = 0;
3508
	error = 0;
3150
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3509
	MPASS(sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
3151
	error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN);
3152
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3153
	KASSERT(error == 0 && sg.sg_nseg == 3,
3154
	    ("%s: error %d adding set MAC msg to sglist", __func__, error));
3155
3510
3156
	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3511
	s.hdr.class = VIRTIO_NET_CTRL_GUEST_OFFLOADS;
3512
	s.hdr.cmd = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET;
3513
	s.offloads = vtnet_gtoh64(sc, offloads);
3514
	s.ack = VIRTIO_NET_ERR;
3157
3515
3158
	return (ack == VIRTIO_NET_OK ? 0 : EIO);
3516
	sglist_init(&sg, nitems(segs), segs);
3517
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3518
	error |= sglist_append(&sg, &s.offloads, sizeof(uint64_t));
3519
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3520
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3521
3522
	if (error == 0)
3523
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3524
3525
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3159
}
3526
}
3160
3527
3161
static int
3528
static int
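The on-stack command layout used by these helpers relies on explicit pad bytes so the header, payload, and ack can each be appended as separate scatter segments, with the payload carrying stricter alignment. A sketch that prints the resulting offsets (GCC/Clang __attribute__ used in place of FreeBSD's __aligned macro):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct virtio_net_ctrl_hdr: class + command bytes. */
struct ctrl_hdr {
	uint8_t class_;
	uint8_t cmd;
};

struct ctrl_msg {
	struct ctrl_hdr hdr __attribute__((aligned(2)));
	uint8_t pad1;
	uint64_t offloads __attribute__((aligned(8)));
	uint8_t pad2;
	uint8_t ack;
};

int
main(void)
{
	printf("hdr@%zu offloads@%zu ack@%zu size=%zu\n",
	    offsetof(struct ctrl_msg, hdr),
	    offsetof(struct ctrl_msg, offloads),
	    offsetof(struct ctrl_msg, ack),
	    sizeof(struct ctrl_msg));
	return (0);
}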
Lines 3164-3226
3164
	struct sglist_seg segs[3];
3531
	struct sglist_seg segs[3];
3165
	struct sglist sg;
3532
	struct sglist sg;
3166
	struct {
3533
	struct {
3167
		struct virtio_net_ctrl_hdr hdr;
3534
		struct virtio_net_ctrl_hdr hdr __aligned(2);
3168
		uint8_t pad1;
3535
		uint8_t pad1;
3169
		struct virtio_net_ctrl_mq mq;
3536
		struct virtio_net_ctrl_mq mq __aligned(2);
3170
		uint8_t pad2;
3537
		uint8_t pad2;
3171
		uint8_t ack;
3538
		uint8_t ack;
3172
	} s __aligned(2);
3539
	} s;
3173
	int error;
3540
	int error;
3174
3541
3542
	error = 0;
3543
	MPASS(sc->vtnet_flags & VTNET_FLAG_MQ);
3544
3175
	s.hdr.class = VIRTIO_NET_CTRL_MQ;
3545
	s.hdr.class = VIRTIO_NET_CTRL_MQ;
3176
	s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
3546
	s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
3177
	s.mq.virtqueue_pairs = npairs;
3547
	s.mq.virtqueue_pairs = vtnet_gtoh16(sc, npairs);
3178
	s.ack = VIRTIO_NET_ERR;
3548
	s.ack = VIRTIO_NET_ERR;
3179
3549
3180
	sglist_init(&sg, 3, segs);
3550
	sglist_init(&sg, nitems(segs), segs);
3181
	error = 0;
3182
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3551
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3183
	error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
3552
	error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
3184
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3553
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3185
	KASSERT(error == 0 && sg.sg_nseg == 3,
3554
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3186
	    ("%s: error %d adding MQ message to sglist", __func__, error));
3187
3555
3188
	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3556
	if (error == 0)
3557
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3189
3558
3190
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3559
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3191
}
3560
}
3192
3561
3193
static int
3562
static int
3194
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
3563
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, uint8_t cmd, int on)
3195
{
3564
{
3196
	struct sglist_seg segs[3];
3565
	struct sglist_seg segs[3];
3197
	struct sglist sg;
3566
	struct sglist sg;
3198
	struct {
3567
	struct {
3199
		struct virtio_net_ctrl_hdr hdr;
3568
		struct virtio_net_ctrl_hdr hdr __aligned(2);
3200
		uint8_t pad1;
3569
		uint8_t pad1;
3201
		uint8_t onoff;
3570
		uint8_t onoff;
3202
		uint8_t pad2;
3571
		uint8_t pad2;
3203
		uint8_t ack;
3572
		uint8_t ack;
3204
	} s __aligned(2);
3573
	} s;
3205
	int error;
3574
	int error;
3206
3575
3207
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
3576
	error = 0;
3208
	    ("%s: CTRL_RX feature not negotiated", __func__));
3577
	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX);
3209
3578
3210
	s.hdr.class = VIRTIO_NET_CTRL_RX;
3579
	s.hdr.class = VIRTIO_NET_CTRL_RX;
3211
	s.hdr.cmd = cmd;
3580
	s.hdr.cmd = cmd;
3212
	s.onoff = !!on;
3581
	s.onoff = !!on;
3213
	s.ack = VIRTIO_NET_ERR;
3582
	s.ack = VIRTIO_NET_ERR;
3214
3583
3215
	sglist_init(&sg, 3, segs);
3584
	sglist_init(&sg, nitems(segs), segs);
3216
	error = 0;
3217
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3585
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3218
	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
3586
	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
3219
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3587
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3220
	KASSERT(error == 0 && sg.sg_nseg == 3,
3588
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3221
	    ("%s: error %d adding Rx message to sglist", __func__, error));
3222
3589
3223
	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3590
	if (error == 0)
3591
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3224
3592
3225
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3593
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3226
}
3594
}
Lines 3228-3267
3228
static int
3596
static int
3229
vtnet_set_promisc(struct vtnet_softc *sc, int on)
3597
vtnet_set_promisc(struct vtnet_softc *sc, int on)
3230
{
3598
{
3231
3232
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
3599
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
3233
}
3600
}
3234
3601
3235
static int
3602
static int
3236
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
3603
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
3237
{
3604
{
3238
3239
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
3605
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
3240
}
3606
}
3241
3607
3242
/*
3243
 * The device defaults to promiscuous mode for backwards compatibility.
3244
 * Turn it off at attach time if possible.
3245
 */
3246
static void
3608
static void
3247
vtnet_attach_disable_promisc(struct vtnet_softc *sc)
3248
{
3249
	struct ifnet *ifp;
3250
3251
	ifp = sc->vtnet_ifp;
3252
3253
	VTNET_CORE_LOCK(sc);
3254
	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) {
3255
		ifp->if_flags |= IFF_PROMISC;
3256
	} else if (vtnet_set_promisc(sc, 0) != 0) {
3257
		ifp->if_flags |= IFF_PROMISC;
3258
		device_printf(sc->vtnet_dev,
3259
		    "cannot disable default promiscuous mode\n");
3260
	}
3261
	VTNET_CORE_UNLOCK(sc);
3262
}
3263
3264
static void
3265
vtnet_rx_filter(struct vtnet_softc *sc)
3609
vtnet_rx_filter(struct vtnet_softc *sc)
3266
{
3610
{
3267
	device_t dev;
3611
	device_t dev;
Lines 3272-3284
3272
3616
3273
	VTNET_CORE_LOCK_ASSERT(sc);
3617
	VTNET_CORE_LOCK_ASSERT(sc);
3274
3618
3275
	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
3619
	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) {
3276
		device_printf(dev, "cannot %s promiscuous mode\n",
3620
		device_printf(dev, "cannot %s promiscuous mode\n",
3277
		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
3621
		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
3622
	}
3278
3623
3279
	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
3624
	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) {
3280
		device_printf(dev, "cannot %s all-multicast mode\n",
3625
		device_printf(dev, "cannot %s all-multicast mode\n",
3281
		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
3626
		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
3627
	}
3282
}
3628
}
3283
3629
3284
static void
3630
static void
Lines 3296-3309 Link Here
3296
3642
3297
	ifp = sc->vtnet_ifp;
3643
	ifp = sc->vtnet_ifp;
3298
	filter = sc->vtnet_mac_filter;
3644
	filter = sc->vtnet_mac_filter;
3645
3299
	ucnt = 0;
3646
	ucnt = 0;
3300
	mcnt = 0;
3647
	mcnt = 0;
3301
	promisc = 0;
3648
	promisc = 0;
3302
	allmulti = 0;
3649
	allmulti = 0;
3650
	error = 0;
3303
3651
3652
	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX);
3304
	VTNET_CORE_LOCK_ASSERT(sc);
3653
	VTNET_CORE_LOCK_ASSERT(sc);
3305
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
3306
	    ("%s: CTRL_RX feature not negotiated", __func__));
3307
3654
3308
	/* Unicast MAC addresses: */
3655
	/* Unicast MAC addresses: */
3309
	if_addr_rlock(ifp);
3656
	if_addr_rlock(ifp);
Lines 3324-3337 Link Here
3324
	}
3671
	}
3325
	if_addr_runlock(ifp);
3672
	if_addr_runlock(ifp);
3326
3673
3327
	if (promisc != 0) {
3328
		filter->vmf_unicast.nentries = 0;
3329
		if_printf(ifp, "more than %d MAC addresses assigned, "
3330
		    "falling back to promiscuous mode\n",
3331
		    VTNET_MAX_MAC_ENTRIES);
3332
	} else
3333
		filter->vmf_unicast.nentries = ucnt;
3334
3335
	/* Multicast MAC addresses: */
3674
	/* Multicast MAC addresses: */
3336
	if_maddr_rlock(ifp);
3675
	if_maddr_rlock(ifp);
3337
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3676
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
Lines 3348-3381 Link Here
3348
	}
3687
	}
3349
	if_maddr_runlock(ifp);
3688
	if_maddr_runlock(ifp);
3350
3689
3690
	if (promisc != 0) {
3691
		if_printf(ifp, "cannot filter more than %d MAC addresses, "
3692
		    "falling back to promiscuous mode\n",
3693
		    VTNET_MAX_MAC_ENTRIES);
3694
		ucnt = 0;
3695
	}
3351
	if (allmulti != 0) {
3696
	if (allmulti != 0) {
3352
		filter->vmf_multicast.nentries = 0;
3697
		if_printf(ifp, "cannot filter more than %d multicast MAC "
3353
		if_printf(ifp, "more than %d multicast MAC addresses "
3698
		    "addresses, falling back to all-multicast mode\n",
3354
		    "assigned, falling back to all-multicast mode\n",
3355
		    VTNET_MAX_MAC_ENTRIES);
3699
		    VTNET_MAX_MAC_ENTRIES);
3356
	} else
3700
		mcnt = 0;
3357
		filter->vmf_multicast.nentries = mcnt;
3701
	}
3358
3702
3359
	if (promisc != 0 && allmulti != 0)
3703
	if (promisc != 0 && allmulti != 0)
3360
		goto out;
3704
		goto out;
3361
3705
3706
	filter->vmf_unicast.nentries = vtnet_gtoh32(sc, ucnt);
3707
	filter->vmf_multicast.nentries = vtnet_gtoh32(sc, mcnt);
3708
3362
	hdr.class = VIRTIO_NET_CTRL_MAC;
3709
	hdr.class = VIRTIO_NET_CTRL_MAC;
3363
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
3710
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
3364
	ack = VIRTIO_NET_ERR;
3711
	ack = VIRTIO_NET_ERR;
3365
3712
3366
	sglist_init(&sg, 4, segs);
3713
	sglist_init(&sg, nitems(segs), segs);
3367
	error = 0;
3368
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3714
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3369
	error |= sglist_append(&sg, &filter->vmf_unicast,
3715
	error |= sglist_append(&sg, &filter->vmf_unicast,
3370
	    sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
3716
	    sizeof(uint32_t) + ucnt * ETHER_ADDR_LEN);
3371
	error |= sglist_append(&sg, &filter->vmf_multicast,
3717
	error |= sglist_append(&sg, &filter->vmf_multicast,
3372
	    sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
3718
	    sizeof(uint32_t) + mcnt * ETHER_ADDR_LEN);
3373
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3719
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3374
	KASSERT(error == 0 && sg.sg_nseg == 4,
3720
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3375
	    ("%s: error %d adding MAC filter msg to sglist", __func__, error));
3376
3721
3377
	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3722
	if (error == 0)
3378
3723
		vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3379
	if (ack != VIRTIO_NET_OK)
3724
	if (ack != VIRTIO_NET_OK)
3380
		if_printf(ifp, "error setting host MAC filter table\n");
3725
		if_printf(ifp, "error setting host MAC filter table\n");
3381
3726
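
The vtnet_gtoh32()/vtnet_gtoh16() conversions above exist because the table
is shared with the host: modern (V1) VirtIO devices expect little-endian
fields, while legacy devices use guest-native byte order. A minimal sketch of
the helper, assuming it is implemented along these lines:

	#include <sys/endian.h>

	/* Assumption: guest-to-host conversion is a no-op on legacy VirtIO. */
	static inline uint32_t
	vtnet_gtoh32_sketch(struct vtnet_softc *sc, uint32_t val)
	{
		return (vtnet_modern(sc) ? htole32(val) : val);
	}
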
Lines 3392-3419 Link Here
3392
	struct sglist_seg segs[3];
3737
	struct sglist_seg segs[3];
3393
	struct sglist sg;
3738
	struct sglist sg;
3394
	struct {
3739
	struct {
3395
		struct virtio_net_ctrl_hdr hdr;
3740
		struct virtio_net_ctrl_hdr hdr __aligned(2);
3396
		uint8_t pad1;
3741
		uint8_t pad1;
3397
		uint16_t tag;
3742
		uint16_t tag __aligned(2);
3398
		uint8_t pad2;
3743
		uint8_t pad2;
3399
		uint8_t ack;
3744
		uint8_t ack;
3400
	} s __aligned(2);
3745
	} s;
3401
	int error;
3746
	int error;
3402
3747
3748
	error = 0;
3749
	MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER);
3750
3403
	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
3751
	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
3404
	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
3752
	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
3405
	s.tag = tag;
3753
	s.tag = vtnet_gtoh16(sc, tag);
3406
	s.ack = VIRTIO_NET_ERR;
3754
	s.ack = VIRTIO_NET_ERR;
3407
3755
3408
	sglist_init(&sg, 3, segs);
3756
	sglist_init(&sg, nitems(segs), segs);
3409
	error = 0;
3410
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3757
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3411
	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
3758
	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
3412
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3759
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3413
	KASSERT(error == 0 && sg.sg_nseg == 3,
3760
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3414
	    ("%s: error %d adding VLAN message to sglist", __func__, error));
3415
3761
3416
	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3762
	if (error == 0)
3763
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3417
3764
3418
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3765
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3419
}
3766
}
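
The callers below maintain a shadow of the host's VLAN table, one bit per
possible VLAN ID. For illustration, a small sketch of the tag-to-bit mapping
the loop relies on (a hypothetical helper; the driver open-codes this):

	/* A 12-bit VLAN ID selects one of 4096/32 words and a bit within it. */
	static inline bool
	vlan_filter_test(const uint32_t *table, uint16_t tag)
	{
		return ((table[(tag >> 5) & 0x7F] & (1U << (tag & 0x1F))) != 0);
	}
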
Lines 3421-3433 Link Here
3421
static void
3768
static void
3422
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
3769
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
3423
{
3770
{
3771
	int i, bit;
3424
	uint32_t w;
3772
	uint32_t w;
3425
	uint16_t tag;
3773
	uint16_t tag;
3426
	int i, bit;
3427
3774
3775
	MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER);
3428
	VTNET_CORE_LOCK_ASSERT(sc);
3776
	VTNET_CORE_LOCK_ASSERT(sc);
3429
	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
3430
	    ("%s: VLAN_FILTER feature not negotiated", __func__));
3431
3777
3432
	/* Enable the filter for each configured VLAN. */
3778
	/* Enable the filter for each configured VLAN. */
3433
	for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
3779
	for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
Lines 3466-3471 Link Here
3466
		sc->vtnet_vlan_filter[idx] &= ~(1 << bit);
3812
		sc->vtnet_vlan_filter[idx] &= ~(1 << bit);
3467
3813
3468
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
3814
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
3815
	    ifp->if_drv_flags & IFF_DRV_RUNNING &&
3469
	    vtnet_exec_vlan_filter(sc, add, tag) != 0) {
3816
	    vtnet_exec_vlan_filter(sc, add, tag) != 0) {
3470
		device_printf(sc->vtnet_dev,
3817
		device_printf(sc->vtnet_dev,
3471
		    "cannot %s VLAN %d %s the host filter table\n",
3818
		    "cannot %s VLAN %d %s the host filter table\n",
Lines 3495-3515 Link Here
3495
	vtnet_update_vlan_filter(arg, 0, tag);
3842
	vtnet_update_vlan_filter(arg, 0, tag);
3496
}
3843
}
3497
3844
3845
static void
3846
vtnet_update_speed_duplex(struct vtnet_softc *sc)
3847
{
3848
	struct ifnet *ifp;
3849
	uint32_t speed;
3850
3851
	ifp = sc->vtnet_ifp;
3852
3853
	if ((sc->vtnet_features & VIRTIO_NET_F_SPEED_DUPLEX) == 0)
3854
		return;
3855
3856
	/* BMV: Ignore duplex. */
3857
	speed = virtio_read_dev_config_4(sc->vtnet_dev,
3858
	    offsetof(struct virtio_net_config, speed));
3859
	if (speed != -1)
3860
		ifp->if_baudrate = IF_Mbps(speed);
3861
}
3862
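
Note the sentinel handling above: a config-space speed of all-ones means
"unknown", so comparing the uint32_t value against -1 (which converts to
0xffffffff) deliberately leaves if_baudrate untouched in that case.
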
3498
static int
3863
static int
3499
vtnet_is_link_up(struct vtnet_softc *sc)
3864
vtnet_is_link_up(struct vtnet_softc *sc)
3500
{
3865
{
3501
	device_t dev;
3502
	struct ifnet *ifp;
3503
	uint16_t status;
3866
	uint16_t status;
3504
3867
3505
	dev = sc->vtnet_dev;
3868
	if ((sc->vtnet_features & VIRTIO_NET_F_STATUS) == 0)
3506
	ifp = sc->vtnet_ifp;
3869
		return (1);
3507
3870
3508
	if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0)
3871
	status = virtio_read_dev_config_2(sc->vtnet_dev,
3509
		status = VIRTIO_NET_S_LINK_UP;
3872
	    offsetof(struct virtio_net_config, status));
3510
	else
3511
		status = virtio_read_dev_config_2(dev,
3512
		    offsetof(struct virtio_net_config, status));
3513
3873
3514
	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
3874
	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
3515
}
3875
}
Lines 3521-3532 Link Here
3521
	int link;
3881
	int link;
3522
3882
3523
	ifp = sc->vtnet_ifp;
3883
	ifp = sc->vtnet_ifp;
3524
3525
	VTNET_CORE_LOCK_ASSERT(sc);
3884
	VTNET_CORE_LOCK_ASSERT(sc);
3526
	link = vtnet_is_link_up(sc);
3885
	link = vtnet_is_link_up(sc);
3527
3886
3528
	/* Notify if the link status has changed. */
3887
	/* Notify if the link status has changed. */
3529
	if (link != 0 && sc->vtnet_link_active == 0) {
3888
	if (link != 0 && sc->vtnet_link_active == 0) {
3889
		vtnet_update_speed_duplex(sc);
3530
		sc->vtnet_link_active = 1;
3890
		sc->vtnet_link_active = 1;
3531
		if_link_state_change(ifp, LINK_STATE_UP);
3891
		if_link_state_change(ifp, LINK_STATE_UP);
3532
	} else if (link == 0 && sc->vtnet_link_active != 0) {
3892
	} else if (link == 0 && sc->vtnet_link_active != 0) {
Lines 3538-3553 Link Here
3538
static int
3898
static int
3539
vtnet_ifmedia_upd(struct ifnet *ifp)
3899
vtnet_ifmedia_upd(struct ifnet *ifp)
3540
{
3900
{
3541
	struct vtnet_softc *sc;
3901
	return (EOPNOTSUPP);
3542
	struct ifmedia *ifm;
3543
3544
	sc = ifp->if_softc;
3545
	ifm = &sc->vtnet_media;
3546
3547
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3548
		return (EINVAL);
3549
3550
	return (0);
3551
}
3902
}
3552
3903
3553
static void
3904
static void
Lines 3563-3587 Link Here
3563
	VTNET_CORE_LOCK(sc);
3914
	VTNET_CORE_LOCK(sc);
3564
	if (vtnet_is_link_up(sc) != 0) {
3915
	if (vtnet_is_link_up(sc) != 0) {
3565
		ifmr->ifm_status |= IFM_ACTIVE;
3916
		ifmr->ifm_status |= IFM_ACTIVE;
3566
		ifmr->ifm_active |= VTNET_MEDIATYPE;
3917
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
3567
	} else
3918
	} else
3568
		ifmr->ifm_active |= IFM_NONE;
3919
		ifmr->ifm_active |= IFM_NONE;
3569
	VTNET_CORE_UNLOCK(sc);
3920
	VTNET_CORE_UNLOCK(sc);
3570
}
3921
}
3571
3922
3572
static void
3923
static void
3573
vtnet_set_hwaddr(struct vtnet_softc *sc)
3924
vtnet_get_macaddr(struct vtnet_softc *sc)
3574
{
3925
{
3926
3927
	if (sc->vtnet_flags & VTNET_FLAG_MAC) {
3928
		virtio_read_device_config_array(sc->vtnet_dev,
3929
		    offsetof(struct virtio_net_config, mac),
3930
		    &sc->vtnet_hwaddr[0], sizeof(uint8_t), ETHER_ADDR_LEN);
3931
	} else {
3932
		/* Generate a random locally administered unicast address. */
3933
		sc->vtnet_hwaddr[0] = 0xB2;
3934
		arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
3935
	}
3936
}
3937
3938
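
The 0xB2 prefix is deliberate: it sets the locally administered bit and keeps
the multicast bit clear, so a generated address cannot collide with a
vendor-assigned OUI or be mistaken for a group address. A hypothetical check
of that invariant:

	/* 0xB2 = 10110010b: bit 1 (local) set, bit 0 (multicast) clear. */
	static bool
	mac_is_local_unicast(const uint8_t mac[ETHER_ADDR_LEN])
	{
		return ((mac[0] & 0x02) != 0 && (mac[0] & 0x01) == 0);
	}
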
static void
3939
vtnet_set_macaddr(struct vtnet_softc *sc)
3940
{
3575
	device_t dev;
3941
	device_t dev;
3576
	int i;
3942
	int error;
3577
3943
3578
	dev = sc->vtnet_dev;
3944
	dev = sc->vtnet_dev;
3579
3945
3580
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
3946
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
3581
		if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
3947
		error = vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr);
3948
		if (error)
3582
			device_printf(dev, "unable to set MAC address\n");
3949
			device_printf(dev, "unable to set MAC address\n");
3583
	} else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
3950
		return;
3584
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
3951
	}
3952
3953
	/* MAC in config is read-only in modern VirtIO. */
3954
	if (!vtnet_modern(sc) && sc->vtnet_flags & VTNET_FLAG_MAC) {
3955
		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
3585
			virtio_write_dev_config_1(dev,
3956
			virtio_write_dev_config_1(dev,
3586
			    offsetof(struct virtio_net_config, mac) + i,
3957
			    offsetof(struct virtio_net_config, mac) + i,
3587
			    sc->vtnet_hwaddr[i]);
3958
			    sc->vtnet_hwaddr[i]);
Lines 3590-3620 Link Here
3590
}
3961
}
3591
3962
3592
static void
3963
static void
3593
vtnet_get_hwaddr(struct vtnet_softc *sc)
3964
vtnet_attached_set_macaddr(struct vtnet_softc *sc)
3594
{
3965
{
3595
	device_t dev;
3596
	int i;
3597
3966
3598
	dev = sc->vtnet_dev;
3967
	/* Assign MAC address if it was generated. */
3599
3968
	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0)
3600
	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
3969
		vtnet_set_macaddr(sc);
3601
		/*
3602
		 * Generate a random locally administered unicast address.
3603
		 *
3604
		 * It would be nice to generate the same MAC address across
3605
		 * reboots, but it seems all the hosts currently available
3606
		 * support the MAC feature, so this isn't too important.
3607
		 */
3608
		sc->vtnet_hwaddr[0] = 0xB2;
3609
		arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
3610
		vtnet_set_hwaddr(sc);
3611
		return;
3612
	}
3613
3614
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
3615
		sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev,
3616
		    offsetof(struct virtio_net_config, mac) + i);
3617
	}
3618
}
3970
}
3619
3971
3620
static void
3972
static void
Lines 3645-3680 Link Here
3645
}
3997
}
3646
3998
3647
static void
3999
static void
3648
vtnet_set_tx_intr_threshold(struct vtnet_softc *sc)
3649
{
3650
	int size, thresh;
3651
3652
	size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq);
3653
3654
	/*
3655
	 * The Tx interrupt is disabled until the queue free count falls
3656
	 * below our threshold. Completed frames are drained from the Tx
3657
	 * virtqueue before transmitting new frames and in the watchdog
3658
	 * callout, so the frequency of Tx interrupts is greatly reduced,
3659
	 * at the cost of not freeing mbufs as quickly as they otherwise
3660
	 * would be.
3661
	 *
3662
	 * N.B. We assume all the Tx queues are the same size.
3663
	 */
3664
	thresh = size / 4;
3665
3666
	/*
3667
	 * Without indirect descriptors, leave enough room for the most
3668
	 * segments we handle.
3669
	 */
3670
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
3671
	    thresh < sc->vtnet_tx_nsegs)
3672
		thresh = sc->vtnet_tx_nsegs;
3673
3674
	sc->vtnet_tx_intr_thresh = thresh;
3675
}
3676
3677
static void
3678
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
4000
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
3679
    struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
4001
    struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
3680
{
4002
{
Lines 3702-3707 Link Here
3702
	    &stats->vrxs_csum, "Receive checksum offloaded");
4024
	    &stats->vrxs_csum, "Receive checksum offloaded");
3703
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
4025
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
3704
	    &stats->vrxs_csum_failed, "Receive checksum offload failed");
4026
	    &stats->vrxs_csum_failed, "Receive checksum offload failed");
4027
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", CTLFLAG_RD,
4028
	    &stats->vrxs_host_lro, "Receive host segmentation offloaded");
3705
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
4029
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3706
	    &stats->vrxs_rescheduled,
4030
	    &stats->vrxs_rescheduled,
3707
	    "Receive interrupt handler rescheduled");
4031
	    "Receive interrupt handler rescheduled");
Lines 3732-3738 Link Here
3732
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
4056
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3733
	    &stats->vtxs_csum, "Transmit checksum offloaded");
4057
	    &stats->vtxs_csum, "Transmit checksum offloaded");
3734
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
4058
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
3735
	    &stats->vtxs_tso, "Transmit segmentation offloaded");
4059
	    &stats->vtxs_tso, "Transmit TCP segmentation offloaded");
3736
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
4060
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3737
	    &stats->vtxs_rescheduled,
4061
	    &stats->vtxs_rescheduled,
3738
	    "Transmit interrupt handler rescheduled");
4062
	    "Transmit interrupt handler rescheduled");
Lines 3752-3758 Link Here
3752
	tree = device_get_sysctl_tree(dev);
4076
	tree = device_get_sysctl_tree(dev);
3753
	child = SYSCTL_CHILDREN(tree);
4077
	child = SYSCTL_CHILDREN(tree);
3754
4078
3755
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
4079
	for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
3756
		vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
4080
		vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
3757
		vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
4081
		vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
3758
	}
4082
	}
Lines 3812-3827 Link Here
3812
	    CTLFLAG_RD, &stats->rx_task_rescheduled,
4136
	    CTLFLAG_RD, &stats->rx_task_rescheduled,
3813
	    "Times the receive interrupt task rescheduled itself");
4137
	    "Times the receive interrupt task rescheduled itself");
3814
4138
3815
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
4139
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype",
3816
	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
4140
	    CTLFLAG_RD, &stats->tx_csum_unknown_ethtype,
3817
	    "Aborted transmit of checksum offloaded buffer with unknown "
4141
	    "Aborted transmit of checksum offloaded buffer with unknown "
3818
	    "Ethernet type");
4142
	    "Ethernet type");
3819
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
4143
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch",
3820
	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
4144
	    CTLFLAG_RD, &stats->tx_csum_proto_mismatch,
3821
	    "Aborted transmit of TSO buffer with unknown Ethernet type");
4145
	    "Aborted transmit of checksum offloaded buffer because mismatched "
4146
	    "protocols");
3822
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
4147
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
3823
	    CTLFLAG_RD, &stats->tx_tso_not_tcp,
4148
	    CTLFLAG_RD, &stats->tx_tso_not_tcp,
3824
	    "Aborted transmit of TSO buffer with non TCP protocol");
4149
	    "Aborted transmit of TSO buffer with non TCP protocol");
4150
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum",
4151
	    CTLFLAG_RD, &stats->tx_tso_without_csum,
4152
	    "Aborted transmit of TSO buffer without TCP checksum offload");
3825
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
4153
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
3826
	    CTLFLAG_RD, &stats->tx_defragged,
4154
	    CTLFLAG_RD, &stats->tx_defragged,
3827
	    "Transmit mbufs defragged");
4155
	    "Transmit mbufs defragged");
Lines 3854-3863 Link Here
3854
4182
3855
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
4183
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
3856
	    CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
4184
	    CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
3857
	    "Maximum number of supported virtqueue pairs");
4185
	    "Number of maximum supported virtqueue pairs");
3858
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "requested_vq_pairs",
4186
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "req_vq_pairs",
3859
	    CTLFLAG_RD, &sc->vtnet_requested_vq_pairs, 0,
4187
	    CTLFLAG_RD, &sc->vtnet_req_vq_pairs, 0,
3860
	    "Requested number of virtqueue pairs");
4188
	    "Number of requested virtqueue pairs");
3861
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
4189
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
3862
	    CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
4190
	    CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
3863
	    "Number of active virtqueue pairs");
4191
	    "Number of active virtqueue pairs");
Lines 3865-3870 Link Here
3865
	vtnet_setup_stat_sysctl(ctx, child, sc);
4193
	vtnet_setup_stat_sysctl(ctx, child, sc);
3866
}
4194
}
3867
4195
4196
static void
4197
vtnet_load_tunables(struct vtnet_softc *sc)
4198
{
4199
4200
	sc->vtnet_lro_entry_count = vtnet_tunable_int(sc,
4201
	    "lro_entry_count", vtnet_lro_entry_count);
4202
	if (sc->vtnet_lro_entry_count < TCP_LRO_ENTRIES)
4203
		sc->vtnet_lro_entry_count = TCP_LRO_ENTRIES;
4204
4205
	sc->vtnet_lro_mbufq_depth = vtnet_tunable_int(sc,
4206
	    "lro_mbufq_depeth", vtnet_lro_mbufq_depth);
4207
}
4208
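
Both values are boot-time tunables. As a usage example (assuming the
hw.vtnet prefix the driver's other knobs use), they can be pinned from
/boot/loader.conf:

	hw.vtnet.lro_entry_count="128"
	hw.vtnet.lro_mbufq_depth="0"

Values of lro_entry_count below TCP_LRO_ENTRIES are silently raised to that
floor by the clamp above.
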
3868
static int
4209
static int
3869
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
4210
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
3870
{
4211
{
Lines 3906-3915 Link Here
3906
static void
4247
static void
3907
vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
4248
vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
3908
{
4249
{
4250
	struct vtnet_rxq *rxq;
3909
	int i;
4251
	int i;
3910
4252
3911
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4253
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3912
		vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]);
4254
		rxq = &sc->vtnet_rxqs[i];
4255
		if (vtnet_rxq_enable_intr(rxq) != 0)
4256
			taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
4257
	}
3913
}
4258
}
3914
4259
3915
static void
4260
static void
Lines 3934-3940 Link Here
3934
{
4279
{
3935
	int i;
4280
	int i;
3936
4281
3937
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4282
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
3938
		vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
4283
		vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
3939
}
4284
}
3940
4285
Lines 3943-3949 Link Here
3943
{
4288
{
3944
	int i;
4289
	int i;
3945
4290
3946
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4291
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
3947
		vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
4292
		vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
3948
}
4293
}
3949
4294
(-)sys/dev/virtio/network/if_vtnetvar.h (-51 / +74 lines)
Lines 1-4 Link Here
1
/*-
1
/*-
2
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3
 *
2
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
4
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
3
 * All rights reserved.
5
 * All rights reserved.
4
 *
6
 *
Lines 41-49 Link Here
41
	uint64_t	rx_csum_bad_ipproto;
43
	uint64_t	rx_csum_bad_ipproto;
42
	uint64_t	rx_csum_bad_offset;
44
	uint64_t	rx_csum_bad_offset;
43
	uint64_t	rx_csum_bad_proto;
45
	uint64_t	rx_csum_bad_proto;
44
	uint64_t	tx_csum_bad_ethtype;
46
	uint64_t	tx_csum_unknown_ethtype;
45
	uint64_t	tx_tso_bad_ethtype;
47
	uint64_t	tx_csum_proto_mismatch;
46
	uint64_t	tx_tso_not_tcp;
48
	uint64_t	tx_tso_not_tcp;
49
	uint64_t	tx_tso_without_csum;
47
	uint64_t	tx_defragged;
50
	uint64_t	tx_defragged;
48
	uint64_t	tx_defrag_failed;
51
	uint64_t	tx_defrag_failed;
49
52
Lines 65-70 Link Here
65
	uint64_t	vrxs_ierrors;	/* if_ierrors */
68
	uint64_t	vrxs_ierrors;	/* if_ierrors */
66
	uint64_t	vrxs_csum;
69
	uint64_t	vrxs_csum;
67
	uint64_t	vrxs_csum_failed;
70
	uint64_t	vrxs_csum_failed;
71
	uint64_t	vrxs_host_lro;
68
	uint64_t	vrxs_rescheduled;
72
	uint64_t	vrxs_rescheduled;
69
};
73
};
70
74
Lines 77-82 Link Here
77
	struct vtnet_rxq_stats	 vtnrx_stats;
81
	struct vtnet_rxq_stats	 vtnrx_stats;
78
	struct taskqueue	*vtnrx_tq;
82
	struct taskqueue	*vtnrx_tq;
79
	struct task		 vtnrx_intrtask;
83
	struct task		 vtnrx_intrtask;
84
	struct lro_ctrl		 vtnrx_lro;
80
#ifdef DEV_NETMAP
85
#ifdef DEV_NETMAP
81
	struct virtio_net_hdr_mrg_rxbuf vtnrx_shrhdr;
86
	struct virtio_net_hdr_mrg_rxbuf vtnrx_shrhdr;
82
#endif  /* DEV_NETMAP */
87
#endif  /* DEV_NETMAP */
Lines 109-114 Link Here
109
#endif
114
#endif
110
	int			 vtntx_id;
115
	int			 vtntx_id;
111
	int			 vtntx_watchdog;
116
	int			 vtntx_watchdog;
117
	int			 vtntx_intr_threshold;
112
	struct vtnet_txq_stats	 vtntx_stats;
118
	struct vtnet_txq_stats	 vtntx_stats;
113
	struct taskqueue	*vtntx_tq;
119
	struct taskqueue	*vtntx_tq;
114
	struct task		 vtntx_intrtask;
120
	struct task		 vtntx_intrtask;
Lines 134-142 Link Here
134
	struct ifnet		*vtnet_ifp;
140
	struct ifnet		*vtnet_ifp;
135
	struct vtnet_rxq	*vtnet_rxqs;
141
	struct vtnet_rxq	*vtnet_rxqs;
136
	struct vtnet_txq	*vtnet_txqs;
142
	struct vtnet_txq	*vtnet_txqs;
143
	uint64_t		 vtnet_features;
137
144
138
	uint32_t		 vtnet_flags;
145
	uint32_t		 vtnet_flags;
139
#define VTNET_FLAG_SUSPENDED	 0x0001
146
#define VTNET_FLAG_MODERN	 0x0001
140
#define VTNET_FLAG_MAC		 0x0002
147
#define VTNET_FLAG_MAC		 0x0002
141
#define VTNET_FLAG_CTRL_VQ	 0x0004
148
#define VTNET_FLAG_CTRL_VQ	 0x0004
142
#define VTNET_FLAG_CTRL_RX	 0x0008
149
#define VTNET_FLAG_CTRL_RX	 0x0008
Lines 145-173 Link Here
145
#define VTNET_FLAG_TSO_ECN	 0x0040
152
#define VTNET_FLAG_TSO_ECN	 0x0040
146
#define VTNET_FLAG_MRG_RXBUFS	 0x0080
153
#define VTNET_FLAG_MRG_RXBUFS	 0x0080
147
#define VTNET_FLAG_LRO_NOMRG	 0x0100
154
#define VTNET_FLAG_LRO_NOMRG	 0x0100
148
#define VTNET_FLAG_MULTIQ	 0x0200
155
#define VTNET_FLAG_MQ		 0x0200
149
#define VTNET_FLAG_INDIRECT	 0x0400
156
#define VTNET_FLAG_INDIRECT	 0x0400
150
#define VTNET_FLAG_EVENT_IDX	 0x0800
157
#define VTNET_FLAG_EVENT_IDX	 0x0800
158
#define VTNET_FLAG_SUSPENDED	 0x1000
159
#define VTNET_FLAG_FIXUP_NEEDS_CSUM 0x2000
160
#define VTNET_FLAG_SW_LRO	 0x4000
151
161
152
	int			 vtnet_link_active;
153
	int			 vtnet_hdr_size;
162
	int			 vtnet_hdr_size;
154
	int			 vtnet_rx_process_limit;
155
	int			 vtnet_rx_nsegs;
156
	int			 vtnet_rx_nmbufs;
163
	int			 vtnet_rx_nmbufs;
157
	int			 vtnet_rx_clsize;
164
	int			 vtnet_rx_clustersz;
158
	int			 vtnet_rx_new_clsize;
165
	int			 vtnet_rx_nsegs;
159
	int			 vtnet_tx_intr_thresh;
166
	int			 vtnet_rx_process_limit;
160
	int			 vtnet_tx_nsegs;
167
	int			 vtnet_link_active;
161
	int			 vtnet_if_flags;
162
	int			 vtnet_act_vq_pairs;
168
	int			 vtnet_act_vq_pairs;
169
	int			 vtnet_req_vq_pairs;
163
	int			 vtnet_max_vq_pairs;
170
	int			 vtnet_max_vq_pairs;
164
	int			 vtnet_requested_vq_pairs;
171
	int			 vtnet_tx_nsegs;
172
	int			 vtnet_if_flags;
173
	int			 vtnet_max_mtu;
174
	int			 vtnet_lro_entry_count;
175
	int			 vtnet_lro_mbufq_depth;
165
176
166
	struct virtqueue	*vtnet_ctrl_vq;
177
	struct virtqueue	*vtnet_ctrl_vq;
167
	struct vtnet_mac_filter	*vtnet_mac_filter;
178
	struct vtnet_mac_filter	*vtnet_mac_filter;
168
	uint32_t		*vtnet_vlan_filter;
179
	uint32_t		*vtnet_vlan_filter;
169
180
170
	uint64_t		 vtnet_features;
181
	uint64_t		 vtnet_negotiated_features;
171
	struct vtnet_statistics	 vtnet_stats;
182
	struct vtnet_statistics	 vtnet_stats;
172
	struct callout		 vtnet_tick_ch;
183
	struct callout		 vtnet_tick_ch;
173
	struct ifmedia		 vtnet_media;
184
	struct ifmedia		 vtnet_media;
Lines 179-188 Link Here
179
	char			 vtnet_hwaddr[ETHER_ADDR_LEN];
190
	char			 vtnet_hwaddr[ETHER_ADDR_LEN];
180
};
191
};
181
192
193
static bool
194
vtnet_modern(struct vtnet_softc *sc)
195
{
196
	return ((sc->vtnet_flags & VTNET_FLAG_MODERN) != 0);
197
}
198
199
static bool
200
vtnet_software_lro(struct vtnet_softc *sc)
201
{
202
	return ((sc->vtnet_flags & VTNET_FLAG_SW_LRO) != 0);
203
}
204
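
These inline predicates gate behavior throughout the data path. As a
hypothetical receive-side call site (tcp_lro_queue_mbuf() is the kernel's
stock tcp_lro API; the driver's actual dispatch may differ):

	if (vtnet_software_lro(sc))
		tcp_lro_queue_mbuf(&rxq->vtnrx_lro, m);
	else
		(*ifp->if_input)(ifp, m);
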
182
/*
205
/*
183
 * Maximum number of queue pairs we will autoconfigure to.
206
 * Maximum number of queue pairs we will autoconfigure to.
184
 */
207
 */
185
#define VTNET_MAX_QUEUE_PAIRS	8
208
#define VTNET_MAX_QUEUE_PAIRS	32
186
209
187
/*
210
/*
188
 * Additional completed entries can appear in a virtqueue before we can
211
 * Additional completed entries can appear in a virtqueue before we can
Lines 200-224 Link Here
200
#define VTNET_NOTIFY_RETRIES		4
223
#define VTNET_NOTIFY_RETRIES		4
201
224
202
/*
225
/*
203
 * Fake the media type. The host does not provide us with any real media
204
 * information.
205
 */
206
#define VTNET_MEDIATYPE		 (IFM_ETHER | IFM_10G_T | IFM_FDX)
207
208
/*
209
 * Number of words to allocate for the VLAN shadow table. There is one
226
 * Number of words to allocate for the VLAN shadow table. There is one
210
 * bit for each VLAN.
227
 * bit for each VLAN.
211
 */
228
 */
212
#define VTNET_VLAN_FILTER_NWORDS	(4096 / 32)
229
#define VTNET_VLAN_FILTER_NWORDS	(4096 / 32)
213
230
231
/* We depend on these being the same size (and same layout). */
232
CTASSERT(sizeof(struct virtio_net_hdr_mrg_rxbuf) ==
233
    sizeof(struct virtio_net_hdr_v1));
234
214
/*
235
/*
215
 * When mergeable buffers are not negotiated, the vtnet_rx_header structure
236
 * In legacy VirtIO when mergeable buffers are not negotiated, this structure
216
 * below is placed at the beginning of the mbuf data. Use 4 bytes of pad to
237
 * is placed at the beginning of the mbuf data. Use 4 bytes of pad to keep
217
 * both keep the VirtIO header and the data non-contiguous and to keep the
238
 * both the VirtIO header and the data non-contiguous and the frame's payload
218
 * frame's payload 4 byte aligned.
239
 * 4 byte aligned. Note this padding would not be necessary if the
240
 * VIRTIO_F_ANY_LAYOUT feature was negotiated (but we don't support that yet).
219
 *
241
 *
220
 * When mergeable buffers are negotiated, the host puts the VirtIO header in
242
 * In modern VirtIO or when mergeable buffers are negotiated, the host puts
221
 * the beginning of the first mbuf's data.
243
 * the VirtIO header at the beginning of the first mbuf's data.
222
 */
244
 */
223
#define VTNET_RX_HEADER_PAD	4
245
#define VTNET_RX_HEADER_PAD	4
224
struct vtnet_rx_header {
246
struct vtnet_rx_header {
Lines 234-239 Link Here
234
	union {
256
	union {
235
		struct virtio_net_hdr		hdr;
257
		struct virtio_net_hdr		hdr;
236
		struct virtio_net_hdr_mrg_rxbuf	mhdr;
258
		struct virtio_net_hdr_mrg_rxbuf	mhdr;
259
		struct virtio_net_hdr_v1	v1hdr;
237
	} vth_uhdr;
260
	} vth_uhdr;
238
261
239
	struct mbuf *vth_mbuf;
262
	struct mbuf *vth_mbuf;
Lines 248-253 Link Here
248
 */
271
 */
249
#define VTNET_MAX_MAC_ENTRIES	128
272
#define VTNET_MAX_MAC_ENTRIES	128
250
273
274
/*
275
 * The driver version of struct virtio_net_ctrl_mac but with our predefined
276
 * number of MAC addresses allocated. This structure is shared with the host,
277
 * so the nentries field is kept in the correct VirtIO endianness.
278
 */
251
struct vtnet_mac_table {
279
struct vtnet_mac_table {
252
	uint32_t	nentries;
280
	uint32_t	nentries;
253
	uint8_t		macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
281
	uint8_t		macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
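
With 128 entries, each table is 4 + 128 * 6 = 772 bytes, so the unicast and
multicast tables passed in a single filter command together stay well under
one page.
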
Lines 273-287 Link Here
273
    (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO)
301
    (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO)
274
302
275
/* Features desired/implemented by this driver. */
303
/* Features desired/implemented by this driver. */
276
#define VTNET_FEATURES \
304
#define VTNET_COMMON_FEATURES \
277
    (VIRTIO_NET_F_MAC			| \
305
    (VIRTIO_NET_F_MAC			| \
278
     VIRTIO_NET_F_STATUS		| \
306
     VIRTIO_NET_F_STATUS		| \
307
     VIRTIO_NET_F_CTRL_GUEST_OFFLOADS	| \
308
     VIRTIO_NET_F_MTU			| \
279
     VIRTIO_NET_F_CTRL_VQ		| \
309
     VIRTIO_NET_F_CTRL_VQ		| \
280
     VIRTIO_NET_F_CTRL_RX		| \
310
     VIRTIO_NET_F_CTRL_RX		| \
281
     VIRTIO_NET_F_CTRL_MAC_ADDR		| \
311
     VIRTIO_NET_F_CTRL_MAC_ADDR		| \
282
     VIRTIO_NET_F_CTRL_VLAN		| \
312
     VIRTIO_NET_F_CTRL_VLAN		| \
283
     VIRTIO_NET_F_CSUM			| \
313
     VIRTIO_NET_F_CSUM			| \
284
     VIRTIO_NET_F_GSO			| \
285
     VIRTIO_NET_F_HOST_TSO4		| \
314
     VIRTIO_NET_F_HOST_TSO4		| \
286
     VIRTIO_NET_F_HOST_TSO6		| \
315
     VIRTIO_NET_F_HOST_TSO6		| \
287
     VIRTIO_NET_F_HOST_ECN		| \
316
     VIRTIO_NET_F_HOST_ECN		| \
Lines 291-299 Link Here
291
     VIRTIO_NET_F_GUEST_ECN		| \
320
     VIRTIO_NET_F_GUEST_ECN		| \
292
     VIRTIO_NET_F_MRG_RXBUF		| \
321
     VIRTIO_NET_F_MRG_RXBUF		| \
293
     VIRTIO_NET_F_MQ			| \
322
     VIRTIO_NET_F_MQ			| \
323
     VIRTIO_NET_F_SPEED_DUPLEX		| \
294
     VIRTIO_RING_F_EVENT_IDX		| \
324
     VIRTIO_RING_F_EVENT_IDX		| \
295
     VIRTIO_RING_F_INDIRECT_DESC)
325
     VIRTIO_RING_F_INDIRECT_DESC)
296
326
327
#define VTNET_MODERN_FEATURES (VTNET_COMMON_FEATURES)
328
#define VTNET_LEGACY_FEATURES (VTNET_COMMON_FEATURES | VIRTIO_NET_F_GSO)
329
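
Splitting the mask this way keeps the deprecated VIRTIO_NET_F_GSO bit out of
modern (V1) negotiation. A sketch of how a negotiation call site would pick a
mask ("transport_is_modern" is a stand-in for whatever predicate the bus
exposes; virtio_negotiate_features() is the existing VirtIO bus method):

	uint64_t features;

	features = transport_is_modern(dev) ?
	    VTNET_MODERN_FEATURES : VTNET_LEGACY_FEATURES;
	sc->vtnet_features = virtio_negotiate_features(dev, features);
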
297
/*
330
/*
298
 * The VIRTIO_NET_F_HOST_TSO[46] features permit us to send the host
331
 * The VIRTIO_NET_F_HOST_TSO[46] features permit us to send the host
299
 * frames larger than 1514 bytes.
332
 * frames larger than 1514 bytes.
Lines 303-350 Link Here
303
336
304
/*
337
/*
305
 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
338
 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
306
 * frames larger than 1514 bytes. We do not yet support software LRO
339
 * frames larger than 1514 bytes.
307
 * via tcp_lro_rx().
308
 */
340
 */
309
#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
341
#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
310
    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)
342
    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)
311
343
344
#define VTNET_MIN_MTU		68
312
#define VTNET_MAX_MTU		65536
345
#define VTNET_MAX_MTU		65536
313
#define VTNET_MAX_RX_SIZE	65550
346
#define VTNET_MAX_RX_SIZE	65550
314
347
315
/*
348
/*
316
 * Used to preallocate the Vq indirect descriptors. The first segment
349
 * Used to preallocate the VQ indirect descriptors. Modern and mergeable
317
 * is reserved for the header, except for mergeable buffers since the
350
 * buffers do not require a separate segment for the VirtIO header since it is
318
 * header is placed inline with the data.
351
 * placed inline at the beginning of the receive buffer.
319
 */
352
 */
320
#define VTNET_MRG_RX_SEGS	1
353
#define VTNET_RX_SEGS_HDR_INLINE	1
321
#define VTNET_MIN_RX_SEGS	2
354
#define VTNET_RX_SEGS_HDR_SEPARATE	2
322
#define VTNET_MAX_RX_SEGS	34
355
#define VTNET_RX_SEGS_LRO_NOMRG		34
323
#define VTNET_MIN_TX_SEGS	4
356
#define VTNET_TX_SEGS_MIN		32
324
#define VTNET_MAX_TX_SEGS	64
357
#define VTNET_TX_SEGS_MAX		64
325
358
326
/*
359
/*
327
 * Assert we can receive and transmit the maximum with regular
360
 * Assert we can receive and transmit the maximum with regular
328
 * size clusters.
361
 * size clusters.
329
 */
362
 */
330
CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
363
CTASSERT(((VTNET_RX_SEGS_LRO_NOMRG - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
331
CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU);
364
CTASSERT(((VTNET_TX_SEGS_MAX - 1) * MCLBYTES) >= VTNET_MAX_MTU);
332
365
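
For the stock 2048-byte mbuf cluster these assertions work out to
(34 - 1) * 2048 = 67584 >= 65550 and (64 - 1) * 2048 = 129024 >= 65536, the
subtracted segment being the one reserved for the separate VirtIO header.
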
333
/*
366
/*
334
 * Number of slots in the Tx bufrings. This value matches most other
367
 * Number of slots in the Tx bufrings. This value matches most other
335
 * multiqueue drivers.
368
 * multiqueue drivers.
336
 */
369
 */
337
#define VTNET_DEFAULT_BUFRING_SIZE	4096
370
#define VTNET_DEFAULT_BUFRING_SIZE	4096
338
339
/*
340
 * Determine how many mbufs are in each receive buffer. For LRO without
341
 * mergeable buffers, we must allocate an mbuf chain large enough to
342
 * hold both the vtnet_rx_header and the maximum receivable data.
343
 */
344
#define VTNET_NEEDED_RX_MBUFS(_sc, _clsize)				\
345
	((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 :		\
346
	    howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE,	\
347
	        (_clsize))
348
371
349
#define VTNET_CORE_MTX(_sc)		&(_sc)->vtnet_mtx
372
#define VTNET_CORE_MTX(_sc)		&(_sc)->vtnet_mtx
350
#define VTNET_CORE_LOCK(_sc)		mtx_lock(VTNET_CORE_MTX((_sc)))
373
#define VTNET_CORE_LOCK(_sc)		mtx_lock(VTNET_CORE_MTX((_sc)))
(-)sys/dev/virtio/network/virtio_net.h (-25 / +80 lines)
Lines 1-4 Link Here
1
/*-
1
/*-
2
 * SPDX-License-Identifier: BSD-3-Clause
3
 *
2
 * This header is BSD licensed so anyone can use the definitions to implement
4
 * This header is BSD licensed so anyone can use the definitions to implement
3
 * compatible drivers/servers.
5
 * compatible drivers/servers.
4
 *
6
 *
Lines 32-60 Link Here
32
#define _VIRTIO_NET_H
34
#define _VIRTIO_NET_H
33
35
34
/* The feature bitmap for virtio net */
36
/* The feature bitmap for virtio net */
35
#define VIRTIO_NET_F_CSUM	0x00001 /* Host handles pkts w/ partial csum */
37
#define VIRTIO_NET_F_CSUM		 0x000001 /* Host handles pkts w/ partial csum */
36
#define VIRTIO_NET_F_GUEST_CSUM 0x00002 /* Guest handles pkts w/ partial csum*/
38
#define VIRTIO_NET_F_GUEST_CSUM		 0x000002 /* Guest handles pkts w/ partial csum*/
37
#define VIRTIO_NET_F_MAC	0x00020 /* Host has given MAC address. */
39
#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 0x000004 /* Dynamic offload configuration. */
38
#define VIRTIO_NET_F_GSO	0x00040 /* Host handles pkts w/ any GSO type */
40
#define VIRTIO_NET_F_MTU		 0x000008 /* Initial MTU advice */
39
#define VIRTIO_NET_F_GUEST_TSO4	0x00080 /* Guest can handle TSOv4 in. */
41
#define VIRTIO_NET_F_MAC		 0x000020 /* Host has given MAC address. */
40
#define VIRTIO_NET_F_GUEST_TSO6	0x00100 /* Guest can handle TSOv6 in. */
42
#define VIRTIO_NET_F_GSO		 0x000040 /* Host handles pkts w/ any GSO type */
41
#define VIRTIO_NET_F_GUEST_ECN	0x00200 /* Guest can handle TSO[6] w/ ECN in.*/
43
#define VIRTIO_NET_F_GUEST_TSO4		 0x000080 /* Guest can handle TSOv4 in. */
42
#define VIRTIO_NET_F_GUEST_UFO	0x00400 /* Guest can handle UFO in. */
44
#define VIRTIO_NET_F_GUEST_TSO6		 0x000100 /* Guest can handle TSOv6 in. */
43
#define VIRTIO_NET_F_HOST_TSO4	0x00800 /* Host can handle TSOv4 in. */
45
#define VIRTIO_NET_F_GUEST_ECN		 0x000200 /* Guest can handle TSO[6] w/ ECN in. */
44
#define VIRTIO_NET_F_HOST_TSO6	0x01000 /* Host can handle TSOv6 in. */
46
#define VIRTIO_NET_F_GUEST_UFO		 0x000400 /* Guest can handle UFO in. */
45
#define VIRTIO_NET_F_HOST_ECN	0x02000 /* Host can handle TSO[6] w/ ECN in. */
47
#define VIRTIO_NET_F_HOST_TSO4		 0x000800 /* Host can handle TSOv4 in. */
46
#define VIRTIO_NET_F_HOST_UFO	0x04000 /* Host can handle UFO in. */
48
#define VIRTIO_NET_F_HOST_TSO6		 0x001000 /* Host can handle TSOv6 in. */
47
#define VIRTIO_NET_F_MRG_RXBUF	0x08000 /* Host can merge receive buffers. */
49
#define VIRTIO_NET_F_HOST_ECN		 0x002000 /* Host can handle TSO[6] w/ ECN in. */
48
#define VIRTIO_NET_F_STATUS	0x10000 /* virtio_net_config.status available*/
50
#define VIRTIO_NET_F_HOST_UFO		 0x004000 /* Host can handle UFO in. */
49
#define VIRTIO_NET_F_CTRL_VQ	0x20000 /* Control channel available */
51
#define VIRTIO_NET_F_MRG_RXBUF		 0x008000 /* Host can merge receive buffers. */
50
#define VIRTIO_NET_F_CTRL_RX	0x40000 /* Control channel RX mode support */
52
#define VIRTIO_NET_F_STATUS		 0x010000 /* virtio_net_config.status available*/
51
#define VIRTIO_NET_F_CTRL_VLAN	0x80000 /* Control channel VLAN filtering */
53
#define VIRTIO_NET_F_CTRL_VQ		 0x020000 /* Control channel available */
52
#define VIRTIO_NET_F_CTRL_RX_EXTRA 0x100000 /* Extra RX mode control support */
54
#define VIRTIO_NET_F_CTRL_RX		 0x040000 /* Control channel RX mode support */
53
#define VIRTIO_NET_F_GUEST_ANNOUNCE 0x200000 /* Announce device on network */
55
#define VIRTIO_NET_F_CTRL_VLAN		 0x080000 /* Control channel VLAN filtering */
54
#define VIRTIO_NET_F_MQ		0x400000 /* Device supports RFS */
56
#define VIRTIO_NET_F_CTRL_RX_EXTRA	 0x100000 /* Extra RX mode control support */
55
#define VIRTIO_NET_F_CTRL_MAC_ADDR 0x800000 /* Set MAC address */
57
#define VIRTIO_NET_F_GUEST_ANNOUNCE	 0x200000 /* Announce device on network */
58
#define VIRTIO_NET_F_MQ			 0x400000 /* Device supports Receive Flow Steering */
59
#define VIRTIO_NET_F_CTRL_MAC_ADDR	 0x800000 /* Set MAC address */
60
#define VIRTIO_NET_F_SPEED_DUPLEX	 (1ULL << 63) /* Device set linkspeed and duplex */
56
61
57
#define VIRTIO_NET_S_LINK_UP	1	/* Link is up */
62
#define VIRTIO_NET_S_LINK_UP	1	/* Link is up */
63
#define VIRTIO_NET_S_ANNOUNCE	2	/* Announcement is needed */
58
64
59
struct virtio_net_config {
65
struct virtio_net_config {
60
	/* The config defining mac address (if VIRTIO_NET_F_MAC) */
66
	/* The config defining mac address (if VIRTIO_NET_F_MAC) */
Lines 66-81 Link Here
66
	 * Legal values are between 1 and 0x8000.
72
	 * Legal values are between 1 and 0x8000.
67
	 */
73
	 */
68
	uint16_t	max_virtqueue_pairs;
74
	uint16_t	max_virtqueue_pairs;
75
	/* Default maximum transmit unit advice */
76
	uint16_t	mtu;
77
	/*
78
	 * speed, in units of 1Mb. All values 0 to INT_MAX are legal.
79
	 * Any other value stands for unknown.
80
	 */
81
	uint32_t	speed;
82
	/*
83
	 * 0x00 - half duplex
84
	 * 0x01 - full duplex
85
	 * Any other value stands for unknown.
86
	 */
87
	uint8_t		duplex;
69
} __packed;
88
} __packed;
70
89
71
/*
90
/*
72
 * This is the first element of the scatter-gather list.  If you don't
91
 * This header comes first in the scatter-gather list.  If you don't
73
 * specify GSO or CSUM features, you can simply ignore the header.
92
 * specify GSO or CSUM features, you can simply ignore the header.
93
 *
94
 * This is bitwise-equivalent to the legacy struct virtio_net_hdr_mrg_rxbuf,
95
 * only flattened.
74
 */
96
 */
75
struct virtio_net_hdr {
97
struct virtio_net_hdr_v1 {
76
#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* Use csum_start,csum_offset*/
98
#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* Use csum_start, csum_offset */
77
#define VIRTIO_NET_HDR_F_DATA_VALID	2	/* Csum is valid */
99
#define VIRTIO_NET_HDR_F_DATA_VALID	2	/* Csum is valid */
78
	uint8_t	flags;
100
	uint8_t flags;
79
#define VIRTIO_NET_HDR_GSO_NONE		0	/* Not a GSO frame */
101
#define VIRTIO_NET_HDR_GSO_NONE		0	/* Not a GSO frame */
80
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* GSO frame, IPv4 TCP (TSO) */
102
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* GSO frame, IPv4 TCP (TSO) */
81
#define VIRTIO_NET_HDR_GSO_UDP		3	/* GSO frame, IPv4 UDP (UFO) */
103
#define VIRTIO_NET_HDR_GSO_UDP		3	/* GSO frame, IPv4 UDP (UFO) */
Lines 86-94 Link Here
86
	uint16_t gso_size;	/* Bytes to append to hdr_len per frame */
108
	uint16_t gso_size;	/* Bytes to append to hdr_len per frame */
87
	uint16_t csum_start;	/* Position to start checksumming from */
109
	uint16_t csum_start;	/* Position to start checksumming from */
88
	uint16_t csum_offset;	/* Offset after that to place checksum */
110
	uint16_t csum_offset;	/* Offset after that to place checksum */
111
	uint16_t num_buffers;	/* Number of merged rx buffers */
89
};
112
};
90
113
91
/*
114
/*
115
 * This header comes first in the scatter-gather list.
116
 * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must
117
 * be the first element of the scatter-gather list.  If you don't
118
 * specify GSO or CSUM features, you can simply ignore the header.
119
 */
120
struct virtio_net_hdr {
121
	/* See VIRTIO_NET_HDR_F_* */
122
	uint8_t	flags;
123
	/* See VIRTIO_NET_HDR_GSO_* */
124
	uint8_t gso_type;
125
	uint16_t hdr_len;	/* Ethernet + IP + tcp/udp hdrs */
126
	uint16_t gso_size;	/* Bytes to append to hdr_len per frame */
127
	uint16_t csum_start;	/* Position to start checksumming from */
128
	uint16_t csum_offset;	/* Offset after that to place checksum */
129
};
130
131
/*
92
 * This is the version of the header to use when the MRG_RXBUF
132
 * This is the version of the header to use when the MRG_RXBUF
93
 * feature has been negotiated.
133
 * feature has been negotiated.
94
 */
134
 */
Lines 198-202 Link Here
198
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET		0
238
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET		0
199
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN		1
239
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN		1
200
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX		0x8000
240
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX		0x8000
241
242
/*
243
 * Control network offloads
244
 *
245
 * Reconfigures the network offloads that Guest can handle.
246
 *
247
 * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
248
 *
249
 * Command data format matches the feature bit mask exactly.
250
 *
251
 * See VIRTIO_NET_F_GUEST_* for the list of offloads
252
 * that can be enabled/disabled.
253
 */
254
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS		5
255
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET	0
201
256
202
#endif /* _VIRTIO_NET_H */
257
#endif /* _VIRTIO_NET_H */
(-)sys/dev/virtio/pci/virtio_pci.c (-837 / +510 lines)
Lines 1-5 Link Here
1
/*-
1
/*-
2
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
2
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3
 *
4
 * Copyright (c) 2017, Bryan Venteicher <bryanv@FreeBSD.org>
3
 * All rights reserved.
5
 * All rights reserved.
4
 *
6
 *
5
 * Redistribution and use in source and binary forms, with or without
7
 * Redistribution and use in source and binary forms, with or without
Lines 33-38 Link Here
33
#include <sys/systm.h>
35
#include <sys/systm.h>
34
#include <sys/bus.h>
36
#include <sys/bus.h>
35
#include <sys/kernel.h>
37
#include <sys/kernel.h>
38
#include <sys/sbuf.h>
39
#include <sys/sysctl.h>
36
#include <sys/module.h>
40
#include <sys/module.h>
37
#include <sys/malloc.h>
41
#include <sys/malloc.h>
38
42
Lines 47-401 Link Here
47
#include <dev/virtio/virtio.h>
51
#include <dev/virtio/virtio.h>
48
#include <dev/virtio/virtqueue.h>
52
#include <dev/virtio/virtqueue.h>
49
#include <dev/virtio/pci/virtio_pci.h>
53
#include <dev/virtio/pci/virtio_pci.h>
54
#include <dev/virtio/pci/virtio_pci_var.h>
50
55
51
#include "virtio_bus_if.h"
56
#include "virtio_pci_if.h"
52
#include "virtio_if.h"
57
#include "virtio_if.h"
53
58
54
struct vtpci_interrupt {
59
static void	vtpci_describe_features(struct vtpci_common *, const char *,
55
	struct resource		*vti_irq;
56
	int			 vti_rid;
57
	void			*vti_handler;
58
};
59
60
struct vtpci_virtqueue {
61
	struct virtqueue	*vtv_vq;
62
	int			 vtv_no_intr;
63
};
64
65
struct vtpci_softc {
66
	device_t			 vtpci_dev;
67
	struct resource			*vtpci_res;
68
	struct resource			*vtpci_msix_res;
69
	uint64_t			 vtpci_features;
70
	uint32_t			 vtpci_flags;
71
#define VTPCI_FLAG_NO_MSI		0x0001
72
#define VTPCI_FLAG_NO_MSIX		0x0002
73
#define VTPCI_FLAG_LEGACY		0x1000
74
#define VTPCI_FLAG_MSI			0x2000
75
#define VTPCI_FLAG_MSIX			0x4000
76
#define VTPCI_FLAG_SHARED_MSIX		0x8000
77
#define VTPCI_FLAG_ITYPE_MASK		0xF000
78
79
	/* This "bus" will only ever have one child. */
80
	device_t			 vtpci_child_dev;
81
	struct virtio_feature_desc	*vtpci_child_feat_desc;
82
83
	int				 vtpci_nvqs;
84
	struct vtpci_virtqueue		*vtpci_vqs;
85
86
	/*
87
	 * Ideally, each virtqueue that the driver provides a callback for will
88
	 * receive its own MSIX vector. If there are not sufficient vectors
89
	 * available, then attempt to have all the VQs share one vector. For
90
	 * MSIX, the configuration changed notifications must be on their own
91
	 * vector.
92
	 *
93
	 * If MSIX is not available, we will attempt to have the whole device
94
	 * share one MSI vector, and then, finally, one legacy interrupt.
95
	 */
96
	struct vtpci_interrupt		 vtpci_device_interrupt;
97
	struct vtpci_interrupt		*vtpci_msix_vq_interrupts;
98
	int				 vtpci_nmsix_resources;
99
};
100
101
static int	vtpci_probe(device_t);
102
static int	vtpci_attach(device_t);
103
static int	vtpci_detach(device_t);
104
static int	vtpci_suspend(device_t);
105
static int	vtpci_resume(device_t);
106
static int	vtpci_shutdown(device_t);
107
static void	vtpci_driver_added(device_t, driver_t *);
108
static void	vtpci_child_detached(device_t, device_t);
109
static int	vtpci_read_ivar(device_t, device_t, int, uintptr_t *);
110
static int	vtpci_write_ivar(device_t, device_t, int, uintptr_t);
111
112
static uint64_t	vtpci_negotiate_features(device_t, uint64_t);
113
static int	vtpci_with_feature(device_t, uint64_t);
114
static int	vtpci_alloc_virtqueues(device_t, int, int,
115
		    struct vq_alloc_info *);
116
static int	vtpci_setup_intr(device_t, enum intr_type);
117
static void	vtpci_stop(device_t);
118
static int	vtpci_reinit(device_t, uint64_t);
119
static void	vtpci_reinit_complete(device_t);
120
static void	vtpci_notify_virtqueue(device_t, uint16_t);
121
static uint8_t	vtpci_get_status(device_t);
122
static void	vtpci_set_status(device_t, uint8_t);
123
static void	vtpci_read_dev_config(device_t, bus_size_t, void *, int);
124
static void	vtpci_write_dev_config(device_t, bus_size_t, void *, int);
125
126
static void	vtpci_describe_features(struct vtpci_softc *, const char *,
127
		    uint64_t);
60
		    uint64_t);
128
static void	vtpci_probe_and_attach_child(struct vtpci_softc *);
61
static int	vtpci_alloc_msix(struct vtpci_common *, int);
129
62
static int	vtpci_alloc_msi(struct vtpci_common *);
130
static int	vtpci_alloc_msix(struct vtpci_softc *, int);
63
static int	vtpci_alloc_intr_msix_pervq(struct vtpci_common *);
131
static int	vtpci_alloc_msi(struct vtpci_softc *);
64
static int	vtpci_alloc_intr_msix_shared(struct vtpci_common *);
132
static int	vtpci_alloc_intr_msix_pervq(struct vtpci_softc *);
65
static int	vtpci_alloc_intr_msi(struct vtpci_common *);
133
static int	vtpci_alloc_intr_msix_shared(struct vtpci_softc *);
66
static int	vtpci_alloc_intr_intx(struct vtpci_common *);
134
static int	vtpci_alloc_intr_msi(struct vtpci_softc *);
67
static int	vtpci_alloc_interrupt(struct vtpci_common *, int, int,
135
static int	vtpci_alloc_intr_legacy(struct vtpci_softc *);
136
static int	vtpci_alloc_interrupt(struct vtpci_softc *, int, int,
137
		    struct vtpci_interrupt *);
68
		    struct vtpci_interrupt *);
138
static int	vtpci_alloc_intr_resources(struct vtpci_softc *);
69
static void	vtpci_free_interrupt(struct vtpci_common *,
70
		    struct vtpci_interrupt *);
139
71
140
static int	vtpci_setup_legacy_interrupt(struct vtpci_softc *,
72
static void	vtpci_free_interrupts(struct vtpci_common *);
73
static void	vtpci_free_virtqueues(struct vtpci_common *);
74
static void	vtpci_cleanup_setup_intr_attempt(struct vtpci_common *);
75
static int	vtpci_alloc_intr_resources(struct vtpci_common *);
76
static int	vtpci_setup_intx_interrupt(struct vtpci_common *,
141
		    enum intr_type);
77
		    enum intr_type);
142
static int	vtpci_setup_pervq_msix_interrupts(struct vtpci_softc *,
78
static int	vtpci_setup_pervq_msix_interrupts(struct vtpci_common *,
143
		    enum intr_type);
79
		    enum intr_type);
144
static int	vtpci_setup_msix_interrupts(struct vtpci_softc *,
80
static int	vtpci_set_host_msix_vectors(struct vtpci_common *);
81
static int	vtpci_setup_msix_interrupts(struct vtpci_common *,
145
		    enum intr_type);
82
		    enum intr_type);
146
static int	vtpci_setup_interrupts(struct vtpci_softc *, enum intr_type);
83
static int	vtpci_setup_intrs(struct vtpci_common *, enum intr_type);
147
84
static int	vtpci_reinit_virtqueue(struct vtpci_common *, int);
148
static int	vtpci_register_msix_vector(struct vtpci_softc *, int,
85
static void	vtpci_intx_intr(void *);
149
		    struct vtpci_interrupt *);
150
static int	vtpci_set_host_msix_vectors(struct vtpci_softc *);
151
static int	vtpci_reinit_virtqueue(struct vtpci_softc *, int);
152
153
static void	vtpci_free_interrupt(struct vtpci_softc *,
154
		    struct vtpci_interrupt *);
155
static void	vtpci_free_interrupts(struct vtpci_softc *);
156
static void	vtpci_free_virtqueues(struct vtpci_softc *);
157
static void	vtpci_release_child_resources(struct vtpci_softc *);
158
static void	vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *);
159
static void	vtpci_reset(struct vtpci_softc *);
160
161
static void	vtpci_select_virtqueue(struct vtpci_softc *, int);
162
163
static void	vtpci_legacy_intr(void *);
164
static int	vtpci_vq_shared_intr_filter(void *);
86
static int	vtpci_vq_shared_intr_filter(void *);
165
static void	vtpci_vq_shared_intr(void *);
87
static void	vtpci_vq_shared_intr(void *);
166
static int	vtpci_vq_intr_filter(void *);
88
static int	vtpci_vq_intr_filter(void *);
167
static void	vtpci_vq_intr(void *);
89
static void	vtpci_vq_intr(void *);
168
static void	vtpci_config_intr(void *);
90
static void	vtpci_config_intr(void *);
169
91
170
#define vtpci_setup_msi_interrupt vtpci_setup_legacy_interrupt
92
static void	vtpci_setup_sysctl(struct vtpci_common *);
171
93
172
#define VIRTIO_PCI_CONFIG(_sc) \
94
#define vtpci_setup_msi_interrupt vtpci_setup_intx_interrupt
173
    VIRTIO_PCI_CONFIG_OFF((((_sc)->vtpci_flags & VTPCI_FLAG_MSIX)) != 0)
174
95
175
/*
96
/*
176
 * I/O port read/write wrappers.
97
 * This module contains two drivers:
98
 *   - virtio_pci_legacy (vtpcil) for pre-V1 support
99
 *   - virtio_pci_modern (vtpcim) for V1 support
177
 */
100
 */
178
#define vtpci_read_config_1(sc, o)	bus_read_1((sc)->vtpci_res, (o))
179
#define vtpci_read_config_2(sc, o)	bus_read_2((sc)->vtpci_res, (o))
180
#define vtpci_read_config_4(sc, o)	bus_read_4((sc)->vtpci_res, (o))
181
#define vtpci_write_config_1(sc, o, v)	bus_write_1((sc)->vtpci_res, (o), (v))
182
#define vtpci_write_config_2(sc, o, v)	bus_write_2((sc)->vtpci_res, (o), (v))
183
#define vtpci_write_config_4(sc, o, v)	bus_write_4((sc)->vtpci_res, (o), (v))
184
185
/* Tunables. */
186
static int vtpci_disable_msix = 0;
187
TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix);
188
189
static device_method_t vtpci_methods[] = {
190
	/* Device interface. */
191
	DEVMETHOD(device_probe,			  vtpci_probe),
192
	DEVMETHOD(device_attach,		  vtpci_attach),
193
	DEVMETHOD(device_detach,		  vtpci_detach),
194
	DEVMETHOD(device_suspend,		  vtpci_suspend),
195
	DEVMETHOD(device_resume,		  vtpci_resume),
196
	DEVMETHOD(device_shutdown,		  vtpci_shutdown),
197
198
	/* Bus interface. */
199
	DEVMETHOD(bus_driver_added,		  vtpci_driver_added),
200
	DEVMETHOD(bus_child_detached,		  vtpci_child_detached),
201
	DEVMETHOD(bus_read_ivar,		  vtpci_read_ivar),
202
	DEVMETHOD(bus_write_ivar,		  vtpci_write_ivar),
203
204
	/* VirtIO bus interface. */
205
	DEVMETHOD(virtio_bus_negotiate_features,  vtpci_negotiate_features),
206
	DEVMETHOD(virtio_bus_with_feature,	  vtpci_with_feature),
207
	DEVMETHOD(virtio_bus_alloc_virtqueues,	  vtpci_alloc_virtqueues),
208
	DEVMETHOD(virtio_bus_setup_intr,	  vtpci_setup_intr),
209
	DEVMETHOD(virtio_bus_stop,		  vtpci_stop),
210
	DEVMETHOD(virtio_bus_reinit,		  vtpci_reinit),
211
	DEVMETHOD(virtio_bus_reinit_complete,	  vtpci_reinit_complete),
212
	DEVMETHOD(virtio_bus_notify_vq,		  vtpci_notify_virtqueue),
213
	DEVMETHOD(virtio_bus_read_device_config,  vtpci_read_dev_config),
214
	DEVMETHOD(virtio_bus_write_device_config, vtpci_write_dev_config),
215
216
	DEVMETHOD_END
217
};
218
219
static driver_t vtpci_driver = {
220
	"virtio_pci",
221
	vtpci_methods,
222
	sizeof(struct vtpci_softc)
223
};
224
225
devclass_t vtpci_devclass;
226
227
DRIVER_MODULE(virtio_pci, pci, vtpci_driver, vtpci_devclass, 0, 0);
228
MODULE_VERSION(virtio_pci, 1);
101
MODULE_VERSION(virtio_pci, 1);
229
MODULE_DEPEND(virtio_pci, pci, 1, 1, 1);
102
MODULE_DEPEND(virtio_pci, pci, 1, 1, 1);
230
MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1);
103
MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1);
231
104
232
static int
105
int vtpci_disable_msix = 0;
233
vtpci_probe(device_t dev)
106
TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix);
107
108
static uint8_t
109
vtpci_read_isr(struct vtpci_common *cn)
234
{
110
{
235
	char desc[36];
111
	return (VIRTIO_PCI_READ_ISR(cn->vtpci_dev));
236
	const char *name;
112
}
237
113
238
	if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
114
static uint16_t
239
		return (ENXIO);
115
vtpci_get_vq_size(struct vtpci_common *cn, int idx)
116
{
117
	return (VIRTIO_PCI_GET_VQ_SIZE(cn->vtpci_dev, idx));
118
}
240
119
241
	if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN ||
120
static bus_size_t
242
	    pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MAX)
121
vtpci_get_vq_notify_off(struct vtpci_common *cn, int idx)
243
		return (ENXIO);
122
{
123
	return (VIRTIO_PCI_GET_VQ_NOTIFY_OFF(cn->vtpci_dev, idx));
124
}
244
125
245
	if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION)
126
static void
246
		return (ENXIO);
127
vtpci_set_vq(struct vtpci_common *cn, struct virtqueue *vq)
128
{
129
	VIRTIO_PCI_SET_VQ(cn->vtpci_dev, vq);
130
}
247
131
248
	name = virtio_device_name(pci_get_subdevice(dev));
132
static void
249
	if (name == NULL)
133
vtpci_disable_vq(struct vtpci_common *cn, int idx)
250
		name = "Unknown";
134
{
135
	VIRTIO_PCI_DISABLE_VQ(cn->vtpci_dev, idx);
136
}
251
137
252
	snprintf(desc, sizeof(desc), "VirtIO PCI %s adapter", name);
138
static int
253
	device_set_desc_copy(dev, desc);
139
vtpci_register_cfg_msix(struct vtpci_common *cn, struct vtpci_interrupt *intr)
254
140
{
255
	return (BUS_PROBE_DEFAULT);
141
	return (VIRTIO_PCI_REGISTER_CFG_MSIX(cn->vtpci_dev, intr));
256
}
142
}
257
143
258
static int
144
static int
259
vtpci_attach(device_t dev)
145
vtpci_register_vq_msix(struct vtpci_common *cn, int idx,
146
    struct vtpci_interrupt *intr)
260
{
147
{
261
	struct vtpci_softc *sc;
148
	return (VIRTIO_PCI_REGISTER_VQ_MSIX(cn->vtpci_dev, idx, intr));
262
	device_t child;
149
}
263
	int rid;
264
150
265
	sc = device_get_softc(dev);
151
void
266
	sc->vtpci_dev = dev;
152
vtpci_init(struct vtpci_common *cn, device_t dev, bool modern)
153
{
267
154
155
	cn->vtpci_dev = dev;
156
268
	pci_enable_busmaster(dev);
157
	pci_enable_busmaster(dev);
269
158
270
	rid = PCIR_BAR(0);
159
	if (modern)
271
	sc->vtpci_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
160
		cn->vtpci_flags |= VTPCI_FLAG_MODERN;
272
	    RF_ACTIVE);
273
	if (sc->vtpci_res == NULL) {
274
		device_printf(dev, "cannot map I/O space\n");
275
		return (ENXIO);
276
	}
277
278
	if (pci_find_cap(dev, PCIY_MSI, NULL) != 0)
161
	if (pci_find_cap(dev, PCIY_MSI, NULL) != 0)
279
		sc->vtpci_flags |= VTPCI_FLAG_NO_MSI;
162
		cn->vtpci_flags |= VTPCI_FLAG_NO_MSI;
163
	if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0)
164
		cn->vtpci_flags |= VTPCI_FLAG_NO_MSIX;
280
165
281
	if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
166
	vtpci_setup_sysctl(cn);
282
		rid = PCIR_BAR(1);
167
}
283
		sc->vtpci_msix_res = bus_alloc_resource_any(dev,
284
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
285
	}
286
168
287
	if (sc->vtpci_msix_res == NULL)
169
int
288
		sc->vtpci_flags |= VTPCI_FLAG_NO_MSIX;
170
vtpci_add_child(struct vtpci_common *cn)
171
{
172
	device_t dev, child;
289
173
290
	vtpci_reset(sc);
174
	dev = cn->vtpci_dev;
291
175
292
	/* Tell the host we've noticed this device. */
176
	child = device_add_child(dev, NULL, -1);
293
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
177
	if (child == NULL) {
294
295
	if ((child = device_add_child(dev, NULL, -1)) == NULL) {
296
		device_printf(dev, "cannot create child device\n");
178
		device_printf(dev, "cannot create child device\n");
297
		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
298
		vtpci_detach(dev);
299
		return (ENOMEM);
179
		return (ENOMEM);
300
	}
180
	}
301
181
302
	sc->vtpci_child_dev = child;
182
	cn->vtpci_child_dev = child;
303
	vtpci_probe_and_attach_child(sc);
304
183
305
	return (0);
184
	return (0);
306
}
185
}
307
186
308
static int
187
int
309
vtpci_detach(device_t dev)
188
vtpci_delete_child(struct vtpci_common *cn)
310
{
189
{
311
	struct vtpci_softc *sc;
190
	device_t dev, child;
312
	device_t child;
313
	int error;
191
	int error;
314
192
315
	sc = device_get_softc(dev);
193
	dev = cn->vtpci_dev;
316
194
317
	if ((child = sc->vtpci_child_dev) != NULL) {
195
	child = cn->vtpci_child_dev;
196
	if (child != NULL) {
318
		error = device_delete_child(dev, child);
197
		error = device_delete_child(dev, child);
319
		if (error)
198
		if (error)
320
			return (error);
199
			return (error);
321
		sc->vtpci_child_dev = NULL;
200
		cn->vtpci_child_dev = NULL;
322
	}
201
	}
323
202
324
	vtpci_reset(sc);
325
326
	if (sc->vtpci_msix_res != NULL) {
327
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1),
328
		    sc->vtpci_msix_res);
329
		sc->vtpci_msix_res = NULL;
330
	}
331
332
	if (sc->vtpci_res != NULL) {
333
		bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0),
334
		    sc->vtpci_res);
335
		sc->vtpci_res = NULL;
336
	}
337
338
	return (0);
203
	return (0);
339
}
204
}
340
205
341
static int
206
void
342
vtpci_suspend(device_t dev)
207
vtpci_child_detached(struct vtpci_common *cn)
343
{
208
{
344
209
345
	return (bus_generic_suspend(dev));
210
	vtpci_release_child_resources(cn);
346
}
347
211
348
static int
212
	cn->vtpci_child_feat_desc = NULL;
349
vtpci_resume(device_t dev)
213
	cn->vtpci_host_features = 0;
350
{
214
	cn->vtpci_features = 0;
351
352
	return (bus_generic_resume(dev));
353
}
215
}
354
216
355
static int
217
int
356
vtpci_shutdown(device_t dev)
218
vtpci_reinit(struct vtpci_common *cn)
357
{
219
{
220
	int idx, error;
358
221
359
	(void) bus_generic_shutdown(dev);
222
	for (idx = 0; idx < cn->vtpci_nvqs; idx++) {
360
	/* Forcibly stop the host device. */
223
		error = vtpci_reinit_virtqueue(cn, idx);
361
	vtpci_stop(dev);
224
		if (error)
225
			return (error);
226
	}
362
227
228
	if (vtpci_is_msix_enabled(cn)) {
229
		error = vtpci_set_host_msix_vectors(cn);
230
		if (error)
231
			return (error);
232
	}
233
363
	return (0);
234
	return (0);
364
}
235
}
365
236
366
static void
237
static void
367
vtpci_driver_added(device_t dev, driver_t *driver)
238
vtpci_describe_features(struct vtpci_common *cn, const char *msg,
239
    uint64_t features)
368
{
240
{
369
	struct vtpci_softc *sc;
241
	device_t dev, child;
370
242
371
	sc = device_get_softc(dev);
243
	dev = cn->vtpci_dev;
244
	child = cn->vtpci_child_dev;
372
245
373
	vtpci_probe_and_attach_child(sc);
246
	if (device_is_attached(child) || bootverbose == 0)
247
		return;
248
249
	virtio_describe(dev, msg, features, cn->vtpci_child_feat_desc);
374
}
250
}
375
251
376
static void
252
uint64_t
377
vtpci_child_detached(device_t dev, device_t child)
253
vtpci_negotiate_features(struct vtpci_common *cn,
254
    uint64_t child_features, uint64_t host_features)
378
{
255
{
379
	struct vtpci_softc *sc;
256
	uint64_t features;
380
257
381
	sc = device_get_softc(dev);
258
	cn->vtpci_host_features = host_features;
259
	vtpci_describe_features(cn, "host", host_features);
382
260
383
	vtpci_reset(sc);
261
	/*
384
	vtpci_release_child_resources(sc);
262
	 * Limit negotiated features to what the driver, virtqueue, and
263
	 * host all support.
264
	 */
265
	features = host_features & child_features;
266
	features = virtio_filter_transport_features(features);
267
268
	cn->vtpci_features = features;
269
	vtpci_describe_features(cn, "negotiated", features);
270
271
	return (features);
385
}
272
}
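Feature negotiation above reduces to a bitwise intersection followed by transport filtering. A minimal sketch with hypothetical feature values (the macro names come from the VirtIO ring definitions, not this patch):

	/* Hypothetical: the host offers indirect descriptors and event-index
	 * interrupt suppression; the child driver requests only the former. */
	uint64_t host_features  = VIRTIO_RING_F_INDIRECT_DESC | VIRTIO_RING_F_EVENT_IDX;
	uint64_t child_features = VIRTIO_RING_F_INDIRECT_DESC;
	uint64_t negotiated     = host_features & child_features;
	/* negotiated == VIRTIO_RING_F_INDIRECT_DESC */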
386
273
387
static int
274
int
388
vtpci_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
275
vtpci_with_feature(struct vtpci_common *cn, uint64_t feature)
389
{
276
{
390
	struct vtpci_softc *sc;
277
	return ((cn->vtpci_features & feature) != 0);
278
}
391
279
392
	sc = device_get_softc(dev);
280
int
281
vtpci_read_ivar(struct vtpci_common *cn, int index, uintptr_t *result)
282
{
283
	device_t dev;
284
	int error;
393
285
394
	if (sc->vtpci_child_dev != child)
286
	dev = cn->vtpci_dev;
395
		return (ENOENT);
287
	error = 0;
396
288
397
	switch (index) {
289
	switch (index) {
398
	case VIRTIO_IVAR_DEVTYPE:
399
	case VIRTIO_IVAR_SUBDEVICE:
290
	case VIRTIO_IVAR_SUBDEVICE:
400
		*result = pci_get_subdevice(dev);
291
		*result = pci_get_subdevice(dev);
401
		break;
292
		break;
Lines 408-507 Link Here
408
	case VIRTIO_IVAR_SUBVENDOR:
299
	case VIRTIO_IVAR_SUBVENDOR:
409
		*result = pci_get_subdevice(dev);
300
		*result = pci_get_subdevice(dev);
410
		break;
301
		break;
302
	case VIRTIO_IVAR_MODERN:
303
		*result = vtpci_is_modern(cn);
304
		break;
411
	default:
305
	default:
412
		return (ENOENT);
306
		error = ENOENT;
413
	}
307
	}
414
308
415
	return (0);
309
	return (error);
416
}
310
}
417
311
418
static int
312
int
419
vtpci_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
313
vtpci_write_ivar(struct vtpci_common *cn, int index, uintptr_t value)
420
{
314
{
421
	struct vtpci_softc *sc;
315
	int error;
422
316
423
	sc = device_get_softc(dev);
317
	error = 0;
424
318
425
	if (sc->vtpci_child_dev != child)
426
		return (ENOENT);
427
428
	switch (index) {
319
	switch (index) {
429
	case VIRTIO_IVAR_FEATURE_DESC:
320
	case VIRTIO_IVAR_FEATURE_DESC:
430
		sc->vtpci_child_feat_desc = (void *) value;
321
		cn->vtpci_child_feat_desc = (void *) value;
431
		break;
322
		break;
432
	default:
323
	default:
433
		return (ENOENT);
324
		error = ENOENT;
434
	}
325
	}
435
326
436
	return (0);
327
	return (error);
437
}
328
}
438
329
439
static uint64_t
330
int
440
vtpci_negotiate_features(device_t dev, uint64_t child_features)
331
vtpci_alloc_virtqueues(struct vtpci_common *cn, int flags, int nvqs,
332
    struct vq_alloc_info *vq_info)
441
{
333
{
442
	struct vtpci_softc *sc;
334
	device_t dev;
443
	uint64_t host_features, features;
335
	int idx, align, error;
444
336
445
	sc = device_get_softc(dev);
337
	dev = cn->vtpci_dev;
446
338
447
	host_features = vtpci_read_config_4(sc, VIRTIO_PCI_HOST_FEATURES);
448
	vtpci_describe_features(sc, "host", host_features);
449
450
	/*
339
	/*
451
	 * Limit negotiated features to what the driver, virtqueue, and
340
	 * This is VIRTIO_PCI_VRING_ALIGN from legacy VirtIO. In modern VirtIO,
452
	 * host all support.
341
	 * the tables do not have to be allocated contiguously, but we do so
342
	 * anyway.
453
	 */
343
	 */
454
	features = host_features & child_features;
344
	align = 4096;
455
	features = virtqueue_filter_features(features);
456
	sc->vtpci_features = features;
457
345
458
	vtpci_describe_features(sc, "negotiated", features);
346
	if (cn->vtpci_nvqs != 0)
459
	vtpci_write_config_4(sc, VIRTIO_PCI_GUEST_FEATURES, features);
460
461
	return (features);
462
}
463
464
static int
465
vtpci_with_feature(device_t dev, uint64_t feature)
466
{
467
	struct vtpci_softc *sc;
468
469
	sc = device_get_softc(dev);
470
471
	return ((sc->vtpci_features & feature) != 0);
472
}
473
474
static int
475
vtpci_alloc_virtqueues(device_t dev, int flags, int nvqs,
476
    struct vq_alloc_info *vq_info)
477
{
478
	struct vtpci_softc *sc;
479
	struct virtqueue *vq;
480
	struct vtpci_virtqueue *vqx;
481
	struct vq_alloc_info *info;
482
	int idx, error;
483
	uint16_t size;
484
485
	sc = device_get_softc(dev);
486
487
	if (sc->vtpci_nvqs != 0)
488
		return (EALREADY);
347
		return (EALREADY);
489
	if (nvqs <= 0)
348
	if (nvqs <= 0)
490
		return (EINVAL);
349
		return (EINVAL);
491
350
492
	sc->vtpci_vqs = malloc(nvqs * sizeof(struct vtpci_virtqueue),
351
	cn->vtpci_vqs = malloc(nvqs * sizeof(struct vtpci_virtqueue),
493
	    M_DEVBUF, M_NOWAIT | M_ZERO);
352
	    M_DEVBUF, M_NOWAIT | M_ZERO);
494
	if (sc->vtpci_vqs == NULL)
353
	if (cn->vtpci_vqs == NULL)
495
		return (ENOMEM);
354
		return (ENOMEM);
496
355
497
	for (idx = 0; idx < nvqs; idx++) {
356
	for (idx = 0; idx < nvqs; idx++) {
498
		vqx = &sc->vtpci_vqs[idx];
357
		struct vtpci_virtqueue *vqx;
358
		struct vq_alloc_info *info;
359
		struct virtqueue *vq;
360
		bus_size_t notify_offset;
361
		uint16_t size;
362
363
		vqx = &cn->vtpci_vqs[idx];
499
		info = &vq_info[idx];
364
		info = &vq_info[idx];
500
365
501
		vtpci_select_virtqueue(sc, idx);
366
		size = vtpci_get_vq_size(cn, idx);
502
		size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);
367
		notify_offset = vtpci_get_vq_notify_off(cn, idx);
503
368
504
		error = virtqueue_alloc(dev, idx, size, VIRTIO_PCI_VRING_ALIGN,
369
		error = virtqueue_alloc(dev, idx, size, notify_offset, align,
505
		    0xFFFFFFFFUL, info, &vq);
370
		    0xFFFFFFFFUL, info, &vq);
506
		if (error) {
371
		if (error) {
507
			device_printf(dev,
372
			device_printf(dev,
Lines 509-778 Link Here
509
			break;
374
			break;
510
		}
375
		}
511
376
512
		vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
377
		vtpci_set_vq(cn, vq);
513
		    virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
514
378
515
		vqx->vtv_vq = *info->vqai_vq = vq;
379
		vqx->vtv_vq = *info->vqai_vq = vq;
516
		vqx->vtv_no_intr = info->vqai_intr == NULL;
380
		vqx->vtv_no_intr = info->vqai_intr == NULL;
517
381
518
		sc->vtpci_nvqs++;
382
		cn->vtpci_nvqs++;
519
	}
383
	}
520
384
521
	if (error)
385
	if (error)
522
		vtpci_free_virtqueues(sc);
386
		vtpci_free_virtqueues(cn);
523
387
524
	return (error);
388
	return (error);
525
}
389
}
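For context, the vq_info array consumed here is filled in by the child device driver before it calls into the transport. A condensed, hypothetical caller (my_vq_intr and sc are illustrative names, not from this patch):

	struct vq_alloc_info vq_info;
	struct virtqueue *vq;
	int error;

	/* One virtqueue, no indirect descriptors, interrupt handler my_vq_intr. */
	VQ_ALLOC_INFO_INIT(&vq_info, 0, my_vq_intr, sc, &vq,
	    "%s vq", device_get_nameunit(dev));
	error = virtio_alloc_virtqueues(dev, 0, 1, &vq_info);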
526
390
527
static int
391
static int
528
vtpci_setup_intr(device_t dev, enum intr_type type)
392
vtpci_alloc_msix(struct vtpci_common *cn, int nvectors)
529
{
393
{
530
	struct vtpci_softc *sc;
531
	int attempt, error;
532
533
	sc = device_get_softc(dev);
534
535
	for (attempt = 0; attempt < 5; attempt++) {
536
		/*
537
		 * Start with the most desirable interrupt configuration and
538
		 * fall back towards less desirable ones.
539
		 */
540
		switch (attempt) {
541
		case 0:
542
			error = vtpci_alloc_intr_msix_pervq(sc);
543
			break;
544
		case 1:
545
			error = vtpci_alloc_intr_msix_shared(sc);
546
			break;
547
		case 2:
548
			error = vtpci_alloc_intr_msi(sc);
549
			break;
550
		case 3:
551
			error = vtpci_alloc_intr_legacy(sc);
552
			break;
553
		default:
554
			device_printf(dev,
555
			    "exhausted all interrupt allocation attempts\n");
556
			return (ENXIO);
557
		}
558
559
		if (error == 0 && vtpci_setup_interrupts(sc, type) == 0)
560
			break;
561
562
		vtpci_cleanup_setup_intr_attempt(sc);
563
	}
564
565
	if (bootverbose) {
566
		if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
567
			device_printf(dev, "using legacy interrupt\n");
568
		else if (sc->vtpci_flags & VTPCI_FLAG_MSI)
569
			device_printf(dev, "using MSI interrupt\n");
570
		else if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX)
571
			device_printf(dev, "using shared MSIX interrupts\n");
572
		else
573
			device_printf(dev, "using per VQ MSIX interrupts\n");
574
	}
575
576
	return (0);
577
}
578
579
static void
580
vtpci_stop(device_t dev)
581
{
582
583
	vtpci_reset(device_get_softc(dev));
584
}
585
586
static int
587
vtpci_reinit(device_t dev, uint64_t features)
588
{
589
	struct vtpci_softc *sc;
590
	int idx, error;
591
592
	sc = device_get_softc(dev);
593
594
	/*
595
	 * Redrive the device initialization. This is a bit of an abuse of
596
	 * the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to
597
	 * play nice.
598
	 *
599
	 * We do not allow the host device to change from what was originally
600
	 * negotiated beyond what the guest driver changed. MSIX state should
601
	 * not change, number of virtqueues and their size remain the same, etc.
602
	 * This will need to be rethought when we want to support migration.
603
	 */
604
605
	if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
606
		vtpci_stop(dev);
607
608
	/*
609
	 * Quickly drive the status through ACK and DRIVER. The device
610
	 * does not become usable again until vtpci_reinit_complete().
611
	 */
612
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
613
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
614
615
	vtpci_negotiate_features(dev, features);
616
617
	for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
618
		error = vtpci_reinit_virtqueue(sc, idx);
619
		if (error)
620
			return (error);
621
	}
622
623
	if (sc->vtpci_flags & VTPCI_FLAG_MSIX) {
624
		error = vtpci_set_host_msix_vectors(sc);
625
		if (error)
626
			return (error);
627
	}
628
629
	return (0);
630
}
631
632
static void
633
vtpci_reinit_complete(device_t dev)
634
{
635
636
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
637
}
638
639
static void
640
vtpci_notify_virtqueue(device_t dev, uint16_t queue)
641
{
642
	struct vtpci_softc *sc;
643
644
	sc = device_get_softc(dev);
645
646
	vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_NOTIFY, queue);
647
}
648
649
static uint8_t
650
vtpci_get_status(device_t dev)
651
{
652
	struct vtpci_softc *sc;
653
654
	sc = device_get_softc(dev);
655
656
	return (vtpci_read_config_1(sc, VIRTIO_PCI_STATUS));
657
}
658
659
static void
660
vtpci_set_status(device_t dev, uint8_t status)
661
{
662
	struct vtpci_softc *sc;
663
664
	sc = device_get_softc(dev);
665
666
	if (status != VIRTIO_CONFIG_STATUS_RESET)
667
		status |= vtpci_get_status(dev);
668
669
	vtpci_write_config_1(sc, VIRTIO_PCI_STATUS, status);
670
}
671
672
static void
673
vtpci_read_dev_config(device_t dev, bus_size_t offset,
674
    void *dst, int length)
675
{
676
	struct vtpci_softc *sc;
677
	bus_size_t off;
678
	uint8_t *d;
679
	int size;
680
681
	sc = device_get_softc(dev);
682
	off = VIRTIO_PCI_CONFIG(sc) + offset;
683
684
	for (d = dst; length > 0; d += size, off += size, length -= size) {
685
		if (length >= 4) {
686
			size = 4;
687
			*(uint32_t *)d = vtpci_read_config_4(sc, off);
688
		} else if (length >= 2) {
689
			size = 2;
690
			*(uint16_t *)d = vtpci_read_config_2(sc, off);
691
		} else {
692
			size = 1;
693
			*d = vtpci_read_config_1(sc, off);
694
		}
695
	}
696
}
697
698
static void
699
vtpci_write_dev_config(device_t dev, bus_size_t offset,
700
    void *src, int length)
701
{
702
	struct vtpci_softc *sc;
703
	bus_size_t off;
704
	uint8_t *s;
705
	int size;
706
707
	sc = device_get_softc(dev);
708
	off = VIRTIO_PCI_CONFIG(sc) + offset;
709
710
	for (s = src; length > 0; s += size, off += size, length -= size) {
711
		if (length >= 4) {
712
			size = 4;
713
			vtpci_write_config_4(sc, off, *(uint32_t *)s);
714
		} else if (length >= 2) {
715
			size = 2;
716
			vtpci_write_config_2(sc, off, *(uint16_t *)s);
717
		} else {
718
			size = 1;
719
			vtpci_write_config_1(sc, off, *s);
720
		}
721
	}
722
}
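Both loops above split wide config accesses into 4-, 2-, and 1-byte reads or writes of the device-specific region. Child drivers reach them through the generic wrappers; a sketch, assuming a hypothetical 32-bit field at offset 0:

	uint32_t val;

	/* Reads 4 bytes of device-specific config space at offset 0. */
	virtio_read_device_config(dev, 0, &val, sizeof(val));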
723
724
static void
725
vtpci_describe_features(struct vtpci_softc *sc, const char *msg,
726
    uint64_t features)
727
{
728
	device_t dev, child;
729
730
	dev = sc->vtpci_dev;
731
	child = sc->vtpci_child_dev;
732
733
	if (device_is_attached(child) || bootverbose == 0)
734
		return;
735
736
	virtio_describe(dev, msg, features, sc->vtpci_child_feat_desc);
737
}
738
739
static void
740
vtpci_probe_and_attach_child(struct vtpci_softc *sc)
741
{
742
	device_t dev, child;
743
744
	dev = sc->vtpci_dev;
745
	child = sc->vtpci_child_dev;
746
747
	if (child == NULL)
748
		return;
749
750
	if (device_get_state(child) != DS_NOTPRESENT)
751
		return;
752
753
	if (device_probe(child) != 0)
754
		return;
755
756
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
757
	if (device_attach(child) != 0) {
758
		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
759
		vtpci_reset(sc);
760
		vtpci_release_child_resources(sc);
761
		/* Reset status for future attempt. */
762
		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
763
	} else {
764
		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
765
		VIRTIO_ATTACH_COMPLETED(child);
766
	}
767
}
768
769
static int
770
vtpci_alloc_msix(struct vtpci_softc *sc, int nvectors)
771
{
772
	device_t dev;
394
	device_t dev;
773
	int nmsix, cnt, required;
395
	int nmsix, cnt, required;
774
396
775
	dev = sc->vtpci_dev;
397
	dev = cn->vtpci_dev;
776
398
777
	/* Allocate an additional vector for the config changes. */
399
	/* Allocate an additional vector for the config changes. */
778
	required = nvectors + 1;
400
	required = nvectors + 1;
Lines 783-789 Link Here
783
405
784
	cnt = required;
406
	cnt = required;
785
	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
407
	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
786
		sc->vtpci_nmsix_resources = required;
408
		cn->vtpci_nmsix_resources = required;
787
		return (0);
409
		return (0);
788
	}
410
	}
789
411
Lines 793-804 Link Here
793
}
415
}
794
416
795
static int
417
static int
796
vtpci_alloc_msi(struct vtpci_softc *sc)
418
vtpci_alloc_msi(struct vtpci_common *cn)
797
{
419
{
798
	device_t dev;
420
	device_t dev;
799
	int nmsi, cnt, required;
421
	int nmsi, cnt, required;
800
422
801
	dev = sc->vtpci_dev;
423
	dev = cn->vtpci_dev;
802
	required = 1;
424
	required = 1;
803
425
804
	nmsi = pci_msi_count(dev);
426
	nmsi = pci_msi_count(dev);
Lines 815-894 Link Here
815
}
437
}
816
438
817
static int
439
static int
818
vtpci_alloc_intr_msix_pervq(struct vtpci_softc *sc)
440
vtpci_alloc_intr_msix_pervq(struct vtpci_common *cn)
819
{
441
{
820
	int i, nvectors, error;
442
	int i, nvectors, error;
821
443
822
	if (vtpci_disable_msix != 0 ||
444
	if (vtpci_disable_msix != 0 || cn->vtpci_flags & VTPCI_FLAG_NO_MSIX)
823
	    sc->vtpci_flags & VTPCI_FLAG_NO_MSIX)
824
		return (ENOTSUP);
445
		return (ENOTSUP);
825
446
826
	for (nvectors = 0, i = 0; i < sc->vtpci_nvqs; i++) {
447
	for (nvectors = 0, i = 0; i < cn->vtpci_nvqs; i++) {
827
		if (sc->vtpci_vqs[i].vtv_no_intr == 0)
448
		if (cn->vtpci_vqs[i].vtv_no_intr == 0)
828
			nvectors++;
449
			nvectors++;
829
	}
450
	}
830
451
831
	error = vtpci_alloc_msix(sc, nvectors);
452
	error = vtpci_alloc_msix(cn, nvectors);
832
	if (error)
453
	if (error)
833
		return (error);
454
		return (error);
834
455
835
	sc->vtpci_flags |= VTPCI_FLAG_MSIX;
456
	cn->vtpci_flags |= VTPCI_FLAG_MSIX;
836
457
837
	return (0);
458
	return (0);
838
}
459
}
839
460
840
static int
461
static int
841
vtpci_alloc_intr_msix_shared(struct vtpci_softc *sc)
462
vtpci_alloc_intr_msix_shared(struct vtpci_common *cn)
842
{
463
{
843
	int error;
464
	int error;
844
465
845
	if (vtpci_disable_msix != 0 ||
466
	if (vtpci_disable_msix != 0 || cn->vtpci_flags & VTPCI_FLAG_NO_MSIX)
846
	    sc->vtpci_flags & VTPCI_FLAG_NO_MSIX)
847
		return (ENOTSUP);
467
		return (ENOTSUP);
848
468
849
	error = vtpci_alloc_msix(sc, 1);
469
	error = vtpci_alloc_msix(cn, 1);
850
	if (error)
470
	if (error)
851
		return (error);
471
		return (error);
852
472
853
	sc->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX;
473
	cn->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX;
854
474
855
	return (0);
475
	return (0);
856
}
476
}
857
477
858
static int
478
static int
859
vtpci_alloc_intr_msi(struct vtpci_softc *sc)
479
vtpci_alloc_intr_msi(struct vtpci_common *cn)
860
{
480
{
861
	int error;
481
	int error;
862
482
863
	/* Only BHyVe supports MSI. */
483
	/* Only BHyVe supports MSI. */
864
	if (sc->vtpci_flags & VTPCI_FLAG_NO_MSI)
484
	if (cn->vtpci_flags & VTPCI_FLAG_NO_MSI)
865
		return (ENOTSUP);
485
		return (ENOTSUP);
866
486
867
	error = vtpci_alloc_msi(sc);
487
	error = vtpci_alloc_msi(cn);
868
	if (error)
488
	if (error)
869
		return (error);
489
		return (error);
870
490
871
	sc->vtpci_flags |= VTPCI_FLAG_MSI;
491
	cn->vtpci_flags |= VTPCI_FLAG_MSI;
872
492
873
	return (0);
493
	return (0);
874
}
494
}
875
495
876
static int
496
static int
877
vtpci_alloc_intr_legacy(struct vtpci_softc *sc)
497
vtpci_alloc_intr_intx(struct vtpci_common *cn)
878
{
498
{
879
499
880
	sc->vtpci_flags |= VTPCI_FLAG_LEGACY;
500
	cn->vtpci_flags |= VTPCI_FLAG_INTX;
881
501
882
	return (0);
502
	return (0);
883
}
503
}
884
504
885
static int
505
static int
886
vtpci_alloc_interrupt(struct vtpci_softc *sc, int rid, int flags,
506
vtpci_alloc_interrupt(struct vtpci_common *cn, int rid, int flags,
887
    struct vtpci_interrupt *intr)
507
    struct vtpci_interrupt *intr)
888
{
508
{
889
	struct resource *irq;
509
	struct resource *irq;
890
510
891
	irq = bus_alloc_resource_any(sc->vtpci_dev, SYS_RES_IRQ, &rid, flags);
511
	irq = bus_alloc_resource_any(cn->vtpci_dev, SYS_RES_IRQ, &rid, flags);
892
	if (irq == NULL)
512
	if (irq == NULL)
893
		return (ENXIO);
513
		return (ENXIO);
894
514
Lines 898-937 Link Here
898
	return (0);
518
	return (0);
899
}
519
}
900
520
521
static void
522
vtpci_free_interrupt(struct vtpci_common *cn, struct vtpci_interrupt *intr)
523
{
524
	device_t dev;
525
526
	dev = cn->vtpci_dev;
527
528
	if (intr->vti_handler != NULL) {
529
		bus_teardown_intr(dev, intr->vti_irq, intr->vti_handler);
530
		intr->vti_handler = NULL;
531
	}
532
533
	if (intr->vti_irq != NULL) {
534
		bus_release_resource(dev, SYS_RES_IRQ, intr->vti_rid,
535
		    intr->vti_irq);
536
		intr->vti_irq = NULL;
537
		intr->vti_rid = -1;
538
	}
539
}
540
541
static void
542
vtpci_free_interrupts(struct vtpci_common *cn)
543
{
544
	struct vtpci_interrupt *intr;
545
	int i, nvq_intrs;
546
547
	vtpci_free_interrupt(cn, &cn->vtpci_device_interrupt);
548
549
	if (cn->vtpci_nmsix_resources != 0) {
550
		nvq_intrs = cn->vtpci_nmsix_resources - 1;
551
		cn->vtpci_nmsix_resources = 0;
552
553
		if ((intr = cn->vtpci_msix_vq_interrupts) != NULL) {
554
			for (i = 0; i < nvq_intrs; i++, intr++)
555
				vtpci_free_interrupt(cn, intr);
556
557
			free(cn->vtpci_msix_vq_interrupts, M_DEVBUF);
558
			cn->vtpci_msix_vq_interrupts = NULL;
559
		}
560
	}
561
562
	if (cn->vtpci_flags & (VTPCI_FLAG_MSI | VTPCI_FLAG_MSIX))
563
		pci_release_msi(cn->vtpci_dev);
564
565
	cn->vtpci_flags &= ~VTPCI_FLAG_ITYPE_MASK;
566
}
567
568
static void
569
vtpci_free_virtqueues(struct vtpci_common *cn)
570
{
571
	struct vtpci_virtqueue *vqx;
572
	int idx;
573
574
	for (idx = 0; idx < cn->vtpci_nvqs; idx++) {
575
		vtpci_disable_vq(cn, idx);
576
577
		vqx = &cn->vtpci_vqs[idx];
578
		virtqueue_free(vqx->vtv_vq);
579
		vqx->vtv_vq = NULL;
580
	}
581
582
	free(cn->vtpci_vqs, M_DEVBUF);
583
	cn->vtpci_vqs = NULL;
584
	cn->vtpci_nvqs = 0;
585
}
586
587
void
588
vtpci_release_child_resources(struct vtpci_common *cn)
589
{
590
591
	vtpci_free_interrupts(cn);
592
	vtpci_free_virtqueues(cn);
593
}
594
595
static void
596
vtpci_cleanup_setup_intr_attempt(struct vtpci_common *cn)
597
{
598
	int idx;
599
600
	if (cn->vtpci_flags & VTPCI_FLAG_MSIX) {
601
		vtpci_register_cfg_msix(cn, NULL);
602
603
		for (idx = 0; idx < cn->vtpci_nvqs; idx++)
604
			vtpci_register_vq_msix(cn, idx, NULL);
605
	}
606
607
	vtpci_free_interrupts(cn);
608
}
609
901
static int
610
static int
902
vtpci_alloc_intr_resources(struct vtpci_softc *sc)
611
vtpci_alloc_intr_resources(struct vtpci_common *cn)
903
{
612
{
904
	struct vtpci_interrupt *intr;
613
	struct vtpci_interrupt *intr;
905
	int i, rid, flags, nvq_intrs, error;
614
	int i, rid, flags, nvq_intrs, error;
906
615
907
	rid = 0;
908
	flags = RF_ACTIVE;
616
	flags = RF_ACTIVE;
909
617
910
	if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
618
	if (cn->vtpci_flags & VTPCI_FLAG_INTX) {
619
		rid = 0;
911
		flags |= RF_SHAREABLE;
620
		flags |= RF_SHAREABLE;
912
	else
621
	} else
913
		rid = 1;
622
		rid = 1;
914
623
915
	/*
624
	/*
916
	 * For legacy and MSI interrupts, this single resource handles all
625
	 * When using INTX or MSI interrupts, this resource handles all
917
	 * interrupts. For MSIX, this resource is used for the configuration
626
	 * interrupts. When using MSIX, this resource handles just the
918
	 * changed interrupt.
627
	 * configuration changed interrupt.
919
	 */
628
	 */
920
	intr = &sc->vtpci_device_interrupt;
629
	intr = &cn->vtpci_device_interrupt;
921
	error = vtpci_alloc_interrupt(sc, rid, flags, intr);
630
922
	if (error || sc->vtpci_flags & (VTPCI_FLAG_LEGACY | VTPCI_FLAG_MSI))
631
	error = vtpci_alloc_interrupt(cn, rid, flags, intr);
632
	if (error || cn->vtpci_flags & (VTPCI_FLAG_INTX | VTPCI_FLAG_MSI))
923
		return (error);
633
		return (error);
924
634
925
	/* Subtract one for the configuration changed interrupt. */
635
	/*
926
	nvq_intrs = sc->vtpci_nmsix_resources - 1;
636
	 * Now allocate the interrupts for the virtqueues. This may be one
637
	 * for all the virtqueues, or one for each virtqueue. Subtract one
638
	 * below for because of the configuration changed interrupt.
639
	 */
640
	nvq_intrs = cn->vtpci_nmsix_resources - 1;
927
641
928
	intr = sc->vtpci_msix_vq_interrupts = malloc(nvq_intrs *
642
	cn->vtpci_msix_vq_interrupts = malloc(nvq_intrs *
929
	    sizeof(struct vtpci_interrupt), M_DEVBUF, M_NOWAIT | M_ZERO);
643
	    sizeof(struct vtpci_interrupt), M_DEVBUF, M_NOWAIT | M_ZERO);
930
	if (sc->vtpci_msix_vq_interrupts == NULL)
644
	if (cn->vtpci_msix_vq_interrupts == NULL)
931
		return (ENOMEM);
645
		return (ENOMEM);
932
646
647
	intr = cn->vtpci_msix_vq_interrupts;
648
933
	for (i = 0, rid++; i < nvq_intrs; i++, rid++, intr++) {
649
	for (i = 0, rid++; i < nvq_intrs; i++, rid++, intr++) {
934
		error = vtpci_alloc_interrupt(sc, rid, flags, intr);
650
		error = vtpci_alloc_interrupt(cn, rid, flags, intr);
935
		if (error)
651
		if (error)
936
			return (error);
652
			return (error);
937
	}
653
	}
Lines 940-973 Link Here
940
}
656
}
941
657
942
static int
658
static int
943
vtpci_setup_legacy_interrupt(struct vtpci_softc *sc, enum intr_type type)
659
vtpci_setup_intx_interrupt(struct vtpci_common *cn, enum intr_type type)
944
{
660
{
945
	struct vtpci_interrupt *intr;
661
	struct vtpci_interrupt *intr;
946
	int error;
662
	int error;
947
663
948
	intr = &sc->vtpci_device_interrupt;
664
	intr = &cn->vtpci_device_interrupt;
949
	error = bus_setup_intr(sc->vtpci_dev, intr->vti_irq, type, NULL,
950
	    vtpci_legacy_intr, sc, &intr->vti_handler);
951
665
666
	error = bus_setup_intr(cn->vtpci_dev, intr->vti_irq, type, NULL,
667
	    vtpci_intx_intr, cn, &intr->vti_handler);
668
952
	return (error);
669
	return (error);
953
}
670
}
954
671
955
static int
672
static int
956
vtpci_setup_pervq_msix_interrupts(struct vtpci_softc *sc, enum intr_type type)
673
vtpci_setup_pervq_msix_interrupts(struct vtpci_common *cn, enum intr_type type)
957
{
674
{
958
	struct vtpci_virtqueue *vqx;
675
	struct vtpci_virtqueue *vqx;
959
	struct vtpci_interrupt *intr;
676
	struct vtpci_interrupt *intr;
960
	int i, error;
677
	int i, error;
961
678
962
	intr = sc->vtpci_msix_vq_interrupts;
679
	intr = cn->vtpci_msix_vq_interrupts;
963
680
964
	for (i = 0; i < sc->vtpci_nvqs; i++) {
681
	for (i = 0; i < cn->vtpci_nvqs; i++) {
965
		vqx = &sc->vtpci_vqs[i];
682
		vqx = &cn->vtpci_vqs[i];
966
683
967
		if (vqx->vtv_no_intr)
684
		if (vqx->vtv_no_intr)
968
			continue;
685
			continue;
969
686
970
		error = bus_setup_intr(sc->vtpci_dev, intr->vti_irq, type,
687
		error = bus_setup_intr(cn->vtpci_dev, intr->vti_irq, type,
971
		    vtpci_vq_intr_filter, vtpci_vq_intr, vqx->vtv_vq,
688
		    vtpci_vq_intr_filter, vtpci_vq_intr, vqx->vtv_vq,
972
		    &intr->vti_handler);
689
		    &intr->vti_handler);
973
		if (error)
690
		if (error)
Lines 980-1085 Link Here
980
}
697
}
981
698
982
static int
699
static int
983
vtpci_setup_msix_interrupts(struct vtpci_softc *sc, enum intr_type type)
700
vtpci_set_host_msix_vectors(struct vtpci_common *cn)
984
{
701
{
985
	device_t dev;
986
	struct vtpci_interrupt *intr;
987
	int error;
988
989
	dev = sc->vtpci_dev;
990
	intr = &sc->vtpci_device_interrupt;
991
992
	error = bus_setup_intr(dev, intr->vti_irq, type, NULL,
993
	    vtpci_config_intr, sc, &intr->vti_handler);
994
	if (error)
995
		return (error);
996
997
	if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) {
998
		intr = sc->vtpci_msix_vq_interrupts;
999
		error = bus_setup_intr(dev, intr->vti_irq, type,
1000
		    vtpci_vq_shared_intr_filter, vtpci_vq_shared_intr, sc,
1001
		    &intr->vti_handler);
1002
	} else
1003
		error = vtpci_setup_pervq_msix_interrupts(sc, type);
1004
1005
	return (error ? error : vtpci_set_host_msix_vectors(sc));
1006
}
1007
1008
static int
1009
vtpci_setup_interrupts(struct vtpci_softc *sc, enum intr_type type)
1010
{
1011
	int error;
1012
1013
	type |= INTR_MPSAFE;
1014
	KASSERT(sc->vtpci_flags & VTPCI_FLAG_ITYPE_MASK,
1015
	    ("%s: no interrupt type selected %#x", __func__, sc->vtpci_flags));
1016
1017
	error = vtpci_alloc_intr_resources(sc);
1018
	if (error)
1019
		return (error);
1020
1021
	if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
1022
		error = vtpci_setup_legacy_interrupt(sc, type);
1023
	else if (sc->vtpci_flags & VTPCI_FLAG_MSI)
1024
		error = vtpci_setup_msi_interrupt(sc, type);
1025
	else
1026
		error = vtpci_setup_msix_interrupts(sc, type);
1027
1028
	return (error);
1029
}
1030
1031
static int
1032
vtpci_register_msix_vector(struct vtpci_softc *sc, int offset,
1033
    struct vtpci_interrupt *intr)
1034
{
1035
	device_t dev;
1036
	uint16_t vector;
1037
1038
	dev = sc->vtpci_dev;
1039
1040
	if (intr != NULL) {
1041
		/* Map from guest rid to host vector. */
1042
		vector = intr->vti_rid - 1;
1043
	} else
1044
		vector = VIRTIO_MSI_NO_VECTOR;
1045
1046
	vtpci_write_config_2(sc, offset, vector);
1047
1048
	/* Read vector to determine if the host had sufficient resources. */
1049
	if (vtpci_read_config_2(sc, offset) != vector) {
1050
		device_printf(dev,
1051
		    "insufficient host resources for MSIX interrupts\n");
1052
		return (ENODEV);
1053
	}
1054
1055
	return (0);
1056
}
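The read-back above serves as the failure signal for legacy MSI-X setup: a host that cannot allocate the vector will not return the value just written. Condensed, the handshake is simply:

	vtpci_write_config_2(sc, offset, vector);
	if (vtpci_read_config_2(sc, offset) != vector)
		return (ENODEV);	/* host lacked MSIX resources */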
1057
1058
static int
1059
vtpci_set_host_msix_vectors(struct vtpci_softc *sc)
1060
{
1061
	struct vtpci_interrupt *intr, *tintr;
702
	struct vtpci_interrupt *intr, *tintr;
1062
	int idx, offset, error;
703
	int idx, error;
1063
704
1064
	intr = &sc->vtpci_device_interrupt;
705
	intr = &cn->vtpci_device_interrupt;
1065
	offset = VIRTIO_MSI_CONFIG_VECTOR;
706
	error = vtpci_register_cfg_msix(cn, intr);
1066
1067
	error = vtpci_register_msix_vector(sc, offset, intr);
1068
	if (error)
707
	if (error)
1069
		return (error);
708
		return (error);
1070
709
1071
	intr = sc->vtpci_msix_vq_interrupts;
710
	intr = cn->vtpci_msix_vq_interrupts;
1072
	offset = VIRTIO_MSI_QUEUE_VECTOR;
711
	for (idx = 0; idx < cn->vtpci_nvqs; idx++) {
1073
712
		if (cn->vtpci_vqs[idx].vtv_no_intr)
1074
	for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
1075
		vtpci_select_virtqueue(sc, idx);
1076
1077
		if (sc->vtpci_vqs[idx].vtv_no_intr)
1078
			tintr = NULL;
713
			tintr = NULL;
1079
		else
714
		else
1080
			tintr = intr;
715
			tintr = intr;
1081
716
1082
		error = vtpci_register_msix_vector(sc, offset, tintr);
717
		error = vtpci_register_vq_msix(cn, idx, tintr);
1083
		if (error)
718
		if (error)
1084
			break;
719
			break;
1085
720
Lines 1087-1094 Link Here
1087
		 * For shared MSIX, all the virtqueues share the first
722
		 * For shared MSIX, all the virtqueues share the first
1088
		 * interrupt.
723
		 * interrupt.
1089
		 */
724
		 */
1090
		if (!sc->vtpci_vqs[idx].vtv_no_intr &&
725
		if (!cn->vtpci_vqs[idx].vtv_no_intr &&
1091
		    (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) == 0)
726
		    (cn->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) == 0)
1092
			intr++;
727
			intr++;
1093
	}
728
	}
1094
729
Lines 1096-1259 Link Here
1096
}
731
}
1097
732
1098
static int
733
static int
1099
vtpci_reinit_virtqueue(struct vtpci_softc *sc, int idx)
734
vtpci_setup_msix_interrupts(struct vtpci_common *cn, enum intr_type type)
1100
{
735
{
1101
	struct vtpci_virtqueue *vqx;
736
	struct vtpci_interrupt *intr;
1102
	struct virtqueue *vq;
1103
	int error;
737
	int error;
1104
	uint16_t size;
1105
738
1106
	vqx = &sc->vtpci_vqs[idx];
739
	intr = &cn->vtpci_device_interrupt;
1107
	vq = vqx->vtv_vq;
1108
740
1109
	KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));
741
	error = bus_setup_intr(cn->vtpci_dev, intr->vti_irq, type, NULL,
1110
742
	    vtpci_config_intr, cn, &intr->vti_handler);
1111
	vtpci_select_virtqueue(sc, idx);
1112
	size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);
1113
1114
	error = virtqueue_reinit(vq, size);
1115
	if (error)
743
	if (error)
1116
		return (error);
744
		return (error);
1117
745
1118
	vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
746
	if (cn->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) {
1119
	    virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
747
		intr = &cn->vtpci_msix_vq_interrupts[0];
1120
748
1121
	return (0);
749
		error = bus_setup_intr(cn->vtpci_dev, intr->vti_irq, type,
750
		    vtpci_vq_shared_intr_filter, vtpci_vq_shared_intr, cn,
751
		    &intr->vti_handler);
752
	} else
753
		error = vtpci_setup_pervq_msix_interrupts(cn, type);
754
755
	return (error ? error : vtpci_set_host_msix_vectors(cn));
1122
}
756
}
1123
757
1124
static void
758
static int
1125
vtpci_free_interrupt(struct vtpci_softc *sc, struct vtpci_interrupt *intr)
759
vtpci_setup_intrs(struct vtpci_common *cn, enum intr_type type)
1126
{
760
{
1127
	device_t dev;
761
	int error;
1128
762
1129
	dev = sc->vtpci_dev;
763
	type |= INTR_MPSAFE;
764
	KASSERT(cn->vtpci_flags & VTPCI_FLAG_ITYPE_MASK,
765
	    ("%s: no interrupt type selected %#x", __func__, cn->vtpci_flags));
1130
766
1131
	if (intr->vti_handler != NULL) {
767
	error = vtpci_alloc_intr_resources(cn);
1132
		bus_teardown_intr(dev, intr->vti_irq, intr->vti_handler);
768
	if (error)
1133
		intr->vti_handler = NULL;
769
		return (error);
1134
	}
1135
770
1136
	if (intr->vti_irq != NULL) {
771
	if (cn->vtpci_flags & VTPCI_FLAG_INTX)
1137
		bus_release_resource(dev, SYS_RES_IRQ, intr->vti_rid,
772
		error = vtpci_setup_intx_interrupt(cn, type);
1138
		    intr->vti_irq);
773
	else if (cn->vtpci_flags & VTPCI_FLAG_MSI)
1139
		intr->vti_irq = NULL;
774
		error = vtpci_setup_msi_interrupt(cn, type);
1140
		intr->vti_rid = -1;
775
	else
1141
	}
776
		error = vtpci_setup_msix_interrupts(cn, type);
777
778
	return (error);
1142
}
779
}
1143
780
1144
static void
781
int
1145
vtpci_free_interrupts(struct vtpci_softc *sc)
782
vtpci_setup_interrupts(struct vtpci_common *cn, enum intr_type type)
1146
{
783
{
1147
	struct vtpci_interrupt *intr;
784
	device_t dev;
1148
	int i, nvq_intrs;
785
	int attempt, error;
1149
786
1150
	vtpci_free_interrupt(sc, &sc->vtpci_device_interrupt);
787
	dev = cn->vtpci_dev;
1151
788
1152
	if (sc->vtpci_nmsix_resources != 0) {
789
	for (attempt = 0; attempt < 5; attempt++) {
1153
		nvq_intrs = sc->vtpci_nmsix_resources - 1;
790
		/*
1154
		sc->vtpci_nmsix_resources = 0;
791
		 * Start with the most desirable interrupt configuration and
792
		 * fall back towards less desirable ones.
793
		 */
794
		switch (attempt) {
795
		case 0:
796
			error = vtpci_alloc_intr_msix_pervq(cn);
797
			break;
798
		case 1:
799
			error = vtpci_alloc_intr_msix_shared(cn);
800
			break;
801
		case 2:
802
			error = vtpci_alloc_intr_msi(cn);
803
			break;
804
		case 3:
805
			error = vtpci_alloc_intr_intx(cn);
806
			break;
807
		default:
808
			device_printf(dev,
809
			    "exhausted all interrupt allocation attempts\n");
810
			return (ENXIO);
811
		}
1155
812
1156
		intr = sc->vtpci_msix_vq_interrupts;
813
		if (error == 0 && vtpci_setup_intrs(cn, type) == 0)
1157
		if (intr != NULL) {
814
			break;
1158
			for (i = 0; i < nvq_intrs; i++, intr++)
1159
				vtpci_free_interrupt(sc, intr);
1160
815
1161
			free(sc->vtpci_msix_vq_interrupts, M_DEVBUF);
816
		vtpci_cleanup_setup_intr_attempt(cn);
1162
			sc->vtpci_msix_vq_interrupts = NULL;
1163
		}
1164
	}
817
	}
1165
818
1166
	if (sc->vtpci_flags & (VTPCI_FLAG_MSI | VTPCI_FLAG_MSIX))
819
	if (bootverbose) {
1167
		pci_release_msi(sc->vtpci_dev);
820
		if (cn->vtpci_flags & VTPCI_FLAG_INTX)
821
			device_printf(dev, "using legacy interrupt\n");
822
		else if (cn->vtpci_flags & VTPCI_FLAG_MSI)
823
			device_printf(dev, "using MSI interrupt\n");
824
		else if (cn->vtpci_flags & VTPCI_FLAG_SHARED_MSIX)
825
			device_printf(dev, "using shared MSIX interrupts\n");
826
		else
827
			device_printf(dev, "using per VQ MSIX interrupts\n");
828
	}
1168
829
1169
	sc->vtpci_flags &= ~VTPCI_FLAG_ITYPE_MASK;
830
	return (0);
1170
}
831
}
1171
832
1172
static void
833
static int
1173
vtpci_free_virtqueues(struct vtpci_softc *sc)
834
vtpci_reinit_virtqueue(struct vtpci_common *cn, int idx)
1174
{
835
{
1175
	struct vtpci_virtqueue *vqx;
836
	struct vtpci_virtqueue *vqx;
1176
	int idx;
837
	struct virtqueue *vq;
838
	int error;
1177
839
1178
	for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
840
	vqx = &cn->vtpci_vqs[idx];
1179
		vqx = &sc->vtpci_vqs[idx];
841
	vq = vqx->vtv_vq;
1180
842
1181
		vtpci_select_virtqueue(sc, idx);
843
	KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));
1182
		vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 0);
1183
844
1184
		virtqueue_free(vqx->vtv_vq);
845
	error = virtqueue_reinit(vq, vtpci_get_vq_size(cn, idx));
1185
		vqx->vtv_vq = NULL;
846
	if (error == 0)
1186
	}
847
		vtpci_set_vq(cn, vq);
1187
848
1188
	free(sc->vtpci_vqs, M_DEVBUF);
849
	return (error);
1189
	sc->vtpci_vqs = NULL;
1190
	sc->vtpci_nvqs = 0;
1191
}
850
}
1192
851
1193
static void
852
static void
1194
vtpci_release_child_resources(struct vtpci_softc *sc)
853
vtpci_intx_intr(void *xcn)
1195
{
854
{
1196
855
	struct vtpci_common *cn;
1197
	vtpci_free_interrupts(sc);
1198
	vtpci_free_virtqueues(sc);
1199
}
1200
1201
static void
1202
vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *sc)
1203
{
1204
	int idx;
1205
1206
	if (sc->vtpci_flags & VTPCI_FLAG_MSIX) {
1207
		vtpci_write_config_2(sc, VIRTIO_MSI_CONFIG_VECTOR,
1208
		    VIRTIO_MSI_NO_VECTOR);
1209
1210
		for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
1211
			vtpci_select_virtqueue(sc, idx);
1212
			vtpci_write_config_2(sc, VIRTIO_MSI_QUEUE_VECTOR,
1213
			    VIRTIO_MSI_NO_VECTOR);
1214
		}
1215
	}
1216
1217
	vtpci_free_interrupts(sc);
1218
}
1219
1220
static void
1221
vtpci_reset(struct vtpci_softc *sc)
1222
{
1223
1224
	/*
1225
	 * Setting the status to RESET sets the host device to
1226
	 * the original, uninitialized state.
1227
	 */
1228
	vtpci_set_status(sc->vtpci_dev, VIRTIO_CONFIG_STATUS_RESET);
1229
}
1230
1231
static void
1232
vtpci_select_virtqueue(struct vtpci_softc *sc, int idx)
1233
{
1234
1235
	vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, idx);
1236
}
1237
1238
static void
1239
vtpci_legacy_intr(void *xsc)
1240
{
1241
	struct vtpci_softc *sc;
1242
	struct vtpci_virtqueue *vqx;
856
	struct vtpci_virtqueue *vqx;
1243
	int i;
857
	int i;
1244
	uint8_t isr;
858
	uint8_t isr;
1245
859
1246
	sc = xsc;
860
	cn = xcn;
1247
	vqx = &sc->vtpci_vqs[0];
861
	isr = vtpci_read_isr(cn);
1248
862
1249
	/* Reading the ISR also clears it. */
1250
	isr = vtpci_read_config_1(sc, VIRTIO_PCI_ISR);
1251
1252
	if (isr & VIRTIO_PCI_ISR_CONFIG)
863
	if (isr & VIRTIO_PCI_ISR_CONFIG)
1253
		vtpci_config_intr(sc);
864
		vtpci_config_intr(cn);
1254
865
1255
	if (isr & VIRTIO_PCI_ISR_INTR) {
866
	if (isr & VIRTIO_PCI_ISR_INTR) {
1256
		for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) {
867
		vqx = &cn->vtpci_vqs[0];
868
		for (i = 0; i < cn->vtpci_nvqs; i++, vqx++) {
1257
			if (vqx->vtv_no_intr == 0)
869
			if (vqx->vtv_no_intr == 0)
1258
				virtqueue_intr(vqx->vtv_vq);
870
				virtqueue_intr(vqx->vtv_vq);
1259
		}
871
		}
Lines 1261-1277 Link Here
1261
}
873
}
1262
874
1263
static int
875
static int
1264
vtpci_vq_shared_intr_filter(void *xsc)
876
vtpci_vq_shared_intr_filter(void *xcn)
1265
{
877
{
1266
	struct vtpci_softc *sc;
878
	struct vtpci_common *cn;
1267
	struct vtpci_virtqueue *vqx;
879
	struct vtpci_virtqueue *vqx;
1268
	int i, rc;
880
	int i, rc;
1269
881
882
	cn = xcn;
883
	vqx = &cn->vtpci_vqs[0];
1270
	rc = 0;
884
	rc = 0;
1271
	sc = xsc;
1272
	vqx = &sc->vtpci_vqs[0];
1273
885
1274
	for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) {
886
	for (i = 0; i < cn->vtpci_nvqs; i++, vqx++) {
1275
		if (vqx->vtv_no_intr == 0)
887
		if (vqx->vtv_no_intr == 0)
1276
			rc |= virtqueue_intr_filter(vqx->vtv_vq);
888
			rc |= virtqueue_intr_filter(vqx->vtv_vq);
1277
	}
889
	}
Lines 1280-1295 Link Here
1280
}
892
}
1281
893
1282
static void
894
static void
1283
vtpci_vq_shared_intr(void *xsc)
895
vtpci_vq_shared_intr(void *xcn)
1284
{
896
{
1285
	struct vtpci_softc *sc;
897
	struct vtpci_common *cn;
1286
	struct vtpci_virtqueue *vqx;
898
	struct vtpci_virtqueue *vqx;
1287
	int i;
899
	int i;
1288
900
1289
	sc = xsc;
901
	cn = xcn;
1290
	vqx = &sc->vtpci_vqs[0];
902
	vqx = &cn->vtpci_vqs[0];
1291
903
1292
	for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) {
904
	for (i = 0; i < cn->vtpci_nvqs; i++, vqx++) {
1293
		if (vqx->vtv_no_intr == 0)
905
		if (vqx->vtv_no_intr == 0)
1294
			virtqueue_intr(vqx->vtv_vq);
906
			virtqueue_intr(vqx->vtv_vq);
1295
	}
907
	}
Lines 1317-1330 Link Here
1317
}
929
}
1318
930
1319
static void
931
static void
1320
vtpci_config_intr(void *xsc)
932
vtpci_config_intr(void *xcn)
1321
{
933
{
1322
	struct vtpci_softc *sc;
934
	struct vtpci_common *cn;
1323
	device_t child;
935
	device_t child;
1324
936
1325
	sc = xsc;
937
	cn = xcn;
1326
	child = sc->vtpci_child_dev;
938
	child = cn->vtpci_child_dev;
1327
939
1328
	if (child != NULL)
940
	if (child != NULL)
1329
		VIRTIO_CONFIG_CHANGE(child);
941
		VIRTIO_CONFIG_CHANGE(child);
942
}
943
944
static int
945
vtpci_feature_sysctl(struct sysctl_req *req, struct vtpci_common *cn,
946
    uint64_t features)
947
{
948
	struct sbuf *sb;
949
	int error;
950
951
	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
952
	if (sb == NULL)
953
		return (ENOMEM);
954
955
	error = virtio_describe_sbuf(sb, features, cn->vtpci_child_feat_desc);
956
	sbuf_delete(sb);
957
958
	return (error);
959
}
960
961
static int
962
vtpci_host_features_sysctl(SYSCTL_HANDLER_ARGS)
963
{
964
	struct vtpci_common *cn;
965
966
	cn = arg1;
967
968
	return (vtpci_feature_sysctl(req, cn, cn->vtpci_host_features));
969
}
970
971
static int
972
vtpci_negotiated_features_sysctl(SYSCTL_HANDLER_ARGS)
973
{
974
	struct vtpci_common *cn;
975
976
	cn = arg1;
977
978
	return (vtpci_feature_sysctl(req, cn, cn->vtpci_features));
979
}
980
981
static void
982
vtpci_setup_sysctl(struct vtpci_common *cn)
983
{
984
	device_t dev;
985
	struct sysctl_ctx_list *ctx;
986
	struct sysctl_oid *tree;
987
	struct sysctl_oid_list *child;
988
989
	dev = cn->vtpci_dev;
990
	ctx = device_get_sysctl_ctx(dev);
991
	tree = device_get_sysctl_tree(dev);
992
	child = SYSCTL_CHILDREN(tree);
993
994
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nvqs",
995
	    CTLFLAG_RD, &cn->vtpci_nvqs, 0, "Number of virtqueues");
996
997
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "host_features",
998
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, cn, 0,
999
	    vtpci_host_features_sysctl, "A", "Features supported by the host");
1000
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "negotiated_features",
1001
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, cn, 0,
1002
	    vtpci_negotiated_features_sysctl, "A", "Features negotiated");
1330
}
1003
}
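The resulting nodes hang off the device's sysctl tree and can be inspected after attach; assuming the transport registers under the virtio_pci name, for unit 0 this looks like:

	# sysctl dev.virtio_pci.0.nvqs
	# sysctl dev.virtio_pci.0.host_features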
(-)sys/dev/virtio/pci/virtio_pci.h (-61 / +107 lines)
Lines 1-86 Link Here
1
/*-
1
/*-
2
 * Copyright IBM Corp. 2007
2
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3
 *
3
 *
4
 * Authors:
4
 * Copyright (c) 2017, Bryan Venteicher <bryanv@FreeBSD.org>
5
 *  Anthony Liguori  <aliguori@us.ibm.com>
5
 * All rights reserved.
6
 *
6
 *
7
 * This header is BSD licensed so anyone can use the definitions to implement
8
 * compatible drivers/servers.
9
 *
10
 * Redistribution and use in source and binary forms, with or without
7
 * Redistribution and use in source and binary forms, with or without
11
 * modification, are permitted provided that the following conditions
8
 * modification, are permitted provided that the following conditions
12
 * are met:
9
 * are met:
13
 * 1. Redistributions of source code must retain the above copyright
10
 * 1. Redistributions of source code must retain the above copyright
14
 *    notice, this list of conditions and the following disclaimer.
11
 *    notice unmodified, this list of conditions, and the following
12
 *    disclaimer.
15
 * 2. Redistributions in binary form must reproduce the above copyright
13
 * 2. Redistributions in binary form must reproduce the above copyright
16
 *    notice, this list of conditions and the following disclaimer in the
14
 *    notice, this list of conditions and the following disclaimer in the
17
 *    documentation and/or other materials provided with the distribution.
15
 *    documentation and/or other materials provided with the distribution.
18
 * 3. Neither the name of IBM nor the names of its contributors
19
 *    may be used to endorse or promote products derived from this software
20
 *    without specific prior written permission.
21
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
25
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31
 * SUCH DAMAGE.
32
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 *
33
 * $FreeBSD: releng/11.3/sys/dev/virtio/pci/virtio_pci.h 331722 2018-03-29 02:50:57Z eadler $
28
 * $FreeBSD: releng/11.3/sys/dev/virtio/pci/virtio_pci.h 331722 2018-03-29 02:50:57Z eadler $
34
 */
29
 */
35
30
36
#ifndef _VIRTIO_PCI_H
31
#ifndef _VIRTIO_PCI_H
37
#define _VIRTIO_PCI_H
32
#define _VIRTIO_PCI_H
38
33
39
/* VirtIO PCI vendor/device ID. */
34
struct vtpci_interrupt {
40
#define VIRTIO_PCI_VENDORID	0x1AF4
35
	struct resource		*vti_irq;
41
#define VIRTIO_PCI_DEVICEID_MIN	0x1000
36
	int			 vti_rid;
42
#define VIRTIO_PCI_DEVICEID_MAX	0x103F
37
	void			*vti_handler;
38
};
43
39
44
/* VirtIO ABI version, this must match exactly. */
40
struct vtpci_virtqueue {
45
#define VIRTIO_PCI_ABI_VERSION	0
41
	struct virtqueue	*vtv_vq;
42
	int			 vtv_no_intr;
43
	int			 vtv_notify_offset;
44
};
46
45
47
/*
46
struct vtpci_common {
48
 * VirtIO Header, located in BAR 0.
47
	device_t			 vtpci_dev;
49
 */
48
	uint64_t			 vtpci_host_features;
50
#define VIRTIO_PCI_HOST_FEATURES  0  /* host's supported features (32bit, RO)*/
49
	uint64_t			 vtpci_features;
51
#define VIRTIO_PCI_GUEST_FEATURES 4  /* guest's supported features (32, RW) */
50
	struct vtpci_virtqueue		*vtpci_vqs;
52
#define VIRTIO_PCI_QUEUE_PFN      8  /* physical address of VQ (32, RW) */
51
	int				 vtpci_nvqs;
53
#define VIRTIO_PCI_QUEUE_NUM      12 /* number of ring entries (16, RO) */
54
#define VIRTIO_PCI_QUEUE_SEL      14 /* current VQ selection (16, RW) */
55
#define VIRTIO_PCI_QUEUE_NOTIFY	  16 /* notify host regarding VQ (16, RW) */
56
#define VIRTIO_PCI_STATUS         18 /* device status register (8, RW) */
57
#define VIRTIO_PCI_ISR            19 /* interrupt status register, reading
58
				      * also clears the register (8, RO) */
59
/* Only if MSIX is enabled: */
60
#define VIRTIO_MSI_CONFIG_VECTOR  20 /* configuration change vector (16, RW) */
61
#define VIRTIO_MSI_QUEUE_VECTOR   22 /* vector for selected VQ notifications
62
					(16, RW) */
63
52
64
/* The bit of the ISR which indicates a device has an interrupt. */
53
	uint32_t			 vtpci_flags;
65
#define VIRTIO_PCI_ISR_INTR	0x1
54
#define VTPCI_FLAG_NO_MSI		0x0001
66
/* The bit of the ISR which indicates a device configuration change. */
55
#define VTPCI_FLAG_NO_MSIX		0x0002
67
#define VIRTIO_PCI_ISR_CONFIG	0x2
56
#define VTPCI_FLAG_MODERN		0x0004
68
/* Vector value used to disable MSI for queue. */
57
#define VTPCI_FLAG_INTX			0x1000
69
#define VIRTIO_MSI_NO_VECTOR	0xFFFF
58
#define VTPCI_FLAG_MSI			0x2000
59
#define VTPCI_FLAG_MSIX			0x4000
60
#define VTPCI_FLAG_SHARED_MSIX		0x8000
61
#define VTPCI_FLAG_ITYPE_MASK		0xF000
70
62
71
/*
63
	/* The VirtIO PCI "bus" will only ever have one child. */
72
 * The remaining space is defined by each driver as the per-driver
64
	device_t			 vtpci_child_dev;
73
 * configuration space.
65
	struct virtio_feature_desc	*vtpci_child_feat_desc;
74
 */
75
#define VIRTIO_PCI_CONFIG_OFF(msix_enabled)     ((msix_enabled) ? 24 : 20)
76
66
77
/*
67
	/*
78
 * How many bits to shift physical queue address written to QUEUE_PFN.
68
	 * Ideally, each virtqueue that the driver provides a callback for will
79
 * 12 is historical, and due to x86 page size.
69
	 * receive its own MSIX vector. If there are not sufficient vectors
80
 */
70
	 * available, then attempt to have all the VQs share one vector. For
81
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT	12
71
	 * MSIX, the configuration changed notifications must be on their own
72
	 * vector.
73
	 *
74
	 * If MSIX is not available, attempt to have the whole device share
75
	 * one MSI vector, and then, finally, one intx interrupt.
76
	 */
77
	struct vtpci_interrupt		 vtpci_device_interrupt;
78
	struct vtpci_interrupt		*vtpci_msix_vq_interrupts;
79
	int				 vtpci_nmsix_resources;
80
};
82
81
83
/* The alignment to use between consumer and producer parts of vring. */
82
extern int vtpci_disable_msix;
84
#define VIRTIO_PCI_VRING_ALIGN	4096
83
84
static inline device_t
85
vtpci_child_device(struct vtpci_common *cn)
86
{
87
	return (cn->vtpci_child_dev);
88
}
89
90
static inline bool
91
vtpci_is_msix_available(struct vtpci_common *cn)
92
{
93
	return ((cn->vtpci_flags & VTPCI_FLAG_NO_MSIX) == 0);
94
}
95
96
static inline bool
97
vtpci_is_msix_enabled(struct vtpci_common *cn)
98
{
99
	return ((cn->vtpci_flags & VTPCI_FLAG_MSIX) != 0);
100
}
101
102
static inline bool
103
vtpci_is_modern(struct vtpci_common *cn)
104
{
105
	return ((cn->vtpci_flags & VTPCI_FLAG_MODERN) != 0);
106
}
107
108
static inline int
109
vtpci_virtqueue_count(struct vtpci_common *cn)
110
{
111
	return (cn->vtpci_nvqs);
112
}
113
114
void	vtpci_init(struct vtpci_common *cn, device_t dev, bool modern);
115
int	vtpci_add_child(struct vtpci_common *cn);
116
int	vtpci_delete_child(struct vtpci_common *cn);
117
void	vtpci_child_detached(struct vtpci_common *cn);
118
int	vtpci_reinit(struct vtpci_common *cn);
119
120
uint64_t vtpci_negotiate_features(struct vtpci_common *cn,
121
	     uint64_t child_features, uint64_t host_features);
122
int	 vtpci_with_feature(struct vtpci_common *cn, uint64_t feature);
123
124
int	vtpci_read_ivar(struct vtpci_common *cn, int index, uintptr_t *result);
125
int	vtpci_write_ivar(struct vtpci_common *cn, int index, uintptr_t value);
126
127
int	vtpci_alloc_virtqueues(struct vtpci_common *cn, int flags, int nvqs,
128
	    struct vq_alloc_info *vq_info);
129
int	vtpci_setup_interrupts(struct vtpci_common *cn, enum intr_type type);
130
void	vtpci_release_child_resources(struct vtpci_common *cn);
85
131
86
#endif /* _VIRTIO_PCI_H */
132
#endif /* _VIRTIO_PCI_H */
(-)sys/dev/virtio/pci/virtio_pci_if.m (+71 lines)
Line 0 Link Here
1
#-
2
# Copyright (c) 2017, Bryan Venteicher <bryanv@FreeBSD.org>
3
# All rights reserved.
4
#
5
# Redistribution and use in source and binary forms, with or without
6
# modification, are permitted provided that the following conditions
7
# are met:
8
# 1. Redistributions of source code must retain the above copyright
9
#    notice, this list of conditions and the following disclaimer.
10
# 2. Redistributions in binary form must reproduce the above copyright
11
#    notice, this list of conditions and the following disclaimer in the
12
#    documentation and/or other materials provided with the distribution.
13
#
14
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24
# SUCH DAMAGE.
25
#
26
# $FreeBSD$
27
28
#include <sys/bus.h>
29
#include <machine/bus.h>
30
31
INTERFACE virtio_pci;
32
33
HEADER {
34
struct virtqueue;
35
struct vtpci_interrupt;
36
};
37
38
METHOD uint8_t read_isr {
39
	device_t	dev;
40
};
41
42
METHOD uint16_t get_vq_size {
43
	device_t	dev;
44
	int		idx;
45
};
46
47
METHOD bus_size_t get_vq_notify_off {
48
	device_t	dev;
49
	int		idx;
50
};
51
52
METHOD void set_vq {
53
	device_t		dev;
54
	struct virtqueue	*vq;
55
};
56
57
METHOD void disable_vq {
58
	device_t		 dev;
59
	int			 idx;
60
};
61
62
METHOD int register_cfg_msix {
63
	device_t	dev;
64
	struct vtpci_interrupt *intr;
65
};
66
67
METHOD int register_vq_msix {
68
	device_t		dev;
69
	int			idx;
70
	struct vtpci_interrupt	*intr;
71
};
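Each METHOD above expands into a kobj dispatch stub; the thin wrappers at the top of virtio_pci.c call through them, so the common code never names a transport directly. For example:

	/* Dispatches to the transport implementation, e.g. vtpci_legacy_read_isr(). */
	isr = VIRTIO_PCI_READ_ISR(cn->vtpci_dev);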
(-)sys/dev/virtio/pci/virtio_pci_legacy.c (+714 lines)
Line 0 Link Here
1
/*-
2
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3
 *
4
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
5
 * All rights reserved.
6
 *
7
 * Redistribution and use in source and binary forms, with or without
8
 * modification, are permitted provided that the following conditions
9
 * are met:
10
 * 1. Redistributions of source code must retain the above copyright
11
 *    notice unmodified, this list of conditions, and the following
12
 *    disclaimer.
13
 * 2. Redistributions in binary form must reproduce the above copyright
14
 *    notice, this list of conditions and the following disclaimer in the
15
 *    documentation and/or other materials provided with the distribution.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
28
29
/* Driver for the legacy VirtIO PCI interface. */
30
31
#include <sys/cdefs.h>
32
__FBSDID("$FreeBSD$");
33
34
#include <sys/param.h>
35
#include <sys/systm.h>
36
#include <sys/bus.h>
37
#include <sys/kernel.h>
38
#include <sys/module.h>
39
40
#include <machine/bus.h>
41
#include <machine/resource.h>
42
#include <sys/bus.h>
43
#include <sys/rman.h>
44
45
#include <dev/pci/pcivar.h>
46
#include <dev/pci/pcireg.h>
47
48
#include <dev/virtio/virtio.h>
49
#include <dev/virtio/virtqueue.h>
50
#include <dev/virtio/pci/virtio_pci.h>
51
#include <dev/virtio/pci/virtio_pci_legacy_var.h>
52
53
#include "virtio_bus_if.h"
54
#include "virtio_pci_if.h"
55
#include "virtio_if.h"
56
57
struct vtpci_legacy_softc {
58
	device_t			 vtpci_dev;
59
	struct vtpci_common		 vtpci_common;
60
	struct resource			*vtpci_res;
61
	struct resource			*vtpci_msix_res;
62
};
63
64
static int	vtpci_legacy_probe(device_t);
65
static int	vtpci_legacy_attach(device_t);
66
static int	vtpci_legacy_detach(device_t);
67
static int	vtpci_legacy_suspend(device_t);
68
static int	vtpci_legacy_resume(device_t);
69
static int	vtpci_legacy_shutdown(device_t);
70
71
static void	vtpci_legacy_driver_added(device_t, driver_t *);
72
static void	vtpci_legacy_child_detached(device_t, device_t);
73
static int	vtpci_legacy_read_ivar(device_t, device_t, int, uintptr_t *);
74
static int	vtpci_legacy_write_ivar(device_t, device_t, int, uintptr_t);
75
76
static uint8_t	vtpci_legacy_read_isr(device_t);
77
static uint16_t	vtpci_legacy_get_vq_size(device_t, int);
78
static bus_size_t vtpci_legacy_get_vq_notify_off(device_t, int);
79
static void	vtpci_legacy_set_vq(device_t, struct virtqueue *);
80
static void	vtpci_legacy_disable_vq(device_t, int);
81
static int	vtpci_legacy_register_cfg_msix(device_t,
82
		    struct vtpci_interrupt *);
83
static int	vtpci_legacy_register_vq_msix(device_t, int idx,
84
		    struct vtpci_interrupt *);
85
86
static uint64_t	vtpci_legacy_negotiate_features(device_t, uint64_t);
87
static int	vtpci_legacy_with_feature(device_t, uint64_t);
88
static int	vtpci_legacy_alloc_virtqueues(device_t, int, int,
89
		    struct vq_alloc_info *);
90
static int	vtpci_legacy_setup_interrupts(device_t, enum intr_type);
91
static void	vtpci_legacy_stop(device_t);
92
static int	vtpci_legacy_reinit(device_t, uint64_t);
93
static void	vtpci_legacy_reinit_complete(device_t);
94
static void	vtpci_legacy_notify_vq(device_t, uint16_t, bus_size_t);
95
static void	vtpci_legacy_read_dev_config(device_t, bus_size_t, void *, int);
96
static void	vtpci_legacy_write_dev_config(device_t, bus_size_t, void *, int);
97
98
static int	vtpci_legacy_alloc_resources(struct vtpci_legacy_softc *);
99
static void	vtpci_legacy_free_resources(struct vtpci_legacy_softc *);
100
101
static void	vtpci_legacy_probe_and_attach_child(struct vtpci_legacy_softc *);
102
103
static uint8_t	vtpci_legacy_get_status(struct vtpci_legacy_softc *);
104
static void	vtpci_legacy_set_status(struct vtpci_legacy_softc *, uint8_t);
105
static void	vtpci_legacy_select_virtqueue(struct vtpci_legacy_softc *, int);
106
static void	vtpci_legacy_reset(struct vtpci_legacy_softc *);
107
108
#define VIRTIO_PCI_LEGACY_CONFIG(_sc) \
109
    VIRTIO_PCI_CONFIG_OFF(vtpci_is_msix_enabled(&(_sc)->vtpci_common))
110
111
/*
112
 * I/O port read/write wrappers.
113
 */
114
#define vtpci_legacy_read_config_1(sc, o)	bus_read_1((sc)->vtpci_res, (o))
115
#define vtpci_legacy_read_config_2(sc, o)	bus_read_2((sc)->vtpci_res, (o))
116
#define vtpci_legacy_read_config_4(sc, o)	bus_read_4((sc)->vtpci_res, (o))
117
#define vtpci_legacy_write_config_1(sc, o, v) \
118
    bus_write_1((sc)->vtpci_res, (o), (v))
119
#define vtpci_legacy_write_config_2(sc, o, v) \
120
    bus_write_2((sc)->vtpci_res, (o), (v))
121
#define vtpci_legacy_write_config_4(sc, o, v) \
122
    bus_write_4((sc)->vtpci_res, (o), (v))
123
124
static device_method_t vtpci_legacy_methods[] = {
125
	/* Device interface. */
126
	DEVMETHOD(device_probe,			  vtpci_legacy_probe),
127
	DEVMETHOD(device_attach,		  vtpci_legacy_attach),
128
	DEVMETHOD(device_detach,		  vtpci_legacy_detach),
129
	DEVMETHOD(device_suspend,		  vtpci_legacy_suspend),
130
	DEVMETHOD(device_resume,		  vtpci_legacy_resume),
131
	DEVMETHOD(device_shutdown,		  vtpci_legacy_shutdown),
132
133
	/* Bus interface. */
134
	DEVMETHOD(bus_driver_added,		  vtpci_legacy_driver_added),
135
	DEVMETHOD(bus_child_detached,		  vtpci_legacy_child_detached),
136
	DEVMETHOD(bus_read_ivar,		  vtpci_legacy_read_ivar),
137
	DEVMETHOD(bus_write_ivar,		  vtpci_legacy_write_ivar),
138
139
	/* VirtIO PCI interface. */
140
	DEVMETHOD(virtio_pci_read_isr,		 vtpci_legacy_read_isr),
141
	DEVMETHOD(virtio_pci_get_vq_size,	 vtpci_legacy_get_vq_size),
142
	DEVMETHOD(virtio_pci_get_vq_notify_off,	 vtpci_legacy_get_vq_notify_off),
143
	DEVMETHOD(virtio_pci_set_vq,		 vtpci_legacy_set_vq),
144
	DEVMETHOD(virtio_pci_disable_vq,	 vtpci_legacy_disable_vq),
145
	DEVMETHOD(virtio_pci_register_cfg_msix,  vtpci_legacy_register_cfg_msix),
146
	DEVMETHOD(virtio_pci_register_vq_msix,	 vtpci_legacy_register_vq_msix),
147
148
	/* VirtIO bus interface. */
149
	DEVMETHOD(virtio_bus_negotiate_features,  vtpci_legacy_negotiate_features),
150
	DEVMETHOD(virtio_bus_with_feature,	  vtpci_legacy_with_feature),
151
	DEVMETHOD(virtio_bus_alloc_virtqueues,	  vtpci_legacy_alloc_virtqueues),
152
	DEVMETHOD(virtio_bus_setup_intr,	  vtpci_legacy_setup_interrupts),
153
	DEVMETHOD(virtio_bus_stop,		  vtpci_legacy_stop),
154
	DEVMETHOD(virtio_bus_reinit,		  vtpci_legacy_reinit),
155
	DEVMETHOD(virtio_bus_reinit_complete,	  vtpci_legacy_reinit_complete),
156
	DEVMETHOD(virtio_bus_notify_vq,		  vtpci_legacy_notify_vq),
157
	DEVMETHOD(virtio_bus_read_device_config,  vtpci_legacy_read_dev_config),
158
	DEVMETHOD(virtio_bus_write_device_config, vtpci_legacy_write_dev_config),
159
160
	DEVMETHOD_END
161
};
162
163
static driver_t vtpci_legacy_driver = {
164
	.name = "vtpcil",
165
	.methods = vtpci_legacy_methods,
166
	.size = sizeof(struct vtpci_legacy_softc)
167
};
168
169
devclass_t vtpci_legacy_devclass;
170
171
DRIVER_MODULE(vtpcil, pci, vtpci_legacy_driver, vtpci_legacy_devclass, 0, 0);
172
173
static int
174
vtpci_legacy_probe(device_t dev)
175
{
176
	char desc[64];
177
	const char *name;
178
179
	if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
180
		return (ENXIO);
181
182
	if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN ||
183
	    pci_get_device(dev) > VIRTIO_PCI_DEVICEID_LEGACY_MAX)
184
		return (ENXIO);
185
186
	if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION)
187
		return (ENXIO);
188
189
	name = virtio_device_name(pci_get_subdevice(dev));
190
	if (name == NULL)
191
		name = "Unknown";
192
193
	snprintf(desc, sizeof(desc), "VirtIO PCI (legacy) %s adapter", name);
194
	device_set_desc_copy(dev, desc);
195
196
	/* Defer to the modern driver for transitional devices. */
197
	return (BUS_PROBE_LOW_PRIORITY);
198
}
199
200
static int
201
vtpci_legacy_attach(device_t dev)
202
{
203
	struct vtpci_legacy_softc *sc;
204
	int error;
205
206
	sc = device_get_softc(dev);
207
	sc->vtpci_dev = dev;
208
	vtpci_init(&sc->vtpci_common, dev, false);
209
210
	error = vtpci_legacy_alloc_resources(sc);
211
	if (error) {
212
		device_printf(dev, "cannot map I/O space\n");
213
		return (error);
214
	}
215
216
	vtpci_legacy_reset(sc);
217
218
	/* Tell the host we've noticed this device. */
219
	vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
220
221
	error = vtpci_add_child(&sc->vtpci_common);
222
	if (error)
223
		goto fail;
224
225
	vtpci_legacy_probe_and_attach_child(sc);
226
227
	return (0);
228
229
fail:
230
	vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED);
231
	vtpci_legacy_detach(dev);
232
233
	return (error);
234
}
235
236
static int
237
vtpci_legacy_detach(device_t dev)
238
{
239
	struct vtpci_legacy_softc *sc;
240
	int error;
241
242
	sc = device_get_softc(dev);
243
244
	error = vtpci_delete_child(&sc->vtpci_common);
245
	if (error)
246
		return (error);
247
248
	vtpci_legacy_reset(sc);
249
	vtpci_legacy_free_resources(sc);
250
251
	return (0);
252
}
253
254
static int
255
vtpci_legacy_suspend(device_t dev)
256
{
257
	return (bus_generic_suspend(dev));
258
}
259
260
static int
261
vtpci_legacy_resume(device_t dev)
262
{
263
	return (bus_generic_resume(dev));
264
}
265
266
static int
267
vtpci_legacy_shutdown(device_t dev)
268
{
269
	(void) bus_generic_shutdown(dev);
270
	/* Forcibly stop the host device. */
271
	vtpci_legacy_stop(dev);
272
273
	return (0);
274
}
275
276
static void
277
vtpci_legacy_driver_added(device_t dev, driver_t *driver)
278
{
279
	vtpci_legacy_probe_and_attach_child(device_get_softc(dev));
280
}
281
282
static void
283
vtpci_legacy_child_detached(device_t dev, device_t child)
284
{
285
	struct vtpci_legacy_softc *sc;
286
287
	sc = device_get_softc(dev);
288
289
	vtpci_legacy_reset(sc);
290
	vtpci_child_detached(&sc->vtpci_common);
291
292
	/* After the reset, retell the host we've noticed this device. */
293
	vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
294
}
295
296
static int
297
vtpci_legacy_read_ivar(device_t dev, device_t child, int index,
298
    uintptr_t *result)
299
{
300
	struct vtpci_legacy_softc *sc;
301
	struct vtpci_common *cn;
302
303
	sc = device_get_softc(dev);
304
	cn = &sc->vtpci_common;
305
306
	if (vtpci_child_device(cn) != child)
307
		return (ENOENT);
308
309
	switch (index) {
310
	case VIRTIO_IVAR_DEVTYPE:
311
		*result = pci_get_subdevice(dev);
312
		break;
313
	default:
314
		return (vtpci_read_ivar(cn, index, result));
315
	}
316
317
	return (0);
318
}
319
320
static int
321
vtpci_legacy_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
322
{
323
	struct vtpci_legacy_softc *sc;
324
	struct vtpci_common *cn;
325
326
	sc = device_get_softc(dev);
327
	cn = &sc->vtpci_common;
328
329
	if (vtpci_child_device(cn) != child)
330
		return (ENOENT);
331
332
	switch (index) {
333
	default:
334
		return (vtpci_write_ivar(cn, index, value));
335
	}
336
337
	return (0);
338
}
339
340
static uint64_t
341
vtpci_legacy_negotiate_features(device_t dev, uint64_t child_features)
342
{
343
	struct vtpci_legacy_softc *sc;
344
	uint64_t host_features, features;
345
346
	sc = device_get_softc(dev);
347
	host_features = vtpci_legacy_read_config_4(sc, VIRTIO_PCI_HOST_FEATURES);
348
349
	features = vtpci_negotiate_features(&sc->vtpci_common,
350
	    child_features, host_features);
351
	vtpci_legacy_write_config_4(sc, VIRTIO_PCI_GUEST_FEATURES, features);
352
353
	return (features);
354
}
355
356
static int
357
vtpci_legacy_with_feature(device_t dev, uint64_t feature)
358
{
359
	struct vtpci_legacy_softc *sc;
360
361
	sc = device_get_softc(dev);
362
363
	return (vtpci_with_feature(&sc->vtpci_common, feature));
364
}
365
366
static int
367
vtpci_legacy_alloc_virtqueues(device_t dev, int flags, int nvqs,
368
    struct vq_alloc_info *vq_info)
369
{
370
	struct vtpci_legacy_softc *sc;
371
	struct vtpci_common *cn;
372
373
	sc = device_get_softc(dev);
374
	cn = &sc->vtpci_common;
375
376
	return (vtpci_alloc_virtqueues(cn, flags, nvqs, vq_info));
377
}
378
379
static int
380
vtpci_legacy_setup_interrupts(device_t dev, enum intr_type type)
381
{
382
	struct vtpci_legacy_softc *sc;
383
384
	sc = device_get_softc(dev);
385
386
	return (vtpci_setup_interrupts(&sc->vtpci_common, type));
387
}
388
389
static void
390
vtpci_legacy_stop(device_t dev)
391
{
392
	vtpci_legacy_reset(device_get_softc(dev));
393
}
394
395
static int
396
vtpci_legacy_reinit(device_t dev, uint64_t features)
397
{
398
	struct vtpci_legacy_softc *sc;
399
	struct vtpci_common *cn;
400
	int error;
401
402
	sc = device_get_softc(dev);
403
	cn = &sc->vtpci_common;
404
405
	/*
406
	 * Redrive the device initialization. This is a bit of an abuse of
407
	 * the specification, but VirtualBox, QEMU/KVM, and bhyve seem to
408
	 * play nice.
409
	 *
410
	 * We do not allow the host device to change from what was originally
411
	 * negotiated beyond what the guest driver changed. MSIX state should
412
	 * not change, number of virtqueues and their size remain the same, etc.
413
	 * This will need to be rethought when we want to support migration.
414
	 */
415
416
	if (vtpci_legacy_get_status(sc) != VIRTIO_CONFIG_STATUS_RESET)
417
		vtpci_legacy_stop(dev);
418
419
	/*
420
	 * Quickly drive the status through ACK and DRIVER. The device does
421
	 * not become usable again until DRIVER_OK is set by reinit_complete.
422
	 */
423
	vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
424
	vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER);
425
426
	vtpci_legacy_negotiate_features(dev, features);
427
428
	error = vtpci_reinit(cn);
429
	if (error)
430
		return (error);
431
432
	return (0);
433
}
434
435
static void
436
vtpci_legacy_reinit_complete(device_t dev)
437
{
438
	struct vtpci_legacy_softc *sc;
439
440
	sc = device_get_softc(dev);
441
442
	vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK);
443
}
444
445
static void
446
vtpci_legacy_notify_vq(device_t dev, uint16_t queue, bus_size_t offset)
447
{
448
	struct vtpci_legacy_softc *sc;
449
450
	sc = device_get_softc(dev);
451
	MPASS(offset == VIRTIO_PCI_QUEUE_NOTIFY);
452
453
	vtpci_legacy_write_config_2(sc, offset, queue);
454
}
455
456
static uint8_t
457
vtpci_legacy_get_status(struct vtpci_legacy_softc *sc)
458
{
459
	return (vtpci_legacy_read_config_1(sc, VIRTIO_PCI_STATUS));
460
}
461
462
static void
463
vtpci_legacy_set_status(struct vtpci_legacy_softc *sc, uint8_t status)
464
{
465
	if (status != VIRTIO_CONFIG_STATUS_RESET)
466
		status |= vtpci_legacy_get_status(sc);
467
468
	vtpci_legacy_write_config_1(sc, VIRTIO_PCI_STATUS, status);
469
}
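Since every non-RESET write is OR'd with the current value, the status register only accumulates bits; writing VIRTIO_CONFIG_STATUS_RESET (zero) is the one way to clear it. A sketch of the progression during a normal attach, using the helpers above:

vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);       /* status: ACK */
vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER);    /* ACK|DRIVER */
vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK); /* ACK|DRIVER|DRIVER_OK */
vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_RESET);     /* status: 0 */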
470
471
static void
472
vtpci_legacy_read_dev_config(device_t dev, bus_size_t offset,
473
    void *dst, int length)
474
{
475
	struct vtpci_legacy_softc *sc;
476
	bus_size_t off;
477
	uint8_t *d;
478
	int size;
479
480
	sc = device_get_softc(dev);
481
	off = VIRTIO_PCI_LEGACY_CONFIG(sc) + offset;
482
483
	for (d = dst; length > 0; d += size, off += size, length -= size) {
484
		if (length >= 4) {
485
			size = 4;
486
			*(uint32_t *)d = vtpci_legacy_read_config_4(sc, off);
487
		} else if (length >= 2) {
488
			size = 2;
489
			*(uint16_t *)d = vtpci_legacy_read_config_2(sc, off);
490
		} else {
491
			size = 1;
492
			*d = vtpci_legacy_read_config_1(sc, off);
493
		}
494
	}
495
}
496
497
static void
498
vtpci_legacy_write_dev_config(device_t dev, bus_size_t offset,
499
    void *src, int length)
500
{
501
	struct vtpci_legacy_softc *sc;
502
	bus_size_t off;
503
	uint8_t *s;
504
	int size;
505
506
	sc = device_get_softc(dev);
507
	off = VIRTIO_PCI_LEGACY_CONFIG(sc) + offset;
508
509
	for (s = src; length > 0; s += size, off += size, length -= size) {
510
		if (length >= 4) {
511
			size = 4;
512
			vtpci_legacy_write_config_4(sc, off, *(uint32_t *)s);
513
		} else if (length >= 2) {
514
			size = 2;
515
			vtpci_legacy_write_config_2(sc, off, *(uint16_t *)s);
516
		} else {
517
			size = 1;
518
			vtpci_legacy_write_config_1(sc, off, *s);
519
		}
520
	}
521
}
522
523
static int
524
vtpci_legacy_alloc_resources(struct vtpci_legacy_softc *sc)
525
{
526
	device_t dev;
527
	int rid;
528
529
	dev = sc->vtpci_dev;
530
	
531
	rid = PCIR_BAR(0);
532
	if ((sc->vtpci_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
533
	    &rid, RF_ACTIVE)) == NULL)
534
		return (ENXIO);
535
536
	if (vtpci_is_msix_available(&sc->vtpci_common)) {
537
		rid = PCIR_BAR(1);
538
		if ((sc->vtpci_msix_res = bus_alloc_resource_any(dev,
539
		    SYS_RES_MEMORY, &rid, RF_ACTIVE)) == NULL)
540
			return (ENXIO);
541
	}
542
543
	return (0);
544
}
545
546
static void
547
vtpci_legacy_free_resources(struct vtpci_legacy_softc *sc)
548
{
549
	device_t dev;
550
551
	dev = sc->vtpci_dev;
552
553
	if (sc->vtpci_msix_res != NULL) {
554
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1),
555
		    sc->vtpci_msix_res);
556
		sc->vtpci_msix_res = NULL;
557
	}
558
559
	if (sc->vtpci_res != NULL) {
560
		bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0),
561
		    sc->vtpci_res);
562
		sc->vtpci_res = NULL;
563
	}
564
}
565
566
static void
567
vtpci_legacy_probe_and_attach_child(struct vtpci_legacy_softc *sc)
568
{
569
	device_t dev, child;
570
571
	dev = sc->vtpci_dev;
572
	child = vtpci_child_device(&sc->vtpci_common);
573
574
	if (child == NULL || device_get_state(child) != DS_NOTPRESENT)
575
		return;
576
577
	if (device_probe(child) != 0)
578
		return;
579
580
	vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER);
581
582
	if (device_attach(child) != 0) {
583
		vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED);
584
		/* Reset status for future attempt. */
585
		vtpci_legacy_child_detached(dev, child);
586
	} else {
587
		vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK);
588
		VIRTIO_ATTACH_COMPLETED(child);
589
	}
590
}
591
592
static int
593
vtpci_legacy_register_msix(struct vtpci_legacy_softc *sc, int offset,
594
    struct vtpci_interrupt *intr)
595
{
596
	device_t dev;
597
	uint16_t vector;
598
599
	dev = sc->vtpci_dev;
600
601
	if (intr != NULL) {
602
		/* Map from guest rid to host vector. */
603
		vector = intr->vti_rid - 1;
604
	} else
605
		vector = VIRTIO_MSI_NO_VECTOR;
606
607
	vtpci_legacy_write_config_2(sc, offset, vector);
608
	return (vtpci_legacy_read_config_2(sc, offset) == vector ? 0 : ENODEV);
609
}
610
611
static int
612
vtpci_legacy_register_cfg_msix(device_t dev, struct vtpci_interrupt *intr)
613
{
614
	struct vtpci_legacy_softc *sc;
615
	int error;
616
617
	sc = device_get_softc(dev);
618
619
	error = vtpci_legacy_register_msix(sc, VIRTIO_MSI_CONFIG_VECTOR, intr);
620
	if (error) {
621
		device_printf(dev,
622
		    "unable to register config MSIX interrupt\n");
623
		return (error);
624
	}
625
626
	return (0);
627
}
628
629
static int
630
vtpci_legacy_register_vq_msix(device_t dev, int idx,
631
    struct vtpci_interrupt *intr)
632
{
633
	struct vtpci_legacy_softc *sc;
634
	int error;
635
636
	sc = device_get_softc(dev);
637
638
	vtpci_legacy_select_virtqueue(sc, idx);
639
	error = vtpci_legacy_register_msix(sc, VIRTIO_MSI_QUEUE_VECTOR, intr);
640
	if (error) {
641
		device_printf(dev,
642
		    "unable to register virtqueue MSIX interrupt\n");
643
		return (error);
644
	}
645
646
	return (0);
647
}
648
649
static void
650
vtpci_legacy_reset(struct vtpci_legacy_softc *sc)
651
{
652
	/*
653
	 * Setting the status to RESET sets the host device to the
654
	 * original, uninitialized state.
655
	 */
656
	vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_RESET);
657
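	/* Read the status back to ensure the reset has taken effect. */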
	(void) vtpci_legacy_get_status(sc);
658
}
659
660
static void
661
vtpci_legacy_select_virtqueue(struct vtpci_legacy_softc *sc, int idx)
662
{
663
	vtpci_legacy_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, idx);
664
}
665
666
static uint8_t
667
vtpci_legacy_read_isr(device_t dev)
668
{
669
	struct vtpci_legacy_softc *sc;
670
671
	sc = device_get_softc(dev);
672
673
	return (vtpci_legacy_read_config_1(sc, VIRTIO_PCI_ISR));
674
}
675
676
static uint16_t
677
vtpci_legacy_get_vq_size(device_t dev, int idx)
678
{
679
	struct vtpci_legacy_softc *sc;
680
681
	sc = device_get_softc(dev);
682
683
	vtpci_legacy_select_virtqueue(sc, idx);
684
	return (vtpci_legacy_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM));
685
}
686
687
static bus_size_t
688
vtpci_legacy_get_vq_notify_off(device_t dev, int idx)
689
{
690
	return (VIRTIO_PCI_QUEUE_NOTIFY);
691
}
692
693
static void
694
vtpci_legacy_set_vq(device_t dev, struct virtqueue *vq)
695
{
696
	struct vtpci_legacy_softc *sc;
697
698
	sc = device_get_softc(dev);
699
700
	vtpci_legacy_select_virtqueue(sc, virtqueue_index(vq));
701
	vtpci_legacy_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
702
	    virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
703
}
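A worked example of the QUEUE_PFN write above, assuming a ring allocated at physical address 0x1f4000: only the page frame number is transferred, which is why the ring must be VIRTIO_PCI_VRING_ALIGN (4096) aligned; any low-order bits would be lost in the shift.

	uint32_t pfn;

	pfn = 0x1f4000 >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;	/* 0x1f4 */
	/* The host reconstructs the address as pfn << 12 == 0x1f4000. */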
704
705
static void
706
vtpci_legacy_disable_vq(device_t dev, int idx)
707
{
708
	struct vtpci_legacy_softc *sc;
709
710
	sc = device_get_softc(dev);
711
712
	vtpci_legacy_select_virtqueue(sc, idx);
713
	vtpci_legacy_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 0);
714
}
(-)sys/dev/virtio/pci/virtio_pci_legacy_var.h (+78 lines)
Line 0 Link Here
1
/*-
2
 * SPDX-License-Identifier: BSD-3-Clause
3
 *
4
 * Copyright IBM Corp. 2007
5
 *
6
 * Authors:
7
 *  Anthony Liguori  <aliguori@us.ibm.com>
8
 *
9
 * This header is BSD licensed so anyone can use the definitions to implement
10
 * compatible drivers/servers.
11
 *
12
 * Redistribution and use in source and binary forms, with or without
13
 * modification, are permitted provided that the following conditions
14
 * are met:
15
 * 1. Redistributions of source code must retain the above copyright
16
 *    notice, this list of conditions and the following disclaimer.
17
 * 2. Redistributions in binary form must reproduce the above copyright
18
 *    notice, this list of conditions and the following disclaimer in the
19
 *    documentation and/or other materials provided with the distribution.
20
 * 3. Neither the name of IBM nor the names of its contributors
21
 *    may be used to endorse or promote products derived from this software
22
 *    without specific prior written permission.
23
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
27
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33
 * SUCH DAMAGE.
34
 *
35
 * $FreeBSD$
36
 */
37
38
#ifndef _VIRTIO_PCI_LEGACY_VAR_H
39
#define _VIRTIO_PCI_LEGACY_VAR_H
40
41
#include <dev/virtio/pci/virtio_pci_var.h>
42
43
/* VirtIO ABI version, this must match exactly. */
44
#define VIRTIO_PCI_ABI_VERSION	0
45
46
/*
47
 * VirtIO Header, located in BAR 0.
48
 */
49
#define VIRTIO_PCI_HOST_FEATURES  0  /* host's supported features (32, RO) */
50
#define VIRTIO_PCI_GUEST_FEATURES 4  /* guest's supported features (32, RW) */
51
#define VIRTIO_PCI_QUEUE_PFN      8  /* physical address of VQ (32, RW) */
52
#define VIRTIO_PCI_QUEUE_NUM      12 /* number of ring entries (16, RO) */
53
#define VIRTIO_PCI_QUEUE_SEL      14 /* current VQ selection (16, RW) */
54
#define VIRTIO_PCI_QUEUE_NOTIFY	  16 /* notify host regarding VQ (16, RW) */
55
#define VIRTIO_PCI_STATUS         18 /* device status register (8, RW) */
56
#define VIRTIO_PCI_ISR            19 /* interrupt status register, reading
57
				      * also clears the register (8, RO) */
58
/* Only if MSIX is enabled: */
59
#define VIRTIO_MSI_CONFIG_VECTOR  20 /* configuration change vector (16, RW) */
60
#define VIRTIO_MSI_QUEUE_VECTOR   22 /* vector for selected VQ notifications
61
					(16, RW) */
62
63
/*
64
 * The remaining space is defined by each driver as the per-driver
65
 * configuration space.
66
 */
67
#define VIRTIO_PCI_CONFIG_OFF(msix_enabled)     ((msix_enabled) ? 24 : 20)
68
69
/*
70
 * How many bits to shift physical queue address written to QUEUE_PFN.
71
 * 12 is historical, and due to x86 page size.
72
 */
73
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT	12
74
75
/* The alignment to use between consumer and producer parts of vring. */
76
#define VIRTIO_PCI_VRING_ALIGN	4096
77
78
#endif /* _VIRTIO_PCI_LEGACY_VAR_H */
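The net effect of VIRTIO_PCI_CONFIG_OFF: the per-driver configuration starts at I/O offset 20 normally, or 24 when MSI-X is enabled, since the two MSI vector registers then occupy offsets 20-23. A sketch of reading a 16-bit device-config field at offset 2, mirroring what vtpci_legacy_read_dev_config() does; msix_enabled stands for vtpci_is_msix_enabled(&sc->vtpci_common):

	bus_size_t off;
	uint16_t val;

	/* off == 22 without MSI-X, 26 with MSI-X enabled. */
	off = VIRTIO_PCI_CONFIG_OFF(msix_enabled) + 2;
	val = bus_read_2(sc->vtpci_res, off);	/* BAR 0 I/O port read */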
(-)sys/dev/virtio/pci/virtio_pci_modern.c (+1443 lines)
Line 0 Link Here
1
/*-
2
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3
 *
4
 * Copyright (c) 2017, Bryan Venteicher <bryanv@FreeBSD.org>
5
 * All rights reserved.
6
 *
7
 * Redistribution and use in source and binary forms, with or without
8
 * modification, are permitted provided that the following conditions
9
 * are met:
10
 * 1. Redistributions of source code must retain the above copyright
11
 *    notice unmodified, this list of conditions, and the following
12
 *    disclaimer.
13
 * 2. Redistributions in binary form must reproduce the above copyright
14
 *    notice, this list of conditions and the following disclaimer in the
15
 *    documentation and/or other materials provided with the distribution.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
28
29
/* Driver for the modern VirtIO PCI interface. */
30
31
#include <sys/cdefs.h>
32
__FBSDID("$FreeBSD$");
33
34
#include <sys/param.h>
35
#include <sys/systm.h>
36
#include <sys/bus.h>
37
#include <sys/kernel.h>
38
#include <sys/module.h>
39
40
#include <machine/bus.h>
41
#include <machine/cpu.h>
42
#include <machine/resource.h>
43
#include <sys/bus.h>
44
#include <sys/rman.h>
45
46
#include <dev/pci/pcivar.h>
47
#include <dev/pci/pcireg.h>
48
49
#include <dev/virtio/virtio.h>
50
#include <dev/virtio/virtqueue.h>
51
#include <dev/virtio/pci/virtio_pci.h>
52
#include <dev/virtio/pci/virtio_pci_modern_var.h>
53
54
#include "virtio_bus_if.h"
55
#include "virtio_pci_if.h"
56
#include "virtio_if.h"
57
58
struct vtpci_modern_resource_map {
59
	struct resource_map	vtrm_map;
60
	int			vtrm_cap_offset;
61
	int			vtrm_bar;
62
	int			vtrm_offset;
63
	int			vtrm_length;
64
	int			vtrm_type;	/* SYS_RES_{MEMORY, IOPORT} */
65
};
66
67
struct vtpci_modern_bar_resource {
68
	struct resource		*vtbr_res;
69
	int			 vtbr_type;
70
};
71
72
struct vtpci_modern_softc {
73
	device_t			 vtpci_dev;
74
	struct vtpci_common		 vtpci_common;
75
	uint32_t			 vtpci_notify_offset_multiplier;
76
	uint16_t			 vtpci_devid;
77
	int				 vtpci_msix_bar;
78
	struct resource			*vtpci_msix_res;
79
80
	struct vtpci_modern_resource_map vtpci_common_res_map;
81
	struct vtpci_modern_resource_map vtpci_notify_res_map;
82
	struct vtpci_modern_resource_map vtpci_isr_res_map;
83
	struct vtpci_modern_resource_map vtpci_device_res_map;
84
85
#define VTPCI_MODERN_MAX_BARS		6
86
	struct vtpci_modern_bar_resource vtpci_bar_res[VTPCI_MODERN_MAX_BARS];
87
};
88
89
static int	vtpci_modern_probe(device_t);
90
static int	vtpci_modern_attach(device_t);
91
static int	vtpci_modern_detach(device_t);
92
static int	vtpci_modern_suspend(device_t);
93
static int	vtpci_modern_resume(device_t);
94
static int	vtpci_modern_shutdown(device_t);
95
96
static void	vtpci_modern_driver_added(device_t, driver_t *);
97
static void	vtpci_modern_child_detached(device_t, device_t);
98
static int	vtpci_modern_read_ivar(device_t, device_t, int, uintptr_t *);
99
static int	vtpci_modern_write_ivar(device_t, device_t, int, uintptr_t);
100
101
static uint8_t	vtpci_modern_read_isr(device_t);
102
static uint16_t	vtpci_modern_get_vq_size(device_t, int);
103
static bus_size_t vtpci_modern_get_vq_notify_off(device_t, int);
104
static void	vtpci_modern_set_vq(device_t, struct virtqueue *);
105
static void	vtpci_modern_disable_vq(device_t, int);
106
static int	vtpci_modern_register_msix(struct vtpci_modern_softc *, int,
107
		    struct vtpci_interrupt *);
108
static int	vtpci_modern_register_cfg_msix(device_t,
109
		    struct vtpci_interrupt *);
110
static int	vtpci_modern_register_vq_msix(device_t, int idx,
111
		    struct vtpci_interrupt *);
112
113
static uint64_t	vtpci_modern_negotiate_features(device_t, uint64_t);
114
static int	vtpci_modern_finalize_features(device_t);
115
static int	vtpci_modern_with_feature(device_t, uint64_t);
116
static int	vtpci_modern_alloc_virtqueues(device_t, int, int,
117
		    struct vq_alloc_info *);
118
static int	vtpci_modern_setup_interrupts(device_t, enum intr_type);
119
static void	vtpci_modern_stop(device_t);
120
static int	vtpci_modern_reinit(device_t, uint64_t);
121
static void	vtpci_modern_reinit_complete(device_t);
122
static void	vtpci_modern_notify_vq(device_t, uint16_t, bus_size_t);
123
static int	vtpci_modern_config_generation(device_t);
124
static void	vtpci_modern_read_dev_config(device_t, bus_size_t, void *, int);
125
static void	vtpci_modern_write_dev_config(device_t, bus_size_t, void *, int);
126
127
static int	vtpci_modern_probe_configs(device_t);
128
static int	vtpci_modern_find_cap(device_t, uint8_t, int *);
129
static int	vtpci_modern_map_configs(struct vtpci_modern_softc *);
130
static void	vtpci_modern_unmap_configs(struct vtpci_modern_softc *);
131
static int	vtpci_modern_find_cap_resource(struct vtpci_modern_softc *,
132
		     uint8_t, int, int, struct vtpci_modern_resource_map *);
133
static int	vtpci_modern_bar_type(struct vtpci_modern_softc *, int);
134
static struct resource *vtpci_modern_get_bar_resource(
135
		    struct vtpci_modern_softc *, int, int);
136
static struct resource *vtpci_modern_alloc_bar_resource(
137
		    struct vtpci_modern_softc *, int, int);
138
static void	vtpci_modern_free_bar_resources(struct vtpci_modern_softc *);
139
static int	vtpci_modern_alloc_resource_map(struct vtpci_modern_softc *,
140
		    struct vtpci_modern_resource_map *);
141
static void	vtpci_modern_free_resource_map(struct vtpci_modern_softc *,
142
		    struct vtpci_modern_resource_map *);
143
static void	vtpci_modern_alloc_msix_resource(struct vtpci_modern_softc *);
144
static void	vtpci_modern_free_msix_resource(struct vtpci_modern_softc *);
145
146
static void	vtpci_modern_probe_and_attach_child(struct vtpci_modern_softc *);
147
148
static uint64_t vtpci_modern_read_features(struct vtpci_modern_softc *);
149
static void	vtpci_modern_write_features(struct vtpci_modern_softc *,
150
		    uint64_t);
151
static void	vtpci_modern_select_virtqueue(struct vtpci_modern_softc *, int);
152
static uint8_t	vtpci_modern_get_status(struct vtpci_modern_softc *);
153
static void	vtpci_modern_set_status(struct vtpci_modern_softc *, uint8_t);
154
static void	vtpci_modern_reset(struct vtpci_modern_softc *);
155
static void	vtpci_modern_enable_virtqueues(struct vtpci_modern_softc *);
156
157
static uint8_t	vtpci_modern_read_common_1(struct vtpci_modern_softc *,
158
		    bus_size_t);
159
static uint16_t vtpci_modern_read_common_2(struct vtpci_modern_softc *,
160
		    bus_size_t);
161
static uint32_t vtpci_modern_read_common_4(struct vtpci_modern_softc *,
162
		    bus_size_t);
163
static void	vtpci_modern_write_common_1(struct vtpci_modern_softc *,
164
		     bus_size_t, uint8_t);
165
static void	vtpci_modern_write_common_2(struct vtpci_modern_softc *,
166
		     bus_size_t, uint16_t);
167
static void	vtpci_modern_write_common_4(struct vtpci_modern_softc *,
168
		    bus_size_t, uint32_t);
169
static void	vtpci_modern_write_common_8(struct vtpci_modern_softc *,
170
		    bus_size_t, uint64_t);
171
static void	vtpci_modern_write_notify_2(struct vtpci_modern_softc *,
172
		    bus_size_t, uint16_t);
173
static uint8_t  vtpci_modern_read_isr_1(struct vtpci_modern_softc *,
174
		    bus_size_t);
175
static uint8_t	vtpci_modern_read_device_1(struct vtpci_modern_softc *,
176
		    bus_size_t);
177
static uint16_t vtpci_modern_read_device_2(struct vtpci_modern_softc *,
178
		    bus_size_t);
179
static uint32_t vtpci_modern_read_device_4(struct vtpci_modern_softc *,
180
		    bus_size_t);
181
static uint64_t vtpci_modern_read_device_8(struct vtpci_modern_softc *,
182
		    bus_size_t);
183
static void	vtpci_modern_write_device_1(struct vtpci_modern_softc *,
184
		    bus_size_t, uint8_t);
185
static void	vtpci_modern_write_device_2(struct vtpci_modern_softc *,
186
		    bus_size_t, uint16_t);
187
static void	vtpci_modern_write_device_4(struct vtpci_modern_softc *,
188
		    bus_size_t, uint32_t);
189
static void	vtpci_modern_write_device_8(struct vtpci_modern_softc *,
190
		    bus_size_t, uint64_t);
191
192
/* Tunables. */
193
static int vtpci_modern_transitional = 0;
194
TUNABLE_INT("hw.virtio.pci.transitional", &vtpci_modern_transitional);
195
196
static device_method_t vtpci_modern_methods[] = {
197
	/* Device interface. */
198
	DEVMETHOD(device_probe,			vtpci_modern_probe),
199
	DEVMETHOD(device_attach,		vtpci_modern_attach),
200
	DEVMETHOD(device_detach,		vtpci_modern_detach),
201
	DEVMETHOD(device_suspend,		vtpci_modern_suspend),
202
	DEVMETHOD(device_resume,		vtpci_modern_resume),
203
	DEVMETHOD(device_shutdown,		vtpci_modern_shutdown),
204
205
	/* Bus interface. */
206
	DEVMETHOD(bus_driver_added,		vtpci_modern_driver_added),
207
	DEVMETHOD(bus_child_detached,		vtpci_modern_child_detached),
208
	DEVMETHOD(bus_read_ivar,		vtpci_modern_read_ivar),
209
	DEVMETHOD(bus_write_ivar,		vtpci_modern_write_ivar),
210
211
	/* VirtIO PCI interface. */
212
	DEVMETHOD(virtio_pci_read_isr,		 vtpci_modern_read_isr),
213
	DEVMETHOD(virtio_pci_get_vq_size,	 vtpci_modern_get_vq_size),
214
	DEVMETHOD(virtio_pci_get_vq_notify_off,	 vtpci_modern_get_vq_notify_off),
215
	DEVMETHOD(virtio_pci_set_vq,		 vtpci_modern_set_vq),
216
	DEVMETHOD(virtio_pci_disable_vq,	 vtpci_modern_disable_vq),
217
	DEVMETHOD(virtio_pci_register_cfg_msix,	 vtpci_modern_register_cfg_msix),
218
	DEVMETHOD(virtio_pci_register_vq_msix,	 vtpci_modern_register_vq_msix),
219
220
	/* VirtIO bus interface. */
221
	DEVMETHOD(virtio_bus_negotiate_features,  vtpci_modern_negotiate_features),
222
	DEVMETHOD(virtio_bus_finalize_features,	  vtpci_modern_finalize_features),
223
	DEVMETHOD(virtio_bus_with_feature,	  vtpci_modern_with_feature),
224
	DEVMETHOD(virtio_bus_alloc_virtqueues,	  vtpci_modern_alloc_virtqueues),
225
	DEVMETHOD(virtio_bus_setup_intr,	  vtpci_modern_setup_interrupts),
226
	DEVMETHOD(virtio_bus_stop,		  vtpci_modern_stop),
227
	DEVMETHOD(virtio_bus_reinit,		  vtpci_modern_reinit),
228
	DEVMETHOD(virtio_bus_reinit_complete,	  vtpci_modern_reinit_complete),
229
	DEVMETHOD(virtio_bus_notify_vq,		  vtpci_modern_notify_vq),
230
	DEVMETHOD(virtio_bus_config_generation,	  vtpci_modern_config_generation),
231
	DEVMETHOD(virtio_bus_read_device_config,  vtpci_modern_read_dev_config),
232
	DEVMETHOD(virtio_bus_write_device_config, vtpci_modern_write_dev_config),
233
234
	DEVMETHOD_END
235
};
236
237
static driver_t vtpci_modern_driver = {
238
	.name = "vtpcim",
239
	.methods = vtpci_modern_methods,
240
	.size = sizeof(struct vtpci_modern_softc)
241
};
242
243
devclass_t vtpci_modern_devclass;
244
245
DRIVER_MODULE(vtpcim, pci, vtpci_modern_driver, vtpci_modern_devclass, 0, 0);
246
247
static int
248
vtpci_modern_probe(device_t dev)
249
{
250
	char desc[64];
251
	const char *name;
252
	uint16_t devid;
253
254
	if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
255
		return (ENXIO);
256
257
	if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN ||
258
	    pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MODERN_MAX)
259
		return (ENXIO);
260
261
	if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MODERN_MIN) {
262
		if (!vtpci_modern_transitional)
263
			return (ENXIO);
264
		devid = pci_get_subdevice(dev);
265
	} else
266
		devid = pci_get_device(dev) - VIRTIO_PCI_DEVICEID_MODERN_MIN;
267
268
	if (vtpci_modern_probe_configs(dev) != 0)
269
		return (ENXIO);
270
271
	name = virtio_device_name(devid);
272
	if (name == NULL)
273
		name = "Unknown";
274
275
	snprintf(desc, sizeof(desc), "VirtIO PCI (modern) %s adapter", name);
276
	device_set_desc_copy(dev, desc);
277
278
	return (BUS_PROBE_DEFAULT);
279
}
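A worked example of the device ID mapping above, assuming the usual constants in virtio_pci_var.h (VIRTIO_PCI_DEVICEID_MODERN_MIN == 0x1040, following the VirtIO 1.0 scheme of 0x1040 + device type):

	/* Modern: PCI device ID 0x1041 -> devid = 0x1041 - 0x1040 = 1
	 * (network). Transitional (ID below 0x1040): probed only when
	 * the hw.virtio.pci.transitional tunable is set, with the type
	 * taken from the PCI subdevice ID instead. */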
280
281
static int
282
vtpci_modern_attach(device_t dev)
283
{
284
	struct vtpci_modern_softc *sc;
285
	int error;
286
287
	sc = device_get_softc(dev);
288
	sc->vtpci_dev = dev;
289
	vtpci_init(&sc->vtpci_common, dev, true);
290
291
	if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MODERN_MIN)
292
		sc->vtpci_devid = pci_get_subdevice(dev);
293
	else
294
		sc->vtpci_devid = pci_get_device(dev) -
295
		    VIRTIO_PCI_DEVICEID_MODERN_MIN;
296
297
	error = vtpci_modern_map_configs(sc);
298
	if (error) {
299
		device_printf(dev, "cannot map configs\n");
300
		vtpci_modern_unmap_configs(sc);
301
		return (error);
302
	}
303
304
	vtpci_modern_reset(sc);
305
306
	/* Tell the host we've noticed this device. */
307
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
308
309
	error = vtpci_add_child(&sc->vtpci_common);
310
	if (error)
311
		goto fail;
312
313
	vtpci_modern_probe_and_attach_child(sc);
314
315
	return (0);
316
317
fail:
318
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED);
319
	vtpci_modern_detach(dev);
320
321
	return (error);
322
}
323
324
static int
325
vtpci_modern_detach(device_t dev)
326
{
327
	struct vtpci_modern_softc *sc;
328
	int error;
329
330
	sc = device_get_softc(dev);
331
332
	error = vtpci_delete_child(&sc->vtpci_common);
333
	if (error)
334
		return (error);
335
336
	vtpci_modern_reset(sc);
337
	vtpci_modern_unmap_configs(sc);
338
339
	return (0);
340
}
341
342
static int
343
vtpci_modern_suspend(device_t dev)
344
{
345
	return (bus_generic_suspend(dev));
346
}
347
348
static int
349
vtpci_modern_resume(device_t dev)
350
{
351
	return (bus_generic_resume(dev));
352
}
353
354
static int
355
vtpci_modern_shutdown(device_t dev)
356
{
357
	(void) bus_generic_shutdown(dev);
358
	/* Forcibly stop the host device. */
359
	vtpci_modern_stop(dev);
360
361
	return (0);
362
}
363
364
static void
365
vtpci_modern_driver_added(device_t dev, driver_t *driver)
366
{
367
	vtpci_modern_probe_and_attach_child(device_get_softc(dev));
368
}
369
370
static void
371
vtpci_modern_child_detached(device_t dev, device_t child)
372
{
373
	struct vtpci_modern_softc *sc;
374
375
	sc = device_get_softc(dev);
376
377
	vtpci_modern_reset(sc);
378
	vtpci_child_detached(&sc->vtpci_common);
379
380
	/* After the reset, retell the host we've noticed this device. */
381
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
382
}
383
384
static int
385
vtpci_modern_read_ivar(device_t dev, device_t child, int index,
386
    uintptr_t *result)
387
{
388
	struct vtpci_modern_softc *sc;
389
	struct vtpci_common *cn;
390
391
	sc = device_get_softc(dev);
392
	cn = &sc->vtpci_common;
393
394
	if (vtpci_child_device(cn) != child)
395
		return (ENOENT);
396
397
	switch (index) {
398
	case VIRTIO_IVAR_DEVTYPE:
399
		*result = sc->vtpci_devid;
400
		break;
401
	default:
402
		return (vtpci_read_ivar(cn, index, result));
403
	}
404
405
	return (0);
406
}
407
408
static int
409
vtpci_modern_write_ivar(device_t dev, device_t child, int index,
410
    uintptr_t value)
411
{
412
	struct vtpci_modern_softc *sc;
413
	struct vtpci_common *cn;
414
415
	sc = device_get_softc(dev);
416
	cn = &sc->vtpci_common;
417
418
	if (vtpci_child_device(cn) != child)
419
		return (ENOENT);
420
421
	switch (index) {
422
	default:
423
		return (vtpci_write_ivar(cn, index, value));
424
	}
425
426
	return (0);
427
}
428
429
static uint64_t
430
vtpci_modern_negotiate_features(device_t dev, uint64_t child_features)
431
{
432
	struct vtpci_modern_softc *sc;
433
	uint64_t host_features, features;
434
435
	sc = device_get_softc(dev);
436
	host_features = vtpci_modern_read_features(sc);
437
438
	/*
439
	 * Since the driver was added as a child of the modern PCI bus,
440
	 * always add the V1 flag.
441
	 */
442
	child_features |= VIRTIO_F_VERSION_1;
443
444
	features = vtpci_negotiate_features(&sc->vtpci_common,
445
	    child_features, host_features);
446
	vtpci_modern_write_features(sc, features);
447
448
	return (features);
449
}
450
451
static int
452
vtpci_modern_finalize_features(device_t dev)
453
{
454
	struct vtpci_modern_softc *sc;
455
	uint8_t status;
456
457
	sc = device_get_softc(dev);
458
459
	/*
460
	 * Must re-read the status after setting it to verify the negotiated
461
	 * features were accepted by the device.
462
	 */
463
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_S_FEATURES_OK);
464
465
	status = vtpci_modern_get_status(sc);
466
	if ((status & VIRTIO_CONFIG_S_FEATURES_OK) == 0) {
467
		device_printf(dev, "desired features were not accepted\n");
468
		return (ENOTSUP);
469
	}
470
471
	return (0);
472
}
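For context, the FEATURES_OK handshake above is one step of the initialization order the VirtIO 1.0 spec prescribes, which this transport spreads across attach, negotiate, finalize, and reinit_complete:

	/*
	 * 1. Reset the device (write status 0).
	 * 2. ACKNOWLEDGE: guest has noticed the device.
	 * 3. DRIVER: guest knows how to drive it.
	 * 4. Read device features; write the driver-supported subset.
	 * 5. FEATURES_OK; re-read status to confirm the device agreed.
	 * 6. Device-specific setup (virtqueues, config space).
	 * 7. DRIVER_OK: device is live.
	 */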
473
474
static int
475
vtpci_modern_with_feature(device_t dev, uint64_t feature)
476
{
477
	struct vtpci_modern_softc *sc;
478
479
	sc = device_get_softc(dev);
480
481
	return (vtpci_with_feature(&sc->vtpci_common, feature));
482
}
483
484
static uint64_t
485
vtpci_modern_read_features(struct vtpci_modern_softc *sc)
486
{
487
	uint32_t features0, features1;
488
489
	vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_DFSELECT, 0);
490
	features0 = vtpci_modern_read_common_4(sc, VIRTIO_PCI_COMMON_DF);
491
	vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_DFSELECT, 1);
492
	features1 = vtpci_modern_read_common_4(sc, VIRTIO_PCI_COMMON_DF);
493
494
	return (((uint64_t) features1 << 32) | features0);
495
}
496
497
static void
498
vtpci_modern_write_features(struct vtpci_modern_softc *sc, uint64_t features)
499
{
500
	uint32_t features0, features1;
501
502
	features0 = features;
503
	features1 = features >> 32;
504
505
	vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GFSELECT, 0);
506
	vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GF, features0);
507
	vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GFSELECT, 1);
508
	vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GF, features1);
509
}
510
511
static int
512
vtpci_modern_alloc_virtqueues(device_t dev, int flags, int nvqs,
513
    struct vq_alloc_info *vq_info)
514
{
515
	struct vtpci_modern_softc *sc;
516
	struct vtpci_common *cn;
517
	uint16_t max_nvqs;
518
519
	sc = device_get_softc(dev);
520
	cn = &sc->vtpci_common;
521
522
	max_nvqs = vtpci_modern_read_common_2(sc, VIRTIO_PCI_COMMON_NUMQ);
523
	if (nvqs > max_nvqs) {
524
		device_printf(sc->vtpci_dev, "requested virtqueue count %d "
525
		    "exceeds max %d\n", nvqs, max_nvqs);
526
		return (E2BIG);
527
	}
528
529
	return (vtpci_alloc_virtqueues(cn, flags, nvqs, vq_info));
530
}
531
532
static int
533
vtpci_modern_setup_interrupts(device_t dev, enum intr_type type)
534
{
535
	struct vtpci_modern_softc *sc;
536
	int error;
537
538
	sc = device_get_softc(dev);
539
540
	error = vtpci_setup_interrupts(&sc->vtpci_common, type);
541
	if (error == 0)
542
		vtpci_modern_enable_virtqueues(sc);
543
544
	return (error);
545
}
546
547
static void
548
vtpci_modern_stop(device_t dev)
549
{
550
	vtpci_modern_reset(device_get_softc(dev));
551
}
552
553
static int
554
vtpci_modern_reinit(device_t dev, uint64_t features)
555
{
556
	struct vtpci_modern_softc *sc;
557
	struct vtpci_common *cn;
558
	int error;
559
560
	sc = device_get_softc(dev);
561
	cn = &sc->vtpci_common;
562
563
	/*
564
	 * Redrive the device initialization. This is a bit of an abuse of
565
	 * the specification, but VirtualBox, QEMU/KVM, and bhyve seem to
566
	 * play nice.
567
	 *
568
	 * We do not allow the host device to change from what was originally
569
	 * negotiated beyond what the guest driver changed. MSIX state should
570
	 * not change, number of virtqueues and their size remain the same, etc.
571
	 * This will need to be rethought when we want to support migration.
572
	 */
573
574
	if (vtpci_modern_get_status(sc) != VIRTIO_CONFIG_STATUS_RESET)
575
		vtpci_modern_stop(dev);
576
577
	/*
578
	 * Quickly drive the status through ACK and DRIVER. The device does
579
	 * not become usable again until DRIVER_OK is set by reinit_complete.
580
	 */
581
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
582
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER);
583
584
	/*
585
	 * TODO: Check that no features are added beyond what was
586
	 * originally negotiated.
587
	 */
588
	vtpci_modern_negotiate_features(dev, features);
589
	error = vtpci_modern_finalize_features(dev);
590
	if (error) {
591
		device_printf(dev, "cannot finalize features during reinit\n");
592
		return (error);
593
	}
594
595
	error = vtpci_reinit(cn);
596
	if (error)
597
		return (error);
598
599
	return (0);
600
}
601
602
static void
603
vtpci_modern_reinit_complete(device_t dev)
604
{
605
	struct vtpci_modern_softc *sc;
606
607
	sc = device_get_softc(dev);
608
609
	vtpci_modern_enable_virtqueues(sc);
610
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK);
611
}
612
613
static void
614
vtpci_modern_notify_vq(device_t dev, uint16_t queue, bus_size_t offset)
615
{
616
	struct vtpci_modern_softc *sc;
617
618
	sc = device_get_softc(dev);
619
620
	vtpci_modern_write_notify_2(sc, offset, queue);
621
}
622
623
static uint8_t
624
vtpci_modern_get_status(struct vtpci_modern_softc *sc)
625
{
626
	return (vtpci_modern_read_common_1(sc, VIRTIO_PCI_COMMON_STATUS));
627
}
628
629
static void
630
vtpci_modern_set_status(struct vtpci_modern_softc *sc, uint8_t status)
631
{
632
	if (status != VIRTIO_CONFIG_STATUS_RESET)
633
		status |= vtpci_modern_get_status(sc);
634
635
	vtpci_modern_write_common_1(sc, VIRTIO_PCI_COMMON_STATUS, status);
636
}
637
638
static int
639
vtpci_modern_config_generation(device_t dev)
640
{
641
	struct vtpci_modern_softc *sc;
642
	uint8_t gen;
643
644
	sc = device_get_softc(dev);
645
	gen = vtpci_modern_read_common_1(sc, VIRTIO_PCI_COMMON_CFGGENERATION);
646
647
	return (gen);
648
}
649
650
static void
651
vtpci_modern_read_dev_config(device_t dev, bus_size_t offset, void *dst,
652
    int length)
653
{
654
	struct vtpci_modern_softc *sc;
655
656
	sc = device_get_softc(dev);
657
658
	if (sc->vtpci_device_res_map.vtrm_map.r_size == 0) {
659
		panic("%s: attempt to read dev config but not present",
660
		    __func__);
661
	}
662
663
	switch (length) {
664
	case 1:
665
		*(uint8_t *) dst = vtpci_modern_read_device_1(sc, offset);
666
		break;
667
	case 2:
668
		*(uint16_t *) dst = virtio_htog16(true,
669
		    vtpci_modern_read_device_2(sc, offset));
670
		break;
671
	case 4:
672
		*(uint32_t *) dst = virtio_htog32(true,
673
		    vtpci_modern_read_device_4(sc, offset));
674
		break;
675
	case 8:
676
		*(uint64_t *) dst = virtio_htog64(true,
677
		    vtpci_modern_read_device_8(sc, offset));
678
		break;
679
	default:
680
		panic("%s: device %s invalid device read length %d offset %d",
681
		    __func__, device_get_nameunit(dev), length, (int) offset);
682
	}
683
}
684
685
static void
686
vtpci_modern_write_dev_config(device_t dev, bus_size_t offset, void *src,
687
    int length)
688
{
689
	struct vtpci_modern_softc *sc;
690
691
	sc = device_get_softc(dev);
692
693
	if (sc->vtpci_device_res_map.vtrm_map.r_size == 0) {
694
		panic("%s: attempt to write dev config but not present",
695
		    __func__);
696
	}
697
698
	switch (length) {
699
	case 1:
700
		vtpci_modern_write_device_1(sc, offset, *(uint8_t *) src);
701
		break;
702
	case 2: {
703
		uint16_t val = virtio_gtoh16(true, *(uint16_t *) src);
704
		vtpci_modern_write_device_2(sc, offset, val);
705
		break;
706
	}
707
	case 4: {
708
		uint32_t val = virtio_gtoh32(true, *(uint32_t *) src);
709
		vtpci_modern_write_device_4(sc, offset, val);
710
		break;
711
	}
712
	case 8: {
713
		uint64_t val = virtio_gtoh64(true, *(uint64_t *) src);
714
		vtpci_modern_write_device_8(sc, offset, val);
715
		break;
716
	}
717
	default:
718
		panic("%s: device %s invalid device write length %d offset %d",
719
		    __func__, device_get_nameunit(dev), length, (int) offset);
720
	}
721
}
722
723
static int
724
vtpci_modern_probe_configs(device_t dev)
725
{
726
	int error;
727
728
	/*
729
	 * These config capabilities must be present. The DEVICE_CFG
730
	 * capability is present only if the device has device-specific config.
731
	 */
732
733
	error = vtpci_modern_find_cap(dev, VIRTIO_PCI_CAP_COMMON_CFG, NULL);
734
	if (error) {
735
		device_printf(dev, "cannot find COMMON_CFG capability\n");
736
		return (error);
737
	}
738
739
	error = vtpci_modern_find_cap(dev, VIRTIO_PCI_CAP_NOTIFY_CFG, NULL);
740
	if (error) {
741
		device_printf(dev, "cannot find NOTIFY_CFG capability\n");
742
		return (error);
743
	}
744
745
	error = vtpci_modern_find_cap(dev, VIRTIO_PCI_CAP_ISR_CFG, NULL);
746
	if (error) {
747
		device_printf(dev, "cannot find ISR_CFG capability\n");
748
		return (error);
749
	}
750
751
	return (0);
752
}
753
754
static int
755
vtpci_modern_find_cap(device_t dev, uint8_t cfg_type, int *cap_offset)
756
{
757
	uint32_t type, bar;
758
	int capreg, error;
759
760
	for (error = pci_find_cap(dev, PCIY_VENDOR, &capreg);
761
	     error == 0;
762
	     error = pci_find_next_cap(dev, PCIY_VENDOR, capreg, &capreg)) {
763
764
		type = pci_read_config(dev, capreg +
765
		    offsetof(struct virtio_pci_cap, cfg_type), 1);
766
		bar = pci_read_config(dev, capreg +
767
		    offsetof(struct virtio_pci_cap, bar), 1);
768
769
		/* Must ignore reserved BARs. */
770
		if (bar >= VTPCI_MODERN_MAX_BARS)
771
			continue;
772
773
		if (type == cfg_type) {
774
			if (cap_offset != NULL)
775
				*cap_offset = capreg;
776
			break;
777
		}
778
	}
779
780
	return (error);
781
}
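For reference, a sketch of the vendor capability layout those offsetof() calls walk, as defined by the VirtIO 1.0 spec (the real struct lives in virtio_pci_var.h, outside this hunk):

	struct virtio_pci_cap {
		uint8_t		cap_vndr;	/* PCIY_VENDOR */
		uint8_t		cap_next;	/* offset of next capability */
		uint8_t		cap_len;	/* length of this capability */
		uint8_t		cfg_type;	/* COMMON/NOTIFY/ISR/DEVICE */
		uint8_t		bar;		/* BAR holding the structure */
		uint8_t		padding[3];
		uint32_t	offset;		/* offset within the BAR */
		uint32_t	length;		/* length of the structure */
	};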
782
783
static int
784
vtpci_modern_map_common_config(struct vtpci_modern_softc *sc)
785
{
786
	device_t dev;
787
	int error;
788
789
	dev = sc->vtpci_dev;
790
791
	error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_COMMON_CFG,
792
	    sizeof(struct virtio_pci_common_cfg), 4, &sc->vtpci_common_res_map);
793
	if (error) {
794
		device_printf(dev, "cannot find cap COMMON_CFG resource\n");
795
		return (error);
796
	}
797
798
	error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_common_res_map);
799
	if (error) {
800
		device_printf(dev, "cannot alloc resource for COMMON_CFG\n");
801
		return (error);
802
	}
803
804
	return (0);
805
}
806
807
static int
808
vtpci_modern_map_notify_config(struct vtpci_modern_softc *sc)
809
{
810
	device_t dev;
811
	int cap_offset, error;
812
813
	dev = sc->vtpci_dev;
814
815
	error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_NOTIFY_CFG,
816
	    -1, 2, &sc->vtpci_notify_res_map);
817
	if (error) {
818
		device_printf(dev, "cannot find cap NOTIFY_CFG resource\n");
819
		return (error);
820
	}
821
822
	cap_offset = sc->vtpci_notify_res_map.vtrm_cap_offset;
823
824
	sc->vtpci_notify_offset_multiplier = pci_read_config(dev, cap_offset +
825
	    offsetof(struct virtio_pci_notify_cap, notify_off_multiplier), 4);
826
827
	error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_notify_res_map);
828
	if (error) {
829
		device_printf(dev, "cannot alloc resource for NOTIFY_CFG\n");
830
		return (error);
831
	}
832
833
	return (0);
834
}
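The multiplier saved above feeds the VirtIO 1.0 queue-notify address calculation: each queue's doorbell lies within the NOTIFY_CFG region at its queue_notify_off (read from the common config for the selected queue) scaled by notify_off_multiplier. A sketch, with queue_notify_off assumed already read; the result is the offset that vtpci_modern_notify_vq() later writes the queue index to:

	bus_size_t notify_off;

	/* Offset into the mapped vtpci_notify_res_map region. */
	notify_off = (bus_size_t)queue_notify_off *
	    sc->vtpci_notify_offset_multiplier;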
835
836
static int
837
vtpci_modern_map_isr_config(struct vtpci_modern_softc *sc)
838
{
839
	device_t dev;
840
	int error;
841
842
	dev = sc->vtpci_dev;
843
844
	error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_ISR_CFG,
845
	    sizeof(uint8_t), 1, &sc->vtpci_isr_res_map);
846
	if (error) {
847
		device_printf(dev, "cannot find cap ISR_CFG resource\n");
848
		return (error);
849
	}
850
851
	error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_isr_res_map);
852
	if (error) {
853
		device_printf(dev, "cannot alloc resource for ISR_CFG\n");
854
		return (error);
855
	}
856
857
	return (0);
858
}
859
860
static int
861
vtpci_modern_map_device_config(struct vtpci_modern_softc *sc)
862
{
863
	device_t dev;
864
	int error;
865
866
	dev = sc->vtpci_dev;
867
868
	error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_DEVICE_CFG,
869
	    -1, 4, &sc->vtpci_device_res_map);
870
	if (error == ENOENT) {
871
		/* The device-specific configuration region is optional. */
872
		return (0);
873
	} else if (error) {
874
		device_printf(dev, "cannot find cap DEVICE_CFG resource\n");
875
		return (error);
876
	}
877
878
	error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_device_res_map);
879
	if (error) {
880
		device_printf(dev, "cannot alloc resource for DEVICE_CFG\n");
881
		return (error);
882
	}
883
884
	return (error);
885
}
886
887
static int
888
vtpci_modern_map_configs(struct vtpci_modern_softc *sc)
889
{
890
	int error;
891
892
	error = vtpci_modern_map_common_config(sc);
893
	if (error)
894
		return (error);
895
896
	error = vtpci_modern_map_notify_config(sc);
897
	if (error)
898
		return (error);
899
900
	error = vtpci_modern_map_isr_config(sc);
901
	if (error)
902
		return (error);
903
904
	error = vtpci_modern_map_device_config(sc);
905
	if (error)
906
		return (error);
907
908
	vtpci_modern_alloc_msix_resource(sc);
909
910
	return (0);
911
}
912
913
static void
914
vtpci_modern_unmap_configs(struct vtpci_modern_softc *sc)
915
{
916
917
	vtpci_modern_free_resource_map(sc, &sc->vtpci_common_res_map);
918
	vtpci_modern_free_resource_map(sc, &sc->vtpci_notify_res_map);
919
	vtpci_modern_free_resource_map(sc, &sc->vtpci_isr_res_map);
920
	vtpci_modern_free_resource_map(sc, &sc->vtpci_device_res_map);

	vtpci_modern_free_bar_resources(sc);
	vtpci_modern_free_msix_resource(sc);

	sc->vtpci_notify_offset_multiplier = 0;
}

static int
vtpci_modern_find_cap_resource(struct vtpci_modern_softc *sc, uint8_t cfg_type,
    int min_size, int alignment, struct vtpci_modern_resource_map *res)
{
	device_t dev;
	int cap_offset, offset, length, error;
	uint8_t bar, cap_length;

	dev = sc->vtpci_dev;

	error = vtpci_modern_find_cap(dev, cfg_type, &cap_offset);
	if (error)
		return (error);

	cap_length = pci_read_config(dev,
	    cap_offset + offsetof(struct virtio_pci_cap, cap_len), 1);

	if (cap_length < sizeof(struct virtio_pci_cap)) {
		device_printf(dev, "cap %u length %d less than expected\n",
		    cfg_type, cap_length);
		return (ENXIO);
	}

	bar = pci_read_config(dev,
	    cap_offset + offsetof(struct virtio_pci_cap, bar), 1);
	offset = pci_read_config(dev,
	    cap_offset + offsetof(struct virtio_pci_cap, offset), 4);
	length = pci_read_config(dev,
	    cap_offset + offsetof(struct virtio_pci_cap, length), 4);

	if (min_size != -1 && length < min_size) {
		device_printf(dev, "cap %u struct length %d less than min %d\n",
		    cfg_type, length, min_size);
		return (ENXIO);
	}

	if (offset % alignment) {
		device_printf(dev, "cap %u struct offset %d not aligned to %d\n",
		    cfg_type, offset, alignment);
		return (ENXIO);
	}

	/* BMV: TODO Can we determine the size of the BAR here? */

	res->vtrm_cap_offset = cap_offset;
	res->vtrm_bar = bar;
	res->vtrm_offset = offset;
	res->vtrm_length = length;
	res->vtrm_type = vtpci_modern_bar_type(sc, bar);

	return (0);
}

static int
vtpci_modern_bar_type(struct vtpci_modern_softc *sc, int bar)
{
	uint32_t val;

	/*
	 * The BAR described by a config capability may be either an IOPORT or
	 * MEM, but we must know the type when calling bus_alloc_resource().
	 */
	val = pci_read_config(sc->vtpci_dev, PCIR_BAR(bar), 4);
	if (PCI_BAR_IO(val))
		return (SYS_RES_IOPORT);
	else
		return (SYS_RES_MEMORY);
}

static struct resource *
vtpci_modern_get_bar_resource(struct vtpci_modern_softc *sc, int bar, int type)
{
	struct resource *res;

	MPASS(bar >= 0 && bar < VTPCI_MODERN_MAX_BARS);
	res = sc->vtpci_bar_res[bar].vtbr_res;
	MPASS(res == NULL || sc->vtpci_bar_res[bar].vtbr_type == type);

	return (res);
}

static struct resource *
vtpci_modern_alloc_bar_resource(struct vtpci_modern_softc *sc, int bar,
    int type)
{
	struct resource *res;
	int rid;

	MPASS(bar >= 0 && bar < VTPCI_MODERN_MAX_BARS);
	MPASS(type == SYS_RES_MEMORY || type == SYS_RES_IOPORT);

	res = sc->vtpci_bar_res[bar].vtbr_res;
	if (res != NULL) {
		MPASS(sc->vtpci_bar_res[bar].vtbr_type == type);
		return (res);
	}

	rid = PCIR_BAR(bar);
	res = bus_alloc_resource_any(sc->vtpci_dev, type, &rid,
	    RF_ACTIVE | RF_UNMAPPED);
	if (res != NULL) {
		sc->vtpci_bar_res[bar].vtbr_res = res;
		sc->vtpci_bar_res[bar].vtbr_type = type;
	}

	return (res);
}

static void
vtpci_modern_free_bar_resources(struct vtpci_modern_softc *sc)
{
	device_t dev;
	struct resource *res;
	int bar, rid, type;

	dev = sc->vtpci_dev;

	for (bar = 0; bar < VTPCI_MODERN_MAX_BARS; bar++) {
		res = sc->vtpci_bar_res[bar].vtbr_res;
		type = sc->vtpci_bar_res[bar].vtbr_type;

		if (res != NULL) {
			rid = PCIR_BAR(bar);
			bus_release_resource(dev, type, rid, res);
			sc->vtpci_bar_res[bar].vtbr_res = NULL;
			sc->vtpci_bar_res[bar].vtbr_type = 0;
		}
	}
}

static int
vtpci_modern_alloc_resource_map(struct vtpci_modern_softc *sc,
    struct vtpci_modern_resource_map *map)
{
	struct resource_map_request req;
	struct resource *res;
	int type;

	type = map->vtrm_type;

	res = vtpci_modern_alloc_bar_resource(sc, map->vtrm_bar, type);
	if (res == NULL)
		return (ENXIO);

	resource_init_map_request(&req);
	req.offset = map->vtrm_offset;
	req.length = map->vtrm_length;

	return (bus_map_resource(sc->vtpci_dev, type, res, &req,
	    &map->vtrm_map));
}

static void
vtpci_modern_free_resource_map(struct vtpci_modern_softc *sc,
    struct vtpci_modern_resource_map *map)
{
	struct resource *res;
	int type;

	type = map->vtrm_type;
	res = vtpci_modern_get_bar_resource(sc, map->vtrm_bar, type);

	if (res != NULL && map->vtrm_map.r_size != 0) {
		bus_unmap_resource(sc->vtpci_dev, type, res, &map->vtrm_map);
		bzero(map, sizeof(struct vtpci_modern_resource_map));
	}
}

static void
vtpci_modern_alloc_msix_resource(struct vtpci_modern_softc *sc)
{
	device_t dev;
	int bar;

	dev = sc->vtpci_dev;

	if (!vtpci_is_msix_available(&sc->vtpci_common) ||
	    (bar = pci_msix_table_bar(dev)) == -1)
		return;

	sc->vtpci_msix_bar = bar;
	if ((sc->vtpci_msix_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &bar, RF_ACTIVE)) == NULL)
		device_printf(dev, "Unable to map MSIX table\n");
}

static void
vtpci_modern_free_msix_resource(struct vtpci_modern_softc *sc)
{
	device_t dev;

	dev = sc->vtpci_dev;

	if (sc->vtpci_msix_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->vtpci_msix_bar,
		    sc->vtpci_msix_res);
		sc->vtpci_msix_bar = 0;
		sc->vtpci_msix_res = NULL;
	}
}

static void
vtpci_modern_probe_and_attach_child(struct vtpci_modern_softc *sc)
{
	device_t dev, child;

	dev = sc->vtpci_dev;
	child = vtpci_child_device(&sc->vtpci_common);

	if (child == NULL || device_get_state(child) != DS_NOTPRESENT)
		return;

	if (device_probe(child) != 0)
		return;

	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER);

	if (device_attach(child) != 0) {
		vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED);
		/* Reset state for later attempt. */
		vtpci_modern_child_detached(dev, child);
	} else {
		vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK);
		VIRTIO_ATTACH_COMPLETED(child);
	}
}

static int
vtpci_modern_register_msix(struct vtpci_modern_softc *sc, int offset,
    struct vtpci_interrupt *intr)
{
	uint16_t vector;

	if (intr != NULL) {
		/* Map from guest rid to host vector. */
		vector = intr->vti_rid - 1;
	} else
		vector = VIRTIO_MSI_NO_VECTOR;

	vtpci_modern_write_common_2(sc, offset, vector);
	return (vtpci_modern_read_common_2(sc, offset) == vector ? 0 : ENODEV);
}

static int
vtpci_modern_register_cfg_msix(device_t dev, struct vtpci_interrupt *intr)
{
	struct vtpci_modern_softc *sc;
	int error;

	sc = device_get_softc(dev);

	error = vtpci_modern_register_msix(sc, VIRTIO_PCI_COMMON_MSIX, intr);
	if (error) {
		device_printf(dev,
		    "unable to register config MSIX interrupt\n");
		return (error);
	}

	return (0);
}

static int
vtpci_modern_register_vq_msix(device_t dev, int idx,
    struct vtpci_interrupt *intr)
{
	struct vtpci_modern_softc *sc;
	int error;

	sc = device_get_softc(dev);

	vtpci_modern_select_virtqueue(sc, idx);
	error = vtpci_modern_register_msix(sc, VIRTIO_PCI_COMMON_Q_MSIX, intr);
	if (error) {
		device_printf(dev,
		    "unable to register virtqueue MSIX interrupt\n");
		return (error);
	}

	return (0);
}

static void
vtpci_modern_reset(struct vtpci_modern_softc *sc)
{
	/*
	 * Setting the status to RESET sets the host device to the
	 * original, uninitialized state. Must poll the status until
	 * the reset is complete.
	 */
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_RESET);

	while (vtpci_modern_get_status(sc) != VIRTIO_CONFIG_STATUS_RESET)
		cpu_spinwait();
}

static void
vtpci_modern_select_virtqueue(struct vtpci_modern_softc *sc, int idx)
{
	vtpci_modern_write_common_2(sc, VIRTIO_PCI_COMMON_Q_SELECT, idx);
}

static uint8_t
vtpci_modern_read_isr(device_t dev)
{
	return (vtpci_modern_read_isr_1(device_get_softc(dev), 0));
}

static uint16_t
vtpci_modern_get_vq_size(device_t dev, int idx)
{
	struct vtpci_modern_softc *sc;

	sc = device_get_softc(dev);

	vtpci_modern_select_virtqueue(sc, idx);
	return (vtpci_modern_read_common_2(sc, VIRTIO_PCI_COMMON_Q_SIZE));
}

static bus_size_t
vtpci_modern_get_vq_notify_off(device_t dev, int idx)
{
	struct vtpci_modern_softc *sc;
	uint16_t q_notify_off;

	sc = device_get_softc(dev);

	vtpci_modern_select_virtqueue(sc, idx);
	q_notify_off = vtpci_modern_read_common_2(sc, VIRTIO_PCI_COMMON_Q_NOFF);

	return (q_notify_off * sc->vtpci_notify_offset_multiplier);
}

static void
vtpci_modern_set_vq(device_t dev, struct virtqueue *vq)
{
	struct vtpci_modern_softc *sc;

	sc = device_get_softc(dev);

	vtpci_modern_select_virtqueue(sc, virtqueue_index(vq));

	/* BMV: Currently we never adjust the device's proposed VQ size. */
	vtpci_modern_write_common_2(sc,
	    VIRTIO_PCI_COMMON_Q_SIZE, virtqueue_size(vq));

	vtpci_modern_write_common_8(sc,
	    VIRTIO_PCI_COMMON_Q_DESCLO, virtqueue_desc_paddr(vq));
	vtpci_modern_write_common_8(sc,
	    VIRTIO_PCI_COMMON_Q_AVAILLO, virtqueue_avail_paddr(vq));
	vtpci_modern_write_common_8(sc,
	    VIRTIO_PCI_COMMON_Q_USEDLO, virtqueue_used_paddr(vq));
}

static void
vtpci_modern_disable_vq(device_t dev, int idx)
{
	struct vtpci_modern_softc *sc;

	sc = device_get_softc(dev);

	vtpci_modern_select_virtqueue(sc, idx);
	vtpci_modern_write_common_2(sc, VIRTIO_PCI_COMMON_Q_ENABLE, 0);
	vtpci_modern_write_common_8(sc, VIRTIO_PCI_COMMON_Q_DESCLO, 0ULL);
	vtpci_modern_write_common_8(sc, VIRTIO_PCI_COMMON_Q_AVAILLO, 0ULL);
	vtpci_modern_write_common_8(sc, VIRTIO_PCI_COMMON_Q_USEDLO, 0ULL);
}

static void
vtpci_modern_enable_virtqueues(struct vtpci_modern_softc *sc)
{
	int idx;

	for (idx = 0; idx < sc->vtpci_common.vtpci_nvqs; idx++) {
		vtpci_modern_select_virtqueue(sc, idx);
		vtpci_modern_write_common_2(sc, VIRTIO_PCI_COMMON_Q_ENABLE, 1);
	}
}

static uint8_t
vtpci_modern_read_common_1(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_1(&sc->vtpci_common_res_map.vtrm_map, off));
}

static uint16_t
vtpci_modern_read_common_2(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_2(&sc->vtpci_common_res_map.vtrm_map, off));
}

static uint32_t
vtpci_modern_read_common_4(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_4(&sc->vtpci_common_res_map.vtrm_map, off));
}

static void
vtpci_modern_write_common_1(struct vtpci_modern_softc *sc, bus_size_t off,
    uint8_t val)
{
	bus_write_1(&sc->vtpci_common_res_map.vtrm_map, off, val);
}

static void
vtpci_modern_write_common_2(struct vtpci_modern_softc *sc, bus_size_t off,
    uint16_t val)
{
	bus_write_2(&sc->vtpci_common_res_map.vtrm_map, off, val);
}

static void
vtpci_modern_write_common_4(struct vtpci_modern_softc *sc, bus_size_t off,
    uint32_t val)
{
	bus_write_4(&sc->vtpci_common_res_map.vtrm_map, off, val);
}

static void
vtpci_modern_write_common_8(struct vtpci_modern_softc *sc, bus_size_t off,
    uint64_t val)
{
	uint32_t val0, val1;

	val0 = (uint32_t) val;
	val1 = val >> 32;

	vtpci_modern_write_common_4(sc, off, val0);
	vtpci_modern_write_common_4(sc, off + 4, val1);
}

static void
vtpci_modern_write_notify_2(struct vtpci_modern_softc *sc, bus_size_t off,
    uint16_t val)
{
	bus_write_2(&sc->vtpci_notify_res_map.vtrm_map, off, val);
}

static uint8_t
vtpci_modern_read_isr_1(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_1(&sc->vtpci_isr_res_map.vtrm_map, off));
}

static uint8_t
vtpci_modern_read_device_1(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_1(&sc->vtpci_device_res_map.vtrm_map, off));
}

static uint16_t
vtpci_modern_read_device_2(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_2(&sc->vtpci_device_res_map.vtrm_map, off));
}

static uint32_t
vtpci_modern_read_device_4(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_4(&sc->vtpci_device_res_map.vtrm_map, off));
}

static uint64_t
vtpci_modern_read_device_8(struct vtpci_modern_softc *sc, bus_size_t off)
{
	device_t dev;
	int gen;
	uint32_t val0, val1;

	dev = sc->vtpci_dev;

	/*
	 * Treat the 64-bit field as two 32-bit fields. Use the generation
	 * to ensure a consistent read.
	 */
	do {
		gen = vtpci_modern_config_generation(dev);
		val0 = vtpci_modern_read_device_4(sc, off);
		val1 = vtpci_modern_read_device_4(sc, off + 4);
	} while (gen != vtpci_modern_config_generation(dev));

	return (((uint64_t) val1 << 32) | val0);
}

static void
vtpci_modern_write_device_1(struct vtpci_modern_softc *sc, bus_size_t off,
    uint8_t val)
{
	bus_write_1(&sc->vtpci_device_res_map.vtrm_map, off, val);
}

static void
vtpci_modern_write_device_2(struct vtpci_modern_softc *sc, bus_size_t off,
    uint16_t val)
{
	bus_write_2(&sc->vtpci_device_res_map.vtrm_map, off, val);
}

static void
vtpci_modern_write_device_4(struct vtpci_modern_softc *sc, bus_size_t off,
    uint32_t val)
{
	bus_write_4(&sc->vtpci_device_res_map.vtrm_map, off, val);
}

static void
vtpci_modern_write_device_8(struct vtpci_modern_softc *sc, bus_size_t off,
    uint64_t val)
{
	uint32_t val0, val1;

	val0 = (uint32_t) val;
	val1 = val >> 32;

	vtpci_modern_write_device_4(sc, off, val0);
	vtpci_modern_write_device_4(sc, off + 4, val1);
}
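The 64-bit accessors above never issue an 8-byte bus access: writes are split into two 4-byte writes (low dword first), and reads are bracketed by the config generation counter so a concurrent device update cannot tear the value. A minimal illustrative sketch of the write side, using only the helpers and register offsets defined in this patch (the function name example_set_desc_addr is hypothetical, not part of the patch):

/*
 * Sketch: publish a 64-bit virtqueue descriptor address through the
 * split LO/HI common-config registers from virtio_pci_modern_var.h.
 */
static void
example_set_desc_addr(struct vtpci_modern_softc *sc, uint64_t paddr)
{
	/* Low 32 bits first, then the high 32 bits at offset + 4. */
	vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_Q_DESCLO,
	    (uint32_t)paddr);
	vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_Q_DESCHI,
	    (uint32_t)(paddr >> 32));
}

This is exactly what vtpci_modern_write_common_8() does when handed VIRTIO_PCI_COMMON_Q_DESCLO, since the HI register sits 4 bytes above the LO register.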
(-)sys/dev/virtio/pci/virtio_pci_modern_var.h (+135 lines)
Line 0 Link Here
/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright IBM Corp. 2007
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *
 * This header is BSD licensed so anyone can use the definitions to implement
 * compatible drivers/servers.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _VIRTIO_PCI_MODERN_VAR_H
#define _VIRTIO_PCI_MODERN_VAR_H

#include <dev/virtio/pci/virtio_pci_var.h>

/* IDs for different capabilities.  Must all exist. */
/* Common configuration */
#define VIRTIO_PCI_CAP_COMMON_CFG	1
/* Notifications */
#define VIRTIO_PCI_CAP_NOTIFY_CFG	2
/* ISR access */
#define VIRTIO_PCI_CAP_ISR_CFG		3
/* Device specific configuration */
#define VIRTIO_PCI_CAP_DEVICE_CFG	4
/* PCI configuration access */
#define VIRTIO_PCI_CAP_PCI_CFG		5

/* This is the PCI capability header: */
struct virtio_pci_cap {
	uint8_t cap_vndr;		/* Generic PCI field: PCI_CAP_ID_VNDR */
	uint8_t cap_next;		/* Generic PCI field: next ptr. */
	uint8_t cap_len;		/* Generic PCI field: capability length */
	uint8_t cfg_type;		/* Identifies the structure. */
	uint8_t bar;			/* Where to find it. */
	uint8_t padding[3];		/* Pad to full dword. */
	uint32_t offset;		/* Offset within bar. */
	uint32_t length;		/* Length of the structure, in bytes. */
};

struct virtio_pci_notify_cap {
	struct virtio_pci_cap cap;
	uint32_t notify_off_multiplier;	/* Multiplier for queue_notify_off. */
};

/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
struct virtio_pci_common_cfg {
	/* About the whole device. */
	uint32_t device_feature_select;	/* read-write */
	uint32_t device_feature;	/* read-only */
	uint32_t guest_feature_select;	/* read-write */
	uint32_t guest_feature;		/* read-write */
	uint16_t msix_config;		/* read-write */
	uint16_t num_queues;		/* read-only */
	uint8_t device_status;		/* read-write */
	uint8_t config_generation;	/* read-only */

	/* About a specific virtqueue. */
	uint16_t queue_select;		/* read-write */
	uint16_t queue_size;		/* read-write, power of 2. */
	uint16_t queue_msix_vector;	/* read-write */
	uint16_t queue_enable;		/* read-write */
	uint16_t queue_notify_off;	/* read-only */
	uint32_t queue_desc_lo;		/* read-write */
	uint32_t queue_desc_hi;		/* read-write */
	uint32_t queue_avail_lo;	/* read-write */
	uint32_t queue_avail_hi;	/* read-write */
	uint32_t queue_used_lo;		/* read-write */
	uint32_t queue_used_hi;		/* read-write */
};

/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
struct virtio_pci_cfg_cap {
	struct virtio_pci_cap cap;
	uint8_t pci_cfg_data[4]; /* Data for BAR access. */
};

/* Macro versions of offsets for the Old Timers! */
#define VIRTIO_PCI_CAP_VNDR		0
#define VIRTIO_PCI_CAP_NEXT		1
#define VIRTIO_PCI_CAP_LEN		2
#define VIRTIO_PCI_CAP_CFG_TYPE		3
#define VIRTIO_PCI_CAP_BAR		4
#define VIRTIO_PCI_CAP_OFFSET		8
#define VIRTIO_PCI_CAP_LENGTH		12

#define VIRTIO_PCI_NOTIFY_CAP_MULT	16

#define VIRTIO_PCI_COMMON_DFSELECT	0
#define VIRTIO_PCI_COMMON_DF		4
#define VIRTIO_PCI_COMMON_GFSELECT	8
#define VIRTIO_PCI_COMMON_GF		12
#define VIRTIO_PCI_COMMON_MSIX		16
#define VIRTIO_PCI_COMMON_NUMQ		18
#define VIRTIO_PCI_COMMON_STATUS	20
#define VIRTIO_PCI_COMMON_CFGGENERATION	21
#define VIRTIO_PCI_COMMON_Q_SELECT	22
#define VIRTIO_PCI_COMMON_Q_SIZE	24
#define VIRTIO_PCI_COMMON_Q_MSIX	26
#define VIRTIO_PCI_COMMON_Q_ENABLE	28
#define VIRTIO_PCI_COMMON_Q_NOFF	30
#define VIRTIO_PCI_COMMON_Q_DESCLO	32
#define VIRTIO_PCI_COMMON_Q_DESCHI	36
#define VIRTIO_PCI_COMMON_Q_AVAILLO	40
#define VIRTIO_PCI_COMMON_Q_AVAILHI	44
#define VIRTIO_PCI_COMMON_Q_USEDLO	48
#define VIRTIO_PCI_COMMON_Q_USEDHI	52

#endif /* _VIRTIO_PCI_MODERN_VAR_H */
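The macro offsets above must match the field layout of struct virtio_pci_common_cfg exactly, since the driver accesses the registers by offset, not through the struct. A hedged sketch of a compile-time cross-check (not part of the header; it assumes the kernel's CTASSERT macro and offsetof are available via the usual <sys/systm.h> include):

/* Sketch: keep the register offset macros in sync with the struct. */
CTASSERT(offsetof(struct virtio_pci_common_cfg, device_feature_select) ==
    VIRTIO_PCI_COMMON_DFSELECT);
CTASSERT(offsetof(struct virtio_pci_common_cfg, queue_select) ==
    VIRTIO_PCI_COMMON_Q_SELECT);
CTASSERT(offsetof(struct virtio_pci_common_cfg, queue_desc_lo) ==
    VIRTIO_PCI_COMMON_Q_DESCLO);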
(-)sys/dev/virtio/pci/virtio_pci_var.h (+55 lines)
Line 0 Link Here
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright IBM Corp. 2007
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *
 * This header is BSD licensed so anyone can use the definitions to implement
 * compatible drivers/servers.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _VIRTIO_PCI_VAR_H
#define _VIRTIO_PCI_VAR_H

/* VirtIO PCI vendor/device ID. */
#define VIRTIO_PCI_VENDORID	0x1AF4
#define VIRTIO_PCI_DEVICEID_MIN	0x1000
#define VIRTIO_PCI_DEVICEID_LEGACY_MAX	0x103F
#define VIRTIO_PCI_DEVICEID_MODERN_MIN	0x1040
#define VIRTIO_PCI_DEVICEID_MODERN_MAX	0x107F

/* The bit of the ISR which indicates a device has an interrupt. */
#define VIRTIO_PCI_ISR_INTR	0x1
/* The bit of the ISR which indicates a device configuration change. */
#define VIRTIO_PCI_ISR_CONFIG	0x2
/* Vector value used to disable MSI for queue. */
#define VIRTIO_MSI_NO_VECTOR	0xFFFF

#endif /* _VIRTIO_PCI_VAR_H */
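The two ID ranges are how the transport split works at probe time: legacy devices use 0x1000-0x103F, while modern (VirtIO 1.0) devices use 0x1040-0x107F, where the VirtIO device type is encoded directly as (devid - 0x1040). A small illustrative helper, not part of this header (example_ names are hypothetical):

/* Sketch: classify a VirtIO PCI device ID using the ranges above. */
static inline int
example_virtio_pci_id_is_modern(uint16_t devid)
{
	return (devid >= VIRTIO_PCI_DEVICEID_MODERN_MIN &&
	    devid <= VIRTIO_PCI_DEVICEID_MODERN_MAX);
}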
(-)sys/dev/virtio/random/virtio_random.c (-5 / +27 lines)
Lines 1-4 Link Here
 /*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
  * Copyright (c) 2013, Bryan Venteicher <bryanv@FreeBSD.org>
  * All rights reserved.
  *
Lines 56-62 Link Here
 static int	vtrnd_attach(device_t);
 static int	vtrnd_detach(device_t);
 
-static void	vtrnd_negotiate_features(struct vtrnd_softc *);
+static int	vtrnd_negotiate_features(struct vtrnd_softc *);
+static int	vtrnd_setup_features(struct vtrnd_softc *);
 static int	vtrnd_alloc_virtqueue(struct vtrnd_softc *);
 static void	vtrnd_harvest(struct vtrnd_softc *);
 static void	vtrnd_timer(void *);
Lines 83-90 Link Here
 };
 static devclass_t vtrnd_devclass;
 
-DRIVER_MODULE(virtio_random, virtio_pci, vtrnd_driver, vtrnd_devclass,
+DRIVER_MODULE(virtio_random, vtpcil, vtrnd_driver, vtrnd_devclass,
     vtrnd_modevent, 0);
+DRIVER_MODULE(virtio_random, vtpcim, vtrnd_driver, vtrnd_devclass,
+    vtrnd_modevent, 0);
 MODULE_VERSION(virtio_random, 1);
 MODULE_DEPEND(virtio_random, virtio, 1, 1, 1);
 
Lines 128-138 Link Here
 
 	sc = device_get_softc(dev);
 	sc->vtrnd_dev = dev;
+	virtio_set_feature_desc(dev, vtrnd_feature_desc);
 
 	callout_init(&sc->vtrnd_callout, 1);
 
-	virtio_set_feature_desc(dev, vtrnd_feature_desc);
-	vtrnd_negotiate_features(sc);
+	error = vtrnd_setup_features(sc);
+	if (error) {
+		device_printf(dev, "cannot setup features\n");
+		goto fail;
+	}
 
 	error = vtrnd_alloc_virtqueue(sc);
 	if (error) {
Lines 161-167 Link Here
 	return (0);
 }
 
-static void
+static int
 vtrnd_negotiate_features(struct vtrnd_softc *sc)
 {
 	device_t dev;
Lines 171-176 Link Here
 	features = VTRND_FEATURES;
 
 	sc->vtrnd_features = virtio_negotiate_features(dev, features);
+	return (virtio_finalize_features(dev));
+}
+
+static int
+vtrnd_setup_features(struct vtrnd_softc *sc)
+{
+	int error;
+
+	error = vtrnd_negotiate_features(sc);
+	if (error)
+		return (error);
+
+	return (0);
 }
 
 static int
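This negotiate-then-finalize split is the pattern every converted driver in this patch follows: feature bits are offered first, then the transport's finalize step (which, on a modern device, writes FEATURES_OK and re-reads the status) confirms the device accepted them before any virtqueue is allocated. A minimal sketch of the pattern, using only the virtio(4) wrappers this patch adds (example_setup_features is an illustrative name, not patch code):

/* Sketch: attach-time feature setup for a modern-capable driver. */
static int
example_setup_features(device_t dev, uint64_t desired, uint64_t *negotiated)
{
	*negotiated = virtio_negotiate_features(dev, desired);
	/* Must succeed before virtqueue allocation on a V1 device. */
	return (virtio_finalize_features(dev));
}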
(-)sys/dev/virtio/scsi/virtio_scsi.c (-39 / +101 lines)
Lines 1-4 Link Here
 /*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
  * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
  * All rights reserved.
  *
Lines 74-84 Link Here
 static int	vtscsi_suspend(device_t);
 static int	vtscsi_resume(device_t);
 
-static void	vtscsi_negotiate_features(struct vtscsi_softc *);
+static int	vtscsi_negotiate_features(struct vtscsi_softc *);
+static int	vtscsi_setup_features(struct vtscsi_softc *);
 static void	vtscsi_read_config(struct vtscsi_softc *,
 		    struct virtio_scsi_config *);
 static int	vtscsi_maximum_segments(struct vtscsi_softc *, int);
 static int	vtscsi_alloc_virtqueues(struct vtscsi_softc *);
+static void	vtscsi_check_sizes(struct vtscsi_softc *);
 static void	vtscsi_write_device_config(struct vtscsi_softc *);
 static int	vtscsi_reinit(struct vtscsi_softc *);
 
Lines 132-141 Link Here
 
 static void	vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
 static void	vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
-static void	vtscsi_init_scsi_cmd_req(struct ccb_scsiio *,
-		    struct virtio_scsi_cmd_req *);
-static void	vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t,
-		    uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
+static void	vtscsi_init_scsi_cmd_req(struct vtscsi_softc *,
+		    struct ccb_scsiio *, struct virtio_scsi_cmd_req *);
+static void	vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *, struct ccb_hdr *,
+		    uint32_t, uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
 
 static void	vtscsi_freeze_simq(struct vtscsi_softc *, int);
 static int	vtscsi_thaw_simq(struct vtscsi_softc *, int);
Lines 181-191 Link Here
 static void	vtscsi_enable_vqs_intr(struct vtscsi_softc *);
 
 static void	vtscsi_get_tunables(struct vtscsi_softc *);
-static void	vtscsi_add_sysctl(struct vtscsi_softc *);
+static void	vtscsi_setup_sysctl(struct vtscsi_softc *);
 
 static void	vtscsi_printf_req(struct vtscsi_request *, const char *,
 		    const char *, ...);
 
+#define vtscsi_modern(_sc) (((_sc)->vtscsi_features & VIRTIO_F_VERSION_1) != 0)
+#define vtscsi_htog16(_sc, _val)	virtio_htog16(vtscsi_modern(_sc), _val)
+#define vtscsi_htog32(_sc, _val)	virtio_htog32(vtscsi_modern(_sc), _val)
+#define vtscsi_htog64(_sc, _val)	virtio_htog64(vtscsi_modern(_sc), _val)
+#define vtscsi_gtoh16(_sc, _val)	virtio_gtoh16(vtscsi_modern(_sc), _val)
+#define vtscsi_gtoh32(_sc, _val)	virtio_gtoh32(vtscsi_modern(_sc), _val)
+#define vtscsi_gtoh64(_sc, _val)	virtio_gtoh64(vtscsi_modern(_sc), _val)
+
 /* Global tunables. */
 /*
  * The current QEMU VirtIO SCSI implementation does not cancel in-flight
Lines 203-208 Link Here
 static struct virtio_feature_desc vtscsi_feature_desc[] = {
 	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
 	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},
+	{ VIRTIO_SCSI_F_CHANGE,		"ChangeEvent"	},
+	{ VIRTIO_SCSI_F_T10_PI, 	"T10PI"		},
 
 	{ 0, NULL }
 };
Lines 225-232 Link Here
 };
 static devclass_t vtscsi_devclass;
 
-DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass,
+DRIVER_MODULE(virtio_scsi, vtpcil, vtscsi_driver, vtscsi_devclass,
     vtscsi_modevent, 0);
+DRIVER_MODULE(virtio_scsi, vtpcim, vtscsi_driver, vtscsi_devclass,
+    vtscsi_modevent, 0);
 MODULE_VERSION(virtio_scsi, 1);
 MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
 MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);
Lines 272-294 Link Here
 
 	sc = device_get_softc(dev);
 	sc->vtscsi_dev = dev;
+	virtio_set_feature_desc(dev, vtscsi_feature_desc);
 
 	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
 	TAILQ_INIT(&sc->vtscsi_req_free);
 
 	vtscsi_get_tunables(sc);
-	vtscsi_add_sysctl(sc);
+	vtscsi_setup_sysctl(sc);
 
-	virtio_set_feature_desc(dev, vtscsi_feature_desc);
-	vtscsi_negotiate_features(sc);
-
-	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
-		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
-	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
-		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
-	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
-		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;
+	error = vtscsi_setup_features(sc);
+	if (error) {
+		device_printf(dev, "cannot setup features\n");
+		goto fail;
+	}
 
 	vtscsi_read_config(sc, &scsicfg);
 
 	sc->vtscsi_max_channel = scsicfg.max_channel;
Lines 312-317 Link Here
 		goto fail;
 	}
 
+	vtscsi_check_sizes(sc);
+
 	error = vtscsi_init_event_vq(sc);
 	if (error) {
 		device_printf(dev, "cannot populate the eventvq\n");
Lines 398-414 Link Here
 	return (0);
 }
 
-static void
+static int
 vtscsi_negotiate_features(struct vtscsi_softc *sc)
 {
 	device_t dev;
 	uint64_t features;
 
 	dev = sc->vtscsi_dev;
-	features = virtio_negotiate_features(dev, VTSCSI_FEATURES);
-	sc->vtscsi_features = features;
+	features = VTSCSI_FEATURES;
+
+	sc->vtscsi_features = virtio_negotiate_features(dev, features);
+	return (virtio_finalize_features(dev));
 }
 
+static int
+vtscsi_setup_features(struct vtscsi_softc *sc)
+{
+	device_t dev;
+	int error;
+
+	dev = sc->vtscsi_dev;
+
+	error = vtscsi_negotiate_features(sc);
+	if (error)
+		return (error);
+
+	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
+		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
+	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
+		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
+	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
+		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;
+
+	return (0);
+}
+
 #define VTSCSI_GET_CONFIG(_dev, _field, _cfg)			\
 	virtio_read_device_config(_dev,				\
 	    offsetof(struct virtio_scsi_config, _field),	\
Lines 479-484 Link Here
 }
 
 static void
+vtscsi_check_sizes(struct vtscsi_softc *sc)
+{
+	int rqsize;
+
+	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) {
+		/*
+		 * Ensure the assertions in virtqueue_enqueue(),
+		 * even if the hypervisor reports a bad seg_max.
+		 */
+		rqsize = virtqueue_size(sc->vtscsi_request_vq);
+		if (sc->vtscsi_max_nsegs > rqsize) {
+			device_printf(sc->vtscsi_dev,
+			    "clamping seg_max (%d %d)\n", sc->vtscsi_max_nsegs,
+			    rqsize);
+			sc->vtscsi_max_nsegs = rqsize;
+		}
+	}
+}
+
+static void
 vtscsi_write_device_config(struct vtscsi_softc *sc)
 {
 
Lines 506-513 Link Here
 	error = virtio_reinit(dev, sc->vtscsi_features);
 	if (error == 0) {
 		vtscsi_write_device_config(sc);
-		vtscsi_reinit_event_vq(sc);
 		virtio_reinit_complete(dev);
+		vtscsi_reinit_event_vq(sc);
 
 		vtscsi_enable_vqs_intr(sc);
 	}
Lines 1061-1067 Link Here
 	cmd_req = &req->vsr_cmd_req;
 	cmd_resp = &req->vsr_cmd_resp;
 
-	vtscsi_init_scsi_cmd_req(csio, cmd_req);
+	vtscsi_init_scsi_cmd_req(sc, csio, cmd_req);
 
 	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
 	if (error)
Lines 1181-1187 Link Here
 	tmf_req = &req->vsr_tmf_req;
 	tmf_resp = &req->vsr_tmf_resp;
 
-	vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
+	vtscsi_init_ctrl_tmf_req(sc, to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
 	    (uintptr_t) to_ccbh, tmf_req);
 
 	sglist_reset(sg);
Lines 1289-1314 Link Here
 vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
     struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
 {
+	uint32_t resp_sense_length;
 	cam_status status;
 
 	csio->scsi_status = cmd_resp->status;
-	csio->resid = cmd_resp->resid;
+	csio->resid = vtscsi_htog32(sc, cmd_resp->resid);
 
 	if (csio->scsi_status == SCSI_STATUS_OK)
 		status = CAM_REQ_CMP;
 	else
 		status = CAM_SCSI_STATUS_ERROR;
 
-	if (cmd_resp->sense_len > 0) {
+	resp_sense_length = vtscsi_htog32(sc, cmd_resp->sense_len);
+
+	if (resp_sense_length > 0) {
 		status |= CAM_AUTOSNS_VALID;
 
-		if (cmd_resp->sense_len < csio->sense_len)
-			csio->sense_resid = csio->sense_len -
-			    cmd_resp->sense_len;
+		if (resp_sense_length < csio->sense_len)
+			csio->sense_resid = csio->sense_len - resp_sense_length;
 		else
 			csio->sense_resid = 0;
 
-		memcpy(&csio->sense_data, cmd_resp->sense,
+		bzero(&csio->sense_data, sizeof(csio->sense_data));
+		memcpy(&csio->sense_data, cmd_resp->sense,
 		    csio->sense_len - csio->sense_resid);
 	}
 
Lines 1469-1475 Link Here
 	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
 		callout_stop(&abort_req->vsr_callout);
 
-	vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
+	vtscsi_init_ctrl_tmf_req(sc, ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
 	    (uintptr_t) abort_ccbh, tmf_req);
 
 	sglist_reset(sg);
Lines 1538-1544 Link Here
 	else
 		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;
 
-	vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req);
+	vtscsi_init_ctrl_tmf_req(sc, ccbh, subtype, 0, tmf_req);
 
 	sglist_reset(sg);
 	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
Lines 1575-1581 Link Here
 }
 
 static void
-vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio,
+vtscsi_init_scsi_cmd_req(struct vtscsi_softc *sc, struct ccb_scsiio *csio,
     struct virtio_scsi_cmd_req *cmd_req)
 {
 	uint8_t attr;
Lines 1596-1602 Link Here
 	}
 
 	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
-	cmd_req->tag = (uintptr_t) csio;
+	cmd_req->tag = vtscsi_gtoh64(sc, (uintptr_t) csio);
 	cmd_req->task_attr = attr;
 
 	memcpy(cmd_req->cdb,
Lines 1606-1620 Link Here
 }
 
 static void
-vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype,
-    uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
+vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *sc, struct ccb_hdr *ccbh,
+    uint32_t subtype, uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
 {
 
 	vtscsi_set_request_lun(ccbh, tmf_req->lun);
 
-	tmf_req->type = VIRTIO_SCSI_T_TMF;
-	tmf_req->subtype = subtype;
-	tmf_req->tag = tag;
+	tmf_req->type = vtscsi_gtoh32(sc, VIRTIO_SCSI_T_TMF);
+	tmf_req->subtype = vtscsi_gtoh32(sc, subtype);
+	tmf_req->tag = vtscsi_gtoh64(sc, tag);
 }
 
 static void
Lines 2248-2254 Link Here
 }
 
 static void
-vtscsi_add_sysctl(struct vtscsi_softc *sc)
+vtscsi_setup_sysctl(struct vtscsi_softc *sc)
 {
 	device_t dev;
 	struct vtscsi_statistics *stats;
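The vtscsi_htog/vtscsi_gtoh macros introduced above give the driver a single byte-order rule: fields the device wrote come through htog (host-to-guest), fields the guest builds go through gtoh. A minimal illustrative fragment (not patch code; example_complete is a hypothetical name) showing the read side:

/* Sketch: consume a device-written 32-bit field in guest order. */
static void
example_complete(struct vtscsi_softc *sc,
    struct virtio_scsi_cmd_resp *resp, struct ccb_scsiio *csio)
{
	/* Little-endian on a modern device, guest-native on legacy. */
	csio->resid = vtscsi_htog32(sc, resp->resid);
}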
(-)sys/dev/virtio/scsi/virtio_scsi.h (-8 / +33 lines)
Lines 1-4 Link Here
 /*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
  * This header is BSD licensed so anyone can use the definitions to implement
  * compatible drivers/servers.
  *
Lines 29-41 Link Here
 #ifndef _VIRTIO_SCSI_H
 #define _VIRTIO_SCSI_H
 
-/* Feature bits */
-#define VIRTIO_SCSI_F_INOUT	0x0001	/* Single request can contain both
-					 * read and write buffers */
-#define VIRTIO_SCSI_F_HOTPLUG	0x0002	/* Host should enable hot plug/unplug
-					 * of new LUNs and targets.
-					 */
-
+/* Default values of the CDB and sense data size configuration fields */
 #define VIRTIO_SCSI_CDB_SIZE	32
 #define VIRTIO_SCSI_SENSE_SIZE	96
 
Lines 44-54 Link Here
 	uint8_t lun[8];		/* Logical Unit Number */
 	uint64_t tag;		/* Command identifier */
 	uint8_t task_attr;	/* Task attribute */
-	uint8_t prio;
+	uint8_t prio;		/* SAM command priority field */
 	uint8_t crn;
 	uint8_t cdb[VIRTIO_SCSI_CDB_SIZE];
 } __packed;
 
+/* SCSI command request, followed by protection information */
+struct virtio_scsi_cmd_req_pi {
+	uint8_t lun[8];		/* Logical Unit Number */
+	uint64_t tag;		/* Command identifier */
+	uint8_t task_attr;	/* Task attribute */
+	uint8_t prio;		/* SAM command priority field */
+	uint8_t crn;
+	uint32_t pi_bytesout;	/* DataOUT PI Number of bytes */
+	uint32_t pi_bytesin;	/* DataIN PI Number of bytes */
+	uint8_t cdb[VIRTIO_SCSI_CDB_SIZE];
+} __packed;
+
 /* Response, followed by sense data and data-in */
 struct virtio_scsi_cmd_resp {
 	uint32_t sense_len;		/* Sense data length */
Lines 102-107 Link Here
 	uint32_t max_lun;
 } __packed;
 
+/* Feature bits */
+#define VIRTIO_SCSI_F_INOUT	0x0001	/* Single request can contain both
+					 * read and write buffers.
+					 */
+#define VIRTIO_SCSI_F_HOTPLUG	0x0002	/* Host should enable hot plug/unplug
+					 * of new LUNs and targets.
+					 */
+#define VIRTIO_SCSI_F_CHANGE	0x0004	/* Host will report changes to LUN
+					 * parameters via a
+					 * VIRTIO_SCSI_T_PARAM_CHANGE event.
+					 */
+#define VIRTIO_SCSI_F_T10_PI 	0x0008	/* Extended fields for T10 protection
+					 * information (DIF/DIX) are included
+					 * in the SCSI request header.
+					 */
+
 /* Response codes */
 #define VIRTIO_SCSI_S_OK                       0
 #define VIRTIO_SCSI_S_FUNCTION_COMPLETE        0
Lines 138-143 Link Here
 #define VIRTIO_SCSI_T_NO_EVENT                 0
 #define VIRTIO_SCSI_T_TRANSPORT_RESET          1
 #define VIRTIO_SCSI_T_ASYNC_NOTIFY             2
+#define VIRTIO_SCSI_T_PARAM_CHANGE             3
 
 /* Reasons of transport reset event */
 #define VIRTIO_SCSI_EVT_RESET_HARD             0
(-)sys/dev/virtio/scsi/virtio_scsivar.h (+2 lines)
Lines 1-4 Link Here
 /*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
  * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
  * All rights reserved.
  *
(-)sys/dev/virtio/virtio.c (-28 / +95 lines)
Lines 1-4 Link Here
 /*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
  * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
  * All rights reserved.
  *
Lines 73-82 Link Here
 
 /* Device independent features. */
 static struct virtio_feature_desc virtio_common_feature_desc[] = {
-	{ VIRTIO_F_NOTIFY_ON_EMPTY,	"NotifyOnEmpty"	},
-	{ VIRTIO_RING_F_INDIRECT_DESC,	"RingIndirect"	},
-	{ VIRTIO_RING_F_EVENT_IDX,	"EventIdx"	},
-	{ VIRTIO_F_BAD_FEATURE,		"BadFeature"	},
+	{ VIRTIO_F_NOTIFY_ON_EMPTY,	"NotifyOnEmpty"		}, /* Legacy */
+	{ VIRTIO_F_ANY_LAYOUT,		"AnyLayout"		}, /* Legacy */
+	{ VIRTIO_RING_F_INDIRECT_DESC,	"RingIndirectDesc"	},
+	{ VIRTIO_RING_F_EVENT_IDX,	"RingEventIdx"		},
+	{ VIRTIO_F_BAD_FEATURE,		"BadFeature"		}, /* Legacy */
+	{ VIRTIO_F_VERSION_1,		"Version1"		},
+	{ VIRTIO_F_IOMMU_PLATFORM,	"IOMMUPlatform"		},
 
 	{ 0, NULL }
 };
Lines 114-137 Link Here
 	return (NULL);
 }
 
-void
-virtio_describe(device_t dev, const char *msg,
-    uint64_t features, struct virtio_feature_desc *desc)
+int
+virtio_describe_sbuf(struct sbuf *sb, uint64_t features,
+    struct virtio_feature_desc *desc)
 {
-	struct sbuf sb;
-	uint64_t val;
-	char *buf;
 	const char *name;
+	uint64_t val;
 	int n;
 
-	if ((buf = malloc(512, M_TEMP, M_NOWAIT)) == NULL) {
-		device_printf(dev, "%s features: %#jx\n", msg, (uintmax_t) features);
-		return;
-	}
-
-	sbuf_new(&sb, buf, 512, SBUF_FIXEDLEN);
-	sbuf_printf(&sb, "%s features: %#jx", msg, (uintmax_t) features);
-
+	sbuf_printf(sb, "%#jx", (uintmax_t) features);
 	for (n = 0, val = 1ULL << 63; val != 0; val >>= 1) {
 		/*
 		 * BAD_FEATURE is used to detect broken Linux clients
Lines 141-172 Link Here
 			continue;
 
 		if (n++ == 0)
-			sbuf_cat(&sb, " <");
+			sbuf_cat(sb, " <");
 		else
-			sbuf_cat(&sb, ",");
+			sbuf_cat(sb, ",");
 
 		name = virtio_feature_name(val, desc);
 		if (name == NULL)
-			sbuf_printf(&sb, "%#jx", (uintmax_t) val);
+			sbuf_printf(sb, "%#jx", (uintmax_t) val);
 		else
-			sbuf_cat(&sb, name);
+			sbuf_cat(sb, name);
 	}
 
 	if (n > 0)
-		sbuf_cat(&sb, ">");
+		sbuf_cat(sb, ">");
 
-#if __FreeBSD_version < 900020
-	sbuf_finish(&sb);
-	if (sbuf_overflowed(&sb) == 0)
-#else
-	if (sbuf_finish(&sb) == 0)
-#endif
+	return (sbuf_finish(sb));
+}
+
+void
+virtio_describe(device_t dev, const char *msg, uint64_t features,
+    struct virtio_feature_desc *desc)
+{
+	struct sbuf sb;
+	char *buf;
+	int error;
+
+	if ((buf = malloc(1024, M_TEMP, M_NOWAIT)) == NULL) {
+		error = ENOMEM;
+		goto out;
+	}
+
+	sbuf_new(&sb, buf, 1024, SBUF_FIXEDLEN);
+	sbuf_printf(&sb, "%s features: ", msg);
+
+	error = virtio_describe_sbuf(&sb, features, desc);
+	if (error == 0)
 		device_printf(dev, "%s\n", sbuf_data(&sb));
 
 	sbuf_delete(&sb);
 	free(buf, M_TEMP);
+
+out:
+	if (error != 0) {
+		device_printf(dev, "%s features: %#jx\n", msg,
+		    (uintmax_t) features);
+	}
 }
 
+uint64_t
+virtio_filter_transport_features(uint64_t features)
+{
+	uint64_t transport, mask;
+
+	transport = (1ULL <<
+	    (VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START)) - 1;
+	transport <<= VIRTIO_TRANSPORT_F_START;
+
+	mask = -1ULL & ~transport;
+	mask |= VIRTIO_RING_F_INDIRECT_DESC;
+	mask |= VIRTIO_RING_F_EVENT_IDX;
+	mask |= VIRTIO_F_VERSION_1;
+
+	return (features & mask);
+}
+
+int
+virtio_bus_is_modern(device_t dev)
+{
+	uintptr_t modern;
+
+	virtio_read_ivar(dev, VIRTIO_IVAR_MODERN, &modern);
+	return (modern != 0);
+}
+
+void
+virtio_read_device_config_array(device_t dev, bus_size_t offset, void *dst,
+    int size, int count)
+{
+	int i, gen;
+
+	do {
+		gen = virtio_config_generation(dev);
+
+		for (i = 0; i < count; i++) {
+			virtio_read_device_config(dev, offset + i * size,
+			    (uint8_t *) dst + i * size, size);
+		}
+	} while (gen != virtio_config_generation(dev));
+}
+
 /*
  * VirtIO bus method wrappers.
  */
Lines 192-197 Link Here
 
 	return (VIRTIO_BUS_NEGOTIATE_FEATURES(device_get_parent(dev),
 	    child_features));
 }
 
+int
+virtio_finalize_features(device_t dev)
+{
+
+	return (VIRTIO_BUS_FINALIZE_FEATURES(device_get_parent(dev)));
+}
+
 int
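virtio_filter_transport_features() keeps only the transport bits a child driver is allowed to see: bits 28 through 33 are masked out except RING_F_INDIRECT_DESC (28), RING_F_EVENT_IDX (29) and F_VERSION_1 (32); everything outside that window passes through untouched. A worked illustration under those definitions (example_filter is hypothetical, not patch code):

/* Sketch: what the transport filter keeps and drops. */
static void
example_filter(void)
{
	uint64_t in, out;

	in = VIRTIO_F_NOTIFY_ON_EMPTY | VIRTIO_F_ANY_LAYOUT |
	    VIRTIO_RING_F_INDIRECT_DESC | VIRTIO_F_VERSION_1 |
	    VIRTIO_F_IOMMU_PLATFORM;
	out = virtio_filter_transport_features(in);
	/*
	 * Bits 24 and 27 are outside the transport window and survive;
	 * INDIRECT_DESC and VERSION_1 are explicitly re-allowed;
	 * IOMMU_PLATFORM (bit 33) is stripped.
	 */
}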
(-)sys/dev/virtio/virtio.h (-4 / +12 lines)
Lines 29-37 Link Here
 #ifndef _VIRTIO_H_
 #define _VIRTIO_H_
 
+#include <dev/virtio/virtio_endian.h>
 #include <dev/virtio/virtio_ids.h>
 #include <dev/virtio/virtio_config.h>
 
+struct sbuf;
 struct vq_alloc_info;
 
 /*
Lines 55-61 Link Here
 #define VIRTIO_IVAR_DEVICE		4
 #define VIRTIO_IVAR_SUBVENDOR		5
 #define VIRTIO_IVAR_SUBDEVICE		6
+#define VIRTIO_IVAR_MODERN		7
 
 struct virtio_feature_desc {
 	uint64_t	 vfd_val;
 	const char	*vfd_str;
Lines 63-76 Link Here
 
 const char *virtio_device_name(uint16_t devid);
 void	 virtio_describe(device_t dev, const char *msg,
-	     uint64_t features, struct virtio_feature_desc *feature_desc);
+	     uint64_t features, struct virtio_feature_desc *desc);
+int	 virtio_describe_sbuf(struct sbuf *sb, uint64_t features,
+	     struct virtio_feature_desc *desc);
+uint64_t virtio_filter_transport_features(uint64_t features);
+int	 virtio_bus_is_modern(device_t dev);
+void	 virtio_read_device_config_array(device_t dev, bus_size_t offset,
+	     void *dst, int size, int count);
 /*
  * VirtIO Bus Methods.
  */
 void	 virtio_read_ivar(device_t dev, int ivar, uintptr_t *val);
 void	 virtio_write_ivar(device_t dev, int ivar, uintptr_t val);
 uint64_t virtio_negotiate_features(device_t dev, uint64_t child_features);
+int	 virtio_finalize_features(device_t dev);
 int	 virtio_alloc_virtqueues(device_t dev, int flags, int nvqs,
 	     struct vq_alloc_info *info);
 int	 virtio_setup_intr(device_t dev, enum intr_type type);
Lines 128-134 Link Here
 VIRTIO_READ_IVAR(device,	VIRTIO_IVAR_DEVICE);
 VIRTIO_READ_IVAR(subvendor,	VIRTIO_IVAR_SUBVENDOR);
 VIRTIO_READ_IVAR(subdevice,	VIRTIO_IVAR_SUBDEVICE);
+VIRTIO_READ_IVAR(modern,	VIRTIO_IVAR_MODERN);
 
 #undef VIRTIO_READ_IVAR
 
 #define VIRTIO_WRITE_IVAR(name, ivar)					\
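The new VIRTIO_IVAR_MODERN ivar lets a child driver ask its parent transport which register layout it attached with, without knowing whether the bus is PCI legacy, PCI modern, or MMIO. A short usage sketch (example_report is an illustrative name, not patch code):

/* Sketch: a child driver querying the transport variant. */
static void
example_report(device_t dev)
{
	if (virtio_bus_is_modern(dev))
		device_printf(dev, "attached via a V1 (modern) transport\n");
	else
		device_printf(dev, "attached via a legacy transport\n");
}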
(-)sys/dev/virtio/virtio_bus_if.m (+11 lines)
Lines 36-41 Link Here
 
 CODE {
 	static int
+	virtio_bus_default_finalize_features(device_t dev)
+	{
+		return (0);
+	}
+
+	static int
 	virtio_bus_default_config_generation(device_t dev)
 	{
 		return (0);
Lines 47-52 Link Here
 	uint64_t	child_features;
 };
 
+METHOD int finalize_features {
+	device_t	dev;
+} DEFAULT virtio_bus_default_finalize_features;
+
 METHOD int with_feature {
 	device_t	dev;
 	uint64_t	feature;
Lines 80-85 Link Here
 METHOD void notify_vq {
 	device_t	dev;
 	uint16_t	queue;
+	bus_size_t	offset;
 };
 
 METHOD int config_generation {
(-)sys/dev/virtio/virtio_config.h (-7 / +30 lines)
Lines 33-67 Link Here
 
 /* Status byte for guest to report progress. */
 #define VIRTIO_CONFIG_STATUS_RESET	0x00
+/* We have seen device and processed generic fields. */
 #define VIRTIO_CONFIG_STATUS_ACK	0x01
-#define VIRTIO_CONFIG_STATUS_DRIVER	0x03
+/* We have found a driver for the device. */
+#define VIRTIO_CONFIG_STATUS_DRIVER	0x02
+/* Driver has used its parts of the config, and is happy. */
 #define VIRTIO_CONFIG_STATUS_DRIVER_OK	0x04
+/* Driver has finished configuring features (modern only). */
+#define VIRTIO_CONFIG_S_FEATURES_OK	0x08
+/* Device entered invalid state, driver must reset it. */
+#define VIRTIO_CONFIG_S_NEEDS_RESET	0x40
+/* We've given up on this device. */
 #define VIRTIO_CONFIG_STATUS_FAILED	0x80
 
 /*
  * Generate interrupt when the virtqueue ring is
  * completely used, even if we've suppressed them.
  */
-#define VIRTIO_F_NOTIFY_ON_EMPTY (1 << 24)
+#define VIRTIO_F_NOTIFY_ON_EMPTY	(1UL << 24)
+
+/* Can the device handle any descriptor layout? */
+#define VIRTIO_F_ANY_LAYOUT		(1UL << 27)
 
 /* Support for indirect buffer descriptors. */
-#define VIRTIO_RING_F_INDIRECT_DESC	(1 << 28)
+#define VIRTIO_RING_F_INDIRECT_DESC	(1UL << 28)
 
 /* Support to suppress interrupt until specific index is reached. */
-#define VIRTIO_RING_F_EVENT_IDX		(1 << 29)
+#define VIRTIO_RING_F_EVENT_IDX		(1UL << 29)
 
 /*
  * The guest should never negotiate this feature; it
  * is used to detect faulty drivers.
  */
-#define VIRTIO_F_BAD_FEATURE (1 << 30)
+#define VIRTIO_F_BAD_FEATURE	(1UL << 30)
+
+/* v1.0 compliant. */
+#define VIRTIO_F_VERSION_1	(1ULL << 32)
 
 /*
- * Some VirtIO feature bits (currently bits 28 through 31) are
+ * If clear - device has the IOMMU bypass quirk feature.
+ * If set - use platform tools to detect the IOMMU.
+ *
+ * Note the reverse polarity (compared to most other features),
+ * this is for compatibility with legacy systems.
+ */
+#define VIRTIO_F_IOMMU_PLATFORM		(1ULL << 33)
+
+/*
+ * Some VirtIO feature bits (currently bits 28 through 34) are
 * reserved for the transport being used (eg. virtio_ring), the
 * rest are per-device feature bits.
 */
 #define VIRTIO_TRANSPORT_F_START	28
-#define VIRTIO_TRANSPORT_F_END		32
+#define VIRTIO_TRANSPORT_F_END		34
 
 #endif /* _VIRTIO_CONFIG_H_ */
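The new VIRTIO_CONFIG_S_FEATURES_OK bit is what a modern transport's finalize_features method is expected to set and verify: the driver writes it after negotiation, then reads the status back; if the device cleared it, the feature set was rejected. A hedged sketch of that handshake (read_status/write_status are placeholders for the transport's status accessors, not real functions in this patch):

/* Sketch: the V1 FEATURES_OK handshake performed by finalize_features. */
static int
example_finalize_features(device_t dev)
{
	uint8_t status;

	write_status(dev, read_status(dev) | VIRTIO_CONFIG_S_FEATURES_OK);
	status = read_status(dev);
	if ((status & VIRTIO_CONFIG_S_FEATURES_OK) == 0) {
		/* Device rejected the negotiated feature set. */
		return (ENOTSUP);
	}

	return (0);
}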
(-)sys/dev/virtio/virtio_endian.h (+106 lines)
Line 0 Link Here
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2017, Bryan Venteicher <bryanv@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _VIRTIO_ENDIAN_H_
+#define _VIRTIO_ENDIAN_H_
+
+#include <sys/endian.h>
+
+/*
+ * VirtIO V1 (modern) uses little endian, while legacy VirtIO uses the guest's
+ * native endian. These functions convert to and from the Guest's (driver's)
+ * and the Host's (device's) endianness when needed.
+ */
+
+static inline bool
+virtio_swap_endian(bool modern)
+{
+#if _BYTE_ORDER == _LITTLE_ENDIAN
+	return (false);
+#else
+	return (modern);
+#endif
+}
+
+static inline uint16_t
+virtio_htog16(bool modern, uint16_t val)
+{
+	if (virtio_swap_endian(modern))
+		return (le16toh(val));
+	else
+		return (val);
+}
+
+static inline uint16_t
+virtio_gtoh16(bool modern, uint16_t val)
+{
+	if (virtio_swap_endian(modern))
+		return (htole16(val));
+	else
+		return (val);
+}
+
+static inline uint32_t
+virtio_htog32(bool modern, uint32_t val)
+{
+	if (virtio_swap_endian(modern))
+		return (le32toh(val));
+	else
+		return (val);
+}
+
+static inline uint32_t
+virtio_gtoh32(bool modern, uint32_t val)
+{
+	if (virtio_swap_endian(modern))
+		return (htole32(val));
+	else
+		return (val);
+}
+
+static inline uint64_t
+virtio_htog64(bool modern, uint64_t val)
+{
+	if (virtio_swap_endian(modern))
+		return (le64toh(val));
+	else
+		return (val);
+}
+
+static inline uint64_t
+virtio_gtoh64(bool modern, uint64_t val)
+{
+	if (virtio_swap_endian(modern))
+		return (htole64(val));
+	else
+		return (val);
+}
+
+#endif /* _VIRTIO_ENDIAN_H_ */
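A small usage sketch (not from the patch): on a little-endian guest every helper above compiles to the identity, and on a big-endian guest it byte-swaps only when modern is true, because legacy VirtIO used the guest's native byte order while V1 fixes the ring layout as little endian. The function name below is hypothetical:

    /* Read a 16-bit index that the device published into the used ring. */
    static inline uint16_t
    read_used_idx(struct vring_used *used, bool modern)
    {
    	/* Little endian on the ring under V1; guest-native under legacy. */
    	return (virtio_htog16(modern, used->idx));
    }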
(-)sys/dev/virtio/virtio_ring.h (+7 lines)
Lines 90-95 Link Here
 	struct vring_used *used;
 };
 
+/* Alignment requirements for vring elements.
+ * When using pre-virtio 1.0 layout, these fall out naturally.
+ */
+#define VRING_AVAIL_ALIGN_SIZE 2
+#define VRING_USED_ALIGN_SIZE 4
+#define VRING_DESC_ALIGN_SIZE 16
+
 /* The standard layout for the ring is a continuous chunk of memory which
  * looks like this.  We assume num is a power of 2.
  *
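As a sanity check (illustrative only, not in the patch), the three constants are simply the natural C alignments of the V1 ring structures, which is why the pre-1.0 layout satisfied them "naturally":

    _Static_assert(VRING_AVAIL_ALIGN_SIZE == sizeof(uint16_t),
        "avail ring is an array of 16-bit indices");
    _Static_assert(VRING_USED_ALIGN_SIZE == sizeof(uint32_t),
        "used ring entries carry 32-bit id/len fields");
    _Static_assert(VRING_DESC_ALIGN_SIZE == sizeof(struct vring_desc),
        "descriptors are 16 bytes: 64-bit addr, 32-bit len, two 16-bit fields");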
(-)sys/dev/virtio/virtqueue.c (-58 / +85 lines)
Lines 1-4 Link Here
 /*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
  * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
  * All rights reserved.
  *
Lines 55-72 Link Here
 
 struct virtqueue {
 	device_t		 vq_dev;
-	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];
 	uint16_t		 vq_queue_index;
 	uint16_t		 vq_nentries;
 	uint32_t		 vq_flags;
-#define	VIRTQUEUE_FLAG_INDIRECT	 0x0001
-#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0002
+#define	VIRTQUEUE_FLAG_MODERN	 0x0001
+#define	VIRTQUEUE_FLAG_INDIRECT	 0x0002
+#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0004
 
-	int			 vq_alignment;
-	int			 vq_ring_size;
-	void			*vq_ring_mem;
 	int			 vq_max_indirect_size;
-	int			 vq_indirect_mem_size;
+	bus_size_t		 vq_notify_offset;
 	virtqueue_intr_t	*vq_intrhand;
 	void			*vq_intrhand_arg;
 
Lines 85-90 Link Here
 	 */
 	uint16_t		 vq_used_cons_idx;
 
+	void			*vq_ring_mem;
+	int			 vq_indirect_mem_size;
+	int			 vq_alignment;
+	int			 vq_ring_size;
+	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];
+
 	struct vq_desc_extra {
 		void		  *cookie;
 		struct vring_desc *indirect;
Lines 132-137 Link Here
 static void	vq_ring_notify_host(struct virtqueue *);
 static void	vq_ring_free_chain(struct virtqueue *, uint16_t);
 
+#define vq_modern(_vq)		(((_vq)->vq_flags & VIRTQUEUE_FLAG_MODERN) != 0)
+#define vq_htog16(_vq, _val)	virtio_htog16(vq_modern(_vq), _val)
+#define vq_htog32(_vq, _val)	virtio_htog32(vq_modern(_vq), _val)
+#define vq_htog64(_vq, _val)	virtio_htog64(vq_modern(_vq), _val)
+#define vq_gtoh16(_vq, _val)	virtio_gtoh16(vq_modern(_vq), _val)
+#define vq_gtoh32(_vq, _val)	virtio_gtoh32(vq_modern(_vq), _val)
+#define vq_gtoh64(_vq, _val)	virtio_gtoh64(vq_modern(_vq), _val)
+
 uint64_t
 virtqueue_filter_features(uint64_t features)
 {
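Each wrapper just passes the queue's modern flag down to the virtio_endian.h helpers, so legacy queues keep their no-op conversions. A sketch of what one call expands to (the local variable is illustrative, not in the patch):

    /* vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END) is equivalent to: */
    uint16_t end = virtio_gtoh16(
        (vq->vq_flags & VIRTQUEUE_FLAG_MODERN) != 0,	/* vq_modern(vq) */
        VQ_RING_DESC_CHAIN_END);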
Lines 145-152 Link Here
 }
 
 int
-virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
-    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
+virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size,
+    bus_size_t notify_offset, int align, vm_paddr_t highaddr,
+    struct vq_alloc_info *info, struct virtqueue **vqp)
 {
 	struct virtqueue *vq;
 	int error;
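The added notify_offset parameter lets each queue carry its own doorbell location, which modern PCI needs because the notify address is per queue. A hedged sketch of a caller; the offset computation follows the VirtIO 1.0 notify capability, and the variable names are illustrative rather than taken from this patch:

    /* Spec: doorbell = notify base + queue_notify_off * notify_off_multiplier. */
    bus_size_t notify_offset = queue_notify_off * notify_off_multiplier;

    /* Highaddr of ~0 places no restriction on the ring's physical address. */
    error = virtqueue_alloc(dev, idx, size, notify_offset, align,
        ~(vm_paddr_t)0, &info, &vq);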
Lines 182-193 Link Here
 	vq->vq_dev = dev;
 	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
 	vq->vq_queue_index = queue;
+	vq->vq_notify_offset = notify_offset;
 	vq->vq_alignment = align;
 	vq->vq_nentries = size;
 	vq->vq_free_cnt = size;
 	vq->vq_intrhand = info->vqai_intr;
 	vq->vq_intrhand_arg = info->vqai_intr_arg;
 
+	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_F_VERSION_1) != 0)
+		vq->vq_flags |= VIRTQUEUE_FLAG_MODERN;
 	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
 		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;
 
Lines 292-299 Link Here
 	bzero(indirect, vq->vq_indirect_mem_size);
 
 	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
-		indirect[i].next = i + 1;
-	indirect[i].next = VQ_RING_DESC_CHAIN_END;
+		indirect[i].next = vq_gtoh16(vq, i + 1);
+	indirect[i].next = vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END);
 }
 
 int
Lines 391-396 Link Here
 uint16_t
 virtqueue_index(struct virtqueue *vq)
 {
+
 	return (vq->vq_queue_index);
 }
 
Lines 439-445 Link Here
 {
 	uint16_t used_idx, nused;
 
-	used_idx = vq->vq_ring.used->idx;
+	used_idx = vq_htog16(vq, vq->vq_ring.used->idx);
 
 	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
 	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");
Lines 451-457 Link Here
 virtqueue_intr_filter(struct virtqueue *vq)
 {
 
-	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
+	if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
 		return (0);
 
 	virtqueue_disable_intr(vq);
Lines 478-484 Link Here
 {
 	uint16_t ndesc, avail_idx;
 
-	avail_idx = vq->vq_ring.avail->idx;
+	avail_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
 	ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);
 
 	switch (hint) {
Lines 503-512 Link Here
 {
 
 	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
-		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
-		    vq->vq_nentries - 1;
-	} else
-		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+		vring_used_event(&vq->vq_ring) = vq_gtoh16(vq,
+		    vq->vq_used_cons_idx - vq->vq_nentries - 1);
+		return;
+	}
+
+	vq->vq_ring.avail->flags |= vq_gtoh16(vq, VRING_AVAIL_F_NO_INTERRUPT);
 }
 
 int
Lines 569-584 Link Here
 	void *cookie;
 	uint16_t used_idx, desc_idx;
 
-	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
+	if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
 		return (NULL);
 
 	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
 	uep = &vq->vq_ring.used->ring[used_idx];
 
 	rmb();
-	desc_idx = (uint16_t) uep->id;
+	desc_idx = (uint16_t) vq_htog32(vq, uep->id);
 	if (len != NULL)
-		*len = uep->len;
+		*len = vq_htog32(vq, uep->len);
 
 	vq_ring_free_chain(vq, desc_idx);
 
Lines 636-648 Link Here
 	printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
 	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
 	    "used.idx=%d; used_event_idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
-	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
-	    virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
-	    vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
-	    vq->vq_ring.used->idx,
-	    vring_used_event(&vq->vq_ring),
-	    vq->vq_ring.avail->flags,
-	    vq->vq_ring.used->flags);
+	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt, virtqueue_nused(vq),
+	    vq->vq_queued_cnt, vq->vq_desc_head_idx,
+	    vq_htog16(vq, vq->vq_ring.avail->idx), vq->vq_used_cons_idx,
+	    vq_htog16(vq, vq->vq_ring.used->idx),
+	    vq_htog16(vq, vring_used_event(&vq->vq_ring)),
+	    vq_htog16(vq, vq->vq_ring.avail->flags),
+	    vq_htog16(vq, vq->vq_ring.used->flags));
 }
 
 static void
Lines 659-672 Link Here
 	vring_init(vr, size, ring_mem, vq->vq_alignment);
 
 	for (i = 0; i < size - 1; i++)
-		vr->desc[i].next = i + 1;
-	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
+		vr->desc[i].next = vq_gtoh16(vq, i + 1);
+	vr->desc[i].next = vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END);
 }
 
 static void
 vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
 {
-	uint16_t avail_idx;
+	uint16_t avail_idx, avail_ring_idx;
 
 	/*
 	 * Place the head of the descriptor chain into the next slot and make
Lines 675-685 Link Here
 	 * currently running on another CPU, we can keep it processing the new
 	 * descriptor.
 	 */
-	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
-	vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+	avail_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
+	avail_ring_idx = avail_idx & (vq->vq_nentries - 1);
+	vq->vq_ring.avail->ring[avail_ring_idx] = vq_gtoh16(vq, desc_idx);
 
 	wmb();
-	vq->vq_ring.avail->idx++;
+	vq->vq_ring.avail->idx = vq_gtoh16(vq, avail_idx + 1);
 
 	/* Keep pending count until virtqueue_notify(). */
 	vq->vq_queued_cnt++;
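Since avail->idx is now kept in ring byte order, it can no longer be incremented in place: the index is loaded and converted, and the incremented value is converted back on store. On a big-endian guest talking to a modern device, the store above behaves like this sketch:

    /* What "avail->idx = vq_gtoh16(vq, avail_idx + 1)" does under V1: */
    uint16_t idx = le16toh(vq->vq_ring.avail->idx);	/* to guest order */
    vq->vq_ring.avail->idx = htole16(idx + 1);		/* back to little endian */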
Lines 698-716 Link Here
 
 	for (i = 0, idx = head_idx, seg = sg->sg_segs;
 	     i < needed;
-	     i++, idx = dp->next, seg++) {
+	     i++, idx = vq_htog16(vq, dp->next), seg++) {
 		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
 		    "premature end of free desc chain");
 
 		dp = &desc[idx];
-		dp->addr = seg->ss_paddr;
-		dp->len = seg->ss_len;
+		dp->addr = vq_gtoh64(vq, seg->ss_paddr);
+		dp->len = vq_gtoh32(vq, seg->ss_len);
 		dp->flags = 0;
 
 		if (i < needed - 1)
-			dp->flags |= VRING_DESC_F_NEXT;
+			dp->flags |= vq_gtoh16(vq, VRING_DESC_F_NEXT);
 		if (i >= readable)
-			dp->flags |= VRING_DESC_F_WRITE;
+			dp->flags |= vq_gtoh16(vq, VRING_DESC_F_WRITE);
 	}
 
 	return (idx);
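For context, a sketch of how a driver typically reaches this path through the public virtqueue API (error handling elided; the buffer names are hypothetical):

    struct sglist_seg segs[2];
    struct sglist sg;

    sglist_init(&sg, 2, segs);
    sglist_append(&sg, hdr, sizeof(*hdr));	/* device-readable segment */
    sglist_append(&sg, buf, buflen);		/* device-writable segment */
    virtqueue_enqueue(vq, cookie, &sg, 1, 1);	/* 1 readable, 1 writable */
    virtqueue_notify(vq);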
Lines 755-768 Link Here
 	dxp->cookie = cookie;
 	dxp->ndescs = 1;
 
-	dp->addr = dxp->indirect_paddr;
-	dp->len = needed * sizeof(struct vring_desc);
-	dp->flags = VRING_DESC_F_INDIRECT;
+	dp->addr = vq_gtoh64(vq, dxp->indirect_paddr);
+	dp->len = vq_gtoh32(vq, needed * sizeof(struct vring_desc));
+	dp->flags = vq_gtoh16(vq, VRING_DESC_F_INDIRECT);
 
 	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
 	    sg, readable, writable);
 
-	vq->vq_desc_head_idx = dp->next;
+	vq->vq_desc_head_idx = vq_htog16(vq, dp->next);
 	vq->vq_free_cnt--;
 	if (vq->vq_free_cnt == 0)
 		VQ_RING_ASSERT_CHAIN_TERM(vq);
Lines 780-789 Link Here
 	 * Enable interrupts, making sure we get the latest index of
 	 * what's already been consumed.
 	 */
-	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
-		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
-	else
-		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
+		vring_used_event(&vq->vq_ring) =
+		    vq_gtoh16(vq, vq->vq_used_cons_idx + ndesc);
+	} else {
+		vq->vq_ring.avail->flags &=
+		    vq_gtoh16(vq, ~VRING_AVAIL_F_NO_INTERRUPT);
+	}
 
 	mb();
 
Lines 801-824 Link Here
 static int
 vq_ring_must_notify_host(struct virtqueue *vq)
 {
-	uint16_t new_idx, prev_idx, event_idx;
+	uint16_t new_idx, prev_idx, event_idx, flags;
 
 	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
-		new_idx = vq->vq_ring.avail->idx;
+		new_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
 		prev_idx = new_idx - vq->vq_queued_cnt;
-		event_idx = vring_avail_event(&vq->vq_ring);
+		event_idx = vq_htog16(vq, vring_avail_event(&vq->vq_ring));
 
 		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
 	}
 
-	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
+	flags = vq->vq_ring.used->flags;
+	return ((flags & vq_gtoh16(vq, VRING_USED_F_NO_NOTIFY)) == 0);
 }
 
 static void
 vq_ring_notify_host(struct virtqueue *vq)
 {
 
-	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
+	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index,
+	    vq->vq_notify_offset);
 }
 
 static void
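The event-index branch leans on vring_need_event() from virtio_ring.h, which this patch leaves unchanged; it is a wrap-safe window test. A worked example, assuming its standard definition:

    /* vring_need_event(event_idx, new_idx, old):
     *   (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old)
     *
     * After queuing three buffers: old = 5, new_idx = 8.
     *   event_idx = 7: (8-7-1) = 0     < (8-5) = 3 -> ring the doorbell
     *   event_idx = 9: (8-9-1) = 65534 < 3 is false -> suppress the notify
     */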
Lines 837-846 Link Here
 	vq->vq_free_cnt += dxp->ndescs;
 	dxp->ndescs--;
 
-	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
-		while (dp->flags & VRING_DESC_F_NEXT) {
-			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
-			dp = &vq->vq_ring.desc[dp->next];
+	if ((dp->flags & vq_gtoh16(vq, VRING_DESC_F_INDIRECT)) == 0) {
+		while (dp->flags & vq_gtoh16(vq, VRING_DESC_F_NEXT)) {
+			uint16_t next_idx = vq_htog16(vq, dp->next);
+			VQ_RING_ASSERT_VALID_IDX(vq, next_idx);
+			dp = &vq->vq_ring.desc[next_idx];
 			dxp->ndescs--;
 		}
 	}
Lines 853-858 Link Here
 	 * newly freed chain. If the virtqueue was completely used, then
 	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
 	 */
-	dp->next = vq->vq_desc_head_idx;
+	dp->next = vq_gtoh16(vq, vq->vq_desc_head_idx);
 	vq->vq_desc_head_idx = desc_idx;
 }
(-)sys/dev/virtio/virtqueue.h (-2 / +4 lines)
Lines 1-4 Link Here
 /*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
  * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
  * All rights reserved.
  *
Lines 68-75 Link Here
 uint64_t virtqueue_filter_features(uint64_t features);
 
 int	 virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size,
-	     int align, vm_paddr_t highaddr, struct vq_alloc_info *info,
-	     struct virtqueue **vqp);
+	     bus_size_t notify_offset, int align, vm_paddr_t highaddr,
+	     struct vq_alloc_info *info, struct virtqueue **vqp);
 void	*virtqueue_drain(struct virtqueue *vq, int *last);
 void	 virtqueue_free(struct virtqueue *vq);
 int	 virtqueue_reinit(struct virtqueue *vq, uint16_t size);
