(-)sys/conf/files (+3 lines)
Lines 3478-3483
 dev/virtio/virtio_bus_if.m		optional	virtio
 dev/virtio/virtio_if.m			optional	virtio
 dev/virtio/pci/virtio_pci.c		optional	virtio_pci
+dev/virtio/pci/virtio_pci_if.m          optional        virtio_pci
+dev/virtio/pci/virtio_pci_legacy.c      optional        virtio_pci
+dev/virtio/pci/virtio_pci_modern.c      optional        virtio_pci
 dev/virtio/mmio/virtio_mmio.c		optional	virtio_mmio fdt
 dev/virtio/mmio/virtio_mmio_if.m	optional	virtio_mmio fdt
 dev/virtio/network/if_vtnet.c		optional	vtnet
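With the three entries above, any kernel configuration that already carries the virtio_pci device now also compiles the split legacy/modern transport sources and the generated virtio_pci_if glue. A hypothetical config fragment, for illustration only:

device	virtio		# VirtIO core
device	virtio_pci	# PCI transport (now builds legacy + modern + interface glue)
device	vtnet		# VirtIO network driver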
(-)sys/modules/virtio/pci/Makefile (+2 lines)
Lines 27-32
 
 KMOD=	virtio_pci
 SRCS=	virtio_pci.c
+SRCS+=	virtio_pci_legacy.c virtio_pci_modern.c
+SRCS+=	virtio_pci_if.c virtio_pci_if.h
 SRCS+=	virtio_bus_if.h virtio_if.h 
 SRCS+=	bus_if.h device_if.h pci_if.h
 
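virtio_pci_if.m is a kobj interface description; the kernel build turns it into virtio_pci_if.c and virtio_pci_if.h, which is why both generated names join SRCS above. A hedged sketch of what such a .m file contains (the method shown is hypothetical, not taken from the patch):

INTERFACE virtio_pci;

METHOD uint8_t read_isr {
	device_t	dev;
};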
(-)sys/dev/virtio/balloon/virtio_balloon.c (-15 / +49 lines)
Lines 80-85
 static struct virtio_feature_desc vtballoon_feature_desc[] = {
 	{ VIRTIO_BALLOON_F_MUST_TELL_HOST,	"MustTellHost"	},
 	{ VIRTIO_BALLOON_F_STATS_VQ,		"StatsVq"	},
+	{ VIRTIO_BALLOON_F_DEFLATE_ON_OOM,	"DeflateOnOOM"	},
 
 	{ 0, NULL }
 };
Lines 89-95
 static int	vtballoon_detach(device_t);
 static int	vtballoon_config_change(device_t);
 
-static void	vtballoon_negotiate_features(struct vtballoon_softc *);
+static int	vtballoon_negotiate_features(struct vtballoon_softc *);
+static int	vtballoon_setup_features(struct vtballoon_softc *);
 static int	vtballoon_alloc_virtqueues(struct vtballoon_softc *);
 
 static void	vtballoon_vq_intr(void *);
Lines 109-118
 
 static int	vtballoon_sleep(struct vtballoon_softc *);
 static void	vtballoon_thread(void *);
-static void	vtballoon_add_sysctl(struct vtballoon_softc *);
+static void	vtballoon_setup_sysctl(struct vtballoon_softc *);
 
+#define vtballoon_modern(_sc) \
+    (((_sc)->vtballoon_features & VIRTIO_F_VERSION_1) != 0)
+
 /* Features desired/implemented by this driver. */
-#define VTBALLOON_FEATURES		0
+#define VTBALLOON_FEATURES		VIRTIO_BALLOON_F_MUST_TELL_HOST
 
 /* Timeout between retries when the balloon needs inflating. */
 #define VTBALLOON_LOWMEM_TIMEOUT	hz
Lines 153-160
 };
 static devclass_t vtballoon_devclass;
 
-DRIVER_MODULE(virtio_balloon, virtio_pci, vtballoon_driver,
+DRIVER_MODULE(virtio_balloon, vtpcil, vtballoon_driver,
     vtballoon_devclass, 0, 0);
+DRIVER_MODULE(virtio_balloon, vtpcim, vtballoon_driver,
+    vtballoon_devclass, 0, 0);
 MODULE_VERSION(virtio_balloon, 1);
 MODULE_DEPEND(virtio_balloon, virtio, 1, 1, 1);
 
Lines 178-191
 
 	sc = device_get_softc(dev);
 	sc->vtballoon_dev = dev;
+	virtio_set_feature_desc(dev, vtballoon_feature_desc);
 
 	VTBALLOON_LOCK_INIT(sc, device_get_nameunit(dev));
 	TAILQ_INIT(&sc->vtballoon_pages);
 
-	vtballoon_add_sysctl(sc);
+	vtballoon_setup_sysctl(sc);
 
-	virtio_set_feature_desc(dev, vtballoon_feature_desc);
-	vtballoon_negotiate_features(sc);
+	error = vtballoon_setup_features(sc);
+	if (error) {
+		device_printf(dev, "cannot setup features\n");
+		goto fail;
+	}
 
 	sc->vtballoon_page_frames = malloc(VTBALLOON_PAGES_PER_REQUEST *
 	    sizeof(uint32_t), M_DEVBUF, M_NOWAIT | M_ZERO);
Lines 271-288
 	return (1);
 }
 
-static void
+static int
 vtballoon_negotiate_features(struct vtballoon_softc *sc)
 {
 	device_t dev;
 	uint64_t features;
 
 	dev = sc->vtballoon_dev;
-	features = virtio_negotiate_features(dev, VTBALLOON_FEATURES);
-	sc->vtballoon_features = features;
+	features = VTBALLOON_FEATURES;
+
+	sc->vtballoon_features = virtio_negotiate_features(dev, features);
+	return (virtio_finalize_features(dev));
 }
 
 static int
+vtballoon_setup_features(struct vtballoon_softc *sc)
+{
+	int error;
+
+	error = vtballoon_negotiate_features(sc);
+	if (error)
+		return (error);
+
+	return (0);
+}
+
+static int
 vtballoon_alloc_virtqueues(struct vtballoon_softc *sc)
 {
 	device_t dev;
Lines 440-446
 {
 	vm_page_t m;
 
-	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
+	m = vm_page_alloc(NULL, 0,
+	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP);
 	if (m != NULL)
 		sc->vtballoon_current_npages++;
 
Lines 463-478
 	desired = virtio_read_dev_config_4(sc->vtballoon_dev,
 	    offsetof(struct virtio_balloon_config, num_pages));
 
-	return (le32toh(desired));
+	if (vtballoon_modern(sc))
+		return (desired);
+	else
+		return (le32toh(desired));
 }
 
 static void
 vtballoon_update_size(struct vtballoon_softc *sc)
 {
+	uint32_t npages;
 
+	npages = sc->vtballoon_current_npages;
+	if (!vtballoon_modern(sc))
+		npages = htole32(npages);
+
 	virtio_write_dev_config_4(sc->vtballoon_dev,
-	    offsetof(struct virtio_balloon_config, actual),
-	    htole32(sc->vtballoon_current_npages));
+	    offsetof(struct virtio_balloon_config, actual), npages);
+
 }
 
 static int
Lines 544-550
 }
 
 static void
-vtballoon_add_sysctl(struct vtballoon_softc *sc)
+vtballoon_setup_sysctl(struct vtballoon_softc *sc)
 {
 	device_t dev;
 	struct sysctl_ctx_list *ctx;
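The attach-path change above is the pattern this patch applies to every driver: negotiation now returns a status, because the transport-level finalize step (which on modern devices sets and verifies the FEATURES_OK status bit) can fail. A condensed sketch of the pattern, with a hypothetical vtfoo driver standing in for the real ones:

static int
vtfoo_negotiate_features(struct vtfoo_softc *sc)
{
	device_t dev = sc->vtfoo_dev;

	sc->vtfoo_features = virtio_negotiate_features(dev, VTFOO_FEATURES);
	/* On modern devices this also sets FEATURES_OK and re-checks it. */
	return (virtio_finalize_features(dev));
}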
(-)sys/dev/virtio/balloon/virtio_balloon.h (-1 / +27 lines)
Lines 36-41
 /* Feature bits. */
 #define VIRTIO_BALLOON_F_MUST_TELL_HOST	0x1 /* Tell before reclaiming pages */
 #define VIRTIO_BALLOON_F_STATS_VQ	0x2 /* Memory stats virtqueue */
+#define VIRTIO_BALLOON_F_DEFLATE_ON_OOM	0x4 /* Deflate balloon on OOM */
 
 /* Size of a PFN in the balloon interface. */
 #define VIRTIO_BALLOON_PFN_SHIFT 12
Lines 54-61
 #define VIRTIO_BALLOON_S_MINFLT   3   /* Number of minor faults */
 #define VIRTIO_BALLOON_S_MEMFREE  4   /* Total amount of free memory */
 #define VIRTIO_BALLOON_S_MEMTOT   5   /* Total amount of memory */
-#define VIRTIO_BALLOON_S_NR       6
+#define VIRTIO_BALLOON_S_AVAIL    6   /* Available memory as in /proc */
+#define VIRTIO_BALLOON_S_CACHES   7   /* Disk caches */
+#define VIRTIO_BALLOON_S_NR       8
 
+/*
+ * Memory statistics structure.
+ * Driver fills an array of these structures and passes to device.
+ *
+ * NOTE: fields are laid out in a way that would make compiler add padding
+ * between and after fields, so we have to use compiler-specific attributes to
+ * pack it, to disable this padding. This also often causes compiler to
+ * generate suboptimal code.
+ *
+ * We maintain this statistics structure format for backwards compatibility,
+ * but don't follow this example.
+ *
+ * If implementing a similar structure, do something like the below instead:
+ *     struct virtio_balloon_stat {
+ *         __virtio16 tag;
+ *         __u8 reserved[6];
+ *         __virtio64 val;
+ *     };
+ *
+ * In other words, add explicit reserved fields to align field and
+ * structure boundaries at field size, avoiding compiler padding
+ * without the packed attribute.
+ */
 struct virtio_balloon_stat {
 	uint16_t tag;
 	uint64_t val;
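The layout advice in the new comment is easy to verify: on the usual ABIs with natural alignment, the packed layout is 10 bytes, while the reserved-field layout is 16 bytes with val on an 8-byte boundary and no packed attribute needed. A standalone illustration (not part of the patch):

#include <stdint.h>

struct stat_packed {
	uint16_t tag;
	uint64_t val;
} __attribute__((packed));	/* no padding anywhere */

struct stat_reserved {
	uint16_t tag;
	uint8_t  reserved[6];	/* explicit padding to an 8-byte boundary */
	uint64_t val;
};

_Static_assert(sizeof(struct stat_packed) == 10, "packed layout");
_Static_assert(sizeof(struct stat_reserved) == 16, "naturally aligned layout");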
(-)sys/dev/virtio/block/virtio_blk.c (-64 / +95 lines)
Lines 76-86
 	uint64_t		 vtblk_features;
 	uint32_t		 vtblk_flags;
 #define VTBLK_FLAG_INDIRECT	0x0001
-#define VTBLK_FLAG_READONLY	0x0002
-#define VTBLK_FLAG_DETACH	0x0004
-#define VTBLK_FLAG_SUSPEND	0x0008
-#define VTBLK_FLAG_BARRIER	0x0010
-#define VTBLK_FLAG_WC_CONFIG	0x0020
+#define VTBLK_FLAG_DETACH	0x0002
+#define VTBLK_FLAG_SUSPEND	0x0004
+#define VTBLK_FLAG_BARRIER	0x0008
+#define VTBLK_FLAG_WCE_CONFIG	0x0010
 
 	struct virtqueue	*vtblk_vq;
 	struct sglist		*vtblk_sglist;
Lines 109-117
 	{ VIRTIO_BLK_F_RO,		"ReadOnly"	},
 	{ VIRTIO_BLK_F_BLK_SIZE,	"BlockSize"	},
 	{ VIRTIO_BLK_F_SCSI,		"SCSICmds"	},
-	{ VIRTIO_BLK_F_WCE,		"WriteCache"	},
+	{ VIRTIO_BLK_F_FLUSH,		"FlushCmd"	},
 	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology"	},
 	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE"	},
+	{ VIRTIO_BLK_F_MQ,		"Multiqueue"	},
 
 	{ 0, NULL }
 };
Lines 133-140
 static int	vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
 static void	vtblk_strategy(struct bio *);
 
-static void	vtblk_negotiate_features(struct vtblk_softc *);
-static void	vtblk_setup_features(struct vtblk_softc *);
+static int	vtblk_negotiate_features(struct vtblk_softc *);
+static int	vtblk_setup_features(struct vtblk_softc *);
 static int	vtblk_maximum_segments(struct vtblk_softc *,
 		    struct virtio_blk_config *);
 static int	vtblk_alloc_virtqueue(struct vtblk_softc *);
Lines 193-198
 static void	vtblk_setup_sysctl(struct vtblk_softc *);
 static int	vtblk_tunable_int(struct vtblk_softc *, const char *, int);
 
+#define vtblk_modern(_sc) (((_sc)->vtblk_features & VIRTIO_F_VERSION_1) != 0)
+#define vtblk_htog16(_sc, _val)	virtio_htog16(vtblk_modern(_sc), _val)
+#define vtblk_htog32(_sc, _val)	virtio_htog32(vtblk_modern(_sc), _val)
+#define vtblk_htog64(_sc, _val)	virtio_htog64(vtblk_modern(_sc), _val)
+#define vtblk_gtoh16(_sc, _val)	virtio_gtoh16(vtblk_modern(_sc), _val)
+#define vtblk_gtoh32(_sc, _val)	virtio_gtoh32(vtblk_modern(_sc), _val)
+#define vtblk_gtoh64(_sc, _val)	virtio_gtoh64(vtblk_modern(_sc), _val)
+
 /* Tunables. */
 static int vtblk_no_ident = 0;
 TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
Lines 200-217
 TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);
 
 /* Features desired/implemented by this driver. */
-#define VTBLK_FEATURES \
-    (VIRTIO_BLK_F_BARRIER		| \
-     VIRTIO_BLK_F_SIZE_MAX		| \
+#define VTBLK_COMMON_FEATURES \
+    (VIRTIO_BLK_F_SIZE_MAX		| \
      VIRTIO_BLK_F_SEG_MAX		| \
      VIRTIO_BLK_F_GEOMETRY		| \
      VIRTIO_BLK_F_RO			| \
      VIRTIO_BLK_F_BLK_SIZE		| \
-     VIRTIO_BLK_F_WCE			| \
+     VIRTIO_BLK_F_FLUSH			| \
      VIRTIO_BLK_F_TOPOLOGY		| \
      VIRTIO_BLK_F_CONFIG_WCE		| \
      VIRTIO_RING_F_INDIRECT_DESC)
 
+#define VTBLK_MODERN_FEATURES 	(VTBLK_COMMON_FEATURES)
+#define VTBLK_LEGACY_FEATURES	(VIRTIO_BLK_F_BARRIER | VTBLK_COMMON_FEATURES)
+
 #define VTBLK_MTX(_sc)		&(_sc)->vtblk_mtx
 #define VTBLK_LOCK_INIT(_sc, _name) \
 				mtx_init(VTBLK_MTX((_sc)), (_name), \
Lines 256-263
 
 DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
     vtblk_modevent, 0);
-DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
+DRIVER_MODULE(virtio_blk, vtpcil, vtblk_driver, vtblk_devclass,
     vtblk_modevent, 0);
+DRIVER_MODULE(virtio_blk, vtpcim, vtblk_driver, vtblk_devclass,
+    vtblk_modevent, 0);
 MODULE_VERSION(virtio_blk, 1);
 MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);
 
Lines 301-310
 	struct virtio_blk_config blkcfg;
 	int error;
 
-	virtio_set_feature_desc(dev, vtblk_feature_desc);
-
 	sc = device_get_softc(dev);
 	sc->vtblk_dev = dev;
+	virtio_set_feature_desc(dev, vtblk_feature_desc);
+
 	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
 	bioq_init(&sc->vtblk_bioq);
 	TAILQ_INIT(&sc->vtblk_dump_queue);
Lines 312-319
 	TAILQ_INIT(&sc->vtblk_req_ready);
 
 	vtblk_setup_sysctl(sc);
-	vtblk_setup_features(sc);
 
+	error = vtblk_setup_features(sc);
+	if (error) {
+		device_printf(dev, "cannot setup features\n");
+		goto fail;
+	}
+
 	vtblk_read_config(sc, &blkcfg);
 
 	/*
Lines 541-556
 		return;
 	}
 
-	/*
-	 * Fail any write if RO. Unfortunately, there does not seem to
-	 * be a better way to report our readonly'ness to GEOM above.
-	 */
-	if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
-	    (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
-		vtblk_bio_done(sc, bp, EROFS);
-		return;
-	}
-
 	VTBLK_LOCK(sc);
 
 	if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
Lines 565-599
 	VTBLK_UNLOCK(sc);
 }
 
-static void
+static int
 vtblk_negotiate_features(struct vtblk_softc *sc)
 {
 	device_t dev;
 	uint64_t features;
 
 	dev = sc->vtblk_dev;
-	features = VTBLK_FEATURES;
+	features = virtio_bus_is_modern(dev) ? VTBLK_MODERN_FEATURES :
+	    VTBLK_LEGACY_FEATURES;
 
 	sc->vtblk_features = virtio_negotiate_features(dev, features);
+	return (virtio_finalize_features(dev));
 }
 
-static void
+static int
 vtblk_setup_features(struct vtblk_softc *sc)
 {
 	device_t dev;
-
+	int error;
+	
 	dev = sc->vtblk_dev;
 
-	vtblk_negotiate_features(sc);
+	error = vtblk_negotiate_features(sc);
+	if (error)
+		return (error);
 
 	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
 		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
-	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
-		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
-	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
-		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
 	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
-		sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
+		sc->vtblk_flags |= VTBLK_FLAG_WCE_CONFIG;
+	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
+		sc->vtblk_flags |= VTBLK_FLAG_BARRIER; /* Legacy. */
+
+	return (0);
 }
 
 static int
Lines 672-686
 	dp->d_name = VTBLK_DISK_NAME;
 	dp->d_unit = device_get_unit(dev);
 	dp->d_drv1 = sc;
-	dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO |
-	    DISKFLAG_DIRECT_COMPLETION;
+	dp->d_flags = DISKFLAG_UNMAPPED_BIO | DISKFLAG_DIRECT_COMPLETION;
 	dp->d_hba_vendor = virtio_get_vendor(dev);
 	dp->d_hba_device = virtio_get_device(dev);
 	dp->d_hba_subvendor = virtio_get_subvendor(dev);
 	dp->d_hba_subdevice = virtio_get_subdevice(dev);
 
-	if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
+	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
+		dp->d_flags |= DISKFLAG_WRITE_PROTECT;
+	else {
+		if (virtio_with_feature(dev, VIRTIO_BLK_F_FLUSH))
+			dp->d_flags |= DISKFLAG_CANFLUSHCACHE;
 		dp->d_dump = vtblk_dump;
+	}
 
 	/* Capacity is always in 512-byte units. */
 	dp->d_mediasize = blkcfg->capacity * 512;
Lines 864-889
 	bp = bioq_takefirst(bioq);
 	req->vbr_bp = bp;
 	req->vbr_ack = -1;
-	req->vbr_hdr.ioprio = 1;
+	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
 
 	switch (bp->bio_cmd) {
 	case BIO_FLUSH:
-		req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
+		req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_FLUSH);
+		req->vbr_hdr.sector = 0;
 		break;
 	case BIO_READ:
-		req->vbr_hdr.type = VIRTIO_BLK_T_IN;
-		req->vbr_hdr.sector = bp->bio_offset / 512;
+		req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_IN);
+		req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / 512);
 		break;
 	case BIO_WRITE:
-		req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
-		req->vbr_hdr.sector = bp->bio_offset / 512;
+		req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_OUT);
+		req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / 512);
 		break;
 	default:
 		panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
 	}
 
 	if (bp->bio_flags & BIO_ORDERED)
-		req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;
+		req->vbr_hdr.type |= vtblk_gtoh32(sc, VIRTIO_BLK_T_BARRIER);
 
 	return (req);
 }
Lines 914-920
 			if (!virtqueue_empty(vq))
 				return (EBUSY);
 			ordered = 1;
-			req->vbr_hdr.type &= ~VIRTIO_BLK_T_BARRIER;
+			req->vbr_hdr.type &= vtblk_gtoh32(sc,
+				~VIRTIO_BLK_T_BARRIER);
 		}
 	}
 
Lines 1018-1032
 static void
 vtblk_drain(struct vtblk_softc *sc)
 {
-	struct bio_queue queue;
 	struct bio_queue_head *bioq;
 	struct vtblk_request *req;
 	struct bio *bp;
 
 	bioq = &sc->vtblk_bioq;
-	TAILQ_INIT(&queue);
 
 	if (sc->vtblk_vq != NULL) {
+		struct bio_queue queue;
+
+		TAILQ_INIT(&queue);
 		vtblk_queue_completed(sc, &queue);
 		vtblk_done_completed(sc, &queue);
 
Lines 1117-1126
 	/* Read the configuration if the feature was negotiated. */
 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
+	    geometry.cylinders, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
+	    geometry.heads, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
+	    geometry.sectors, blkcfg);
 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
+	    topology.physical_block_exp, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
+	    topology.alignment_offset, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
+	    topology.min_io_size, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
+	    topology.opt_io_size, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, wce, blkcfg);
 }
 
 #undef VTBLK_GET_CONFIG
Lines 1144-1151
 		return;
 
 	req->vbr_ack = -1;
-	req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
-	req->vbr_hdr.ioprio = 1;
+	req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_GET_ID);
+	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
 	req->vbr_hdr.sector = 0;
 
 	req->vbr_bp = &buf;
Lines 1276-1284
 
 	req = &sc->vtblk_dump_request;
 	req->vbr_ack = -1;
-	req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
-	req->vbr_hdr.ioprio = 1;
-	req->vbr_hdr.sector = offset / 512;
+	req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_OUT);
+	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
+	req->vbr_hdr.sector = vtblk_gtoh64(sc, offset / 512);
 
 	req->vbr_bp = &buf;
 	g_reset_bio(&buf);
Lines 1298-1305
 
 	req = &sc->vtblk_dump_request;
 	req->vbr_ack = -1;
-	req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
-	req->vbr_hdr.ioprio = 1;
+	req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_FLUSH);
+	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
 	req->vbr_hdr.sector = 0;
 
 	req->vbr_bp = &buf;
Lines 1327-1333
 
 	/* Set either writeback (1) or writethrough (0) mode. */
 	virtio_write_dev_config_1(sc->vtblk_dev,
-	    offsetof(struct virtio_blk_config, writeback), wc);
+	    offsetof(struct virtio_blk_config, wce), wc);
 }
 
 static int
Lines 1336-1350
 {
 	int wc;
 
-	if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
+	if (sc->vtblk_flags & VTBLK_FLAG_WCE_CONFIG) {
 		wc = vtblk_tunable_int(sc, "writecache_mode",
 		    vtblk_writecache_mode);
 		if (wc >= 0 && wc < VTBLK_CACHE_MAX)
 			vtblk_set_write_cache(sc, wc);
 		else
-			wc = blkcfg->writeback;
+			wc = blkcfg->wce;
 	} else
-		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);
+		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_FLUSH);
 
 	return (wc);
 }
Lines 1361-1367
 	error = sysctl_handle_int(oidp, &wc, 0, req);
 	if (error || req->newptr == NULL)
 		return (error);
-	if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
+	if ((sc->vtblk_flags & VTBLK_FLAG_WCE_CONFIG) == 0)
 		return (EPERM);
 	if (wc < 0 || wc >= VTBLK_CACHE_MAX)
 		return (EINVAL);
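The new vtblk_gtoh*()/vtblk_htog*() macros defer to virtio_gtoh*()/virtio_htog*() helpers keyed on whether VIRTIO_F_VERSION_1 was negotiated. A minimal sketch of what such helpers look like, assuming modern devices are always little-endian while legacy devices use the guest's native byte order (so no swap is needed there); the exact signatures are illustrative:

#include <sys/types.h>
#include <sys/endian.h>		/* htole32(), le32toh() */

static inline uint32_t
virtio_gtoh32(bool modern, uint32_t val)
{
	/* Guest to device: modern devices expect little-endian. */
	return (modern ? htole32(val) : val);
}

static inline uint32_t
virtio_htog32(bool modern, uint32_t val)
{
	/* Device to guest: undo the little-endian encoding if modern. */
	return (modern ? le32toh(val) : val);
}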
(-)sys/dev/virtio/block/virtio_blk.h (-5 / +22 lines)
Lines 34-50
 #define _VIRTIO_BLK_H
 
 /* Feature bits */
-#define VIRTIO_BLK_F_BARRIER	0x0001	/* Does host support barriers? */
 #define VIRTIO_BLK_F_SIZE_MAX	0x0002	/* Indicates maximum segment size */
 #define VIRTIO_BLK_F_SEG_MAX	0x0004	/* Indicates maximum # of segments */
 #define VIRTIO_BLK_F_GEOMETRY	0x0010	/* Legacy geometry available  */
 #define VIRTIO_BLK_F_RO		0x0020	/* Disk is read-only */
 #define VIRTIO_BLK_F_BLK_SIZE	0x0040	/* Block size of disk is available*/
-#define VIRTIO_BLK_F_SCSI	0x0080	/* Supports scsi command passthru */
-#define VIRTIO_BLK_F_WCE	0x0200	/* Writeback mode enabled after reset */
+#define VIRTIO_BLK_F_FLUSH	0x0200	/* Flush command supported */
 #define VIRTIO_BLK_F_TOPOLOGY	0x0400	/* Topology information is available */
 #define VIRTIO_BLK_F_CONFIG_WCE 0x0800	/* Writeback mode available in config */
+#define VIRTIO_BLK_F_MQ 	0x1000 	/* Support more than one vq */
 
+/* Legacy feature bits */
+#define VIRTIO_BLK_F_BARRIER	0x0001	/* Does host support barriers? */
+#define VIRTIO_BLK_F_SCSI	0x0080	/* Supports scsi command passthru */
+
+/* Old (deprecated) name for VIRTIO_BLK_F_FLUSH. */
+#define VIRTIO_BLK_F_WCE VIRTIO_BLK_F_FLUSH
 #define VIRTIO_BLK_ID_BYTES	20	/* ID string length */
 
 struct virtio_blk_config {
Lines 66-80
 
 	/* Topology of the device (if VIRTIO_BLK_F_TOPOLOGY) */
 	struct virtio_blk_topology {
+		/* exponent for physical block per logical block. */
 		uint8_t physical_block_exp;
+		/* alignment offset in logical blocks. */
 		uint8_t alignment_offset;
+		/* minimum I/O size without performance penalty in logical
+		 * blocks. */
 		uint16_t min_io_size;
+		/* optimal sustained I/O size in logical blocks. */
 		uint32_t opt_io_size;
 	} topology;
 
 	/* Writeback mode (if VIRTIO_BLK_F_CONFIG_WCE) */
-	uint8_t writeback;
+	uint8_t wce;
+	uint8_t unused;
 
+	/* Number of vqs, only available when VIRTIO_BLK_F_MQ is set */
+	uint16_t num_queues;
 } __packed;
 
 /*
Lines 107-113
 /* ID string length */
 #define VIRTIO_BLK_ID_BYTES	20
 
-/* This is the first element of the read scatter-gather list. */
+/*
+ * This comes first in the read scatter-gather list.
+ * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated,
+ * this is the first element of the read scatter-gather list.
+ */
 struct virtio_blk_outhdr {
 	/* VIRTIO_BLK_T* */
 	uint32_t type;
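A hedged usage sketch for the newly documented topology fields: blk_size is the logical block size in bytes and physical_block_exp is the log2 of logical blocks per physical block, so the physical block size falls out as a shift (the helper name is illustrative, not from the patch):

static inline uint32_t
vtblk_phys_block_size(const struct virtio_blk_config *cfg)
{
	/* e.g. blk_size 512 with exp 3 => 4096-byte physical blocks. */
	return (cfg->blk_size << cfg->topology.physical_block_exp);
}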
(-)sys/dev/virtio/console/virtio_console.c (-16 / +39 lines)
Lines 158-165
 static int	 vtcon_detach(device_t);
 static int	 vtcon_config_change(device_t);
 
-static void	 vtcon_setup_features(struct vtcon_softc *);
-static void	 vtcon_negotiate_features(struct vtcon_softc *);
+static int	 vtcon_setup_features(struct vtcon_softc *);
+static int	 vtcon_negotiate_features(struct vtcon_softc *);
 static int	 vtcon_alloc_scports(struct vtcon_softc *);
 static int	 vtcon_alloc_virtqueues(struct vtcon_softc *);
 static void	 vtcon_read_config(struct vtcon_softc *,
Lines 227-232
 static void	 vtcon_enable_interrupts(struct vtcon_softc *);
 static void	 vtcon_disable_interrupts(struct vtcon_softc *);
 
+#define vtcon_modern(_sc) (((_sc)->vtcon_features & VIRTIO_F_VERSION_1) != 0)
+#define vtcon_htog16(_sc, _val)	virtio_htog16(vtcon_modern(_sc), _val)
+#define vtcon_htog32(_sc, _val)	virtio_htog32(vtcon_modern(_sc), _val)
+#define vtcon_htog64(_sc, _val)	virtio_htog64(vtcon_modern(_sc), _val)
+#define vtcon_gtoh16(_sc, _val)	virtio_gtoh16(vtcon_modern(_sc), _val)
+#define vtcon_gtoh32(_sc, _val)	virtio_gtoh32(vtcon_modern(_sc), _val)
+#define vtcon_gtoh64(_sc, _val)	virtio_gtoh64(vtcon_modern(_sc), _val)
+
 static int	 vtcon_pending_free;
 
 static struct ttydevsw vtcon_tty_class = {
Lines 256-263
 };
 static devclass_t vtcon_devclass;
 
-DRIVER_MODULE(virtio_console, virtio_pci, vtcon_driver, vtcon_devclass,
+DRIVER_MODULE(virtio_console, vtpcil, vtcon_driver, vtcon_devclass,
     vtcon_modevent, 0);
+DRIVER_MODULE(virtio_console, vtpcim, vtcon_driver, vtcon_devclass,
+    vtcon_modevent, 0);
 MODULE_VERSION(virtio_console, 1);
 MODULE_DEPEND(virtio_console, virtio, 1, 1, 1);
 
Lines 323-334
 
 	sc = device_get_softc(dev);
 	sc->vtcon_dev = dev;
+	virtio_set_feature_desc(dev, vtcon_feature_desc);
 
 	mtx_init(&sc->vtcon_mtx, "vtconmtx", NULL, MTX_DEF);
 	mtx_init(&sc->vtcon_ctrl_tx_mtx, "vtconctrlmtx", NULL, MTX_DEF);
 
-	virtio_set_feature_desc(dev, vtcon_feature_desc);
-	vtcon_setup_features(sc);
+	error = vtcon_setup_features(sc);
+	if (error) {
+		device_printf(dev, "cannot setup features\n");
+		goto fail;
+	}
 
 	vtcon_read_config(sc, &concfg);
 	vtcon_determine_max_ports(sc, &concfg);
Lines 420-426
 	return (0);
 }
 
-static void
+static int
 vtcon_negotiate_features(struct vtcon_softc *sc)
 {
 	device_t dev;
Lines 430-450
 	features = VTCON_FEATURES;
 
 	sc->vtcon_features = virtio_negotiate_features(dev, features);
+	return (virtio_finalize_features(dev));
 }
 
-static void
+static int
 vtcon_setup_features(struct vtcon_softc *sc)
 {
 	device_t dev;
+	int error;
 
 	dev = sc->vtcon_dev;
 
-	vtcon_negotiate_features(sc);
+	error = vtcon_negotiate_features(sc);
+	if (error)
+		return (error);
 
 	if (virtio_with_feature(dev, VIRTIO_CONSOLE_F_SIZE))
 		sc->vtcon_flags |= VTCON_FLAG_SIZE;
 	if (virtio_with_feature(dev, VIRTIO_CONSOLE_F_MULTIPORT))
 		sc->vtcon_flags |= VTCON_FLAG_MULTIPORT;
+
+	return (0);
 }
 
 #define VTCON_GET_CONFIG(_dev, _feature, _field, _cfg)			\
Lines 847-863
     struct virtio_console_control *control, void *data, size_t data_len)
 {
 	device_t dev;
-	int id;
+	uint32_t id;
+	uint16_t event;
 
 	dev = sc->vtcon_dev;
-	id = control->id;
+	id = vtcon_htog32(sc, control->id);
+	event = vtcon_htog16(sc, control->event);
 
-	if (id < 0 || id >= sc->vtcon_max_ports) {
-		device_printf(dev, "%s: invalid port ID %d\n", __func__, id);
+	if (id >= sc->vtcon_max_ports) {
+		device_printf(dev, "%s: event %d invalid port ID %d\n",
+		    __func__, event, id);
 		return;
 	}
 
-	switch (control->event) {
+	switch (event) {
 	case VIRTIO_CONSOLE_PORT_ADD:
 		vtcon_ctrl_port_add_event(sc, id);
 		break;
Lines 985-993
 	if ((sc->vtcon_flags & VTCON_FLAG_MULTIPORT) == 0)
 		return;
 
-	control.id = portid;
-	control.event = event;
-	control.value = value;
+	control.id = vtcon_gtoh32(sc, portid);
+	control.event = vtcon_gtoh16(sc, event);
+	control.value = vtcon_gtoh16(sc, value);
 
 	vtcon_ctrl_poll(sc, &control);
 }
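The port-ID validation above shrinks to a single comparison because the ID is now unsigned: once id is uint32_t, a "negative" value cannot occur, so one unsigned bound check covers both sides. A standalone illustration with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t max_ports = 16;
	uint32_t id = 0xffffffffu;	/* would read as -1 through "int id" */

	/* One unsigned check rejects both "negative" and too-large IDs. */
	printf("port %u: %s\n", id,
	    id >= max_ports ? "rejected" : "accepted");
	return (0);
}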
(-)sys/dev/virtio/mmio/virtio_mmio.c (+4 lines)
Lines 426-431
 	case VIRTIO_IVAR_VENDOR:
 		*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID);
 		break;
+	case VIRTIO_IVAR_SUBVENDOR:
+	case VIRTIO_IVAR_MODERN:
+		*result = 0;
+		break;
 	default:
 		return (ENOENT);
 	}
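For context, a hedged sketch of how the driver side might consume the new ivar: virtio_bus_is_modern(), used by the block and network drivers in this patch, presumably wraps a BUS_READ_IVAR of VIRTIO_IVAR_MODERN, so the MMIO transport answering 0 keeps its children on their legacy feature sets. Kernel-context sketch only; the real helper lives in the VirtIO core:

static inline bool
virtio_bus_is_modern(device_t dev)
{
	uintptr_t modern;

	/* Ivars are answered by the parent bus, i.e. the transport. */
	if (BUS_READ_IVAR(device_get_parent(dev), dev,
	    VIRTIO_IVAR_MODERN, &modern) != 0)
		return (false);
	return (modern != 0);
}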
(-)sys/dev/virtio/network/if_vtnet.c (-846 / +1181 lines)
Lines 69-74
 #include <netinet6/ip6_var.h>
 #include <netinet/udp.h>
 #include <netinet/tcp.h>
+#include <netinet/tcp_lro.h>
 #include <netinet/netdump/netdump.h>
 
 #include <machine/bus.h>
Lines 85-90
 #include "opt_inet.h"
 #include "opt_inet6.h"
 
+#if defined(INET) || defined(INET6)
+#include <machine/in_cksum.h>
+#endif
+
 static int	vtnet_modevent(module_t, int, void *);
 
 static int	vtnet_probe(device_t);
Lines 96-103
 static int	vtnet_attach_completed(device_t);
 static int	vtnet_config_change(device_t);
 
-static void	vtnet_negotiate_features(struct vtnet_softc *);
-static void	vtnet_setup_features(struct vtnet_softc *);
+static int	vtnet_negotiate_features(struct vtnet_softc *);
+static int	vtnet_setup_features(struct vtnet_softc *);
 static int	vtnet_init_rxq(struct vtnet_softc *, int);
 static int	vtnet_init_txq(struct vtnet_softc *, int);
 static int	vtnet_alloc_rxtx_queues(struct vtnet_softc *);
Lines 105-112
 static int	vtnet_alloc_rx_filters(struct vtnet_softc *);
 static void	vtnet_free_rx_filters(struct vtnet_softc *);
 static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
+static int	vtnet_alloc_interface(struct vtnet_softc *);
 static int	vtnet_setup_interface(struct vtnet_softc *);
-static int	vtnet_change_mtu(struct vtnet_softc *, int);
+static int	vtnet_ioctl_mtu(struct vtnet_softc *, int);
+static int	vtnet_ioctl_ifflags(struct vtnet_softc *);
+static int	vtnet_ioctl_multi(struct vtnet_softc *);
+static int	vtnet_ioctl_ifcap(struct vtnet_softc *, struct ifreq *);
 static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t);
 static uint64_t	vtnet_get_counter(struct ifnet *, ift_counter);
 
Lines 114-124
 static void	vtnet_rxq_free_mbufs(struct vtnet_rxq *);
 static struct mbuf *
 		vtnet_rx_alloc_buf(struct vtnet_softc *, int , struct mbuf **);
-static int	vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *,
+static int	vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *,
 		    struct mbuf *, int);
 static int	vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
 static int	vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
 static int	vtnet_rxq_new_buf(struct vtnet_rxq *);
+static int	vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *,
+		     uint16_t, int, struct virtio_net_hdr *);
+static int	vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
+		     uint16_t, int, struct virtio_net_hdr *);
 static int	vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
 		     struct virtio_net_hdr *);
 static void	vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
Lines 130-135
 static void	vtnet_rx_vq_intr(void *);
 static void	vtnet_rxq_tq_intr(void *, int);
 
+static int	vtnet_txq_intr_threshold(struct vtnet_txq *);
 static int	vtnet_txq_below_threshold(struct vtnet_txq *);
 static int	vtnet_txq_notify(struct vtnet_txq *);
 static void	vtnet_txq_free_mbufs(struct vtnet_txq *);
Lines 179-184
 static int	vtnet_init_tx_queues(struct vtnet_softc *);
 static int	vtnet_init_rxtx_queues(struct vtnet_softc *);
 static void	vtnet_set_active_vq_pairs(struct vtnet_softc *);
+static void	vtnet_update_rx_offloads(struct vtnet_softc *);
 static int	vtnet_reinit(struct vtnet_softc *);
 static void	vtnet_init_locked(struct vtnet_softc *);
 static void	vtnet_init(void *);
Lines 187-197
 static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
 		    struct sglist *, int, int);
 static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
+static int	vtnet_ctrl_guest_offloads(struct vtnet_softc *, uint64_t);
 static int	vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
-static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
+static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, uint8_t, int);
 static int	vtnet_set_promisc(struct vtnet_softc *, int);
 static int	vtnet_set_allmulti(struct vtnet_softc *, int);
-static void	vtnet_attach_disable_promisc(struct vtnet_softc *);
 static void	vtnet_rx_filter(struct vtnet_softc *);
 static void	vtnet_rx_filter_mac(struct vtnet_softc *);
 static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
Lines 200-220
 static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
 static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
 
+static void	vtnet_update_speed_duplex(struct vtnet_softc *);
 static int	vtnet_is_link_up(struct vtnet_softc *);
 static void	vtnet_update_link_status(struct vtnet_softc *);
 static int	vtnet_ifmedia_upd(struct ifnet *);
 static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
-static void	vtnet_get_hwaddr(struct vtnet_softc *);
-static void	vtnet_set_hwaddr(struct vtnet_softc *);
+static void	vtnet_get_macaddr(struct vtnet_softc *);
+static void	vtnet_set_macaddr(struct vtnet_softc *);
+static void	vtnet_attached_set_macaddr(struct vtnet_softc *);
 static void	vtnet_vlan_tag_remove(struct mbuf *);
 static void	vtnet_set_rx_process_limit(struct vtnet_softc *);
-static void	vtnet_set_tx_intr_threshold(struct vtnet_softc *);
 
 static void	vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
 		    struct sysctl_oid_list *, struct vtnet_rxq *);
 static void	vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
 		    struct sysctl_oid_list *, struct vtnet_txq *);
 static void	vtnet_setup_queue_sysctl(struct vtnet_softc *);
+static void	vtnet_load_tunables(struct vtnet_softc *);
 static void	vtnet_setup_sysctl(struct vtnet_softc *);
 
 static int	vtnet_rxq_enable_intr(struct vtnet_rxq *);
Lines 232-289
 
 NETDUMP_DEFINE(vtnet);
 
-/* Tunables. */
-static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VNET driver parameters");
+#define vtnet_htog16(_sc, _val)	virtio_htog16(vtnet_modern(_sc), _val)
+#define vtnet_htog32(_sc, _val)	virtio_htog32(vtnet_modern(_sc), _val)
+#define vtnet_htog64(_sc, _val)	virtio_htog64(vtnet_modern(_sc), _val)
+#define vtnet_gtoh16(_sc, _val)	virtio_gtoh16(vtnet_modern(_sc), _val)
+#define vtnet_gtoh32(_sc, _val)	virtio_gtoh32(vtnet_modern(_sc), _val)
+#define vtnet_gtoh64(_sc, _val)	virtio_gtoh64(vtnet_modern(_sc), _val)
+
+static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VirtIO Net driver");
+
 static int vtnet_csum_disable = 0;
-TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
 SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
     &vtnet_csum_disable, 0, "Disables receive and send checksum offload");
+
+static int vtnet_fixup_needs_csum = 0;
+SYSCTL_INT(_hw_vtnet, OID_AUTO, fixup_needs_csum, CTLFLAG_RDTUN,
+    &vtnet_fixup_needs_csum, 0,
+    "Calculate valid checksum for NEEDS_CSUM packets");
+
 static int vtnet_tso_disable = 0;
-TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
-SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN, &vtnet_tso_disable,
-    0, "Disables TCP Segmentation Offload");
+SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN,
+    &vtnet_tso_disable, 0, "Disables TSO");
+
 static int vtnet_lro_disable = 0;
-TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
-SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN, &vtnet_lro_disable,
-    0, "Disables TCP Large Receive Offload");
+SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN,
+    &vtnet_lro_disable, 0, "Disables hardware LRO");
+
 static int vtnet_mq_disable = 0;
-TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable);
-SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN, &vtnet_mq_disable,
-    0, "Disables Multi Queue support");
+SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN,
+    &vtnet_mq_disable, 0, "Disables multiqueue support");
+
 static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
-TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs);
 SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
-    &vtnet_mq_max_pairs, 0, "Sets the maximum number of Multi Queue pairs");
-static int vtnet_rx_process_limit = 512;
-TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit);
+    &vtnet_mq_max_pairs, 0, "Maximum number of multiqueue pairs");
+
+static int vtnet_tso_maxlen = IP_MAXPACKET;
+SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
+    &vtnet_tso_maxlen, 0, "TSO burst limit");
+
+static int vtnet_rx_process_limit = 1024;
 SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
     &vtnet_rx_process_limit, 0,
-    "Limits the number RX segments processed in a single pass");
+    "Number of RX segments processed in one pass");
 
+static int vtnet_lro_entry_count = 128;
+SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
+    &vtnet_lro_entry_count, 0, "Software LRO entry count");
+
+/* Enable sorted LRO, and the depth of the mbuf queue. */
+static int vtnet_lro_mbufq_depth = 0;
+SYSCTL_UINT(_hw_vtnet, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
+    &vtnet_lro_mbufq_depth, 0, "Depth of software LRO mbuf queue");
+
 static uma_zone_t vtnet_tx_header_zone;
 
 static struct virtio_feature_desc vtnet_feature_desc[] = {
-	{ VIRTIO_NET_F_CSUM,		"TxChecksum"	},
-	{ VIRTIO_NET_F_GUEST_CSUM,	"RxChecksum"	},
-	{ VIRTIO_NET_F_MAC,		"MacAddress"	},
-	{ VIRTIO_NET_F_GSO,		"TxAllGSO"	},
-	{ VIRTIO_NET_F_GUEST_TSO4,	"RxTSOv4"	},
-	{ VIRTIO_NET_F_GUEST_TSO6,	"RxTSOv6"	},
-	{ VIRTIO_NET_F_GUEST_ECN,	"RxECN"		},
-	{ VIRTIO_NET_F_GUEST_UFO,	"RxUFO"		},
-	{ VIRTIO_NET_F_HOST_TSO4,	"TxTSOv4"	},
-	{ VIRTIO_NET_F_HOST_TSO6,	"TxTSOv6"	},
-	{ VIRTIO_NET_F_HOST_ECN,	"TxTSOECN"	},
-	{ VIRTIO_NET_F_HOST_UFO,	"TxUFO"		},
-	{ VIRTIO_NET_F_MRG_RXBUF,	"MrgRxBuf"	},
-	{ VIRTIO_NET_F_STATUS,		"Status"	},
-	{ VIRTIO_NET_F_CTRL_VQ,		"ControlVq"	},
-	{ VIRTIO_NET_F_CTRL_RX,		"RxMode"	},
-	{ VIRTIO_NET_F_CTRL_VLAN,	"VLanFilter"	},
-	{ VIRTIO_NET_F_CTRL_RX_EXTRA,	"RxModeExtra"	},
-	{ VIRTIO_NET_F_GUEST_ANNOUNCE,	"GuestAnnounce"	},
-	{ VIRTIO_NET_F_MQ,		"Multiqueue"	},
-	{ VIRTIO_NET_F_CTRL_MAC_ADDR,	"SetMacAddress"	},
+	{ VIRTIO_NET_F_CSUM,			"TxChecksum"		},
+	{ VIRTIO_NET_F_GUEST_CSUM,		"RxChecksum"		},
+	{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,	"CtrlRxOffloads"	},
+	{ VIRTIO_NET_F_MAC,			"MAC"			},
+	{ VIRTIO_NET_F_GSO,			"TxGSO"			},
+	{ VIRTIO_NET_F_GUEST_TSO4,		"RxLROv4"		},
+	{ VIRTIO_NET_F_GUEST_TSO6,		"RxLROv6"		},
+	{ VIRTIO_NET_F_GUEST_ECN,		"RxLROECN"		},
+	{ VIRTIO_NET_F_GUEST_UFO,		"RxUFO"			},
+	{ VIRTIO_NET_F_HOST_TSO4,		"TxTSOv4"		},
+	{ VIRTIO_NET_F_HOST_TSO6,		"TxTSOv6"		},
+	{ VIRTIO_NET_F_HOST_ECN,		"TxTSOECN"		},
+	{ VIRTIO_NET_F_HOST_UFO,		"TxUFO"			},
+	{ VIRTIO_NET_F_MRG_RXBUF,		"MrgRxBuf"		},
+	{ VIRTIO_NET_F_STATUS,			"Status"		},
+	{ VIRTIO_NET_F_CTRL_VQ,			"CtrlVq"		},
+	{ VIRTIO_NET_F_CTRL_RX,			"CtrlRxMode"		},
+	{ VIRTIO_NET_F_CTRL_VLAN,		"CtrlVLANFilter"	},
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA,		"CtrlRxModeExtra"	},
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE,		"GuestAnnounce"		},
+	{ VIRTIO_NET_F_MQ,			"Multiqueue"		},
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR,		"CtrlMacAddr"		},
+	{ VIRTIO_NET_F_SPEED_DUPLEX,		"SpeedDuplex"		},
 
 	{ 0, NULL }
 };
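On the dropped TUNABLE_INT() lines above: CTLFLAG_RDTUN marks a sysctl as both read-only and a loader tunable, so the value is fetched from the kernel environment at boot and the separate TUNABLE_INT() declarations were redundant. A minimal sketch (hw.vtnet.example is a hypothetical knob):

static int vtnet_example = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, example, CTLFLAG_RDTUN,
    &vtnet_example, 0, "Settable from loader.conf; read-only at runtime");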
Lines 306-329
 
 #ifdef DEV_NETMAP
 #include <dev/netmap/if_vtnet_netmap.h>
-#endif /* DEV_NETMAP */
+#endif
 
 static driver_t vtnet_driver = {
-	"vtnet",
-	vtnet_methods,
-	sizeof(struct vtnet_softc)
+	.name = "vtnet",
+	.methods = vtnet_methods,
+	.size = sizeof(struct vtnet_softc)
 };
 static devclass_t vtnet_devclass;
 
 DRIVER_MODULE(vtnet, virtio_mmio, vtnet_driver, vtnet_devclass,
     vtnet_modevent, 0);
-DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
-    vtnet_modevent, 0);
+DRIVER_MODULE(vtnet, vtpcil, vtnet_driver, vtnet_devclass, vtnet_modevent, 0);
+DRIVER_MODULE(vtnet, vtpcim, vtnet_driver, vtnet_devclass, vtnet_modevent, 0);
 MODULE_VERSION(vtnet, 1);
 MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
 #ifdef DEV_NETMAP
 MODULE_DEPEND(vtnet, netmap, 1, 1, 1);
-#endif /* DEV_NETMAP */
+#endif
 
 static int
 vtnet_modevent(module_t mod, int type, void *unused)
Lines 365-371
 	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
 		return (ENXIO);
 
-	device_set_desc(dev, "VirtIO Networking Adapter");
+	device_set_desc(dev, "VirtIO Network Adapter");
 
 	return (BUS_PROBE_DEFAULT);
 }
Lines 378-393
 
 	sc = device_get_softc(dev);
 	sc->vtnet_dev = dev;
-
-	/* Register our feature descriptions. */
 	virtio_set_feature_desc(dev, vtnet_feature_desc);
 
 	VTNET_CORE_LOCK_INIT(sc);
 	callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
+	vtnet_load_tunables(sc);
 
+	error = vtnet_alloc_interface(sc);
+	if (error) {
+		device_printf(dev, "cannot allocate interface\n");
+		goto fail;
+	}
+
 	vtnet_setup_sysctl(sc);
-	vtnet_setup_features(sc);
 
+	error = vtnet_setup_features(sc);
+	if (error) {
+		device_printf(dev, "cannot setup features\n");
+		goto fail;
+	}
+
 	error = vtnet_alloc_rx_filters(sc);
 	if (error) {
 		device_printf(dev, "cannot allocate Rx filters\n");
Lines 414-429
 
 	error = virtio_setup_intr(dev, INTR_TYPE_NET);
 	if (error) {
-		device_printf(dev, "cannot setup virtqueue interrupts\n");
-		/* BMV: This will crash if during boot! */
+		device_printf(dev, "cannot setup interrupts\n");
 		ether_ifdetach(sc->vtnet_ifp);
 		goto fail;
 	}
 
 #ifdef DEV_NETMAP
 	vtnet_netmap_attach(sc);
-#endif /* DEV_NETMAP */
-
+#endif
 	vtnet_start_taskqueues(sc);
 
 fail:
Lines 455-461
 
 #ifdef DEV_NETMAP
 	netmap_detach(ifp);
-#endif /* DEV_NETMAP */
+#endif
 
 	vtnet_free_taskqueues(sc);
 
Lines 522-528
 static int
 vtnet_shutdown(device_t dev)
 {
-
 	/*
 	 * Suspend already does all of what we need to
 	 * do here; we just never expect to be resumed.
Lines 533-541
 static int
 vtnet_attach_completed(device_t dev)
 {
+	struct vtnet_softc *sc;
 
-	vtnet_attach_disable_promisc(device_get_softc(dev));
+	sc = device_get_softc(dev);
 
+	VTNET_CORE_LOCK(sc);
+	vtnet_attached_set_macaddr(sc);
+	VTNET_CORE_UNLOCK(sc);
+
 	return (0);
 }
 
Lines 555-591
 	return (0);
 }
 
-static void
+static int
 vtnet_negotiate_features(struct vtnet_softc *sc)
 {
 	device_t dev;
-	uint64_t mask, features;
+	uint64_t features, negotiated_features;
+	int no_csum;
 
 	dev = sc->vtnet_dev;
-	mask = 0;
+	features = virtio_bus_is_modern(dev) ? VTNET_MODERN_FEATURES :
+	    VTNET_LEGACY_FEATURES;
 
 	/*
 	 * TSO and LRO are only available when their corresponding checksum
 	 * offload feature is also negotiated.
 	 */
-	if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) {
-		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
+	no_csum = vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable);
+	if (no_csum)
573
		mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES;
630
		features &= ~(VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM);
574
	}
631
	if (no_csum || vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
575
	if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
632
		features &= ~VTNET_TSO_FEATURES;
576
		mask |= VTNET_TSO_FEATURES;
633
	if (no_csum || vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
577
	if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
634
		features &= ~VTNET_LRO_FEATURES;
578
		mask |= VTNET_LRO_FEATURES;
635
579
#ifndef VTNET_LEGACY_TX
636
#ifndef VTNET_LEGACY_TX
580
	if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
637
	if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
581
		mask |= VIRTIO_NET_F_MQ;
638
		features &= ~VIRTIO_NET_F_MQ;
582
#else
639
#else
583
	mask |= VIRTIO_NET_F_MQ;
640
	features &= ~VIRTIO_NET_F_MQ;
584
#endif
641
#endif
585
642
586
	features = VTNET_FEATURES & ~mask;
643
	negotiated_features = virtio_negotiate_features(dev, features);
587
	sc->vtnet_features = virtio_negotiate_features(dev, features);
588
644
645
	if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
646
		uint16_t mtu;
647
648
		mtu = virtio_read_dev_config_2(dev,
649
		    offsetof(struct virtio_net_config, mtu));
650
		if (mtu < VTNET_MIN_MTU /* || mtu > VTNET_MAX_MTU */) {
651
			device_printf(dev, "Invalid MTU value: %d. "
652
			    "MTU feature disabled.\n", mtu);
653
			features &= ~VIRTIO_NET_F_MTU;
654
			negotiated_features =
655
			    virtio_negotiate_features(dev, features);
656
		}
657
	}
658
659
	if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
660
		uint16_t npairs;
661
662
		npairs = virtio_read_dev_config_2(dev,
663
		    offsetof(struct virtio_net_config, max_virtqueue_pairs));
664
		if (npairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
665
		    npairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
666
			device_printf(dev, "Invalid max_virtqueue_pairs value: "
667
			    "%d. Multiqueue feature disabled.\n", npairs);
668
			features &= ~VIRTIO_NET_F_MQ;
669
			negotiated_features =
670
			    virtio_negotiate_features(dev, features);
671
		}
672
	}
673
589
	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
674
	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
590
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
675
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
591
		/*
676
		/*
Lines 599-624 Link Here
599
		 */
684
		 */
600
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
685
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
601
			device_printf(dev,
686
			device_printf(dev,
602
			    "LRO disabled due to both mergeable buffers and "
687
			    "Host LRO disabled since both mergeable buffers "
603
			    "indirect descriptors not negotiated\n");
688
			    "and indirect descriptors were not negotiated\n");
604
605
			features &= ~VTNET_LRO_FEATURES;
689
			features &= ~VTNET_LRO_FEATURES;
606
			sc->vtnet_features =
690
			negotiated_features =
607
			    virtio_negotiate_features(dev, features);
691
			    virtio_negotiate_features(dev, features);
608
		} else
692
		} else
609
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
693
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
610
	}
694
	}
695
696
	sc->vtnet_features = negotiated_features;
697
	sc->vtnet_negotiated_features = negotiated_features;
698
699
	return (virtio_finalize_features(dev));
611
}
700
}
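The reworked negotiation validates device config fields (mtu, max_virtqueue_pairs) after the first feature exchange, and strips the corresponding feature and renegotiates when the device advertises an out-of-range value. A condensed sketch of that pattern, using only calls that appear in the patch (the helper name is hypothetical):

	static uint64_t
	vtnet_negotiate_checked(device_t dev, uint64_t features)
	{
		uint64_t negotiated;
		uint16_t mtu;

		negotiated = virtio_negotiate_features(dev, features);
		if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
			mtu = virtio_read_dev_config_2(dev,
			    offsetof(struct virtio_net_config, mtu));
			if (mtu < VTNET_MIN_MTU) {
				/* Reject the bogus value; renegotiate without F_MTU. */
				features &= ~VIRTIO_NET_F_MTU;
				negotiated = virtio_negotiate_features(dev, features);
			}
		}
		return (negotiated);
	}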
612
701
613
static void
702
static int
614
vtnet_setup_features(struct vtnet_softc *sc)
703
vtnet_setup_features(struct vtnet_softc *sc)
615
{
704
{
616
	device_t dev;
705
	device_t dev;
706
	int error;
617
707
618
	dev = sc->vtnet_dev;
708
	dev = sc->vtnet_dev;
619
709
620
	vtnet_negotiate_features(sc);
710
	error = vtnet_negotiate_features(sc);
711
	if (error)
712
		return (error);
621
713
714
	if (virtio_with_feature(dev, VIRTIO_F_VERSION_1))
715
		sc->vtnet_flags |= VTNET_FLAG_MODERN;
622
	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
716
	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
623
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
717
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
624
	if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
718
	if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
Lines 629-654 Link Here
629
		sc->vtnet_flags |= VTNET_FLAG_MAC;
723
		sc->vtnet_flags |= VTNET_FLAG_MAC;
630
	}
724
	}
631
725
726
	if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
727
		sc->vtnet_max_mtu = virtio_read_dev_config_2(dev,
728
		    offsetof(struct virtio_net_config, mtu));
729
	} else
730
		sc->vtnet_max_mtu = VTNET_MAX_MTU;
731
632
	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
732
	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
633
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
733
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
634
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
734
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
735
	} else if (vtnet_modern(sc)) {
736
		/* This is identical to the mergeable header. */
737
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_v1);
635
	} else
738
	} else
636
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
739
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
637
740
638
	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
741
	if (vtnet_modern(sc) || sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
639
		sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;
742
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_INLINE;
640
	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
743
	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
641
		sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS;
744
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_LRO_NOMRG;
642
	else
745
	else
643
		sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS;
746
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_SEPARATE;
644
747
748
	/*
749
	 * Favor "hardware" LRO if negotiated, but support software LRO as
750
	 * a fallback; there is usually little benefit (or worse) with both.
751
	 */
752
	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) == 0 &&
753
	    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6) == 0)
754
		sc->vtnet_flags |= VTNET_FLAG_SW_LRO;
755
645
	if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
756
	if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
646
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
757
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
647
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
758
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
648
		sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
759
		sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MAX;
649
	else
760
	else
650
		sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;
761
		sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MIN;
651
762
763
	sc->vtnet_req_vq_pairs = 1;
764
	sc->vtnet_max_vq_pairs = 1;
765
652
	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
766
	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
653
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
767
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
654
768
Lines 658-692 Link Here
658
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
772
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
659
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
773
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
660
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
774
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
775
776
		if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
777
			sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
778
			    offsetof(struct virtio_net_config,
779
			    max_virtqueue_pairs));
780
		}
661
	}
781
	}
662
782
663
	if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) &&
664
	    sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
665
		sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
666
		    offsetof(struct virtio_net_config, max_virtqueue_pairs));
667
	} else
668
		sc->vtnet_max_vq_pairs = 1;
669
670
	if (sc->vtnet_max_vq_pairs > 1) {
783
	if (sc->vtnet_max_vq_pairs > 1) {
784
		int req;
785
671
		/*
786
		/*
672
		 * Limit the maximum number of queue pairs to the lower of
787
		 * Limit the maximum number of requested queue pairs to the
673
		 * the number of CPUs and the configured maximum.
788
		 * number of CPUs and the configured maximum.
674
		 * The actual number of queues that get used may be less.
675
		 */
789
		 */
676
		int max;
790
		req = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
677
791
		if (req < 0)
678
		max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
792
			req = 1;
679
		if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN) {
793
		if (req == 0)
680
			if (max > mp_ncpus)
794
			req = mp_ncpus;
681
				max = mp_ncpus;
795
		if (req > sc->vtnet_max_vq_pairs)
682
			if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
796
			req = sc->vtnet_max_vq_pairs;
683
				max = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX;
797
		if (req > mp_ncpus)
684
			if (max > 1) {
798
			req = mp_ncpus;
685
				sc->vtnet_requested_vq_pairs = max;
799
		if (req > 1) {
686
				sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
800
			sc->vtnet_req_vq_pairs = req;
687
			}
801
			sc->vtnet_flags |= VTNET_FLAG_MQ;
688
		}
802
		}
689
	}
803
	}
804
805
	return (0);
690
}
806
}
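The requested queue-pair count above reduces to a simple clamp: a negative mq_max_pairs tunable means one pair, zero means one pair per CPU, and the result is bounded by both the device-advertised maximum and mp_ncpus. Equivalent logic as a standalone helper (hypothetical name, for illustration):

	static int
	clamp_req_vq_pairs(int req, int max_pairs, int ncpus)
	{
		if (req < 0)		/* tunable not set: single pair */
			req = 1;
		else if (req == 0)	/* zero requests one pair per CPU */
			req = ncpus;
		if (req > max_pairs)	/* device-advertised maximum */
			req = max_pairs;
		if (req > ncpus)	/* never more pairs than CPUs */
			req = ncpus;
		return (req);
	}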
691
807
692
static int
808
static int
Lines 707-712 Link Here
707
	if (rxq->vtnrx_sg == NULL)
823
	if (rxq->vtnrx_sg == NULL)
708
		return (ENOMEM);
824
		return (ENOMEM);
709
825
826
#if defined(INET) || defined(INET6)
827
	if (vtnet_software_lro(sc)) {
828
		if (tcp_lro_init_args(&rxq->vtnrx_lro, sc->vtnet_ifp,
829
		    sc->vtnet_lro_entry_count, sc->vtnet_lro_mbufq_depth) != 0)
830
			return (ENOMEM);
831
	}
832
#endif
833
710
	TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
834
	TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
711
	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
835
	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
712
	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);
836
	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);
Lines 772-777 Link Here
772
			return (error);
896
			return (error);
773
	}
897
	}
774
898
899
	vtnet_set_rx_process_limit(sc);
775
	vtnet_setup_queue_sysctl(sc);
900
	vtnet_setup_queue_sysctl(sc);
776
901
777
	return (0);
902
	return (0);
Lines 784-789 Link Here
784
	rxq->vtnrx_sc = NULL;
909
	rxq->vtnrx_sc = NULL;
785
	rxq->vtnrx_id = -1;
910
	rxq->vtnrx_id = -1;
786
911
912
#if defined(INET) || defined(INET6)
913
	tcp_lro_free(&rxq->vtnrx_lro);
914
#endif
915
787
	if (rxq->vtnrx_sg != NULL) {
916
	if (rxq->vtnrx_sg != NULL) {
788
		sglist_free(rxq->vtnrx_sg);
917
		sglist_free(rxq->vtnrx_sg);
789
		rxq->vtnrx_sg = NULL;
918
		rxq->vtnrx_sg = NULL;
Lines 892-919 Link Here
892
	if (info == NULL)
1021
	if (info == NULL)
893
		return (ENOMEM);
1022
		return (ENOMEM);
894
1023
895
	for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) {
1024
	for (i = 0, idx = 0; i < sc->vtnet_req_vq_pairs; i++, idx += 2) {
896
		rxq = &sc->vtnet_rxqs[i];
1025
		rxq = &sc->vtnet_rxqs[i];
897
		VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
1026
		VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
898
		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
1027
		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
899
		    "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id);
1028
		    "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
900
1029
901
		txq = &sc->vtnet_txqs[i];
1030
		txq = &sc->vtnet_txqs[i];
902
		VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
1031
		VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
903
		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
1032
		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
904
		    "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id);
1033
		    "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
905
	}
1034
	}
906
1035
1036
	/* These queues will not be used so allocate the minimum resources. */
1037
	for (/**/; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
1038
		rxq = &sc->vtnet_rxqs[i];
1039
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, rxq, &rxq->vtnrx_vq,
1040
		    "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
1041
1042
		txq = &sc->vtnet_txqs[i];
1043
		VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL, txq, &txq->vtntx_vq,
1044
		    "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
1045
	}
1046
907
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
1047
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
908
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
1048
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
909
		    &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
1049
		    &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
910
	}
1050
	}
911
1051
912
	/*
1052
	/*
913
	 * Enable interrupt binding if this is multiqueue. This only matters
1053
	 * TODO: Enable interrupt binding if this is multiqueue. This will
914
	 * when per-vq MSIX is available.
1054
	 * only matter when per-virtqueue MSIX is available.
915
	 */
1055
	 */
916
	if (sc->vtnet_flags & VTNET_FLAG_MULTIQ)
1056
	if (sc->vtnet_flags & VTNET_FLAG_MQ)
917
		flags |= 0;
1057
		flags |= 0;
918
1058
919
	error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
1059
	error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
Lines 923-945 Link Here
923
}
1063
}
924
1064
925
static int
1065
static int
926
vtnet_setup_interface(struct vtnet_softc *sc)
1066
vtnet_alloc_interface(struct vtnet_softc *sc)
927
{
1067
{
928
	device_t dev;
1068
	device_t dev;
929
	struct ifnet *ifp;
1069
	struct ifnet *ifp;
930
1070
931
	dev = sc->vtnet_dev;
1071
	dev = sc->vtnet_dev;
932
1072
933
	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
1073
	ifp = if_alloc(IFT_ETHER);
934
	if (ifp == NULL) {
1074
	if (ifp == NULL)
935
		device_printf(dev, "cannot allocate ifnet structure\n");
1075
		return (ENOMEM);
936
		return (ENOSPC);
937
	}
938
1076
939
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1077
	sc->vtnet_ifp = ifp;
940
	ifp->if_baudrate = IF_Gbps(10);	/* Approx. */
941
	ifp->if_softc = sc;
1078
	ifp->if_softc = sc;
1079
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1080
1081
	return (0);
1082
}
1083
1084
static int
1085
vtnet_setup_interface(struct vtnet_softc *sc)
1086
{
1087
	device_t dev;
1088
	struct ifnet *ifp;
1089
1090
	dev = sc->vtnet_dev;
1091
	ifp = sc->vtnet_ifp;
1092
942
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1093
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1094
	ifp->if_baudrate = IF_Gbps(10);
943
	ifp->if_init = vtnet_init;
1095
	ifp->if_init = vtnet_init;
944
	ifp->if_ioctl = vtnet_ioctl;
1096
	ifp->if_ioctl = vtnet_ioctl;
945
	ifp->if_get_counter = vtnet_get_counter;
1097
	ifp->if_get_counter = vtnet_get_counter;
Lines 954-1004 Link Here
954
	IFQ_SET_READY(&ifp->if_snd);
1106
	IFQ_SET_READY(&ifp->if_snd);
955
#endif
1107
#endif
956
1108
957
	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
1109
	vtnet_get_macaddr(sc);
958
	    vtnet_ifmedia_sts);
959
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
960
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);
961
1110
962
	/* Read (or generate) the MAC address for the adapter. */
963
	vtnet_get_hwaddr(sc);
964
965
	ether_ifattach(ifp, sc->vtnet_hwaddr);
966
967
	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
1111
	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
968
		ifp->if_capabilities |= IFCAP_LINKSTATE;
1112
		ifp->if_capabilities |= IFCAP_LINKSTATE;
969
1113
970
	/* Tell the upper layer(s) we support long frames. */
1114
	ifmedia_init(&sc->vtnet_media, 0, vtnet_ifmedia_upd, vtnet_ifmedia_sts);
971
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1115
	ifmedia_add(&sc->vtnet_media, IFM_ETHER | IFM_AUTO, 0, NULL);
972
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
1116
	ifmedia_set(&sc->vtnet_media, IFM_ETHER | IFM_AUTO);
973
1117
974
	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
1118
	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
1119
		int gso;
1120
975
		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
1121
		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
976
1122
977
		if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) {
1123
		gso = virtio_with_feature(dev, VIRTIO_NET_F_GSO);
978
			ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
1124
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
1125
			ifp->if_capabilities |= IFCAP_TSO4;
1126
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
1127
			ifp->if_capabilities |= IFCAP_TSO6;
1128
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
979
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
1129
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
980
		} else {
981
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
982
				ifp->if_capabilities |= IFCAP_TSO4;
983
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
984
				ifp->if_capabilities |= IFCAP_TSO6;
985
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
986
				sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
987
		}
988
1130
989
		if (ifp->if_capabilities & IFCAP_TSO)
1131
		if (ifp->if_capabilities & (IFCAP_TSO4 | IFCAP_TSO6)) {
1132
			int tso_maxlen;
1133
990
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1134
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1135
1136
			tso_maxlen = vtnet_tunable_int(sc, "tso_maxlen",
1137
			    vtnet_tso_maxlen);
1138
			ifp->if_hw_tsomax = tso_maxlen -
1139
			    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1140
			ifp->if_hw_tsomaxsegcount = sc->vtnet_tx_nsegs - 1;
1141
			ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
1142
		}
991
	}
1143
	}
992
1144
993
	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
1145
	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
994
		ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
1146
		ifp->if_capabilities |= IFCAP_RXCSUM;
1147
#ifdef notyet
1148
		/* BMV: Rx checksums not distinguished between IPv4 and IPv6. */
1149
		ifp->if_capabilities |= IFCAP_RXCSUM_IPV6;
1150
#endif
995
1151
996
		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
1152
		if (vtnet_tunable_int(sc, "fixup_needs_csum",
997
		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
1153
		    vtnet_fixup_needs_csum) != 0)
998
			ifp->if_capabilities |= IFCAP_LRO;
1154
			sc->vtnet_flags |= VTNET_FLAG_FIXUP_NEEDS_CSUM;
1155
1156
		/* Support either "hardware" or software LRO. */
1157
		ifp->if_capabilities |= IFCAP_LRO;
999
	}
1158
	}
1000
1159
1001
	if (ifp->if_capabilities & IFCAP_HWCSUM) {
1160
	if (ifp->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6)) {
1002
		/*
1161
		/*
1003
		 * VirtIO does not support VLAN tagging, but we can fake
1162
		 * VirtIO does not support VLAN tagging, but we can fake
1004
		 * it by inserting and removing the 802.1Q header during
1163
		 * it by inserting and removing the 802.1Q header during
Lines 1009-1019 Link Here
1009
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
1168
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
1010
	}
1169
	}
1011
1170
1012
	ifp->if_capenable = ifp->if_capabilities;
1171
	if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO)
1172
		ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1173
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1013
1174
1014
	/*
1175
	/*
1015
	 * Capabilities after here are not enabled by default.
1176
	 * Capabilities after here are not enabled by default.
1016
	 */
1177
	 */
1178
	ifp->if_capenable = ifp->if_capabilities;
1017
1179
1018
	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
1180
	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
1019
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1181
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
Lines 1024-1082 Link Here
1024
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
1186
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
1025
	}
1187
	}
1026
1188
1027
	vtnet_set_rx_process_limit(sc);
1189
	ether_ifattach(ifp, sc->vtnet_hwaddr);
1028
	vtnet_set_tx_intr_threshold(sc);
1029
1190
1191
	/* Tell the upper layer(s) we support long frames. */
1192
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1193
1030
	NETDUMP_SET(ifp, vtnet);
1194
	NETDUMP_SET(ifp, vtnet);
1031
1195
1032
	return (0);
1196
	return (0);
1033
}
1197
}
1034
1198
1035
static int
1199
static int
1036
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
1200
vtnet_rx_cluster_size(struct vtnet_softc *sc, int mtu)
1037
{
1201
{
1202
	int framesz;
1203
1204
	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
1205
		return (MJUMPAGESIZE);
1206
	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
1207
		return (MCLBYTES);
1208
1209
	/*
1210
	 * Try to scale the receive mbuf cluster size from the MTU. Without
1211
	 * the GUEST_TSO[46] features, the VirtIO specification says the
1212
	 * driver must only be able to receive ~1500 byte frames. But if
1213
	 * jumbo frames can be transmitted then try to receive jumbo.
1214
	 *
1215
	 * BMV: Not quite true when F_MTU is negotiated!
1216
	 */
1217
	if (vtnet_modern(sc)) {
1218
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr_v1));
1219
		framesz = sizeof(struct virtio_net_hdr_v1);
1220
	} else
1221
		framesz = sizeof(struct vtnet_rx_header);
1222
	framesz += sizeof(struct ether_vlan_header) + mtu;
1223
1224
	if (framesz <= MCLBYTES)
1225
		return (MCLBYTES);
1226
	else if (framesz <= MJUMPAGESIZE)
1227
		return (MJUMPAGESIZE);
1228
	else if (framesz <= MJUM9BYTES)
1229
		return (MJUM9BYTES);
1230
1231
	/* Sane default; avoid 16KB clusters. */
1232
	return (MCLBYTES);
1233
}
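Worked example of the sizing above, assuming typical values (sizeof(struct virtio_net_hdr_v1) is 12 bytes, sizeof(struct ether_vlan_header) is 18, MCLBYTES is 2048, MJUMPAGESIZE is 4096, MJUM9BYTES is 9216) on a modern device without mergeable buffers or LRO_NOMRG:

	framesz = 12 + 18 + 1500;	/* = 1530 <= MCLBYTES     -> 2K cluster   */
	framesz = 12 + 18 + 4000;	/* = 4030 <= MJUMPAGESIZE -> page cluster */
	framesz = 12 + 18 + 9000;	/* = 9030 <= MJUM9BYTES   -> 9K cluster   */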
1234
1235
static int
1236
vtnet_ioctl_mtu(struct vtnet_softc *sc, int mtu)
1237
{
1038
	struct ifnet *ifp;
1238
	struct ifnet *ifp;
1039
	int frame_size, clsize;
1239
	int clustersz;
1040
1240
1041
	ifp = sc->vtnet_ifp;
1241
	ifp = sc->vtnet_ifp;
1242
	VTNET_CORE_LOCK_ASSERT(sc);
1042
1243
1043
	if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU)
1244
	if (ifp->if_mtu == mtu)
1245
		return (0);
1246
	else if (mtu < ETHERMIN || mtu > sc->vtnet_max_mtu)
1044
		return (EINVAL);
1247
		return (EINVAL);
1045
1248
1046
	frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) +
1249
	ifp->if_mtu = mtu;
1047
	    new_mtu;
1250
	clustersz = vtnet_rx_cluster_size(sc, mtu);
1048
1251
1049
	/*
1252
	if (clustersz != sc->vtnet_rx_clustersz &&
1050
	 * Based on the new MTU (and hence frame size) determine which
1253
	    ifp->if_drv_flags & IFF_DRV_RUNNING) {
1051
	 * cluster size is most appropriate for the receive queues.
1254
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1052
	 */
1255
		vtnet_init_locked(sc);
1053
	if (frame_size <= MCLBYTES) {
1256
	}
1054
		clsize = MCLBYTES;
1055
	} else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1056
		/* Avoid going past 9K jumbos. */
1057
		if (frame_size > MJUM9BYTES)
1058
			return (EINVAL);
1059
		clsize = MJUM9BYTES;
1060
	} else
1061
		clsize = MJUMPAGESIZE;
1062
1257
1063
	ifp->if_mtu = new_mtu;
1258
	return (0);
1064
	sc->vtnet_rx_new_clsize = clsize;
1259
}
1065
1260
1066
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1261
static int
1067
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1262
vtnet_ioctl_ifflags(struct vtnet_softc *sc)
1263
{
1264
	struct ifnet *ifp;
1265
	int drv_running;
1266
1267
	ifp = sc->vtnet_ifp;
1268
	drv_running = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1269
1270
	VTNET_CORE_LOCK_ASSERT(sc);
1271
1272
	if ((ifp->if_flags & IFF_UP) == 0) {
1273
		if (drv_running)
1274
			vtnet_stop(sc);
1275
		goto out;
1276
	}
1277
1278
	if (!drv_running) {
1068
		vtnet_init_locked(sc);
1279
		vtnet_init_locked(sc);
1280
		goto out;
1069
	}
1281
	}
1070
1282
1283
	if ((ifp->if_flags ^ sc->vtnet_if_flags) &
1284
	    (IFF_PROMISC | IFF_ALLMULTI)) {
1285
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
1286
			return (ENOTSUP);
1287
		vtnet_rx_filter(sc);
1288
	}
1289
1290
out:
1291
	sc->vtnet_if_flags = ifp->if_flags;
1071
	return (0);
1292
	return (0);
1072
}
1293
}
1073
1294
1074
static int
1295
static int
1296
vtnet_ioctl_multi(struct vtnet_softc *sc)
1297
{
1298
	struct ifnet *ifp;
1299
1300
	ifp = sc->vtnet_ifp;
1301
1302
	VTNET_CORE_LOCK_ASSERT(sc);
1303
1304
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX &&
1305
	    ifp->if_drv_flags & IFF_DRV_RUNNING)
1306
		vtnet_rx_filter_mac(sc);
1307
1308
	return (0);
1309
}
1310
1311
static int
1312
vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr)
1313
{
1314
	struct ifnet *ifp;
1315
	int mask, reinit, update;
1316
1317
	ifp = sc->vtnet_ifp;
1318
	mask = (ifr->ifr_reqcap & ifp->if_capabilities) ^ ifp->if_capenable;
1319
	reinit = update = 0;
1320
1321
	VTNET_CORE_LOCK_ASSERT(sc);
1322
1323
	if (mask & IFCAP_TXCSUM)
1324
		ifp->if_capenable ^= IFCAP_TXCSUM;
1325
	if (mask & IFCAP_TXCSUM_IPV6)
1326
		ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1327
	if (mask & IFCAP_TSO4)
1328
		ifp->if_capenable ^= IFCAP_TSO4;
1329
	if (mask & IFCAP_TSO6)
1330
		ifp->if_capenable ^= IFCAP_TSO6;
1331
1332
	if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) {
1333
		/*
1334
		 * These Rx features require the negotiated features to
1335
		 * be updated. Avoid a full reinit if possible.
1336
		 */
1337
		if (sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
1338
			update = 1;
1339
		else
1340
			reinit = 1;
1341
1342
		/* BMV: Avoid needless renegotiation for just software LRO. */
1343
		if ((mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) ==
1344
		    IFCAP_LRO && vtnet_software_lro(sc))
1345
			reinit = update = 0;
1346
1347
		if (mask & IFCAP_RXCSUM)
1348
			ifp->if_capenable ^= IFCAP_RXCSUM;
1349
		if (mask & IFCAP_RXCSUM_IPV6)
1350
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1351
		if (mask & IFCAP_LRO)
1352
			ifp->if_capenable ^= IFCAP_LRO;
1353
1354
		/*
1355
		 * VirtIO does not distinguish between IPv4 and IPv6 checksums
1356
		 * so treat them as a pair. Guest TSO (LRO) requires receive
1357
		 * checksums.
1358
		 */
1359
		if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
1360
			ifp->if_capenable |= IFCAP_RXCSUM;
1361
#ifdef notyet
1362
			ifp->if_capenable |= IFCAP_RXCSUM_IPV6;
1363
#endif
1364
		} else
1365
			ifp->if_capenable &=
1366
			    ~(IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO);
1367
	}
1368
1369
	if (mask & IFCAP_VLAN_HWFILTER) {
1370
		/* These Rx features require renegotiation. */
1371
		reinit = 1;
1372
1373
		if (mask & IFCAP_VLAN_HWFILTER)
1374
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1375
	}
1376
1377
	if (mask & IFCAP_VLAN_HWTSO)
1378
		ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1379
	if (mask & IFCAP_VLAN_HWTAGGING)
1380
		ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1381
1382
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1383
		if (reinit) {
1384
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1385
			vtnet_init_locked(sc);
1386
		} else if (update)
1387
			vtnet_update_rx_offloads(sc);
1388
	}
1389
1390
	return (0);
1391
}
1392
1393
static int
1075
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1394
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1076
{
1395
{
1077
	struct vtnet_softc *sc;
1396
	struct vtnet_softc *sc;
1078
	struct ifreq *ifr;
1397
	struct ifreq *ifr;
1079
	int reinit, mask, error;
1398
	int error;
1080
1399
1081
	sc = ifp->if_softc;
1400
	sc = ifp->if_softc;
1082
	ifr = (struct ifreq *) data;
1401
	ifr = (struct ifreq *) data;
Lines 1084-1128 Link Here
1084
1403
1085
	switch (cmd) {
1404
	switch (cmd) {
1086
	case SIOCSIFMTU:
1405
	case SIOCSIFMTU:
1087
		if (ifp->if_mtu != ifr->ifr_mtu) {
1406
		VTNET_CORE_LOCK(sc);
1088
			VTNET_CORE_LOCK(sc);
1407
		error = vtnet_ioctl_mtu(sc, ifr->ifr_mtu);
1089
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
1408
		VTNET_CORE_UNLOCK(sc);
1090
			VTNET_CORE_UNLOCK(sc);
1091
		}
1092
		break;
1409
		break;
1093
1410
1094
	case SIOCSIFFLAGS:
1411
	case SIOCSIFFLAGS:
1095
		VTNET_CORE_LOCK(sc);
1412
		VTNET_CORE_LOCK(sc);
1096
		if ((ifp->if_flags & IFF_UP) == 0) {
1413
		error = vtnet_ioctl_ifflags(sc);
1097
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1098
				vtnet_stop(sc);
1099
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1100
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
1101
			    (IFF_PROMISC | IFF_ALLMULTI)) {
1102
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
1103
					vtnet_rx_filter(sc);
1104
				else {
1105
					ifp->if_flags |= IFF_PROMISC;
1106
					if ((ifp->if_flags ^ sc->vtnet_if_flags)
1107
					    & IFF_ALLMULTI)
1108
						error = ENOTSUP;
1109
				}
1110
			}
1111
		} else
1112
			vtnet_init_locked(sc);
1113
1114
		if (error == 0)
1115
			sc->vtnet_if_flags = ifp->if_flags;
1116
		VTNET_CORE_UNLOCK(sc);
1414
		VTNET_CORE_UNLOCK(sc);
1117
		break;
1415
		break;
1118
1416
1119
	case SIOCADDMULTI:
1417
	case SIOCADDMULTI:
1120
	case SIOCDELMULTI:
1418
	case SIOCDELMULTI:
1121
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
1122
			break;
1123
		VTNET_CORE_LOCK(sc);
1419
		VTNET_CORE_LOCK(sc);
1124
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1420
		error = vtnet_ioctl_multi(sc);
1125
			vtnet_rx_filter_mac(sc);
1126
		VTNET_CORE_UNLOCK(sc);
1421
		VTNET_CORE_UNLOCK(sc);
1127
		break;
1422
		break;
1128
1423
Lines 1133-1178 Link Here
1133
1428
1134
	case SIOCSIFCAP:
1429
	case SIOCSIFCAP:
1135
		VTNET_CORE_LOCK(sc);
1430
		VTNET_CORE_LOCK(sc);
1136
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1431
		error = vtnet_ioctl_ifcap(sc, ifr);
1137
1138
		if (mask & IFCAP_TXCSUM)
1139
			ifp->if_capenable ^= IFCAP_TXCSUM;
1140
		if (mask & IFCAP_TXCSUM_IPV6)
1141
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1142
		if (mask & IFCAP_TSO4)
1143
			ifp->if_capenable ^= IFCAP_TSO4;
1144
		if (mask & IFCAP_TSO6)
1145
			ifp->if_capenable ^= IFCAP_TSO6;
1146
1147
		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
1148
		    IFCAP_VLAN_HWFILTER)) {
1149
			/* These Rx features require us to renegotiate. */
1150
			reinit = 1;
1151
1152
			if (mask & IFCAP_RXCSUM)
1153
				ifp->if_capenable ^= IFCAP_RXCSUM;
1154
			if (mask & IFCAP_RXCSUM_IPV6)
1155
				ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1156
			if (mask & IFCAP_LRO)
1157
				ifp->if_capenable ^= IFCAP_LRO;
1158
			if (mask & IFCAP_VLAN_HWFILTER)
1159
				ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1160
		} else
1161
			reinit = 0;
1162
1163
		if (mask & IFCAP_VLAN_HWTSO)
1164
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1165
		if (mask & IFCAP_VLAN_HWTAGGING)
1166
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1167
1168
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1169
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1170
			vtnet_init_locked(sc);
1171
		}
1172
1173
		VTNET_CORE_UNLOCK(sc);
1432
		VTNET_CORE_UNLOCK(sc);
1174
		VLAN_CAPABILITIES(ifp);
1433
		VLAN_CAPABILITIES(ifp);
1175
1176
		break;
1434
		break;
1177
1435
1178
	default:
1436
	default:
Lines 1191-1202 Link Here
1191
	struct virtqueue *vq;
1449
	struct virtqueue *vq;
1192
	int nbufs, error;
1450
	int nbufs, error;
1193
1451
1194
#ifdef DEV_NETMAP
1195
	error = vtnet_netmap_rxq_populate(rxq);
1196
	if (error >= 0)
1197
		return (error);
1198
#endif  /* DEV_NETMAP */
1199
1200
	vq = rxq->vtnrx_vq;
1452
	vq = rxq->vtnrx_vq;
1201
	error = ENOSPC;
1453
	error = ENOSPC;
1202
1454
Lines 1226-1245 Link Here
1226
	struct virtqueue *vq;
1478
	struct virtqueue *vq;
1227
	struct mbuf *m;
1479
	struct mbuf *m;
1228
	int last;
1480
	int last;
1229
#ifdef DEV_NETMAP
1230
	int netmap_bufs = vtnet_netmap_queue_on(rxq->vtnrx_sc, NR_RX,
1231
						rxq->vtnrx_id);
1232
#else  /* !DEV_NETMAP */
1233
	int netmap_bufs = 0;
1234
#endif /* !DEV_NETMAP */
1235
1481
1236
	vq = rxq->vtnrx_vq;
1482
	vq = rxq->vtnrx_vq;
1237
	last = 0;
1483
	last = 0;
1238
1484
1239
	while ((m = virtqueue_drain(vq, &last)) != NULL) {
1485
	while ((m = virtqueue_drain(vq, &last)) != NULL)
1240
		if (!netmap_bufs)
1486
		m_freem(m);
1241
			m_freem(m);
1242
	}
1243
1487
1244
	KASSERT(virtqueue_empty(vq),
1488
	KASSERT(virtqueue_empty(vq),
1245
	    ("%s: mbufs remaining in rx queue %p", __func__, rxq));
1489
	    ("%s: mbufs remaining in rx queue %p", __func__, rxq));
Lines 1249-1305 Link Here
1249
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
1493
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
1250
{
1494
{
1251
	struct mbuf *m_head, *m_tail, *m;
1495
	struct mbuf *m_head, *m_tail, *m;
1252
	int i, clsize;
1496
	int i, size;
1253
1497
1254
	clsize = sc->vtnet_rx_clsize;
1498
	m_head = NULL;
1499
	size = sc->vtnet_rx_clustersz;
1255
1500
1256
	KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1501
	KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1257
	    ("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs));
1502
	    ("%s: mbuf %d chain requested without LRO_NOMRG", __func__, nbufs));
1258
1503
1259
	m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize);
1504
	for (i = 0; i < nbufs; i++) {
1260
	if (m_head == NULL)
1505
		m = m_getjcl(M_NOWAIT, MT_DATA, i == 0 ? M_PKTHDR : 0, size);
1261
		goto fail;
1506
		if (m == NULL) {
1507
			sc->vtnet_stats.mbuf_alloc_failed++;
1508
			m_freem(m_head);
1509
			return (NULL);
1510
		}
1262
1511
1263
	m_head->m_len = clsize;
1512
		m->m_len = size;
1264
	m_tail = m_head;
1513
		if (m_head != NULL) {
1265
1514
			m_tail->m_next = m;
1266
	/* Allocate the rest of the chain. */
1515
			m_tail = m;
1267
	for (i = 1; i < nbufs; i++) {
1516
		} else
1268
		m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize);
1517
			m_head = m_tail = m;
1269
		if (m == NULL)
1270
			goto fail;
1271
1272
		m->m_len = clsize;
1273
		m_tail->m_next = m;
1274
		m_tail = m;
1275
	}
1518
	}
1276
1519
1277
	if (m_tailp != NULL)
1520
	if (m_tailp != NULL)
1278
		*m_tailp = m_tail;
1521
		*m_tailp = m_tail;
1279
1522
1280
	return (m_head);
1523
	return (m_head);
1281
1282
fail:
1283
	sc->vtnet_stats.mbuf_alloc_failed++;
1284
	m_freem(m_head);
1285
1286
	return (NULL);
1287
}
1524
}
1288
1525
1289
/*
1526
/*
1290
 * Slow path for when LRO without mergeable buffers is negotiated.
1527
 * Slow path for when LRO without mergeable buffers is negotiated.
1291
 */
1528
 */
1292
static int
1529
static int
1293
vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
1530
vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
1294
    int len0)
1531
    int len0)
1295
{
1532
{
1296
	struct vtnet_softc *sc;
1533
	struct vtnet_softc *sc;
1297
	struct mbuf *m, *m_prev;
1534
	struct mbuf *m, *m_prev, *m_new, *m_tail;
1298
	struct mbuf *m_new, *m_tail;
1535
	int len, clustersz, nreplace, error;
1299
	int len, clsize, nreplace, error;
1300
1536
1301
	sc = rxq->vtnrx_sc;
1537
	sc = rxq->vtnrx_sc;
1302
	clsize = sc->vtnet_rx_clsize;
1538
	clustersz = sc->vtnet_rx_clustersz;
1303
1539
1304
	m_prev = NULL;
1540
	m_prev = NULL;
1305
	m_tail = NULL;
1541
	m_tail = NULL;
Lines 1309-1333 Link Here
1309
	len = len0;
1545
	len = len0;
1310
1546
1311
	/*
1547
	/*
1312
	 * Since these mbuf chains are so large, we avoid allocating an
1548
	 * Since these mbuf chains are so large, avoid allocating a complete
1313
	 * entire replacement chain if possible. When the received frame
1549
	 * replacement when the received frame did not consume the entire
1314
	 * did not consume the entire chain, the unused mbufs are moved
1550
	 * chain. Unused mbufs are moved to the tail of the replacement mbuf.
1315
	 * to the replacement chain.
1316
	 */
1551
	 */
1317
	while (len > 0) {
1552
	while (len > 0) {
1318
		/*
1319
		 * Something is seriously wrong if we received a frame
1320
		 * larger than the chain. Drop it.
1321
		 */
1322
		if (m == NULL) {
1553
		if (m == NULL) {
1323
			sc->vtnet_stats.rx_frame_too_large++;
1554
			sc->vtnet_stats.rx_frame_too_large++;
1324
			return (EMSGSIZE);
1555
			return (EMSGSIZE);
1325
		}
1556
		}
1326
1557
1327
		/* We always allocate the same cluster size. */
1558
		/*
1328
		KASSERT(m->m_len == clsize,
1559
		 * Every mbuf should have the expected cluster size since that
1329
		    ("%s: mbuf size %d is not the cluster size %d",
1560
		 * is also used to allocate the replacements.
1330
		    __func__, m->m_len, clsize));
1561
		 */
1562
		KASSERT(m->m_len == clustersz,
1563
		    ("%s: mbuf size %d not expected cluster size %d", __func__,
1564
		    m->m_len, clustersz));
1331
1565
1332
		m->m_len = MIN(m->m_len, len);
1566
		m->m_len = MIN(m->m_len, len);
1333
		len -= m->m_len;
1567
		len -= m->m_len;
Lines 1337-1355 Link Here
1337
		nreplace++;
1571
		nreplace++;
1338
	}
1572
	}
1339
1573
1340
	KASSERT(nreplace <= sc->vtnet_rx_nmbufs,
1574
	KASSERT(nreplace > 0 && nreplace <= sc->vtnet_rx_nmbufs,
1341
	    ("%s: too many replacement mbufs %d max %d", __func__, nreplace,
1575
	    ("%s: invalid replacement mbuf count %d max %d", __func__,
1342
	    sc->vtnet_rx_nmbufs));
1576
	    nreplace, sc->vtnet_rx_nmbufs));
1343
1577
1344
	m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
1578
	m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
1345
	if (m_new == NULL) {
1579
	if (m_new == NULL) {
1346
		m_prev->m_len = clsize;
1580
		m_prev->m_len = clustersz;
1347
		return (ENOBUFS);
1581
		return (ENOBUFS);
1348
	}
1582
	}
1349
1583
1350
	/*
1584
	/*
1351
	 * Move any unused mbufs from the received chain onto the end
1585
	 * Move any unused mbufs from the received mbuf chain onto the
1352
	 * of the new chain.
1586
	 * end of the replacement chain.
1353
	 */
1587
	 */
1354
	if (m_prev->m_next != NULL) {
1588
	if (m_prev->m_next != NULL) {
1355
		m_tail->m_next = m_prev->m_next;
1589
		m_tail->m_next = m_prev->m_next;
Lines 1359-1379 Link Here
1359
	error = vtnet_rxq_enqueue_buf(rxq, m_new);
1593
	error = vtnet_rxq_enqueue_buf(rxq, m_new);
1360
	if (error) {
1594
	if (error) {
1361
		/*
1595
		/*
1362
		 * BAD! We could not enqueue the replacement mbuf chain. We
1596
		 * The replacement is suppose to be an copy of the one
1363
		 * must restore the m0 chain to the original state if it was
1597
		 * dequeued so this is a very unexpected error.
1364
		 * modified so we can subsequently discard it.
1365
		 *
1598
		 *
1366
		 * NOTE: The replacement is suppose to be an identical copy
1599
		 * Restore the m0 chain to the original state if it was
1367
		 * to the one just dequeued so this is an unexpected error.
1600
		 * modified so we can then discard it.
1368
		 */
1601
		 */
1369
		sc->vtnet_stats.rx_enq_replacement_failed++;
1370
1371
		if (m_tail->m_next != NULL) {
1602
		if (m_tail->m_next != NULL) {
1372
			m_prev->m_next = m_tail->m_next;
1603
			m_prev->m_next = m_tail->m_next;
1373
			m_tail->m_next = NULL;
1604
			m_tail->m_next = NULL;
1374
		}
1605
		}
1375
1606
		m_prev->m_len = clustersz;
1376
		m_prev->m_len = clsize;
1607
		sc->vtnet_stats.rx_enq_replacement_failed++;
1377
		m_freem(m_new);
1608
		m_freem(m_new);
1378
	}
1609
	}
1379
1610
Lines 1389-1419 Link Here
1389
1620
1390
	sc = rxq->vtnrx_sc;
1621
	sc = rxq->vtnrx_sc;
1391
1622
1392
	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
1623
	if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
1393
	    ("%s: chained mbuf without LRO_NOMRG", __func__));
1624
		return (vtnet_rxq_replace_lro_nomrg_buf(rxq, m, len));
1394
1625
1395
	if (m->m_next == NULL) {
1626
	MPASS(m->m_next == NULL);
1396
		/* Fast-path for the common case of just one mbuf. */
1627
	if (m->m_len < len)
1397
		if (m->m_len < len)
1628
		return (EMSGSIZE);
1398
			return (EINVAL);
1399
1629
1400
		m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
1630
	m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
1401
		if (m_new == NULL)
1631
	if (m_new == NULL)
1402
			return (ENOBUFS);
1632
		return (ENOBUFS);
1403
1633
1404
		error = vtnet_rxq_enqueue_buf(rxq, m_new);
1634
	error = vtnet_rxq_enqueue_buf(rxq, m_new);
1405
		if (error) {
1635
	if (error) {
1406
			/*
1636
		sc->vtnet_stats.rx_enq_replacement_failed++;
1407
			 * The new mbuf is suppose to be an identical
1637
		m_freem(m_new);
1408
			 * copy of the one just dequeued so this is an
1409
			 * unexpected error.
1410
			 */
1411
			m_freem(m_new);
1412
			sc->vtnet_stats.rx_enq_replacement_failed++;
1413
		} else
1414
			m->m_len = len;
1415
	} else
1638
	} else
1416
		error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len);
1639
		m->m_len = len;
1417
1640
1418
	return (error);
1641
	return (error);
1419
}
1642
}
Lines 1423-1461 Link Here
1423
{
1646
{
1424
	struct vtnet_softc *sc;
1647
	struct vtnet_softc *sc;
1425
	struct sglist *sg;
1648
	struct sglist *sg;
1426
	struct vtnet_rx_header *rxhdr;
1649
	int header_inlined, error;
1427
	uint8_t *mdata;
1428
	int offset, error;
1429
1650
1430
	sc = rxq->vtnrx_sc;
1651
	sc = rxq->vtnrx_sc;
1431
	sg = rxq->vtnrx_sg;
1652
	sg = rxq->vtnrx_sg;
1432
	mdata = mtod(m, uint8_t *);
1433
1653
1654
	KASSERT(m->m_next == NULL || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1655
	    ("%s: mbuf chain without LRO_NOMRG", __func__));
1434
	VTNET_RXQ_LOCK_ASSERT(rxq);
1656
	VTNET_RXQ_LOCK_ASSERT(rxq);
1435
	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
1436
	    ("%s: chained mbuf without LRO_NOMRG", __func__));
1437
	KASSERT(m->m_len == sc->vtnet_rx_clsize,
1438
	    ("%s: unexpected cluster size %d/%d", __func__, m->m_len,
1439
	     sc->vtnet_rx_clsize));
1440
1657
1441
	sglist_reset(sg);
1658
	sglist_reset(sg);
1442
	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1659
	header_inlined = vtnet_modern(sc) ||
1660
	    (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) != 0; /* TODO: ANY_LAYOUT */
1661
1662
	if (header_inlined)
1663
		error = sglist_append_mbuf(sg, m);
1664
	else {
1665
		struct vtnet_rx_header *rxhdr =
1666
		    mtod(m, struct vtnet_rx_header *);
1443
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
1667
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
1444
		rxhdr = (struct vtnet_rx_header *) mdata;
1445
		sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
1446
		offset = sizeof(struct vtnet_rx_header);
1447
	} else
1448
		offset = 0;
1449
1668
1450
	sglist_append(sg, mdata + offset, m->m_len - offset);
1669
		/* Append the header and remaining mbuf data. */
1451
	if (m->m_next != NULL) {
1670
		error = sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
1452
		error = sglist_append_mbuf(sg, m->m_next);
1671
		if (error)
1453
		MPASS(error == 0);
1672
			return (error);
1673
		error = sglist_append(sg, &rxhdr[1],
1674
		    m->m_len - sizeof(struct vtnet_rx_header));
1675
		if (error)
1676
			return (error);
1677
1678
		if (m->m_next != NULL)
1679
			error = sglist_append_mbuf(sg, m->m_next);
1454
	}
1680
	}
1455
1681
1456
	error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg);
1682
	if (error)
1683
		return (error);
1457
1684
1458
	return (error);
1685
	return (virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg));
1459
}
1686
}
1460
1687
1461
static int
1688
static int
Lines 1478-1531 Link Here
1478
	return (error);
1705
	return (error);
1479
}
1706
}
1480
1707
1481
/*
1482
 * Use the checksum offset in the VirtIO header to set the
1483
 * correct CSUM_* flags.
1484
 */
1485
static int
1708
static int
1486
vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m,
1709
vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t etype,
1487
    uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
1710
    int hoff, struct virtio_net_hdr *hdr)
1488
{
1711
{
1489
	struct vtnet_softc *sc;
1712
	struct vtnet_softc *sc;
1490
#if defined(INET) || defined(INET6)
1713
	int error;
1491
	int offset = hdr->csum_start + hdr->csum_offset;
1492
#endif
1493
1714
1494
	sc = rxq->vtnrx_sc;
1715
	sc = rxq->vtnrx_sc;
1495
1716
1496
	/* Only do a basic sanity check on the offset. */
1717
	/*
1497
	switch (eth_type) {
1718
	 * NEEDS_CSUM corresponds to Linux's CHECKSUM_PARTIAL, but FreeBSD does
1498
#if defined(INET)
1719
	 * not have an analogous CSUM flag. The checksum has been validated,
1499
	case ETHERTYPE_IP:
1720
	 * but is incomplete (TCP/UDP pseudo header).
1500
		if (__predict_false(offset < ip_start + sizeof(struct ip)))
1721
	 *
1501
			return (1);
1722
	 * The packet is likely from another VM on the same host that itself
1502
		break;
1723
	 * performed checksum offloading so Tx/Rx is basically a memcpy and
1503
#endif
1724
	 * the checksum has little value.
1504
#if defined(INET6)
1725
	 *
1505
	case ETHERTYPE_IPV6:
1726
	 * Default to receiving the packet as-is for performance reasons, but
1506
		if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
1727
	 * this can cause issues if the packet is to be forwarded because it
1507
			return (1);
1728
	 * does not contain a valid checksum. This patch may be helpful:
1508
		break;
1729
	 * https://reviews.freebsd.org/D6611. In the meantime, have the driver
1509
#endif
1730
	 * compute the checksum if requested.
1510
	default:
1731
	 *
1511
		sc->vtnet_stats.rx_csum_bad_ethtype++;
1732
	 * BMV: Need to add an CSUM_PARTIAL flag?
1512
		return (1);
1733
	 */
1734
	if ((sc->vtnet_flags & VTNET_FLAG_FIXUP_NEEDS_CSUM) == 0) {
1735
		error = vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr);
1736
		return (error);
1513
	}
1737
	}
1514
1738
1515
	/*
1739
	/*
1516
	 * Use the offset to determine the appropriate CSUM_* flags. This is
1740
	 * Compute the checksum in the driver so the packet will contain a
1517
	 * a bit dirty, but we can get by with it since the checksum offsets
1741
	 * valid checksum. The checksum is at csum_offset from csum_start.
1518
	 * happen to be different. We assume the host host does not do IPv4
1519
	 * header checksum offloading.
1520
	 */
1742
	 */
1521
	switch (hdr->csum_offset) {
1743
	switch (etype) {
1522
	case offsetof(struct udphdr, uh_sum):
1744
#if defined(INET) || defined(INET6)
1523
	case offsetof(struct tcphdr, th_sum):
1745
	case ETHERTYPE_IP:
1746
	case ETHERTYPE_IPV6: {
1747
		int csum_off, csum_end;
1748
		uint16_t csum;
1749
1750
		csum_off = hdr->csum_start + hdr->csum_offset;
1751
		csum_end = csum_off + sizeof(uint16_t);
1752
1753
		/* Assume checksum will be in the first mbuf. */
1754
		if (m->m_len < csum_end || m->m_pkthdr.len < csum_end)
1755
			return (1);
1756
1757
		/*
1758
		 * Like in_delayed_cksum()/in6_delayed_cksum(), compute the
1759
		 * checksum and write it at the specified offset. We could
1760
		 * try to verify the packet: csum_start should probably
1761
		 * correspond to the start of the TCP/UDP header.
1762
		 *
1763
		 * BMV: Need to properly handle UDP with zero checksum. Is
1764
		 * the IPv4 header checksum implicitly validated?
1765
		 */
1766
		csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start);
1767
		*(uint16_t *)(mtodo(m, csum_off)) = csum;
1524
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1768
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1525
		m->m_pkthdr.csum_data = 0xFFFF;
1769
		m->m_pkthdr.csum_data = 0xFFFF;
1526
		break;
1770
		break;
1771
	}
1772
#endif
1527
	default:
1773
	default:
1528
		sc->vtnet_stats.rx_csum_bad_offset++;
1774
		sc->vtnet_stats.rx_csum_bad_ethtype++;
1529
		return (1);
1775
		return (1);
1530
	}
1776
	}
1531
1777
Lines 1533-1596 Link Here
1533
}
1779
}
1534
1780
1535
static int
1781
static int
1536
vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m,
1782
vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m,
1537
    uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
1783
    uint16_t etype, int hoff, struct virtio_net_hdr *hdr)
1538
{
1784
{
1539
	struct vtnet_softc *sc;
1785
	struct vtnet_softc *sc;
1540
	int offset, proto;
1786
	int protocol;
1541
1787
1542
	sc = rxq->vtnrx_sc;
1788
	sc = rxq->vtnrx_sc;
1543
1789
1544
	switch (eth_type) {
1790
	switch (etype) {
1545
#if defined(INET)
1791
#if defined(INET)
1546
	case ETHERTYPE_IP: {
1792
	case ETHERTYPE_IP:
1547
		struct ip *ip;
1793
		if (__predict_false(m->m_len < hoff + sizeof(struct ip)))
1548
		if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
1794
			protocol = IPPROTO_DONE;
1549
			return (1);
1795
		else {
1550
		ip = (struct ip *)(m->m_data + ip_start);
1796
			struct ip *ip = (struct ip *)(m->m_data + hoff);
1551
		proto = ip->ip_p;
1797
			protocol = ip->ip_p;
1552
		offset = ip_start + (ip->ip_hl << 2);
1798
		}
1553
		break;
1799
		break;
1554
	}
1555
#endif
1800
#endif
1556
#if defined(INET6)
1801
#if defined(INET6)
1557
	case ETHERTYPE_IPV6:
1802
	case ETHERTYPE_IPV6:
1558
		if (__predict_false(m->m_len < ip_start +
1803
		if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr))
1559
		    sizeof(struct ip6_hdr)))
1804
		    || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0)
1560
			return (1);
1805
			protocol = IPPROTO_DONE;
1561
		offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
1562
		if (__predict_false(offset < 0))
1563
			return (1);
1564
		break;
1806
		break;
1565
#endif
1807
#endif
1566
	default:
1808
	default:
1567
		sc->vtnet_stats.rx_csum_bad_ethtype++;
1809
		protocol = IPPROTO_DONE;
1568
		return (1);
1810
		break;
1569
	}
1811
	}
1570
1812
1571
	switch (proto) {
1813
	switch (protocol) {
1572
	case IPPROTO_TCP:
1814
	case IPPROTO_TCP:
1573
		if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
1574
			return (1);
1575
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1576
		m->m_pkthdr.csum_data = 0xFFFF;
1577
		break;
1578
	case IPPROTO_UDP:
1815
	case IPPROTO_UDP:
1579
		if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
1580
			return (1);
1581
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1816
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1582
		m->m_pkthdr.csum_data = 0xFFFF;
1817
		m->m_pkthdr.csum_data = 0xFFFF;
1583
		break;
1818
		break;
1584
	default:
1819
	default:
1585
		/*
1820
		/*
1586
		 * For the remaining protocols, FreeBSD does not support
1821
		 * FreeBSD does not support checksum offloading of this
1587
		 * checksum offloading, so the checksum will be recomputed.
1822
		 * protocol. Let the stack re-verify the checksum later
1823
		 * if the protocol is supported.
1588
		 */
1824
		 */
1589
#if 0
1825
#if 0
1590
		if_printf(sc->vtnet_ifp, "cksum offload of unsupported "
1826
		if_printf(sc->vtnet_ifp,
1591
		    "protocol eth_type=%#x proto=%d csum_start=%d "
1827
		    "%s: checksum offload of unsupported protocol "
1592
		    "csum_offset=%d\n", __func__, eth_type, proto,
1828
		    "etype=%#x protocol=%d csum_start=%d csum_offset=%d\n",
1593
		    hdr->csum_start, hdr->csum_offset);
1829
		    __func__, etype, protocol, hdr->csum_start,
1830
		    hdr->csum_offset);
1594
#endif
1831
#endif
1595
		break;
1832
		break;
1596
	}
1833
	}
Lines 1598-1638 Link Here
1598
	return (0);
1835
	return (0);
1599
}
1836
}
1600
1837
1601
/*
1602
 * Set the appropriate CSUM_* flags. Unfortunately, the information
1603
 * provided is not directly useful to us. The VirtIO header gives the
1604
 * offset of the checksum, which is all Linux needs, but this is not
1605
 * how FreeBSD does things. We are forced to peek inside the packet
1606
 * a bit.
1607
 *
1608
 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
1609
 * could accept the offsets and let the stack figure it out.
1610
 */
1611
static int
1838
static int
1612
vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
1839
vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
1613
    struct virtio_net_hdr *hdr)
1840
    struct virtio_net_hdr *hdr)
1614
{
1841
{
1615
	struct ether_header *eh;
1842
	const struct ether_header *eh;
1616
	struct ether_vlan_header *evh;
1843
	int hoff;
1617
	uint16_t eth_type;
1844
	uint16_t etype;
1618
	int offset, error;
1619
1845
1620
	eh = mtod(m, struct ether_header *);
1846
	eh = mtod(m, const struct ether_header *);
1621
	eth_type = ntohs(eh->ether_type);
1847
	etype = ntohs(eh->ether_type);
1622
	if (eth_type == ETHERTYPE_VLAN) {
1848
	if (etype == ETHERTYPE_VLAN) {
1623
		/* BMV: We should handle nested VLAN tags too. */
1849
		/* TODO BMV: Handle QinQ. */
1624
		evh = mtod(m, struct ether_vlan_header *);
1850
		const struct ether_vlan_header *evh =
1625
		eth_type = ntohs(evh->evl_proto);
1851
		    mtod(m, const struct ether_vlan_header *);
1626
		offset = sizeof(struct ether_vlan_header);
1852
		etype = ntohs(evh->evl_proto);
1853
		hoff = sizeof(struct ether_vlan_header);
1627
	} else
1854
	} else
1628
		offset = sizeof(struct ether_header);
1855
		hoff = sizeof(struct ether_header);
1629
1856
1630
	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1857
	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1631
		error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr);
1858
		return (vtnet_rxq_csum_needs_csum(rxq, m, etype, hoff, hdr));
1632
	else
1859
	else /* VIRTIO_NET_HDR_F_DATA_VALID */
1633
		error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr);
1860
		return (vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr));
1634
1635
	return (error);
1636
}
1861
}
1637
1862
1638
static void
1863
static void
Lines 1667-1680 Link Here
1667
{
1892
{
1668
	struct vtnet_softc *sc;
1893
	struct vtnet_softc *sc;
1669
	struct virtqueue *vq;
1894
	struct virtqueue *vq;
1670
	struct mbuf *m, *m_tail;
1895
	struct mbuf *m_tail;
1671
	int len;
1672
1896
1673
	sc = rxq->vtnrx_sc;
1897
	sc = rxq->vtnrx_sc;
1674
	vq = rxq->vtnrx_vq;
1898
	vq = rxq->vtnrx_vq;
1675
	m_tail = m_head;
1899
	m_tail = m_head;
1676
1900
1677
	while (--nbufs > 0) {
1901
	while (--nbufs > 0) {
1902
		struct mbuf *m;
1903
		int len;
1904
1678
		m = virtqueue_dequeue(vq, &len);
1905
		m = virtqueue_dequeue(vq, &len);
1679
		if (m == NULL) {
1906
		if (m == NULL) {
1680
			rxq->vtnrx_stats.vrxs_ierrors++;
1907
			rxq->vtnrx_stats.vrxs_ierrors++;
Lines 1709-1727 Link Here
1709
	return (1);
1936
	return (1);
1710
}
1937
}
1711
1938
1939
#if defined(INET) || defined(INET6)
1940
static int
1941
vtnet_lro_rx(struct vtnet_rxq *rxq, struct mbuf *m)
1942
{
1943
	struct lro_ctrl *lro;
1944
1945
	lro = &rxq->vtnrx_lro;
1946
1947
	if (lro->lro_mbuf_max != 0) {
1948
		tcp_lro_queue_mbuf(lro, m);
1949
		return (0);
1950
	}
1951
1952
	return (tcp_lro_rx(lro, m, 0));
1953
}
1954
#endif
1955
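tcp_lro_queue_mbuf(), tcp_lro_rx() and tcp_lro_flush_all() are FreeBSD's stock tcp_lro KPI. The queued (batched) path above does not deliver anything until the caller flushes, which is why the receive loop later ends with tcp_lro_flush_all(). A hedged kernel-context sketch of the same two-mode pattern; rx_batch() and input_direct() are hypothetical:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <netinet/tcp_lro.h>

static void
input_direct(struct mbuf *m)
{
	m_freem(m);		/* stand-in for handing m to if_input */
}

static void
rx_batch(struct lro_ctrl *lro, struct mbuf **pkts, int n)
{
	for (int i = 0; i < n; i++) {
		if (lro->lro_mbuf_max != 0) {
			/* Batched: mbufs coalesce at flush time. */
			tcp_lro_queue_mbuf(lro, pkts[i]);
		} else if (tcp_lro_rx(lro, pkts[i], 0) != 0) {
			/* Not TCP, or no free LRO entry: pass up as-is. */
			input_direct(pkts[i]);
		}
	}
	tcp_lro_flush_all(lro);	/* deliver everything still held */
}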
1712
static void
1956
static void
1713
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
1957
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
1714
    struct virtio_net_hdr *hdr)
1958
    struct virtio_net_hdr *hdr)
1715
{
1959
{
1716
	struct vtnet_softc *sc;
1960
	struct vtnet_softc *sc;
1717
	struct ifnet *ifp;
1961
	struct ifnet *ifp;
1718
	struct ether_header *eh;
1719
1962
1720
	sc = rxq->vtnrx_sc;
1963
	sc = rxq->vtnrx_sc;
1721
	ifp = sc->vtnet_ifp;
1964
	ifp = sc->vtnet_ifp;
1722
1965
1723
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1966
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1724
		eh = mtod(m, struct ether_header *);
1967
		struct ether_header *eh = mtod(m, struct ether_header *);
1725
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1968
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1726
			vtnet_vlan_tag_remove(m);
1969
			vtnet_vlan_tag_remove(m);
1727
			/*
1970
			/*
Lines 1736-1760 Link Here
1736
	m->m_pkthdr.flowid = rxq->vtnrx_id;
1979
	m->m_pkthdr.flowid = rxq->vtnrx_id;
1737
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1980
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1738
1981
1739
	/*
1982
	if (hdr->flags &
1740
	 * BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum
1983
	    (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) {
1741
	 * distinction that Linux does. Need to reevaluate if performing
1742
	 * offloading for the NEEDS_CSUM case is really appropriate.
1743
	 */
1744
	if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM |
1745
	    VIRTIO_NET_HDR_F_DATA_VALID)) {
1746
		if (vtnet_rxq_csum(rxq, m, hdr) == 0)
1984
		if (vtnet_rxq_csum(rxq, m, hdr) == 0)
1747
			rxq->vtnrx_stats.vrxs_csum++;
1985
			rxq->vtnrx_stats.vrxs_csum++;
1748
		else
1986
		else
1749
			rxq->vtnrx_stats.vrxs_csum_failed++;
1987
			rxq->vtnrx_stats.vrxs_csum_failed++;
1750
	}
1988
	}
1751
1989
1990
	if (hdr->gso_size != 0) {
1991
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1992
		case VIRTIO_NET_HDR_GSO_TCPV4:
1993
		case VIRTIO_NET_HDR_GSO_TCPV6:
1994
			m->m_pkthdr.lro_nsegs =
1995
			    howmany(m->m_pkthdr.len, hdr->gso_size);
1996
			rxq->vtnrx_stats.vrxs_host_lro++;
1997
			break;
1998
		}
1999
	}
2000
1752
	rxq->vtnrx_stats.vrxs_ipackets++;
2001
	rxq->vtnrx_stats.vrxs_ipackets++;
1753
	rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;
2002
	rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;
1754
2003
1755
	VTNET_RXQ_UNLOCK(rxq);
2004
#if defined(INET) || defined(INET6)
2005
	if (vtnet_software_lro(sc) && ifp->if_capenable & IFCAP_LRO) {
2006
		if (vtnet_lro_rx(rxq, m) == 0)
2007
			return;
2008
	}
2009
#endif
2010
1756
	(*ifp->if_input)(ifp, m);
2011
	(*ifp->if_input)(ifp, m);
1757
	VTNET_RXQ_LOCK(rxq);
1758
}
2012
}
1759
2013
1760
static int
2014
static int
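When the host itself coalesced the frame (hdr->gso_size != 0 above), lro_nsegs back-computes how many wire segments it represents so the stack's accounting stays honest. The arithmetic, as a self-contained sketch using howmany() as defined in <sys/param.h>:

#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

int
main(void)
{
	int pkt_len = 45000;	/* hypothetical coalesced frame length */
	int gso_size = 1448;	/* MSS reported in the virtio header */

	/* Rounds up: 45000 bytes at 1448 per segment -> 32 segments. */
	printf("lro_nsegs = %d\n", howmany(pkt_len, gso_size));
	return (0);
}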
Lines 1764-1783 Link Here
1764
	struct vtnet_softc *sc;
2018
	struct vtnet_softc *sc;
1765
	struct ifnet *ifp;
2019
	struct ifnet *ifp;
1766
	struct virtqueue *vq;
2020
	struct virtqueue *vq;
1767
	struct mbuf *m;
2021
	int deq, count;
1768
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
1769
	int len, deq, nbufs, adjsz, count;
1770
2022
1771
	sc = rxq->vtnrx_sc;
2023
	sc = rxq->vtnrx_sc;
1772
	vq = rxq->vtnrx_vq;
2024
	vq = rxq->vtnrx_vq;
1773
	ifp = sc->vtnet_ifp;
2025
	ifp = sc->vtnet_ifp;
1774
	hdr = &lhdr;
1775
	deq = 0;
2026
	deq = 0;
1776
	count = sc->vtnet_rx_process_limit;
2027
	count = sc->vtnet_rx_process_limit;
1777
2028
1778
	VTNET_RXQ_LOCK_ASSERT(rxq);
2029
	VTNET_RXQ_LOCK_ASSERT(rxq);
1779
2030
2031
#ifdef DEV_NETMAP
2032
	if (netmap_rx_irq(ifp, 0, &deq))
2033
		return (0);
2034
#endif
2035
1780
	while (count-- > 0) {
2036
	while (count-- > 0) {
2037
		struct mbuf *m;
2038
		int len, nbufs, adjsz;
2039
1781
		m = virtqueue_dequeue(vq, &len);
2040
		m = virtqueue_dequeue(vq, &len);
1782
		if (m == NULL)
2041
		if (m == NULL)
1783
			break;
2042
			break;
Lines 1789-1806 Link Here
1789
			continue;
2048
			continue;
1790
		}
2049
		}
1791
2050
1792
		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
2051
		if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) {
2052
			struct virtio_net_hdr_mrg_rxbuf *mhdr =
2053
			    mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
2054
			nbufs = vtnet_htog16(sc, mhdr->num_buffers);
2055
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2056
		} else if (vtnet_modern(sc)) {
2057
			nbufs = 1; /* num_buffers is always 1 */
2058
			adjsz = sizeof(struct virtio_net_hdr_v1);
2059
		} else {
1793
			nbufs = 1;
2060
			nbufs = 1;
1794
			adjsz = sizeof(struct vtnet_rx_header);
2061
			adjsz = sizeof(struct vtnet_rx_header);
1795
			/*
2062
			/*
1796
			 * Account for our pad inserted between the header
2063
			 * Account for our gap between the header and start of
1797
			 * and the actual start of the frame.
2064
			 * data to keep the segments separated.
1798
			 */
2065
			 */
1799
			len += VTNET_RX_HEADER_PAD;
2066
			len += VTNET_RX_HEADER_PAD;
1800
		} else {
1801
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
1802
			nbufs = mhdr->num_buffers;
1803
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1804
		}
2067
		}
1805
2068
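The three-way branch above decides both how many descriptors one frame spans (nbufs) and how many leading header bytes to strip (adjsz). A hedged sketch of just the size selection, with stand-in struct layouts (the real definitions live in the VirtIO headers; VTNET_RX_HEADER_PAD is assumed to be 4):

#include <stddef.h>
#include <stdint.h>

struct hdr_legacy { uint8_t flags, gso_type;
	uint16_t hdr_len, gso_size, csum_start, csum_offset; };
struct hdr_v1  { struct hdr_legacy h; uint16_t num_buffers; };
struct hdr_mrg { struct hdr_legacy h; uint16_t num_buffers; };

static size_t
rx_adjsz(int mrg_rxbufs, int modern)
{
	if (mrg_rxbufs)		/* num_buffers is read from the header */
		return (sizeof(struct hdr_mrg));
	if (modern)		/* V1 header; num_buffers is always 1 */
		return (sizeof(struct hdr_v1));
	/* Legacy: short header plus the driver's alignment pad. */
	return (sizeof(struct hdr_legacy) + 4);
}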
1806
		if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
2069
		if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
Lines 1822-1847 Link Here
1822
		}
2085
		}
1823
2086
1824
		/*
2087
		/*
1825
		 * Save copy of header before we strip it. For both mergeable
2088
		 * Save an endian swapped version of the header prior to it
1826
		 * and non-mergeable, the header is at the beginning of the
2089
		 * being stripped. The header is always at the start of the
1827
		 * mbuf data. We no longer need num_buffers, so always use a
2090
		 * mbuf data. num_buffers was already saved (and not needed)
1828
		 * regular header.
2091
		 * so use the standard header.
1829
		 *
1830
		 * BMV: Is this memcpy() expensive? We know the mbuf data is
1831
		 * still valid even after the m_adj().
1832
		 */
2092
		 */
1833
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
2093
		hdr = mtod(m, struct virtio_net_hdr *);
2094
		lhdr.flags = hdr->flags;
2095
		lhdr.gso_type = hdr->gso_type;
2096
		lhdr.hdr_len = vtnet_htog16(sc, hdr->hdr_len);
2097
		lhdr.gso_size = vtnet_htog16(sc, hdr->gso_size);
2098
		lhdr.csum_start = vtnet_htog16(sc, hdr->csum_start);
2099
		lhdr.csum_offset = vtnet_htog16(sc, hdr->csum_offset);
1834
		m_adj(m, adjsz);
2100
		m_adj(m, adjsz);
1835
2101
1836
		vtnet_rxq_input(rxq, m, hdr);
2102
		vtnet_rxq_input(rxq, m, &lhdr);
1837
1838
		/* Must recheck after dropping the Rx lock. */
1839
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1840
			break;
1841
	}
2103
	}
1842
2104
1843
	if (deq > 0)
2105
	if (deq > 0) {
2106
#if defined(INET) || defined(INET6)
2107
		tcp_lro_flush_all(&rxq->vtnrx_lro);
2108
#endif
1844
		virtqueue_notify(vq);
2109
		virtqueue_notify(vq);
2110
	}
1845
2111
1846
	return (count > 0 ? 0 : EAGAIN);
2112
	return (count > 0 ? 0 : EAGAIN);
1847
}
2113
}
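The lhdr copy above is where the vtnet_htog16() conversions matter: modern (VIRTIO_F_VERSION_1) devices define all multi-byte header fields as little-endian, while legacy devices use the guest's native order. A minimal sketch of that contract; htog16_sketch() is hypothetical:

#include <stdint.h>
#include <sys/endian.h>		/* le16toh() on FreeBSD */

static inline uint16_t
htog16_sketch(int modern, uint16_t hdr_val)
{
	/* Reading a header field: swap only for modern devices on a
	 * big-endian guest; le16toh() is a no-op on little-endian. */
	return (modern ? le16toh(hdr_val) : hdr_val);
}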
Lines 1870-1880 Link Here
1870
		return;
2136
		return;
1871
	}
2137
	}
1872
2138
1873
#ifdef DEV_NETMAP
1874
	if (netmap_rx_irq(ifp, rxq->vtnrx_id, &more) != NM_IRQ_PASS)
1875
		return;
1876
#endif /* DEV_NETMAP */
1877
1878
	VTNET_RXQ_LOCK(rxq);
2139
	VTNET_RXQ_LOCK(rxq);
1879
2140
1880
again:
2141
again:
Lines 1894-1901 Link Here
1894
		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
2155
		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
1895
			goto again;
2156
			goto again;
1896
2157
1897
		VTNET_RXQ_UNLOCK(rxq);
1898
		rxq->vtnrx_stats.vrxs_rescheduled++;
2158
		rxq->vtnrx_stats.vrxs_rescheduled++;
2159
		VTNET_RXQ_UNLOCK(rxq);
1899
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
2160
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
1900
	} else
2161
	} else
1901
		VTNET_RXQ_UNLOCK(rxq);
2162
		VTNET_RXQ_UNLOCK(rxq);
Lines 1925-1946 Link Here
1925
		if (!more)
2186
		if (!more)
1926
			vtnet_rxq_disable_intr(rxq);
2187
			vtnet_rxq_disable_intr(rxq);
1927
		rxq->vtnrx_stats.vrxs_rescheduled++;
2188
		rxq->vtnrx_stats.vrxs_rescheduled++;
2189
		VTNET_RXQ_UNLOCK(rxq);
1928
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
2190
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
1929
	}
2191
	} else
2192
		VTNET_RXQ_UNLOCK(rxq);
2193
}
1930
2194
1931
	VTNET_RXQ_UNLOCK(rxq);
2195
static int
2196
vtnet_txq_intr_threshold(struct vtnet_txq *txq)
2197
{
2198
	struct vtnet_softc *sc;
2199
	int threshold;
2200
2201
	sc = txq->vtntx_sc;
2202
2203
	/*
2204
	 * The Tx interrupt is disabled until the queue free count falls
2205
	 * below our threshold. Completed frames are drained from the Tx
2206
	 * virtqueue before transmitting new frames and in the watchdog
2207
	 * callout, so the frequency of Tx interrupts is greatly reduced,
2208
	 * at the cost of not freeing mbufs as quickly as they otherwise
2209
	 * would be.
2210
	 */
2211
	threshold = virtqueue_size(txq->vtntx_vq) / 4;
2212
2213
	/*
2214
	 * Without indirect descriptors, leave enough room for the most
2215
	 * segments we handle.
2216
	 */
2217
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
2218
	    threshold < sc->vtnet_tx_nsegs)
2219
		threshold = sc->vtnet_tx_nsegs;
2220
2221
	return (threshold);
1932
}
2222
}
1933
2223
1934
static int
2224
static int
1935
vtnet_txq_below_threshold(struct vtnet_txq *txq)
2225
vtnet_txq_below_threshold(struct vtnet_txq *txq)
1936
{
2226
{
1937
	struct vtnet_softc *sc;
1938
	struct virtqueue *vq;
2227
	struct virtqueue *vq;
1939
2228
1940
	sc = txq->vtntx_sc;
1941
	vq = txq->vtntx_vq;
2229
	vq = txq->vtntx_vq;
1942
2230
1943
	return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh);
2231
	return (virtqueue_nfree(vq) <= txq->vtntx_intr_threshold);
1944
}
2232
}
1945
2233
1946
static int
2234
static int
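The threshold above trades interrupt rate for mbuf-free latency: the Tx interrupt stays off until less than a quarter of the ring is free. A compact sketch of the computation (same logic, detached from the softc):

static int
tx_intr_threshold(int vq_size, int tx_nsegs, int indirect)
{
	int threshold = vq_size / 4;

	/* Without indirect descriptors a single frame can consume
	 * tx_nsegs slots, so keep at least one frame's headroom. */
	if (!indirect && threshold < tx_nsegs)
		threshold = tx_nsegs;
	return (threshold);
}

For a 256-entry ring this re-arms the interrupt once fewer than 64 descriptors remain free.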
Lines 1975-1995 Link Here
1975
	struct virtqueue *vq;
2263
	struct virtqueue *vq;
1976
	struct vtnet_tx_header *txhdr;
2264
	struct vtnet_tx_header *txhdr;
1977
	int last;
2265
	int last;
1978
#ifdef DEV_NETMAP
1979
	int netmap_bufs = vtnet_netmap_queue_on(txq->vtntx_sc, NR_TX,
1980
						txq->vtntx_id);
1981
#else  /* !DEV_NETMAP */
1982
	int netmap_bufs = 0;
1983
#endif /* !DEV_NETMAP */
1984
2266
1985
	vq = txq->vtntx_vq;
2267
	vq = txq->vtntx_vq;
1986
	last = 0;
2268
	last = 0;
1987
2269
1988
	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
2270
	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
1989
		if (!netmap_bufs) {
2271
		m_freem(txhdr->vth_mbuf);
1990
			m_freem(txhdr->vth_mbuf);
2272
		uma_zfree(vtnet_tx_header_zone, txhdr);
1991
			uma_zfree(vtnet_tx_header_zone, txhdr);
1992
		}
1993
	}
2273
	}
1994
2274
1995
	KASSERT(virtqueue_empty(vq),
2275
	KASSERT(virtqueue_empty(vq),
Lines 1997-2008 Link Here
1997
}
2277
}
1998
2278
1999
/*
2279
/*
2000
 * BMV: Much of this can go away once we finally have offsets in
2280
 * BMV: This can go away once we finally have offsets in the mbuf header.
2001
 * the mbuf packet header. Bug andre@.
2002
 */
2281
 */
2003
static int
2282
static int
2004
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m,
2283
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, int *etype,
2005
    int *etype, int *proto, int *start)
2284
    int *proto, int *start)
2006
{
2285
{
2007
	struct vtnet_softc *sc;
2286
	struct vtnet_softc *sc;
2008
	struct ether_vlan_header *evh;
2287
	struct ether_vlan_header *evh;
Lines 2046-2052 Link Here
2046
		break;
2325
		break;
2047
#endif
2326
#endif
2048
	default:
2327
	default:
2049
		sc->vtnet_stats.tx_csum_bad_ethtype++;
2328
		sc->vtnet_stats.tx_csum_unknown_ethtype++;
2050
		return (EINVAL);
2329
		return (EINVAL);
2051
	}
2330
	}
2052
2331
Lines 2054-2060 Link Here
2054
}
2333
}
2055
2334
2056
static int
2335
static int
2057
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
2336
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int flags,
2058
    int offset, struct virtio_net_hdr *hdr)
2337
    int offset, struct virtio_net_hdr *hdr)
2059
{
2338
{
2060
	static struct timeval lastecn;
2339
	static struct timeval lastecn;
Lines 2070-2085 Link Here
2070
	} else
2349
	} else
2071
		tcp = (struct tcphdr *)(m->m_data + offset);
2350
		tcp = (struct tcphdr *)(m->m_data + offset);
2072
2351
2073
	hdr->hdr_len = offset + (tcp->th_off << 2);
2352
	hdr->hdr_len = vtnet_gtoh16(sc, offset + (tcp->th_off << 2));
2074
	hdr->gso_size = m->m_pkthdr.tso_segsz;
2353
	hdr->gso_size = vtnet_gtoh16(sc, m->m_pkthdr.tso_segsz);
2075
	hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
2354
	hdr->gso_type = (flags & CSUM_IP_TSO) ?
2076
	    VIRTIO_NET_HDR_GSO_TCPV6;
2355
	    VIRTIO_NET_HDR_GSO_TCPV4 : VIRTIO_NET_HDR_GSO_TCPV6;
2077
2356
2078
	if (tcp->th_flags & TH_CWR) {
2357
	if (__predict_false(tcp->th_flags & TH_CWR)) {
2079
		/*
2358
		/*
2080
		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
2359
		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In
2081
		 * ECN support is not on a per-interface basis, but globally via
2360
		 * FreeBSD, ECN support is not on a per-interface basis,
2082
		 * the net.inet.tcp.ecn.enable sysctl knob. The default is off.
2361
		 * but globally via the net.inet.tcp.ecn.enable sysctl
2362
		 * knob. The default is off.
2083
		 */
2363
		 */
2084
		if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
2364
		if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
2085
			if (ppsratecheck(&lastecn, &curecn, 1))
2365
			if (ppsratecheck(&lastecn, &curecn, 1))
Lines 2109-2138 Link Here
2109
	if (error)
2389
	if (error)
2110
		goto drop;
2390
		goto drop;
2111
2391
2112
	if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) ||
2392
	if (flags & (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6)) {
2113
	    (etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) {
2393
		/* Sanity check the parsed mbuf matches the offload flags. */
2114
		/*
2394
		if (__predict_false((flags & VTNET_CSUM_OFFLOAD &&
2115
		 * We could compare the IP protocol vs the CSUM_ flag too,
2395
		    etype != ETHERTYPE_IP) || (flags & VTNET_CSUM_OFFLOAD_IPV6
2116
		 * but that really should not be necessary.
2396
		    && etype != ETHERTYPE_IPV6))) {
2117
		 */
2397
			sc->vtnet_stats.tx_csum_proto_mismatch++;
2398
			goto drop;
2399
		}
2400
2118
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
2401
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
2119
		hdr->csum_start = csum_start;
2402
		hdr->csum_start = vtnet_gtoh16(sc, csum_start);
2120
		hdr->csum_offset = m->m_pkthdr.csum_data;
2403
		hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data);
2121
		txq->vtntx_stats.vtxs_csum++;
2404
		txq->vtntx_stats.vtxs_csum++;
2122
	}
2405
	}
2123
2406
2124
	if (flags & CSUM_TSO) {
2407
	if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
2408
		/*
2409
		 * Sanity check the parsed mbuf IP protocol is TCP, and
2410
		 * VirtIO TSO reqires the checksum offloading above.
2411
		 */
2125
		if (__predict_false(proto != IPPROTO_TCP)) {
2412
		if (__predict_false(proto != IPPROTO_TCP)) {
2126
			/* Likely failed to correctly parse the mbuf. */
2127
			sc->vtnet_stats.tx_tso_not_tcp++;
2413
			sc->vtnet_stats.tx_tso_not_tcp++;
2128
			goto drop;
2414
			goto drop;
2415
		} else if (__predict_false((hdr->flags &
2416
		    VIRTIO_NET_HDR_F_NEEDS_CSUM) == 0)) {
2417
			sc->vtnet_stats.tx_tso_without_csum++;
2418
			goto drop;
2129
		}
2419
		}
2130
2420
2131
		KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
2421
		error = vtnet_txq_offload_tso(txq, m, flags, csum_start, hdr);
2132
		    ("%s: mbuf %p TSO without checksum offload %#x",
2133
		    __func__, m, flags));
2134
2135
		error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
2136
		if (error)
2422
		if (error)
2137
			goto drop;
2423
			goto drop;
2138
	}
2424
	}
Lines 2161-2168 Link Here
2161
2447
2162
	sglist_reset(sg);
2448
	sglist_reset(sg);
2163
	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
2449
	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
2164
	KASSERT(error == 0 && sg->sg_nseg == 1,
2450
	if (error != 0 || sg->sg_nseg != 1) {
2165
	    ("%s: error %d adding header to sglist", __func__, error));
2451
		KASSERT(0, ("%s: cannot add header to sglist error %d nseg %d",
2452
		    __func__, error, sg->sg_nseg));
2453
		goto fail;
2454
	}
2166
2455
2167
	error = sglist_append_mbuf(sg, m);
2456
	error = sglist_append_mbuf(sg, m);
2168
	if (error) {
2457
	if (error) {
Lines 2210-2218 Link Here
2210
	}
2499
	}
2211
2500
2212
	/*
2501
	/*
2213
	 * Always use the non-mergeable header, regardless if the feature
2502
	 * Always use the non-mergeable header, regardless if mergable headers
2214
	 * was negotiated. For transmit, num_buffers is always zero. The
2503
	 * were negotiated, because for transmit num_buffers is always zero.
2215
	 * vtnet_hdr_size is used to enqueue the correct header size.
2504
	 * The vtnet_hdr_size is used to enqueue the right header size segment.
2216
	 */
2505
	 */
2217
	hdr = &txhdr->vth_uhdr.hdr;
2506
	hdr = &txhdr->vth_uhdr.hdr;
2218
2507
Lines 2234-2244 Link Here
2234
	}
2523
	}
2235
2524
2236
	error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
2525
	error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
2237
	if (error == 0)
2238
		return (0);
2239
2240
fail:
2526
fail:
2241
	uma_zfree(vtnet_tx_header_zone, txhdr);
2527
	if (error)
2528
		uma_zfree(vtnet_tx_header_zone, txhdr);
2242
2529
2243
	return (error);
2530
	return (error);
2244
}
2531
}
Lines 2387-2393 Link Here
2387
	sc = ifp->if_softc;
2674
	sc = ifp->if_softc;
2388
	npairs = sc->vtnet_act_vq_pairs;
2675
	npairs = sc->vtnet_act_vq_pairs;
2389
2676
2390
	/* check if flowid is set */
2391
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
2677
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
2392
		i = m->m_pkthdr.flowid % npairs;
2678
		i = m->m_pkthdr.flowid % npairs;
2393
	else
2679
	else
Lines 2477-2482 Link Here
2477
	deq = 0;
2763
	deq = 0;
2478
	VTNET_TXQ_LOCK_ASSERT(txq);
2764
	VTNET_TXQ_LOCK_ASSERT(txq);
2479
2765
2766
#ifdef DEV_NETMAP
2767
	if (netmap_tx_irq(txq->vtntx_sc->vtnet_ifp, txq->vtntx_id)) {
2768
		virtqueue_disable_intr(vq); // XXX luigi
2769
		return (0); // XXX or 1 ?
2770
	}
2771
#endif
2772
2480
	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
2773
	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
2481
		m = txhdr->vth_mbuf;
2774
		m = txhdr->vth_mbuf;
2482
		deq++;
2775
		deq++;
Lines 2518-2528 Link Here
2518
		return;
2811
		return;
2519
	}
2812
	}
2520
2813
2521
#ifdef DEV_NETMAP
2522
	if (netmap_tx_irq(ifp, txq->vtntx_id) != NM_IRQ_PASS)
2523
		return;
2524
#endif /* DEV_NETMAP */
2525
2526
	VTNET_TXQ_LOCK(txq);
2814
	VTNET_TXQ_LOCK(txq);
2527
2815
2528
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2816
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
Lines 2709-2715 Link Here
2709
	 * Most drivers just ignore the return value - it only fails
2997
	 * Most drivers just ignore the return value - it only fails
2710
	 * with ENOMEM so an error is not likely.
2998
	 * with ENOMEM so an error is not likely.
2711
	 */
2999
	 */
2712
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
3000
	for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
2713
		rxq = &sc->vtnet_rxqs[i];
3001
		rxq = &sc->vtnet_rxqs[i];
2714
		error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
3002
		error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
2715
		    "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
3003
		    "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
Lines 2739-2745 Link Here
2739
		rxq = &sc->vtnet_rxqs[i];
3027
		rxq = &sc->vtnet_rxqs[i];
2740
		if (rxq->vtnrx_tq != NULL) {
3028
		if (rxq->vtnrx_tq != NULL) {
2741
			taskqueue_free(rxq->vtnrx_tq);
3029
			taskqueue_free(rxq->vtnrx_tq);
2742
			rxq->vtnrx_tq = NULL;
3030
			rxq->vtnrx_vq = NULL;
2743
		}
3031
		}
2744
3032
2745
		txq = &sc->vtnet_txqs[i];
3033
		txq = &sc->vtnet_txqs[i];
Lines 2779-2785 Link Here
2779
	struct vtnet_txq *txq;
3067
	struct vtnet_txq *txq;
2780
	int i;
3068
	int i;
2781
3069
2782
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3070
#ifdef DEV_NETMAP
3071
	if (nm_native_on(NA(sc->vtnet_ifp)))
3072
		return;
3073
#endif
3074
3075
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2783
		rxq = &sc->vtnet_rxqs[i];
3076
		rxq = &sc->vtnet_rxqs[i];
2784
		vtnet_rxq_free_mbufs(rxq);
3077
		vtnet_rxq_free_mbufs(rxq);
2785
3078
Lines 2795-2805 Link Here
2795
	struct vtnet_txq *txq;
3088
	struct vtnet_txq *txq;
2796
	int i;
3089
	int i;
2797
3090
3091
	VTNET_CORE_LOCK_ASSERT(sc);
3092
2798
	/*
3093
	/*
2799
	 * Lock and unlock the per-queue mutex so we known the stop
3094
	 * Lock and unlock the per-queue mutex so we known the stop
2800
	 * state is visible. Doing only the active queues should be
3095
	 * state is visible. Doing only the active queues should be
2801
	 * sufficient, but it does not cost much extra to do all the
3096
	 * sufficient, but it does not cost much extra to do all the
2802
	 * queues. Note we hold the core mutex here too.
3097
	 * queues.
2803
	 */
3098
	 */
2804
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
3099
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2805
		rxq = &sc->vtnet_rxqs[i];
3100
		rxq = &sc->vtnet_rxqs[i];
Lines 2838-2845 Link Here
2838
	virtio_stop(dev);
3133
	virtio_stop(dev);
2839
	vtnet_stop_rendezvous(sc);
3134
	vtnet_stop_rendezvous(sc);
2840
3135
2841
	/* Free any mbufs left in the virtqueues. */
2842
	vtnet_drain_rxtx_queues(sc);
3136
	vtnet_drain_rxtx_queues(sc);
3137
	sc->vtnet_act_vq_pairs = 1;
2843
}
3138
}
2844
3139
2845
static int
3140
static int
Lines 2848-2898 Link Here
2848
	device_t dev;
3143
	device_t dev;
2849
	struct ifnet *ifp;
3144
	struct ifnet *ifp;
2850
	uint64_t features;
3145
	uint64_t features;
2851
	int mask, error;
3146
	int error;
2852
3147
2853
	dev = sc->vtnet_dev;
3148
	dev = sc->vtnet_dev;
2854
	ifp = sc->vtnet_ifp;
3149
	ifp = sc->vtnet_ifp;
2855
	features = sc->vtnet_features;
3150
	features = sc->vtnet_negotiated_features;
2856
3151
2857
	mask = 0;
2858
#if defined(INET)
2859
	mask |= IFCAP_RXCSUM;
2860
#endif
2861
#if defined (INET6)
2862
	mask |= IFCAP_RXCSUM_IPV6;
2863
#endif
2864
2865
	/*
3152
	/*
2866
	 * Re-negotiate with the host, removing any disabled receive
3153
	 * Re-negotiate with the host, removing any disabled receive
2867
	 * features. Transmit features are disabled only on our side
3154
	 * features. Transmit features are disabled only on our side
2868
	 * via if_capenable and if_hwassist.
3155
	 * via if_capenable and if_hwassist.
2869
	 */
3156
	 */
2870
3157
2871
	if (ifp->if_capabilities & mask) {
3158
	if ((ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0)
2872
		/*
3159
		features &= ~(VIRTIO_NET_F_GUEST_CSUM | VTNET_LRO_FEATURES);
2873
		 * We require both IPv4 and IPv6 offloading to be enabled
2874
		 * in order to negotiated it: VirtIO does not distinguish
2875
		 * between the two.
2876
		 */
2877
		if ((ifp->if_capenable & mask) != mask)
2878
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
2879
	}
2880
3160
2881
	if (ifp->if_capabilities & IFCAP_LRO) {
3161
	if ((ifp->if_capenable & IFCAP_LRO) == 0)
2882
		if ((ifp->if_capenable & IFCAP_LRO) == 0)
3162
		features &= ~VTNET_LRO_FEATURES;
2883
			features &= ~VTNET_LRO_FEATURES;
2884
	}
2885
3163
2886
	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
3164
	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
2887
		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3165
		features &= ~VIRTIO_NET_F_CTRL_VLAN;
2888
			features &= ~VIRTIO_NET_F_CTRL_VLAN;
2889
	}
2890
3166
2891
	error = virtio_reinit(dev, features);
3167
	error = virtio_reinit(dev, features);
2892
	if (error)
3168
	if (error) {
2893
		device_printf(dev, "virtio reinit error %d\n", error);
3169
		device_printf(dev, "virtio reinit error %d\n", error);
3170
		return (error);
3171
	}
2894
3172
2895
	return (error);
3173
	sc->vtnet_features = features;
3174
	virtio_reinit_complete(dev);
3175
3176
	return (0);
2896
}
3177
}
2897
3178
2898
static void
3179
static void
Lines 2903-2911 Link Here
2903
	ifp = sc->vtnet_ifp;
3184
	ifp = sc->vtnet_ifp;
2904
3185
2905
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
3186
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
2906
		/* Restore promiscuous and all-multicast modes. */
2907
		vtnet_rx_filter(sc);
3187
		vtnet_rx_filter(sc);
2908
		/* Restore filtered MAC addresses. */
2909
		vtnet_rx_filter_mac(sc);
3188
		vtnet_rx_filter_mac(sc);
2910
	}
3189
	}
2911
3190
Lines 2917-2948 Link Here
2917
vtnet_init_rx_queues(struct vtnet_softc *sc)
3196
vtnet_init_rx_queues(struct vtnet_softc *sc)
2918
{
3197
{
2919
	device_t dev;
3198
	device_t dev;
3199
	struct ifnet *ifp;
2920
	struct vtnet_rxq *rxq;
3200
	struct vtnet_rxq *rxq;
2921
	int i, clsize, error;
3201
	int i, clustersz, error;
2922
3202
2923
	dev = sc->vtnet_dev;
3203
	dev = sc->vtnet_dev;
3204
	ifp = sc->vtnet_ifp;
2924
3205
2925
	/*
3206
	clustersz = vtnet_rx_cluster_size(sc, ifp->if_mtu);
2926
	 * Use the new cluster size if one has been set (via a MTU
3207
	sc->vtnet_rx_clustersz = clustersz;
2927
	 * change). Otherwise, use the standard 2K clusters.
3208
2928
	 *
3209
	if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) {
2929
	 * BMV: It might make sense to use page sized clusters as
3210
		sc->vtnet_rx_nmbufs = howmany(sizeof(struct vtnet_rx_header) +
2930
	 * the default (depending on the features negotiated).
3211
		    VTNET_MAX_RX_SIZE, clustersz);
2931
	 */
3212
		KASSERT(sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
2932
	if (sc->vtnet_rx_new_clsize != 0) {
3213
		    ("%s: too many rx mbufs %d for %d segments", __func__,
2933
		clsize = sc->vtnet_rx_new_clsize;
3214
		    sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
2934
		sc->vtnet_rx_new_clsize = 0;
2935
	} else
3215
	} else
2936
		clsize = MCLBYTES;
3216
		sc->vtnet_rx_nmbufs = 1;
2937
3217
2938
	sc->vtnet_rx_clsize = clsize;
3218
#ifdef DEV_NETMAP
2939
	sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize);
3219
	if (vtnet_netmap_init_rx_buffers(sc))
3220
		return (0);
3221
#endif
2940
3222
2941
	KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS ||
2942
	    sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
2943
	    ("%s: too many rx mbufs %d for %d segments", __func__,
2944
	    sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
2945
2946
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3223
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2947
		rxq = &sc->vtnet_rxqs[i];
3224
		rxq = &sc->vtnet_rxqs[i];
2948
3225
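In the LRO_NOMRG case above each receive buffer is a pre-linked mbuf chain sized for a maximum coalesced frame. The sizing, sketched with assumed values (a 64KB maximum LRO frame over 2KB clusters):

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

static int
rx_nmbufs_lro_nomrg(int hdr_size, int max_rx, int clustersz)
{
	/* e.g. howmany(10 + 65536, 2048) == 33 clusters per chain */
	return (howmany(hdr_size + max_rx, clustersz));
}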
Lines 2952-2959 Link Here
2952
		VTNET_RXQ_UNLOCK(rxq);
3229
		VTNET_RXQ_UNLOCK(rxq);
2953
3230
2954
		if (error) {
3231
		if (error) {
2955
			device_printf(dev,
3232
			device_printf(dev, "cannot populate Rx queue %d\n", i);
2956
			    "cannot allocate mbufs for Rx queue %d\n", i);
2957
			return (error);
3233
			return (error);
2958
		}
3234
		}
2959
	}
3235
	}
Lines 2970-2975 Link Here
2970
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3246
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2971
		txq = &sc->vtnet_txqs[i];
3247
		txq = &sc->vtnet_txqs[i];
2972
		txq->vtntx_watchdog = 0;
3248
		txq->vtntx_watchdog = 0;
3249
		txq->vtntx_intr_threshold = vtnet_txq_intr_threshold(txq);
2973
	}
3250
	}
2974
3251
2975
	return (0);
3252
	return (0);
Lines 2999-3034 Link Here
2999
3276
3000
	dev = sc->vtnet_dev;
3277
	dev = sc->vtnet_dev;
3001
3278
3002
	if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) {
3279
	if ((sc->vtnet_flags & VTNET_FLAG_MQ) == 0) {
3003
		sc->vtnet_act_vq_pairs = 1;
3280
		sc->vtnet_act_vq_pairs = 1;
3004
		return;
3281
		return;
3005
	}
3282
	}
3006
3283
3007
	npairs = sc->vtnet_requested_vq_pairs;
3284
	npairs = sc->vtnet_req_vq_pairs;
3008
3285
3009
	if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
3286
	if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
3010
		device_printf(dev,
3287
		device_printf(dev, "cannot set active queue pairs to %d, "
3011
		    "cannot set active queue pairs to %d\n", npairs);
3288
		    "falling back to 1 queue pair\n", npairs);
3012
		npairs = 1;
3289
		npairs = 1;
3013
	}
3290
	}
3014
3291
3015
	sc->vtnet_act_vq_pairs = npairs;
3292
	sc->vtnet_act_vq_pairs = npairs;
3016
}
3293
}
3017
3294
3295
static void
3296
vtnet_update_rx_offloads(struct vtnet_softc *sc)
3297
{
3298
	struct ifnet *ifp;
3299
	uint64_t features;
3300
	int error;
3301
3302
	ifp = sc->vtnet_ifp;
3303
	features = sc->vtnet_features;
3304
3305
	VTNET_CORE_LOCK_ASSERT(sc);
3306
3307
	if (ifp->if_capabilities & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
3308
		if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
3309
			features |= VIRTIO_NET_F_GUEST_CSUM;
3310
		else
3311
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
3312
	}
3313
3314
	if (ifp->if_capabilities & IFCAP_LRO && !vtnet_software_lro(sc)) {
3315
		if (ifp->if_capenable & IFCAP_LRO)
3316
			features |= VTNET_LRO_FEATURES;
3317
		else
3318
			features &= ~VTNET_LRO_FEATURES;
3319
	}
3320
3321
	error = vtnet_ctrl_guest_offloads(sc,
3322
	    features & (VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 |
3323
		        VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN  |
3324
			VIRTIO_NET_F_GUEST_UFO));
3325
	if (error) {
3326
		device_printf(sc->vtnet_dev,
3327
		    "%s: cannot update Rx features\n", __func__);
3328
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3329
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3330
			vtnet_init_locked(sc);
3331
		}
3332
	} else
3333
		sc->vtnet_features = features;
3334
}
3335
3018
static int
3336
static int
3019
vtnet_reinit(struct vtnet_softc *sc)
3337
vtnet_reinit(struct vtnet_softc *sc)
3020
{
3338
{
3339
	device_t dev;
3021
	struct ifnet *ifp;
3340
	struct ifnet *ifp;
3022
	int error;
3341
	int error;
3023
3342
3343
	dev = sc->vtnet_dev;
3024
	ifp = sc->vtnet_ifp;
3344
	ifp = sc->vtnet_ifp;
3025
3345
3026
	/* Use the current MAC address. */
3027
	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
3346
	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
3028
	vtnet_set_hwaddr(sc);
3029
3347
3348
	error = vtnet_virtio_reinit(sc);
3349
	if (error)
3350
		return (error);
3351
3352
	vtnet_set_macaddr(sc);
3030
	vtnet_set_active_vq_pairs(sc);
3353
	vtnet_set_active_vq_pairs(sc);
3031
3354
3355
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
3356
		vtnet_init_rx_filters(sc);
3357
3032
	ifp->if_hwassist = 0;
3358
	ifp->if_hwassist = 0;
3033
	if (ifp->if_capenable & IFCAP_TXCSUM)
3359
	if (ifp->if_capenable & IFCAP_TXCSUM)
3034
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
3360
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
Lines 3039-3054 Link Here
3039
	if (ifp->if_capenable & IFCAP_TSO6)
3365
	if (ifp->if_capenable & IFCAP_TSO6)
3040
		ifp->if_hwassist |= CSUM_IP6_TSO;
3366
		ifp->if_hwassist |= CSUM_IP6_TSO;
3041
3367
3042
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
3043
		vtnet_init_rx_filters(sc);
3044
3045
	error = vtnet_init_rxtx_queues(sc);
3368
	error = vtnet_init_rxtx_queues(sc);
3046
	if (error)
3369
	if (error)
3047
		return (error);
3370
		return (error);
3048
3371
3049
	vtnet_enable_interrupts(sc);
3050
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3051
3052
	return (0);
3372
	return (0);
3053
}
3373
}
3054
3374
Lines 3068-3089 Link Here
3068
3388
3069
	vtnet_stop(sc);
3389
	vtnet_stop(sc);
3070
3390
3071
	/* Reinitialize with the host. */
3391
	if (vtnet_reinit(sc) != 0) {
3072
	if (vtnet_virtio_reinit(sc) != 0)
3392
		vtnet_stop(sc);
3073
		goto fail;
3393
		return;
3394
	}
3074
3395
3075
	if (vtnet_reinit(sc) != 0)
3396
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3076
		goto fail;
3077
3078
	virtio_reinit_complete(dev);
3079
3080
	vtnet_update_link_status(sc);
3397
	vtnet_update_link_status(sc);
3398
	vtnet_enable_interrupts(sc);
3081
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
3399
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
3082
3083
	return;
3084
3085
fail:
3086
	vtnet_stop(sc);
3087
}
3400
}
3088
3401
3089
static void
3402
static void
Lines 3093-3098 Link Here
3093
3406
3094
	sc = xsc;
3407
	sc = xsc;
3095
3408
3409
#ifdef DEV_NETMAP
3410
	if (!NA(sc->vtnet_ifp)) {
3411
		D("try to attach again");
3412
		vtnet_netmap_attach(sc);
3413
	}
3414
#endif
3415
3096
	VTNET_CORE_LOCK(sc);
3416
	VTNET_CORE_LOCK(sc);
3097
	vtnet_init_locked(sc);
3417
	vtnet_init_locked(sc);
3098
	VTNET_CORE_UNLOCK(sc);
3418
	VTNET_CORE_UNLOCK(sc);
Lines 3101-3116 Link Here
3101
static void
3421
static void
3102
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
3422
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
3103
{
3423
{
3104
	struct virtqueue *vq;
3105
3424
3106
	vq = sc->vtnet_ctrl_vq;
3107
3108
	/*
3425
	/*
3109
	 * The control virtqueue is only polled and therefore it should
3426
	 * The control virtqueue is only polled and therefore it should
3110
	 * already be empty.
3427
	 * already be empty.
3111
	 */
3428
	 */
3112
	KASSERT(virtqueue_empty(vq),
3429
	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
3113
	    ("%s: ctrl vq %p not empty", __func__, vq));
3430
	    ("%s: ctrl vq %p not empty", __func__, sc->vtnet_ctrl_vq));
3114
}
3431
}
3115
3432
3116
static void
3433
static void
Lines 3121-3167 Link Here
3121
3438
3122
	vq = sc->vtnet_ctrl_vq;
3439
	vq = sc->vtnet_ctrl_vq;
3123
3440
3441
	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ);
3124
	VTNET_CORE_LOCK_ASSERT(sc);
3442
	VTNET_CORE_LOCK_ASSERT(sc);
3125
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
3126
	    ("%s: CTRL_VQ feature not negotiated", __func__));
3127
3443
3128
	if (!virtqueue_empty(vq))
3444
	if (!virtqueue_empty(vq))
3129
		return;
3445
		return;
3130
	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
3131
		return;
3132
3446
3133
	/*
3447
	/*
3134
	 * Poll for the response, but the command is likely already
3448
	 * Poll for the response, but the command is likely completed before
3135
	 * done when we return from the notify.
3449
	 * returning from the notify.
3136
	 */
3450
	 */
3137
	virtqueue_notify(vq);
3451
	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) == 0)  {
3138
	virtqueue_poll(vq, NULL);
3452
		virtqueue_notify(vq);
3453
		virtqueue_poll(vq, NULL);
3454
	}
3139
}
3455
}
3140
3456
3141
static int
3457
static int
3142
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
3458
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
3143
{
3459
{
3144
	struct virtio_net_ctrl_hdr hdr __aligned(2);
3145
	struct sglist_seg segs[3];
3460
	struct sglist_seg segs[3];
3146
	struct sglist sg;
3461
	struct sglist sg;
3147
	uint8_t ack;
3462
	struct {
3463
		struct virtio_net_ctrl_hdr hdr __aligned(2);
3464
		uint8_t pad1;
3465
		uint8_t addr[ETHER_ADDR_LEN] __aligned(8);
3466
		uint8_t pad2;
3467
		uint8_t ack;
3468
	} s;
3148
	int error;
3469
	int error;
3149
3470
3150
	hdr.class = VIRTIO_NET_CTRL_MAC;
3471
	error = 0;
3151
	hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
3472
	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_MAC);
3152
	ack = VIRTIO_NET_ERR;
3153
3473
3154
	sglist_init(&sg, 3, segs);
3474
	s.hdr.class = VIRTIO_NET_CTRL_MAC;
3475
	s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
3476
	bcopy(hwaddr, &s.addr[0], ETHER_ADDR_LEN);
3477
	s.ack = VIRTIO_NET_ERR;
3478
3479
	sglist_init(&sg, nitems(segs), segs);
3480
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3481
	error |= sglist_append(&sg, &s.addr[0], ETHER_ADDR_LEN);
3482
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3483
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3484
3485
	if (error == 0)
3486
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3487
3488
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3489
}
3490
3491
static int
3492
vtnet_ctrl_guest_offloads(struct vtnet_softc *sc, uint64_t offloads)
3493
{
3494
	struct sglist_seg segs[3];
3495
	struct sglist sg;
3496
	struct {
3497
		struct virtio_net_ctrl_hdr hdr __aligned(2);
3498
		uint8_t pad1;
3499
		uint64_t offloads __aligned(8);
3500
		uint8_t pad2;
3501
		uint8_t ack;
3502
	} s;
3503
	int error;
3504
3155
	error = 0;
3505
	error = 0;
3156
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3506
	MPASS(sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
3157
	error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN);
3158
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3159
	KASSERT(error == 0 && sg.sg_nseg == 3,
3160
	    ("%s: error %d adding set MAC msg to sglist", __func__, error));
3161
3507
3162
	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3508
	s.hdr.class = VIRTIO_NET_CTRL_GUEST_OFFLOADS;
3509
	s.hdr.cmd = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET;
3510
	s.offloads = vtnet_gtoh64(sc, offloads);
3511
	s.ack = VIRTIO_NET_ERR;
3163
3512
3164
	return (ack == VIRTIO_NET_OK ? 0 : EIO);
3513
	sglist_init(&sg, nitems(segs), segs);
3514
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3515
	error |= sglist_append(&sg, &s.offloads, sizeof(uint64_t));
3516
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3517
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3518
3519
	if (error == 0)
3520
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3521
3522
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3165
}
3523
}
3166
3524
3167
static int
3525
static int
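Every control command in this patch now shares the same on-stack shape: a device-readable header and payload followed by one device-writable ack byte, each appended as its own sglist segment, which is why each call passes sg.sg_nseg - 1 readable and 1 writable segment to vtnet_exec_ctrl_cmd(). A stand-in declaration of that shape (the driver's real structs add __aligned() qualifiers):

#include <stdint.h>

struct ctrl_cmd_sketch {
	uint8_t  class;		/* e.g. VIRTIO_NET_CTRL_MQ */
	uint8_t  cmd;		/* e.g. VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET */
	uint8_t  pad1;		/* keeps payload in its own segment */
	uint16_t payload;	/* e.g. the requested queue pair count */
	uint8_t  pad2;
	uint8_t  ack;		/* device writes VIRTIO_NET_OK or _ERR */
};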
Lines 3170-3232 Link Here
3170
	struct sglist_seg segs[3];
3528
	struct sglist_seg segs[3];
3171
	struct sglist sg;
3529
	struct sglist sg;
3172
	struct {
3530
	struct {
3173
		struct virtio_net_ctrl_hdr hdr;
3531
		struct virtio_net_ctrl_hdr hdr __aligned(2);
3174
		uint8_t pad1;
3532
		uint8_t pad1;
3175
		struct virtio_net_ctrl_mq mq;
3533
		struct virtio_net_ctrl_mq mq __aligned(2);
3176
		uint8_t pad2;
3534
		uint8_t pad2;
3177
		uint8_t ack;
3535
		uint8_t ack;
3178
	} s __aligned(2);
3536
	} s;
3179
	int error;
3537
	int error;
3180
3538
3539
	error = 0;
3540
	MPASS(sc->vtnet_flags & VTNET_FLAG_MQ);
3541
3181
	s.hdr.class = VIRTIO_NET_CTRL_MQ;
3542
	s.hdr.class = VIRTIO_NET_CTRL_MQ;
3182
	s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
3543
	s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
3183
	s.mq.virtqueue_pairs = npairs;
3544
	s.mq.virtqueue_pairs = vtnet_gtoh16(sc, npairs);
3184
	s.ack = VIRTIO_NET_ERR;
3545
	s.ack = VIRTIO_NET_ERR;
3185
3546
3186
	sglist_init(&sg, 3, segs);
3547
	sglist_init(&sg, nitems(segs), segs);
3187
	error = 0;
3188
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3548
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3189
	error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
3549
	error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
3190
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3550
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3191
	KASSERT(error == 0 && sg.sg_nseg == 3,
3551
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3192
	    ("%s: error %d adding MQ message to sglist", __func__, error));
3193
3552
3194
	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3553
	if (error == 0)
3554
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3195
3555
3196
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3556
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3197
}
3557
}
3198
3558
3199
static int
3559
static int
3200
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
3560
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, uint8_t cmd, int on)
3201
{
3561
{
3202
	struct sglist_seg segs[3];
3562
	struct sglist_seg segs[3];
3203
	struct sglist sg;
3563
	struct sglist sg;
3204
	struct {
3564
	struct {
3205
		struct virtio_net_ctrl_hdr hdr;
3565
		struct virtio_net_ctrl_hdr hdr __aligned(2);
3206
		uint8_t pad1;
3566
		uint8_t pad1;
3207
		uint8_t onoff;
3567
		uint8_t onoff;
3208
		uint8_t pad2;
3568
		uint8_t pad2;
3209
		uint8_t ack;
3569
		uint8_t ack;
3210
	} s __aligned(2);
3570
	} s;
3211
	int error;
3571
	int error;
3212
3572
3213
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
3573
	error = 0;
3214
	    ("%s: CTRL_RX feature not negotiated", __func__));
3574
	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX);
3215
3575
3216
	s.hdr.class = VIRTIO_NET_CTRL_RX;
3576
	s.hdr.class = VIRTIO_NET_CTRL_RX;
3217
	s.hdr.cmd = cmd;
3577
	s.hdr.cmd = cmd;
3218
	s.onoff = !!on;
3578
	s.onoff = !!on;
3219
	s.ack = VIRTIO_NET_ERR;
3579
	s.ack = VIRTIO_NET_ERR;
3220
3580
3221
	sglist_init(&sg, 3, segs);
3581
	sglist_init(&sg, nitems(segs), segs);
3222
	error = 0;
3223
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3582
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3224
	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
3583
	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
3225
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3584
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3226
	KASSERT(error == 0 && sg.sg_nseg == 3,
3585
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3227
	    ("%s: error %d adding Rx message to sglist", __func__, error));
3228
3586
3229
	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3587
	if (error == 0)
3588
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3230
3589
3231
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3590
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3232
}
3591
}
Lines 3234-3273 Link Here
3234
static int
3593
static int
3235
vtnet_set_promisc(struct vtnet_softc *sc, int on)
3594
vtnet_set_promisc(struct vtnet_softc *sc, int on)
3236
{
3595
{
3237
3238
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
3596
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
3239
}
3597
}
3240
3598
3241
static int
3599
static int
3242
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
3600
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
3243
{
3601
{
3244
3245
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
3602
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
3246
}
3603
}
3247
3604
3248
/*
3249
 * The device defaults to promiscuous mode for backwards compatibility.
3250
 * Turn it off at attach time if possible.
3251
 */
3252
static void
3605
static void
3253
vtnet_attach_disable_promisc(struct vtnet_softc *sc)
3254
{
3255
	struct ifnet *ifp;
3256
3257
	ifp = sc->vtnet_ifp;
3258
3259
	VTNET_CORE_LOCK(sc);
3260
	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) {
3261
		ifp->if_flags |= IFF_PROMISC;
3262
	} else if (vtnet_set_promisc(sc, 0) != 0) {
3263
		ifp->if_flags |= IFF_PROMISC;
3264
		device_printf(sc->vtnet_dev,
3265
		    "cannot disable default promiscuous mode\n");
3266
	}
3267
	VTNET_CORE_UNLOCK(sc);
3268
}
3269
3270
static void
3271
vtnet_rx_filter(struct vtnet_softc *sc)
3606
vtnet_rx_filter(struct vtnet_softc *sc)
3272
{
3607
{
3273
	device_t dev;
3608
	device_t dev;
Lines 3278-3290 Link Here
3278
3613
3279
	VTNET_CORE_LOCK_ASSERT(sc);
3614
	VTNET_CORE_LOCK_ASSERT(sc);
3280
3615
3281
	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
3616
	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) {
3282
		device_printf(dev, "cannot %s promiscuous mode\n",
3617
		device_printf(dev, "cannot %s promiscuous mode\n",
3283
		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
3618
		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
3619
	}
3284
3620
3285
	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
3621
	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) {
3286
		device_printf(dev, "cannot %s all-multicast mode\n",
3622
		device_printf(dev, "cannot %s all-multicast mode\n",
3287
		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
3623
		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
3624
	}
3288
}
3625
}
3289
3626
3290
static void
3627
static void
Lines 3302-3315 Link Here
3302
3639
3303
	ifp = sc->vtnet_ifp;
3640
	ifp = sc->vtnet_ifp;
3304
	filter = sc->vtnet_mac_filter;
3641
	filter = sc->vtnet_mac_filter;
3642
3305
	ucnt = 0;
3643
	ucnt = 0;
3306
	mcnt = 0;
3644
	mcnt = 0;
3307
	promisc = 0;
3645
	promisc = 0;
3308
	allmulti = 0;
3646
	allmulti = 0;
3647
	error = 0;
3309
3648
3649
	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX);
3310
	VTNET_CORE_LOCK_ASSERT(sc);
3650
	VTNET_CORE_LOCK_ASSERT(sc);
3311
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
3312
	    ("%s: CTRL_RX feature not negotiated", __func__));
3313
3651
3314
	/* Unicast MAC addresses: */
3652
	/* Unicast MAC addresses: */
3315
	if_addr_rlock(ifp);
3653
	if_addr_rlock(ifp);
Lines 3330-3343 Link Here
3330
	}
3668
	}
3331
	if_addr_runlock(ifp);
3669
	if_addr_runlock(ifp);
3332
3670
3333
	if (promisc != 0) {
3334
		filter->vmf_unicast.nentries = 0;
3335
		if_printf(ifp, "more than %d MAC addresses assigned, "
3336
		    "falling back to promiscuous mode\n",
3337
		    VTNET_MAX_MAC_ENTRIES);
3338
	} else
3339
		filter->vmf_unicast.nentries = ucnt;
3340
3341
	/* Multicast MAC addresses: */
3671
	/* Multicast MAC addresses: */
3342
	if_maddr_rlock(ifp);
3672
	if_maddr_rlock(ifp);
3343
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3673
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
Lines 3354-3387 Link Here
3354
	}
3684
	}
3355
	if_maddr_runlock(ifp);
3685
	if_maddr_runlock(ifp);
3356
3686
3687
	if (promisc != 0) {
3688
		if_printf(ifp, "cannot filter more than %d MAC addresses, "
3689
		    "falling back to promiscuous mode\n",
3690
		    VTNET_MAX_MAC_ENTRIES);
3691
		ucnt = 0;
3692
	}
3357
	if (allmulti != 0) {
3693
	if (allmulti != 0) {
3358
		filter->vmf_multicast.nentries = 0;
3694
		if_printf(ifp, "cannot filter more than %d multicast MAC "
3359
		if_printf(ifp, "more than %d multicast MAC addresses "
3695
		    "addresses, falling back to all-multicast mode\n",
3360
		    "assigned, falling back to all-multicast mode\n",
3361
		    VTNET_MAX_MAC_ENTRIES);
3696
		    VTNET_MAX_MAC_ENTRIES);
3362
	} else
3697
		mcnt = 0;
3363
		filter->vmf_multicast.nentries = mcnt;
3698
	}
3364
3699
3365
	if (promisc != 0 && allmulti != 0)
3700
	if (promisc != 0 && allmulti != 0)
3366
		goto out;
3701
		goto out;
3367
3702
3703
	filter->vmf_unicast.nentries = vtnet_gtoh32(sc, ucnt);
3704
	filter->vmf_multicast.nentries = vtnet_gtoh32(sc, mcnt);
3705
3368
	hdr.class = VIRTIO_NET_CTRL_MAC;
3706
	hdr.class = VIRTIO_NET_CTRL_MAC;
3369
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
3707
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
3370
	ack = VIRTIO_NET_ERR;
3708
	ack = VIRTIO_NET_ERR;
3371
3709
3372
	sglist_init(&sg, 4, segs);
3710
	sglist_init(&sg, nitems(segs), segs);
3373
	error = 0;
3374
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3711
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3375
	error |= sglist_append(&sg, &filter->vmf_unicast,
3712
	error |= sglist_append(&sg, &filter->vmf_unicast,
3376
	    sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
3713
	    sizeof(uint32_t) + ucnt * ETHER_ADDR_LEN);
3377
	error |= sglist_append(&sg, &filter->vmf_multicast,
3714
	error |= sglist_append(&sg, &filter->vmf_multicast,
3378
	    sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
3715
	    sizeof(uint32_t) + mcnt * ETHER_ADDR_LEN);
3379
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3716
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3380
	KASSERT(error == 0 && sg.sg_nseg == 4,
3717
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3381
	    ("%s: error %d adding MAC filter msg to sglist", __func__, error));
3382
3718
3383
	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3719
	if (error == 0)
3384
3720
		vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3385
	if (ack != VIRTIO_NET_OK)
3721
	if (ack != VIRTIO_NET_OK)
3386
		if_printf(ifp, "error setting host MAC filter table\n");
3722
		if_printf(ifp, "error setting host MAC filter table\n");
3387
3723
Lines 3398-3425 Link Here
3398
	struct sglist_seg segs[3];
3734
	struct sglist_seg segs[3];
3399
	struct sglist sg;
3735
	struct sglist sg;
3400
	struct {
3736
	struct {
3401
		struct virtio_net_ctrl_hdr hdr;
3737
		struct virtio_net_ctrl_hdr hdr __aligned(2);
3402
		uint8_t pad1;
3738
		uint8_t pad1;
3403
		uint16_t tag;
3739
		uint16_t tag __aligned(2);
3404
		uint8_t pad2;
3740
		uint8_t pad2;
3405
		uint8_t ack;
3741
		uint8_t ack;
3406
	} s __aligned(2);
3742
	} s;
3407
	int error;
3743
	int error;
3408
3744
3745
	error = 0;
3746
	MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER);
3747
3409
	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
3748
	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
3410
	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
3749
	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
3411
	s.tag = tag;
3750
	s.tag = vtnet_gtoh16(sc, tag);
3412
	s.ack = VIRTIO_NET_ERR;
3751
	s.ack = VIRTIO_NET_ERR;
3413
3752
3414
	sglist_init(&sg, 3, segs);
3753
	sglist_init(&sg, nitems(segs), segs);
3415
	error = 0;
3416
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3754
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3417
	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
3755
	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
3418
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3756
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3419
	KASSERT(error == 0 && sg.sg_nseg == 3,
3757
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3420
	    ("%s: error %d adding VLAN message to sglist", __func__, error));
3421
3758
3422
	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3759
	if (error == 0)
3760
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3423
3761
3424
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3762
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3425
}
3763
}
Lines 3427-3439 Link Here
3427
static void
3765
static void
3428
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
3766
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
3429
{
3767
{
3768
	int i, bit;
3430
	uint32_t w;
3769
	uint32_t w;
3431
	uint16_t tag;
3770
	uint16_t tag;
3432
	int i, bit;
3433
3771
3772
	MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER);
3434
	VTNET_CORE_LOCK_ASSERT(sc);
3773
	VTNET_CORE_LOCK_ASSERT(sc);
3435
	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
3436
	    ("%s: VLAN_FILTER feature not negotiated", __func__));
3437
3774
3438
	/* Enable the filter for each configured VLAN. */
3775
	/* Enable the filter for each configured VLAN. */
3439
	for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
3776
	for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
Lines 3502-3522 Link Here
3502
	vtnet_update_vlan_filter(arg, 0, tag);
3839
	vtnet_update_vlan_filter(arg, 0, tag);
3503
}
3840
}
3504
3841
3842
static void
3843
vtnet_update_speed_duplex(struct vtnet_softc *sc)
3844
{
3845
	struct ifnet *ifp;
3846
	uint32_t speed;
3847
3848
	ifp = sc->vtnet_ifp;
3849
3850
	if ((sc->vtnet_features & VIRTIO_NET_F_SPEED_DUPLEX) == 0)
3851
		return;
3852
3853
	/* BMV: Ignore duplex. */
3854
	speed = virtio_read_dev_config_4(sc->vtnet_dev,
3855
	    offsetof(struct virtio_net_config, speed));
3856
	if (speed != -1)
3857
		ifp->if_baudrate = IF_Mbps(speed);
3858
}
3859
3505
static int
3860
static int
3506
vtnet_is_link_up(struct vtnet_softc *sc)
3861
vtnet_is_link_up(struct vtnet_softc *sc)
3507
{
3862
{
3508
	device_t dev;
3509
	struct ifnet *ifp;
3510
	uint16_t status;
3863
	uint16_t status;
3511
3864
3512
	dev = sc->vtnet_dev;
3865
	if ((sc->vtnet_features & VIRTIO_NET_F_STATUS) == 0)
3513
	ifp = sc->vtnet_ifp;
3866
		return (1);
3514
3867
3515
	if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0)
3868
	status = virtio_read_dev_config_2(sc->vtnet_dev,
3516
		status = VIRTIO_NET_S_LINK_UP;
3869
	    offsetof(struct virtio_net_config, status));
3517
	else
3518
		status = virtio_read_dev_config_2(dev,
3519
		    offsetof(struct virtio_net_config, status));
3520
3870
3521
	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
3871
	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
3522
}
3872
}
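Both vtnet_update_speed_duplex() and vtnet_is_link_up() above index the device config space with offsetof(). The layout they index into, per the VirtIO spec (a sketch; the wire layout is packed and little-endian, and the real struct lives in the VirtIO headers):

#include <stdint.h>

struct virtio_net_config_sketch {
	uint8_t  mac[6];		/* valid with VIRTIO_NET_F_MAC */
	uint16_t status;		/* VIRTIO_NET_S_LINK_UP, ... */
	uint16_t max_virtqueue_pairs;	/* with F_MQ */
	uint16_t mtu;			/* with F_MTU */
	uint32_t speed;			/* Mbps; 0xffffffff = unknown */
	uint8_t  duplex;		/* with F_SPEED_DUPLEX */
};

The speed != -1 test above is checking for that 0xffffffff "unknown" sentinel.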
Lines 3528-3539 Link Here
3528
	int link;
3878
	int link;
3529
3879
3530
	ifp = sc->vtnet_ifp;
3880
	ifp = sc->vtnet_ifp;
3531
3532
	VTNET_CORE_LOCK_ASSERT(sc);
3881
	VTNET_CORE_LOCK_ASSERT(sc);
3533
	link = vtnet_is_link_up(sc);
3882
	link = vtnet_is_link_up(sc);
3534
3883
3535
	/* Notify if the link status has changed. */
3884
	/* Notify if the link status has changed. */
3536
	if (link != 0 && sc->vtnet_link_active == 0) {
3885
	if (link != 0 && sc->vtnet_link_active == 0) {
3886
		vtnet_update_speed_duplex(sc);
3537
		sc->vtnet_link_active = 1;
3887
		sc->vtnet_link_active = 1;
3538
		if_link_state_change(ifp, LINK_STATE_UP);
3888
		if_link_state_change(ifp, LINK_STATE_UP);
3539
	} else if (link == 0 && sc->vtnet_link_active != 0) {
3889
	} else if (link == 0 && sc->vtnet_link_active != 0) {
Lines 3545-3560 Link Here
3545
static int
3895
static int
3546
vtnet_ifmedia_upd(struct ifnet *ifp)
3896
vtnet_ifmedia_upd(struct ifnet *ifp)
3547
{
3897
{
3548
	struct vtnet_softc *sc;
3898
	return (EOPNOTSUPP);
3549
	struct ifmedia *ifm;
3550
3551
	sc = ifp->if_softc;
3552
	ifm = &sc->vtnet_media;
3553
3554
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3555
		return (EINVAL);
3556
3557
	return (0);
3558
}
3899
}
3559
3900
3560
static void
3901
static void
Lines 3570-3594 Link Here
3570
	VTNET_CORE_LOCK(sc);
3911
	VTNET_CORE_LOCK(sc);
3571
	if (vtnet_is_link_up(sc) != 0) {
3912
	if (vtnet_is_link_up(sc) != 0) {
3572
		ifmr->ifm_status |= IFM_ACTIVE;
3913
		ifmr->ifm_status |= IFM_ACTIVE;
3573
		ifmr->ifm_active |= VTNET_MEDIATYPE;
3914
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
3574
	} else
3915
	} else
3575
		ifmr->ifm_active |= IFM_NONE;
3916
		ifmr->ifm_active |= IFM_NONE;
3576
	VTNET_CORE_UNLOCK(sc);
3917
	VTNET_CORE_UNLOCK(sc);
3577
}
3918
}
3578
3919
3579
static void
3920
static void
3580
vtnet_set_hwaddr(struct vtnet_softc *sc)
3921
vtnet_get_macaddr(struct vtnet_softc *sc)
3581
{
3922
{
3923
3924
	if (sc->vtnet_flags & VTNET_FLAG_MAC) {
3925
		virtio_read_device_config_array(sc->vtnet_dev,
3926
		    offsetof(struct virtio_net_config, mac),
3927
		    &sc->vtnet_hwaddr[0], sizeof(uint8_t), ETHER_ADDR_LEN);
3928
	} else {
3929
		/* Generate a random locally administered unicast address. */
3930
		sc->vtnet_hwaddr[0] = 0xB2;
3931
		arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
3932
	}
3933
}
3934
3935
static void
3936
vtnet_set_macaddr(struct vtnet_softc *sc)
3937
{
3582
	device_t dev;
3938
	device_t dev;
3583
	int i;
3939
	int error;
3584
3940
3585
	dev = sc->vtnet_dev;
3941
	dev = sc->vtnet_dev;
3586
3942
3587
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
3943
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
3588
		if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
3944
		error = vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr);
3945
		if (error)
3589
			device_printf(dev, "unable to set MAC address\n");
3946
			device_printf(dev, "unable to set MAC address\n");
3590
	} else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
3947
		return;
3591
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
3948
	}
3949
3950
	/* MAC in config is read-only in modern VirtIO. */
3951
	if (!vtnet_modern(sc) && sc->vtnet_flags & VTNET_FLAG_MAC) {
3952
		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
3592
			virtio_write_dev_config_1(dev,
3953
			virtio_write_dev_config_1(dev,
3593
			    offsetof(struct virtio_net_config, mac) + i,
3954
			    offsetof(struct virtio_net_config, mac) + i,
3594
			    sc->vtnet_hwaddr[i]);
3955
			    sc->vtnet_hwaddr[i]);
Lines 3597-3627 Link Here
3597
}
3958
}
3598
3959
3599
static void
3960
static void
3600
vtnet_get_hwaddr(struct vtnet_softc *sc)
3961
vtnet_attached_set_macaddr(struct vtnet_softc *sc)
3601
{
3962
{
3602
	device_t dev;
3603
	int i;
3604
3963
3605
	dev = sc->vtnet_dev;
3964
	/* Assign MAC address if it was generated. */
3606
3965
	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0)
3607
	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
3966
		vtnet_set_macaddr(sc);
3608
		/*
3609
		 * Generate a random locally administered unicast address.
3610
		 *
3611
		 * It would be nice to generate the same MAC address across
3612
		 * reboots, but it seems all the hosts currently available
3613
		 * support the MAC feature, so this isn't too important.
3614
		 */
3615
		sc->vtnet_hwaddr[0] = 0xB2;
3616
		arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
3617
		vtnet_set_hwaddr(sc);
3618
		return;
3619
	}
3620
3621
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
3622
		sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev,
3623
		    offsetof(struct virtio_net_config, mac) + i);
3624
	}
3625
}
3967
}
3626
3968
3627
static void
3969
static void
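The 0xB2 first octet used for generated addresses is deliberate: bit 1 set marks the address locally administered and bit 0 clear keeps it unicast. A userland sketch of the generation (arc4random() is libc's counterpart of the kernel arc4rand() used above):

#include <stdint.h>
#include <stdlib.h>

static void
gen_local_mac(uint8_t mac[6])
{
	mac[0] = 0xB2;		/* 1011 0010: locally administered, unicast */
	for (int i = 1; i < 6; i++)
		mac[i] = arc4random() & 0xff;
}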
Lines 3652-3687 Link Here
3652
}
3994
}
3653
3995
3654
static void
3996
static void
3655
vtnet_set_tx_intr_threshold(struct vtnet_softc *sc)
3656
{
3657
	int size, thresh;
3658
3659
	size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq);
3660
3661
	/*
3662
	 * The Tx interrupt is disabled until the queue free count falls
3663
	 * below our threshold. Completed frames are drained from the Tx
3664
	 * virtqueue before transmitting new frames and in the watchdog
3665
	 * callout, so the frequency of Tx interrupts is greatly reduced,
3666
	 * at the cost of not freeing mbufs as quickly as they otherwise
3667
	 * would be.
3668
	 *
3669
	 * N.B. We assume all the Tx queues are the same size.
3670
	 */
3671
	thresh = size / 4;
3672
3673
	/*
3674
	 * Without indirect descriptors, leave enough room for the most
3675
	 * segments we handle.
3676
	 */
3677
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
3678
	    thresh < sc->vtnet_tx_nsegs)
3679
		thresh = sc->vtnet_tx_nsegs;
3680
3681
	sc->vtnet_tx_intr_thresh = thresh;
3682
}
3683
3684
static void
3685
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
3997
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
3686
    struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
3998
    struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
3687
{
3999
{
Lines 3709-3714 Link Here
3709
	    &stats->vrxs_csum, "Receive checksum offloaded");
4021
	    &stats->vrxs_csum, "Receive checksum offloaded");
3710
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
4022
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
3711
	    &stats->vrxs_csum_failed, "Receive checksum offload failed");
4023
	    &stats->vrxs_csum_failed, "Receive checksum offload failed");
4024
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", CTLFLAG_RD,
4025
	    &stats->vrxs_host_lro, "Receive host segmentation offloaded");
3712
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
4026
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3713
	    &stats->vrxs_rescheduled,
4027
	    &stats->vrxs_rescheduled,
3714
	    "Receive interrupt handler rescheduled");
4028
	    "Receive interrupt handler rescheduled");
Lines 3739-3745 Link Here
3739
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
4053
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3740
	    &stats->vtxs_csum, "Transmit checksum offloaded");
4054
	    &stats->vtxs_csum, "Transmit checksum offloaded");
3741
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
4055
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
3742
	    &stats->vtxs_tso, "Transmit segmentation offloaded");
4056
	    &stats->vtxs_tso, "Transmit TCP segmentation offloaded");
3743
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
4057
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3744
	    &stats->vtxs_rescheduled,
4058
	    &stats->vtxs_rescheduled,
3745
	    "Transmit interrupt handler rescheduled");
4059
	    "Transmit interrupt handler rescheduled");
Lines 3759-3765 Link Here
3759
	tree = device_get_sysctl_tree(dev);
4073
	tree = device_get_sysctl_tree(dev);
3760
	child = SYSCTL_CHILDREN(tree);
4074
	child = SYSCTL_CHILDREN(tree);
3761
4075
3762
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
4076
	for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
3763
		vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
4077
		vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
3764
		vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
4078
		vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
3765
	}
4079
	}
Lines 3819-3834 Link Here
3819
	    CTLFLAG_RD, &stats->rx_task_rescheduled,
4133
	    CTLFLAG_RD, &stats->rx_task_rescheduled,
3820
	    "Times the receive interrupt task rescheduled itself");
4134
	    "Times the receive interrupt task rescheduled itself");
3821
4135
3822
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
4136
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype",
3823
	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
4137
	    CTLFLAG_RD, &stats->tx_csum_unknown_ethtype,
3824
	    "Aborted transmit of checksum offloaded buffer with unknown "
4138
	    "Aborted transmit of checksum offloaded buffer with unknown "
3825
	    "Ethernet type");
4139
	    "Ethernet type");
3826
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
4140
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch",
3827
	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
4141
	    CTLFLAG_RD, &stats->tx_csum_proto_mismatch,
3828
	    "Aborted transmit of TSO buffer with unknown Ethernet type");
4142
	    "Aborted transmit of checksum offloaded buffer because mismatched "
4143
	    "protocols");
3829
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
4144
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
3830
	    CTLFLAG_RD, &stats->tx_tso_not_tcp,
4145
	    CTLFLAG_RD, &stats->tx_tso_not_tcp,
3831
	    "Aborted transmit of TSO buffer with non TCP protocol");
4146
	    "Aborted transmit of TSO buffer with non TCP protocol");
4147
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum",
4148
	    CTLFLAG_RD, &stats->tx_tso_without_csum,
4149
	    "Aborted transmit of TSO buffer without TCP checksum offload");
3832
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
4150
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
3833
	    CTLFLAG_RD, &stats->tx_defragged,
4151
	    CTLFLAG_RD, &stats->tx_defragged,
3834
	    "Transmit mbufs defragged");
4152
	    "Transmit mbufs defragged");
Lines 3861-3870 Link Here
3861
4179
3862
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
4180
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
3863
	    CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
4181
	    CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
3864
	    "Maximum number of supported virtqueue pairs");
4182
	    "Number of maximum supported virtqueue pairs");
3865
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "requested_vq_pairs",
4183
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "req_vq_pairs",
3866
	    CTLFLAG_RD, &sc->vtnet_requested_vq_pairs, 0,
4184
	    CTLFLAG_RD, &sc->vtnet_req_vq_pairs, 0,
3867
	    "Requested number of virtqueue pairs");
4185
	    "Number of requested virtqueue pairs");
3868
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
4186
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
3869
	    CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
4187
	    CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
3870
	    "Number of active virtqueue pairs");
4188
	    "Number of active virtqueue pairs");
Lines 3872-3877 Link Here
3872
	vtnet_setup_stat_sysctl(ctx, child, sc);
4190
	vtnet_setup_stat_sysctl(ctx, child, sc);
3873
}
4191
}
3874
4192
4193
static void
4194
vtnet_load_tunables(struct vtnet_softc *sc)
4195
{
4196
4197
	sc->vtnet_lro_entry_count = vtnet_tunable_int(sc,
4198
	    "lro_entry_count", vtnet_lro_entry_count);
4199
	if (sc->vtnet_lro_entry_count < TCP_LRO_ENTRIES)
4200
		sc->vtnet_lro_entry_count = TCP_LRO_ENTRIES;
4201
4202
	sc->vtnet_lro_mbufq_depth = vtnet_tunable_int(sc,
4203
	    "lro_mbufq_depeth", vtnet_lro_mbufq_depth);
4204
}
4205
3875
static int
4206
static int
3876
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
4207
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
3877
{
4208
{
Lines 3913-3922 Link Here
3913
static void
4244
static void
3914
vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
4245
vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
3915
{
4246
{
4247
	struct vtnet_rxq *rxq;
3916
	int i;
4248
	int i;
3917
4249
3918
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4250
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3919
		vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]);
4251
		rxq = &sc->vtnet_rxqs[i];
4252
		if (vtnet_rxq_enable_intr(rxq) != 0)
4253
			taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
4254
	}
3920
}
4255
}
3921
4256
3922
static void
4257
static void
Lines 3941-3947 Link Here
3941
{
4276
{
3942
	int i;
4277
	int i;
3943
4278
3944
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4279
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
3945
		vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
4280
		vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
3946
}
4281
}
3947
4282
Lines 3950-3956 Link Here
3950
{
4285
{
3951
	int i;
4286
	int i;
3952
4287
3953
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4288
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
3954
		vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
4289
		vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
3955
}
4290
}
3956
4291
Lines 3983-3991 Link Here
3983
	sc = if_getsoftc(ifp);
4318
	sc = if_getsoftc(ifp);
3984
4319
3985
	VTNET_CORE_LOCK(sc);
4320
	VTNET_CORE_LOCK(sc);
3986
	*nrxr = sc->vtnet_max_vq_pairs;
4321
	*nrxr = sc->vtnet_req_vq_pairs;
3987
	*ncl = NETDUMP_MAX_IN_FLIGHT;
4322
	*ncl = NETDUMP_MAX_IN_FLIGHT;
3988
	*clsize = sc->vtnet_rx_clsize;
4323
	*clsize = sc->vtnet_rx_clustersz;
3989
	VTNET_CORE_UNLOCK(sc);
4324
	VTNET_CORE_UNLOCK(sc);
3990
4325
3991
	/*
4326
	/*
Lines 4034-4040 Link Here
4034
		return (EBUSY);
4369
		return (EBUSY);
4035
4370
4036
	(void)vtnet_txq_eof(&sc->vtnet_txqs[0]);
4371
	(void)vtnet_txq_eof(&sc->vtnet_txqs[0]);
4037
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
4372
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4038
		(void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]);
4373
		(void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]);
4039
	return (0);
4374
	return (0);
4040
}
4375
}
(-)sys/dev/virtio/network/if_vtnetvar.h (-51 / +73 lines)
Lines 43-51 Link Here
43
	uint64_t	rx_csum_bad_ipproto;
43
	uint64_t	rx_csum_bad_ipproto;
44
	uint64_t	rx_csum_bad_offset;
44
	uint64_t	rx_csum_bad_offset;
45
	uint64_t	rx_csum_bad_proto;
45
	uint64_t	rx_csum_bad_proto;
46
	uint64_t	tx_csum_bad_ethtype;
46
	uint64_t	tx_csum_unknown_ethtype;
47
	uint64_t	tx_tso_bad_ethtype;
47
	uint64_t	tx_csum_proto_mismatch;
48
	uint64_t	tx_tso_not_tcp;
48
	uint64_t	tx_tso_not_tcp;
49
	uint64_t	tx_tso_without_csum;
49
	uint64_t	tx_defragged;
50
	uint64_t	tx_defragged;
50
	uint64_t	tx_defrag_failed;
51
	uint64_t	tx_defrag_failed;
51
52
Lines 67-72 Link Here
67
	uint64_t	vrxs_ierrors;	/* if_ierrors */
68
	uint64_t	vrxs_ierrors;	/* if_ierrors */
68
	uint64_t	vrxs_csum;
69
	uint64_t	vrxs_csum;
69
	uint64_t	vrxs_csum_failed;
70
	uint64_t	vrxs_csum_failed;
71
	uint64_t	vrxs_host_lro;
70
	uint64_t	vrxs_rescheduled;
72
	uint64_t	vrxs_rescheduled;
71
};
73
};
72
74
Lines 79-84 Link Here
79
	struct vtnet_rxq_stats	 vtnrx_stats;
81
	struct vtnet_rxq_stats	 vtnrx_stats;
80
	struct taskqueue	*vtnrx_tq;
82
	struct taskqueue	*vtnrx_tq;
81
	struct task		 vtnrx_intrtask;
83
	struct task		 vtnrx_intrtask;
84
	struct lro_ctrl		 vtnrx_lro;
82
#ifdef DEV_NETMAP
85
#ifdef DEV_NETMAP
83
	struct virtio_net_hdr_mrg_rxbuf vtnrx_shrhdr;
86
	struct virtio_net_hdr_mrg_rxbuf vtnrx_shrhdr;
84
#endif  /* DEV_NETMAP */
87
#endif  /* DEV_NETMAP */
Lines 111-116 Link Here
111
#endif
114
#endif
112
	int			 vtntx_id;
115
	int			 vtntx_id;
113
	int			 vtntx_watchdog;
116
	int			 vtntx_watchdog;
117
	int			 vtntx_intr_threshold;
114
	struct vtnet_txq_stats	 vtntx_stats;
118
	struct vtnet_txq_stats	 vtntx_stats;
115
	struct taskqueue	*vtntx_tq;
119
	struct taskqueue	*vtntx_tq;
116
	struct task		 vtntx_intrtask;
120
	struct task		 vtntx_intrtask;
Lines 136-144 Link Here
136
	struct ifnet		*vtnet_ifp;
140
	struct ifnet		*vtnet_ifp;
137
	struct vtnet_rxq	*vtnet_rxqs;
141
	struct vtnet_rxq	*vtnet_rxqs;
138
	struct vtnet_txq	*vtnet_txqs;
142
	struct vtnet_txq	*vtnet_txqs;
143
	uint64_t		 vtnet_features;
139
144
140
	uint32_t		 vtnet_flags;
145
	uint32_t		 vtnet_flags;
141
#define VTNET_FLAG_SUSPENDED	 0x0001
146
#define VTNET_FLAG_MODERN	 0x0001
142
#define VTNET_FLAG_MAC		 0x0002
147
#define VTNET_FLAG_MAC		 0x0002
143
#define VTNET_FLAG_CTRL_VQ	 0x0004
148
#define VTNET_FLAG_CTRL_VQ	 0x0004
144
#define VTNET_FLAG_CTRL_RX	 0x0008
149
#define VTNET_FLAG_CTRL_RX	 0x0008
Lines 147-175 Link Here
147
#define VTNET_FLAG_TSO_ECN	 0x0040
152
#define VTNET_FLAG_TSO_ECN	 0x0040
148
#define VTNET_FLAG_MRG_RXBUFS	 0x0080
153
#define VTNET_FLAG_MRG_RXBUFS	 0x0080
149
#define VTNET_FLAG_LRO_NOMRG	 0x0100
154
#define VTNET_FLAG_LRO_NOMRG	 0x0100
150
#define VTNET_FLAG_MULTIQ	 0x0200
155
#define VTNET_FLAG_MQ		 0x0200
151
#define VTNET_FLAG_INDIRECT	 0x0400
156
#define VTNET_FLAG_INDIRECT	 0x0400
152
#define VTNET_FLAG_EVENT_IDX	 0x0800
157
#define VTNET_FLAG_EVENT_IDX	 0x0800
158
#define VTNET_FLAG_SUSPENDED	 0x1000
159
#define VTNET_FLAG_FIXUP_NEEDS_CSUM 0x2000
160
#define VTNET_FLAG_SW_LRO	 0x4000
153
161
154
	int			 vtnet_link_active;
155
	int			 vtnet_hdr_size;
162
	int			 vtnet_hdr_size;
156
	int			 vtnet_rx_process_limit;
157
	int			 vtnet_rx_nsegs;
158
	int			 vtnet_rx_nmbufs;
163
	int			 vtnet_rx_nmbufs;
159
	int			 vtnet_rx_clsize;
164
	int			 vtnet_rx_clustersz;
160
	int			 vtnet_rx_new_clsize;
165
	int			 vtnet_rx_nsegs;
161
	int			 vtnet_tx_intr_thresh;
166
	int			 vtnet_rx_process_limit;
162
	int			 vtnet_tx_nsegs;
167
	int			 vtnet_link_active;
163
	int			 vtnet_if_flags;
164
	int			 vtnet_act_vq_pairs;
168
	int			 vtnet_act_vq_pairs;
169
	int			 vtnet_req_vq_pairs;
165
	int			 vtnet_max_vq_pairs;
170
	int			 vtnet_max_vq_pairs;
166
	int			 vtnet_requested_vq_pairs;
171
	int			 vtnet_tx_nsegs;
172
	int			 vtnet_if_flags;
173
	int			 vtnet_max_mtu;
174
	int			 vtnet_lro_entry_count;
175
	int			 vtnet_lro_mbufq_depth;
167
176
168
	struct virtqueue	*vtnet_ctrl_vq;
177
	struct virtqueue	*vtnet_ctrl_vq;
169
	struct vtnet_mac_filter	*vtnet_mac_filter;
178
	struct vtnet_mac_filter	*vtnet_mac_filter;
170
	uint32_t		*vtnet_vlan_filter;
179
	uint32_t		*vtnet_vlan_filter;
171
180
172
	uint64_t		 vtnet_features;
181
	uint64_t		 vtnet_negotiated_features;
173
	struct vtnet_statistics	 vtnet_stats;
182
	struct vtnet_statistics	 vtnet_stats;
174
	struct callout		 vtnet_tick_ch;
183
	struct callout		 vtnet_tick_ch;
175
	struct ifmedia		 vtnet_media;
184
	struct ifmedia		 vtnet_media;
Lines 181-190 Link Here
181
	char			 vtnet_hwaddr[ETHER_ADDR_LEN];
190
	char			 vtnet_hwaddr[ETHER_ADDR_LEN];
182
};
191
};
183
192
193
static bool
194
vtnet_modern(struct vtnet_softc *sc)
195
{
196
	return ((sc->vtnet_flags & VTNET_FLAG_MODERN) != 0);
197
}
198
199
static bool
200
vtnet_software_lro(struct vtnet_softc *sc)
201
{
202
	return ((sc->vtnet_flags & VTNET_FLAG_SW_LRO) != 0);
203
}
204
184
/*
205
/*
185
 * Maximum number of queue pairs we will autoconfigure to.
206
 * Maximum number of queue pairs we will autoconfigure to.
186
 */
207
 */
187
#define VTNET_MAX_QUEUE_PAIRS	8
208
#define VTNET_MAX_QUEUE_PAIRS	32
188
209
189
/*
210
/*
190
 * Additional completed entries can appear in a virtqueue before we can
211
 * Additional completed entries can appear in a virtqueue before we can
Lines 202-226 Link Here
202
#define VTNET_NOTIFY_RETRIES		4
223
#define VTNET_NOTIFY_RETRIES		4
203
224
204
/*
225
/*
205
 * Fake the media type. The host does not provide us with any real media
206
 * information.
207
 */
208
#define VTNET_MEDIATYPE		 (IFM_ETHER | IFM_10G_T | IFM_FDX)
209
210
/*
211
 * Number of words to allocate for the VLAN shadow table. There is one
226
 * Number of words to allocate for the VLAN shadow table. There is one
212
 * bit for each VLAN.
227
 * bit for each VLAN.
213
 */
228
 */
214
#define VTNET_VLAN_FILTER_NWORDS	(4096 / 32)
229
#define VTNET_VLAN_FILTER_NWORDS	(4096 / 32)
215
230
231
/* We depend on these being the same size (and same layout). */
232
CTASSERT(sizeof(struct virtio_net_hdr_mrg_rxbuf) ==
233
    sizeof(struct virtio_net_hdr_v1));
234
216
/*
235
/*
217
 * When mergeable buffers are not negotiated, the vtnet_rx_header structure
236
 * In legacy VirtIO when mergeable buffers are not negotiated, this structure
218
 * below is placed at the beginning of the mbuf data. Use 4 bytes of pad to
237
 * is placed at the beginning of the mbuf data. Use 4 bytes of pad to keep
219
 * both keep the VirtIO header and the data non-contiguous and to keep the
238
 * both the VirtIO header and the data non-contiguous and the frame's payload
220
 * frame's payload 4 byte aligned.
239
 * 4 byte aligned. Note this padding would not be necessary if the
240
 * VIRTIO_F_ANY_LAYOUT feature was negotiated (but we don't support that yet).
221
 *
241
 *
222
 * When mergeable buffers are negotiated, the host puts the VirtIO header in
242
 * In modern VirtIO or when mergeable buffers are negotiated, the host puts
223
 * the beginning of the first mbuf's data.
243
 * the VirtIO header in the beginning of the first mbuf's data.
224
 */
244
 */
225
#define VTNET_RX_HEADER_PAD	4
245
#define VTNET_RX_HEADER_PAD	4
226
struct vtnet_rx_header {
246
struct vtnet_rx_header {
Lines 236-241 Link Here
236
	union {
256
	union {
237
		struct virtio_net_hdr		hdr;
257
		struct virtio_net_hdr		hdr;
238
		struct virtio_net_hdr_mrg_rxbuf	mhdr;
258
		struct virtio_net_hdr_mrg_rxbuf	mhdr;
259
		struct virtio_net_hdr_v1	v1hdr;
239
	} vth_uhdr;
260
	} vth_uhdr;
240
261
241
	struct mbuf *vth_mbuf;
262
	struct mbuf *vth_mbuf;
Lines 250-255 Link Here
250
 */
271
 */
251
#define VTNET_MAX_MAC_ENTRIES	128
272
#define VTNET_MAX_MAC_ENTRIES	128
252
273
274
/*
275
 * The driver version of struct virtio_net_ctrl_mac but with our predefined
276
 * number of MAC addresses allocated. This structure is shared with the host,
277
 * so nentries field is in the correct VirtIO endianness.
278
 */
253
struct vtnet_mac_table {
279
struct vtnet_mac_table {
254
	uint32_t	nentries;
280
	uint32_t	nentries;
255
	uint8_t		macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
281
	uint8_t		macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
Lines 275-289 Link Here
275
    (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO)
301
    (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO)
276
302
277
/* Features desired/implemented by this driver. */
303
/* Features desired/implemented by this driver. */
278
#define VTNET_FEATURES \
304
#define VTNET_COMMON_FEATURES \
279
    (VIRTIO_NET_F_MAC			| \
305
    (VIRTIO_NET_F_MAC			| \
280
     VIRTIO_NET_F_STATUS		| \
306
     VIRTIO_NET_F_STATUS		| \
307
     VIRTIO_NET_F_CTRL_GUEST_OFFLOADS	| \
308
     VIRTIO_NET_F_MTU			| \
281
     VIRTIO_NET_F_CTRL_VQ		| \
309
     VIRTIO_NET_F_CTRL_VQ		| \
282
     VIRTIO_NET_F_CTRL_RX		| \
310
     VIRTIO_NET_F_CTRL_RX		| \
283
     VIRTIO_NET_F_CTRL_MAC_ADDR		| \
311
     VIRTIO_NET_F_CTRL_MAC_ADDR		| \
284
     VIRTIO_NET_F_CTRL_VLAN		| \
312
     VIRTIO_NET_F_CTRL_VLAN		| \
285
     VIRTIO_NET_F_CSUM			| \
313
     VIRTIO_NET_F_CSUM			| \
286
     VIRTIO_NET_F_GSO			| \
287
     VIRTIO_NET_F_HOST_TSO4		| \
314
     VIRTIO_NET_F_HOST_TSO4		| \
288
     VIRTIO_NET_F_HOST_TSO6		| \
315
     VIRTIO_NET_F_HOST_TSO6		| \
289
     VIRTIO_NET_F_HOST_ECN		| \
316
     VIRTIO_NET_F_HOST_ECN		| \
Lines 293-301 Link Here
293
     VIRTIO_NET_F_GUEST_ECN		| \
320
     VIRTIO_NET_F_GUEST_ECN		| \
294
     VIRTIO_NET_F_MRG_RXBUF		| \
321
     VIRTIO_NET_F_MRG_RXBUF		| \
295
     VIRTIO_NET_F_MQ			| \
322
     VIRTIO_NET_F_MQ			| \
323
     VIRTIO_NET_F_SPEED_DUPLEX		| \
296
     VIRTIO_RING_F_EVENT_IDX		| \
324
     VIRTIO_RING_F_EVENT_IDX		| \
297
     VIRTIO_RING_F_INDIRECT_DESC)
325
     VIRTIO_RING_F_INDIRECT_DESC)
298
326
327
#define VTNET_MODERN_FEATURES (VTNET_COMMON_FEATURES)
328
#define VTNET_LEGACY_FEATURES (VTNET_COMMON_FEATURES | VIRTIO_NET_F_GSO)
329
299
/*
330
/*
300
 * The VIRTIO_NET_F_HOST_TSO[46] features permit us to send the host
331
 * The VIRTIO_NET_F_HOST_TSO[46] features permit us to send the host
301
 * frames larger than 1514 bytes.
332
 * frames larger than 1514 bytes.
Lines 305-352 Link Here
305
336
306
/*
337
/*
307
 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
338
 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
308
 * frames larger than 1514 bytes. We do not yet support software LRO
339
 * frames larger than 1514 bytes.
309
 * via tcp_lro_rx().
340
					
310
 */
341
 */
311
#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
342
#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
312
    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)
343
    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)
313
344
345
#define VTNET_MIN_MTU		68
314
#define VTNET_MAX_MTU		65536
346
#define VTNET_MAX_MTU		65536
315
#define VTNET_MAX_RX_SIZE	65550
347
#define VTNET_MAX_RX_SIZE	65550
316
348
317
/*
349
/*
318
 * Used to preallocate the Vq indirect descriptors. The first segment
350
 * Used to preallocate the VQ indirect descriptors. Modern and mergeable
319
 * is reserved for the header, except for mergeable buffers since the
351
 * buffers do not required one segment for the VirtIO header since it is
320
 * header is placed inline with the data.
352
 * placed inline at the beginning of the receive buffer.
321
 */
353
 */
322
#define VTNET_MRG_RX_SEGS	1
354
#define VTNET_RX_SEGS_HDR_INLINE	1
323
#define VTNET_MIN_RX_SEGS	2
355
#define VTNET_RX_SEGS_HDR_SEPARATE	2
324
#define VTNET_MAX_RX_SEGS	34
356
#define VTNET_RX_SEGS_LRO_NOMRG		34
325
#define VTNET_MIN_TX_SEGS	32
357
#define VTNET_TX_SEGS_MIN		32
326
#define VTNET_MAX_TX_SEGS	64
358
#define VTNET_TX_SEGS_MAX		64
327
359
328
/*
360
/*
329
 * Assert we can receive and transmit the maximum with regular
361
 * Assert we can receive and transmit the maximum with regular
330
 * size clusters.
362
 * size clusters.
331
 */
363
 */
332
CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
364
CTASSERT(((VTNET_RX_SEGS_LRO_NOMRG - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
333
CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU);
365
CTASSERT(((VTNET_TX_SEGS_MAX - 1) * MCLBYTES) >= VTNET_MAX_MTU);
334
366
335
/*
367
/*
336
 * Number of slots in the Tx bufrings. This value matches most other
368
 * Number of slots in the Tx bufrings. This value matches most other
337
 * multiqueue drivers.
369
 * multiqueue drivers.
338
 */
370
 */
339
#define VTNET_DEFAULT_BUFRING_SIZE	4096
371
#define VTNET_DEFAULT_BUFRING_SIZE	4096
340
341
/*
342
 * Determine how many mbufs are in each receive buffer. For LRO without
343
 * mergeable buffers, we must allocate an mbuf chain large enough to
344
 * hold both the vtnet_rx_header and the maximum receivable data.
345
 */
346
#define VTNET_NEEDED_RX_MBUFS(_sc, _clsize)				\
347
	((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 :		\
348
	    howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE,	\
349
	        (_clsize))
350
372
351
#define VTNET_CORE_MTX(_sc)		&(_sc)->vtnet_mtx
373
#define VTNET_CORE_MTX(_sc)		&(_sc)->vtnet_mtx
352
#define VTNET_CORE_LOCK(_sc)		mtx_lock(VTNET_CORE_MTX((_sc)))
374
#define VTNET_CORE_LOCK(_sc)		mtx_lock(VTNET_CORE_MTX((_sc)))
(-)sys/dev/virtio/network/virtio_net.h (-25 / +78 lines)
Lines 34-62 Link Here
34
#define _VIRTIO_NET_H
34
#define _VIRTIO_NET_H
35
35
36
/* The feature bitmap for virtio net */
36
/* The feature bitmap for virtio net */
37
#define VIRTIO_NET_F_CSUM	0x00001 /* Host handles pkts w/ partial csum */
37
#define VIRTIO_NET_F_CSUM		 0x000001 /* Host handles pkts w/ partial csum */
38
#define VIRTIO_NET_F_GUEST_CSUM 0x00002 /* Guest handles pkts w/ partial csum*/
38
#define VIRTIO_NET_F_GUEST_CSUM		 0x000002 /* Guest handles pkts w/ partial csum*/
39
#define VIRTIO_NET_F_MAC	0x00020 /* Host has given MAC address. */
39
#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 0x000004 /* Dynamic offload configuration. */
40
#define VIRTIO_NET_F_GSO	0x00040 /* Host handles pkts w/ any GSO type */
40
#define VIRTIO_NET_F_MTU		 0x000008 /* Initial MTU advice */
41
#define VIRTIO_NET_F_GUEST_TSO4	0x00080 /* Guest can handle TSOv4 in. */
41
#define VIRTIO_NET_F_MAC		 0x000020 /* Host has given MAC address. */
42
#define VIRTIO_NET_F_GUEST_TSO6	0x00100 /* Guest can handle TSOv6 in. */
42
#define VIRTIO_NET_F_GSO		 0x000040 /* Host handles pkts w/ any GSO type */
43
#define VIRTIO_NET_F_GUEST_ECN	0x00200 /* Guest can handle TSO[6] w/ ECN in.*/
43
#define VIRTIO_NET_F_GUEST_TSO4		 0x000080 /* Guest can handle TSOv4 in. */
44
#define VIRTIO_NET_F_GUEST_UFO	0x00400 /* Guest can handle UFO in. */
44
#define VIRTIO_NET_F_GUEST_TSO6		 0x000100 /* Guest can handle TSOv6 in. */
45
#define VIRTIO_NET_F_HOST_TSO4	0x00800 /* Host can handle TSOv4 in. */
45
#define VIRTIO_NET_F_GUEST_ECN		 0x000200 /* Guest can handle TSO[6] w/ ECN in. */
46
#define VIRTIO_NET_F_HOST_TSO6	0x01000 /* Host can handle TSOv6 in. */
46
#define VIRTIO_NET_F_GUEST_UFO		 0x000400 /* Guest can handle UFO in. */
47
#define VIRTIO_NET_F_HOST_ECN	0x02000 /* Host can handle TSO[6] w/ ECN in. */
47
#define VIRTIO_NET_F_HOST_TSO4		 0x000800 /* Host can handle TSOv4 in. */
48
#define VIRTIO_NET_F_HOST_UFO	0x04000 /* Host can handle UFO in. */
48
#define VIRTIO_NET_F_HOST_TSO6		 0x001000 /* Host can handle TSOv6 in. */
49
#define VIRTIO_NET_F_MRG_RXBUF	0x08000 /* Host can merge receive buffers. */
49
#define VIRTIO_NET_F_HOST_ECN		 0x002000 /* Host can handle TSO[6] w/ ECN in. */
50
#define VIRTIO_NET_F_STATUS	0x10000 /* virtio_net_config.status available*/
50
#define VIRTIO_NET_F_HOST_UFO		 0x004000 /* Host can handle UFO in. */
51
#define VIRTIO_NET_F_CTRL_VQ	0x20000 /* Control channel available */
51
#define VIRTIO_NET_F_MRG_RXBUF		 0x008000 /* Host can merge receive buffers. */
52
#define VIRTIO_NET_F_CTRL_RX	0x40000 /* Control channel RX mode support */
52
#define VIRTIO_NET_F_STATUS		 0x010000 /* virtio_net_config.status available*/
53
#define VIRTIO_NET_F_CTRL_VLAN	0x80000 /* Control channel VLAN filtering */
53
#define VIRTIO_NET_F_CTRL_VQ		 0x020000 /* Control channel available */
54
#define VIRTIO_NET_F_CTRL_RX_EXTRA 0x100000 /* Extra RX mode control support */
54
#define VIRTIO_NET_F_CTRL_RX		 0x040000 /* Control channel RX mode support */
55
#define VIRTIO_NET_F_GUEST_ANNOUNCE 0x200000 /* Announce device on network */
55
#define VIRTIO_NET_F_CTRL_VLAN		 0x080000 /* Control channel VLAN filtering */
56
#define VIRTIO_NET_F_MQ		0x400000 /* Device supports RFS */
56
#define VIRTIO_NET_F_CTRL_RX_EXTRA	 0x100000 /* Extra RX mode control support */
57
#define VIRTIO_NET_F_CTRL_MAC_ADDR 0x800000 /* Set MAC address */
57
#define VIRTIO_NET_F_GUEST_ANNOUNCE	 0x200000 /* Announce device on network */
58
#define VIRTIO_NET_F_MQ			 0x400000 /* Device supports Receive Flow Steering */
59
#define VIRTIO_NET_F_CTRL_MAC_ADDR	 0x800000 /* Set MAC address */
60
#define VIRTIO_NET_F_SPEED_DUPLEX	 (1ULL << 63) /* Device set linkspeed and duplex */
58
61
59
#define VIRTIO_NET_S_LINK_UP	1	/* Link is up */
62
#define VIRTIO_NET_S_LINK_UP	1	/* Link is up */
63
#define VIRTIO_NET_S_ANNOUNCE	2	/* Announcement is needed */
60
64
61
struct virtio_net_config {
65
struct virtio_net_config {
62
	/* The config defining mac address (if VIRTIO_NET_F_MAC) */
66
	/* The config defining mac address (if VIRTIO_NET_F_MAC) */
Lines 68-83 Link Here
68
	 * Legal values are between 1 and 0x8000.
72
	 * Legal values are between 1 and 0x8000.
69
	 */
73
	 */
70
	uint16_t	max_virtqueue_pairs;
74
	uint16_t	max_virtqueue_pairs;
75
	/* Default maximum transmit unit advice */
76
	uint16_t	mtu;
77
	/*
78
	 * speed, in units of 1Mb. All values 0 to INT_MAX are legal.
79
	 * Any other value stands for unknown.
80
	 */
81
	uint32_t	speed;
82
	/*
83
	 * 0x00 - half duplex
84
	 * 0x01 - full duplex
85
	 * Any other value stands for unknown.
86
	 */
87
	uint8_t		duplex;
71
} __packed;
88
} __packed;
72
89
73
/*
90
/*
74
 * This is the first element of the scatter-gather list.  If you don't
91
 * This header comes first in the scatter-gather list.  If you don't
75
 * specify GSO or CSUM features, you can simply ignore the header.
92
 * specify GSO or CSUM features, you can simply ignore the header.
93
 *
94
 * This is bitwise-equivalent to the legacy struct virtio_net_hdr_mrg_rxbuf,
95
 * only flattened.
76
 */
96
 */
77
struct virtio_net_hdr {
97
struct virtio_net_hdr_v1 {
78
#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* Use csum_start,csum_offset*/
98
#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* Use csum_start, csum_offset */
79
#define VIRTIO_NET_HDR_F_DATA_VALID	2	/* Csum is valid */
99
#define VIRTIO_NET_HDR_F_DATA_VALID	2	/* Csum is valid */
80
	uint8_t	flags;
100
	uint8_t flags;
81
#define VIRTIO_NET_HDR_GSO_NONE		0	/* Not a GSO frame */
101
#define VIRTIO_NET_HDR_GSO_NONE		0	/* Not a GSO frame */
82
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* GSO frame, IPv4 TCP (TSO) */
102
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* GSO frame, IPv4 TCP (TSO) */
83
#define VIRTIO_NET_HDR_GSO_UDP		3	/* GSO frame, IPv4 UDP (UFO) */
103
#define VIRTIO_NET_HDR_GSO_UDP		3	/* GSO frame, IPv4 UDP (UFO) */
Lines 88-96 Link Here
88
	uint16_t gso_size;	/* Bytes to append to hdr_len per frame */
108
	uint16_t gso_size;	/* Bytes to append to hdr_len per frame */
89
	uint16_t csum_start;	/* Position to start checksumming from */
109
	uint16_t csum_start;	/* Position to start checksumming from */
90
	uint16_t csum_offset;	/* Offset after that to place checksum */
110
	uint16_t csum_offset;	/* Offset after that to place checksum */
111
	uint16_t num_buffers;	/* Number of merged rx buffers */
91
};
112
};
92
113
93
/*
114
/*
115
 * This header comes first in the scatter-gather list.
116
 * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must
117
 * be the first element of the scatter-gather list.  If you don't
118
 * specify GSO or CSUM features, you can simply ignore the header.
119
 */
120
struct virtio_net_hdr {
121
	/* See VIRTIO_NET_HDR_F_* */
122
	uint8_t	flags;
123
	/* See VIRTIO_NET_HDR_GSO_* */
124
	uint8_t gso_type;
125
	uint16_t hdr_len;	/* Ethernet + IP + tcp/udp hdrs */
126
	uint16_t gso_size;	/* Bytes to append to hdr_len per frame */
127
	uint16_t csum_start;	/* Position to start checksumming from */
128
	uint16_t csum_offset;	/* Offset after that to place checksum */
129
};
130
131
/*
94
 * This is the version of the header to use when the MRG_RXBUF
132
 * This is the version of the header to use when the MRG_RXBUF
95
 * feature has been negotiated.
133
 * feature has been negotiated.
96
 */
134
 */
Lines 200-204 Link Here
200
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET		0
238
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET		0
201
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN		1
239
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN		1
202
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX		0x8000
240
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX		0x8000
241
242
/*
243
 * Control network offloads
244
 *
245
 * Reconfigures the network offloads that Guest can handle.
246
 *
247
 * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
248
 *
249
 * Command data format matches the feature bit mask exactly.
250
 *
251
 * See VIRTIO_NET_F_GUEST_* for the list of offloads
252
 * that can be enabled/disabled.
253
 */
254
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS		5
255
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET	0
203
256
204
#endif /* _VIRTIO_NET_H */
257
#endif /* _VIRTIO_NET_H */
(-)sys/dev/virtio/pci/virtio_pci.c (-838 / +508 lines)
Lines 1-7 Link Here
1
/*-
1
/*-
2
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
2
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3
 *
3
 *
4
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
4
 * Copyright (c) 2017, Bryan Venteicher <bryanv@FreeBSD.org>
5
 * All rights reserved.
5
 * All rights reserved.
6
 *
6
 *
7
 * Redistribution and use in source and binary forms, with or without
7
 * Redistribution and use in source and binary forms, with or without
Lines 35-40 Link Here
35
#include <sys/systm.h>
35
#include <sys/systm.h>
36
#include <sys/bus.h>
36
#include <sys/bus.h>
37
#include <sys/kernel.h>
37
#include <sys/kernel.h>
38
#include <sys/sbuf.h>
39
#include <sys/sysctl.h>
38
#include <sys/module.h>
40
#include <sys/module.h>
39
#include <sys/malloc.h>
41
#include <sys/malloc.h>
40
42
Lines 49-403 Link Here
49
#include <dev/virtio/virtio.h>
51
#include <dev/virtio/virtio.h>
50
#include <dev/virtio/virtqueue.h>
52
#include <dev/virtio/virtqueue.h>
51
#include <dev/virtio/pci/virtio_pci.h>
53
#include <dev/virtio/pci/virtio_pci.h>
54
#include <dev/virtio/pci/virtio_pci_var.h>
52
55
53
#include "virtio_bus_if.h"
56
#include "virtio_pci_if.h"
54
#include "virtio_if.h"
57
#include "virtio_if.h"
55
58
56
struct vtpci_interrupt {
59
static void	vtpci_describe_features(struct vtpci_common *, const char *,
57
	struct resource		*vti_irq;
58
	int			 vti_rid;
59
	void			*vti_handler;
60
};
61
62
struct vtpci_virtqueue {
63
	struct virtqueue	*vtv_vq;
64
	int			 vtv_no_intr;
65
};
66
67
struct vtpci_softc {
68
	device_t			 vtpci_dev;
69
	struct resource			*vtpci_res;
70
	struct resource			*vtpci_msix_res;
71
	uint64_t			 vtpci_features;
72
	uint32_t			 vtpci_flags;
73
#define VTPCI_FLAG_NO_MSI		0x0001
74
#define VTPCI_FLAG_NO_MSIX		0x0002
75
#define VTPCI_FLAG_LEGACY		0x1000
76
#define VTPCI_FLAG_MSI			0x2000
77
#define VTPCI_FLAG_MSIX			0x4000
78
#define VTPCI_FLAG_SHARED_MSIX		0x8000
79
#define VTPCI_FLAG_ITYPE_MASK		0xF000
80
81
	/* This "bus" will only ever have one child. */
82
	device_t			 vtpci_child_dev;
83
	struct virtio_feature_desc	*vtpci_child_feat_desc;
84
85
	int				 vtpci_nvqs;
86
	struct vtpci_virtqueue		*vtpci_vqs;
87
88
	/*
89
	 * Ideally, each virtqueue that the driver provides a callback for will
90
	 * receive its own MSIX vector. If there are not sufficient vectors
91
	 * available, then attempt to have all the VQs share one vector. For
92
	 * MSIX, the configuration changed notifications must be on their own
93
	 * vector.
94
	 *
95
	 * If MSIX is not available, we will attempt to have the whole device
96
	 * share one MSI vector, and then, finally, one legacy interrupt.
97
	 */
98
	struct vtpci_interrupt		 vtpci_device_interrupt;
99
	struct vtpci_interrupt		*vtpci_msix_vq_interrupts;
100
	int				 vtpci_nmsix_resources;
101
};
102
103
static int	vtpci_probe(device_t);
104
static int	vtpci_attach(device_t);
105
static int	vtpci_detach(device_t);
106
static int	vtpci_suspend(device_t);
107
static int	vtpci_resume(device_t);
108
static int	vtpci_shutdown(device_t);
109
static void	vtpci_driver_added(device_t, driver_t *);
110
static void	vtpci_child_detached(device_t, device_t);
111
static int	vtpci_read_ivar(device_t, device_t, int, uintptr_t *);
112
static int	vtpci_write_ivar(device_t, device_t, int, uintptr_t);
113
114
static uint64_t	vtpci_negotiate_features(device_t, uint64_t);
115
static int	vtpci_with_feature(device_t, uint64_t);
116
static int	vtpci_alloc_virtqueues(device_t, int, int,
117
		    struct vq_alloc_info *);
118
static int	vtpci_setup_intr(device_t, enum intr_type);
119
static void	vtpci_stop(device_t);
120
static int	vtpci_reinit(device_t, uint64_t);
121
static void	vtpci_reinit_complete(device_t);
122
static void	vtpci_notify_virtqueue(device_t, uint16_t);
123
static uint8_t	vtpci_get_status(device_t);
124
static void	vtpci_set_status(device_t, uint8_t);
125
static void	vtpci_read_dev_config(device_t, bus_size_t, void *, int);
126
static void	vtpci_write_dev_config(device_t, bus_size_t, void *, int);
127
128
static void	vtpci_describe_features(struct vtpci_softc *, const char *,
129
		    uint64_t);
60
		    uint64_t);
130
static void	vtpci_probe_and_attach_child(struct vtpci_softc *);
61
static int	vtpci_alloc_msix(struct vtpci_common *, int);
131
62
static int	vtpci_alloc_msi(struct vtpci_common *);
132
static int	vtpci_alloc_msix(struct vtpci_softc *, int);
63
static int	vtpci_alloc_intr_msix_pervq(struct vtpci_common *);
133
static int	vtpci_alloc_msi(struct vtpci_softc *);
64
static int	vtpci_alloc_intr_msix_shared(struct vtpci_common *);
134
static int	vtpci_alloc_intr_msix_pervq(struct vtpci_softc *);
65
static int	vtpci_alloc_intr_msi(struct vtpci_common *);
135
static int	vtpci_alloc_intr_msix_shared(struct vtpci_softc *);
66
static int	vtpci_alloc_intr_intx(struct vtpci_common *);
136
static int	vtpci_alloc_intr_msi(struct vtpci_softc *);
67
static int	vtpci_alloc_interrupt(struct vtpci_common *, int, int,
137
static int	vtpci_alloc_intr_legacy(struct vtpci_softc *);
138
static int	vtpci_alloc_interrupt(struct vtpci_softc *, int, int,
139
		    struct vtpci_interrupt *);
68
		    struct vtpci_interrupt *);
140
static int	vtpci_alloc_intr_resources(struct vtpci_softc *);
69
static void	vtpci_free_interrupt(struct vtpci_common *,
70
		    struct vtpci_interrupt *);
141
71
142
static int	vtpci_setup_legacy_interrupt(struct vtpci_softc *,
72
static void	vtpci_free_interrupts(struct vtpci_common *);
73
static void	vtpci_free_virtqueues(struct vtpci_common *);
74
static void	vtpci_cleanup_setup_intr_attempt(struct vtpci_common *);
75
static int	vtpci_alloc_intr_resources(struct vtpci_common *);
76
static int	vtpci_setup_intx_interrupt(struct vtpci_common *,
143
		    enum intr_type);
77
		    enum intr_type);
144
static int	vtpci_setup_pervq_msix_interrupts(struct vtpci_softc *,
78
static int	vtpci_setup_pervq_msix_interrupts(struct vtpci_common *,
145
		    enum intr_type);
79
		    enum intr_type);
146
static int	vtpci_setup_msix_interrupts(struct vtpci_softc *,
80
static int	vtpci_set_host_msix_vectors(struct vtpci_common *);
81
static int	vtpci_setup_msix_interrupts(struct vtpci_common *,
147
		    enum intr_type);
82
		    enum intr_type);
148
static int	vtpci_setup_interrupts(struct vtpci_softc *, enum intr_type);
83
static int	vtpci_setup_intrs(struct vtpci_common *, enum intr_type);
149
84
static int	vtpci_reinit_virtqueue(struct vtpci_common *, int);
150
static int	vtpci_register_msix_vector(struct vtpci_softc *, int,
85
static void	vtpci_intx_intr(void *);
151
		    struct vtpci_interrupt *);
152
static int	vtpci_set_host_msix_vectors(struct vtpci_softc *);
153
static int	vtpci_reinit_virtqueue(struct vtpci_softc *, int);
154
155
static void	vtpci_free_interrupt(struct vtpci_softc *,
156
		    struct vtpci_interrupt *);
157
static void	vtpci_free_interrupts(struct vtpci_softc *);
158
static void	vtpci_free_virtqueues(struct vtpci_softc *);
159
static void	vtpci_release_child_resources(struct vtpci_softc *);
160
static void	vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *);
161
static void	vtpci_reset(struct vtpci_softc *);
162
163
static void	vtpci_select_virtqueue(struct vtpci_softc *, int);
164
165
static void	vtpci_legacy_intr(void *);
166
static int	vtpci_vq_shared_intr_filter(void *);
86
static int	vtpci_vq_shared_intr_filter(void *);
167
static void	vtpci_vq_shared_intr(void *);
87
static void	vtpci_vq_shared_intr(void *);
168
static int	vtpci_vq_intr_filter(void *);
88
static int	vtpci_vq_intr_filter(void *);
169
static void	vtpci_vq_intr(void *);
89
static void	vtpci_vq_intr(void *);
170
static void	vtpci_config_intr(void *);
90
static void	vtpci_config_intr(void *);
91
static void	vtpci_setup_sysctl(struct vtpci_common *);
171
92
172
#define vtpci_setup_msi_interrupt vtpci_setup_legacy_interrupt
93
#define vtpci_setup_msi_interrupt vtpci_setup_intx_interrupt
173
94
174
#define VIRTIO_PCI_CONFIG(_sc) \
175
    VIRTIO_PCI_CONFIG_OFF((((_sc)->vtpci_flags & VTPCI_FLAG_MSIX)) != 0)
176
177
/*
95
/*
178
 * I/O port read/write wrappers.
96
 * This module contains two drivers:
97
 *   - virtio_pci_legacy (vtpcil) for pre-V1 support
98
 *   - virtio_pci_modern (vtpcim) for V1 support
179
 */
99
 */
180
#define vtpci_read_config_1(sc, o)	bus_read_1((sc)->vtpci_res, (o))
181
#define vtpci_read_config_2(sc, o)	bus_read_2((sc)->vtpci_res, (o))
182
#define vtpci_read_config_4(sc, o)	bus_read_4((sc)->vtpci_res, (o))
183
#define vtpci_write_config_1(sc, o, v)	bus_write_1((sc)->vtpci_res, (o), (v))
184
#define vtpci_write_config_2(sc, o, v)	bus_write_2((sc)->vtpci_res, (o), (v))
185
#define vtpci_write_config_4(sc, o, v)	bus_write_4((sc)->vtpci_res, (o), (v))
186
187
/* Tunables. */
188
static int vtpci_disable_msix = 0;
189
TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix);
190
191
static device_method_t vtpci_methods[] = {
192
	/* Device interface. */
193
	DEVMETHOD(device_probe,			  vtpci_probe),
194
	DEVMETHOD(device_attach,		  vtpci_attach),
195
	DEVMETHOD(device_detach,		  vtpci_detach),
196
	DEVMETHOD(device_suspend,		  vtpci_suspend),
197
	DEVMETHOD(device_resume,		  vtpci_resume),
198
	DEVMETHOD(device_shutdown,		  vtpci_shutdown),
199
200
	/* Bus interface. */
201
	DEVMETHOD(bus_driver_added,		  vtpci_driver_added),
202
	DEVMETHOD(bus_child_detached,		  vtpci_child_detached),
203
	DEVMETHOD(bus_read_ivar,		  vtpci_read_ivar),
204
	DEVMETHOD(bus_write_ivar,		  vtpci_write_ivar),
205
206
	/* VirtIO bus interface. */
207
	DEVMETHOD(virtio_bus_negotiate_features,  vtpci_negotiate_features),
208
	DEVMETHOD(virtio_bus_with_feature,	  vtpci_with_feature),
209
	DEVMETHOD(virtio_bus_alloc_virtqueues,	  vtpci_alloc_virtqueues),
210
	DEVMETHOD(virtio_bus_setup_intr,	  vtpci_setup_intr),
211
	DEVMETHOD(virtio_bus_stop,		  vtpci_stop),
212
	DEVMETHOD(virtio_bus_reinit,		  vtpci_reinit),
213
	DEVMETHOD(virtio_bus_reinit_complete,	  vtpci_reinit_complete),
214
	DEVMETHOD(virtio_bus_notify_vq,		  vtpci_notify_virtqueue),
215
	DEVMETHOD(virtio_bus_read_device_config,  vtpci_read_dev_config),
216
	DEVMETHOD(virtio_bus_write_device_config, vtpci_write_dev_config),
217
218
	DEVMETHOD_END
219
};
220
221
static driver_t vtpci_driver = {
222
	"virtio_pci",
223
	vtpci_methods,
224
	sizeof(struct vtpci_softc)
225
};
226
227
devclass_t vtpci_devclass;
228
229
DRIVER_MODULE(virtio_pci, pci, vtpci_driver, vtpci_devclass, 0, 0);
230
MODULE_VERSION(virtio_pci, 1);
100
MODULE_VERSION(virtio_pci, 1);
231
MODULE_DEPEND(virtio_pci, pci, 1, 1, 1);
101
MODULE_DEPEND(virtio_pci, pci, 1, 1, 1);
232
MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1);
102
MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1);
233
103
234
static int
104
int vtpci_disable_msix = 0;
235
vtpci_probe(device_t dev)
105
TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix);
106
107
static uint8_t
108
vtpci_read_isr(struct vtpci_common *cn)
236
{
109
{
237
	char desc[36];
110
	return (VIRTIO_PCI_READ_ISR(cn->vtpci_dev));
238
	const char *name;
111
}
239
112
240
	if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
113
static uint16_t
241
		return (ENXIO);
114
vtpci_get_vq_size(struct vtpci_common *cn, int idx)
115
{
116
	return (VIRTIO_PCI_GET_VQ_SIZE(cn->vtpci_dev, idx));
117
}
242
118
243
	if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN ||
119
static bus_size_t
244
	    pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MAX)
120
vtpci_get_vq_notify_off(struct vtpci_common *cn, int idx)
245
		return (ENXIO);
121
{
122
	return (VIRTIO_PCI_GET_VQ_NOTIFY_OFF(cn->vtpci_dev, idx));
123
}
246
124
247
	if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION)
125
static void
248
		return (ENXIO);
126
vtpci_set_vq(struct vtpci_common *cn, struct virtqueue *vq)
127
{
128
	VIRTIO_PCI_SET_VQ(cn->vtpci_dev, vq);
129
}
249
130
250
	name = virtio_device_name(pci_get_subdevice(dev));
131
static void
251
	if (name == NULL)
132
vtpci_disable_vq(struct vtpci_common *cn, int idx)
252
		name = "Unknown";
133
{
134
	VIRTIO_PCI_DISABLE_VQ(cn->vtpci_dev, idx);
135
}
253
136
254
	snprintf(desc, sizeof(desc), "VirtIO PCI %s adapter", name);
137
static int
255
	device_set_desc_copy(dev, desc);
138
vtpci_register_cfg_msix(struct vtpci_common *cn, struct vtpci_interrupt *intr)
256
139
{
257
	return (BUS_PROBE_DEFAULT);
140
	return (VIRTIO_PCI_REGISTER_CFG_MSIX(cn->vtpci_dev, intr));
258
}
141
}
259
142
260
static int
143
static int
261
vtpci_attach(device_t dev)
144
vtpci_register_vq_msix(struct vtpci_common *cn, int idx,
145
    struct vtpci_interrupt *intr)
262
{
146
{
263
	struct vtpci_softc *sc;
147
	return (VIRTIO_PCI_REGISTER_VQ_MSIX(cn->vtpci_dev, idx, intr));
264
	device_t child;
148
}
265
	int rid;
266
149
267
	sc = device_get_softc(dev);
150
void
268
	sc->vtpci_dev = dev;
151
vtpci_init(struct vtpci_common *cn, device_t dev, bool modern)
152
{
269
153
154
	cn->vtpci_dev = dev;
155
270
	pci_enable_busmaster(dev);
156
	pci_enable_busmaster(dev);
271
157
272
	rid = PCIR_BAR(0);
158
	if (modern)
273
	sc->vtpci_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
159
		cn->vtpci_flags |= VTPCI_FLAG_MODERN;
274
	    RF_ACTIVE);
275
	if (sc->vtpci_res == NULL) {
276
		device_printf(dev, "cannot map I/O space\n");
277
		return (ENXIO);
278
	}
279
280
	if (pci_find_cap(dev, PCIY_MSI, NULL) != 0)
160
	if (pci_find_cap(dev, PCIY_MSI, NULL) != 0)
281
		sc->vtpci_flags |= VTPCI_FLAG_NO_MSI;
161
		cn->vtpci_flags |= VTPCI_FLAG_NO_MSI;
162
	if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0)
163
		cn->vtpci_flags |= VTPCI_FLAG_NO_MSIX;
282
164
283
	if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
165
	vtpci_setup_sysctl(cn);
284
		rid = PCIR_BAR(1);
166
}
285
		sc->vtpci_msix_res = bus_alloc_resource_any(dev,
286
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
287
	}
288
167
289
	if (sc->vtpci_msix_res == NULL)
168
int
290
		sc->vtpci_flags |= VTPCI_FLAG_NO_MSIX;
169
vtpci_add_child(struct vtpci_common *cn)
170
{
171
	device_t dev, child;
291
172
292
	vtpci_reset(sc);
173
	dev = cn->vtpci_dev;
293
174
294
	/* Tell the host we've noticed this device. */
175
	child = device_add_child(dev, NULL, -1);
295
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
176
	if (child == NULL) {
296
297
	if ((child = device_add_child(dev, NULL, -1)) == NULL) {
298
		device_printf(dev, "cannot create child device\n");
177
		device_printf(dev, "cannot create child device\n");
299
		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
300
		vtpci_detach(dev);
301
		return (ENOMEM);
178
		return (ENOMEM);
302
	}
179
	}
303
180
304
	sc->vtpci_child_dev = child;
181
	cn->vtpci_child_dev = child;
305
	vtpci_probe_and_attach_child(sc);
306
182
307
	return (0);
183
	return (0);
308
}
184
}
309
185
310
static int
186
int
311
vtpci_detach(device_t dev)
187
vtpci_delete_child(struct vtpci_common *cn)
312
{
188
{
313
	struct vtpci_softc *sc;
189
	device_t dev, child;
314
	device_t child;
315
	int error;
190
	int error;
316
191
317
	sc = device_get_softc(dev);
192
	dev = cn->vtpci_dev;
318
193
319
	if ((child = sc->vtpci_child_dev) != NULL) {
194
	child = cn->vtpci_child_dev;
195
	if (child != NULL) {
320
		error = device_delete_child(dev, child);
196
		error = device_delete_child(dev, child);
321
		if (error)
197
		if (error)
322
			return (error);
198
			return (error);
323
		sc->vtpci_child_dev = NULL;
199
		cn->vtpci_child_dev = NULL;
324
	}
200
	}
325
201
326
	vtpci_reset(sc);
327
328
	if (sc->vtpci_msix_res != NULL) {
329
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1),
330
		    sc->vtpci_msix_res);
331
		sc->vtpci_msix_res = NULL;
332
	}
333
334
	if (sc->vtpci_res != NULL) {
335
		bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0),
336
		    sc->vtpci_res);
337
		sc->vtpci_res = NULL;
338
	}
339
340
	return (0);
202
	return (0);
341
}
203
}
342
204
343
static int
205
void
344
vtpci_suspend(device_t dev)
206
vtpci_child_detached(struct vtpci_common *cn)
345
{
207
{
346
208
347
	return (bus_generic_suspend(dev));
209
	vtpci_release_child_resources(cn);
348
}
349
210
350
static int
211
	cn->vtpci_child_feat_desc = NULL;
351
vtpci_resume(device_t dev)
212
	cn->vtpci_host_features = 0;
352
{
213
	cn->vtpci_features = 0;
353
354
	return (bus_generic_resume(dev));
355
}
214
}
356
215
357
static int
216
int
358
vtpci_shutdown(device_t dev)
217
vtpci_reinit(struct vtpci_common *cn)
359
{
218
{
219
	int idx, error;
360
220
361
	(void) bus_generic_shutdown(dev);
221
	for (idx = 0; idx < cn->vtpci_nvqs; idx++) {
362
	/* Forcibly stop the host device. */
222
		error = vtpci_reinit_virtqueue(cn, idx);
363
	vtpci_stop(dev);
223
		if (error)
224
			return (error);
225
	}
364
226
227
	if (vtpci_is_msix_enabled(cn)) {
228
		error = vtpci_set_host_msix_vectors(cn);
229
		if (error)
230
			return (error);
231
	}
232
365
	return (0);
233
	return (0);
366
}
234
}
367
235
368
static void
236
static void
369
vtpci_driver_added(device_t dev, driver_t *driver)
237
vtpci_describe_features(struct vtpci_common *cn, const char *msg,
238
    uint64_t features)
370
{
239
{
371
	struct vtpci_softc *sc;
240
	device_t dev, child;
372
241
373
	sc = device_get_softc(dev);
242
	dev = cn->vtpci_dev;
243
	child = cn->vtpci_child_dev;
374
244
375
	vtpci_probe_and_attach_child(sc);
245
	if (device_is_attached(child) || bootverbose == 0)
246
		return;
247
248
	virtio_describe(dev, msg, features, cn->vtpci_child_feat_desc);
376
}
249
}
377
250
378
static void
251
uint64_t
379
vtpci_child_detached(device_t dev, device_t child)
252
vtpci_negotiate_features(struct vtpci_common *cn,
253
    uint64_t child_features, uint64_t host_features)
380
{
254
{
381
	struct vtpci_softc *sc;
255
	uint64_t features;
382
256
383
	sc = device_get_softc(dev);
257
	cn->vtpci_host_features = host_features;
258
	vtpci_describe_features(cn, "host", host_features);
384
259
385
	vtpci_reset(sc);
260
	/*
386
	vtpci_release_child_resources(sc);
261
	 * Limit negotiated features to what the driver, virtqueue, and
262
	 * host all support.
263
	 */
264
	features = host_features & child_features;
265
	features = virtio_filter_transport_features(features);
266
267
	cn->vtpci_features = features;
268
	vtpci_describe_features(cn, "negotiated", features);
269
270
	return (features);
387
}
271
}
388
272
389
static int
273
int
390
vtpci_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
274
vtpci_with_feature(struct vtpci_common *cn, uint64_t feature)
391
{
275
{
392
	struct vtpci_softc *sc;
276
	return ((cn->vtpci_features & feature) != 0);
277
}
393
278
394
	sc = device_get_softc(dev);
279
int
280
vtpci_read_ivar(struct vtpci_common *cn, int index, uintptr_t *result)
281
{
282
	device_t dev;
283
	int error;
395
284
396
	if (sc->vtpci_child_dev != child)
285
	dev = cn->vtpci_dev;
397
		return (ENOENT);
286
	error = 0;
398
287
399
	switch (index) {
288
	switch (index) {
400
	case VIRTIO_IVAR_DEVTYPE:
401
	case VIRTIO_IVAR_SUBDEVICE:
289
	case VIRTIO_IVAR_SUBDEVICE:
402
		*result = pci_get_subdevice(dev);
290
		*result = pci_get_subdevice(dev);
403
		break;
291
		break;
Lines 410-509 Link Here
410
	case VIRTIO_IVAR_SUBVENDOR:
298
	case VIRTIO_IVAR_SUBVENDOR:
411
		*result = pci_get_subdevice(dev);
299
		*result = pci_get_subdevice(dev);
412
		break;
300
		break;
301
	case VIRTIO_IVAR_MODERN:
302
		*result = vtpci_is_modern(cn);
303
		break;
413
	default:
304
	default:
414
		return (ENOENT);
305
		error = ENOENT;
415
	}
306
	}
416
307
417
	return (0);
308
	return (error);
418
}
309
}
419
310
420
static int
311
int
421
vtpci_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
312
vtpci_write_ivar(struct vtpci_common *cn, int index, uintptr_t value)
422
{
313
{
423
	struct vtpci_softc *sc;
314
	int error;
424
315
425
	sc = device_get_softc(dev);
316
	error = 0;
426
317
427
	if (sc->vtpci_child_dev != child)
428
		return (ENOENT);
429
430
	switch (index) {
318
	switch (index) {
431
	case VIRTIO_IVAR_FEATURE_DESC:
319
	case VIRTIO_IVAR_FEATURE_DESC:
432
		sc->vtpci_child_feat_desc = (void *) value;
320
		cn->vtpci_child_feat_desc = (void *) value;
433
		break;
321
		break;
434
	default:
322
	default:
435
		return (ENOENT);
323
		error = ENOENT;
436
	}
324
	}
437
325
438
	return (0);
326
	return (error);
439
}
327
}
440
328
441
static uint64_t
329
int
442
vtpci_negotiate_features(device_t dev, uint64_t child_features)
330
vtpci_alloc_virtqueues(struct vtpci_common *cn, int flags, int nvqs,
331
    struct vq_alloc_info *vq_info)
443
{
332
{
444
	struct vtpci_softc *sc;
333
	device_t dev;
445
	uint64_t host_features, features;
334
	int idx, align, error;
446
335
447
	sc = device_get_softc(dev);
336
	dev = cn->vtpci_dev;
448
337
449
	host_features = vtpci_read_config_4(sc, VIRTIO_PCI_HOST_FEATURES);
450
	vtpci_describe_features(sc, "host", host_features);
451
452
	/*
338
	/*
453
	 * Limit negotiated features to what the driver, virtqueue, and
339
	 * This is VIRTIO_PCI_VRING_ALIGN from legacy VirtIO. In modern VirtIO,
454
	 * host all support.
340
	 * the tables do not have to be allocated contiguously, but we do so
341
	 * anyways.
455
	 */
342
	 */
456
	features = host_features & child_features;
343
	align = 4096;
457
	features = virtqueue_filter_features(features);
458
	sc->vtpci_features = features;
459
344
460
	vtpci_describe_features(sc, "negotiated", features);
345
	if (cn->vtpci_nvqs != 0)
461
	vtpci_write_config_4(sc, VIRTIO_PCI_GUEST_FEATURES, features);
462
463
	return (features);
464
}
465
466
static int
467
vtpci_with_feature(device_t dev, uint64_t feature)
468
{
469
	struct vtpci_softc *sc;
470
471
	sc = device_get_softc(dev);
472
473
	return ((sc->vtpci_features & feature) != 0);
474
}
475
476
static int
477
vtpci_alloc_virtqueues(device_t dev, int flags, int nvqs,
478
    struct vq_alloc_info *vq_info)
479
{
480
	struct vtpci_softc *sc;
481
	struct virtqueue *vq;
482
	struct vtpci_virtqueue *vqx;
483
	struct vq_alloc_info *info;
484
	int idx, error;
485
	uint16_t size;
486
487
	sc = device_get_softc(dev);
488
489
	if (sc->vtpci_nvqs != 0)
490
		return (EALREADY);
346
		return (EALREADY);
491
	if (nvqs <= 0)
347
	if (nvqs <= 0)
492
		return (EINVAL);
348
		return (EINVAL);
493
349
494
	sc->vtpci_vqs = malloc(nvqs * sizeof(struct vtpci_virtqueue),
350
	cn->vtpci_vqs = malloc(nvqs * sizeof(struct vtpci_virtqueue),
495
	    M_DEVBUF, M_NOWAIT | M_ZERO);
351
	    M_DEVBUF, M_NOWAIT | M_ZERO);
496
	if (sc->vtpci_vqs == NULL)
352
	if (cn->vtpci_vqs == NULL)
497
		return (ENOMEM);
353
		return (ENOMEM);
498
354
499
	for (idx = 0; idx < nvqs; idx++) {
355
	for (idx = 0; idx < nvqs; idx++) {
500
		vqx = &sc->vtpci_vqs[idx];
356
		struct vtpci_virtqueue *vqx;
357
		struct vq_alloc_info *info;
358
		struct virtqueue *vq;
359
		bus_size_t notify_offset;
360
		uint16_t size;
361
362
		vqx = &cn->vtpci_vqs[idx];
501
		info = &vq_info[idx];
363
		info = &vq_info[idx];
502
364
503
		vtpci_select_virtqueue(sc, idx);
365
		size = vtpci_get_vq_size(cn, idx);
504
		size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);
366
		notify_offset = vtpci_get_vq_notify_off(cn, idx);
505
367
506
		error = virtqueue_alloc(dev, idx, size, VIRTIO_PCI_VRING_ALIGN,
368
		error = virtqueue_alloc(dev, idx, size, notify_offset, align,
507
		    0xFFFFFFFFUL, info, &vq);
369
		    0xFFFFFFFFUL, info, &vq);
508
		if (error) {
370
		if (error) {
509
			device_printf(dev,
371
			device_printf(dev,
Lines 511-780 Link Here
511
			break;
373
			break;
512
		}
374
		}
513
375
514
		vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
376
		vtpci_set_vq(cn, vq);
515
		    virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
516
377
517
		vqx->vtv_vq = *info->vqai_vq = vq;
378
		vqx->vtv_vq = *info->vqai_vq = vq;
518
		vqx->vtv_no_intr = info->vqai_intr == NULL;
379
		vqx->vtv_no_intr = info->vqai_intr == NULL;
519
380
520
		sc->vtpci_nvqs++;
381
		cn->vtpci_nvqs++;
521
	}
382
	}
522
383
523
	if (error)
384
	if (error)
524
		vtpci_free_virtqueues(sc);
385
		vtpci_free_virtqueues(cn);
525
386
526
	return (error);
387
	return (error);
527
}
388
}
528
389
529
static int
390
static int
530
vtpci_setup_intr(device_t dev, enum intr_type type)
391
vtpci_alloc_msix(struct vtpci_common *cn, int nvectors)
531
{
392
{
532
	struct vtpci_softc *sc;
533
	int attempt, error;
534
535
	sc = device_get_softc(dev);
536
537
	for (attempt = 0; attempt < 5; attempt++) {
538
		/*
539
		 * Start with the most desirable interrupt configuration and
540
		 * fallback towards less desirable ones.
541
		 */
542
		switch (attempt) {
543
		case 0:
544
			error = vtpci_alloc_intr_msix_pervq(sc);
545
			break;
546
		case 1:
547
			error = vtpci_alloc_intr_msix_shared(sc);
548
			break;
549
		case 2:
550
			error = vtpci_alloc_intr_msi(sc);
551
			break;
552
		case 3:
553
			error = vtpci_alloc_intr_legacy(sc);
554
			break;
555
		default:
556
			device_printf(dev,
557
			    "exhausted all interrupt allocation attempts\n");
558
			return (ENXIO);
559
		}
560
561
		if (error == 0 && vtpci_setup_interrupts(sc, type) == 0)
562
			break;
563
564
		vtpci_cleanup_setup_intr_attempt(sc);
565
	}
566
567
	if (bootverbose) {
568
		if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
569
			device_printf(dev, "using legacy interrupt\n");
570
		else if (sc->vtpci_flags & VTPCI_FLAG_MSI)
571
			device_printf(dev, "using MSI interrupt\n");
572
		else if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX)
573
			device_printf(dev, "using shared MSIX interrupts\n");
574
		else
575
			device_printf(dev, "using per VQ MSIX interrupts\n");
576
	}
577
578
	return (0);
579
}
580
581
static void
582
vtpci_stop(device_t dev)
583
{
584
585
	vtpci_reset(device_get_softc(dev));
586
}
587
588
static int
589
vtpci_reinit(device_t dev, uint64_t features)
590
{
591
	struct vtpci_softc *sc;
592
	int idx, error;
593
594
	sc = device_get_softc(dev);
595
596
	/*
597
	 * Redrive the device initialization. This is a bit of an abuse of
598
	 * the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to
599
	 * play nice.
600
	 *
601
	 * We do not allow the host device to change from what was originally
602
	 * negotiated beyond what the guest driver changed. MSIX state should
603
	 * not change, number of virtqueues and their size remain the same, etc.
604
	 * This will need to be rethought when we want to support migration.
605
	 */
606
607
	if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
608
		vtpci_stop(dev);
609
610
	/*
611
	 * Quickly drive the status through ACK and DRIVER. The device
612
	 * does not become usable again until vtpci_reinit_complete().
613
	 */
614
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
615
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
616
617
	vtpci_negotiate_features(dev, features);
618
619
	for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
620
		error = vtpci_reinit_virtqueue(sc, idx);
621
		if (error)
622
			return (error);
623
	}
624
625
	if (sc->vtpci_flags & VTPCI_FLAG_MSIX) {
626
		error = vtpci_set_host_msix_vectors(sc);
627
		if (error)
628
			return (error);
629
	}
630
631
	return (0);
632
}
633
634
static void
635
vtpci_reinit_complete(device_t dev)
636
{
637
638
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
639
}
640
641
static void
642
vtpci_notify_virtqueue(device_t dev, uint16_t queue)
643
{
644
	struct vtpci_softc *sc;
645
646
	sc = device_get_softc(dev);
647
648
	vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_NOTIFY, queue);
649
}
650
651
static uint8_t
652
vtpci_get_status(device_t dev)
653
{
654
	struct vtpci_softc *sc;
655
656
	sc = device_get_softc(dev);
657
658
	return (vtpci_read_config_1(sc, VIRTIO_PCI_STATUS));
659
}
660
661
static void
662
vtpci_set_status(device_t dev, uint8_t status)
663
{
664
	struct vtpci_softc *sc;
665
666
	sc = device_get_softc(dev);
667
668
	if (status != VIRTIO_CONFIG_STATUS_RESET)
669
		status |= vtpci_get_status(dev);
670
671
	vtpci_write_config_1(sc, VIRTIO_PCI_STATUS, status);
672
}
673
674
static void
675
vtpci_read_dev_config(device_t dev, bus_size_t offset,
676
    void *dst, int length)
677
{
678
	struct vtpci_softc *sc;
679
	bus_size_t off;
680
	uint8_t *d;
681
	int size;
682
683
	sc = device_get_softc(dev);
684
	off = VIRTIO_PCI_CONFIG(sc) + offset;
685
686
	for (d = dst; length > 0; d += size, off += size, length -= size) {
687
		if (length >= 4) {
688
			size = 4;
689
			*(uint32_t *)d = vtpci_read_config_4(sc, off);
690
		} else if (length >= 2) {
691
			size = 2;
692
			*(uint16_t *)d = vtpci_read_config_2(sc, off);
693
		} else {
694
			size = 1;
695
			*d = vtpci_read_config_1(sc, off);
696
		}
697
	}
698
}
699
700
static void
701
vtpci_write_dev_config(device_t dev, bus_size_t offset,
702
    void *src, int length)
703
{
704
	struct vtpci_softc *sc;
705
	bus_size_t off;
706
	uint8_t *s;
707
	int size;
708
709
	sc = device_get_softc(dev);
710
	off = VIRTIO_PCI_CONFIG(sc) + offset;
711
712
	for (s = src; length > 0; s += size, off += size, length -= size) {
713
		if (length >= 4) {
714
			size = 4;
715
			vtpci_write_config_4(sc, off, *(uint32_t *)s);
716
		} else if (length >= 2) {
717
			size = 2;
718
			vtpci_write_config_2(sc, off, *(uint16_t *)s);
719
		} else {
720
			size = 1;
721
			vtpci_write_config_1(sc, off, *s);
722
		}
723
	}
724
}
725
726
static void
727
vtpci_describe_features(struct vtpci_softc *sc, const char *msg,
728
    uint64_t features)
729
{
730
	device_t dev, child;
731
732
	dev = sc->vtpci_dev;
733
	child = sc->vtpci_child_dev;
734
735
	if (device_is_attached(child) || bootverbose == 0)
736
		return;
737
738
	virtio_describe(dev, msg, features, sc->vtpci_child_feat_desc);
739
}
740
741
static void
742
vtpci_probe_and_attach_child(struct vtpci_softc *sc)
743
{
744
	device_t dev, child;
745
746
	dev = sc->vtpci_dev;
747
	child = sc->vtpci_child_dev;
748
749
	if (child == NULL)
750
		return;
751
752
	if (device_get_state(child) != DS_NOTPRESENT)
753
		return;
754
755
	if (device_probe(child) != 0)
756
		return;
757
758
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
759
	if (device_attach(child) != 0) {
760
		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
761
		vtpci_reset(sc);
762
		vtpci_release_child_resources(sc);
763
		/* Reset status for future attempt. */
764
		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
765
	} else {
766
		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
767
		VIRTIO_ATTACH_COMPLETED(child);
768
	}
769
}
770
771
static int
772
vtpci_alloc_msix(struct vtpci_softc *sc, int nvectors)
773
{
774
	device_t dev;
393
	device_t dev;
775
	int nmsix, cnt, required;
394
	int nmsix, cnt, required;
776
395
777
	dev = sc->vtpci_dev;
396
	dev = cn->vtpci_dev;
778
397
779
	/* Allocate an additional vector for the config changes. */
398
	/* Allocate an additional vector for the config changes. */
780
	required = nvectors + 1;
399
	required = nvectors + 1;
Lines 785-791 Link Here
785
404
786
	cnt = required;
405
	cnt = required;
787
	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
406
	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
788
		sc->vtpci_nmsix_resources = required;
407
		cn->vtpci_nmsix_resources = required;
789
		return (0);
408
		return (0);
790
	}
409
	}
791
410
Lines 795-806 Link Here
795
}
414
}
796
415
797
static int
416
static int
798
vtpci_alloc_msi(struct vtpci_softc *sc)
417
vtpci_alloc_msi(struct vtpci_common *cn)
799
{
418
{
800
	device_t dev;
419
	device_t dev;
801
	int nmsi, cnt, required;
420
	int nmsi, cnt, required;
802
421
803
	dev = sc->vtpci_dev;
422
	dev = cn->vtpci_dev;
804
	required = 1;
423
	required = 1;
805
424
806
	nmsi = pci_msi_count(dev);
425
	nmsi = pci_msi_count(dev);
Lines 817-896 Link Here
817
}
436
}
818
437
819
static int
438
static int
820
vtpci_alloc_intr_msix_pervq(struct vtpci_softc *sc)
439
vtpci_alloc_intr_msix_pervq(struct vtpci_common *cn)
821
{
440
{
822
	int i, nvectors, error;
441
	int i, nvectors, error;
823
442
824
	if (vtpci_disable_msix != 0 ||
443
	if (vtpci_disable_msix != 0 || cn->vtpci_flags & VTPCI_FLAG_NO_MSIX)
825
	    sc->vtpci_flags & VTPCI_FLAG_NO_MSIX)
826
		return (ENOTSUP);
444
		return (ENOTSUP);
827
445
828
	for (nvectors = 0, i = 0; i < sc->vtpci_nvqs; i++) {
446
	for (nvectors = 0, i = 0; i < cn->vtpci_nvqs; i++) {
829
		if (sc->vtpci_vqs[i].vtv_no_intr == 0)
447
		if (cn->vtpci_vqs[i].vtv_no_intr == 0)
830
			nvectors++;
448
			nvectors++;
831
	}
449
	}
832
450
833
	error = vtpci_alloc_msix(sc, nvectors);
451
	error = vtpci_alloc_msix(cn, nvectors);
834
	if (error)
452
	if (error)
835
		return (error);
453
		return (error);
836
454
837
	sc->vtpci_flags |= VTPCI_FLAG_MSIX;
455
	cn->vtpci_flags |= VTPCI_FLAG_MSIX;
838
456
839
	return (0);
457
	return (0);
840
}
458
}
841
459
842
static int
460
static int
843
vtpci_alloc_intr_msix_shared(struct vtpci_softc *sc)
461
vtpci_alloc_intr_msix_shared(struct vtpci_common *cn)
844
{
462
{
845
	int error;
463
	int error;
846
464
847
	if (vtpci_disable_msix != 0 ||
465
	if (vtpci_disable_msix != 0 || cn->vtpci_flags & VTPCI_FLAG_NO_MSIX)
848
	    sc->vtpci_flags & VTPCI_FLAG_NO_MSIX)
849
		return (ENOTSUP);
466
		return (ENOTSUP);
850
467
851
	error = vtpci_alloc_msix(sc, 1);
468
	error = vtpci_alloc_msix(cn, 1);
852
	if (error)
469
	if (error)
853
		return (error);
470
		return (error);
854
471
855
	sc->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX;
472
	cn->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX;
856
473
857
	return (0);
474
	return (0);
858
}
475
}
859
476
860
static int
477
static int
861
vtpci_alloc_intr_msi(struct vtpci_softc *sc)
478
vtpci_alloc_intr_msi(struct vtpci_common *cn)
862
{
479
{
863
	int error;
480
	int error;
864
481
865
	/* Only BHyVe supports MSI. */
482
	/* Only BHyVe supports MSI. */
866
	if (sc->vtpci_flags & VTPCI_FLAG_NO_MSI)
483
	if (cn->vtpci_flags & VTPCI_FLAG_NO_MSI)
867
		return (ENOTSUP);
484
		return (ENOTSUP);
868
485
869
	error = vtpci_alloc_msi(sc);
486
	error = vtpci_alloc_msi(cn);
870
	if (error)
487
	if (error)
871
		return (error);
488
		return (error);
872
489
873
	sc->vtpci_flags |= VTPCI_FLAG_MSI;
490
	cn->vtpci_flags |= VTPCI_FLAG_MSI;
874
491
875
	return (0);
492
	return (0);
876
}
493
}
877
494
878
static int
495
static int
879
vtpci_alloc_intr_legacy(struct vtpci_softc *sc)
496
vtpci_alloc_intr_intx(struct vtpci_common *cn)
880
{
497
{
881
498
882
	sc->vtpci_flags |= VTPCI_FLAG_LEGACY;
499
	cn->vtpci_flags |= VTPCI_FLAG_INTX;
883
500
884
	return (0);
501
	return (0);
885
}
502
}
886
503
887
static int
504
static int
888
vtpci_alloc_interrupt(struct vtpci_softc *sc, int rid, int flags,
505
vtpci_alloc_interrupt(struct vtpci_common *cn, int rid, int flags,
889
    struct vtpci_interrupt *intr)
506
    struct vtpci_interrupt *intr)
890
{
507
{
891
	struct resource *irq;
508
	struct resource *irq;
892
509
893
	irq = bus_alloc_resource_any(sc->vtpci_dev, SYS_RES_IRQ, &rid, flags);
510
	irq = bus_alloc_resource_any(cn->vtpci_dev, SYS_RES_IRQ, &rid, flags);
894
	if (irq == NULL)
511
	if (irq == NULL)
895
		return (ENXIO);
512
		return (ENXIO);
896
513
Lines 900-939 Link Here
900
	return (0);
517
	return (0);
901
}
518
}
902
519
520
static void
521
vtpci_free_interrupt(struct vtpci_common *cn, struct vtpci_interrupt *intr)
522
{
523
	device_t dev;
524
525
	dev = cn->vtpci_dev;
526
527
	if (intr->vti_handler != NULL) {
528
		bus_teardown_intr(dev, intr->vti_irq, intr->vti_handler);
529
		intr->vti_handler = NULL;
530
	}
531
532
	if (intr->vti_irq != NULL) {
533
		bus_release_resource(dev, SYS_RES_IRQ, intr->vti_rid,
534
		    intr->vti_irq);
535
		intr->vti_irq = NULL;
536
		intr->vti_rid = -1;
537
	}
538
}
539
540
static void
541
vtpci_free_interrupts(struct vtpci_common *cn)
542
{
543
	struct vtpci_interrupt *intr;
544
	int i, nvq_intrs;
545
546
	vtpci_free_interrupt(cn, &cn->vtpci_device_interrupt);
547
548
	if (cn->vtpci_nmsix_resources != 0) {
549
		nvq_intrs = cn->vtpci_nmsix_resources - 1;
550
		cn->vtpci_nmsix_resources = 0;
551
552
		if ((intr = cn->vtpci_msix_vq_interrupts) != NULL) {
553
			for (i = 0; i < nvq_intrs; i++, intr++)
554
				vtpci_free_interrupt(cn, intr);
555
556
			free(cn->vtpci_msix_vq_interrupts, M_DEVBUF);
557
			cn->vtpci_msix_vq_interrupts = NULL;
558
		}
559
	}
560
561
	if (cn->vtpci_flags & (VTPCI_FLAG_MSI | VTPCI_FLAG_MSIX))
562
		pci_release_msi(cn->vtpci_dev);
563
564
	cn->vtpci_flags &= ~VTPCI_FLAG_ITYPE_MASK;
565
}
566
567
static void
568
vtpci_free_virtqueues(struct vtpci_common *cn)
569
{
570
	struct vtpci_virtqueue *vqx;
571
	int idx;
572
573
	for (idx = 0; idx < cn->vtpci_nvqs; idx++) {
574
		vtpci_disable_vq(cn, idx);
575
576
		vqx = &cn->vtpci_vqs[idx];
577
		virtqueue_free(vqx->vtv_vq);
578
		vqx->vtv_vq = NULL;
579
	}
580
581
	free(cn->vtpci_vqs, M_DEVBUF);
582
	cn->vtpci_vqs = NULL;
583
	cn->vtpci_nvqs = 0;
584
}
585
586
void
587
vtpci_release_child_resources(struct vtpci_common *cn)
588
{
589
590
	vtpci_free_interrupts(cn);
591
	vtpci_free_virtqueues(cn);
592
}
593
594
static void
595
vtpci_cleanup_setup_intr_attempt(struct vtpci_common *cn)
596
{
597
	int idx;
598
599
	if (cn->vtpci_flags & VTPCI_FLAG_MSIX) {
600
		vtpci_register_cfg_msix(cn, NULL);
601
602
		for (idx = 0; idx < cn->vtpci_nvqs; idx++)
603
			vtpci_register_vq_msix(cn, idx, NULL);
604
	}
605
606
	vtpci_free_interrupts(cn);
607
}
608
903
static int
609
static int
904
vtpci_alloc_intr_resources(struct vtpci_softc *sc)
610
vtpci_alloc_intr_resources(struct vtpci_common *cn)
905
{
611
{
906
	struct vtpci_interrupt *intr;
612
	struct vtpci_interrupt *intr;
907
	int i, rid, flags, nvq_intrs, error;
613
	int i, rid, flags, nvq_intrs, error;
908
614
909
	rid = 0;
910
	flags = RF_ACTIVE;
615
	flags = RF_ACTIVE;
911
616
912
	if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
617
	if (cn->vtpci_flags & VTPCI_FLAG_INTX) {
618
		rid = 0;
913
		flags |= RF_SHAREABLE;
619
		flags |= RF_SHAREABLE;
914
	else
620
	} else
915
		rid = 1;
621
		rid = 1;
916
622
917
	/*
623
	/*
918
	 * For legacy and MSI interrupts, this single resource handles all
624
	 * When using INTX or MSI interrupts, this resource handles all
919
	 * interrupts. For MSIX, this resource is used for the configuration
625
	 * interrupts. When using MSIX, this resource handles just the
920
	 * changed interrupt.
626
	 * configuration changed interrupt.
921
	 */
627
	 */
922
	intr = &sc->vtpci_device_interrupt;
628
	intr = &cn->vtpci_device_interrupt;
923
	error = vtpci_alloc_interrupt(sc, rid, flags, intr);
629
924
	if (error || sc->vtpci_flags & (VTPCI_FLAG_LEGACY | VTPCI_FLAG_MSI))
630
	error = vtpci_alloc_interrupt(cn, rid, flags, intr);
631
	if (error || cn->vtpci_flags & (VTPCI_FLAG_INTX | VTPCI_FLAG_MSI))
925
		return (error);
632
		return (error);
926
633
927
	/* Subtract one for the configuration changed interrupt. */
634
	/*
928
	nvq_intrs = sc->vtpci_nmsix_resources - 1;
635
	 * Now allocate the interrupts for the virtqueues. This may be one
636
	 * for all the virtqueues, or one for each virtqueue. Subtract one
637
	 * below for because of the configuration changed interrupt.
638
	 */
639
	nvq_intrs = cn->vtpci_nmsix_resources - 1;
929
640
930
	intr = sc->vtpci_msix_vq_interrupts = malloc(nvq_intrs *
641
	cn->vtpci_msix_vq_interrupts = malloc(nvq_intrs *
931
	    sizeof(struct vtpci_interrupt), M_DEVBUF, M_NOWAIT | M_ZERO);
642
	    sizeof(struct vtpci_interrupt), M_DEVBUF, M_NOWAIT | M_ZERO);
932
	if (sc->vtpci_msix_vq_interrupts == NULL)
643
	if (cn->vtpci_msix_vq_interrupts == NULL)
933
		return (ENOMEM);
644
		return (ENOMEM);
934
645
646
	intr = cn->vtpci_msix_vq_interrupts;
647
935
	for (i = 0, rid++; i < nvq_intrs; i++, rid++, intr++) {
648
	for (i = 0, rid++; i < nvq_intrs; i++, rid++, intr++) {
936
		error = vtpci_alloc_interrupt(sc, rid, flags, intr);
649
		error = vtpci_alloc_interrupt(cn, rid, flags, intr);
937
		if (error)
650
		if (error)
938
			return (error);
651
			return (error);
939
	}
652
	}
Lines 942-975 Link Here
942
}
655
}
943
656
944
static int
657
static int
945
vtpci_setup_legacy_interrupt(struct vtpci_softc *sc, enum intr_type type)
658
vtpci_setup_intx_interrupt(struct vtpci_common *cn, enum intr_type type)
946
{
659
{
947
	struct vtpci_interrupt *intr;
660
	struct vtpci_interrupt *intr;
948
	int error;
661
	int error;
949
662
950
	intr = &sc->vtpci_device_interrupt;
663
	intr = &cn->vtpci_device_interrupt;
951
	error = bus_setup_intr(sc->vtpci_dev, intr->vti_irq, type, NULL,
952
	    vtpci_legacy_intr, sc, &intr->vti_handler);
953
664
665
	error = bus_setup_intr(cn->vtpci_dev, intr->vti_irq, type, NULL,
666
	    vtpci_intx_intr, cn, &intr->vti_handler);
667
954
	return (error);
668
	return (error);
955
}
669
}
956
670
957
static int
671
static int
958
vtpci_setup_pervq_msix_interrupts(struct vtpci_softc *sc, enum intr_type type)
672
vtpci_setup_pervq_msix_interrupts(struct vtpci_common *cn, enum intr_type type)
959
{
673
{
960
	struct vtpci_virtqueue *vqx;
674
	struct vtpci_virtqueue *vqx;
961
	struct vtpci_interrupt *intr;
675
	struct vtpci_interrupt *intr;
962
	int i, error;
676
	int i, error;
963
677
964
	intr = sc->vtpci_msix_vq_interrupts;
678
	intr = cn->vtpci_msix_vq_interrupts;
965
679
966
	for (i = 0; i < sc->vtpci_nvqs; i++) {
680
	for (i = 0; i < cn->vtpci_nvqs; i++) {
967
		vqx = &sc->vtpci_vqs[i];
681
		vqx = &cn->vtpci_vqs[i];
968
682
969
		if (vqx->vtv_no_intr)
683
		if (vqx->vtv_no_intr)
970
			continue;
684
			continue;
971
685
972
		error = bus_setup_intr(sc->vtpci_dev, intr->vti_irq, type,
686
		error = bus_setup_intr(cn->vtpci_dev, intr->vti_irq, type,
973
		    vtpci_vq_intr_filter, vtpci_vq_intr, vqx->vtv_vq,
687
		    vtpci_vq_intr_filter, vtpci_vq_intr, vqx->vtv_vq,
974
		    &intr->vti_handler);
688
		    &intr->vti_handler);
975
		if (error)
689
		if (error)
Lines 982-1087 Link Here
982
}
696
}
983
697
984
static int
698
static int
985
vtpci_setup_msix_interrupts(struct vtpci_softc *sc, enum intr_type type)
699
vtpci_set_host_msix_vectors(struct vtpci_common *cn)
986
{
700
{
987
	device_t dev;
988
	struct vtpci_interrupt *intr;
989
	int error;
990
991
	dev = sc->vtpci_dev;
992
	intr = &sc->vtpci_device_interrupt;
993
994
	error = bus_setup_intr(dev, intr->vti_irq, type, NULL,
995
	    vtpci_config_intr, sc, &intr->vti_handler);
996
	if (error)
997
		return (error);
998
999
	if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) {
1000
		intr = sc->vtpci_msix_vq_interrupts;
1001
		error = bus_setup_intr(dev, intr->vti_irq, type,
1002
		    vtpci_vq_shared_intr_filter, vtpci_vq_shared_intr, sc,
1003
		    &intr->vti_handler);
1004
	} else
1005
		error = vtpci_setup_pervq_msix_interrupts(sc, type);
1006
1007
	return (error ? error : vtpci_set_host_msix_vectors(sc));
1008
}
1009
1010
static int
1011
vtpci_setup_interrupts(struct vtpci_softc *sc, enum intr_type type)
1012
{
1013
	int error;
1014
1015
	type |= INTR_MPSAFE;
1016
	KASSERT(sc->vtpci_flags & VTPCI_FLAG_ITYPE_MASK,
1017
	    ("%s: no interrupt type selected %#x", __func__, sc->vtpci_flags));
1018
1019
	error = vtpci_alloc_intr_resources(sc);
1020
	if (error)
1021
		return (error);
1022
1023
	if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
1024
		error = vtpci_setup_legacy_interrupt(sc, type);
1025
	else if (sc->vtpci_flags & VTPCI_FLAG_MSI)
1026
		error = vtpci_setup_msi_interrupt(sc, type);
1027
	else
1028
		error = vtpci_setup_msix_interrupts(sc, type);
1029
1030
	return (error);
1031
}
1032
1033
static int
1034
vtpci_register_msix_vector(struct vtpci_softc *sc, int offset,
1035
    struct vtpci_interrupt *intr)
1036
{
1037
	device_t dev;
1038
	uint16_t vector;
1039
1040
	dev = sc->vtpci_dev;
1041
1042
	if (intr != NULL) {
1043
		/* Map from guest rid to host vector. */
1044
		vector = intr->vti_rid - 1;
1045
	} else
1046
		vector = VIRTIO_MSI_NO_VECTOR;
1047
1048
	vtpci_write_config_2(sc, offset, vector);
1049
1050
	/* Read vector to determine if the host had sufficient resources. */
1051
	if (vtpci_read_config_2(sc, offset) != vector) {
1052
		device_printf(dev,
1053
		    "insufficient host resources for MSIX interrupts\n");
1054
		return (ENODEV);
1055
	}
1056
1057
	return (0);
1058
}
1059
1060
static int
1061
vtpci_set_host_msix_vectors(struct vtpci_softc *sc)
1062
{
1063
	struct vtpci_interrupt *intr, *tintr;
701
	struct vtpci_interrupt *intr, *tintr;
1064
	int idx, offset, error;
702
	int idx, error;
1065
703
1066
	intr = &sc->vtpci_device_interrupt;
704
	intr = &cn->vtpci_device_interrupt;
1067
	offset = VIRTIO_MSI_CONFIG_VECTOR;
705
	error = vtpci_register_cfg_msix(cn, intr);
1068
1069
	error = vtpci_register_msix_vector(sc, offset, intr);
1070
	if (error)
706
	if (error)
1071
		return (error);
707
		return (error);
1072
708
1073
	intr = sc->vtpci_msix_vq_interrupts;
709
	intr = cn->vtpci_msix_vq_interrupts;
1074
	offset = VIRTIO_MSI_QUEUE_VECTOR;
710
	for (idx = 0; idx < cn->vtpci_nvqs; idx++) {
1075
711
		if (cn->vtpci_vqs[idx].vtv_no_intr)
1076
	for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
1077
		vtpci_select_virtqueue(sc, idx);
1078
1079
		if (sc->vtpci_vqs[idx].vtv_no_intr)
1080
			tintr = NULL;
712
			tintr = NULL;
1081
		else
713
		else
1082
			tintr = intr;
714
			tintr = intr;
1083
715
1084
		error = vtpci_register_msix_vector(sc, offset, tintr);
716
		error = vtpci_register_vq_msix(cn, idx, tintr);
1085
		if (error)
717
		if (error)
1086
			break;
718
			break;
1087
719
Lines 1089-1096 Link Here
1089
		 * For shared MSIX, all the virtqueues share the first
721
		 * For shared MSIX, all the virtqueues share the first
1090
		 * interrupt.
722
		 * interrupt.
1091
		 */
723
		 */
1092
		if (!sc->vtpci_vqs[idx].vtv_no_intr &&
724
		if (!cn->vtpci_vqs[idx].vtv_no_intr &&
1093
		    (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) == 0)
725
		    (cn->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) == 0)
1094
			intr++;
726
			intr++;
1095
	}
727
	}
1096
728
Lines 1098-1261 Link Here
1098
}
730
}
1099
731
1100
static int
732
static int
1101
vtpci_reinit_virtqueue(struct vtpci_softc *sc, int idx)
733
vtpci_setup_msix_interrupts(struct vtpci_common *cn, enum intr_type type)
1102
{
734
{
1103
	struct vtpci_virtqueue *vqx;
735
	struct vtpci_interrupt *intr;
1104
	struct virtqueue *vq;
1105
	int error;
736
	int error;
1106
	uint16_t size;
1107
737
1108
	vqx = &sc->vtpci_vqs[idx];
738
	intr = &cn->vtpci_device_interrupt;
1109
	vq = vqx->vtv_vq;
1110
739
1111
	KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));
740
	error = bus_setup_intr(cn->vtpci_dev, intr->vti_irq, type, NULL,
1112
741
	    vtpci_config_intr, cn, &intr->vti_handler);
1113
	vtpci_select_virtqueue(sc, idx);
1114
	size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);
1115
1116
	error = virtqueue_reinit(vq, size);
1117
	if (error)
742
	if (error)
1118
		return (error);
743
		return (error);
1119
744
1120
	vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
745
	if (cn->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) {
1121
	    virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
746
		intr = &cn->vtpci_msix_vq_interrupts[0];
1122
747
1123
	return (0);
748
		error = bus_setup_intr(cn->vtpci_dev, intr->vti_irq, type,
749
		    vtpci_vq_shared_intr_filter, vtpci_vq_shared_intr, cn,
750
		    &intr->vti_handler);
751
	} else
752
		error = vtpci_setup_pervq_msix_interrupts(cn, type);
753
754
	return (error ? error : vtpci_set_host_msix_vectors(cn));
1124
}
755
}
1125
756
1126
static void
757
static int
1127
vtpci_free_interrupt(struct vtpci_softc *sc, struct vtpci_interrupt *intr)
758
vtpci_setup_intrs(struct vtpci_common *cn, enum intr_type type)
1128
{
759
{
1129
	device_t dev;
760
	int error;
1130
761
1131
	dev = sc->vtpci_dev;
762
	type |= INTR_MPSAFE;
763
	KASSERT(cn->vtpci_flags & VTPCI_FLAG_ITYPE_MASK,
764
	    ("%s: no interrupt type selected %#x", __func__, cn->vtpci_flags));
1132
765
1133
	if (intr->vti_handler != NULL) {
766
	error = vtpci_alloc_intr_resources(cn);
1134
		bus_teardown_intr(dev, intr->vti_irq, intr->vti_handler);
767
	if (error)
1135
		intr->vti_handler = NULL;
768
		return (error);
1136
	}
1137
769
1138
	if (intr->vti_irq != NULL) {
770
	if (cn->vtpci_flags & VTPCI_FLAG_INTX)
1139
		bus_release_resource(dev, SYS_RES_IRQ, intr->vti_rid,
771
		error = vtpci_setup_intx_interrupt(cn, type);
1140
		    intr->vti_irq);
772
	else if (cn->vtpci_flags & VTPCI_FLAG_MSI)
1141
		intr->vti_irq = NULL;
773
		error = vtpci_setup_msi_interrupt(cn, type);
1142
		intr->vti_rid = -1;
774
	else
1143
	}
775
		error = vtpci_setup_msix_interrupts(cn, type);
776
777
	return (error);
1144
}
778
}
1145
779
1146
static void
780
int
1147
vtpci_free_interrupts(struct vtpci_softc *sc)
781
vtpci_setup_interrupts(struct vtpci_common *cn, enum intr_type type)
1148
{
782
{
1149
	struct vtpci_interrupt *intr;
783
	device_t dev;
1150
	int i, nvq_intrs;
784
	int attempt, error;
1151
785
1152
	vtpci_free_interrupt(sc, &sc->vtpci_device_interrupt);
786
	dev = cn->vtpci_dev;
1153
787
1154
	if (sc->vtpci_nmsix_resources != 0) {
788
	for (attempt = 0; attempt < 5; attempt++) {
1155
		nvq_intrs = sc->vtpci_nmsix_resources - 1;
789
		/*
1156
		sc->vtpci_nmsix_resources = 0;
790
		 * Start with the most desirable interrupt configuration and
791
		 * fallback towards less desirable ones.
792
		 */
793
		switch (attempt) {
794
		case 0:
795
			error = vtpci_alloc_intr_msix_pervq(cn);
796
			break;
797
		case 1:
798
			error = vtpci_alloc_intr_msix_shared(cn);
799
			break;
800
		case 2:
801
			error = vtpci_alloc_intr_msi(cn);
802
			break;
803
		case 3:
804
			error = vtpci_alloc_intr_intx(cn);
805
			break;
806
		default:
807
			device_printf(dev,
808
			    "exhausted all interrupt allocation attempts\n");
809
			return (ENXIO);
810
		}
1157
811
1158
		intr = sc->vtpci_msix_vq_interrupts;
812
		if (error == 0 && vtpci_setup_intrs(cn, type) == 0)
1159
		if (intr != NULL) {
813
			break;
1160
			for (i = 0; i < nvq_intrs; i++, intr++)
1161
				vtpci_free_interrupt(sc, intr);
1162
814
1163
			free(sc->vtpci_msix_vq_interrupts, M_DEVBUF);
815
		vtpci_cleanup_setup_intr_attempt(cn);
1164
			sc->vtpci_msix_vq_interrupts = NULL;
1165
		}
1166
	}
816
	}
1167
817
1168
	if (sc->vtpci_flags & (VTPCI_FLAG_MSI | VTPCI_FLAG_MSIX))
818
	if (bootverbose) {
1169
		pci_release_msi(sc->vtpci_dev);
819
		if (cn->vtpci_flags & VTPCI_FLAG_INTX)
820
			device_printf(dev, "using legacy interrupt\n");
821
		else if (cn->vtpci_flags & VTPCI_FLAG_MSI)
822
			device_printf(dev, "using MSI interrupt\n");
823
		else if (cn->vtpci_flags & VTPCI_FLAG_SHARED_MSIX)
824
			device_printf(dev, "using shared MSIX interrupts\n");
825
		else
826
			device_printf(dev, "using per VQ MSIX interrupts\n");
827
	}
1170
828
1171
	sc->vtpci_flags &= ~VTPCI_FLAG_ITYPE_MASK;
829
	return (0);
1172
}
830
}
1173
831
1174
static void
832
static int
1175
vtpci_free_virtqueues(struct vtpci_softc *sc)
833
vtpci_reinit_virtqueue(struct vtpci_common *cn, int idx)
1176
{
834
{
1177
	struct vtpci_virtqueue *vqx;
835
	struct vtpci_virtqueue *vqx;
1178
	int idx;
836
	struct virtqueue *vq;
837
	int error;
1179
838
1180
	for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
839
	vqx = &cn->vtpci_vqs[idx];
1181
		vqx = &sc->vtpci_vqs[idx];
840
	vq = vqx->vtv_vq;
1182
841
1183
		vtpci_select_virtqueue(sc, idx);
842
	KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));
1184
		vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 0);
1185
843
1186
		virtqueue_free(vqx->vtv_vq);
844
	error = virtqueue_reinit(vq, vtpci_get_vq_size(cn, idx));
1187
		vqx->vtv_vq = NULL;
845
	if (error == 0)
1188
	}
846
		vtpci_set_vq(cn, vq);
1189
847
1190
	free(sc->vtpci_vqs, M_DEVBUF);
848
	return (error);
1191
	sc->vtpci_vqs = NULL;
1192
	sc->vtpci_nvqs = 0;
1193
}
849
}
1194
850
1195
static void
851
static void
1196
vtpci_release_child_resources(struct vtpci_softc *sc)
852
vtpci_intx_intr(void *xcn)
1197
{
853
{
1198
854
	struct vtpci_common *cn;
1199
	vtpci_free_interrupts(sc);
1200
	vtpci_free_virtqueues(sc);
1201
}
1202
1203
static void
1204
vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *sc)
1205
{
1206
	int idx;
1207
1208
	if (sc->vtpci_flags & VTPCI_FLAG_MSIX) {
1209
		vtpci_write_config_2(sc, VIRTIO_MSI_CONFIG_VECTOR,
1210
		    VIRTIO_MSI_NO_VECTOR);
1211
1212
		for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
1213
			vtpci_select_virtqueue(sc, idx);
1214
			vtpci_write_config_2(sc, VIRTIO_MSI_QUEUE_VECTOR,
1215
			    VIRTIO_MSI_NO_VECTOR);
1216
		}
1217
	}
1218
1219
	vtpci_free_interrupts(sc);
1220
}
1221
1222
static void
1223
vtpci_reset(struct vtpci_softc *sc)
1224
{
1225
1226
	/*
1227
	 * Setting the status to RESET sets the host device to
1228
	 * the original, uninitialized state.
1229
	 */
1230
	vtpci_set_status(sc->vtpci_dev, VIRTIO_CONFIG_STATUS_RESET);
1231
}
1232
1233
static void
1234
vtpci_select_virtqueue(struct vtpci_softc *sc, int idx)
1235
{
1236
1237
	vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, idx);
1238
}
1239
1240
static void
1241
vtpci_legacy_intr(void *xsc)
1242
{
1243
	struct vtpci_softc *sc;
1244
	struct vtpci_virtqueue *vqx;
855
	struct vtpci_virtqueue *vqx;
1245
	int i;
856
	int i;
1246
	uint8_t isr;
857
	uint8_t isr;
1247
858
1248
	sc = xsc;
859
	cn = xcn;
1249
	vqx = &sc->vtpci_vqs[0];
860
	isr = vtpci_read_isr(cn);
1250
861
1251
	/* Reading the ISR also clears it. */
1252
	isr = vtpci_read_config_1(sc, VIRTIO_PCI_ISR);
1253
1254
	if (isr & VIRTIO_PCI_ISR_CONFIG)
862
	if (isr & VIRTIO_PCI_ISR_CONFIG)
1255
		vtpci_config_intr(sc);
863
		vtpci_config_intr(cn);
1256
864
1257
	if (isr & VIRTIO_PCI_ISR_INTR) {
865
	if (isr & VIRTIO_PCI_ISR_INTR) {
1258
		for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) {
866
		vqx = &cn->vtpci_vqs[0];
867
		for (i = 0; i < cn->vtpci_nvqs; i++, vqx++) {
1259
			if (vqx->vtv_no_intr == 0)
868
			if (vqx->vtv_no_intr == 0)
1260
				virtqueue_intr(vqx->vtv_vq);
869
				virtqueue_intr(vqx->vtv_vq);
1261
		}
870
		}
Lines 1263-1279 Link Here
1263
}
872
}
1264
873
1265
static int
874
static int
1266
vtpci_vq_shared_intr_filter(void *xsc)
875
vtpci_vq_shared_intr_filter(void *xcn)
1267
{
876
{
1268
	struct vtpci_softc *sc;
877
	struct vtpci_common *cn;
1269
	struct vtpci_virtqueue *vqx;
878
	struct vtpci_virtqueue *vqx;
1270
	int i, rc;
879
	int i, rc;
1271
880
881
	cn = xcn;
882
	vqx = &cn->vtpci_vqs[0];
1272
	rc = 0;
883
	rc = 0;
1273
	sc = xsc;
1274
	vqx = &sc->vtpci_vqs[0];
1275
884
1276
	for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) {
885
	for (i = 0; i < cn->vtpci_nvqs; i++, vqx++) {
1277
		if (vqx->vtv_no_intr == 0)
886
		if (vqx->vtv_no_intr == 0)
1278
			rc |= virtqueue_intr_filter(vqx->vtv_vq);
887
			rc |= virtqueue_intr_filter(vqx->vtv_vq);
1279
	}
888
	}
Lines 1282-1297 Link Here
1282
}
891
}
1283
892
1284
static void
893
static void
1285
vtpci_vq_shared_intr(void *xsc)
894
vtpci_vq_shared_intr(void *xcn)
1286
{
895
{
1287
	struct vtpci_softc *sc;
896
	struct vtpci_common *cn;
1288
	struct vtpci_virtqueue *vqx;
897
	struct vtpci_virtqueue *vqx;
1289
	int i;
898
	int i;
1290
899
1291
	sc = xsc;
900
	cn = xcn;
1292
	vqx = &sc->vtpci_vqs[0];
901
	vqx = &cn->vtpci_vqs[0];
1293
902
1294
	for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) {
903
	for (i = 0; i < cn->vtpci_nvqs; i++, vqx++) {
1295
		if (vqx->vtv_no_intr == 0)
904
		if (vqx->vtv_no_intr == 0)
1296
			virtqueue_intr(vqx->vtv_vq);
905
			virtqueue_intr(vqx->vtv_vq);
1297
	}
906
	}
Lines 1319-1332 Link Here
1319
}
928
}
1320
929
1321
static void
930
static void
1322
vtpci_config_intr(void *xsc)
931
vtpci_config_intr(void *xcn)
1323
{
932
{
1324
	struct vtpci_softc *sc;
933
	struct vtpci_common *cn;
1325
	device_t child;
934
	device_t child;
1326
935
1327
	sc = xsc;
936
	cn = xcn;
1328
	child = sc->vtpci_child_dev;
937
	child = cn->vtpci_child_dev;
1329
938
1330
	if (child != NULL)
939
	if (child != NULL)
1331
		VIRTIO_CONFIG_CHANGE(child);
940
		VIRTIO_CONFIG_CHANGE(child);
941
}
942
943
static int
944
vtpci_feature_sysctl(struct sysctl_req *req, struct vtpci_common *cn,
945
    uint64_t features)
946
{
947
	struct sbuf *sb;
948
	int error;
949
950
	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
951
	if (sb == NULL)
952
		return (ENOMEM);
953
954
	error = virtio_describe_sbuf(sb, features, cn->vtpci_child_feat_desc);
955
	sbuf_delete(sb);
956
957
	return (error);
958
}
959
960
static int
961
vtpci_host_features_sysctl(SYSCTL_HANDLER_ARGS)
962
{
963
	struct vtpci_common *cn;
964
965
	cn = arg1;
966
967
	return (vtpci_feature_sysctl(req, cn, cn->vtpci_host_features));
968
}
969
970
static int
971
vtpci_negotiated_features_sysctl(SYSCTL_HANDLER_ARGS)
972
{
973
	struct vtpci_common *cn;
974
975
	cn = arg1;
976
977
	return (vtpci_feature_sysctl(req, cn, cn->vtpci_features));
978
}
979
980
static void
981
vtpci_setup_sysctl(struct vtpci_common *cn)
982
{
983
	device_t dev;
984
	struct sysctl_ctx_list *ctx;
985
	struct sysctl_oid *tree;
986
	struct sysctl_oid_list *child;
987
988
	dev = cn->vtpci_dev;
989
	ctx = device_get_sysctl_ctx(dev);
990
	tree = device_get_sysctl_tree(dev);
991
	child = SYSCTL_CHILDREN(tree);
992
993
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nvqs",
994
	    CTLFLAG_RD, &cn->vtpci_nvqs, 0, "Number of virtqueues");
995
996
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "host_features",
997
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, cn, 0,
998
	    vtpci_host_features_sysctl, "A", "Features supported by the host");
999
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "negotiated_features",
1000
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, cn, 0,
1001
	    vtpci_negotiated_features_sysctl, "A", "Features negotiated");
1332
}
1002
}
(-)sys/dev/virtio/pci/virtio_pci.h (-63 / +107 lines)
Lines 1-88 Link Here
1
/*-
1
/*-
2
 * SPDX-License-Identifier: BSD-3-Clause
2
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3
 *
3
 *
4
 * Copyright IBM Corp. 2007
4
 * Copyright (c) 2017, Bryan Venteicher <bryanv@FreeBSD.org>
5
 * All rights reserved.
5
 *
6
 *
6
 * Authors:
7
 *  Anthony Liguori  <aliguori@us.ibm.com>
8
 *
9
 * This header is BSD licensed so anyone can use the definitions to implement
10
 * compatible drivers/servers.
11
 *
12
 * Redistribution and use in source and binary forms, with or without
7
 * Redistribution and use in source and binary forms, with or without
13
 * modification, are permitted provided that the following conditions
8
 * modification, are permitted provided that the following conditions
14
 * are met:
9
 * are met:
15
 * 1. Redistributions of source code must retain the above copyright
10
 * 1. Redistributions of source code must retain the above copyright
16
 *    notice, this list of conditions and the following disclaimer.
11
 *    notice unmodified, this list of conditions, and the following
12
 *    disclaimer.
17
 * 2. Redistributions in binary form must reproduce the above copyright
13
 * 2. Redistributions in binary form must reproduce the above copyright
18
 *    notice, this list of conditions and the following disclaimer in the
14
 *    notice, this list of conditions and the following disclaimer in the
19
 *    documentation and/or other materials provided with the distribution.
15
 *    documentation and/or other materials provided with the distribution.
20
 * 3. Neither the name of IBM nor the names of its contributors
21
 *    may be used to endorse or promote products derived from this software
22
 *    without specific prior written permission.
23
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
27
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33
 * SUCH DAMAGE.
34
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 *
35
 * $FreeBSD: releng/12.1/sys/dev/virtio/pci/virtio_pci.h 326022 2017-11-20 19:36:21Z pfg $
28
 * $FreeBSD: releng/12.1/sys/dev/virtio/pci/virtio_pci.h 326022 2017-11-20 19:36:21Z pfg $
36
 */
29
 */
37
30
38
#ifndef _VIRTIO_PCI_H
31
#ifndef _VIRTIO_PCI_H
39
#define _VIRTIO_PCI_H
32
#define _VIRTIO_PCI_H
40
33
41
/* VirtIO PCI vendor/device ID. */
34
struct vtpci_interrupt {
42
#define VIRTIO_PCI_VENDORID	0x1AF4
35
	struct resource		*vti_irq;
43
#define VIRTIO_PCI_DEVICEID_MIN	0x1000
36
	int			 vti_rid;
44
#define VIRTIO_PCI_DEVICEID_MAX	0x103F
37
	void			*vti_handler;
38
};
45
39
46
/* VirtIO ABI version, this must match exactly. */
40
struct vtpci_virtqueue {
47
#define VIRTIO_PCI_ABI_VERSION	0
41
	struct virtqueue	*vtv_vq;
42
	int			 vtv_no_intr;
43
	int			 vtv_notify_offset;
44
};
48
45
49
/*
46
struct vtpci_common {
50
 * VirtIO Header, located in BAR 0.
47
	device_t			 vtpci_dev;
51
 */
48
	uint64_t			 vtpci_host_features;
52
#define VIRTIO_PCI_HOST_FEATURES  0  /* host's supported features (32bit, RO)*/
49
	uint64_t			 vtpci_features;
53
#define VIRTIO_PCI_GUEST_FEATURES 4  /* guest's supported features (32, RW) */
50
	struct vtpci_virtqueue		*vtpci_vqs;
54
#define VIRTIO_PCI_QUEUE_PFN      8  /* physical address of VQ (32, RW) */
51
	int				 vtpci_nvqs;
55
#define VIRTIO_PCI_QUEUE_NUM      12 /* number of ring entries (16, RO) */
56
#define VIRTIO_PCI_QUEUE_SEL      14 /* current VQ selection (16, RW) */
57
#define VIRTIO_PCI_QUEUE_NOTIFY	  16 /* notify host regarding VQ (16, RW) */
58
#define VIRTIO_PCI_STATUS         18 /* device status register (8, RW) */
59
#define VIRTIO_PCI_ISR            19 /* interrupt status register, reading
60
				      * also clears the register (8, RO) */
61
/* Only if MSIX is enabled: */
62
#define VIRTIO_MSI_CONFIG_VECTOR  20 /* configuration change vector (16, RW) */
63
#define VIRTIO_MSI_QUEUE_VECTOR   22 /* vector for selected VQ notifications
64
					(16, RW) */
65
52
66
/* The bit of the ISR which indicates a device has an interrupt. */
53
	uint32_t			 vtpci_flags;
67
#define VIRTIO_PCI_ISR_INTR	0x1
54
#define VTPCI_FLAG_NO_MSI		0x0001
68
/* The bit of the ISR which indicates a device configuration change. */
55
#define VTPCI_FLAG_NO_MSIX		0x0002
69
#define VIRTIO_PCI_ISR_CONFIG	0x2
56
#define VTPCI_FLAG_MODERN		0x0004
70
/* Vector value used to disable MSI for queue. */
57
#define VTPCI_FLAG_INTX			0x1000
71
#define VIRTIO_MSI_NO_VECTOR	0xFFFF
58
#define VTPCI_FLAG_MSI			0x2000
59
#define VTPCI_FLAG_MSIX			0x4000
60
#define VTPCI_FLAG_SHARED_MSIX		0x8000
61
#define VTPCI_FLAG_ITYPE_MASK		0xF000
72
62
73
/*
63
	/* The VirtIO PCI "bus" will only ever have one child. */
74
 * The remaining space is defined by each driver as the per-driver
64
	device_t			 vtpci_child_dev;
75
 * configuration space.
65
	struct virtio_feature_desc	*vtpci_child_feat_desc;
76
 */
77
#define VIRTIO_PCI_CONFIG_OFF(msix_enabled)     ((msix_enabled) ? 24 : 20)
78
66
79
/*
67
	/*
80
 * How many bits to shift physical queue address written to QUEUE_PFN.
68
	 * Ideally, each virtqueue that the driver provides a callback for will
81
 * 12 is historical, and due to x86 page size.
69
	 * receive its own MSIX vector. If there are not sufficient vectors
82
 */
70
	 * available, then attempt to have all the VQs share one vector. For
83
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT	12
71
	 * MSIX, the configuration changed notifications must be on their own
72
	 * vector.
73
	 *
74
	 * If MSIX is not available, attempt to have the whole device share
75
	 * one MSI vector, and then, finally, one intx interrupt.
76
	 */
77
	struct vtpci_interrupt		 vtpci_device_interrupt;
78
	struct vtpci_interrupt		*vtpci_msix_vq_interrupts;
79
	int				 vtpci_nmsix_resources;
80
};
84
81
85
/* The alignment to use between consumer and producer parts of vring. */
82
extern int vtpci_disable_msix;
86
#define VIRTIO_PCI_VRING_ALIGN	4096
83
84
static inline device_t
85
vtpci_child_device(struct vtpci_common *cn)
86
{
87
	return (cn->vtpci_child_dev);
88
}
89
90
static inline bool
91
vtpci_is_msix_available(struct vtpci_common *cn)
92
{
93
	return ((cn->vtpci_flags & VTPCI_FLAG_NO_MSIX) == 0);
94
}
95
96
static inline bool
97
vtpci_is_msix_enabled(struct vtpci_common *cn)
98
{
99
	return ((cn->vtpci_flags & VTPCI_FLAG_MSIX) != 0);
100
}
101
102
static inline bool
103
vtpci_is_modern(struct vtpci_common *cn)
104
{
105
	return ((cn->vtpci_flags & VTPCI_FLAG_MODERN) != 0);
106
}
107
108
static inline int
109
vtpci_virtqueue_count(struct vtpci_common *cn)
110
{
111
	return (cn->vtpci_nvqs);
112
}
113
114
void	vtpci_init(struct vtpci_common *cn, device_t dev, bool modern);
115
int	vtpci_add_child(struct vtpci_common *cn);
116
int	vtpci_delete_child(struct vtpci_common *cn);
117
void	vtpci_child_detached(struct vtpci_common *cn);
118
int	vtpci_reinit(struct vtpci_common *cn);
119
120
uint64_t vtpci_negotiate_features(struct vtpci_common *cn,
121
	     uint64_t child_features, uint64_t host_features);
122
int	 vtpci_with_feature(struct vtpci_common *cn, uint64_t feature);
123
124
int	vtpci_read_ivar(struct vtpci_common *cn, int index, uintptr_t *result);
125
int	vtpci_write_ivar(struct vtpci_common *cn, int index, uintptr_t value);
126
127
int	vtpci_alloc_virtqueues(struct vtpci_common *cn, int flags, int nvqs,
128
	    struct vq_alloc_info *vq_info);
129
int	vtpci_setup_interrupts(struct vtpci_common *cn, enum intr_type type);
130
void	vtpci_release_child_resources(struct vtpci_common *cn);
87
131
88
#endif /* _VIRTIO_PCI_H */
132
#endif /* _VIRTIO_PCI_H */
(-)sys/dev/virtio/pci/virtio_pci_if.m (+71 lines)
Line 0 Link Here
1
#-
2
# Copyright (c) 2017, Bryan Venteicher <bryanv@FreeBSD.org>
3
# All rights reserved.
4
#
5
# Redistribution and use in source and binary forms, with or without
6
# modification, are permitted provided that the following conditions
7
# are met:
8
# 1. Redistributions of source code must retain the above copyright
9
#    notice, this list of conditions and the following disclaimer.
10
# 2. Redistributions in binary form must reproduce the above copyright
11
#    notice, this list of conditions and the following disclaimer in the
12
#    documentation and/or other materials provided with the distribution.
13
#
14
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24
# SUCH DAMAGE.
25
#
26
# $FreeBSD$
27
28
#include <sys/bus.h>
29
#include <machine/bus.h>
30
31
INTERFACE virtio_pci;
32
33
HEADER {
34
struct virtqueue;
35
struct vtpci_interrupt;
36
};
37
38
METHOD uint8_t read_isr {
39
	device_t	dev;
40
};
41
42
METHOD uint16_t get_vq_size {
43
	device_t	dev;
44
	int		idx;
45
};
46
47
METHOD bus_size_t get_vq_notify_off {
48
	device_t	dev;
49
	int		idx;
50
};
51
52
METHOD void set_vq {
53
	device_t		dev;
54
	struct virtqueue	*vq;
55
};
56
57
METHOD void disable_vq {
58
	device_t		 dev;
59
	int			 idx;
60
};
61
62
METHOD int register_cfg_msix {
63
	device_t	dev;
64
	struct vtpci_interrupt *intr;
65
};
66
67
METHOD int register_vq_msix {
68
	device_t		dev;
69
	int			idx;
70
	struct vtpci_interrupt	*intr;
71
};
(-)sys/dev/virtio/pci/virtio_pci_legacy.c (+714 lines)
Line 0 Link Here
1
/*-
2
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3
 *
4
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
5
 * All rights reserved.
6
 *
7
 * Redistribution and use in source and binary forms, with or without
8
 * modification, are permitted provided that the following conditions
9
 * are met:
10
 * 1. Redistributions of source code must retain the above copyright
11
 *    notice unmodified, this list of conditions, and the following
12
 *    disclaimer.
13
 * 2. Redistributions in binary form must reproduce the above copyright
14
 *    notice, this list of conditions and the following disclaimer in the
15
 *    documentation and/or other materials provided with the distribution.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
28
29
/* Driver for the legacy VirtIO PCI interface. */
30
31
#include <sys/cdefs.h>
32
__FBSDID("$FreeBSD$");
33
34
#include <sys/param.h>
35
#include <sys/systm.h>
36
#include <sys/bus.h>
37
#include <sys/kernel.h>
38
#include <sys/module.h>
39
40
#include <machine/bus.h>
41
#include <machine/resource.h>
42
#include <sys/bus.h>
43
#include <sys/rman.h>
44
45
#include <dev/pci/pcivar.h>
46
#include <dev/pci/pcireg.h>
47
48
#include <dev/virtio/virtio.h>
49
#include <dev/virtio/virtqueue.h>
50
#include <dev/virtio/pci/virtio_pci.h>
51
#include <dev/virtio/pci/virtio_pci_legacy_var.h>
52
53
#include "virtio_bus_if.h"
54
#include "virtio_pci_if.h"
55
#include "virtio_if.h"
56
57
struct vtpci_legacy_softc {
58
	device_t			 vtpci_dev;
59
	struct vtpci_common		 vtpci_common;
60
	struct resource			*vtpci_res;
61
	struct resource			*vtpci_msix_res;
62
};
63
64
static int	vtpci_legacy_probe(device_t);
65
static int	vtpci_legacy_attach(device_t);
66
static int	vtpci_legacy_detach(device_t);
67
static int	vtpci_legacy_suspend(device_t);
68
static int	vtpci_legacy_resume(device_t);
69
static int	vtpci_legacy_shutdown(device_t);
70
71
static void	vtpci_legacy_driver_added(device_t, driver_t *);
72
static void	vtpci_legacy_child_detached(device_t, device_t);
73
static int	vtpci_legacy_read_ivar(device_t, device_t, int, uintptr_t *);
74
static int	vtpci_legacy_write_ivar(device_t, device_t, int, uintptr_t);
75
76
static uint8_t	vtpci_legacy_read_isr(device_t);
77
static uint16_t	vtpci_legacy_get_vq_size(device_t, int);
78
static bus_size_t vtpci_legacy_get_vq_notify_off(device_t, int);
79
static void	vtpci_legacy_set_vq(device_t, struct virtqueue *);
80
static void	vtpci_legacy_disable_vq(device_t, int);
81
static int	vtpci_legacy_register_cfg_msix(device_t,
82
		    struct vtpci_interrupt *);
83
static int	vtpci_legacy_register_vq_msix(device_t, int idx,
84
		    struct vtpci_interrupt *);
85
86
static uint64_t	vtpci_legacy_negotiate_features(device_t, uint64_t);
87
static int	vtpci_legacy_with_feature(device_t, uint64_t);
88
static int	vtpci_legacy_alloc_virtqueues(device_t, int, int,
89
		    struct vq_alloc_info *);
90
static int	vtpci_legacy_setup_interrupts(device_t, enum intr_type);
91
static void	vtpci_legacy_stop(device_t);
92
static int	vtpci_legacy_reinit(device_t, uint64_t);
93
static void	vtpci_legacy_reinit_complete(device_t);
94
static void	vtpci_legacy_notify_vq(device_t, uint16_t, bus_size_t);
95
static void	vtpci_legacy_read_dev_config(device_t, bus_size_t, void *, int);
96
static void	vtpci_legacy_write_dev_config(device_t, bus_size_t, void *, int);
97
98
static int	vtpci_legacy_alloc_resources(struct vtpci_legacy_softc *);
99
static void	vtpci_legacy_free_resources(struct vtpci_legacy_softc *);
100
101
static void	vtpci_legacy_probe_and_attach_child(struct vtpci_legacy_softc *);
102
103
static uint8_t	vtpci_legacy_get_status(struct vtpci_legacy_softc *);
104
static void	vtpci_legacy_set_status(struct vtpci_legacy_softc *, uint8_t);
105
static void	vtpci_legacy_select_virtqueue(struct vtpci_legacy_softc *, int);
106
static void	vtpci_legacy_reset(struct vtpci_legacy_softc *);
107
108
#define VIRTIO_PCI_LEGACY_CONFIG(_sc) \
109
    VIRTIO_PCI_CONFIG_OFF(vtpci_is_msix_enabled(&(_sc)->vtpci_common))
110
111
/*
112
 * I/O port read/write wrappers.
113
 */
114
#define vtpci_legacy_read_config_1(sc, o)	bus_read_1((sc)->vtpci_res, (o))
115
#define vtpci_legacy_read_config_2(sc, o)	bus_read_2((sc)->vtpci_res, (o))
116
#define vtpci_legacy_read_config_4(sc, o)	bus_read_4((sc)->vtpci_res, (o))
117
#define vtpci_legacy_write_config_1(sc, o, v) \
118
    bus_write_1((sc)->vtpci_res, (o), (v))
119
#define vtpci_legacy_write_config_2(sc, o, v) \
120
    bus_write_2((sc)->vtpci_res, (o), (v))
121
#define vtpci_legacy_write_config_4(sc, o, v) \
122
    bus_write_4((sc)->vtpci_res, (o), (v))
123
124
static device_method_t vtpci_legacy_methods[] = {
125
	/* Device interface. */
126
	DEVMETHOD(device_probe,			  vtpci_legacy_probe),
127
	DEVMETHOD(device_attach,		  vtpci_legacy_attach),
128
	DEVMETHOD(device_detach,		  vtpci_legacy_detach),
129
	DEVMETHOD(device_suspend,		  vtpci_legacy_suspend),
130
	DEVMETHOD(device_resume,		  vtpci_legacy_resume),
131
	DEVMETHOD(device_shutdown,		  vtpci_legacy_shutdown),
132
133
	/* Bus interface. */
134
	DEVMETHOD(bus_driver_added,		  vtpci_legacy_driver_added),
135
	DEVMETHOD(bus_child_detached,		  vtpci_legacy_child_detached),
136
	DEVMETHOD(bus_read_ivar,		  vtpci_legacy_read_ivar),
137
	DEVMETHOD(bus_write_ivar,		  vtpci_legacy_write_ivar),
138
139
	/* VirtIO PCI interface. */
140
	DEVMETHOD(virtio_pci_read_isr,		 vtpci_legacy_read_isr),
141
	DEVMETHOD(virtio_pci_get_vq_size,	 vtpci_legacy_get_vq_size),
142
	DEVMETHOD(virtio_pci_get_vq_notify_off,	 vtpci_legacy_get_vq_notify_off),
143
	DEVMETHOD(virtio_pci_set_vq,		 vtpci_legacy_set_vq),
144
	DEVMETHOD(virtio_pci_disable_vq,	 vtpci_legacy_disable_vq),
145
	DEVMETHOD(virtio_pci_register_cfg_msix,  vtpci_legacy_register_cfg_msix),
146
	DEVMETHOD(virtio_pci_register_vq_msix,	 vtpci_legacy_register_vq_msix),
147
148
	/* VirtIO bus interface. */
149
	DEVMETHOD(virtio_bus_negotiate_features,  vtpci_legacy_negotiate_features),
150
	DEVMETHOD(virtio_bus_with_feature,	  vtpci_legacy_with_feature),
151
	DEVMETHOD(virtio_bus_alloc_virtqueues,	  vtpci_legacy_alloc_virtqueues),
152
	DEVMETHOD(virtio_bus_setup_intr,	  vtpci_legacy_setup_interrupts),
153
	DEVMETHOD(virtio_bus_stop,		  vtpci_legacy_stop),
154
	DEVMETHOD(virtio_bus_reinit,		  vtpci_legacy_reinit),
155
	DEVMETHOD(virtio_bus_reinit_complete,	  vtpci_legacy_reinit_complete),
156
	DEVMETHOD(virtio_bus_notify_vq,		  vtpci_legacy_notify_vq),
157
	DEVMETHOD(virtio_bus_read_device_config,  vtpci_legacy_read_dev_config),
158
	DEVMETHOD(virtio_bus_write_device_config, vtpci_legacy_write_dev_config),
159
160
	DEVMETHOD_END
161
};
162
163
static driver_t vtpci_legacy_driver = {
164
	.name = "vtpcil",
165
	.methods = vtpci_legacy_methods,
166
	.size = sizeof(struct vtpci_legacy_softc)
167
};
168
169
devclass_t vtpci_legacy_devclass;
170
171
DRIVER_MODULE(vtpcil, pci, vtpci_legacy_driver, vtpci_legacy_devclass, 0, 0);
172
173
static int
174
vtpci_legacy_probe(device_t dev)
175
{
176
	char desc[64];
177
	const char *name;
178
179
	if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
180
		return (ENXIO);
181
182
	if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN ||
183
	    pci_get_device(dev) > VIRTIO_PCI_DEVICEID_LEGACY_MAX)
184
		return (ENXIO);
185
186
	if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION)
187
		return (ENXIO);
188
189
	name = virtio_device_name(pci_get_subdevice(dev));
190
	if (name == NULL)
191
		name = "Unknown";
192
193
	snprintf(desc, sizeof(desc), "VirtIO PCI (legacy) %s adapter", name);
194
	device_set_desc_copy(dev, desc);
195
196
	/* Prefer transitional modern VirtIO PCI. */
197
	return (BUS_PROBE_LOW_PRIORITY);
198
}
199
200
static int
201
vtpci_legacy_attach(device_t dev)
202
{
203
	struct vtpci_legacy_softc *sc;
204
	int error;
205
206
	sc = device_get_softc(dev);
207
	sc->vtpci_dev = dev;
208
	vtpci_init(&sc->vtpci_common, dev, false);
209
210
	error = vtpci_legacy_alloc_resources(sc);
211
	if (error) {
212
		device_printf(dev, "cannot map I/O space\n");
213
		return (error);
214
	}
215
216
	vtpci_legacy_reset(sc);
217
218
	/* Tell the host we've noticed this device. */
219
	vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
220
221
	error = vtpci_add_child(&sc->vtpci_common);
222
	if (error)
223
		goto fail;
224
225
	vtpci_legacy_probe_and_attach_child(sc);
226
227
	return (0);
228
229
fail:
230
	vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED);
231
	vtpci_legacy_detach(dev);
232
233
	return (error);
234
}
235
236
static int
237
vtpci_legacy_detach(device_t dev)
238
{
239
	struct vtpci_legacy_softc *sc;
240
	int error;
241
242
	sc = device_get_softc(dev);
243
244
	error = vtpci_delete_child(&sc->vtpci_common);
245
	if (error)
246
		return (error);
247
248
	vtpci_legacy_reset(sc);
249
	vtpci_legacy_free_resources(sc);
250
251
	return (0);
252
}
253
254
static int
255
vtpci_legacy_suspend(device_t dev)
256
{
257
	return (bus_generic_suspend(dev));
258
}
259
260
static int
261
vtpci_legacy_resume(device_t dev)
262
{
263
	return (bus_generic_resume(dev));
264
}
265
266
static int
267
vtpci_legacy_shutdown(device_t dev)
268
{
269
	(void) bus_generic_shutdown(dev);
270
	/* Forcibly stop the host device. */
271
	vtpci_legacy_stop(dev);
272
273
	return (0);
274
}
275
276
static void
277
vtpci_legacy_driver_added(device_t dev, driver_t *driver)
278
{
279
	vtpci_legacy_probe_and_attach_child(device_get_softc(dev));
280
}
281
282
static void
283
vtpci_legacy_child_detached(device_t dev, device_t child)
284
{
285
	struct vtpci_legacy_softc *sc;
286
287
	sc = device_get_softc(dev);
288
289
	vtpci_legacy_reset(sc);
290
	vtpci_child_detached(&sc->vtpci_common);
291
292
	/* After the reset, retell the host we've noticed this device. */
293
	vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
294
}
295
296
static int
297
vtpci_legacy_read_ivar(device_t dev, device_t child, int index,
298
    uintptr_t *result)
299
{
300
	struct vtpci_legacy_softc *sc;
301
	struct vtpci_common *cn;
302
303
	sc = device_get_softc(dev);
304
	cn = &sc->vtpci_common;
305
306
	if (vtpci_child_device(cn) != child)
307
		return (ENOENT);
308
309
	switch (index) {
310
	case VIRTIO_IVAR_DEVTYPE:
311
		*result = pci_get_subdevice(dev);
312
		break;
313
	default:
314
		return (vtpci_read_ivar(cn, index, result));
315
	}
316
317
	return (0);
318
}
319
320
static int
321
vtpci_legacy_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
322
{
323
	struct vtpci_legacy_softc *sc;
324
	struct vtpci_common *cn;
325
326
	sc = device_get_softc(dev);
327
	cn = &sc->vtpci_common;
328
329
	if (vtpci_child_device(cn) != child)
330
		return (ENOENT);
331
332
	switch (index) {
333
	default:
334
		return (vtpci_write_ivar(cn, index, value));
335
	}
336
337
	return (0);
338
}
339
340
static uint64_t
341
vtpci_legacy_negotiate_features(device_t dev, uint64_t child_features)
342
{
343
	struct vtpci_legacy_softc *sc;
344
	uint64_t host_features, features;
345
346
	sc = device_get_softc(dev);
347
	host_features = vtpci_legacy_read_config_4(sc, VIRTIO_PCI_HOST_FEATURES);
348
349
	features = vtpci_negotiate_features(&sc->vtpci_common,
350
	    child_features, host_features);
351
	vtpci_legacy_write_config_4(sc, VIRTIO_PCI_GUEST_FEATURES, features);
352
353
	return (features);
354
}
355
356
static int
357
vtpci_legacy_with_feature(device_t dev, uint64_t feature)
358
{
359
	struct vtpci_legacy_softc *sc;
360
361
	sc = device_get_softc(dev);
362
363
	return (vtpci_with_feature(&sc->vtpci_common, feature));
364
}
365
366
static int
367
vtpci_legacy_alloc_virtqueues(device_t dev, int flags, int nvqs,
368
    struct vq_alloc_info *vq_info)
369
{
370
	struct vtpci_legacy_softc *sc;
371
	struct vtpci_common *cn;
372
373
	sc = device_get_softc(dev);
374
	cn = &sc->vtpci_common;
375
376
	return (vtpci_alloc_virtqueues(cn, flags, nvqs, vq_info));
377
}
378
379
static int
380
vtpci_legacy_setup_interrupts(device_t dev, enum intr_type type)
381
{
382
	struct vtpci_legacy_softc *sc;
383
384
	sc = device_get_softc(dev);
385
386
	return (vtpci_setup_interrupts(&sc->vtpci_common, type));
387
}
388
389
static void
390
vtpci_legacy_stop(device_t dev)
391
{
392
	vtpci_legacy_reset(device_get_softc(dev));
393
}
394
395
static int
396
vtpci_legacy_reinit(device_t dev, uint64_t features)
397
{
398
	struct vtpci_legacy_softc *sc;
399
	struct vtpci_common *cn;
400
	int error;
401
402
	sc = device_get_softc(dev);
403
	cn = &sc->vtpci_common;
404
405
	/*
406
	 * Redrive the device initialization. This is a bit of an abuse of
407
	 * the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to
408
	 * play nice.
409
	 *
410
	 * We do not allow the host device to change from what was originally
411
	 * negotiated beyond what the guest driver changed. MSIX state should
412
	 * not change, number of virtqueues and their size remain the same, etc.
413
	 * This will need to be rethought when we want to support migration.
414
	 */
415
416
	if (vtpci_legacy_get_status(sc) != VIRTIO_CONFIG_STATUS_RESET)
417
		vtpci_legacy_stop(dev);
418
419
	/*
420
	 * Quickly drive the status through ACK and DRIVER. The device does
421
	 * not become usable again until DRIVER_OK in reinit complete.
422
	 */
423
	vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
424
	vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER);
425
426
	vtpci_legacy_negotiate_features(dev, features);
427
428
	error = vtpci_reinit(cn);
429
	if (error)
430
		return (error);
431
432
	return (0);
433
}
434
435
static void
436
vtpci_legacy_reinit_complete(device_t dev)
437
{
438
	struct vtpci_legacy_softc *sc;
439
440
	sc = device_get_softc(dev);
441
442
	vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK);
443
}
444
445
static void
446
vtpci_legacy_notify_vq(device_t dev, uint16_t queue, bus_size_t offset)
447
{
448
	struct vtpci_legacy_softc *sc;
449
450
	sc = device_get_softc(dev);
451
	MPASS(offset == VIRTIO_PCI_QUEUE_NOTIFY);
452
453
	vtpci_legacy_write_config_2(sc, offset, queue);
454
}
455
456
static uint8_t
457
vtpci_legacy_get_status(struct vtpci_legacy_softc *sc)
458
{
459
	return (vtpci_legacy_read_config_1(sc, VIRTIO_PCI_STATUS));
460
}
461
462
static void
463
vtpci_legacy_set_status(struct vtpci_legacy_softc *sc, uint8_t status)
464
{
465
	if (status != VIRTIO_CONFIG_STATUS_RESET)
466
		status |= vtpci_legacy_get_status(sc);
467
468
	vtpci_legacy_write_config_1(sc, VIRTIO_PCI_STATUS, status);
469
}
470
471
static void
vtpci_legacy_read_dev_config(device_t dev, bus_size_t offset,
    void *dst, int length)
{
	struct vtpci_legacy_softc *sc;
	bus_size_t off;
	uint8_t *d;
	int size;

	sc = device_get_softc(dev);
	off = VIRTIO_PCI_LEGACY_CONFIG(sc) + offset;

	for (d = dst; length > 0; d += size, off += size, length -= size) {
		if (length >= 4) {
			size = 4;
			*(uint32_t *)d = vtpci_legacy_read_config_4(sc, off);
		} else if (length >= 2) {
			size = 2;
			*(uint16_t *)d = vtpci_legacy_read_config_2(sc, off);
		} else {
			size = 1;
			*d = vtpci_legacy_read_config_1(sc, off);
		}
	}
}

static void
vtpci_legacy_write_dev_config(device_t dev, bus_size_t offset,
    void *src, int length)
{
	struct vtpci_legacy_softc *sc;
	bus_size_t off;
	uint8_t *s;
	int size;

	sc = device_get_softc(dev);
	off = VIRTIO_PCI_LEGACY_CONFIG(sc) + offset;

	for (s = src; length > 0; s += size, off += size, length -= size) {
		if (length >= 4) {
			size = 4;
			vtpci_legacy_write_config_4(sc, off, *(uint32_t *)s);
		} else if (length >= 2) {
			size = 2;
			vtpci_legacy_write_config_2(sc, off, *(uint16_t *)s);
		} else {
			size = 1;
			vtpci_legacy_write_config_1(sc, off, *s);
		}
	}
}

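/*
 * The legacy virtio header lives in the BAR 0 I/O port space; BAR 1 maps
 * the MSI-X table when MSI-X is available.
 */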
static int
vtpci_legacy_alloc_resources(struct vtpci_legacy_softc *sc)
{
	device_t dev;
	int rid;

	dev = sc->vtpci_dev;

	rid = PCIR_BAR(0);
	if ((sc->vtpci_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
	    &rid, RF_ACTIVE)) == NULL)
		return (ENXIO);

	if (vtpci_is_msix_available(&sc->vtpci_common)) {
		rid = PCIR_BAR(1);
		if ((sc->vtpci_msix_res = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE)) == NULL)
			return (ENXIO);
	}

	return (0);
}

static void
vtpci_legacy_free_resources(struct vtpci_legacy_softc *sc)
{
	device_t dev;

	dev = sc->vtpci_dev;

	if (sc->vtpci_msix_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1),
		    sc->vtpci_msix_res);
		sc->vtpci_msix_res = NULL;
	}

	if (sc->vtpci_res != NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0),
		    sc->vtpci_res);
		sc->vtpci_res = NULL;
	}
}

static void
vtpci_legacy_probe_and_attach_child(struct vtpci_legacy_softc *sc)
{
	device_t dev, child;

	dev = sc->vtpci_dev;
	child = vtpci_child_device(&sc->vtpci_common);

	if (child == NULL || device_get_state(child) != DS_NOTPRESENT)
		return;

	if (device_probe(child) != 0)
		return;

	vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER);

	if (device_attach(child) != 0) {
		vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED);
		/* Reset status for future attempt. */
		vtpci_legacy_child_detached(dev, child);
	} else {
		vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK);
		VIRTIO_ATTACH_COMPLETED(child);
	}
}

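/*
 * Program an MSI-X vector into the given register and read it back: the
 * device indicates failure by not retaining the written value.
 */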
static int
vtpci_legacy_register_msix(struct vtpci_legacy_softc *sc, int offset,
    struct vtpci_interrupt *intr)
{
	device_t dev;
	uint16_t vector;

	dev = sc->vtpci_dev;

	if (intr != NULL) {
		/* Map from guest rid to host vector. */
		vector = intr->vti_rid - 1;
	} else
		vector = VIRTIO_MSI_NO_VECTOR;

	vtpci_legacy_write_config_2(sc, offset, vector);
	return (vtpci_legacy_read_config_2(sc, offset) == vector ? 0 : ENODEV);
}

static int
vtpci_legacy_register_cfg_msix(device_t dev, struct vtpci_interrupt *intr)
{
	struct vtpci_legacy_softc *sc;
	int error;

	sc = device_get_softc(dev);

	error = vtpci_legacy_register_msix(sc, VIRTIO_MSI_CONFIG_VECTOR, intr);
	if (error) {
		device_printf(dev,
		    "unable to register config MSIX interrupt\n");
		return (error);
	}

	return (0);
}

static int
vtpci_legacy_register_vq_msix(device_t dev, int idx,
    struct vtpci_interrupt *intr)
{
	struct vtpci_legacy_softc *sc;
	int error;

	sc = device_get_softc(dev);

	vtpci_legacy_select_virtqueue(sc, idx);
	error = vtpci_legacy_register_msix(sc, VIRTIO_MSI_QUEUE_VECTOR, intr);
	if (error) {
		device_printf(dev,
		    "unable to register virtqueue MSIX interrupt\n");
		return (error);
	}

	return (0);
}

static void
vtpci_legacy_reset(struct vtpci_legacy_softc *sc)
{
	/*
	 * Setting the status to RESET sets the host device to the
	 * original, uninitialized state.
	 */
	vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_RESET);
	(void) vtpci_legacy_get_status(sc);
}

static void
vtpci_legacy_select_virtqueue(struct vtpci_legacy_softc *sc, int idx)
{
	vtpci_legacy_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, idx);
}

static uint8_t
vtpci_legacy_read_isr(device_t dev)
{
	struct vtpci_legacy_softc *sc;

	sc = device_get_softc(dev);

	return (vtpci_legacy_read_config_1(sc, VIRTIO_PCI_ISR));
}

static uint16_t
vtpci_legacy_get_vq_size(device_t dev, int idx)
{
	struct vtpci_legacy_softc *sc;

	sc = device_get_softc(dev);

	vtpci_legacy_select_virtqueue(sc, idx);
	return (vtpci_legacy_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM));
}

static bus_size_t
vtpci_legacy_get_vq_notify_off(device_t dev, int idx)
{
	return (VIRTIO_PCI_QUEUE_NOTIFY);
}

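/*
 * The legacy transport is given only the ring's page frame number. The
 * descriptor, available, and used rings are laid out contiguously from
 * that base, aligned to VIRTIO_PCI_VRING_ALIGN.
 */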
static void
vtpci_legacy_set_vq(device_t dev, struct virtqueue *vq)
{
	struct vtpci_legacy_softc *sc;

	sc = device_get_softc(dev);

	vtpci_legacy_select_virtqueue(sc, virtqueue_index(vq));
	vtpci_legacy_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
	    virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
}

static void
vtpci_legacy_disable_vq(device_t dev, int idx)
{
	struct vtpci_legacy_softc *sc;

	sc = device_get_softc(dev);

	vtpci_legacy_select_virtqueue(sc, idx);
	vtpci_legacy_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 0);
}
(-)sys/dev/virtio/pci/virtio_pci_legacy_var.h (+78 lines)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright IBM Corp. 2007
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *
 * This header is BSD licensed so anyone can use the definitions to implement
 * compatible drivers/servers.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _VIRTIO_PCI_LEGACY_VAR_H
#define _VIRTIO_PCI_LEGACY_VAR_H

#include <dev/virtio/pci/virtio_pci_var.h>

/* VirtIO ABI version, this must match exactly. */
#define VIRTIO_PCI_ABI_VERSION	0

/*
 * VirtIO Header, located in BAR 0.
 */
#define VIRTIO_PCI_HOST_FEATURES  0  /* host's supported features (32bit, RO)*/
#define VIRTIO_PCI_GUEST_FEATURES 4  /* guest's supported features (32, RW) */
#define VIRTIO_PCI_QUEUE_PFN      8  /* physical address of VQ (32, RW) */
#define VIRTIO_PCI_QUEUE_NUM      12 /* number of ring entries (16, RO) */
#define VIRTIO_PCI_QUEUE_SEL      14 /* current VQ selection (16, RW) */
#define VIRTIO_PCI_QUEUE_NOTIFY	  16 /* notify host regarding VQ (16, RW) */
#define VIRTIO_PCI_STATUS         18 /* device status register (8, RW) */
#define VIRTIO_PCI_ISR            19 /* interrupt status register, reading
				      * also clears the register (8, RO) */
/* Only if MSIX is enabled: */
#define VIRTIO_MSI_CONFIG_VECTOR  20 /* configuration change vector (16, RW) */
#define VIRTIO_MSI_QUEUE_VECTOR   22 /* vector for selected VQ notifications
					(16, RW) */

/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
 */
#define VIRTIO_PCI_CONFIG_OFF(msix_enabled)     ((msix_enabled) ? 24 : 20)

/*
 * How many bits to shift physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to x86 page size.
 */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT	12

/* The alignment to use between consumer and producer parts of vring. */
#define VIRTIO_PCI_VRING_ALIGN	4096

#endif /* _VIRTIO_PCI_LEGACY_VAR_H */
(-)sys/dev/virtio/pci/virtio_pci_modern.c (+1443 lines)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for the modern VirtIO PCI interface. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/pci/virtio_pci.h>
#include <dev/virtio/pci/virtio_pci_modern_var.h>

#include "virtio_bus_if.h"
#include "virtio_pci_if.h"
#include "virtio_if.h"

struct vtpci_modern_resource_map {
	struct resource_map	vtrm_map;
	int			vtrm_cap_offset;
	int			vtrm_bar;
	int			vtrm_offset;
	int			vtrm_length;
	int			vtrm_type;	/* SYS_RES_{MEMORY, IOPORT} */
};

struct vtpci_modern_bar_resource {
	struct resource		*vtbr_res;
	int			 vtbr_type;
};

struct vtpci_modern_softc {
	device_t			 vtpci_dev;
	struct vtpci_common		 vtpci_common;
	uint32_t			 vtpci_notify_offset_multiplier;
	uint16_t			 vtpci_devid;
	int				 vtpci_msix_bar;
	struct resource			*vtpci_msix_res;

	struct vtpci_modern_resource_map vtpci_common_res_map;
	struct vtpci_modern_resource_map vtpci_notify_res_map;
	struct vtpci_modern_resource_map vtpci_isr_res_map;
	struct vtpci_modern_resource_map vtpci_device_res_map;

#define VTPCI_MODERN_MAX_BARS		6
	struct vtpci_modern_bar_resource vtpci_bar_res[VTPCI_MODERN_MAX_BARS];
};

static int	vtpci_modern_probe(device_t);
static int	vtpci_modern_attach(device_t);
static int	vtpci_modern_detach(device_t);
static int	vtpci_modern_suspend(device_t);
static int	vtpci_modern_resume(device_t);
static int	vtpci_modern_shutdown(device_t);

static void	vtpci_modern_driver_added(device_t, driver_t *);
static void	vtpci_modern_child_detached(device_t, device_t);
static int	vtpci_modern_read_ivar(device_t, device_t, int, uintptr_t *);
static int	vtpci_modern_write_ivar(device_t, device_t, int, uintptr_t);

static uint8_t	vtpci_modern_read_isr(device_t);
static uint16_t	vtpci_modern_get_vq_size(device_t, int);
static bus_size_t vtpci_modern_get_vq_notify_off(device_t, int);
static void	vtpci_modern_set_vq(device_t, struct virtqueue *);
static void	vtpci_modern_disable_vq(device_t, int);
static int	vtpci_modern_register_msix(struct vtpci_modern_softc *, int,
		    struct vtpci_interrupt *);
static int	vtpci_modern_register_cfg_msix(device_t,
		    struct vtpci_interrupt *);
static int	vtpci_modern_register_vq_msix(device_t, int idx,
		    struct vtpci_interrupt *);

static uint64_t	vtpci_modern_negotiate_features(device_t, uint64_t);
static int	vtpci_modern_finalize_features(device_t);
static int	vtpci_modern_with_feature(device_t, uint64_t);
static int	vtpci_modern_alloc_virtqueues(device_t, int, int,
		    struct vq_alloc_info *);
static int	vtpci_modern_setup_interrupts(device_t, enum intr_type);
static void	vtpci_modern_stop(device_t);
static int	vtpci_modern_reinit(device_t, uint64_t);
static void	vtpci_modern_reinit_complete(device_t);
static void	vtpci_modern_notify_vq(device_t, uint16_t, bus_size_t);
static int	vtpci_modern_config_generation(device_t);
static void	vtpci_modern_read_dev_config(device_t, bus_size_t, void *, int);
static void	vtpci_modern_write_dev_config(device_t, bus_size_t, void *, int);

static int	vtpci_modern_probe_configs(device_t);
static int	vtpci_modern_find_cap(device_t, uint8_t, int *);
static int	vtpci_modern_map_configs(struct vtpci_modern_softc *);
static void	vtpci_modern_unmap_configs(struct vtpci_modern_softc *);
static int	vtpci_modern_find_cap_resource(struct vtpci_modern_softc *,
		     uint8_t, int, int, struct vtpci_modern_resource_map *);
static int	vtpci_modern_bar_type(struct vtpci_modern_softc *, int);
static struct resource *vtpci_modern_get_bar_resource(
		    struct vtpci_modern_softc *, int, int);
static struct resource *vtpci_modern_alloc_bar_resource(
		    struct vtpci_modern_softc *, int, int);
static void	vtpci_modern_free_bar_resources(struct vtpci_modern_softc *);
static int	vtpci_modern_alloc_resource_map(struct vtpci_modern_softc *,
		    struct vtpci_modern_resource_map *);
static void	vtpci_modern_free_resource_map(struct vtpci_modern_softc *,
		    struct vtpci_modern_resource_map *);
static void	vtpci_modern_alloc_msix_resource(struct vtpci_modern_softc *);
static void	vtpci_modern_free_msix_resource(struct vtpci_modern_softc *);

static void	vtpci_modern_probe_and_attach_child(struct vtpci_modern_softc *);

static uint64_t vtpci_modern_read_features(struct vtpci_modern_softc *);
static void	vtpci_modern_write_features(struct vtpci_modern_softc *,
		    uint64_t);
static void	vtpci_modern_select_virtqueue(struct vtpci_modern_softc *, int);
static uint8_t	vtpci_modern_get_status(struct vtpci_modern_softc *);
static void	vtpci_modern_set_status(struct vtpci_modern_softc *, uint8_t);
static void	vtpci_modern_reset(struct vtpci_modern_softc *);
static void	vtpci_modern_enable_virtqueues(struct vtpci_modern_softc *);

static uint8_t	vtpci_modern_read_common_1(struct vtpci_modern_softc *,
		    bus_size_t);
static uint16_t vtpci_modern_read_common_2(struct vtpci_modern_softc *,
		    bus_size_t);
static uint32_t vtpci_modern_read_common_4(struct vtpci_modern_softc *,
		    bus_size_t);
static void	vtpci_modern_write_common_1(struct vtpci_modern_softc *,
		     bus_size_t, uint8_t);
static void	vtpci_modern_write_common_2(struct vtpci_modern_softc *,
		     bus_size_t, uint16_t);
static void	vtpci_modern_write_common_4(struct vtpci_modern_softc *,
		    bus_size_t, uint32_t);
static void	vtpci_modern_write_common_8(struct vtpci_modern_softc *,
		    bus_size_t, uint64_t);
static void	vtpci_modern_write_notify_2(struct vtpci_modern_softc *,
		    bus_size_t, uint16_t);
static uint8_t  vtpci_modern_read_isr_1(struct vtpci_modern_softc *,
		    bus_size_t);
static uint8_t	vtpci_modern_read_device_1(struct vtpci_modern_softc *,
		    bus_size_t);
static uint16_t vtpci_modern_read_device_2(struct vtpci_modern_softc *,
		    bus_size_t);
static uint32_t vtpci_modern_read_device_4(struct vtpci_modern_softc *,
		    bus_size_t);
static uint64_t vtpci_modern_read_device_8(struct vtpci_modern_softc *,
		    bus_size_t);
static void	vtpci_modern_write_device_1(struct vtpci_modern_softc *,
		    bus_size_t, uint8_t);
static void	vtpci_modern_write_device_2(struct vtpci_modern_softc *,
		    bus_size_t, uint16_t);
static void	vtpci_modern_write_device_4(struct vtpci_modern_softc *,
		    bus_size_t, uint32_t);
static void	vtpci_modern_write_device_8(struct vtpci_modern_softc *,
		    bus_size_t, uint64_t);

/* Tunables. */
static int vtpci_modern_transitional = 0;
TUNABLE_INT("hw.virtio.pci.transitional", &vtpci_modern_transitional);

static device_method_t vtpci_modern_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,			vtpci_modern_probe),
	DEVMETHOD(device_attach,		vtpci_modern_attach),
	DEVMETHOD(device_detach,		vtpci_modern_detach),
	DEVMETHOD(device_suspend,		vtpci_modern_suspend),
	DEVMETHOD(device_resume,		vtpci_modern_resume),
	DEVMETHOD(device_shutdown,		vtpci_modern_shutdown),

	/* Bus interface. */
	DEVMETHOD(bus_driver_added,		vtpci_modern_driver_added),
	DEVMETHOD(bus_child_detached,		vtpci_modern_child_detached),
	DEVMETHOD(bus_read_ivar,		vtpci_modern_read_ivar),
	DEVMETHOD(bus_write_ivar,		vtpci_modern_write_ivar),

	/* VirtIO PCI interface. */
	DEVMETHOD(virtio_pci_read_isr,		 vtpci_modern_read_isr),
	DEVMETHOD(virtio_pci_get_vq_size,	 vtpci_modern_get_vq_size),
	DEVMETHOD(virtio_pci_get_vq_notify_off,	 vtpci_modern_get_vq_notify_off),
	DEVMETHOD(virtio_pci_set_vq,		 vtpci_modern_set_vq),
	DEVMETHOD(virtio_pci_disable_vq,	 vtpci_modern_disable_vq),
	DEVMETHOD(virtio_pci_register_cfg_msix,	 vtpci_modern_register_cfg_msix),
	DEVMETHOD(virtio_pci_register_vq_msix,	 vtpci_modern_register_vq_msix),

	/* VirtIO bus interface. */
	DEVMETHOD(virtio_bus_negotiate_features,  vtpci_modern_negotiate_features),
	DEVMETHOD(virtio_bus_finalize_features,	  vtpci_modern_finalize_features),
	DEVMETHOD(virtio_bus_with_feature,	  vtpci_modern_with_feature),
	DEVMETHOD(virtio_bus_alloc_virtqueues,	  vtpci_modern_alloc_virtqueues),
	DEVMETHOD(virtio_bus_setup_intr,	  vtpci_modern_setup_interrupts),
	DEVMETHOD(virtio_bus_stop,		  vtpci_modern_stop),
	DEVMETHOD(virtio_bus_reinit,		  vtpci_modern_reinit),
	DEVMETHOD(virtio_bus_reinit_complete,	  vtpci_modern_reinit_complete),
	DEVMETHOD(virtio_bus_notify_vq,		  vtpci_modern_notify_vq),
	DEVMETHOD(virtio_bus_config_generation,	  vtpci_modern_config_generation),
	DEVMETHOD(virtio_bus_read_device_config,  vtpci_modern_read_dev_config),
	DEVMETHOD(virtio_bus_write_device_config, vtpci_modern_write_dev_config),

	DEVMETHOD_END
};

static driver_t vtpci_modern_driver = {
	.name = "vtpcim",
	.methods = vtpci_modern_methods,
	.size = sizeof(struct vtpci_modern_softc)
};

devclass_t vtpci_modern_devclass;

DRIVER_MODULE(vtpcim, pci, vtpci_modern_driver, vtpci_modern_devclass, 0, 0);

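/*
 * Modern devices are matched by device IDs in the modern range.
 * Transitional devices reuse a legacy device ID and advertise the virtio
 * device type in the PCI subdevice ID; they are claimed here only when
 * the hw.virtio.pci.transitional tunable is set.
 */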
static int
vtpci_modern_probe(device_t dev)
{
	char desc[64];
	const char *name;
	uint16_t devid;

	if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
		return (ENXIO);

	if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN ||
	    pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MODERN_MAX)
		return (ENXIO);

	if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MODERN_MIN) {
		if (!vtpci_modern_transitional)
			return (ENXIO);
		devid = pci_get_subdevice(dev);
	} else
		devid = pci_get_device(dev) - VIRTIO_PCI_DEVICEID_MODERN_MIN;

	if (vtpci_modern_probe_configs(dev) != 0)
		return (ENXIO);

	name = virtio_device_name(devid);
	if (name == NULL)
		name = "Unknown";

	snprintf(desc, sizeof(desc), "VirtIO PCI (modern) %s adapter", name);
	device_set_desc_copy(dev, desc);

	return (BUS_PROBE_DEFAULT);
}

static int
vtpci_modern_attach(device_t dev)
{
	struct vtpci_modern_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vtpci_dev = dev;
	vtpci_init(&sc->vtpci_common, dev, true);

	if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MODERN_MIN)
		sc->vtpci_devid = pci_get_subdevice(dev);
	else
		sc->vtpci_devid = pci_get_device(dev) -
		    VIRTIO_PCI_DEVICEID_MODERN_MIN;

	error = vtpci_modern_map_configs(sc);
	if (error) {
		device_printf(dev, "cannot map configs\n");
		vtpci_modern_unmap_configs(sc);
		return (error);
	}

	vtpci_modern_reset(sc);

	/* Tell the host we've noticed this device. */
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);

	error = vtpci_add_child(&sc->vtpci_common);
	if (error)
		goto fail;

	vtpci_modern_probe_and_attach_child(sc);

	return (0);

fail:
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED);
	vtpci_modern_detach(dev);

	return (error);
}

static int
vtpci_modern_detach(device_t dev)
{
	struct vtpci_modern_softc *sc;
	int error;

	sc = device_get_softc(dev);

	error = vtpci_delete_child(&sc->vtpci_common);
	if (error)
		return (error);

	vtpci_modern_reset(sc);
	vtpci_modern_unmap_configs(sc);

	return (0);
}

static int
vtpci_modern_suspend(device_t dev)
{
	return (bus_generic_suspend(dev));
}

static int
vtpci_modern_resume(device_t dev)
{
	return (bus_generic_resume(dev));
}

static int
vtpci_modern_shutdown(device_t dev)
{
	(void) bus_generic_shutdown(dev);
	/* Forcibly stop the host device. */
	vtpci_modern_stop(dev);

	return (0);
}

static void
vtpci_modern_driver_added(device_t dev, driver_t *driver)
{
	vtpci_modern_probe_and_attach_child(device_get_softc(dev));
}

static void
vtpci_modern_child_detached(device_t dev, device_t child)
{
	struct vtpci_modern_softc *sc;

	sc = device_get_softc(dev);

	vtpci_modern_reset(sc);
	vtpci_child_detached(&sc->vtpci_common);

	/* After the reset, retell the host we've noticed this device. */
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
}

static int
vtpci_modern_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result)
{
	struct vtpci_modern_softc *sc;
	struct vtpci_common *cn;

	sc = device_get_softc(dev);
	cn = &sc->vtpci_common;

	if (vtpci_child_device(cn) != child)
		return (ENOENT);

	switch (index) {
	case VIRTIO_IVAR_DEVTYPE:
		*result = sc->vtpci_devid;
		break;
	default:
		return (vtpci_read_ivar(cn, index, result));
	}

	return (0);
}

static int
vtpci_modern_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value)
{
	struct vtpci_modern_softc *sc;
	struct vtpci_common *cn;

	sc = device_get_softc(dev);
	cn = &sc->vtpci_common;

	if (vtpci_child_device(cn) != child)
		return (ENOENT);

	switch (index) {
	default:
		return (vtpci_write_ivar(cn, index, value));
	}

	return (0);
}

static uint64_t
vtpci_modern_negotiate_features(device_t dev, uint64_t child_features)
{
	struct vtpci_modern_softc *sc;
	uint64_t host_features, features;

	sc = device_get_softc(dev);
	host_features = vtpci_modern_read_features(sc);

	/*
	 * Since the driver was added as a child of the modern PCI bus,
	 * always add the V1 flag.
	 */
	child_features |= VIRTIO_F_VERSION_1;

	features = vtpci_negotiate_features(&sc->vtpci_common,
	    child_features, host_features);
	vtpci_modern_write_features(sc, features);

	return (features);
}

static int
vtpci_modern_finalize_features(device_t dev)
{
	struct vtpci_modern_softc *sc;
	uint8_t status;

	sc = device_get_softc(dev);

	/*
	 * Must re-read the status after setting it to verify the negotiated
	 * features were accepted by the device.
	 */
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_S_FEATURES_OK);

	status = vtpci_modern_get_status(sc);
	if ((status & VIRTIO_CONFIG_S_FEATURES_OK) == 0) {
		device_printf(dev, "desired features were not accepted\n");
		return (ENOTSUP);
	}

	return (0);
}

static int
vtpci_modern_with_feature(device_t dev, uint64_t feature)
{
	struct vtpci_modern_softc *sc;

	sc = device_get_softc(dev);

	return (vtpci_with_feature(&sc->vtpci_common, feature));
}

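/*
 * The 64-bit feature sets are accessed through a 32-bit select register
 * and data window; read or write each half in turn.
 */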
static uint64_t
vtpci_modern_read_features(struct vtpci_modern_softc *sc)
{
	uint32_t features0, features1;

	vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_DFSELECT, 0);
	features0 = vtpci_modern_read_common_4(sc, VIRTIO_PCI_COMMON_DF);
	vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_DFSELECT, 1);
	features1 = vtpci_modern_read_common_4(sc, VIRTIO_PCI_COMMON_DF);

	return (((uint64_t) features1 << 32) | features0);
}

static void
vtpci_modern_write_features(struct vtpci_modern_softc *sc, uint64_t features)
{
	uint32_t features0, features1;

	features0 = features;
	features1 = features >> 32;

	vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GFSELECT, 0);
	vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GF, features0);
	vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GFSELECT, 1);
	vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GF, features1);
}

static int
vtpci_modern_alloc_virtqueues(device_t dev, int flags, int nvqs,
    struct vq_alloc_info *vq_info)
{
	struct vtpci_modern_softc *sc;
	struct vtpci_common *cn;
	uint16_t max_nvqs;

	sc = device_get_softc(dev);
	cn = &sc->vtpci_common;

	max_nvqs = vtpci_modern_read_common_2(sc, VIRTIO_PCI_COMMON_NUMQ);
	if (nvqs > max_nvqs) {
		device_printf(sc->vtpci_dev, "requested virtqueue count %d "
		    "exceeds max %d\n", nvqs, max_nvqs);
		return (E2BIG);
	}

	return (vtpci_alloc_virtqueues(cn, flags, nvqs, vq_info));
}

static int
vtpci_modern_setup_interrupts(device_t dev, enum intr_type type)
{
	struct vtpci_modern_softc *sc;
	int error;

	sc = device_get_softc(dev);

	error = vtpci_setup_interrupts(&sc->vtpci_common, type);
	if (error == 0)
		vtpci_modern_enable_virtqueues(sc);

	return (error);
}

static void
vtpci_modern_stop(device_t dev)
{
	vtpci_modern_reset(device_get_softc(dev));
}

static int
vtpci_modern_reinit(device_t dev, uint64_t features)
{
	struct vtpci_modern_softc *sc;
	struct vtpci_common *cn;
	int error;

	sc = device_get_softc(dev);
	cn = &sc->vtpci_common;

	/*
	 * Redrive the device initialization. This is a bit of an abuse of
	 * the specification, but VirtualBox, QEMU/KVM, and bhyve seem to
	 * play nice.
	 *
	 * We do not allow the host device to change from what was originally
	 * negotiated beyond what the guest driver changed. MSIX state should
	 * not change, number of virtqueues and their size remain the same, etc.
	 * This will need to be rethought when we want to support migration.
	 */

	if (vtpci_modern_get_status(sc) != VIRTIO_CONFIG_STATUS_RESET)
		vtpci_modern_stop(dev);

	/*
	 * Quickly drive the status through ACK and DRIVER. The device does
	 * not become usable again until DRIVER_OK is set in reinit complete.
	 */
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER);

	/*
	 * TODO: Check that no features were added beyond what was
	 * originally negotiated.
	 */
	vtpci_modern_negotiate_features(dev, features);
	error = vtpci_modern_finalize_features(dev);
	if (error) {
		device_printf(dev, "cannot finalize features during reinit\n");
		return (error);
	}

	error = vtpci_reinit(cn);
	if (error)
		return (error);

	return (0);
}

static void
vtpci_modern_reinit_complete(device_t dev)
{
	struct vtpci_modern_softc *sc;

	sc = device_get_softc(dev);

	vtpci_modern_enable_virtqueues(sc);
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

static void
vtpci_modern_notify_vq(device_t dev, uint16_t queue, bus_size_t offset)
{
	struct vtpci_modern_softc *sc;

	sc = device_get_softc(dev);

	vtpci_modern_write_notify_2(sc, offset, queue);
}

static uint8_t
vtpci_modern_get_status(struct vtpci_modern_softc *sc)
{
	return (vtpci_modern_read_common_1(sc, VIRTIO_PCI_COMMON_STATUS));
}

static void
vtpci_modern_set_status(struct vtpci_modern_softc *sc, uint8_t status)
{
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= vtpci_modern_get_status(sc);

	vtpci_modern_write_common_1(sc, VIRTIO_PCI_COMMON_STATUS, status);
}

static int
vtpci_modern_config_generation(device_t dev)
{
	struct vtpci_modern_softc *sc;
	uint8_t gen;

	sc = device_get_softc(dev);
	gen = vtpci_modern_read_common_1(sc, VIRTIO_PCI_COMMON_CFGGENERATION);

	return (gen);
}

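/*
 * The modern transport defines device config fields as little-endian, so
 * multi-byte accesses are converted to and from guest byte order.
 */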
static void
vtpci_modern_read_dev_config(device_t dev, bus_size_t offset, void *dst,
    int length)
{
	struct vtpci_modern_softc *sc;

	sc = device_get_softc(dev);

	if (sc->vtpci_device_res_map.vtrm_map.r_size == 0) {
		panic("%s: attempt to read dev config but not present",
		    __func__);
	}

	switch (length) {
	case 1:
		*(uint8_t *) dst = vtpci_modern_read_device_1(sc, offset);
		break;
	case 2:
		*(uint16_t *) dst = virtio_htog16(true,
		    vtpci_modern_read_device_2(sc, offset));
		break;
	case 4:
		*(uint32_t *) dst = virtio_htog32(true,
		    vtpci_modern_read_device_4(sc, offset));
		break;
	case 8:
		*(uint64_t *) dst = virtio_htog64(true,
		    vtpci_modern_read_device_8(sc, offset));
		break;
	default:
		panic("%s: device %s invalid device read length %d offset %d",
		    __func__, device_get_nameunit(dev), length, (int) offset);
	}
}

static void
vtpci_modern_write_dev_config(device_t dev, bus_size_t offset, void *src,
    int length)
{
	struct vtpci_modern_softc *sc;

	sc = device_get_softc(dev);

	if (sc->vtpci_device_res_map.vtrm_map.r_size == 0) {
		panic("%s: attempt to write dev config but not present",
		    __func__);
	}

	switch (length) {
	case 1:
		vtpci_modern_write_device_1(sc, offset, *(uint8_t *) src);
		break;
	case 2: {
		uint16_t val = virtio_gtoh16(true, *(uint16_t *) src);
		vtpci_modern_write_device_2(sc, offset, val);
		break;
	}
	case 4: {
		uint32_t val = virtio_gtoh32(true, *(uint32_t *) src);
		vtpci_modern_write_device_4(sc, offset, val);
		break;
	}
	case 8: {
		uint64_t val = virtio_gtoh64(true, *(uint64_t *) src);
		vtpci_modern_write_device_8(sc, offset, val);
		break;
	}
	default:
		panic("%s: device %s invalid device write length %d offset %d",
		    __func__, device_get_nameunit(dev), length, (int) offset);
	}
}

static int
vtpci_modern_probe_configs(device_t dev)
{
	int error;

	/*
	 * These config capabilities must be present. The DEVICE_CFG
	 * capability is only present if the device requires it.
	 */

	error = vtpci_modern_find_cap(dev, VIRTIO_PCI_CAP_COMMON_CFG, NULL);
	if (error) {
		device_printf(dev, "cannot find COMMON_CFG capability\n");
		return (error);
	}

	error = vtpci_modern_find_cap(dev, VIRTIO_PCI_CAP_NOTIFY_CFG, NULL);
	if (error) {
		device_printf(dev, "cannot find NOTIFY_CFG capability\n");
		return (error);
	}

	error = vtpci_modern_find_cap(dev, VIRTIO_PCI_CAP_ISR_CFG, NULL);
	if (error) {
		device_printf(dev, "cannot find ISR_CFG capability\n");
		return (error);
	}

	return (0);
}

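/*
 * Scan the PCI vendor-specific capability list for an entry of the
 * requested cfg_type.
 */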
static int
vtpci_modern_find_cap(device_t dev, uint8_t cfg_type, int *cap_offset)
{
	uint32_t type, bar;
	int capreg, error;

	for (error = pci_find_cap(dev, PCIY_VENDOR, &capreg);
	     error == 0;
	     error = pci_find_next_cap(dev, PCIY_VENDOR, capreg, &capreg)) {

		type = pci_read_config(dev, capreg +
		    offsetof(struct virtio_pci_cap, cfg_type), 1);
		bar = pci_read_config(dev, capreg +
		    offsetof(struct virtio_pci_cap, bar), 1);

		/* Must ignore reserved BARs. */
		if (bar >= VTPCI_MODERN_MAX_BARS)
			continue;

		if (type == cfg_type) {
			if (cap_offset != NULL)
				*cap_offset = capreg;
			break;
		}
	}

	return (error);
}

static int
vtpci_modern_map_common_config(struct vtpci_modern_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtpci_dev;

	error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_COMMON_CFG,
	    sizeof(struct virtio_pci_common_cfg), 4, &sc->vtpci_common_res_map);
	if (error) {
		device_printf(dev, "cannot find cap COMMON_CFG resource\n");
		return (error);
	}

	error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_common_res_map);
	if (error) {
		device_printf(dev, "cannot alloc resource for COMMON_CFG\n");
		return (error);
	}

	return (0);
}

static int
vtpci_modern_map_notify_config(struct vtpci_modern_softc *sc)
{
	device_t dev;
	int cap_offset, error;

	dev = sc->vtpci_dev;

	error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_NOTIFY_CFG,
	    -1, 2, &sc->vtpci_notify_res_map);
	if (error) {
		device_printf(dev, "cannot find cap NOTIFY_CFG resource\n");
		return (error);
	}

	cap_offset = sc->vtpci_notify_res_map.vtrm_cap_offset;

	sc->vtpci_notify_offset_multiplier = pci_read_config(dev, cap_offset +
	    offsetof(struct virtio_pci_notify_cap, notify_off_multiplier), 4);

	error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_notify_res_map);
	if (error) {
		device_printf(dev, "cannot alloc resource for NOTIFY_CFG\n");
		return (error);
	}

	return (0);
}

static int
vtpci_modern_map_isr_config(struct vtpci_modern_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtpci_dev;

	error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_ISR_CFG,
	    sizeof(uint8_t), 1, &sc->vtpci_isr_res_map);
	if (error) {
		device_printf(dev, "cannot find cap ISR_CFG resource\n");
		return (error);
	}

	error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_isr_res_map);
	if (error) {
		device_printf(dev, "cannot alloc resource for ISR_CFG\n");
		return (error);
	}

	return (0);
}

static int
vtpci_modern_map_device_config(struct vtpci_modern_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtpci_dev;

	error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_DEVICE_CFG,
	    -1, 4, &sc->vtpci_device_res_map);
	if (error == ENOENT) {
		/* Device configuration is optional depending on device. */
		return (0);
	} else if (error) {
		device_printf(dev, "cannot find cap DEVICE_CFG resource\n");
		return (error);
	}

	error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_device_res_map);
	if (error) {
		device_printf(dev, "cannot alloc resource for DEVICE_CFG\n");
		return (error);
	}

	return (error);
}

static int
vtpci_modern_map_configs(struct vtpci_modern_softc *sc)
{
	int error;

	error = vtpci_modern_map_common_config(sc);
	if (error)
		return (error);

	error = vtpci_modern_map_notify_config(sc);
	if (error)
		return (error);

	error = vtpci_modern_map_isr_config(sc);
	if (error)
		return (error);

	error = vtpci_modern_map_device_config(sc);
	if (error)
		return (error);

	vtpci_modern_alloc_msix_resource(sc);

	return (0);
}

static void
vtpci_modern_unmap_configs(struct vtpci_modern_softc *sc)
{

	vtpci_modern_free_resource_map(sc, &sc->vtpci_common_res_map);
	vtpci_modern_free_resource_map(sc, &sc->vtpci_notify_res_map);
	vtpci_modern_free_resource_map(sc, &sc->vtpci_isr_res_map);
	vtpci_modern_free_resource_map(sc, &sc->vtpci_device_res_map);

	vtpci_modern_free_bar_resources(sc);
	vtpci_modern_free_msix_resource(sc);

	sc->vtpci_notify_offset_multiplier = 0;
}

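/*
 * Validate a vendor capability of the requested type and record the BAR,
 * offset, and length of the structure it describes.
 */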
static int
vtpci_modern_find_cap_resource(struct vtpci_modern_softc *sc, uint8_t cfg_type,
    int min_size, int alignment, struct vtpci_modern_resource_map *res)
{
	device_t dev;
	int cap_offset, offset, length, error;
	uint8_t bar, cap_length;

	dev = sc->vtpci_dev;

	error = vtpci_modern_find_cap(dev, cfg_type, &cap_offset);
	if (error)
		return (error);

	cap_length = pci_read_config(dev,
	    cap_offset + offsetof(struct virtio_pci_cap, cap_len), 1);

	if (cap_length < sizeof(struct virtio_pci_cap)) {
		device_printf(dev, "cap %u length %d less than expected\n",
		    cfg_type, cap_length);
		return (ENXIO);
	}

	bar = pci_read_config(dev,
	    cap_offset + offsetof(struct virtio_pci_cap, bar), 1);
	offset = pci_read_config(dev,
	    cap_offset + offsetof(struct virtio_pci_cap, offset), 4);
	length = pci_read_config(dev,
	    cap_offset + offsetof(struct virtio_pci_cap, length), 4);

	if (min_size != -1 && length < min_size) {
		device_printf(dev, "cap %u struct length %d less than min %d\n",
		    cfg_type, length, min_size);
		return (ENXIO);
	}

	if (offset % alignment) {
		device_printf(dev, "cap %u struct offset %d not aligned to %d\n",
		    cfg_type, offset, alignment);
		return (ENXIO);
	}

	/* BMV: TODO Can we determine the size of the BAR here? */

	res->vtrm_cap_offset = cap_offset;
	res->vtrm_bar = bar;
	res->vtrm_offset = offset;
	res->vtrm_length = length;
	res->vtrm_type = vtpci_modern_bar_type(sc, bar);

	return (0);
}

static int
vtpci_modern_bar_type(struct vtpci_modern_softc *sc, int bar)
{
	uint32_t val;

	/*
	 * The BAR described by a config capability may be either an IOPORT or
	 * MEM, but we must know the type when calling bus_alloc_resource().
	 */
	val = pci_read_config(sc->vtpci_dev, PCIR_BAR(bar), 4);
	if (PCI_BAR_IO(val))
		return (SYS_RES_IOPORT);
	else
		return (SYS_RES_MEMORY);
}

static struct resource *
vtpci_modern_get_bar_resource(struct vtpci_modern_softc *sc, int bar, int type)
{
	struct resource *res;

	MPASS(bar >= 0 && bar < VTPCI_MODERN_MAX_BARS);
	res = sc->vtpci_bar_res[bar].vtbr_res;
	MPASS(res == NULL || sc->vtpci_bar_res[bar].vtbr_type == type);

	return (res);
}

static struct resource *
vtpci_modern_alloc_bar_resource(struct vtpci_modern_softc *sc, int bar,
    int type)
{
	struct resource *res;
	int rid;

	MPASS(bar >= 0 && bar < VTPCI_MODERN_MAX_BARS);
	MPASS(type == SYS_RES_MEMORY || type == SYS_RES_IOPORT);

	res = sc->vtpci_bar_res[bar].vtbr_res;
	if (res != NULL) {
		MPASS(sc->vtpci_bar_res[bar].vtbr_type == type);
		return (res);
	}

	rid = PCIR_BAR(bar);
	res = bus_alloc_resource_any(sc->vtpci_dev, type, &rid,
	    RF_ACTIVE | RF_UNMAPPED);
	if (res != NULL) {
		sc->vtpci_bar_res[bar].vtbr_res = res;
		sc->vtpci_bar_res[bar].vtbr_type = type;
	}

	return (res);
}

static void
vtpci_modern_free_bar_resources(struct vtpci_modern_softc *sc)
{
	device_t dev;
	struct resource *res;
	int bar, rid, type;

	dev = sc->vtpci_dev;

	for (bar = 0; bar < VTPCI_MODERN_MAX_BARS; bar++) {
		res = sc->vtpci_bar_res[bar].vtbr_res;
		type = sc->vtpci_bar_res[bar].vtbr_type;

		if (res != NULL) {
			rid = PCIR_BAR(bar);
			bus_release_resource(dev, type, rid, res);
			sc->vtpci_bar_res[bar].vtbr_res = NULL;
			sc->vtpci_bar_res[bar].vtbr_type = 0;
		}
	}
}

static int
vtpci_modern_alloc_resource_map(struct vtpci_modern_softc *sc,
    struct vtpci_modern_resource_map *map)
{
	struct resource_map_request req;
	struct resource *res;
	int type;

	type = map->vtrm_type;

	res = vtpci_modern_alloc_bar_resource(sc, map->vtrm_bar, type);
	if (res == NULL)
		return (ENXIO);

	resource_init_map_request(&req);
	req.offset = map->vtrm_offset;
	req.length = map->vtrm_length;

	return (bus_map_resource(sc->vtpci_dev, type, res, &req,
	    &map->vtrm_map));
}

static void
vtpci_modern_free_resource_map(struct vtpci_modern_softc *sc,
    struct vtpci_modern_resource_map *map)
{
	struct resource *res;
	int type;

	type = map->vtrm_type;
	res = vtpci_modern_get_bar_resource(sc, map->vtrm_bar, type);

	if (res != NULL && map->vtrm_map.r_size != 0) {
		bus_unmap_resource(sc->vtpci_dev, type, res, &map->vtrm_map);
		bzero(map, sizeof(struct vtpci_modern_resource_map));
	}
}

static void
vtpci_modern_alloc_msix_resource(struct vtpci_modern_softc *sc)
{
	device_t dev;
	int bar;

	dev = sc->vtpci_dev;

	if (!vtpci_is_msix_available(&sc->vtpci_common) ||
	    (bar = pci_msix_table_bar(dev)) == -1)
		return;

	sc->vtpci_msix_bar = bar;
	if ((sc->vtpci_msix_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &bar, RF_ACTIVE)) == NULL)
		device_printf(dev, "Unable to map MSIX table\n");
}

static void
vtpci_modern_free_msix_resource(struct vtpci_modern_softc *sc)
{
	device_t dev;

	dev = sc->vtpci_dev;

	if (sc->vtpci_msix_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->vtpci_msix_bar,
		    sc->vtpci_msix_res);
		sc->vtpci_msix_bar = 0;
		sc->vtpci_msix_res = NULL;
	}
}

static void
vtpci_modern_probe_and_attach_child(struct vtpci_modern_softc *sc)
{
	device_t dev, child;

	dev = sc->vtpci_dev;
	child = vtpci_child_device(&sc->vtpci_common);

	if (child == NULL || device_get_state(child) != DS_NOTPRESENT)
		return;

	if (device_probe(child) != 0)
		return;

	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER);

	if (device_attach(child) != 0) {
		vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED);
		/* Reset state for later attempt. */
		vtpci_modern_child_detached(dev, child);
	} else {
		vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK);
		VIRTIO_ATTACH_COMPLETED(child);
	}
}

static int
vtpci_modern_register_msix(struct vtpci_modern_softc *sc, int offset,
    struct vtpci_interrupt *intr)
{
	uint16_t vector;

	if (intr != NULL) {
		/* Map from guest rid to host vector. */
		vector = intr->vti_rid - 1;
	} else
		vector = VIRTIO_MSI_NO_VECTOR;

	vtpci_modern_write_common_2(sc, offset, vector);
	return (vtpci_modern_read_common_2(sc, offset) == vector ? 0 : ENODEV);
}

static int
vtpci_modern_register_cfg_msix(device_t dev, struct vtpci_interrupt *intr)
{
	struct vtpci_modern_softc *sc;
	int error;

	sc = device_get_softc(dev);

	error = vtpci_modern_register_msix(sc, VIRTIO_PCI_COMMON_MSIX, intr);
	if (error) {
		device_printf(dev,
		    "unable to register config MSIX interrupt\n");
		return (error);
	}

	return (0);
}

static int
vtpci_modern_register_vq_msix(device_t dev, int idx,
    struct vtpci_interrupt *intr)
{
	struct vtpci_modern_softc *sc;
	int error;

	sc = device_get_softc(dev);

	vtpci_modern_select_virtqueue(sc, idx);
	error = vtpci_modern_register_msix(sc, VIRTIO_PCI_COMMON_Q_MSIX, intr);
	if (error) {
		device_printf(dev,
		    "unable to register virtqueue MSIX interrupt\n");
		return (error);
	}

	return (0);
}

static void
vtpci_modern_reset(struct vtpci_modern_softc *sc)
{
	/*
	 * Setting the status to RESET sets the host device to the
	 * original, uninitialized state. Must poll the status until
	 * the reset is complete.
	 */
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_RESET);

	while (vtpci_modern_get_status(sc) != VIRTIO_CONFIG_STATUS_RESET)
		cpu_spinwait();
}

static void
vtpci_modern_select_virtqueue(struct vtpci_modern_softc *sc, int idx)
{
	vtpci_modern_write_common_2(sc, VIRTIO_PCI_COMMON_Q_SELECT, idx);
}

static uint8_t
vtpci_modern_read_isr(device_t dev)
{
	return (vtpci_modern_read_isr_1(device_get_softc(dev), 0));
}

static uint16_t
vtpci_modern_get_vq_size(device_t dev, int idx)
{
	struct vtpci_modern_softc *sc;

	sc = device_get_softc(dev);

	vtpci_modern_select_virtqueue(sc, idx);
	return (vtpci_modern_read_common_2(sc, VIRTIO_PCI_COMMON_Q_SIZE));
}

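/*
 * Each virtqueue has its own notify address, derived from the queue's
 * notify offset scaled by the multiplier from the NOTIFY_CFG capability.
 */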
static bus_size_t
vtpci_modern_get_vq_notify_off(device_t dev, int idx)
{
	struct vtpci_modern_softc *sc;
	uint16_t q_notify_off;

	sc = device_get_softc(dev);

	vtpci_modern_select_virtqueue(sc, idx);
	q_notify_off = vtpci_modern_read_common_2(sc, VIRTIO_PCI_COMMON_Q_NOFF);

	return (q_notify_off * sc->vtpci_notify_offset_multiplier);
}

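/*
 * Unlike the legacy transport's single PFN register, the modern transport
 * is programmed with the full 64-bit physical address of each ring.
 */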
static void
vtpci_modern_set_vq(device_t dev, struct virtqueue *vq)
{
	struct vtpci_modern_softc *sc;

	sc = device_get_softc(dev);

	vtpci_modern_select_virtqueue(sc, virtqueue_index(vq));

	/* BMV: Currently we never adjust the device's proposed VQ size. */
	vtpci_modern_write_common_2(sc,
	    VIRTIO_PCI_COMMON_Q_SIZE, virtqueue_size(vq));

	vtpci_modern_write_common_8(sc,
	    VIRTIO_PCI_COMMON_Q_DESCLO, virtqueue_desc_paddr(vq));
	vtpci_modern_write_common_8(sc,
	    VIRTIO_PCI_COMMON_Q_AVAILLO, virtqueue_avail_paddr(vq));
	vtpci_modern_write_common_8(sc,
	    VIRTIO_PCI_COMMON_Q_USEDLO, virtqueue_used_paddr(vq));
}

static void
vtpci_modern_disable_vq(device_t dev, int idx)
{
	struct vtpci_modern_softc *sc;

	sc = device_get_softc(dev);

	vtpci_modern_select_virtqueue(sc, idx);
	vtpci_modern_write_common_2(sc, VIRTIO_PCI_COMMON_Q_ENABLE, 0);
	vtpci_modern_write_common_8(sc, VIRTIO_PCI_COMMON_Q_DESCLO, 0ULL);
	vtpci_modern_write_common_8(sc, VIRTIO_PCI_COMMON_Q_AVAILLO, 0ULL);
	vtpci_modern_write_common_8(sc, VIRTIO_PCI_COMMON_Q_USEDLO, 0ULL);
}

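/*
 * Modern devices require each virtqueue to be explicitly enabled after
 * its size and ring addresses have been programmed.
 */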
static void
vtpci_modern_enable_virtqueues(struct vtpci_modern_softc *sc)
{
	int idx;

	for (idx = 0; idx < sc->vtpci_common.vtpci_nvqs; idx++) {
		vtpci_modern_select_virtqueue(sc, idx);
		vtpci_modern_write_common_2(sc, VIRTIO_PCI_COMMON_Q_ENABLE, 1);
	}
}

static uint8_t
vtpci_modern_read_common_1(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_1(&sc->vtpci_common_res_map.vtrm_map, off));
}

static uint16_t
vtpci_modern_read_common_2(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_2(&sc->vtpci_common_res_map.vtrm_map, off));
}

static uint32_t
vtpci_modern_read_common_4(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_4(&sc->vtpci_common_res_map.vtrm_map, off));
}

static void
vtpci_modern_write_common_1(struct vtpci_modern_softc *sc, bus_size_t off,
    uint8_t val)
{
	bus_write_1(&sc->vtpci_common_res_map.vtrm_map, off, val);
}

static void
vtpci_modern_write_common_2(struct vtpci_modern_softc *sc, bus_size_t off,
    uint16_t val)
{
	bus_write_2(&sc->vtpci_common_res_map.vtrm_map, off, val);
}

static void
vtpci_modern_write_common_4(struct vtpci_modern_softc *sc, bus_size_t off,
    uint32_t val)
{
	bus_write_4(&sc->vtpci_common_res_map.vtrm_map, off, val);
}

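/*
 * 64-bit common config fields are laid out as consecutive low/high
 * 32-bit halves, so write each half separately.
 */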
static void
vtpci_modern_write_common_8(struct vtpci_modern_softc *sc, bus_size_t off,
    uint64_t val)
{
	uint32_t val0, val1;

	val0 = (uint32_t) val;
	val1 = val >> 32;

	vtpci_modern_write_common_4(sc, off, val0);
	vtpci_modern_write_common_4(sc, off + 4, val1);
}

static void
vtpci_modern_write_notify_2(struct vtpci_modern_softc *sc, bus_size_t off,
    uint16_t val)
{
	bus_write_2(&sc->vtpci_notify_res_map.vtrm_map, off, val);
}

static uint8_t
vtpci_modern_read_isr_1(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_1(&sc->vtpci_isr_res_map.vtrm_map, off));
}

static uint8_t
vtpci_modern_read_device_1(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_1(&sc->vtpci_device_res_map.vtrm_map, off));
}

static uint16_t
vtpci_modern_read_device_2(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_2(&sc->vtpci_device_res_map.vtrm_map, off));
}

static uint32_t
vtpci_modern_read_device_4(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_4(&sc->vtpci_device_res_map.vtrm_map, off));
}

static uint64_t
vtpci_modern_read_device_8(struct vtpci_modern_softc *sc, bus_size_t off)
{
	device_t dev;
	int gen;
	uint32_t val0, val1;

	dev = sc->vtpci_dev;

	/*
	 * Treat the 64-bit field as two 32-bit fields. Use the generation
	 * to ensure a consistent read.
	 */
	do {
		gen = vtpci_modern_config_generation(dev);
		val0 = vtpci_modern_read_device_4(sc, off);
		val1 = vtpci_modern_read_device_4(sc, off + 4);
	} while (gen != vtpci_modern_config_generation(dev));

	return (((uint64_t) val1 << 32) | val0);
}

static void
vtpci_modern_write_device_1(struct vtpci_modern_softc *sc, bus_size_t off,
    uint8_t val)
{
	bus_write_1(&sc->vtpci_device_res_map.vtrm_map, off, val);
}

static void
vtpci_modern_write_device_2(struct vtpci_modern_softc *sc, bus_size_t off,
    uint16_t val)
{
	bus_write_2(&sc->vtpci_device_res_map.vtrm_map, off, val);
}

static void
vtpci_modern_write_device_4(struct vtpci_modern_softc *sc, bus_size_t off,
    uint32_t val)
{
	bus_write_4(&sc->vtpci_device_res_map.vtrm_map, off, val);
}

static void
vtpci_modern_write_device_8(struct vtpci_modern_softc *sc, bus_size_t off,
    uint64_t val)
{
	uint32_t val0, val1;

	val0 = (uint32_t) val;
	val1 = val >> 32;

	vtpci_modern_write_device_4(sc, off, val0);
	vtpci_modern_write_device_4(sc, off + 4, val1);
}
(-)sys/dev/virtio/pci/virtio_pci_modern_var.h (+135 lines)
1
/*
2
 * SPDX-License-Identifier: BSD-3-Clause
3
 *
4
 * Copyright IBM Corp. 2007
5
 *
6
 * Authors:
7
 *  Anthony Liguori  <aliguori@us.ibm.com>
8
 *
9
 * This header is BSD licensed so anyone can use the definitions to implement
10
 * compatible drivers/servers.
11
 *
12
 * Redistribution and use in source and binary forms, with or without
13
 * modification, are permitted provided that the following conditions
14
 * are met:
15
 * 1. Redistributions of source code must retain the above copyright
16
 *    notice, this list of conditions and the following disclaimer.
17
 * 2. Redistributions in binary form must reproduce the above copyright
18
 *    notice, this list of conditions and the following disclaimer in the
19
 *    documentation and/or other materials provided with the distribution.
20
 * 3. Neither the name of IBM nor the names of its contributors
21
 *    may be used to endorse or promote products derived from this software
22
 *    without specific prior written permission.
23
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
27
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33
 * SUCH DAMAGE.
34
 *
35
 * $FreeBSD$
36
 */
37
38
#ifndef _VIRTIO_PCI_MODERN_VAR_H
39
#define _VIRTIO_PCI_MODERN_VAR_H
40
41
#include <dev/virtio/pci/virtio_pci_var.h>
42
43
/* IDs for different capabilities.  Must all exist. */
44
/* Common configuration */
45
#define VIRTIO_PCI_CAP_COMMON_CFG	1
46
/* Notifications */
47
#define VIRTIO_PCI_CAP_NOTIFY_CFG	2
48
/* ISR access */
49
#define VIRTIO_PCI_CAP_ISR_CFG		3
50
/* Device specific configuration */
51
#define VIRTIO_PCI_CAP_DEVICE_CFG	4
52
/* PCI configuration access */
53
#define VIRTIO_PCI_CAP_PCI_CFG		5
54
55
/* This is the PCI capability header: */
56
struct virtio_pci_cap {
57
	uint8_t cap_vndr;		/* Generic PCI field: PCI_CAP_ID_VNDR */
58
	uint8_t cap_next;		/* Generic PCI field: next ptr. */
59
	uint8_t cap_len;		/* Generic PCI field: capability length */
60
	uint8_t cfg_type;		/* Identifies the structure. */
61
	uint8_t bar;			/* Where to find it. */
62
	uint8_t padding[3];		/* Pad to full dword. */
63
	uint32_t offset;		/* Offset within bar. */
64
	uint32_t length;		/* Length of the structure, in bytes. */
65
};
66
67
struct virtio_pci_notify_cap {
68
	struct virtio_pci_cap cap;
69
	uint32_t notify_off_multiplier;	/* Multiplier for queue_notify_off. */
70
};
71
72
/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
73
struct virtio_pci_common_cfg {
74
	/* About the whole device. */
75
	uint32_t device_feature_select;	/* read-write */
76
	uint32_t device_feature;	/* read-only */
77
	uint32_t guest_feature_select;	/* read-write */
78
	uint32_t guest_feature;		/* read-write */
79
	uint16_t msix_config;		/* read-write */
80
	uint16_t num_queues;		/* read-only */
81
	uint8_t device_status;		/* read-write */
82
	uint8_t config_generation;	/* read-only */
83
84
	/* About a specific virtqueue. */
85
	uint16_t queue_select;		/* read-write */
86
	uint16_t queue_size;		/* read-write, power of 2. */
87
	uint16_t queue_msix_vector;	/* read-write */
88
	uint16_t queue_enable;		/* read-write */
89
	uint16_t queue_notify_off;	/* read-only */
90
	uint32_t queue_desc_lo;		/* read-write */
91
	uint32_t queue_desc_hi;		/* read-write */
92
	uint32_t queue_avail_lo;	/* read-write */
93
	uint32_t queue_avail_hi;	/* read-write */
94
	uint32_t queue_used_lo;		/* read-write */
95
	uint32_t queue_used_hi;		/* read-write */
96
};
97
98
/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
99
struct virtio_pci_cfg_cap {
100
	struct virtio_pci_cap cap;
101
	uint8_t pci_cfg_data[4]; /* Data for BAR access. */
102
};
103
104
/* Macro versions of offsets for the Old Timers! */
105
#define VIRTIO_PCI_CAP_VNDR		0
106
#define VIRTIO_PCI_CAP_NEXT		1
107
#define VIRTIO_PCI_CAP_LEN		2
108
#define VIRTIO_PCI_CAP_CFG_TYPE		3
109
#define VIRTIO_PCI_CAP_BAR		4
110
#define VIRTIO_PCI_CAP_OFFSET		8
111
#define VIRTIO_PCI_CAP_LENGTH		12
112
113
#define VIRTIO_PCI_NOTIFY_CAP_MULT	16
114
115
#define VIRTIO_PCI_COMMON_DFSELECT	0
116
#define VIRTIO_PCI_COMMON_DF		4
117
#define VIRTIO_PCI_COMMON_GFSELECT	8
118
#define VIRTIO_PCI_COMMON_GF		12
119
#define VIRTIO_PCI_COMMON_MSIX		16
120
#define VIRTIO_PCI_COMMON_NUMQ		18
121
#define VIRTIO_PCI_COMMON_STATUS	20
122
#define VIRTIO_PCI_COMMON_CFGGENERATION	21
123
#define VIRTIO_PCI_COMMON_Q_SELECT	22
124
#define VIRTIO_PCI_COMMON_Q_SIZE	24
125
#define VIRTIO_PCI_COMMON_Q_MSIX	26
126
#define VIRTIO_PCI_COMMON_Q_ENABLE	28
127
#define VIRTIO_PCI_COMMON_Q_NOFF	30
128
#define VIRTIO_PCI_COMMON_Q_DESCLO	32
129
#define VIRTIO_PCI_COMMON_Q_DESCHI	36
130
#define VIRTIO_PCI_COMMON_Q_AVAILLO	40
131
#define VIRTIO_PCI_COMMON_Q_AVAILHI	44
132
#define VIRTIO_PCI_COMMON_Q_USEDLO	48
133
#define VIRTIO_PCI_COMMON_Q_USEDHI	52
134
135
#endif /* _VIRTIO_PCI_MODERN_VAR_H */
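The VIRTIO_PCI_COMMON_* constants duplicate the struct virtio_pci_common_cfg field offsets so the transport can do plain bus accesses into the mapped common-config window. A minimal sketch of how they combine follows; the helper name and the resource-map parameter are illustrative, not from the patch (the real accessors live in virtio_pci_modern.c):

/* Sketch: select a queue, then read its size from common config. */
static uint16_t
sketch_queue_size(struct resource_map *common_map, uint16_t queue)
{
	bus_write_2(common_map, VIRTIO_PCI_COMMON_Q_SELECT, queue);
	return (bus_read_2(common_map, VIRTIO_PCI_COMMON_Q_SIZE));
}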
(-)sys/dev/virtio/pci/virtio_pci_var.h (+55 lines)
Lines 1-55 (new file)
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors:
+ *  Anthony Liguori  <aliguori@us.ibm.com>
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _VIRTIO_PCI_VAR_H
+#define _VIRTIO_PCI_VAR_H
+
+/* VirtIO PCI vendor/device ID. */
+#define VIRTIO_PCI_VENDORID	0x1AF4
+#define VIRTIO_PCI_DEVICEID_MIN	0x1000
+#define VIRTIO_PCI_DEVICEID_LEGACY_MAX	0x103F
+#define VIRTIO_PCI_DEVICEID_MODERN_MIN	0x1040
+#define VIRTIO_PCI_DEVICEID_MODERN_MAX	0x107F
+
+/* The bit of the ISR which indicates a device has an interrupt. */
+#define VIRTIO_PCI_ISR_INTR	0x1
+/* The bit of the ISR which indicates a device configuration change. */
+#define VIRTIO_PCI_ISR_CONFIG	0x2
+/* Vector value used to disable MSI for queue. */
+#define VIRTIO_MSI_NO_VECTOR	0xFFFF
+
+#endif /* _VIRTIO_PCI_VAR_H */
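Modern devices encode the VirtIO device type directly in the PCI device ID (0x1040 plus the type), while legacy IDs 0x1000-0x103F carry the type in a subsystem ID. A short sketch of the modern mapping; the function is illustrative only:

/* Sketch: 0 means "not a modern VirtIO device"; otherwise the type. */
static uint16_t
sketch_modern_device_type(uint16_t pci_devid)
{
	if (pci_devid < VIRTIO_PCI_DEVICEID_MODERN_MIN ||
	    pci_devid > VIRTIO_PCI_DEVICEID_MODERN_MAX)
		return (0);
	return (pci_devid - VIRTIO_PCI_DEVICEID_MODERN_MIN);
}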
(-)sys/dev/virtio/random/virtio_random.c (-7 / +27 lines)
Lines 58-64
 static int	vtrnd_attach(device_t);
 static int	vtrnd_detach(device_t);
 
-static void	vtrnd_negotiate_features(struct vtrnd_softc *);
+static int	vtrnd_negotiate_features(struct vtrnd_softc *);
+static int	vtrnd_setup_features(struct vtrnd_softc *);
 static int	vtrnd_alloc_virtqueue(struct vtrnd_softc *);
 static void	vtrnd_harvest(struct vtrnd_softc *);
 static void	vtrnd_timer(void *);
Lines 85-92
 };
 static devclass_t vtrnd_devclass;
 
-DRIVER_MODULE(virtio_random, virtio_pci, vtrnd_driver, vtrnd_devclass,
+DRIVER_MODULE(virtio_random, vtpcil, vtrnd_driver, vtrnd_devclass,
     vtrnd_modevent, 0);
+DRIVER_MODULE(virtio_random, vtpcim, vtrnd_driver, vtrnd_devclass,
+    vtrnd_modevent, 0);
 MODULE_VERSION(virtio_random, 1);
 MODULE_DEPEND(virtio_random, virtio, 1, 1, 1);
 
Lines 130-141
 
 	sc = device_get_softc(dev);
 	sc->vtrnd_dev = dev;
-
+	virtio_set_feature_desc(dev, vtrnd_feature_desc);
 	callout_init(&sc->vtrnd_callout, 1);
 
-	virtio_set_feature_desc(dev, vtrnd_feature_desc);
-	vtrnd_negotiate_features(sc);
-
+	error = vtrnd_setup_features(sc);
+	if (error) {
+		device_printf(dev, "cannot setup features\n");
+		goto fail;
+	}
 	error = vtrnd_alloc_virtqueue(sc);
 	if (error) {
 		device_printf(dev, "cannot allocate virtqueue\n");
Lines 163-169
 	return (0);
 }
 
-static void
+static int
 vtrnd_negotiate_features(struct vtrnd_softc *sc)
 {
 	device_t dev;
Lines 173-181
 	features = VTRND_FEATURES;
 
 	sc->vtrnd_features = virtio_negotiate_features(dev, features);
+	return (virtio_finalize_features(dev));
 }
 
 static int
+vtrnd_setup_features(struct vtrnd_softc *sc)
+{
+	int error;
+
+	error = vtrnd_negotiate_features(sc);
+	if (error)
+		return (error);
+
+	return (0);
+}
+
+static int
 vtrnd_alloc_virtqueue(struct vtrnd_softc *sc)
 {
 	device_t dev;
Lines 217-222
 	virtqueue_notify(vq);
 	virtqueue_poll(vq, NULL);
 
+//	random_harvest_queue(&value, sizeof(value), sizeof(value) * NBBY / 2,
+//	    RANDOM_PURE_VIRTIO);
 	random_harvest_queue(&value, sizeof(value), RANDOM_PURE_VIRTIO);
 }
 
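The negotiate/finalize split this driver adopts reflects the V1 handshake: negotiation writes back the accepted feature subset, and finalization is where a modern transport performs the FEATURES_OK step. Every converted driver in this patch follows the same shape; a generic sketch under those assumptions (names illustrative):

static int
sketch_setup_features(device_t dev, uint64_t desired, uint64_t *negotiated)
{
	/* The transport masks 'desired' against what the device offers. */
	*negotiated = virtio_negotiate_features(dev, desired);

	/* No-op on legacy; FEATURES_OK handshake on modern transports. */
	return (virtio_finalize_features(dev));
}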
(-)sys/dev/virtio/scsi/virtio_scsi.c (-40 / +77 lines)
Lines 76-82
 static int	vtscsi_suspend(device_t);
 static int	vtscsi_resume(device_t);
 
-static void	vtscsi_negotiate_features(struct vtscsi_softc *);
+static int	vtscsi_negotiate_features(struct vtscsi_softc *);
+static int	vtscsi_setup_features(struct vtscsi_softc *);
 static void	vtscsi_read_config(struct vtscsi_softc *,
 		    struct virtio_scsi_config *);
 static int	vtscsi_maximum_segments(struct vtscsi_softc *, int);
Lines 135-144
 
 static void	vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
 static void	vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
-static void	vtscsi_init_scsi_cmd_req(struct ccb_scsiio *,
-		    struct virtio_scsi_cmd_req *);
-static void	vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t,
-		    uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
+static void	vtscsi_init_scsi_cmd_req(struct vtscsi_softc *,
+		    struct ccb_scsiio *, struct virtio_scsi_cmd_req *);
+static void	vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *, struct ccb_hdr *,
+		    uint32_t, uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
 
 static void	vtscsi_freeze_simq(struct vtscsi_softc *, int);
 static int	vtscsi_thaw_simq(struct vtscsi_softc *, int);
Lines 184-194
 static void	vtscsi_enable_vqs_intr(struct vtscsi_softc *);
 
 static void	vtscsi_get_tunables(struct vtscsi_softc *);
-static void	vtscsi_add_sysctl(struct vtscsi_softc *);
+static void	vtscsi_setup_sysctl(struct vtscsi_softc *);
 
 static void	vtscsi_printf_req(struct vtscsi_request *, const char *,
 		    const char *, ...);
 
+#define vtscsi_modern(_sc) (((_sc)->vtscsi_features & VIRTIO_F_VERSION_1) != 0)
+#define vtscsi_htog16(_sc, _val)	virtio_htog16(vtscsi_modern(_sc), _val)
+#define vtscsi_htog32(_sc, _val)	virtio_htog32(vtscsi_modern(_sc), _val)
+#define vtscsi_htog64(_sc, _val)	virtio_htog64(vtscsi_modern(_sc), _val)
+#define vtscsi_gtoh16(_sc, _val)	virtio_gtoh16(vtscsi_modern(_sc), _val)
+#define vtscsi_gtoh32(_sc, _val)	virtio_gtoh32(vtscsi_modern(_sc), _val)
+#define vtscsi_gtoh64(_sc, _val)	virtio_gtoh64(vtscsi_modern(_sc), _val)
+
 /* Global tunables. */
 /*
  * The current QEMU VirtIO SCSI implementation does not cancel in-flight
Lines 206-211
 static struct virtio_feature_desc vtscsi_feature_desc[] = {
 	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
 	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},
+	{ VIRTIO_SCSI_F_CHANGE,		"ChangeEvent"	},
+	{ VIRTIO_SCSI_F_T10_PI, 	"T10PI"		},
 
 	{ 0, NULL }
 };
Lines 228-235
 };
 static devclass_t vtscsi_devclass;
 
-DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass,
+DRIVER_MODULE(virtio_scsi, vtpcil, vtscsi_driver, vtscsi_devclass,
     vtscsi_modevent, 0);
+DRIVER_MODULE(virtio_scsi, vtpcim, vtscsi_driver, vtscsi_devclass,
+    vtscsi_modevent, 0);
 MODULE_VERSION(virtio_scsi, 1);
 MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
 MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);
Lines 275-297
 
 	sc = device_get_softc(dev);
 	sc->vtscsi_dev = dev;
+	virtio_set_feature_desc(dev, vtscsi_feature_desc);
 
 	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
 	TAILQ_INIT(&sc->vtscsi_req_free);
 
 	vtscsi_get_tunables(sc);
-	vtscsi_add_sysctl(sc);
+	vtscsi_setup_sysctl(sc);
 
-	virtio_set_feature_desc(dev, vtscsi_feature_desc);
-	vtscsi_negotiate_features(sc);
+	error = vtscsi_setup_features(sc);
+	if (error) {
+		device_printf(dev, "cannot setup features\n");
+		goto fail;
+	}
 
-	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
-		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
-	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
-		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
-	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
-		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;
-
 	vtscsi_read_config(sc, &scsicfg);
 
 	sc->vtscsi_max_channel = scsicfg.max_channel;
Lines 403-419
 	return (0);
 }
 
-static void
+static int
 vtscsi_negotiate_features(struct vtscsi_softc *sc)
 {
 	device_t dev;
 	uint64_t features;
 
 	dev = sc->vtscsi_dev;
-	features = virtio_negotiate_features(dev, VTSCSI_FEATURES);
-	sc->vtscsi_features = features;
+	features = VTSCSI_FEATURES;
+
+	sc->vtscsi_features = virtio_negotiate_features(dev, features);
+	return (virtio_finalize_features(dev));
 }
 
+static int
+vtscsi_setup_features(struct vtscsi_softc *sc)
+{
+	device_t dev;
+	int error;
+
+	dev = sc->vtscsi_dev;
+
+	error = vtscsi_negotiate_features(sc);
+	if (error)
+		return (error);
+
+	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
+		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
+	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
+		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
+	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
+		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;
+
+	return (0);
+}
+
 #define VTSCSI_GET_CONFIG(_dev, _field, _cfg)			\
 	virtio_read_device_config(_dev,				\
 	    offsetof(struct virtio_scsi_config, _field),	\
Lines 531-538
 	error = virtio_reinit(dev, sc->vtscsi_features);
 	if (error == 0) {
 		vtscsi_write_device_config(sc);
-		vtscsi_reinit_event_vq(sc);
 		virtio_reinit_complete(dev);
+		vtscsi_reinit_event_vq(sc);
 
 		vtscsi_enable_vqs_intr(sc);
 	}
Lines 940-946
 	cpi->max_target = sc->vtscsi_max_target;
 	cpi->max_lun = sc->vtscsi_max_lun;
-	cpi->initiator_id = VTSCSI_INITIATOR_ID;
+	cpi->initiator_id = cpi->max_target + 1;
 
 	strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 	strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
Lines 1086-1092
 	cmd_req = &req->vsr_cmd_req;
 	cmd_resp = &req->vsr_cmd_resp;
 
-	vtscsi_init_scsi_cmd_req(csio, cmd_req);
+	vtscsi_init_scsi_cmd_req(sc, csio, cmd_req);
 
 	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
 	if (error)
Lines 1206-1212
 	tmf_req = &req->vsr_tmf_req;
 	tmf_resp = &req->vsr_tmf_resp;
 
-	vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
+	vtscsi_init_ctrl_tmf_req(sc, to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
 	    (uintptr_t) to_ccbh, tmf_req);
 
 	sglist_reset(sg);
Lines 1314-1339
 vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
     struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
 {
+	uint32_t resp_sense_length;
 	cam_status status;
 
 	csio->scsi_status = cmd_resp->status;
-	csio->resid = cmd_resp->resid;
+	csio->resid = vtscsi_htog32(sc, cmd_resp->resid);
 
 	if (csio->scsi_status == SCSI_STATUS_OK)
 		status = CAM_REQ_CMP;
 	else
 		status = CAM_SCSI_STATUS_ERROR;
 
-	if (cmd_resp->sense_len > 0) {
+	resp_sense_length = vtscsi_htog32(sc, cmd_resp->sense_len);
+
+	if (resp_sense_length > 0) {
 		status |= CAM_AUTOSNS_VALID;
 
-		if (cmd_resp->sense_len < csio->sense_len)
-			csio->sense_resid = csio->sense_len -
-			    cmd_resp->sense_len;
+		if (resp_sense_length < csio->sense_len)
+			csio->sense_resid = csio->sense_len - resp_sense_length;
 		else
 			csio->sense_resid = 0;
 
-		memcpy(&csio->sense_data, cmd_resp->sense,
+		bzero(&csio->sense_data, sizeof(csio->sense_data));
+		memcpy(&csio->sense_data, cmd_resp->sense,
 		    csio->sense_len - csio->sense_resid);
 	}
 
Lines 1494-1500
 	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
 		callout_stop(&abort_req->vsr_callout);
 
-	vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
+	vtscsi_init_ctrl_tmf_req(sc, ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
 	    (uintptr_t) abort_ccbh, tmf_req);
 
 	sglist_reset(sg);
Lines 1563-1569
 	else
 		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;
 
-	vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req);
+	vtscsi_init_ctrl_tmf_req(sc, ccbh, subtype, 0, tmf_req);
 
 	sglist_reset(sg);
 	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
Lines 1600-1606
 }
 
 static void
-vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio,
+vtscsi_init_scsi_cmd_req(struct vtscsi_softc *sc, struct ccb_scsiio *csio,
     struct virtio_scsi_cmd_req *cmd_req)
 {
 	uint8_t attr;
Lines 1621-1627
 	}
 
 	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
-	cmd_req->tag = (uintptr_t) csio;
+	cmd_req->tag = vtscsi_gtoh64(sc, (uintptr_t) csio);
 	cmd_req->task_attr = attr;
 
 	memcpy(cmd_req->cdb,
Lines 1631-1645
 }
 
 static void
-vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype,
-    uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
+vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *sc, struct ccb_hdr *ccbh,
+    uint32_t subtype, uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
 {
 
 	vtscsi_set_request_lun(ccbh, tmf_req->lun);
 
-	tmf_req->type = VIRTIO_SCSI_T_TMF;
-	tmf_req->subtype = subtype;
-	tmf_req->tag = tag;
+	tmf_req->type = vtscsi_gtoh32(sc, VIRTIO_SCSI_T_TMF);
+	tmf_req->subtype = vtscsi_gtoh32(sc, subtype);
+	tmf_req->tag = vtscsi_gtoh64(sc, tag);
 }
 
 static void
Lines 2273-2279
 }
 
 static void
-vtscsi_add_sysctl(struct vtscsi_softc *sc)
+vtscsi_setup_sysctl(struct vtscsi_softc *sc)
 {
 	device_t dev;
 	struct vtscsi_statistics *stats;
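Because the request tag is now stored in guest-to-host order, the completion path has to convert it back before treating it as a pointer. A sketch of the reverse step, illustrative only (the patch's completion code performs the equivalent conversion):

static struct ccb_scsiio *
sketch_tag_to_csio(struct vtscsi_softc *sc, uint64_t wire_tag)
{
	/* Inverse of cmd_req->tag = vtscsi_gtoh64(sc, (uintptr_t) csio). */
	return ((struct ccb_scsiio *)(uintptr_t) vtscsi_htog64(sc, wire_tag));
}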
(-)sys/dev/virtio/scsi/virtio_scsi.h (-8 / +31 lines)
Lines 31-43
 #ifndef _VIRTIO_SCSI_H
 #define _VIRTIO_SCSI_H
 
-/* Feature bits */
-#define VIRTIO_SCSI_F_INOUT	0x0001	/* Single request can contain both
-					 * read and write buffers */
-#define VIRTIO_SCSI_F_HOTPLUG	0x0002	/* Host should enable hot plug/unplug
-					 * of new LUNs and targets.
-					 */
-
+/* Default values of the CDB and sense data size configuration fields */
 #define VIRTIO_SCSI_CDB_SIZE	32
 #define VIRTIO_SCSI_SENSE_SIZE	96
 
Lines 46-56
 	uint8_t lun[8];		/* Logical Unit Number */
 	uint64_t tag;		/* Command identifier */
 	uint8_t task_attr;	/* Task attribute */
-	uint8_t prio;
+	uint8_t prio;		/* SAM command priority field */
 	uint8_t crn;
 	uint8_t cdb[VIRTIO_SCSI_CDB_SIZE];
 } __packed;
 
+/* SCSI command request, followed by protection information */
+struct virtio_scsi_cmd_req_pi {
+	uint8_t lun[8];		/* Logical Unit Number */
+	uint64_t tag;		/* Command identifier */
+	uint8_t task_attr;	/* Task attribute */
+	uint8_t prio;		/* SAM command priority field */
+	uint8_t crn;
+	uint32_t pi_bytesout;	/* DataOUT PI Number of bytes */
+	uint32_t pi_bytesin;	/* DataIN PI Number of bytes */
+	uint8_t cdb[VIRTIO_SCSI_CDB_SIZE];
+} __packed;
+
 /* Response, followed by sense data and data-in */
 struct virtio_scsi_cmd_resp {
 	uint32_t sense_len;		/* Sense data length */
Lines 104-109
 	uint32_t max_lun;
 } __packed;
 
+/* Feature bits */
+#define VIRTIO_SCSI_F_INOUT	0x0001	/* Single request can contain both
+					 * read and write buffers.
+					 */
+#define VIRTIO_SCSI_F_HOTPLUG	0x0002	/* Host should enable hot plug/unplug
+					 * of new LUNs and targets.
+					 */
+#define VIRTIO_SCSI_F_CHANGE	0x0004	/* Host will report changes to LUN
+					 * parameters via a
+					 * VIRTIO_SCSI_T_PARAM_CHANGE event.
+					 */
+#define VIRTIO_SCSI_F_T10_PI 	0x0008	/* Extended fields for T10 protection
+					 * information (DIF/DIX) are included
+					 * in the SCSI request header.
+					 */
+
 /* Response codes */
 #define VIRTIO_SCSI_S_OK                       0
 #define VIRTIO_SCSI_S_FUNCTION_COMPLETE        0
Lines 140-145
 #define VIRTIO_SCSI_T_NO_EVENT                 0
 #define VIRTIO_SCSI_T_TRANSPORT_RESET          1
 #define VIRTIO_SCSI_T_ASYNC_NOTIFY             2
+#define VIRTIO_SCSI_T_PARAM_CHANGE             3
 
 /* Reasons of transport reset event */
 #define VIRTIO_SCSI_EVT_RESET_HARD             0
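When VIRTIO_SCSI_F_T10_PI is negotiated, requests must use the extended header carrying the PI byte counts; otherwise the plain header applies. The driver in this patch does not implement T10 PI, so the following size selection is purely illustrative:

static size_t
sketch_cmd_req_size(device_t dev)
{
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_T10_PI))
		return (sizeof(struct virtio_scsi_cmd_req_pi));
	return (sizeof(struct virtio_scsi_cmd_req));
}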
(-)sys/dev/virtio/scsi/virtio_scsivar.h (-5 lines)
Lines 205-215
 #define VTSCSI_RESERVED_REQUESTS	10
 
 /*
- * Specification doesn't say, use traditional SCSI default.
- */
-#define VTSCSI_INITIATOR_ID	7
-
-/*
  * How to wait (or not) for request completion.
  */
 #define VTSCSI_EXECUTE_ASYNC	0
(-)sys/dev/virtio/virtio.c (-27 / +92 lines)
Lines 75-84
 
 /* Device independent features. */
 static struct virtio_feature_desc virtio_common_feature_desc[] = {
-	{ VIRTIO_F_NOTIFY_ON_EMPTY,	"NotifyOnEmpty"	},
-	{ VIRTIO_RING_F_INDIRECT_DESC,	"RingIndirect"	},
-	{ VIRTIO_RING_F_EVENT_IDX,	"EventIdx"	},
-	{ VIRTIO_F_BAD_FEATURE,		"BadFeature"	},
+	{ VIRTIO_F_NOTIFY_ON_EMPTY,	"NotifyOnEmpty"		}, /* Legacy */
+	{ VIRTIO_F_ANY_LAYOUT,		"AnyLayout"		}, /* Legacy */
+	{ VIRTIO_RING_F_INDIRECT_DESC,	"RingIndirectDesc"	},
+	{ VIRTIO_RING_F_EVENT_IDX,	"RingEventIdx"		},
+	{ VIRTIO_F_BAD_FEATURE,		"BadFeature"		}, /* Legacy */
+	{ VIRTIO_F_VERSION_1,		"Version1"		},
+	{ VIRTIO_F_IOMMU_PLATFORM,	"IOMMUPlatform"		},
 
 	{ 0, NULL }
 };
Lines 116-139
 	return (NULL);
 }
 
-void
-virtio_describe(device_t dev, const char *msg,
-    uint64_t features, struct virtio_feature_desc *desc)
+int
+virtio_describe_sbuf(struct sbuf *sb, uint64_t features,
+    struct virtio_feature_desc *desc)
 {
-	struct sbuf sb;
 	uint64_t val;
-	char *buf;
 	const char *name;
 	int n;
 
-	if ((buf = malloc(512, M_TEMP, M_NOWAIT)) == NULL) {
-		device_printf(dev, "%s features: %#jx\n", msg, (uintmax_t) features);
-		return;
-	}
-
-	sbuf_new(&sb, buf, 512, SBUF_FIXEDLEN);
-	sbuf_printf(&sb, "%s features: %#jx", msg, (uintmax_t) features);
+	sbuf_printf(sb, "%#jx", (uintmax_t) features);
 
 	for (n = 0, val = 1ULL << 63; val != 0; val >>= 1) {
 		/*
 		 * BAD_FEATURE is used to detect broken Linux clients
Lines 143-174
 			continue;
 
 		if (n++ == 0)
-			sbuf_cat(&sb, " <");
+			sbuf_cat(sb, " <");
 		else
-			sbuf_cat(&sb, ",");
+			sbuf_cat(sb, ",");
 
 		name = virtio_feature_name(val, desc);
 		if (name == NULL)
-			sbuf_printf(&sb, "%#jx", (uintmax_t) val);
+			sbuf_printf(sb, "%#jx", (uintmax_t) val);
 		else
-			sbuf_cat(&sb, name);
+			sbuf_cat(sb, name);
 	}
 
 	if (n > 0)
-		sbuf_cat(&sb, ">");
+		sbuf_cat(sb, ">");
 
-#if __FreeBSD_version < 900020
-	sbuf_finish(&sb);
-	if (sbuf_overflowed(&sb) == 0)
-#else
-	if (sbuf_finish(&sb) == 0)
-#endif
+	return (sbuf_finish(sb));
+}
+
+void
+virtio_describe(device_t dev, const char *msg, uint64_t features,
+    struct virtio_feature_desc *desc)
+{
+	struct sbuf sb;
+	char *buf;
+	int error;
+
+	if ((buf = malloc(1024, M_TEMP, M_NOWAIT)) == NULL) {
+		error = ENOMEM;
+		goto out;
+	}
+
+	sbuf_new(&sb, buf, 1024, SBUF_FIXEDLEN);
+	sbuf_printf(&sb, "%s features: ", msg);
+
+	error = virtio_describe_sbuf(&sb, features, desc);
+	if (error == 0)
 		device_printf(dev, "%s\n", sbuf_data(&sb));
 
 	sbuf_delete(&sb);
 	free(buf, M_TEMP);
+
+out:
+	if (error != 0) {
+		device_printf(dev, "%s features: %#jx\n", msg,
+		    (uintmax_t) features);
+	}
 }
 
+uint64_t
+virtio_filter_transport_features(uint64_t features)
+{
+	uint64_t transport, mask;
+
+	transport = (1ULL <<
+	    (VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START)) - 1;
+	transport <<= VIRTIO_TRANSPORT_F_START;
+
+	mask = -1ULL & ~transport;
+	mask |= VIRTIO_RING_F_INDIRECT_DESC;
+	mask |= VIRTIO_RING_F_EVENT_IDX;
+	mask |= VIRTIO_F_VERSION_1;
+
+	return (features & mask);
+}
+
+int
+virtio_bus_is_modern(device_t dev)
+{
+	uintptr_t modern;
+
+	virtio_read_ivar(dev, VIRTIO_IVAR_MODERN, &modern);
+	return (modern != 0);
+}
+
+void
+virtio_read_device_config_array(device_t dev, bus_size_t offset, void *dst,
+    int size, int count)
+{
+	int i, gen;
+
+	do {
+		gen = virtio_config_generation(dev);
+
+		for (i = 0; i < count; i++) {
+			virtio_read_device_config(dev, offset + i * size,
+			    (uint8_t *) dst + i * size, size);
+		}
+	} while (gen != virtio_config_generation(dev));
+}
+
 /*
  * VirtIO bus method wrappers.
  */
Lines 194-199
 
 	return (VIRTIO_BUS_NEGOTIATE_FEATURES(device_get_parent(dev),
 	    child_features));
 }
 
+int
+virtio_finalize_features(device_t dev)
+{
+
+	return (VIRTIO_BUS_FINALIZE_FEATURES(device_get_parent(dev)));
+}
+
 int
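virtio_filter_transport_features() strips everything in the transport bit range except the three bits a driver may legitimately see. As a worked example, assuming the customary values VIRTIO_TRANSPORT_F_START = 28 and VIRTIO_TRANSPORT_F_END = 38 (the actual values live in virtio_config.h; stated here as an assumption):

/*
 * transport = ((1ULL << (38 - 28)) - 1) << 28    -> bits 28..37 set
 * mask      = ~transport                         -> all other bits
 * mask     |= INDIRECT_DESC (bit 28), EVENT_IDX (bit 29), VERSION_1 (bit 32)
 *
 * A transport-range bit the driver layer does not understand is dropped,
 * while VERSION_1 passes through to the device drivers.
 */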
(-)sys/dev/virtio/virtio.h (-1 / +12 lines)
Lines 31-39
 #ifndef _VIRTIO_H_
 #define _VIRTIO_H_
 
+#include <dev/virtio/virtio_endian.h>
 #include <dev/virtio/virtio_ids.h>
 #include <dev/virtio/virtio_config.h>
 
+struct sbuf;
 struct vq_alloc_info;
 
 /*
Lines 57-62
 #define VIRTIO_IVAR_DEVICE		4
 #define VIRTIO_IVAR_SUBVENDOR		5
 #define VIRTIO_IVAR_SUBDEVICE		6
+#define VIRTIO_IVAR_MODERN		7
 
 struct virtio_feature_desc {
 	uint64_t	 vfd_val;
Lines 65-71
 
 const char *virtio_device_name(uint16_t devid);
 void	 virtio_describe(device_t dev, const char *msg,
-	     uint64_t features, struct virtio_feature_desc *feature_desc);
+	     uint64_t features, struct virtio_feature_desc *desc);
+int	 virtio_describe_sbuf(struct sbuf *sb, uint64_t features,
+	     struct virtio_feature_desc *desc);
+uint64_t virtio_filter_transport_features(uint64_t features);
+int	 virtio_bus_is_modern(device_t dev);
+void	 virtio_read_device_config_array(device_t dev, bus_size_t offset,
+	     void *dst, int size, int count);
 
 /*
  * VirtIO Bus Methods.
Lines 73-78
 void	 virtio_read_ivar(device_t dev, int ivar, uintptr_t *val);
 void	 virtio_write_ivar(device_t dev, int ivar, uintptr_t val);
 uint64_t virtio_negotiate_features(device_t dev, uint64_t child_features);
+int	 virtio_finalize_features(device_t dev);
 int	 virtio_alloc_virtqueues(device_t dev, int flags, int nvqs,
 	     struct vq_alloc_info *info);
 int	 virtio_setup_intr(device_t dev, enum intr_type type);
Lines 130-135
 VIRTIO_READ_IVAR(device,	VIRTIO_IVAR_DEVICE);
 VIRTIO_READ_IVAR(subvendor,	VIRTIO_IVAR_SUBVENDOR);
 VIRTIO_READ_IVAR(subdevice,	VIRTIO_IVAR_SUBDEVICE);
+VIRTIO_READ_IVAR(modern,	VIRTIO_IVAR_MODERN);
 
 #undef VIRTIO_READ_IVAR
 
(-)sys/dev/virtio/virtio_bus_if.m (+11 lines)
Lines 36-41
 
 CODE {
 	static int
+	virtio_bus_default_finalize_features(device_t dev)
+	{
+		return (0);
+	}
+
+	static int
 	virtio_bus_default_config_generation(device_t dev)
 	{
 		return (0);
Lines 47-52
 	uint64_t	child_features;
 };
 
+METHOD int finalize_features {
+	device_t	dev;
+} DEFAULT virtio_bus_default_finalize_features;
+
 METHOD int with_feature {
 	device_t	dev;
 	uint64_t	feature;
Lines 80-85
 METHOD void notify_vq {
 	device_t	dev;
 	uint16_t	queue;
+	bus_size_t	offset;
 };
 
 METHOD int config_generation {
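The default finalize_features method is a no-op so legacy transports need no change; a modern transport overrides it to perform the FEATURES_OK handshake from the V1 spec. A hedged sketch of such an override is below; the status helpers are stand-ins rather than the patch's actual functions (the real implementation is in virtio_pci_modern.c), and the status bit name is taken to be VIRTIO_CONFIG_STATUS_FEATURES_OK as an assumption:

static int
sketch_finalize_features(device_t dev)
{
	uint8_t status;

	/* Set FEATURES_OK, then confirm the device kept it. */
	status = sketch_get_status(dev) | VIRTIO_CONFIG_STATUS_FEATURES_OK;
	sketch_set_status(dev, status);

	if ((sketch_get_status(dev) & VIRTIO_CONFIG_STATUS_FEATURES_OK) == 0) {
		device_printf(dev, "desired features were not accepted\n");
		return (ENOTSUP);
	}
	return (0);
}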
(-)sys/dev/virtio/virtio_endian.h (+106 lines)
Lines 1-106 (new file)
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2017, Bryan Venteicher <bryanv@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _VIRTIO_ENDIAN_H_
+#define _VIRTIO_ENDIAN_H_
+
+#include <sys/endian.h>
+
+/*
+ * VirtIO V1 (modern) uses little endian, while legacy VirtIO uses the guest's
+ * native endian. These functions convert to and from the Guest's (driver's)
+ * and the Host's (device's) endianness when needed.
+ */
+
+static inline bool
+virtio_swap_endian(bool modern)
+{
+#if _BYTE_ORDER == _LITTLE_ENDIAN
+	return (false);
+#else
+	return (modern);
+#endif
+}
+
+static inline uint16_t
+virtio_htog16(bool modern, uint16_t val)
+{
+	if (virtio_swap_endian(modern))
+		return (le16toh(val));
+	else
+		return (val);
+}
+
+static inline uint16_t
+virtio_gtoh16(bool modern, uint16_t val)
+{
+	if (virtio_swap_endian(modern))
+		return (htole16(val));
+	else
+		return (val);
+}
+
+static inline uint32_t
+virtio_htog32(bool modern, uint32_t val)
+{
+	if (virtio_swap_endian(modern))
+		return (le32toh(val));
+	else
+		return (val);
+}
+
+static inline uint32_t
+virtio_gtoh32(bool modern, uint32_t val)
+{
+	if (virtio_swap_endian(modern))
+		return (htole32(val));
+	else
+		return (val);
+}
+
+static inline uint64_t
+virtio_htog64(bool modern, uint64_t val)
+{
+	if (virtio_swap_endian(modern))
+		return (le64toh(val));
+	else
+		return (val);
+}
+
+static inline uint64_t
+virtio_gtoh64(bool modern, uint64_t val)
+{
+	if (virtio_swap_endian(modern))
+		return (htole64(val));
+	else
+		return (val);
+}
+
+#endif /* _VIRTIO_ENDIAN_H_ */
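On a little-endian guest every helper collapses to the identity and the conversions compile away; only a big-endian guest driving a modern (always little-endian) device actually swaps. A small usage sketch, with the 'modern' flag assumed to come from the negotiated VIRTIO_F_VERSION_1 bit as in the vq_modern()/vtscsi_modern() macros elsewhere in this patch:

static void
sketch_endian_usage(bool modern)
{
	uint16_t wire, host;

	wire = virtio_gtoh16(modern, 1234);	/* guest order -> ring order */
	host = virtio_htog16(modern, wire);	/* ring order -> guest order */
	/* host == 1234 on any guest endianness. */
	(void)host;
}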
(-)sys/dev/virtio/virtqueue.c (-59 / +81 lines)
Lines 57-74
 
 struct virtqueue {
 	device_t		 vq_dev;
-	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];
 	uint16_t		 vq_queue_index;
 	uint16_t		 vq_nentries;
 	uint32_t		 vq_flags;
-#define	VIRTQUEUE_FLAG_INDIRECT	 0x0001
-#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0002
+#define	VIRTQUEUE_FLAG_MODERN	 0x0001
+#define	VIRTQUEUE_FLAG_INDIRECT	 0x0002
+#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0004
 
-	int			 vq_alignment;
-	int			 vq_ring_size;
-	void			*vq_ring_mem;
 	int			 vq_max_indirect_size;
-	int			 vq_indirect_mem_size;
+	bus_size_t		 vq_notify_offset;
 	virtqueue_intr_t	*vq_intrhand;
 	void			*vq_intrhand_arg;
 
Lines 87-92
 	 */
 	uint16_t		 vq_used_cons_idx;
 
+	void			*vq_ring_mem;
+	int			 vq_indirect_mem_size;
+	int			 vq_alignment;
+	int			 vq_ring_size;
+	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];
+
 	struct vq_desc_extra {
 		void		  *cookie;
 		struct vring_desc *indirect;
Lines 134-139
 static void	vq_ring_notify_host(struct virtqueue *);
 static void	vq_ring_free_chain(struct virtqueue *, uint16_t);
 
+#define vq_modern(_vq)		(((_vq)->vq_flags & VIRTQUEUE_FLAG_MODERN) != 0)
+#define vq_htog16(_vq, _val)	virtio_htog16(vq_modern(_vq), _val)
+#define vq_htog32(_vq, _val)	virtio_htog32(vq_modern(_vq), _val)
+#define vq_htog64(_vq, _val)	virtio_htog64(vq_modern(_vq), _val)
+#define vq_gtoh16(_vq, _val)	virtio_gtoh16(vq_modern(_vq), _val)
+#define vq_gtoh32(_vq, _val)	virtio_gtoh32(vq_modern(_vq), _val)
+#define vq_gtoh64(_vq, _val)	virtio_gtoh64(vq_modern(_vq), _val)
+
 uint64_t
 virtqueue_filter_features(uint64_t features)
 {
Lines 147-154
 }
 
 int
-virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
-    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
+virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size,
+    bus_size_t notify_offset, int align, vm_paddr_t highaddr,
+    struct vq_alloc_info *info, struct virtqueue **vqp)
 {
 	struct virtqueue *vq;
 	int error;
Lines 184-195
 	vq->vq_dev = dev;
 	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
 	vq->vq_queue_index = queue;
+	vq->vq_notify_offset = notify_offset;
 	vq->vq_alignment = align;
 	vq->vq_nentries = size;
 	vq->vq_free_cnt = size;
 	vq->vq_intrhand = info->vqai_intr;
 	vq->vq_intrhand_arg = info->vqai_intr_arg;
 
+	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_F_VERSION_1) != 0)
+		vq->vq_flags |= VIRTQUEUE_FLAG_MODERN;
 	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
 		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;
 
Lines 294-301
 	bzero(indirect, vq->vq_indirect_mem_size);
 
 	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
-		indirect[i].next = i + 1;
-	indirect[i].next = VQ_RING_DESC_CHAIN_END;
+		indirect[i].next = vq_gtoh16(vq, i + 1);
+	indirect[i].next = vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END);
 }
 
 int
Lines 441-447
 {
 	uint16_t used_idx, nused;
 
-	used_idx = vq->vq_ring.used->idx;
+	used_idx = vq_htog16(vq, vq->vq_ring.used->idx);
 
 	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
 	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");
Lines 453-459
 virtqueue_intr_filter(struct virtqueue *vq)
 {
 
-	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
+	if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
 		return (0);
 
 	virtqueue_disable_intr(vq);
Lines 480-486
 {
 	uint16_t ndesc, avail_idx;
 
-	avail_idx = vq->vq_ring.avail->idx;
+	avail_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
 	ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);
 
 	switch (hint) {
Lines 505-514
 {
 
 	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
-		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
-		    vq->vq_nentries - 1;
-	} else
-		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+		vring_used_event(&vq->vq_ring) = vq_gtoh16(vq,
+		    vq->vq_used_cons_idx - vq->vq_nentries - 1);
+		return;
+	}
+
+	vq->vq_ring.avail->flags |= vq_gtoh16(vq, VRING_AVAIL_F_NO_INTERRUPT);
 }
 
 int
Lines 571-586
 	void *cookie;
 	uint16_t used_idx, desc_idx;
 
-	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
+	if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
 		return (NULL);
 
 	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
 	uep = &vq->vq_ring.used->ring[used_idx];
 
 	rmb();
-	desc_idx = (uint16_t) uep->id;
+	desc_idx = (uint16_t) vq_htog32(vq, uep->id);
 	if (len != NULL)
-		*len = uep->len;
+		*len = vq_htog32(vq, uep->len);
 
 	vq_ring_free_chain(vq, desc_idx);
 
Lines 638-650
 	printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
 	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
 	    "used.idx=%d; used_event_idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
-	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
-	    virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
-	    vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
-	    vq->vq_ring.used->idx,
-		vring_used_event(&vq->vq_ring),
-	    vq->vq_ring.avail->flags,
-	    vq->vq_ring.used->flags);
+	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt, virtqueue_nused(vq),
+	    vq->vq_queued_cnt, vq->vq_desc_head_idx,
+	    vq_htog16(vq, vq->vq_ring.avail->idx), vq->vq_used_cons_idx,
+	    vq_htog16(vq, vq->vq_ring.used->idx),
+	    vq_htog16(vq, vring_used_event(&vq->vq_ring)),
+	    vq_htog16(vq, vq->vq_ring.avail->flags),
+	    vq_htog16(vq, vq->vq_ring.used->flags));
 }
 
 static void
Lines 661-674
 	vring_init(vr, size, ring_mem, vq->vq_alignment);
 
 	for (i = 0; i < size - 1; i++)
-		vr->desc[i].next = i + 1;
-	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
+		vr->desc[i].next = vq_gtoh16(vq, i + 1);
+	vr->desc[i].next = vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END);
 }
 
 static void
 vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
 {
-	uint16_t avail_idx;
+	uint16_t avail_idx, avail_ring_idx;
 
 	/*
 	 * Place the head of the descriptor chain into the next slot and make
Lines 677-687
 	 * currently running on another CPU, we can keep it processing the new
 	 * descriptor.
 	 */
-	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
-	vq->vq_ring.avail->ring[avail_idx] = desc_idx;
-
+	avail_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
+	avail_ring_idx = avail_idx & (vq->vq_nentries - 1);
+	vq->vq_ring.avail->ring[avail_ring_idx] = vq_gtoh16(vq, desc_idx);
 	wmb();
-	vq->vq_ring.avail->idx++;
+	vq->vq_ring.avail->idx = vq_gtoh16(vq, avail_idx + 1);
 
 	/* Keep pending count until virtqueue_notify(). */
 	vq->vq_queued_cnt++;
Lines 700-718
 
 	for (i = 0, idx = head_idx, seg = sg->sg_segs;
 	     i < needed;
-	     i++, idx = dp->next, seg++) {
+	     i++, idx = vq_htog16(vq, dp->next), seg++) {
 		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
 		    "premature end of free desc chain");
 
 		dp = &desc[idx];
-		dp->addr = seg->ss_paddr;
-		dp->len = seg->ss_len;
+		dp->addr = vq_gtoh64(vq, seg->ss_paddr);
+		dp->len = vq_gtoh32(vq, seg->ss_len);
 		dp->flags = 0;
 
 		if (i < needed - 1)
-			dp->flags |= VRING_DESC_F_NEXT;
+			dp->flags |= vq_gtoh16(vq, VRING_DESC_F_NEXT);
 		if (i >= readable)
-			dp->flags |= VRING_DESC_F_WRITE;
+			dp->flags |= vq_gtoh16(vq, VRING_DESC_F_WRITE);
 	}
 
 	return (idx);
Lines 757-770
 	dxp->cookie = cookie;
 	dxp->ndescs = 1;
 
-	dp->addr = dxp->indirect_paddr;
-	dp->len = needed * sizeof(struct vring_desc);
-	dp->flags = VRING_DESC_F_INDIRECT;
+	dp->addr = vq_gtoh64(vq, dxp->indirect_paddr);
+	dp->len = vq_gtoh32(vq, needed * sizeof(struct vring_desc));
+	dp->flags = vq_gtoh16(vq, VRING_DESC_F_INDIRECT);
 
 	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
 	    sg, readable, writable);
 
-	vq->vq_desc_head_idx = dp->next;
+	vq->vq_desc_head_idx = vq_htog16(vq, dp->next);
 	vq->vq_free_cnt--;
 	if (vq->vq_free_cnt == 0)
 		VQ_RING_ASSERT_CHAIN_TERM(vq);
Lines 782-792
 	 * Enable interrupts, making sure we get the latest index of
 	 * what's already been consumed.
 	 */
-	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
-		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
-	else
-		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
-
+	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
+		vring_used_event(&vq->vq_ring) =
+		    vq_gtoh16(vq, vq->vq_used_cons_idx + ndesc);
+	} else {
+		vq->vq_ring.avail->flags &=
+		    vq_gtoh16(vq, ~VRING_AVAIL_F_NO_INTERRUPT);
+	}
 	mb();
 
 	/*
Lines 803-826
 static int
 vq_ring_must_notify_host(struct virtqueue *vq)
 {
-	uint16_t new_idx, prev_idx, event_idx;
+	uint16_t new_idx, prev_idx, event_idx, flags;
 
 	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
-		new_idx = vq->vq_ring.avail->idx;
+		new_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
 		prev_idx = new_idx - vq->vq_queued_cnt;
-		event_idx = vring_avail_event(&vq->vq_ring);
+		event_idx = vq_htog16(vq, vring_avail_event(&vq->vq_ring));
 
 		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
 	}
 
-	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
+	flags = vq->vq_ring.used->flags;
+	return ((flags & vq_gtoh16(vq, VRING_USED_F_NO_NOTIFY)) == 0);
 }
 
 static void
 vq_ring_notify_host(struct virtqueue *vq)
 {
 
-	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
+	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index,
+	    vq->vq_notify_offset);
 }
 
 static void
Lines 839-848
 	vq->vq_free_cnt += dxp->ndescs;
 	dxp->ndescs--;
 
-	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
-		while (dp->flags & VRING_DESC_F_NEXT) {
-			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
-			dp = &vq->vq_ring.desc[dp->next];
+	if ((dp->flags & vq_gtoh16(vq, VRING_DESC_F_INDIRECT)) == 0) {
+		while (dp->flags & vq_gtoh16(vq, VRING_DESC_F_NEXT)) {
+			uint16_t next_idx = vq_htog16(vq, dp->next);
+			VQ_RING_ASSERT_VALID_IDX(vq, next_idx);
+			dp = &vq->vq_ring.desc[next_idx];
 			dxp->ndescs--;
 		}
 	}
Lines 855-860
 	 * newly freed chain. If the virtqueue was completely used, then
 	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
 	 */
-	dp->next = vq->vq_desc_head_idx;
+	dp->next = vq_gtoh16(vq, vq->vq_desc_head_idx);
 	vq->vq_desc_head_idx = desc_idx;
 }
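Under EVENT_IDX the doorbell test in vq_ring_must_notify_host() reduces to vring_need_event(), an unsigned window comparison: notify only if the avail index has passed the event index the device published since the last notify. A worked example as a comment (the helper is defined in virtio_ring.h):

/*
 * vring_need_event(event, new, old) is
 *     (uint16_t)(new - event - 1) < (uint16_t)(new - old)
 *
 *   old = 10, new = 14, event = 12:  (14-12-1)=1  < (14-10)=4  -> notify
 *   old = 10, new = 14, event = 20:  (14-20-1) wraps to 0xFFF9 -> skip
 */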
(-)sys/dev/virtio/virtqueue.h (-2 / +2 lines)
Lines 70-77
 uint64_t virtqueue_filter_features(uint64_t features);
 
 int	 virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size,
-	     int align, vm_paddr_t highaddr, struct vq_alloc_info *info,
-	     struct virtqueue **vqp);
+	     bus_size_t notify_offset, int align, vm_paddr_t highaddr,
+	     struct vq_alloc_info *info, struct virtqueue **vqp);
 void	*virtqueue_drain(struct virtqueue *vq, int *last);
 void	 virtqueue_free(struct virtqueue *vq);
 int	 virtqueue_reinit(struct virtqueue *vq, uint16_t size);
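Callers of the new virtqueue_alloc() supply the queue's notify offset up front; for the modern PCI transport that is queue_notify_off scaled by notify_off_multiplier, while legacy can pass a constant. A hedged sketch of a transport-side call (variable and function names illustrative):

static int
sketch_alloc_vq(device_t dev, uint16_t idx, uint16_t size,
    uint32_t notify_off, uint32_t notify_off_mult,
    struct vq_alloc_info *info, struct virtqueue **vqp)
{
	bus_size_t offset;

	/* Modern spec: doorbell = queue_notify_off * notify_off_multiplier. */
	offset = (bus_size_t)notify_off * notify_off_mult;

	/* 4096 is the customary PCI vring alignment. */
	return (virtqueue_alloc(dev, idx, size, offset, 4096,
	    ~(vm_paddr_t)0, info, vqp));
}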