Patch from FreeBSD bug 236922 (VirtIO driver modernization).

(-)sys/conf/files (+3 lines)
Lines 3478-3483
 dev/virtio/virtio_bus_if.m		optional	virtio
 dev/virtio/virtio_if.m			optional	virtio
 dev/virtio/pci/virtio_pci.c		optional	virtio_pci
+dev/virtio/pci/virtio_pci_if.m          optional        virtio_pci
+dev/virtio/pci/virtio_pci_legacy.c      optional        virtio_pci
+dev/virtio/pci/virtio_pci_modern.c      optional        virtio_pci
 dev/virtio/mmio/virtio_mmio.c		optional	virtio_mmio fdt
 dev/virtio/mmio/virtio_mmio_if.m	optional	virtio_mmio fdt
 dev/virtio/network/if_vtnet.c		optional	vtnet
(-)sys/modules/virtio/pci/Makefile (+2 lines)
Lines 27-32
 
 KMOD=	virtio_pci
 SRCS=	virtio_pci.c
+SRCS+=	virtio_pci_legacy.c virtio_pci_modern.c
+SRCS+=	virtio_pci_if.c virtio_pci_if.h
 SRCS+=	virtio_bus_if.h virtio_if.h 
 SRCS+=	bus_if.h device_if.h pci_if.h
 
(-)sys/dev/virtio/balloon/virtio_balloon.c (-15 / +49 lines)
Lines 80-85
 static struct virtio_feature_desc vtballoon_feature_desc[] = {
 	{ VIRTIO_BALLOON_F_MUST_TELL_HOST,	"MustTellHost"	},
 	{ VIRTIO_BALLOON_F_STATS_VQ,		"StatsVq"	},
+	{ VIRTIO_BALLOON_F_DEFLATE_ON_OOM,	"DeflateOnOOM"	},
 
 	{ 0, NULL }
 };
Lines 89-95
 static int	vtballoon_detach(device_t);
 static int	vtballoon_config_change(device_t);
 
-static void	vtballoon_negotiate_features(struct vtballoon_softc *);
+static int	vtballoon_negotiate_features(struct vtballoon_softc *);
+static int	vtballoon_setup_features(struct vtballoon_softc *);
 static int	vtballoon_alloc_virtqueues(struct vtballoon_softc *);
 
 static void	vtballoon_vq_intr(void *);
Lines 109-118
 
 static int	vtballoon_sleep(struct vtballoon_softc *);
 static void	vtballoon_thread(void *);
-static void	vtballoon_add_sysctl(struct vtballoon_softc *);
+static void	vtballoon_setup_sysctl(struct vtballoon_softc *);
 
+#define vtballoon_modern(_sc) \
+    (((_sc)->vtballoon_features & VIRTIO_F_VERSION_1) != 0)
+
 /* Features desired/implemented by this driver. */
-#define VTBALLOON_FEATURES		0
+#define VTBALLOON_FEATURES		VIRTIO_BALLOON_F_MUST_TELL_HOST
 
 /* Timeout between retries when the balloon needs inflating. */
 #define VTBALLOON_LOWMEM_TIMEOUT	hz
Lines 153-160
 };
 static devclass_t vtballoon_devclass;
 
-DRIVER_MODULE(virtio_balloon, virtio_pci, vtballoon_driver,
+DRIVER_MODULE(virtio_balloon, vtpcil, vtballoon_driver,
     vtballoon_devclass, 0, 0);
+DRIVER_MODULE(virtio_balloon, vtpcim, vtballoon_driver,
+    vtballoon_devclass, 0, 0);
 MODULE_VERSION(virtio_balloon, 1);
 MODULE_DEPEND(virtio_balloon, virtio, 1, 1, 1);
 
Lines 178-191
 
 	sc = device_get_softc(dev);
 	sc->vtballoon_dev = dev;
+	virtio_set_feature_desc(dev, vtballoon_feature_desc);
 
 	VTBALLOON_LOCK_INIT(sc, device_get_nameunit(dev));
 	TAILQ_INIT(&sc->vtballoon_pages);
 
-	vtballoon_add_sysctl(sc);
+	vtballoon_setup_sysctl(sc);
 
-	virtio_set_feature_desc(dev, vtballoon_feature_desc);
-	vtballoon_negotiate_features(sc);
+	error = vtballoon_setup_features(sc);
+	if (error) {
+		device_printf(dev, "cannot setup features\n");
+		goto fail;
+	}
 
 	sc->vtballoon_page_frames = malloc(VTBALLOON_PAGES_PER_REQUEST *
 	    sizeof(uint32_t), M_DEVBUF, M_NOWAIT | M_ZERO);
Lines 271-288
 	return (1);
 }
 
-static void
+static int
 vtballoon_negotiate_features(struct vtballoon_softc *sc)
 {
 	device_t dev;
 	uint64_t features;
 
 	dev = sc->vtballoon_dev;
-	features = virtio_negotiate_features(dev, VTBALLOON_FEATURES);
-	sc->vtballoon_features = features;
+	features = VTBALLOON_FEATURES;
+
+	sc->vtballoon_features = virtio_negotiate_features(dev, features);
+	return (virtio_finalize_features(dev));
 }
 
 static int
+vtballoon_setup_features(struct vtballoon_softc *sc)
+{
+	int error;
+
+	error = vtballoon_negotiate_features(sc);
+	if (error)
+		return (error);
+
+	return (0);
+}
+
+static int
 vtballoon_alloc_virtqueues(struct vtballoon_softc *sc)
 {
 	device_t dev;
Lines 440-446
 {
 	vm_page_t m;
 
-	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
+	m = vm_page_alloc(NULL, 0,
+	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP);
 	if (m != NULL)
 		sc->vtballoon_current_npages++;
 
Lines 463-478
 	desired = virtio_read_dev_config_4(sc->vtballoon_dev,
 	    offsetof(struct virtio_balloon_config, num_pages));
 
-	return (le32toh(desired));
+	if (vtballoon_modern(sc))
+		return (desired);
+	else
+		return (le32toh(desired));
 }
 
 static void
 vtballoon_update_size(struct vtballoon_softc *sc)
 {
+	uint32_t npages;
 
+	npages = sc->vtballoon_current_npages;
+	if (!vtballoon_modern(sc))
+		npages = htole32(npages);
+
 	virtio_write_dev_config_4(sc->vtballoon_dev,
-	    offsetof(struct virtio_balloon_config, actual),
-	    htole32(sc->vtballoon_current_npages));
+	    offsetof(struct virtio_balloon_config, actual), npages);
+
 }
 
 static int
Lines 544-550
 }
 
 static void
-vtballoon_add_sysctl(struct vtballoon_softc *sc)
+vtballoon_setup_sysctl(struct vtballoon_softc *sc)
 {
 	device_t dev;
 	struct sysctl_ctx_list *ctx;
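Note on the negotiate/setup split above: every driver touched by this patch converts the old void negotiation path into a two-step handshake, where the driver offers a feature mask, records the subset the device accepts, and then must finalize the set before using the device (for a V1 device this is where FEATURES_OK is written). A minimal standalone C sketch of that flow, with a stubbed transport standing in for the real virtio_negotiate_features()/virtio_finalize_features() kernel API:

#include <stdint.h>
#include <stdio.h>

#define VIRTIO_F_VERSION_1	(1ULL << 32)	/* assumption: spec bit 32 */

/* Stub "device" feature set; stands in for the transport layer. */
static uint64_t device_features = VIRTIO_F_VERSION_1 | 0x1ULL;

static uint64_t
negotiate_features(uint64_t driver_features)
{
	/* The negotiated set is the intersection of both sides. */
	return (driver_features & device_features);
}

static int
finalize_features(void)
{
	/*
	 * For a V1 device this is where FEATURES_OK would be written and
	 * read back; a non-zero return mimics the device rejecting the set.
	 */
	return (0);
}

int
main(void)
{
	uint64_t negotiated;

	negotiated = negotiate_features(VIRTIO_F_VERSION_1 | 0x1ULL);
	if (finalize_features() != 0) {
		fprintf(stderr, "cannot setup features\n");
		return (1);
	}
	printf("modern: %d\n", (negotiated & VIRTIO_F_VERSION_1) != 0);
	return (0);
}

Returning the finalize status is why the attach paths in this patch can now fail with "cannot setup features" instead of silently proceeding.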
(-)sys/dev/virtio/balloon/virtio_balloon.h (-1 / +27 lines)
Lines 36-41
 /* Feature bits. */
 #define VIRTIO_BALLOON_F_MUST_TELL_HOST	0x1 /* Tell before reclaiming pages */
 #define VIRTIO_BALLOON_F_STATS_VQ	0x2 /* Memory stats virtqueue */
+#define VIRTIO_BALLOON_F_DEFLATE_ON_OOM	0x4 /* Deflate balloon on OOM */
 
 /* Size of a PFN in the balloon interface. */
 #define VIRTIO_BALLOON_PFN_SHIFT 12
Lines 54-61
 #define VIRTIO_BALLOON_S_MINFLT   3   /* Number of minor faults */
 #define VIRTIO_BALLOON_S_MEMFREE  4   /* Total amount of free memory */
 #define VIRTIO_BALLOON_S_MEMTOT   5   /* Total amount of memory */
-#define VIRTIO_BALLOON_S_NR       6
+#define VIRTIO_BALLOON_S_AVAIL    6   /* Available memory as in /proc */
+#define VIRTIO_BALLOON_S_CACHES   7   /* Disk caches */
+#define VIRTIO_BALLOON_S_NR       8
 
+/*
+ * Memory statistics structure.
+ * Driver fills an array of these structures and passes to device.
+ *
+ * NOTE: fields are laid out in a way that would make compiler add padding
+ * between and after fields, so we have to use compiler-specific attributes to
+ * pack it, to disable this padding. This also often causes compiler to
+ * generate suboptimal code.
+ *
+ * We maintain this statistics structure format for backwards compatibility,
+ * but don't follow this example.
+ *
+ * If implementing a similar structure, do something like the below instead:
+ *     struct virtio_balloon_stat {
+ *         __virtio16 tag;
+ *         __u8 reserved[6];
+ *         __virtio64 val;
+ *     };
+ *
+ * In other words, add explicit reserved fields to align field and
+ * structure boundaries at field size, avoiding compiler padding
+ * without the packed attribute.
+ */
 struct virtio_balloon_stat {
 	uint16_t tag;
 	uint64_t val;
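The new header comment explains why struct virtio_balloon_stat needs compiler packing. The standalone program below illustrates the point; the sizes shown are what common compilers (GCC/Clang on typical LP64 ABIs) produce, not something the C standard guarantees:

#include <stdint.h>
#include <stdio.h>

/* Natural layout: compiler inserts 6 pad bytes after 'tag' so that the
 * 8-byte 'val' is aligned. */
struct stat_padded {
	uint16_t tag;
	uint64_t val;
};				/* typically sizeof == 16 */

/* Packed layout, matching the wire format the device expects. */
struct stat_packed {
	uint16_t tag;
	uint64_t val;
} __attribute__((packed));	/* sizeof == 10 */

/* The recommended layout from the comment: explicit reserved bytes give
 * the same wire-visible alignment with no packing attribute needed. */
struct stat_reserved {
	uint16_t tag;
	uint8_t reserved[6];
	uint64_t val;
};				/* sizeof == 16, naturally aligned */

int
main(void)
{
	printf("padded=%zu packed=%zu reserved=%zu\n",
	    sizeof(struct stat_padded), sizeof(struct stat_packed),
	    sizeof(struct stat_reserved));
	return (0);
}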
(-)sys/dev/virtio/block/virtio_blk.c (-64 / +95 lines)
Lines 76-86
 	uint64_t		 vtblk_features;
 	uint32_t		 vtblk_flags;
 #define VTBLK_FLAG_INDIRECT	0x0001
-#define VTBLK_FLAG_READONLY	0x0002
-#define VTBLK_FLAG_DETACH	0x0004
-#define VTBLK_FLAG_SUSPEND	0x0008
-#define VTBLK_FLAG_BARRIER	0x0010
-#define VTBLK_FLAG_WC_CONFIG	0x0020
+#define VTBLK_FLAG_DETACH	0x0002
+#define VTBLK_FLAG_SUSPEND	0x0004
+#define VTBLK_FLAG_BARRIER	0x0008
+#define VTBLK_FLAG_WCE_CONFIG	0x0010
 
 	struct virtqueue	*vtblk_vq;
 	struct sglist		*vtblk_sglist;
Lines 109-117
 	{ VIRTIO_BLK_F_RO,		"ReadOnly"	},
 	{ VIRTIO_BLK_F_BLK_SIZE,	"BlockSize"	},
 	{ VIRTIO_BLK_F_SCSI,		"SCSICmds"	},
-	{ VIRTIO_BLK_F_WCE,		"WriteCache"	},
+	{ VIRTIO_BLK_F_FLUSH,		"FlushCmd"	},
 	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology"	},
 	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE"	},
+	{ VIRTIO_BLK_F_MQ,		"Multiqueue"	},
 
 	{ 0, NULL }
 };
Lines 133-140
 static int	vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
 static void	vtblk_strategy(struct bio *);
 
-static void	vtblk_negotiate_features(struct vtblk_softc *);
-static void	vtblk_setup_features(struct vtblk_softc *);
+static int	vtblk_negotiate_features(struct vtblk_softc *);
+static int	vtblk_setup_features(struct vtblk_softc *);
 static int	vtblk_maximum_segments(struct vtblk_softc *,
 		    struct virtio_blk_config *);
 static int	vtblk_alloc_virtqueue(struct vtblk_softc *);
Lines 193-198
 static void	vtblk_setup_sysctl(struct vtblk_softc *);
 static int	vtblk_tunable_int(struct vtblk_softc *, const char *, int);
 
+#define vtblk_modern(_sc) (((_sc)->vtblk_features & VIRTIO_F_VERSION_1) != 0)
+#define vtblk_htog16(_sc, _val)	virtio_htog16(vtblk_modern(_sc), _val)
+#define vtblk_htog32(_sc, _val)	virtio_htog32(vtblk_modern(_sc), _val)
+#define vtblk_htog64(_sc, _val)	virtio_htog64(vtblk_modern(_sc), _val)
+#define vtblk_gtoh16(_sc, _val)	virtio_gtoh16(vtblk_modern(_sc), _val)
+#define vtblk_gtoh32(_sc, _val)	virtio_gtoh32(vtblk_modern(_sc), _val)
+#define vtblk_gtoh64(_sc, _val)	virtio_gtoh64(vtblk_modern(_sc), _val)
+
 /* Tunables. */
 static int vtblk_no_ident = 0;
 TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
Lines 200-217
 TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);
 
 /* Features desired/implemented by this driver. */
-#define VTBLK_FEATURES \
-    (VIRTIO_BLK_F_BARRIER		| \
-     VIRTIO_BLK_F_SIZE_MAX		| \
+#define VTBLK_COMMON_FEATURES \
+    (VIRTIO_BLK_F_SIZE_MAX		| \
      VIRTIO_BLK_F_SEG_MAX		| \
      VIRTIO_BLK_F_GEOMETRY		| \
      VIRTIO_BLK_F_RO			| \
      VIRTIO_BLK_F_BLK_SIZE		| \
-     VIRTIO_BLK_F_WCE			| \
+     VIRTIO_BLK_F_FLUSH			| \
      VIRTIO_BLK_F_TOPOLOGY		| \
      VIRTIO_BLK_F_CONFIG_WCE		| \
      VIRTIO_RING_F_INDIRECT_DESC)
 
+#define VTBLK_MODERN_FEATURES 	(VTBLK_COMMON_FEATURES)
+#define VTBLK_LEGACY_FEATURES	(VIRTIO_BLK_F_BARRIER | VTBLK_COMMON_FEATURES)
+
 #define VTBLK_MTX(_sc)		&(_sc)->vtblk_mtx
 #define VTBLK_LOCK_INIT(_sc, _name) \
 				mtx_init(VTBLK_MTX((_sc)), (_name), \
Lines 256-263
 
 DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
     vtblk_modevent, 0);
-DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
+DRIVER_MODULE(virtio_blk, vtpcil, vtblk_driver, vtblk_devclass,
     vtblk_modevent, 0);
+DRIVER_MODULE(virtio_blk, vtpcim, vtblk_driver, vtblk_devclass,
+    vtblk_modevent, 0);
 MODULE_VERSION(virtio_blk, 1);
 MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);
 
Lines 301-310
 	struct virtio_blk_config blkcfg;
 	int error;
 
-	virtio_set_feature_desc(dev, vtblk_feature_desc);
-
 	sc = device_get_softc(dev);
 	sc->vtblk_dev = dev;
+	virtio_set_feature_desc(dev, vtblk_feature_desc);
+
 	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
 	bioq_init(&sc->vtblk_bioq);
 	TAILQ_INIT(&sc->vtblk_dump_queue);
Lines 312-319
 	TAILQ_INIT(&sc->vtblk_req_ready);
 
 	vtblk_setup_sysctl(sc);
-	vtblk_setup_features(sc);
 
+	error = vtblk_setup_features(sc);
+	if (error) {
+		device_printf(dev, "cannot setup features\n");
+		goto fail;
+	}
+
 	vtblk_read_config(sc, &blkcfg);
 
 	/*
Lines 541-556
 		return;
 	}
 
-	/*
-	 * Fail any write if RO. Unfortunately, there does not seem to
-	 * be a better way to report our readonly'ness to GEOM above.
-	 */
-	if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
-	    (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
-		vtblk_bio_done(sc, bp, EROFS);
-		return;
-	}
-
 	VTBLK_LOCK(sc);
 
 	if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
Lines 565-599
 	VTBLK_UNLOCK(sc);
 }
 
-static void
+static int
 vtblk_negotiate_features(struct vtblk_softc *sc)
 {
 	device_t dev;
 	uint64_t features;
 
 	dev = sc->vtblk_dev;
-	features = VTBLK_FEATURES;
+	features = virtio_bus_is_modern(dev) ? VTBLK_MODERN_FEATURES :
+	    VTBLK_LEGACY_FEATURES;
 
 	sc->vtblk_features = virtio_negotiate_features(dev, features);
+	return (virtio_finalize_features(dev));
 }
 
-static void
+static int
 vtblk_setup_features(struct vtblk_softc *sc)
 {
 	device_t dev;
-
+	int error;
+	
 	dev = sc->vtblk_dev;
 
-	vtblk_negotiate_features(sc);
+	error = vtblk_negotiate_features(sc);
+	if (error)
+		return (error);
 
 	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
 		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
-	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
-		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
-	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
-		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
 	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
-		sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
+		sc->vtblk_flags |= VTBLK_FLAG_WCE_CONFIG;
+	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
+		sc->vtblk_flags |= VTBLK_FLAG_BARRIER; /* Legacy. */
+
+	return (0);
 }
 
 static int
Lines 672-686
 	dp->d_name = VTBLK_DISK_NAME;
 	dp->d_unit = device_get_unit(dev);
 	dp->d_drv1 = sc;
-	dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO |
-	    DISKFLAG_DIRECT_COMPLETION;
+	dp->d_flags = DISKFLAG_UNMAPPED_BIO | DISKFLAG_DIRECT_COMPLETION;
 	dp->d_hba_vendor = virtio_get_vendor(dev);
 	dp->d_hba_device = virtio_get_device(dev);
 	dp->d_hba_subvendor = virtio_get_subvendor(dev);
 	dp->d_hba_subdevice = virtio_get_subdevice(dev);
 
-	if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
+	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
+		dp->d_flags |= DISKFLAG_WRITE_PROTECT;
+	else {
+		if (virtio_with_feature(dev, VIRTIO_BLK_F_FLUSH))
+			dp->d_flags |= DISKFLAG_CANFLUSHCACHE;
 		dp->d_dump = vtblk_dump;
+	}
 
 	/* Capacity is always in 512-byte units. */
 	dp->d_mediasize = blkcfg->capacity * 512;
Lines 864-889
 	bp = bioq_takefirst(bioq);
 	req->vbr_bp = bp;
 	req->vbr_ack = -1;
-	req->vbr_hdr.ioprio = 1;
+	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
 
 	switch (bp->bio_cmd) {
 	case BIO_FLUSH:
-		req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
+		req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_FLUSH);
+		req->vbr_hdr.sector = 0;
 		break;
 	case BIO_READ:
-		req->vbr_hdr.type = VIRTIO_BLK_T_IN;
-		req->vbr_hdr.sector = bp->bio_offset / 512;
+		req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_IN);
+		req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / 512);
 		break;
 	case BIO_WRITE:
-		req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
-		req->vbr_hdr.sector = bp->bio_offset / 512;
+		req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_OUT);
+		req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / 512);
 		break;
 	default:
 		panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
 	}
 
 	if (bp->bio_flags & BIO_ORDERED)
-		req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;
+		req->vbr_hdr.type |= vtblk_gtoh32(sc, VIRTIO_BLK_T_BARRIER);
 
 	return (req);
 }
Lines 914-920
 			if (!virtqueue_empty(vq))
 				return (EBUSY);
 			ordered = 1;
-			req->vbr_hdr.type &= ~VIRTIO_BLK_T_BARRIER;
+			req->vbr_hdr.type &= vtblk_gtoh32(sc,
+				~VIRTIO_BLK_T_BARRIER);
 		}
 	}
 
Lines 1018-1032
 static void
 vtblk_drain(struct vtblk_softc *sc)
 {
-	struct bio_queue queue;
 	struct bio_queue_head *bioq;
 	struct vtblk_request *req;
 	struct bio *bp;
 
 	bioq = &sc->vtblk_bioq;
-	TAILQ_INIT(&queue);
 
 	if (sc->vtblk_vq != NULL) {
+		struct bio_queue queue;
+
+		TAILQ_INIT(&queue);
 		vtblk_queue_completed(sc, &queue);
 		vtblk_done_completed(sc, &queue);
 
Lines 1117-1126
 	/* Read the configuration if the feature was negotiated. */
 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
+	    geometry.cylinders, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
+	    geometry.heads, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
+	    geometry.sectors, blkcfg);
 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
+	    topology.physical_block_exp, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
+	    topology.alignment_offset, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
+	    topology.min_io_size, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
+	    topology.opt_io_size, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, wce, blkcfg);
 }
 
 #undef VTBLK_GET_CONFIG
Lines 1144-1151
 		return;
 
 	req->vbr_ack = -1;
-	req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
-	req->vbr_hdr.ioprio = 1;
+	req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_GET_ID);
+	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
 	req->vbr_hdr.sector = 0;
 
 	req->vbr_bp = &buf;
Lines 1276-1284
 
 	req = &sc->vtblk_dump_request;
 	req->vbr_ack = -1;
-	req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
-	req->vbr_hdr.ioprio = 1;
-	req->vbr_hdr.sector = offset / 512;
+	req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_OUT);
+	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
+	req->vbr_hdr.sector = vtblk_gtoh64(sc, offset / 512);
 
 	req->vbr_bp = &buf;
 	g_reset_bio(&buf);
Lines 1298-1305
 
 	req = &sc->vtblk_dump_request;
 	req->vbr_ack = -1;
-	req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
-	req->vbr_hdr.ioprio = 1;
+	req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_FLUSH);
+	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
 	req->vbr_hdr.sector = 0;
 
 	req->vbr_bp = &buf;
Lines 1327-1333
 
 	/* Set either writeback (1) or writethrough (0) mode. */
 	virtio_write_dev_config_1(sc->vtblk_dev,
-	    offsetof(struct virtio_blk_config, writeback), wc);
+	    offsetof(struct virtio_blk_config, wce), wc);
 }
 
 static int
Lines 1336-1350
 {
 	int wc;
 
-	if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
+	if (sc->vtblk_flags & VTBLK_FLAG_WCE_CONFIG) {
 		wc = vtblk_tunable_int(sc, "writecache_mode",
 		    vtblk_writecache_mode);
 		if (wc >= 0 && wc < VTBLK_CACHE_MAX)
 			vtblk_set_write_cache(sc, wc);
 		else
-			wc = blkcfg->writeback;
+			wc = blkcfg->wce;
 	} else
-		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);
+		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_FLUSH);
 
 	return (wc);
 }
Lines 1361-1367
 	error = sysctl_handle_int(oidp, &wc, 0, req);
 	if (error || req->newptr == NULL)
 		return (error);
-	if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
+	if ((sc->vtblk_flags & VTBLK_FLAG_WCE_CONFIG) == 0)
 		return (EPERM);
 	if (wc < 0 || wc >= VTBLK_CACHE_MAX)
 		return (EINVAL);
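Every vbr_hdr store above gained a vtblk_gtoh32()/vtblk_gtoh64() wrapper. The patch does not show the definition of virtio_gtoh32() and friends, but assuming they behave like the sketch below, the wrappers byte-swap only for modern (VIRTIO_F_VERSION_1) devices, whose request-header fields are defined as little-endian, and are no-ops for legacy devices, which use guest-native byte order:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Explicit host-to-little-endian conversion, independent of host order. */
static inline uint32_t
sketch_htole32(uint32_t v)
{
	uint8_t b[4];

	b[0] = (uint8_t)(v & 0xff);
	b[1] = (uint8_t)((v >> 8) & 0xff);
	b[2] = (uint8_t)((v >> 16) & 0xff);
	b[3] = (uint8_t)((v >> 24) & 0xff);
	memcpy(&v, b, sizeof(v));
	return (v);
}

/* Guest (host CPU) order to device order: swap only for modern devices. */
static inline uint32_t
sketch_gtoh32(bool modern, uint32_t val)
{
	return (modern ? sketch_htole32(val) : val);
}

On little-endian guests both paths are effectively no-ops, which is why the old unconverted code happened to work there; the wrappers make big-endian guests correct as well.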
(-)sys/dev/virtio/block/virtio_blk.h (-5 / +22 lines)
Lines 34-50
 #define _VIRTIO_BLK_H
 
 /* Feature bits */
-#define VIRTIO_BLK_F_BARRIER	0x0001	/* Does host support barriers? */
 #define VIRTIO_BLK_F_SIZE_MAX	0x0002	/* Indicates maximum segment size */
 #define VIRTIO_BLK_F_SEG_MAX	0x0004	/* Indicates maximum # of segments */
 #define VIRTIO_BLK_F_GEOMETRY	0x0010	/* Legacy geometry available  */
 #define VIRTIO_BLK_F_RO		0x0020	/* Disk is read-only */
 #define VIRTIO_BLK_F_BLK_SIZE	0x0040	/* Block size of disk is available*/
-#define VIRTIO_BLK_F_SCSI	0x0080	/* Supports scsi command passthru */
-#define VIRTIO_BLK_F_WCE	0x0200	/* Writeback mode enabled after reset */
+#define VIRTIO_BLK_F_FLUSH	0x0200	/* Flush command supported */
 #define VIRTIO_BLK_F_TOPOLOGY	0x0400	/* Topology information is available */
 #define VIRTIO_BLK_F_CONFIG_WCE 0x0800	/* Writeback mode available in config */
+#define VIRTIO_BLK_F_MQ 	0x1000 	/* Support more than one vq */
 
+/* Legacy feature bits */
+#define VIRTIO_BLK_F_BARRIER	0x0001	/* Does host support barriers? */
+#define VIRTIO_BLK_F_SCSI	0x0080	/* Supports scsi command passthru */
+
+/* Old (deprecated) name for VIRTIO_BLK_F_FLUSH. */
+#define VIRTIO_BLK_F_WCE VIRTIO_BLK_F_FLUSH
 #define VIRTIO_BLK_ID_BYTES	20	/* ID string length */
 
 struct virtio_blk_config {
Lines 66-80
 
 	/* Topology of the device (if VIRTIO_BLK_F_TOPOLOGY) */
 	struct virtio_blk_topology {
+		/* exponent for physical block per logical block. */
 		uint8_t physical_block_exp;
+		/* alignment offset in logical blocks. */
 		uint8_t alignment_offset;
+		/* minimum I/O size without performance penalty in logical
+		 * blocks. */
 		uint16_t min_io_size;
+		/* optimal sustained I/O size in logical blocks. */
 		uint32_t opt_io_size;
 	} topology;
 
 	/* Writeback mode (if VIRTIO_BLK_F_CONFIG_WCE) */
-	uint8_t writeback;
+	uint8_t wce;
+	uint8_t unused;
 
+	/* Number of vqs, only available when VIRTIO_BLK_F_MQ is set */
+	uint16_t num_queues;
 } __packed;
 
 /*
Lines 107-113
 /* ID string length */
 #define VIRTIO_BLK_ID_BYTES	20
 
-/* This is the first element of the read scatter-gather list. */
+/*
+ * This comes first in the read scatter-gather list.
+ * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated,
+ * this is the first element of the read scatter-gather list.
+ */
 struct virtio_blk_outhdr {
 	/* VIRTIO_BLK_T* */
 	uint32_t type;
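The topology fields are all expressed in logical blocks, per the new comments in the struct. A worked example with made-up values (an illustration, not data from a real device):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t blk_size = 512;	/* logical block size (F_BLK_SIZE) */
	uint8_t physical_block_exp = 3;	/* 2^3 logical per physical block */
	uint16_t min_io_size = 8;	/* in logical blocks */
	uint32_t opt_io_size = 64;	/* in logical blocks */

	/* 512 << 3 = 4096-byte physical blocks */
	printf("physical block: %u bytes\n", blk_size << physical_block_exp);
	printf("min I/O: %u bytes, optimal I/O: %u bytes\n",
	    blk_size * min_io_size, blk_size * opt_io_size);
	return (0);
}

Splitting VTBLK_GET_CONFIG into per-member reads in virtio_blk.c (geometry.cylinders, topology.physical_block_exp, and so on) follows from this layout: each field can be fetched at its own offset instead of copying the whole sub-struct.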
(-)sys/dev/virtio/console/virtio_console.c (-16 / +39 lines)
Lines 158-165
 static int	 vtcon_detach(device_t);
 static int	 vtcon_config_change(device_t);
 
-static void	 vtcon_setup_features(struct vtcon_softc *);
-static void	 vtcon_negotiate_features(struct vtcon_softc *);
+static int	 vtcon_setup_features(struct vtcon_softc *);
+static int	 vtcon_negotiate_features(struct vtcon_softc *);
 static int	 vtcon_alloc_scports(struct vtcon_softc *);
 static int	 vtcon_alloc_virtqueues(struct vtcon_softc *);
 static void	 vtcon_read_config(struct vtcon_softc *,
Lines 227-232
 static void	 vtcon_enable_interrupts(struct vtcon_softc *);
 static void	 vtcon_disable_interrupts(struct vtcon_softc *);
 
+#define vtcon_modern(_sc) (((_sc)->vtcon_features & VIRTIO_F_VERSION_1) != 0)
+#define vtcon_htog16(_sc, _val)	virtio_htog16(vtcon_modern(_sc), _val)
+#define vtcon_htog32(_sc, _val)	virtio_htog32(vtcon_modern(_sc), _val)
+#define vtcon_htog64(_sc, _val)	virtio_htog64(vtcon_modern(_sc), _val)
+#define vtcon_gtoh16(_sc, _val)	virtio_gtoh16(vtcon_modern(_sc), _val)
+#define vtcon_gtoh32(_sc, _val)	virtio_gtoh32(vtcon_modern(_sc), _val)
+#define vtcon_gtoh64(_sc, _val)	virtio_gtoh64(vtcon_modern(_sc), _val)
+
 static int	 vtcon_pending_free;
 
 static struct ttydevsw vtcon_tty_class = {
Lines 256-263
 };
 static devclass_t vtcon_devclass;
 
-DRIVER_MODULE(virtio_console, virtio_pci, vtcon_driver, vtcon_devclass,
+DRIVER_MODULE(virtio_console, vtpcil, vtcon_driver, vtcon_devclass,
     vtcon_modevent, 0);
+DRIVER_MODULE(virtio_console, vtpcim, vtcon_driver, vtcon_devclass,
+    vtcon_modevent, 0);
 MODULE_VERSION(virtio_console, 1);
 MODULE_DEPEND(virtio_console, virtio, 1, 1, 1);
 
Lines 323-334
 
 	sc = device_get_softc(dev);
 	sc->vtcon_dev = dev;
+	virtio_set_feature_desc(dev, vtcon_feature_desc);
 
 	mtx_init(&sc->vtcon_mtx, "vtconmtx", NULL, MTX_DEF);
 	mtx_init(&sc->vtcon_ctrl_tx_mtx, "vtconctrlmtx", NULL, MTX_DEF);
 
-	virtio_set_feature_desc(dev, vtcon_feature_desc);
-	vtcon_setup_features(sc);
+	error = vtcon_setup_features(sc);
+	if (error) {
+		device_printf(dev, "cannot setup features\n");
+		goto fail;
+	}
 
 	vtcon_read_config(sc, &concfg);
 	vtcon_determine_max_ports(sc, &concfg);
Lines 420-426
 	return (0);
 }
 
-static void
+static int
 vtcon_negotiate_features(struct vtcon_softc *sc)
 {
 	device_t dev;
Lines 430-450
 	features = VTCON_FEATURES;
 
 	sc->vtcon_features = virtio_negotiate_features(dev, features);
+	return (virtio_finalize_features(dev));
 }
 
-static void
+static int
 vtcon_setup_features(struct vtcon_softc *sc)
 {
 	device_t dev;
+	int error;
 
 	dev = sc->vtcon_dev;
 
-	vtcon_negotiate_features(sc);
+	error = vtcon_negotiate_features(sc);
+	if (error)
+		return (error);
 
 	if (virtio_with_feature(dev, VIRTIO_CONSOLE_F_SIZE))
 		sc->vtcon_flags |= VTCON_FLAG_SIZE;
 	if (virtio_with_feature(dev, VIRTIO_CONSOLE_F_MULTIPORT))
 		sc->vtcon_flags |= VTCON_FLAG_MULTIPORT;
+
+	return (0);
 }
 
 #define VTCON_GET_CONFIG(_dev, _feature, _field, _cfg)			\
Lines 847-863
     struct virtio_console_control *control, void *data, size_t data_len)
 {
 	device_t dev;
-	int id;
+	uint32_t id;
+	uint16_t event;
 
 	dev = sc->vtcon_dev;
-	id = control->id;
+	id = vtcon_htog32(sc, control->id);
+	event = vtcon_htog16(sc, control->event);
 
-	if (id < 0 || id >= sc->vtcon_max_ports) {
-		device_printf(dev, "%s: invalid port ID %d\n", __func__, id);
+	if (id >= sc->vtcon_max_ports) {
+		device_printf(dev, "%s: event %d invalid port ID %d\n",
+		    __func__, event, id);
 		return;
 	}
 
-	switch (control->event) {
+	switch (event) {
 	case VIRTIO_CONSOLE_PORT_ADD:
 		vtcon_ctrl_port_add_event(sc, id);
 		break;
Lines 985-993
 	if ((sc->vtcon_flags & VTCON_FLAG_MULTIPORT) == 0)
 		return;
 
-	control.id = portid;
-	control.event = event;
-	control.value = value;
+	control.id = vtcon_gtoh32(sc, portid);
+	control.event = vtcon_gtoh16(sc, event);
+	control.value = vtcon_gtoh16(sc, value);
 
 	vtcon_ctrl_poll(sc, &control);
 }
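A side effect of the control-event change above: keeping the port ID in its wire-sized uint32_t type is why the old `id < 0` clause could be dropped. A single unsigned comparison now rejects every out-of-range value a device could send, including one that would have looked negative after a signed conversion. Standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t wire_id = 0xffffffffu;	/* would read as -1 in a signed int */
	uint32_t max_ports = 4;

	/* One check covers both "negative" and too-large wire values. */
	if (wire_id >= max_ports)
		printf("invalid port ID %u\n", wire_id);
	return (0);
}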
(-)sys/dev/virtio/mmio/virtio_mmio.c (+4 lines)
Lines 426-431
 	case VIRTIO_IVAR_VENDOR:
 		*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID);
 		break;
+	case VIRTIO_IVAR_SUBVENDOR:
+	case VIRTIO_IVAR_MODERN:
+		*result = 0;
+		break;
 	default:
 		return (ENOENT);
 	}
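Since the MMIO transport always reports 0 for the new VIRTIO_IVAR_MODERN ivar, children that branch on virtio_bus_is_modern() (as vtblk and vtnet do above when choosing between their MODERN and LEGACY feature masks) presumably fall back to the legacy feature set on this bus. A toy sketch of that consumer-side branch, with stand-in names rather than the kernel ivar API:

#include <stdio.h>

enum { IVAR_MODERN = 1 };

/* Stand-in for the bus ivar read; the MMIO transport returns 0 here. */
static int
read_ivar(int ivar)
{
	if (ivar == IVAR_MODERN)
		return (0);
	return (-1);
}

int
main(void)
{
	/* A driver would pick its feature mask based on this answer. */
	printf("use %s features\n",
	    read_ivar(IVAR_MODERN) ? "modern" : "legacy");
	return (0);
}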
(-)sys/dev/virtio/network/if_vtnet.c (-846 / +1181 lines)
Lines 69-74 Link Here
69
#include <netinet6/ip6_var.h>
69
#include <netinet6/ip6_var.h>
70
#include <netinet/udp.h>
70
#include <netinet/udp.h>
71
#include <netinet/tcp.h>
71
#include <netinet/tcp.h>
72
#include <netinet/tcp_lro.h>
72
#include <netinet/netdump/netdump.h>
73
#include <netinet/netdump/netdump.h>
73
74
74
#include <machine/bus.h>
75
#include <machine/bus.h>
Lines 85-90 Link Here
85
#include "opt_inet.h"
86
#include "opt_inet.h"
86
#include "opt_inet6.h"
87
#include "opt_inet6.h"
87
88
89
#if defined(INET) || defined(INET6)
90
#include <machine/in_cksum.h>
91
#endif
92
88
static int	vtnet_modevent(module_t, int, void *);
93
static int	vtnet_modevent(module_t, int, void *);
89
94
90
static int	vtnet_probe(device_t);
95
static int	vtnet_probe(device_t);
Lines 96-103 Link Here
96
static int	vtnet_attach_completed(device_t);
101
static int	vtnet_attach_completed(device_t);
97
static int	vtnet_config_change(device_t);
102
static int	vtnet_config_change(device_t);
98
103
99
static void	vtnet_negotiate_features(struct vtnet_softc *);
104
static int	vtnet_negotiate_features(struct vtnet_softc *);
100
static void	vtnet_setup_features(struct vtnet_softc *);
105
static int	vtnet_setup_features(struct vtnet_softc *);
101
static int	vtnet_init_rxq(struct vtnet_softc *, int);
106
static int	vtnet_init_rxq(struct vtnet_softc *, int);
102
static int	vtnet_init_txq(struct vtnet_softc *, int);
107
static int	vtnet_init_txq(struct vtnet_softc *, int);
103
static int	vtnet_alloc_rxtx_queues(struct vtnet_softc *);
108
static int	vtnet_alloc_rxtx_queues(struct vtnet_softc *);
Lines 105-112 Link Here
105
static int	vtnet_alloc_rx_filters(struct vtnet_softc *);
110
static int	vtnet_alloc_rx_filters(struct vtnet_softc *);
106
static void	vtnet_free_rx_filters(struct vtnet_softc *);
111
static void	vtnet_free_rx_filters(struct vtnet_softc *);
107
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
112
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
113
static int	vtnet_alloc_interface(struct vtnet_softc *);
108
static int	vtnet_setup_interface(struct vtnet_softc *);
114
static int	vtnet_setup_interface(struct vtnet_softc *);
109
static int	vtnet_change_mtu(struct vtnet_softc *, int);
115
static int	vtnet_ioctl_mtu(struct vtnet_softc *, int);
116
static int	vtnet_ioctl_ifflags(struct vtnet_softc *);
117
static int	vtnet_ioctl_multi(struct vtnet_softc *);
118
static int	vtnet_ioctl_ifcap(struct vtnet_softc *, struct ifreq *);
110
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t);
119
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t);
111
static uint64_t	vtnet_get_counter(struct ifnet *, ift_counter);
120
static uint64_t	vtnet_get_counter(struct ifnet *, ift_counter);
112
121
Lines 114-124 Link Here
114
static void	vtnet_rxq_free_mbufs(struct vtnet_rxq *);
123
static void	vtnet_rxq_free_mbufs(struct vtnet_rxq *);
115
static struct mbuf *
124
static struct mbuf *
116
		vtnet_rx_alloc_buf(struct vtnet_softc *, int , struct mbuf **);
125
		vtnet_rx_alloc_buf(struct vtnet_softc *, int , struct mbuf **);
117
static int	vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *,
126
static int	vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *,
118
		    struct mbuf *, int);
127
		    struct mbuf *, int);
119
static int	vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
128
static int	vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
120
static int	vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
129
static int	vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
121
static int	vtnet_rxq_new_buf(struct vtnet_rxq *);
130
static int	vtnet_rxq_new_buf(struct vtnet_rxq *);
131
static int	vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *,
132
		     uint16_t, int, struct virtio_net_hdr *);
133
static int	vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
134
		     uint16_t, int, struct virtio_net_hdr *);
122
static int	vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
135
static int	vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
123
		     struct virtio_net_hdr *);
136
		     struct virtio_net_hdr *);
124
static void	vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
137
static void	vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
Lines 130-135 Link Here
130
static void	vtnet_rx_vq_intr(void *);
143
static void	vtnet_rx_vq_intr(void *);
131
static void	vtnet_rxq_tq_intr(void *, int);
144
static void	vtnet_rxq_tq_intr(void *, int);
132
145
146
static int	vtnet_txq_intr_threshold(struct vtnet_txq *);
133
static int	vtnet_txq_below_threshold(struct vtnet_txq *);
147
static int	vtnet_txq_below_threshold(struct vtnet_txq *);
134
static int	vtnet_txq_notify(struct vtnet_txq *);
148
static int	vtnet_txq_notify(struct vtnet_txq *);
135
static void	vtnet_txq_free_mbufs(struct vtnet_txq *);
149
static void	vtnet_txq_free_mbufs(struct vtnet_txq *);
Lines 179-184 Link Here
179
static int	vtnet_init_tx_queues(struct vtnet_softc *);
193
static int	vtnet_init_tx_queues(struct vtnet_softc *);
180
static int	vtnet_init_rxtx_queues(struct vtnet_softc *);
194
static int	vtnet_init_rxtx_queues(struct vtnet_softc *);
181
static void	vtnet_set_active_vq_pairs(struct vtnet_softc *);
195
static void	vtnet_set_active_vq_pairs(struct vtnet_softc *);
196
static void	vtnet_update_rx_offloads(struct vtnet_softc *);
182
static int	vtnet_reinit(struct vtnet_softc *);
197
static int	vtnet_reinit(struct vtnet_softc *);
183
static void	vtnet_init_locked(struct vtnet_softc *);
198
static void	vtnet_init_locked(struct vtnet_softc *);
184
static void	vtnet_init(void *);
199
static void	vtnet_init(void *);
Lines 187-197 Link Here
187
static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
202
static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
188
		    struct sglist *, int, int);
203
		    struct sglist *, int, int);
189
static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
204
static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
205
static int	vtnet_ctrl_guest_offloads(struct vtnet_softc *, uint64_t);
190
static int	vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
206
static int	vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
191
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
207
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, uint8_t, int);
192
static int	vtnet_set_promisc(struct vtnet_softc *, int);
208
static int	vtnet_set_promisc(struct vtnet_softc *, int);
193
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
209
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
194
static void	vtnet_attach_disable_promisc(struct vtnet_softc *);
195
static void	vtnet_rx_filter(struct vtnet_softc *);
210
static void	vtnet_rx_filter(struct vtnet_softc *);
196
static void	vtnet_rx_filter_mac(struct vtnet_softc *);
211
static void	vtnet_rx_filter_mac(struct vtnet_softc *);
197
static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
212
static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
Lines 200-220 Link Here
200
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
215
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
201
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
216
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
202
217
218
static void	vtnet_update_speed_duplex(struct vtnet_softc *);
203
static int	vtnet_is_link_up(struct vtnet_softc *);
219
static int	vtnet_is_link_up(struct vtnet_softc *);
204
static void	vtnet_update_link_status(struct vtnet_softc *);
220
static void	vtnet_update_link_status(struct vtnet_softc *);
205
static int	vtnet_ifmedia_upd(struct ifnet *);
221
static int	vtnet_ifmedia_upd(struct ifnet *);
206
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
222
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
207
static void	vtnet_get_hwaddr(struct vtnet_softc *);
223
static void	vtnet_get_macaddr(struct vtnet_softc *);
208
static void	vtnet_set_hwaddr(struct vtnet_softc *);
224
static void	vtnet_set_macaddr(struct vtnet_softc *);
225
static void	vtnet_attached_set_macaddr(struct vtnet_softc *);
209
static void	vtnet_vlan_tag_remove(struct mbuf *);
226
static void	vtnet_vlan_tag_remove(struct mbuf *);
210
static void	vtnet_set_rx_process_limit(struct vtnet_softc *);
227
static void	vtnet_set_rx_process_limit(struct vtnet_softc *);
211
static void	vtnet_set_tx_intr_threshold(struct vtnet_softc *);
212
228
213
static void	vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
229
static void	vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
214
		    struct sysctl_oid_list *, struct vtnet_rxq *);
230
		    struct sysctl_oid_list *, struct vtnet_rxq *);
215
static void	vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
231
static void	vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
216
		    struct sysctl_oid_list *, struct vtnet_txq *);
232
		    struct sysctl_oid_list *, struct vtnet_txq *);
217
static void	vtnet_setup_queue_sysctl(struct vtnet_softc *);
233
static void	vtnet_setup_queue_sysctl(struct vtnet_softc *);
234
static void	vtnet_load_tunables(struct vtnet_softc *);
218
static void	vtnet_setup_sysctl(struct vtnet_softc *);
235
static void	vtnet_setup_sysctl(struct vtnet_softc *);
219
236
220
static int	vtnet_rxq_enable_intr(struct vtnet_rxq *);
237
static int	vtnet_rxq_enable_intr(struct vtnet_rxq *);
Lines 232-289 Link Here
232
249
233
NETDUMP_DEFINE(vtnet);
250
NETDUMP_DEFINE(vtnet);
234
251
235
/* Tunables. */
252
#define vtnet_htog16(_sc, _val)	virtio_htog16(vtnet_modern(_sc), _val)
236
static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VNET driver parameters");
253
#define vtnet_htog32(_sc, _val)	virtio_htog32(vtnet_modern(_sc), _val)
254
#define vtnet_htog64(_sc, _val)	virtio_htog64(vtnet_modern(_sc), _val)
255
#define vtnet_gtoh16(_sc, _val)	virtio_gtoh16(vtnet_modern(_sc), _val)
256
#define vtnet_gtoh32(_sc, _val)	virtio_gtoh32(vtnet_modern(_sc), _val)
257
#define vtnet_gtoh64(_sc, _val)	virtio_gtoh64(vtnet_modern(_sc), _val)
258
259
static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VirtIO Net driver");
260
237
static int vtnet_csum_disable = 0;
261
static int vtnet_csum_disable = 0;
238
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
239
SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
262
SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
240
    &vtnet_csum_disable, 0, "Disables receive and send checksum offload");
263
    &vtnet_csum_disable, 0, "Disables receive and send checksum offload");
264
265
static int vtnet_fixup_needs_csum = 0;
266
SYSCTL_INT(_hw_vtnet, OID_AUTO, fixup_needs_csum, CTLFLAG_RDTUN,
267
    &vtnet_fixup_needs_csum, 0,
268
    "Calculate valid checksum for NEEDS_CSUM packets");
269
241
static int vtnet_tso_disable = 0;
270
static int vtnet_tso_disable = 0;
242
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
271
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN,
243
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN, &vtnet_tso_disable,
272
    &vtnet_tso_disable, 0, "Disables TSO");
244
    0, "Disables TCP Segmentation Offload");
273
245
static int vtnet_lro_disable = 0;
274
static int vtnet_lro_disable = 0;
246
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
275
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN,
247
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN, &vtnet_lro_disable,
276
    &vtnet_lro_disable, 0, "Disables hardware LRO");
248
    0, "Disables TCP Large Receive Offload");
277
249
static int vtnet_mq_disable = 0;
278
static int vtnet_mq_disable = 0;
250
TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable);
279
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN,
251
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN, &vtnet_mq_disable,
280
    &vtnet_mq_disable, 0, "Disables multiqueue support");
252
    0, "Disables Multi Queue support");
281
253
static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
282
static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
254
TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs);
255
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
283
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
256
    &vtnet_mq_max_pairs, 0, "Sets the maximum number of Multi Queue pairs");
284
    &vtnet_mq_max_pairs, 0, "Maximum number of multiqueue pairs");
257
static int vtnet_rx_process_limit = 512;
285
258
TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit);
286
static int vtnet_tso_maxlen = IP_MAXPACKET;
287
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
288
    &vtnet_tso_maxlen, 0, "TSO burst limit");
289
290
static int vtnet_rx_process_limit = 1024;
259
SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
291
SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
260
    &vtnet_rx_process_limit, 0,
292
    &vtnet_rx_process_limit, 0,
261
    "Limits the number RX segments processed in a single pass");
293
    "Number of RX segments processed in one pass");
262
294
295
static int vtnet_lro_entry_count = 128;
296
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
297
    &vtnet_lro_entry_count, 0, "Software LRO entry count");
298
299
/* Enable sorted LRO, and the depth of the mbuf queue. */
300
static int vtnet_lro_mbufq_depth = 0;
301
SYSCTL_UINT(_hw_vtnet, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
302
    &vtnet_lro_mbufq_depth, 0, "Depth of software LRO mbuf queue");
303
263
static uma_zone_t vtnet_tx_header_zone;
304
static uma_zone_t vtnet_tx_header_zone;
264
305
265
static struct virtio_feature_desc vtnet_feature_desc[] = {
306
static struct virtio_feature_desc vtnet_feature_desc[] = {
266
	{ VIRTIO_NET_F_CSUM,		"TxChecksum"	},
307
	{ VIRTIO_NET_F_CSUM,			"TxChecksum"		},
267
	{ VIRTIO_NET_F_GUEST_CSUM,	"RxChecksum"	},
308
	{ VIRTIO_NET_F_GUEST_CSUM,		"RxChecksum"		},
268
	{ VIRTIO_NET_F_MAC,		"MacAddress"	},
309
	{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,	"CtrlRxOffloads"	},
269
	{ VIRTIO_NET_F_GSO,		"TxAllGSO"	},
310
	{ VIRTIO_NET_F_MAC,			"MAC"			},
270
	{ VIRTIO_NET_F_GUEST_TSO4,	"RxTSOv4"	},
311
	{ VIRTIO_NET_F_GSO,			"TxGSO"			},
271
	{ VIRTIO_NET_F_GUEST_TSO6,	"RxTSOv6"	},
312
	{ VIRTIO_NET_F_GUEST_TSO4,		"RxLROv4"		},
272
	{ VIRTIO_NET_F_GUEST_ECN,	"RxECN"		},
313
	{ VIRTIO_NET_F_GUEST_TSO6,		"RxLROv6"		},
273
	{ VIRTIO_NET_F_GUEST_UFO,	"RxUFO"		},
314
	{ VIRTIO_NET_F_GUEST_ECN,		"RxLROECN"		},
274
	{ VIRTIO_NET_F_HOST_TSO4,	"TxTSOv4"	},
315
	{ VIRTIO_NET_F_GUEST_UFO,		"RxUFO"			},
275
	{ VIRTIO_NET_F_HOST_TSO6,	"TxTSOv6"	},
316
	{ VIRTIO_NET_F_HOST_TSO4,		"TxTSOv4"		},
276
	{ VIRTIO_NET_F_HOST_ECN,	"TxTSOECN"	},
317
	{ VIRTIO_NET_F_HOST_TSO6,		"TxTSOv6"		},
277
	{ VIRTIO_NET_F_HOST_UFO,	"TxUFO"		},
318
	{ VIRTIO_NET_F_HOST_ECN,		"TxTSOECN"		},
278
	{ VIRTIO_NET_F_MRG_RXBUF,	"MrgRxBuf"	},
319
	{ VIRTIO_NET_F_HOST_UFO,		"TxUFO"			},
279
	{ VIRTIO_NET_F_STATUS,		"Status"	},
320
	{ VIRTIO_NET_F_MRG_RXBUF,		"MrgRxBuf"		},
280
	{ VIRTIO_NET_F_CTRL_VQ,		"ControlVq"	},
321
	{ VIRTIO_NET_F_STATUS,			"Status"		},
281
	{ VIRTIO_NET_F_CTRL_RX,		"RxMode"	},
322
	{ VIRTIO_NET_F_CTRL_VQ,			"CtrlVq"		},
282
	{ VIRTIO_NET_F_CTRL_VLAN,	"VLanFilter"	},
323
	{ VIRTIO_NET_F_CTRL_RX,			"CtrlRxMode"		},
283
	{ VIRTIO_NET_F_CTRL_RX_EXTRA,	"RxModeExtra"	},
324
	{ VIRTIO_NET_F_CTRL_VLAN,		"CtrlVLANFilter"	},
284
	{ VIRTIO_NET_F_GUEST_ANNOUNCE,	"GuestAnnounce"	},
325
	{ VIRTIO_NET_F_CTRL_RX_EXTRA,		"CtrlRxModeExtra"	},
285
	{ VIRTIO_NET_F_MQ,		"Multiqueue"	},
326
	{ VIRTIO_NET_F_GUEST_ANNOUNCE,		"GuestAnnounce"		},
286
	{ VIRTIO_NET_F_CTRL_MAC_ADDR,	"SetMacAddress"	},
327
	{ VIRTIO_NET_F_MQ,			"Multiqueue"		},
328
	{ VIRTIO_NET_F_CTRL_MAC_ADDR,		"CtrlMacAddr"		},
329
	{ VIRTIO_NET_F_SPEED_DUPLEX,		"SpeedDuplex"		},
287
330
288
	{ 0, NULL }
331
	{ 0, NULL }
289
};
332
};
Lines 306-329 Link Here
306
349
307
#ifdef DEV_NETMAP
350
#ifdef DEV_NETMAP
308
#include <dev/netmap/if_vtnet_netmap.h>
351
#include <dev/netmap/if_vtnet_netmap.h>
309
#endif /* DEV_NETMAP */
352
#endif
310
353
311
static driver_t vtnet_driver = {
354
static driver_t vtnet_driver = {
312
	"vtnet",
355
	.name = "vtnet",
313
	vtnet_methods,
356
	.methods = vtnet_methods,
314
	sizeof(struct vtnet_softc)
357
	.size = sizeof(struct vtnet_softc)
315
};
358
};
316
static devclass_t vtnet_devclass;
359
static devclass_t vtnet_devclass;
317
360
318
DRIVER_MODULE(vtnet, virtio_mmio, vtnet_driver, vtnet_devclass,
361
DRIVER_MODULE(vtnet, virtio_mmio, vtnet_driver, vtnet_devclass,
319
    vtnet_modevent, 0);
362
    vtnet_modevent, 0);
320
DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
363
DRIVER_MODULE(vtnet, vtpcil, vtnet_driver, vtnet_devclass, vtnet_modevent, 0);
321
    vtnet_modevent, 0);
364
DRIVER_MODULE(vtnet, vtpcim, vtnet_driver, vtnet_devclass, vtnet_modevent, 0);
322
MODULE_VERSION(vtnet, 1);
365
MODULE_VERSION(vtnet, 1);
323
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
366
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
324
#ifdef DEV_NETMAP
367
#ifdef DEV_NETMAP
325
MODULE_DEPEND(vtnet, netmap, 1, 1, 1);
368
MODULE_DEPEND(vtnet, netmap, 1, 1, 1);
326
#endif /* DEV_NETMAP */
369
#endif
327
370
328
static int
371
static int
329
vtnet_modevent(module_t mod, int type, void *unused)
372
vtnet_modevent(module_t mod, int type, void *unused)
Lines 365-371 Link Here
365
	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
408
	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
366
		return (ENXIO);
409
		return (ENXIO);
367
410
368
	device_set_desc(dev, "VirtIO Networking Adapter");
411
	device_set_desc(dev, "VirtIO Network Adapter");
369
412
370
	return (BUS_PROBE_DEFAULT);
413
	return (BUS_PROBE_DEFAULT);
371
}
414
}
Lines 378-393:
 
 	sc = device_get_softc(dev);
 	sc->vtnet_dev = dev;
-
-	/* Register our feature descriptions. */
 	virtio_set_feature_desc(dev, vtnet_feature_desc);
 
 	VTNET_CORE_LOCK_INIT(sc);
 	callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
+	vtnet_load_tunables(sc);
 
+	error = vtnet_alloc_interface(sc);
+	if (error) {
+		device_printf(dev, "cannot allocate interface\n");
+		goto fail;
+	}
+
 	vtnet_setup_sysctl(sc);
-	vtnet_setup_features(sc);
 
+	error = vtnet_setup_features(sc);
+	if (error) {
+		device_printf(dev, "cannot setup features\n");
+		goto fail;
+	}
+
 	error = vtnet_alloc_rx_filters(sc);
 	if (error) {
 		device_printf(dev, "cannot allocate Rx filters\n");
Lines 414-429:
 
 	error = virtio_setup_intr(dev, INTR_TYPE_NET);
 	if (error) {
-		device_printf(dev, "cannot setup virtqueue interrupts\n");
-		/* BMV: This will crash if during boot! */
+		device_printf(dev, "cannot setup interrupts\n");
 		ether_ifdetach(sc->vtnet_ifp);
 		goto fail;
 	}
 
 #ifdef DEV_NETMAP
 	vtnet_netmap_attach(sc);
-#endif /* DEV_NETMAP */
-
+#endif
 	vtnet_start_taskqueues(sc);
 
 fail:
Lines 455-461:
 
 #ifdef DEV_NETMAP
 	netmap_detach(ifp);
-#endif /* DEV_NETMAP */
+#endif
 
 	vtnet_free_taskqueues(sc);
 
Lines 522-528:
 static int
 vtnet_shutdown(device_t dev)
 {
-
 	/*
 	 * Suspend already does all of what we need to
 	 * do here; we just never expect to be resumed.
Lines 533-541:
 static int
 vtnet_attach_completed(device_t dev)
 {
+	struct vtnet_softc *sc;
 
-	vtnet_attach_disable_promisc(device_get_softc(dev));
+	sc = device_get_softc(dev);
 
+	VTNET_CORE_LOCK(sc);
+	vtnet_attached_set_macaddr(sc);
+	VTNET_CORE_UNLOCK(sc);
+
 	return (0);
 }
 
Lines 555-591:
 	return (0);
 }
 
-static void
+static int
 vtnet_negotiate_features(struct vtnet_softc *sc)
 {
 	device_t dev;
-	uint64_t mask, features;
+	uint64_t features, negotiated_features;
+	int no_csum;
 
 	dev = sc->vtnet_dev;
-	mask = 0;
+	features = virtio_bus_is_modern(dev) ? VTNET_MODERN_FEATURES :
+	    VTNET_LEGACY_FEATURES;
 
 	/*
 	 * TSO and LRO are only available when their corresponding checksum
 	 * offload feature is also negotiated.
 	 */
-	if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) {
-		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
-		mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES;
-	}
-	if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
-		mask |= VTNET_TSO_FEATURES;
-	if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
-		mask |= VTNET_LRO_FEATURES;
+	no_csum = vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable);
+	if (no_csum)
+		features &= ~(VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM);
+	if (no_csum || vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
+		features &= ~VTNET_TSO_FEATURES;
+	if (no_csum || vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
+		features &= ~VTNET_LRO_FEATURES;
+
 #ifndef VTNET_LEGACY_TX
 	if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
-		mask |= VIRTIO_NET_F_MQ;
+		features &= ~VIRTIO_NET_F_MQ;
 #else
-	mask |= VIRTIO_NET_F_MQ;
+	features &= ~VIRTIO_NET_F_MQ;
 #endif
 
-	features = VTNET_FEATURES & ~mask;
-	sc->vtnet_features = virtio_negotiate_features(dev, features);
+	negotiated_features = virtio_negotiate_features(dev, features);
 
+	if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
+		uint16_t mtu;
+
+		mtu = virtio_read_dev_config_2(dev,
+		    offsetof(struct virtio_net_config, mtu));
+		if (mtu < VTNET_MIN_MTU /* || mtu > VTNET_MAX_MTU */) {
+			device_printf(dev, "Invalid MTU value: %d. "
+			    "MTU feature disabled.\n", mtu);
+			features &= ~VIRTIO_NET_F_MTU;
+			negotiated_features =
+			    virtio_negotiate_features(dev, features);
+		}
+	}
+
+	if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
+		uint16_t npairs;
+
+		npairs = virtio_read_dev_config_2(dev,
+		    offsetof(struct virtio_net_config, max_virtqueue_pairs));
+		if (npairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+		    npairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
+			device_printf(dev, "Invalid max_virtqueue_pairs value: "
+			    "%d. Multiqueue feature disabled.\n", npairs);
+			features &= ~VIRTIO_NET_F_MQ;
+			negotiated_features =
+			    virtio_negotiate_features(dev, features);
+		}
+	}
+
 	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
 	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
 		/*
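The pattern in this hunk — negotiate, then probe a config field, and if the device advertises a nonsensical value, clear the corresponding feature bit and negotiate again with the reduced set — repeats for MTU, multiqueue, and (below) LRO. A condensed sketch of that loop, using a hypothetical config_valid() callback in place of the driver's concrete checks (the virtio_* calls are the ones used above; everything else here is assumed):

    /*
     * Hypothetical sketch of the "negotiate, validate config, renegotiate"
     * loop; config_valid() stands in for the MTU/MQ range checks.
     */
    static uint64_t
    negotiate_with_fixups(device_t dev, uint64_t features,
        int (*config_valid)(device_t, uint64_t), const uint64_t *checks,
        int nchecks)
    {
            uint64_t negotiated;
            int i;

            negotiated = virtio_negotiate_features(dev, features);
            for (i = 0; i < nchecks; i++) {
                    if (!virtio_with_feature(dev, checks[i]) ||
                        config_valid(dev, checks[i]))
                            continue;
                    /* Device config for this feature is bogus; drop it. */
                    features &= ~checks[i];
                    negotiated = virtio_negotiate_features(dev, features);
            }
            return (negotiated);
    }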
Lines 599-624:
 		 */
 		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
 			device_printf(dev,
-			    "LRO disabled due to both mergeable buffers and "
-			    "indirect descriptors not negotiated\n");
-
+			    "Host LRO disabled since both mergeable buffers "
+			    "and indirect descriptors were not negotiated\n");
 			features &= ~VTNET_LRO_FEATURES;
-			sc->vtnet_features =
+			negotiated_features =
 			    virtio_negotiate_features(dev, features);
 		} else
 			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
 	}
 
+	sc->vtnet_features = negotiated_features;
+	sc->vtnet_negotiated_features = negotiated_features;
+
+	return (virtio_finalize_features(dev));
 }
 
-static void
+static int
 vtnet_setup_features(struct vtnet_softc *sc)
 {
 	device_t dev;
+	int error;
 
 	dev = sc->vtnet_dev;
 
-	vtnet_negotiate_features(sc);
+	error = vtnet_negotiate_features(sc);
+	if (error)
+		return (error);
 
+	if (virtio_with_feature(dev, VIRTIO_F_VERSION_1))
+		sc->vtnet_flags |= VTNET_FLAG_MODERN;
 	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
 		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
 	if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
Lines 629-654:
 		sc->vtnet_flags |= VTNET_FLAG_MAC;
 	}
 
+	if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
+		sc->vtnet_max_mtu = virtio_read_dev_config_2(dev,
+		    offsetof(struct virtio_net_config, mtu));
+	} else
+		sc->vtnet_max_mtu = VTNET_MAX_MTU;
+
 	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
 		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
 		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	} else if (vtnet_modern(sc)) {
+		/* This is identical to the mergeable header. */
+		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_v1);
 	} else
 		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
 
-	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
-		sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;
+	if (vtnet_modern(sc) || sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
+		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_INLINE;
 	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
-		sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS;
+		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_LRO_NOMRG;
 	else
-		sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS;
+		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_SEPARATE;
 
+	/*
+	 * Favor "hardware" LRO if negotiated, but support software LRO as
+	 * a fallback; there is usually little benefit (or worse) with both.
+	 */
+	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) == 0 &&
+	    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6) == 0)
+		sc->vtnet_flags |= VTNET_FLAG_SW_LRO;
+
 	if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
 	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
 	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
-		sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
+		sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MAX;
 	else
-		sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;
+		sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MIN;
 
+	sc->vtnet_req_vq_pairs = 1;
+	sc->vtnet_max_vq_pairs = 1;
+
 	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
 		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
 
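Why modern and mergeable can share the "header inline" segment count: the three Rx header layouts differ only in whether a num_buffers field is present. A sketch of the layouts from the VirtIO spec (sizes are the spec's, 10 bytes legacy and 12 bytes otherwise; these struct definitions are illustrative, not the driver's headers):

    /* Sketch: the three Rx header layouts the driver can negotiate. */
    #include <stdint.h>

    struct net_hdr_legacy {             /* legacy, 10 bytes */
            uint8_t  flags, gso_type;
            uint16_t hdr_len, gso_size, csum_start, csum_offset;
    };
    struct net_hdr_mrg {                /* legacy + MRG_RXBUF, 12 bytes */
            struct net_hdr_legacy hdr;
            uint16_t num_buffers;       /* count of descriptors used */
    };
    /*
     * The modern (VERSION_1) header has the same 12-byte layout as the
     * mergeable one, which is why the hunk above treats vtnet_modern()
     * and MRG_RXBUFS identically (VTNET_RX_SEGS_HDR_INLINE).
     */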
Lines 658-692:
 			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
 		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
 			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
+
+		if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
+			sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
+			    offsetof(struct virtio_net_config,
+			    max_virtqueue_pairs));
+		}
 	}
 
-	if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) &&
-	    sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
-		sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
-		    offsetof(struct virtio_net_config, max_virtqueue_pairs));
-	} else
-		sc->vtnet_max_vq_pairs = 1;
-
 	if (sc->vtnet_max_vq_pairs > 1) {
+		int req;
+
 		/*
-		 * Limit the maximum number of queue pairs to the lower of
-		 * the number of CPUs and the configured maximum.
-		 * The actual number of queues that get used may be less.
+		 * Limit the maximum number of requested queue pairs to the
+		 * number of CPUs and the configured maximum.
 		 */
-		int max;
-
-		max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
-		if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN) {
-			if (max > mp_ncpus)
-				max = mp_ncpus;
-			if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
-				max = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX;
-			if (max > 1) {
-				sc->vtnet_requested_vq_pairs = max;
-				sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
-			}
-		}
+		req = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
+		if (req < 0)
+			req = 1;
+		if (req == 0)
+			req = mp_ncpus;
+		if (req > sc->vtnet_max_vq_pairs)
+			req = sc->vtnet_max_vq_pairs;
+		if (req > mp_ncpus)
+			req = mp_ncpus;
+		if (req > 1) {
+			sc->vtnet_req_vq_pairs = req;
+			sc->vtnet_flags |= VTNET_FLAG_MQ;
+		}
 	}
+
+	return (0);
 }
 
 static int
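The new mq_max_pairs handling reduces to a clamp: a negative tunable means one pair, zero means "one per CPU", and the result is bounded by both the device maximum and mp_ncpus. As a stand-alone sketch of exactly that clamp (not the driver's code; names are hypothetical):

    /* Sketch of the mq_max_pairs clamping above. */
    static int
    clamp_vq_pairs(int req, int max_vq_pairs, int ncpus)
    {
            if (req < 0)
                    req = 1;
            if (req == 0)
                    req = ncpus;            /* 0 means "match the CPU count" */
            if (req > max_vq_pairs)
                    req = max_vq_pairs;     /* device-imposed limit */
            if (req > ncpus)
                    req = ncpus;            /* no point exceeding CPUs */
            return (req);
    }

For example, with mq_max_pairs=0 on an 8-CPU guest whose device offers 4 pairs, the result is 4.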
Lines 707-712:
 	if (rxq->vtnrx_sg == NULL)
 		return (ENOMEM);
 
+#if defined(INET) || defined(INET6)
+	if (vtnet_software_lro(sc)) {
+		if (tcp_lro_init_args(&rxq->vtnrx_lro, sc->vtnet_ifp,
+		    sc->vtnet_lro_entry_count, sc->vtnet_lro_mbufq_depth) != 0)
+			return (ENOMEM);
+	}
+#endif
+
 	TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
 	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
 	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);
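tcp_lro_init_args() sizes a per-queue LRO control block for one interface: the third argument is the number of LRO entries and the fourth the depth of the deferred-mbuf queue (a nonzero depth enables the tcp_lro_queue_mbuf() path used later in this patch). A minimal lifecycle sketch, assuming a hypothetical my_ifp and fixed sizes:

    #include <netinet/tcp_lro.h>

    /* Sketch: software LRO setup/teardown mirroring the hunk above. */
    static int
    lro_setup(struct lro_ctrl *lro, struct ifnet *my_ifp)
    {
            /* 128 entries, queue up to 1024 mbufs per flush (assumed sizes). */
            if (tcp_lro_init_args(lro, my_ifp, 128, 1024) != 0)
                    return (ENOMEM);
            return (0);
    }

    static void
    lro_teardown(struct lro_ctrl *lro)
    {
            tcp_lro_free(lro);  /* matches tcp_lro_free() in the detach path */
    }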
Lines 772-777:
 			return (error);
 	}
 
+	vtnet_set_rx_process_limit(sc);
 	vtnet_setup_queue_sysctl(sc);
 
 	return (0);
Lines 784-789:
 	rxq->vtnrx_sc = NULL;
 	rxq->vtnrx_id = -1;
 
+#if defined(INET) || defined(INET6)
+	tcp_lro_free(&rxq->vtnrx_lro);
+#endif
+
 	if (rxq->vtnrx_sg != NULL) {
 		sglist_free(rxq->vtnrx_sg);
 		rxq->vtnrx_sg = NULL;
Lines 892-919:
 	if (info == NULL)
 		return (ENOMEM);
 
-	for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) {
+	for (i = 0, idx = 0; i < sc->vtnet_req_vq_pairs; i++, idx += 2) {
 		rxq = &sc->vtnet_rxqs[i];
 		VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
 		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
-		    "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id);
+		    "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
 
 		txq = &sc->vtnet_txqs[i];
 		VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
 		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
-		    "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id);
+		    "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
 	}
 
+	/* These queues will not be used so allocate the minimum resources. */
+	for (/**/; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
+		rxq = &sc->vtnet_rxqs[i];
+		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, rxq, &rxq->vtnrx_vq,
+		    "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
+
+		txq = &sc->vtnet_txqs[i];
+		VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL, txq, &txq->vtntx_vq,
+		    "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
+	}
+
 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
 		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
 		    &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
 	}
 
 	/*
-	 * Enable interrupt binding if this is multiqueue. This only matters
-	 * when per-vq MSIX is available.
+	 * TODO: Enable interrupt binding if this is multiqueue. This will
+	 * only matter when per-virtqueue MSIX is available.
 	 */
-	if (sc->vtnet_flags & VTNET_FLAG_MULTIQ)
+	if (sc->vtnet_flags & VTNET_FLAG_MQ)
 		flags |= 0;
 
 	error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
Lines 923-945:
 }
 
 static int
-vtnet_setup_interface(struct vtnet_softc *sc)
+vtnet_alloc_interface(struct vtnet_softc *sc)
 {
 	device_t dev;
 	struct ifnet *ifp;
 
 	dev = sc->vtnet_dev;
 
-	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
-	if (ifp == NULL) {
-		device_printf(dev, "cannot allocate ifnet structure\n");
-		return (ENOSPC);
-	}
+	ifp = if_alloc(IFT_ETHER);
+	if (ifp == NULL)
+		return (ENOMEM);
 
-	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
-	ifp->if_baudrate = IF_Gbps(10);	/* Approx. */
+	sc->vtnet_ifp = ifp;
 	ifp->if_softc = sc;
+	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+
+	return (0);
+}
+
+static int
+vtnet_setup_interface(struct vtnet_softc *sc)
+{
+	device_t dev;
+	struct ifnet *ifp;
+
+	dev = sc->vtnet_dev;
+	ifp = sc->vtnet_ifp;
 
 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_baudrate = IF_Gbps(10);
 	ifp->if_init = vtnet_init;
 	ifp->if_ioctl = vtnet_ioctl;
 	ifp->if_get_counter = vtnet_get_counter;
Lines 954-1004:
 	IFQ_SET_READY(&ifp->if_snd);
 #endif
 
-	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
-	    vtnet_ifmedia_sts);
-	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
-	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);
-
-	/* Read (or generate) the MAC address for the adapter. */
-	vtnet_get_hwaddr(sc);
-
-	ether_ifattach(ifp, sc->vtnet_hwaddr);
+	vtnet_get_macaddr(sc);
 
 	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
 		ifp->if_capabilities |= IFCAP_LINKSTATE;
 
-	/* Tell the upper layer(s) we support long frames. */
-	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
-	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
+	ifmedia_init(&sc->vtnet_media, 0, vtnet_ifmedia_upd, vtnet_ifmedia_sts);
+	ifmedia_add(&sc->vtnet_media, IFM_ETHER | IFM_AUTO, 0, NULL);
+	ifmedia_set(&sc->vtnet_media, IFM_ETHER | IFM_AUTO);
 
 	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
+		int gso;
+
 		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
 
-		if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) {
-			ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
+		gso = virtio_with_feature(dev, VIRTIO_NET_F_GSO);
+		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
+			ifp->if_capabilities |= IFCAP_TSO4;
+		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
+			ifp->if_capabilities |= IFCAP_TSO6;
+		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
 			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
-		} else {
-			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
-				ifp->if_capabilities |= IFCAP_TSO4;
-			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
-				ifp->if_capabilities |= IFCAP_TSO6;
-			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
-				sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
-		}
 
-		if (ifp->if_capabilities & IFCAP_TSO)
+		if (ifp->if_capabilities & (IFCAP_TSO4 | IFCAP_TSO6)) {
+			int tso_maxlen;
+
 			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
+
+			tso_maxlen = vtnet_tunable_int(sc, "tso_maxlen",
+			    vtnet_tso_maxlen);
+			ifp->if_hw_tsomax = tso_maxlen -
+			    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
+			ifp->if_hw_tsomaxsegcount = sc->vtnet_tx_nsegs - 1;
+			ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
+		}
 	}
 
 	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
-		ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
+		ifp->if_capabilities |= IFCAP_RXCSUM;
+#ifdef notyet
+		/* BMV: Rx checksums not distinguished between IPv4 and IPv6. */
+		ifp->if_capabilities |= IFCAP_RXCSUM_IPV6;
+#endif
 
-		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
-		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
-			ifp->if_capabilities |= IFCAP_LRO;
+		if (vtnet_tunable_int(sc, "fixup_needs_csum",
+		    vtnet_fixup_needs_csum) != 0)
+			sc->vtnet_flags |= VTNET_FLAG_FIXUP_NEEDS_CSUM;
+
+		/* Support either "hardware" or software LRO. */
+		ifp->if_capabilities |= IFCAP_LRO;
 	}
 
-	if (ifp->if_capabilities & IFCAP_HWCSUM) {
+	if (ifp->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6)) {
 		/*
 		 * VirtIO does not support VLAN tagging, but we can fake
 		 * it by inserting and removing the 802.1Q header during
Lines 1009-1019:
 		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
 	}
 
-	ifp->if_capenable = ifp->if_capabilities;
+	if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO)
+		ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+	ifp->if_capabilities |= IFCAP_VLAN_MTU;
 
 	/*
 	 * Capabilities after here are not enabled by default.
 	 */
+	ifp->if_capenable = ifp->if_capabilities;
 
 	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
 		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
Lines 1024-1082:
 		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
 	}
 
-	vtnet_set_rx_process_limit(sc);
-	vtnet_set_tx_intr_threshold(sc);
+	ether_ifattach(ifp, sc->vtnet_hwaddr);
 
+	/* Tell the upper layer(s) we support long frames. */
+	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
+
 	NETDUMP_SET(ifp, vtnet);
 
 	return (0);
 }
 
 static int
-vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
+vtnet_rx_cluster_size(struct vtnet_softc *sc, int mtu)
 {
+	int framesz;
+
+	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
+		return (MJUMPAGESIZE);
+	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
+		return (MCLBYTES);
+
+	/*
+	 * Try to scale the receive mbuf cluster size from the MTU. Without
+	 * the GUEST_TSO[46] features, the VirtIO specification says the
+	 * driver must only be able to receive ~1500 byte frames. But if
+	 * jumbo frames can be transmitted then try to receive jumbo.
+	 *
+	 * BMV: Not quite true when F_MTU is negotiated!
+	 */
+	if (vtnet_modern(sc)) {
+		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr_v1));
+		framesz = sizeof(struct virtio_net_hdr_v1);
+	} else
+		framesz = sizeof(struct vtnet_rx_header);
+	framesz += sizeof(struct ether_vlan_header) + mtu;
+
+	if (framesz <= MCLBYTES)
+		return (MCLBYTES);
+	else if (framesz <= MJUMPAGESIZE)
+		return (MJUMPAGESIZE);
+	else if (framesz <= MJUM9BYTES)
+		return (MJUM9BYTES);
+
+	/* Sane default; avoid 16KB clusters. */
+	return (MCLBYTES);
+}
+
+static int
+vtnet_ioctl_mtu(struct vtnet_softc *sc, int mtu)
+{
 	struct ifnet *ifp;
-	int frame_size, clsize;
+	int clustersz;
 
 	ifp = sc->vtnet_ifp;
+	VTNET_CORE_LOCK_ASSERT(sc);
 
-	if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU)
+	if (ifp->if_mtu == mtu)
+		return (0);
+	else if (mtu < ETHERMIN || mtu > sc->vtnet_max_mtu)
 		return (EINVAL);
 
-	frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) +
-	    new_mtu;
+	ifp->if_mtu = mtu;
+	clustersz = vtnet_rx_cluster_size(sc, mtu);
 
-	/*
-	 * Based on the new MTU (and hence frame size) determine which
-	 * cluster size is most appropriate for the receive queues.
-	 */
-	if (frame_size <= MCLBYTES) {
-		clsize = MCLBYTES;
-	} else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
-		/* Avoid going past 9K jumbos. */
-		if (frame_size > MJUM9BYTES)
-			return (EINVAL);
-		clsize = MJUM9BYTES;
-	} else
-		clsize = MJUMPAGESIZE;
+	if (clustersz != sc->vtnet_rx_clustersz &&
+	    ifp->if_drv_flags & IFF_DRV_RUNNING) {
+		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+		vtnet_init_locked(sc);
+	}
 
-	ifp->if_mtu = new_mtu;
-	sc->vtnet_rx_new_clsize = clsize;
+	return (0);
+}
 
-	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+static int
+vtnet_ioctl_ifflags(struct vtnet_softc *sc)
+{
+	struct ifnet *ifp;
+	int drv_running;
+
+	ifp = sc->vtnet_ifp;
+	drv_running = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
+
+	VTNET_CORE_LOCK_ASSERT(sc);
+
+	if ((ifp->if_flags & IFF_UP) == 0) {
+		if (drv_running)
+			vtnet_stop(sc);
+		goto out;
+	}
+
+	if (!drv_running) {
 		vtnet_init_locked(sc);
+		goto out;
 	}
 
+	if ((ifp->if_flags ^ sc->vtnet_if_flags) &
+	    (IFF_PROMISC | IFF_ALLMULTI)) {
+		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
+			return (ENOTSUP);
+		vtnet_rx_filter(sc);
+	}
+
+out:
+	sc->vtnet_if_flags = ifp->if_flags;
 	return (0);
 }
 
 static int
+vtnet_ioctl_multi(struct vtnet_softc *sc)
+{
+	struct ifnet *ifp;
+
+	ifp = sc->vtnet_ifp;
+
+	VTNET_CORE_LOCK_ASSERT(sc);
+
+	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX &&
+	    ifp->if_drv_flags & IFF_DRV_RUNNING)
+		vtnet_rx_filter_mac(sc);
+
+	return (0);
+}
+
+static int
+vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr)
+{
+	struct ifnet *ifp;
+	int mask, reinit, update;
+
+	ifp = sc->vtnet_ifp;
+	mask = (ifr->ifr_reqcap & ifp->if_capabilities) ^ ifp->if_capenable;
+	reinit = update = 0;
+
+	VTNET_CORE_LOCK_ASSERT(sc);
+
+	if (mask & IFCAP_TXCSUM)
+		ifp->if_capenable ^= IFCAP_TXCSUM;
+	if (mask & IFCAP_TXCSUM_IPV6)
+		ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
+	if (mask & IFCAP_TSO4)
+		ifp->if_capenable ^= IFCAP_TSO4;
+	if (mask & IFCAP_TSO6)
+		ifp->if_capenable ^= IFCAP_TSO6;
+
+	if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) {
+		/*
+		 * These Rx features require the negotiated features to
+		 * be updated. Avoid a full reinit if possible.
+		 */
+		if (sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
+			update = 1;
+		else
+			reinit = 1;
+
+		/* BMV: Avoid needless renegotiation for just software LRO. */
+		if ((mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) ==
+		    IFCAP_LRO && vtnet_software_lro(sc))
+			reinit = update = 0;
+
+		if (mask & IFCAP_RXCSUM)
+			ifp->if_capenable ^= IFCAP_RXCSUM;
+		if (mask & IFCAP_RXCSUM_IPV6)
+			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
+		if (mask & IFCAP_LRO)
+			ifp->if_capenable ^= IFCAP_LRO;
+
+		/*
+		 * VirtIO does not distinguish between IPv4 and IPv6 checksums
+		 * so treat them as a pair. Guest TSO (LRO) requires receive
+		 * checksums.
+		 */
+		if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
+			ifp->if_capenable |= IFCAP_RXCSUM;
+#ifdef notyet
+			ifp->if_capenable |= IFCAP_RXCSUM_IPV6;
+#endif
+		} else
+			ifp->if_capenable &=
+			    ~(IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO);
+	}
+
+	if (mask & IFCAP_VLAN_HWFILTER) {
+		/* These Rx features require renegotiation. */
+		reinit = 1;
+
+		if (mask & IFCAP_VLAN_HWFILTER)
+			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+	}
+
+	if (mask & IFCAP_VLAN_HWTSO)
+		ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+	if (mask & IFCAP_VLAN_HWTAGGING)
+		ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+
+	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+		if (reinit) {
+			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+			vtnet_init_locked(sc);
+		} else if (update)
+			vtnet_update_rx_offloads(sc);
+	}
+
+	return (0);
+}
+
+static int
 vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 {
 	struct vtnet_softc *sc;
 	struct ifreq *ifr;
-	int reinit, mask, error;
+	int error;
 
 	sc = ifp->if_softc;
 	ifr = (struct ifreq *) data;
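Working the arithmetic in vtnet_rx_cluster_size() above for a modern device: the inlined v1 header is 12 bytes and the VLAN-capable Ethernet header 18, so a 1500-byte MTU needs 1530 bytes and fits a 2KB MCLBYTES cluster, while a 9000-byte MTU needs 9030 bytes, which overflows both 2KB and 4KB clusters and lands in MJUM9BYTES. A sketch with the sizes spelled out (the constants are the usual values on 4KB-page platforms; treat them as assumptions):

    /* Sketch: frame sizing for a modern device (assumed sizes in bytes). */
    #define HDR_V1          12      /* sizeof(struct virtio_net_hdr_v1) */
    #define ETH_VLAN_HDR    18      /* sizeof(struct ether_vlan_header) */

    static int
    cluster_for_mtu(int mtu)
    {
            int framesz = HDR_V1 + ETH_VLAN_HDR + mtu;

            /* MTU 1500 -> 1530 -> 2048; MTU 9000 -> 9030 -> 9216. */
            if (framesz <= 2048)
                    return (2048);          /* MCLBYTES */
            else if (framesz <= 4096)
                    return (4096);          /* MJUMPAGESIZE */
            else if (framesz <= 9216)
                    return (9216);          /* MJUM9BYTES */
            return (2048);                  /* mirror the driver's "sane default" */
    }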
Lines 1084-1128:
 
 	switch (cmd) {
 	case SIOCSIFMTU:
-		if (ifp->if_mtu != ifr->ifr_mtu) {
-			VTNET_CORE_LOCK(sc);
-			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
-			VTNET_CORE_UNLOCK(sc);
-		}
+		VTNET_CORE_LOCK(sc);
+		error = vtnet_ioctl_mtu(sc, ifr->ifr_mtu);
+		VTNET_CORE_UNLOCK(sc);
 		break;
 
 	case SIOCSIFFLAGS:
 		VTNET_CORE_LOCK(sc);
-		if ((ifp->if_flags & IFF_UP) == 0) {
-			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
-				vtnet_stop(sc);
-		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
-			    (IFF_PROMISC | IFF_ALLMULTI)) {
-				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
-					vtnet_rx_filter(sc);
-				else {
-					ifp->if_flags |= IFF_PROMISC;
-					if ((ifp->if_flags ^ sc->vtnet_if_flags)
-					    & IFF_ALLMULTI)
-						error = ENOTSUP;
-				}
-			}
-		} else
-			vtnet_init_locked(sc);
-
-		if (error == 0)
-			sc->vtnet_if_flags = ifp->if_flags;
+		error = vtnet_ioctl_ifflags(sc);
 		VTNET_CORE_UNLOCK(sc);
 		break;
 
 	case SIOCADDMULTI:
 	case SIOCDELMULTI:
-		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
-			break;
 		VTNET_CORE_LOCK(sc);
-		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
-			vtnet_rx_filter_mac(sc);
+		error = vtnet_ioctl_multi(sc);
 		VTNET_CORE_UNLOCK(sc);
 		break;
 
Lines 1133-1178:
 
 	case SIOCSIFCAP:
 		VTNET_CORE_LOCK(sc);
-		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
-
-		if (mask & IFCAP_TXCSUM)
-			ifp->if_capenable ^= IFCAP_TXCSUM;
-		if (mask & IFCAP_TXCSUM_IPV6)
-			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
-		if (mask & IFCAP_TSO4)
-			ifp->if_capenable ^= IFCAP_TSO4;
-		if (mask & IFCAP_TSO6)
-			ifp->if_capenable ^= IFCAP_TSO6;
-
-		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
-		    IFCAP_VLAN_HWFILTER)) {
-			/* These Rx features require us to renegotiate. */
-			reinit = 1;
-
-			if (mask & IFCAP_RXCSUM)
-				ifp->if_capenable ^= IFCAP_RXCSUM;
-			if (mask & IFCAP_RXCSUM_IPV6)
-				ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
-			if (mask & IFCAP_LRO)
-				ifp->if_capenable ^= IFCAP_LRO;
-			if (mask & IFCAP_VLAN_HWFILTER)
-				ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
-		} else
-			reinit = 0;
-
-		if (mask & IFCAP_VLAN_HWTSO)
-			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
-		if (mask & IFCAP_VLAN_HWTAGGING)
-			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
-
-		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
-			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
-			vtnet_init_locked(sc);
-		}
-
+		error = vtnet_ioctl_ifcap(sc, ifr);
 		VTNET_CORE_UNLOCK(sc);
 		VLAN_CAPABILITIES(ifp);
-
 		break;
 
 	default:
Lines 1191-1202:
 	struct virtqueue *vq;
 	int nbufs, error;
 
-#ifdef DEV_NETMAP
-	error = vtnet_netmap_rxq_populate(rxq);
-	if (error >= 0)
-		return (error);
-#endif  /* DEV_NETMAP */
-
 	vq = rxq->vtnrx_vq;
 	error = ENOSPC;
 
Lines 1226-1245:
 	struct virtqueue *vq;
 	struct mbuf *m;
 	int last;
-#ifdef DEV_NETMAP
-	int netmap_bufs = vtnet_netmap_queue_on(rxq->vtnrx_sc, NR_RX,
-						rxq->vtnrx_id);
-#else  /* !DEV_NETMAP */
-	int netmap_bufs = 0;
-#endif /* !DEV_NETMAP */
 
 	vq = rxq->vtnrx_vq;
 	last = 0;
 
-	while ((m = virtqueue_drain(vq, &last)) != NULL) {
-		if (!netmap_bufs)
-			m_freem(m);
-	}
+	while ((m = virtqueue_drain(vq, &last)) != NULL)
+		m_freem(m);
 
 	KASSERT(virtqueue_empty(vq),
 	    ("%s: mbufs remaining in rx queue %p", __func__, rxq));
Lines 1249-1305:
 vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
 {
 	struct mbuf *m_head, *m_tail, *m;
-	int i, clsize;
+	int i, size;
 
-	clsize = sc->vtnet_rx_clsize;
+	m_head = NULL;
+	size = sc->vtnet_rx_clustersz;
 
 	KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
-	    ("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs));
+	    ("%s: mbuf %d chain requested without LRO_NOMRG", __func__, nbufs));
 
-	m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize);
-	if (m_head == NULL)
-		goto fail;
+	for (i = 0; i < nbufs; i++) {
+		m = m_getjcl(M_NOWAIT, MT_DATA, i == 0 ? M_PKTHDR : 0, size);
+		if (m == NULL) {
+			sc->vtnet_stats.mbuf_alloc_failed++;
+			m_freem(m_head);
+			return (NULL);
+		}
 
-	m_head->m_len = clsize;
-	m_tail = m_head;
-
-	/* Allocate the rest of the chain. */
-	for (i = 1; i < nbufs; i++) {
-		m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize);
-		if (m == NULL)
-			goto fail;
-
-		m->m_len = clsize;
-		m_tail->m_next = m;
-		m_tail = m;
+		m->m_len = size;
+		if (m_head != NULL) {
+			m_tail->m_next = m;
+			m_tail = m;
+		} else
+			m_head = m_tail = m;
 	}
 
 	if (m_tailp != NULL)
 		*m_tailp = m_tail;
 
 	return (m_head);
-
-fail:
-	sc->vtnet_stats.mbuf_alloc_failed++;
-	m_freem(m_head);
-
-	return (NULL);
 }
 
 /*
  * Slow path for when LRO without mergeable buffers is negotiated.
  */
 static int
-vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
+vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
     int len0)
 {
 	struct vtnet_softc *sc;
-	struct mbuf *m, *m_prev;
-	struct mbuf *m_new, *m_tail;
-	int len, clsize, nreplace, error;
+	struct mbuf *m, *m_prev, *m_new, *m_tail;
+	int len, clustersz, nreplace, error;
 
 	sc = rxq->vtnrx_sc;
-	clsize = sc->vtnet_rx_clsize;
+	clustersz = sc->vtnet_rx_clustersz;
 
 	m_prev = NULL;
 	m_tail = NULL;
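For the LRO_NOMRG case this function is asked for a whole chain at once. A rough illustration of the sizing involved (a stand-alone userland sketch, not driver code): covering a maximal 64KB LRO frame with 2KB MCLBYTES clusters takes 32 mbufs.

    /* Sketch: how many clusters a maximal LRO frame needs. */
    #include <stdio.h>

    int
    main(void)
    {
            int frame = 65535;      /* maximum TSO/LRO frame */
            int cluster = 2048;     /* MCLBYTES */
            int nbufs = (frame + cluster - 1) / cluster;

            printf("%d clusters\n", nbufs); /* prints "32 clusters" */
            return (0);
    }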
Lines 1309-1333:
 	len = len0;
 
 	/*
-	 * Since these mbuf chains are so large, we avoid allocating an
-	 * entire replacement chain if possible. When the received frame
-	 * did not consume the entire chain, the unused mbufs are moved
-	 * to the replacement chain.
+	 * Since these mbuf chains are so large, avoid allocating a complete
+	 * replacement when the received frame did not consume the entire
+	 * chain. Unused mbufs are moved to the tail of the replacement mbuf.
 	 */
 	while (len > 0) {
-		/*
-		 * Something is seriously wrong if we received a frame
-		 * larger than the chain. Drop it.
-		 */
 		if (m == NULL) {
 			sc->vtnet_stats.rx_frame_too_large++;
 			return (EMSGSIZE);
 		}
 
-		/* We always allocate the same cluster size. */
-		KASSERT(m->m_len == clsize,
-		    ("%s: mbuf size %d is not the cluster size %d",
-		    __func__, m->m_len, clsize));
+		/*
+		 * Every mbuf should have the expected cluster size since that
+		 * is also used to allocate the replacements.
+		 */
+		KASSERT(m->m_len == clustersz,
+		    ("%s: mbuf size %d not expected cluster size %d", __func__,
+		    m->m_len, clustersz));
 
 		m->m_len = MIN(m->m_len, len);
 		len -= m->m_len;
Lines 1337-1355:
 		nreplace++;
 	}
 
-	KASSERT(nreplace <= sc->vtnet_rx_nmbufs,
-	    ("%s: too many replacement mbufs %d max %d", __func__, nreplace,
-	    sc->vtnet_rx_nmbufs));
+	KASSERT(nreplace > 0 && nreplace <= sc->vtnet_rx_nmbufs,
+	    ("%s: invalid replacement mbuf count %d max %d", __func__,
+	    nreplace, sc->vtnet_rx_nmbufs));
 
 	m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
 	if (m_new == NULL) {
-		m_prev->m_len = clsize;
+		m_prev->m_len = clustersz;
 		return (ENOBUFS);
 	}
 
 	/*
-	 * Move any unused mbufs from the received chain onto the end
-	 * of the new chain.
+	 * Move any unused mbufs from the received mbuf chain onto the
+	 * end of the replacement chain.
 	 */
 	if (m_prev->m_next != NULL) {
 		m_tail->m_next = m_prev->m_next;
Lines 1359-1379:
 	error = vtnet_rxq_enqueue_buf(rxq, m_new);
 	if (error) {
 		/*
-		 * BAD! We could not enqueue the replacement mbuf chain. We
-		 * must restore the m0 chain to the original state if it was
-		 * modified so we can subsequently discard it.
+		 * The replacement is supposed to be a copy of the one
+		 * dequeued, so this is a very unexpected error.
 		 *
-		 * NOTE: The replacement is suppose to be an identical copy
-		 * to the one just dequeued so this is an unexpected error.
+		 * Restore the m0 chain to the original state if it was
+		 * modified so we can then discard it.
 		 */
-		sc->vtnet_stats.rx_enq_replacement_failed++;
-
 		if (m_tail->m_next != NULL) {
 			m_prev->m_next = m_tail->m_next;
 			m_tail->m_next = NULL;
 		}
 
-		m_prev->m_len = clsize;
+		m_prev->m_len = clustersz;
+		sc->vtnet_stats.rx_enq_replacement_failed++;
 		m_freem(m_new);
 	}
 
Lines 1389-1419:
 
 	sc = rxq->vtnrx_sc;
 
-	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
-	    ("%s: chained mbuf without LRO_NOMRG", __func__));
+	if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
+		return (vtnet_rxq_replace_lro_nomrg_buf(rxq, m, len));
 
-	if (m->m_next == NULL) {
-		/* Fast-path for the common case of just one mbuf. */
-		if (m->m_len < len)
-			return (EINVAL);
+	MPASS(m->m_next == NULL);
+	if (m->m_len < len)
+		return (EMSGSIZE);
 
-		m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
-		if (m_new == NULL)
-			return (ENOBUFS);
+	m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
+	if (m_new == NULL)
+		return (ENOBUFS);
 
-		error = vtnet_rxq_enqueue_buf(rxq, m_new);
-		if (error) {
-			/*
-			 * The new mbuf is suppose to be an identical
-			 * copy of the one just dequeued so this is an
-			 * unexpected error.
-			 */
-			m_freem(m_new);
-			sc->vtnet_stats.rx_enq_replacement_failed++;
-		} else
-			m->m_len = len;
+	error = vtnet_rxq_enqueue_buf(rxq, m_new);
+	if (error) {
+		sc->vtnet_stats.rx_enq_replacement_failed++;
+		m_freem(m_new);
 	} else
-		error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len);
+		m->m_len = len;
 
 	return (error);
 }
Lines 1423-1461:
 {
 	struct vtnet_softc *sc;
 	struct sglist *sg;
-	struct vtnet_rx_header *rxhdr;
-	uint8_t *mdata;
-	int offset, error;
+	int header_inlined, error;
 
 	sc = rxq->vtnrx_sc;
 	sg = rxq->vtnrx_sg;
-	mdata = mtod(m, uint8_t *);
 
+	KASSERT(m->m_next == NULL || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
+	    ("%s: mbuf chain without LRO_NOMRG", __func__));
 	VTNET_RXQ_LOCK_ASSERT(rxq);
-	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
-	    ("%s: chained mbuf without LRO_NOMRG", __func__));
-	KASSERT(m->m_len == sc->vtnet_rx_clsize,
-	    ("%s: unexpected cluster size %d/%d", __func__, m->m_len,
-	     sc->vtnet_rx_clsize));
 
 	sglist_reset(sg);
-	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
+	header_inlined = vtnet_modern(sc) ||
+	    (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) != 0; /* TODO: ANY_LAYOUT */
+
+	if (header_inlined)
+		error = sglist_append_mbuf(sg, m);
+	else {
+		struct vtnet_rx_header *rxhdr =
+		    mtod(m, struct vtnet_rx_header *);
 		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
-		rxhdr = (struct vtnet_rx_header *) mdata;
-		sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
-		offset = sizeof(struct vtnet_rx_header);
-	} else
-		offset = 0;
 
-	sglist_append(sg, mdata + offset, m->m_len - offset);
-	if (m->m_next != NULL) {
-		error = sglist_append_mbuf(sg, m->m_next);
-		MPASS(error == 0);
+		/* Append the header and remaining mbuf data. */
+		error = sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
+		if (error)
+			return (error);
+		error = sglist_append(sg, &rxhdr[1],
+		    m->m_len - sizeof(struct vtnet_rx_header));
+		if (error)
+			return (error);
+
+		if (m->m_next != NULL)
+			error = sglist_append_mbuf(sg, m->m_next);
 	}
 
-	error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg);
+	if (error)
+		return (error);
 
-	return (error);
+	return (virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg));
 }
 
 static int
Lines 1478-1531:
 	return (error);
 }
 
-/*
- * Use the checksum offset in the VirtIO header to set the
- * correct CSUM_* flags.
- */
 static int
-vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m,
-    uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
+vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t etype,
+    int hoff, struct virtio_net_hdr *hdr)
 {
 	struct vtnet_softc *sc;
-#if defined(INET) || defined(INET6)
-	int offset = hdr->csum_start + hdr->csum_offset;
-#endif
+	int error;
 
 	sc = rxq->vtnrx_sc;
 
-	/* Only do a basic sanity check on the offset. */
-	switch (eth_type) {
-#if defined(INET)
-	case ETHERTYPE_IP:
-		if (__predict_false(offset < ip_start + sizeof(struct ip)))
-			return (1);
-		break;
-#endif
-#if defined(INET6)
-	case ETHERTYPE_IPV6:
-		if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
-			return (1);
-		break;
-#endif
-	default:
-		sc->vtnet_stats.rx_csum_bad_ethtype++;
-		return (1);
+	/*
+	 * NEEDS_CSUM corresponds to Linux's CHECKSUM_PARTIAL, but FreeBSD does
+	 * not have an analogous CSUM flag. The checksum has been validated,
+	 * but is incomplete (TCP/UDP pseudo header).
+	 *
+	 * The packet is likely from another VM on the same host that itself
+	 * performed checksum offloading so Tx/Rx is basically a memcpy and
+	 * the checksum has little value.
+	 *
+	 * Default to receiving the packet as-is for performance reasons, but
+	 * this can cause issues if the packet is to be forwarded because it
+	 * does not contain a valid checksum. This patch may be helpful:
+	 * https://reviews.freebsd.org/D6611. In the meantime, have the driver
+	 * compute the checksum if requested.
+	 *
+	 * BMV: Need to add a CSUM_PARTIAL flag?
+	 */
+	if ((sc->vtnet_flags & VTNET_FLAG_FIXUP_NEEDS_CSUM) == 0) {
+		error = vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr);
+		return (error);
 	}
 
 	/*
-	 * Use the offset to determine the appropriate CSUM_* flags. This is
-	 * a bit dirty, but we can get by with it since the checksum offsets
-	 * happen to be different. We assume the host host does not do IPv4
-	 * header checksum offloading.
+	 * Compute the checksum in the driver so the packet will contain a
+	 * valid checksum. The checksum is at csum_offset from csum_start.
 	 */
-	switch (hdr->csum_offset) {
-	case offsetof(struct udphdr, uh_sum):
-	case offsetof(struct tcphdr, th_sum):
+	switch (etype) {
+#if defined(INET) || defined(INET6)
+	case ETHERTYPE_IP:
+	case ETHERTYPE_IPV6: {
+		int csum_off, csum_end;
+		uint16_t csum;
+
+		csum_off = hdr->csum_start + hdr->csum_offset;
+		csum_end = csum_off + sizeof(uint16_t);
+
+		/* Assume checksum will be in the first mbuf. */
+		if (m->m_len < csum_end || m->m_pkthdr.len < csum_end)
+			return (1);
+
+		/*
+		 * Like in_delayed_cksum()/in6_delayed_cksum(), compute the
+		 * checksum and write it at the specified offset. We could
+		 * try to verify the packet: csum_start should probably
+		 * correspond to the start of the TCP/UDP header.
+		 *
+		 * BMV: Need to properly handle UDP with zero checksum. Is
+		 * the IPv4 header checksum implicitly validated?
+		 */
+		csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start);
+		*(uint16_t *)(mtodo(m, csum_off)) = csum;
 		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 		m->m_pkthdr.csum_data = 0xFFFF;
 		break;
+	}
+#endif
 	default:
-		sc->vtnet_stats.rx_csum_bad_offset++;
+		sc->vtnet_stats.rx_csum_bad_ethtype++;
 		return (1);
 	}
 
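The two VirtIO Rx checksum hints map onto FreeBSD's mbuf flags differently: DATA_VALID means the host already verified the checksum, while NEEDS_CSUM means only the pseudo-header sum has been filled in. A condensed sketch of that distinction, under the same convention the hunk above uses (a verified checksum is reported via CSUM_DATA_VALID | CSUM_PSEUDO_HDR; this helper is illustrative, not the driver's code):

    /* Sketch: mapping VirtIO header flags to mbuf checksum flags. */
    static void
    mark_rx_csum(struct mbuf *m, uint8_t vhdr_flags)
    {
            if (vhdr_flags & VIRTIO_NET_HDR_F_DATA_VALID) {
                    /* Host verified the checksum; tell the stack to trust it. */
                    m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                    m->m_pkthdr.csum_data = 0xFFFF;
            } else if (vhdr_flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                    /*
                     * Only the pseudo-header sum is present; the driver must
                     * either finish the checksum itself (the FIXUP_NEEDS_CSUM
                     * path above) or pass the packet up unverified.
                     */
            }
    }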
Lines 1533-1596:
 }
 
 static int
-vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m,
-    uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
+vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m,
+    uint16_t etype, int hoff, struct virtio_net_hdr *hdr)
 {
 	struct vtnet_softc *sc;
-	int offset, proto;
+	int protocol;
 
 	sc = rxq->vtnrx_sc;
 
-	switch (eth_type) {
+	switch (etype) {
 #if defined(INET)
-	case ETHERTYPE_IP: {
-		struct ip *ip;
-		if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
-			return (1);
-		ip = (struct ip *)(m->m_data + ip_start);
-		proto = ip->ip_p;
-		offset = ip_start + (ip->ip_hl << 2);
+	case ETHERTYPE_IP:
+		if (__predict_false(m->m_len < hoff + sizeof(struct ip)))
+			protocol = IPPROTO_DONE;
+		else {
+			struct ip *ip = (struct ip *)(m->m_data + hoff);
+			protocol = ip->ip_p;
+		}
 		break;
-	}
 #endif
 #if defined(INET6)
 	case ETHERTYPE_IPV6:
-		if (__predict_false(m->m_len < ip_start +
-		    sizeof(struct ip6_hdr)))
-			return (1);
-		offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
-		if (__predict_false(offset < 0))
-			return (1);
+		if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr))
+		    || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0)
+			protocol = IPPROTO_DONE;
 		break;
 #endif
 	default:
-		sc->vtnet_stats.rx_csum_bad_ethtype++;
-		return (1);
+		protocol = IPPROTO_DONE;
+		break;
 	}
 
-	switch (proto) {
+	switch (protocol) {
 	case IPPROTO_TCP:
-		if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
-			return (1);
-		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
-		m->m_pkthdr.csum_data = 0xFFFF;
-		break;
 	case IPPROTO_UDP:
-		if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
-			return (1);
 		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 		m->m_pkthdr.csum_data = 0xFFFF;
 		break;
 	default:
 		/*
-		 * For the remaining protocols, FreeBSD does not support
-		 * checksum offloading, so the checksum will be recomputed.
+		 * FreeBSD does not support checksum offloading of this
+		 * protocol. Let the stack re-verify the checksum later
+		 * if the protocol is supported.
 		 */
 #if 0
-		if_printf(sc->vtnet_ifp, "cksum offload of unsupported "
-		    "protocol eth_type=%#x proto=%d csum_start=%d "
-		    "csum_offset=%d\n", __func__, eth_type, proto,
-		    hdr->csum_start, hdr->csum_offset);
+		if_printf(sc->vtnet_ifp,
+		    "%s: checksum offload of unsupported protocol "
+		    "etype=%#x protocol=%d csum_start=%d csum_offset=%d\n",
+		    __func__, etype, protocol, hdr->csum_start,
+		    hdr->csum_offset);
 #endif
 		break;
 	}
Lines 1598-1638:
 	return (0);
 }
 
-/*
- * Set the appropriate CSUM_* flags. Unfortunately, the information
- * provided is not directly useful to us. The VirtIO header gives the
- * offset of the checksum, which is all Linux needs, but this is not
- * how FreeBSD does things. We are forced to peek inside the packet
- * a bit.
- *
- * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
- * could accept the offsets and let the stack figure it out.
- */
 static int
 vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
     struct virtio_net_hdr *hdr)
 {
-	struct ether_header *eh;
-	struct ether_vlan_header *evh;
-	uint16_t eth_type;
-	int offset, error;
+	const struct ether_header *eh;
+	int hoff;
+	uint16_t etype;
 
-	eh = mtod(m, struct ether_header *);
-	eth_type = ntohs(eh->ether_type);
-	if (eth_type == ETHERTYPE_VLAN) {
-		/* BMV: We should handle nested VLAN tags too. */
-		evh = mtod(m, struct ether_vlan_header *);
-		eth_type = ntohs(evh->evl_proto);
-		offset = sizeof(struct ether_vlan_header);
+	eh = mtod(m, const struct ether_header *);
+	etype = ntohs(eh->ether_type);
+	if (etype == ETHERTYPE_VLAN) {
+		/* TODO BMV: Handle QinQ. */
+		const struct ether_vlan_header *evh =
+		    mtod(m, const struct ether_vlan_header *);
+		etype = ntohs(evh->evl_proto);
+		hoff = sizeof(struct ether_vlan_header);
 	} else
-		offset = sizeof(struct ether_header);
+		hoff = sizeof(struct ether_header);
 
 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
-		error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr);
+		return (vtnet_rxq_csum_needs_csum(rxq, m, etype, hoff, hdr));
 	else
-		error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr);
-
-	return (error);
+		/* VIRTIO_NET_HDR_F_DATA_VALID */
+		return (vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr));
 }
 
 static void
Lines 1667-1680:
 {
 	struct vtnet_softc *sc;
 	struct virtqueue *vq;
-	struct mbuf *m, *m_tail;
-	int len;
+	struct mbuf *m_tail;
 
 	sc = rxq->vtnrx_sc;
 	vq = rxq->vtnrx_vq;
 	m_tail = m_head;
 
 	while (--nbufs > 0) {
+		struct mbuf *m;
+		int len;
+
 		m = virtqueue_dequeue(vq, &len);
 		if (m == NULL) {
 			rxq->vtnrx_stats.vrxs_ierrors++;
Lines 1709-1727:
 	return (1);
 }
 
+#if defined(INET) || defined(INET6)
+static int
+vtnet_lro_rx(struct vtnet_rxq *rxq, struct mbuf *m)
+{
+	struct lro_ctrl *lro;
+
+	lro = &rxq->vtnrx_lro;
+
+	if (lro->lro_mbuf_max != 0) {
+		tcp_lro_queue_mbuf(lro, m);
+		return (0);
+	}
+
+	return (tcp_lro_rx(lro, m, 0));
+}
+#endif
+
 static void
 vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
     struct virtio_net_hdr *hdr)
 {
 	struct vtnet_softc *sc;
 	struct ifnet *ifp;
-	struct ether_header *eh;
 
 	sc = rxq->vtnrx_sc;
 	ifp = sc->vtnet_ifp;
 
 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
-		eh = mtod(m, struct ether_header *);
+		struct ether_header *eh = mtod(m, struct ether_header *);
 		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
 			vtnet_vlan_tag_remove(m);
 			/*
Lines 1736-1760
 	m->m_pkthdr.flowid = rxq->vtnrx_id;
 	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
 
-	/*
-	 * BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum
-	 * distinction that Linux does. Need to reevaluate if performing
-	 * offloading for the NEEDS_CSUM case is really appropriate.
-	 */
-	if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM |
-	    VIRTIO_NET_HDR_F_DATA_VALID)) {
+	if (hdr->flags &
+	    (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) {
 		if (vtnet_rxq_csum(rxq, m, hdr) == 0)
 			rxq->vtnrx_stats.vrxs_csum++;
 		else
 			rxq->vtnrx_stats.vrxs_csum_failed++;
 	}
 
+	if (hdr->gso_size != 0) {
+		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+		case VIRTIO_NET_HDR_GSO_TCPV4:
+		case VIRTIO_NET_HDR_GSO_TCPV6:
+			m->m_pkthdr.lro_nsegs =
+			    howmany(m->m_pkthdr.len, hdr->gso_size);
+			rxq->vtnrx_stats.vrxs_host_lro++;
+			break;
+		}
+	}
+
 	rxq->vtnrx_stats.vrxs_ipackets++;
 	rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;
 
-	VTNET_RXQ_UNLOCK(rxq);
+#if defined(INET) || defined(INET6)
+	if (vtnet_software_lro(sc) && ifp->if_capenable & IFCAP_LRO) {
+		if (vtnet_lro_rx(rxq, m) == 0)
+			return;
+	}
+#endif
+
 	(*ifp->if_input)(ifp, m);
-	VTNET_RXQ_LOCK(rxq);
 }
 
 static int
Lines 1764-1783
 	struct vtnet_softc *sc;
 	struct ifnet *ifp;
 	struct virtqueue *vq;
-	struct mbuf *m;
-	struct virtio_net_hdr_mrg_rxbuf *mhdr;
-	int len, deq, nbufs, adjsz, count;
+	int deq, count;
 
 	sc = rxq->vtnrx_sc;
 	vq = rxq->vtnrx_vq;
 	ifp = sc->vtnet_ifp;
-	hdr = &lhdr;
 	deq = 0;
 	count = sc->vtnet_rx_process_limit;
 
 	VTNET_RXQ_LOCK_ASSERT(rxq);
 
+#ifdef DEV_NETMAP
+	if (netmap_rx_irq(ifp, 0, &deq))
+		return (0);
+#endif
+
 	while (count-- > 0) {
+		struct mbuf *m;
+		int len, nbufs, adjsz;
+
 		m = virtqueue_dequeue(vq, &len);
 		if (m == NULL)
 			break;
Lines 1789-1806
 			continue;
 		}
 
-		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
+		if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) {
+			struct virtio_net_hdr_mrg_rxbuf *mhdr =
+			    mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
+			nbufs = vtnet_htog16(sc, mhdr->num_buffers);
+			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+		} else if (vtnet_modern(sc)) {
+			nbufs = 1; /* num_buffers is always 1 */
+			adjsz = sizeof(struct virtio_net_hdr_v1);
+		} else {
 			nbufs = 1;
 			adjsz = sizeof(struct vtnet_rx_header);
 			/*
-			 * Account for our pad inserted between the header
-			 * and the actual start of the frame.
+			 * Account for our gap between the header and start of
+			 * data to keep the segments separated.
 			 */
 			len += VTNET_RX_HEADER_PAD;
-		} else {
-			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
-			nbufs = mhdr->num_buffers;
-			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 		}
 
 		if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
Lines 1822-1847
 		}
 
 		/*
-		 * Save copy of header before we strip it. For both mergeable
-		 * and non-mergeable, the header is at the beginning of the
-		 * mbuf data. We no longer need num_buffers, so always use a
-		 * regular header.
-		 *
-		 * BMV: Is this memcpy() expensive? We know the mbuf data is
-		 * still valid even after the m_adj().
+		 * Save an endian swapped version of the header prior to it
+		 * being stripped. The header is always at the start of the
+		 * mbuf data. num_buffers was already saved (and not needed)
+		 * so use the standard header.
 		 */
-		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
+		hdr = mtod(m, struct virtio_net_hdr *);
+		lhdr.flags = hdr->flags;
+		lhdr.gso_type = hdr->gso_type;
+		lhdr.hdr_len = vtnet_htog16(sc, hdr->hdr_len);
+		lhdr.gso_size = vtnet_htog16(sc, hdr->gso_size);
+		lhdr.csum_start = vtnet_htog16(sc, hdr->csum_start);
+		lhdr.csum_offset = vtnet_htog16(sc, hdr->csum_offset);
 		m_adj(m, adjsz);
 
-		vtnet_rxq_input(rxq, m, hdr);
-
-		/* Must recheck after dropping the Rx lock. */
-		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
-			break;
+		vtnet_rxq_input(rxq, m, &lhdr);
 	}
 
-	if (deq > 0)
+	if (deq > 0) {
+#if defined(INET) || defined(INET6)
+		tcp_lro_flush_all(&rxq->vtnrx_lro);
+#endif
 		virtqueue_notify(vq);
+	}
 
 	return (count > 0 ? 0 : EAGAIN);
 }
Lines 1870-1880
 		return;
 	}
 
-#ifdef DEV_NETMAP
-	if (netmap_rx_irq(ifp, rxq->vtnrx_id, &more) != NM_IRQ_PASS)
-		return;
-#endif /* DEV_NETMAP */
-
 	VTNET_RXQ_LOCK(rxq);
 
 again:
Lines 1894-1901
 		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
 			goto again;
 
-		VTNET_RXQ_UNLOCK(rxq);
 		rxq->vtnrx_stats.vrxs_rescheduled++;
+		VTNET_RXQ_UNLOCK(rxq);
 		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
 	} else
 		VTNET_RXQ_UNLOCK(rxq);
Lines 1925-1946
 		if (!more)
 			vtnet_rxq_disable_intr(rxq);
 		rxq->vtnrx_stats.vrxs_rescheduled++;
+		VTNET_RXQ_UNLOCK(rxq);
 		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
-	}
+	} else
+		VTNET_RXQ_UNLOCK(rxq);
+}
 
-	VTNET_RXQ_UNLOCK(rxq);
+static int
+vtnet_txq_intr_threshold(struct vtnet_txq *txq)
+{
+	struct vtnet_softc *sc;
+	int threshold;
+
+	sc = txq->vtntx_sc;
+
+	/*
+	 * The Tx interrupt is disabled until the queue free count falls
+	 * below our threshold. Completed frames are drained from the Tx
+	 * virtqueue before transmitting new frames and in the watchdog
+	 * callout, so the frequency of Tx interrupts is greatly reduced,
+	 * at the cost of not freeing mbufs as quickly as they otherwise
+	 * would be.
+	 */
+	threshold = virtqueue_size(txq->vtntx_vq) / 4;
+
+	/*
+	 * Without indirect descriptors, leave enough room for the most
+	 * segments we handle.
+	 */
+	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
+	    threshold < sc->vtnet_tx_nsegs)
+		threshold = sc->vtnet_tx_nsegs;
+
+	return (threshold);
 }
 
 static int
 vtnet_txq_below_threshold(struct vtnet_txq *txq)
 {
-	struct vtnet_softc *sc;
 	struct virtqueue *vq;
 
-	sc = txq->vtntx_sc;
 	vq = txq->vtntx_vq;
 
-	return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh);
+	return (virtqueue_nfree(vq) <= txq->vtntx_intr_threshold);
 }
 
 static int
Lines 1975-1995
 	struct virtqueue *vq;
 	struct vtnet_tx_header *txhdr;
 	int last;
-#ifdef DEV_NETMAP
-	int netmap_bufs = vtnet_netmap_queue_on(txq->vtntx_sc, NR_TX,
-						txq->vtntx_id);
-#else  /* !DEV_NETMAP */
-	int netmap_bufs = 0;
-#endif /* !DEV_NETMAP */
 
 	vq = txq->vtntx_vq;
 	last = 0;
 
 	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
-		if (!netmap_bufs) {
-			m_freem(txhdr->vth_mbuf);
-			uma_zfree(vtnet_tx_header_zone, txhdr);
-		}
+		m_freem(txhdr->vth_mbuf);
+		uma_zfree(vtnet_tx_header_zone, txhdr);
 	}
 
 	KASSERT(virtqueue_empty(vq),
Lines 1997-2008
 }
 
 /*
- * BMV: Much of this can go away once we finally have offsets in
- * the mbuf packet header. Bug andre@.
+ * BMV: This can go away once we finally have offsets in the mbuf header.
  */
 static int
-vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m,
-    int *etype, int *proto, int *start)
+vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, int *etype,
+    int *proto, int *start)
 {
 	struct vtnet_softc *sc;
 	struct ether_vlan_header *evh;
Lines 2046-2052
 		break;
 #endif
 	default:
-		sc->vtnet_stats.tx_csum_bad_ethtype++;
+		sc->vtnet_stats.tx_csum_unknown_ethtype++;
 		return (EINVAL);
 	}
 
Lines 2054-2060
 }
 
 static int
-vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
+vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int flags,
     int offset, struct virtio_net_hdr *hdr)
 {
 	static struct timeval lastecn;
Lines 2070-2085
 	} else
 		tcp = (struct tcphdr *)(m->m_data + offset);
 
-	hdr->hdr_len = offset + (tcp->th_off << 2);
-	hdr->gso_size = m->m_pkthdr.tso_segsz;
-	hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
-	    VIRTIO_NET_HDR_GSO_TCPV6;
+	hdr->hdr_len = vtnet_gtoh16(sc, offset + (tcp->th_off << 2));
+	hdr->gso_size = vtnet_gtoh16(sc, m->m_pkthdr.tso_segsz);
+	hdr->gso_type = (flags & CSUM_IP_TSO) ?
+	    VIRTIO_NET_HDR_GSO_TCPV4 : VIRTIO_NET_HDR_GSO_TCPV6;
 
-	if (tcp->th_flags & TH_CWR) {
+	if (__predict_false(tcp->th_flags & TH_CWR)) {
 		/*
-		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
-		 * ECN support is not on a per-interface basis, but globally via
-		 * the net.inet.tcp.ecn.enable sysctl knob. The default is off.
+		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In
+		 * FreeBSD, ECN support is not on a per-interface basis,
+		 * but globally via the net.inet.tcp.ecn.enable sysctl
+		 * knob. The default is off.
 		 */
 		if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
 			if (ppsratecheck(&lastecn, &curecn, 1))
Lines 2109-2138
 	if (error)
 		goto drop;
 
-	if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) ||
-	    (etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) {
-		/*
-		 * We could compare the IP protocol vs the CSUM_ flag too,
-		 * but that really should not be necessary.
-		 */
+	if (flags & (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6)) {
+		/* Sanity check the parsed mbuf matches the offload flags. */
+		if (__predict_false((flags & VTNET_CSUM_OFFLOAD &&
+		    etype != ETHERTYPE_IP) || (flags & VTNET_CSUM_OFFLOAD_IPV6
+		    && etype != ETHERTYPE_IPV6))) {
+			sc->vtnet_stats.tx_csum_proto_mismatch++;
+			goto drop;
+		}
+
 		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
-		hdr->csum_start = csum_start;
-		hdr->csum_offset = m->m_pkthdr.csum_data;
+		hdr->csum_start = vtnet_gtoh16(sc, csum_start);
+		hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data);
 		txq->vtntx_stats.vtxs_csum++;
 	}
 
-	if (flags & CSUM_TSO) {
+	if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
+		/*
+		 * Sanity check the parsed mbuf IP protocol is TCP, and
+		 * VirtIO TSO reqires the checksum offloading above.
+		 */
 		if (__predict_false(proto != IPPROTO_TCP)) {
-			/* Likely failed to correctly parse the mbuf. */
 			sc->vtnet_stats.tx_tso_not_tcp++;
 			goto drop;
+		} else if (__predict_false((hdr->flags &
+		    VIRTIO_NET_HDR_F_NEEDS_CSUM) == 0)) {
+			sc->vtnet_stats.tx_tso_without_csum++;
+			goto drop;
 		}
 
-		KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
-		    ("%s: mbuf %p TSO without checksum offload %#x",
-		    __func__, m, flags));
-
-		error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
+		error = vtnet_txq_offload_tso(txq, m, flags, csum_start, hdr);
 		if (error)
 			goto drop;
 	}
Lines 2161-2168
 
 	sglist_reset(sg);
 	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
-	KASSERT(error == 0 && sg->sg_nseg == 1,
-	    ("%s: error %d adding header to sglist", __func__, error));
+	if (error != 0 || sg->sg_nseg != 1) {
+		KASSERT(0, ("%s: cannot add header to sglist error %d nseg %d",
+		    __func__, error, sg->sg_nseg));
+		goto fail;
+	}
 
 	error = sglist_append_mbuf(sg, m);
 	if (error) {
Lines 2210-2218
 	}
 
 	/*
-	 * Always use the non-mergeable header, regardless if the feature
-	 * was negotiated. For transmit, num_buffers is always zero. The
-	 * vtnet_hdr_size is used to enqueue the correct header size.
+	 * Always use the non-mergeable header, regardless if mergable headers
+	 * were negotiated, because for transmit num_buffers is always zero.
+	 * The vtnet_hdr_size is used to enqueue the right header size segment.
 	 */
 	hdr = &txhdr->vth_uhdr.hdr;
 
Lines 2234-2244
 	}
 
 	error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
-	if (error == 0)
-		return (0);
-
 fail:
-	uma_zfree(vtnet_tx_header_zone, txhdr);
+	if (error)
+		uma_zfree(vtnet_tx_header_zone, txhdr);
 
 	return (error);
 }
Lines 2387-2393
 	sc = ifp->if_softc;
 	npairs = sc->vtnet_act_vq_pairs;
 
-	/* check if flowid is set */
 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
 		i = m->m_pkthdr.flowid % npairs;
 	else
Lines 2477-2482
 	deq = 0;
 	VTNET_TXQ_LOCK_ASSERT(txq);
 
+#ifdef DEV_NETMAP
+	if (netmap_tx_irq(txq->vtntx_sc->vtnet_ifp, txq->vtntx_id)) {
+		virtqueue_disable_intr(vq); // XXX luigi
+		return (0); // XXX or 1 ?
+	}
+#endif
+
 	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
 		m = txhdr->vth_mbuf;
 		deq++;
Lines 2518-2528
 		return;
 	}
 
-#ifdef DEV_NETMAP
-	if (netmap_tx_irq(ifp, txq->vtntx_id) != NM_IRQ_PASS)
-		return;
-#endif /* DEV_NETMAP */
-
 	VTNET_TXQ_LOCK(txq);
 
 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
Lines 2709-2715
 	 * Most drivers just ignore the return value - it only fails
 	 * with ENOMEM so an error is not likely.
 	 */
-	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+	for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
 		rxq = &sc->vtnet_rxqs[i];
 		error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
 		    "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
Lines 2739-2745
 		rxq = &sc->vtnet_rxqs[i];
 		if (rxq->vtnrx_tq != NULL) {
 			taskqueue_free(rxq->vtnrx_tq);
-			rxq->vtnrx_tq = NULL;
+			rxq->vtnrx_vq = NULL;
 		}
 
 		txq = &sc->vtnet_txqs[i];
Lines 2779-2785
 	struct vtnet_txq *txq;
 	int i;
 
-	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
+#ifdef DEV_NETMAP
+	if (nm_native_on(NA(sc->vtnet_ifp)))
+		return;
+#endif
+
+	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
 		rxq = &sc->vtnet_rxqs[i];
 		vtnet_rxq_free_mbufs(rxq);
 
Lines 2795-2805
 	struct vtnet_txq *txq;
 	int i;
 
+	VTNET_CORE_LOCK_ASSERT(sc);
+
 	/*
 	 * Lock and unlock the per-queue mutex so we known the stop
 	 * state is visible. Doing only the active queues should be
 	 * sufficient, but it does not cost much extra to do all the
-	 * queues. Note we hold the core mutex here too.
+	 * queues.
 	 */
 	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
 		rxq = &sc->vtnet_rxqs[i];
Lines 2838-2845
 	virtio_stop(dev);
 	vtnet_stop_rendezvous(sc);
 
-	/* Free any mbufs left in the virtqueues. */
 	vtnet_drain_rxtx_queues(sc);
+	sc->vtnet_act_vq_pairs = 1;
 }
 
 static int
Lines 2848-2898
 	device_t dev;
 	struct ifnet *ifp;
 	uint64_t features;
-	int mask, error;
+	int error;
 
 	dev = sc->vtnet_dev;
 	ifp = sc->vtnet_ifp;
-	features = sc->vtnet_features;
+	features = sc->vtnet_negotiated_features;
 
-	mask = 0;
-#if defined(INET)
-	mask |= IFCAP_RXCSUM;
-#endif
-#if defined (INET6)
-	mask |= IFCAP_RXCSUM_IPV6;
-#endif
-
 	/*
 	 * Re-negotiate with the host, removing any disabled receive
 	 * features. Transmit features are disabled only on our side
 	 * via if_capenable and if_hwassist.
 	 */
 
-	if (ifp->if_capabilities & mask) {
-		/*
-		 * We require both IPv4 and IPv6 offloading to be enabled
-		 * in order to negotiated it: VirtIO does not distinguish
-		 * between the two.
-		 */
-		if ((ifp->if_capenable & mask) != mask)
-			features &= ~VIRTIO_NET_F_GUEST_CSUM;
-	}
+	if ((ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0)
+		features &= ~(VIRTIO_NET_F_GUEST_CSUM | VTNET_LRO_FEATURES);
 
-	if (ifp->if_capabilities & IFCAP_LRO) {
-		if ((ifp->if_capenable & IFCAP_LRO) == 0)
-			features &= ~VTNET_LRO_FEATURES;
-	}
+	if ((ifp->if_capenable & IFCAP_LRO) == 0)
+		features &= ~VTNET_LRO_FEATURES;
 
-	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
-		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
-			features &= ~VIRTIO_NET_F_CTRL_VLAN;
-	}
+	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
+		features &= ~VIRTIO_NET_F_CTRL_VLAN;
 
 	error = virtio_reinit(dev, features);
-	if (error)
+	if (error) {
 		device_printf(dev, "virtio reinit error %d\n", error);
+		return (error);
+	}
 
-	return (error);
+	sc->vtnet_features = features;
+	virtio_reinit_complete(dev);
+
+	return (0);
 }
 
 static void
Lines 2903-2911
 	ifp = sc->vtnet_ifp;
 
 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
-		/* Restore promiscuous and all-multicast modes. */
 		vtnet_rx_filter(sc);
-		/* Restore filtered MAC addresses. */
 		vtnet_rx_filter_mac(sc);
 	}
 
Lines 2917-2948
 vtnet_init_rx_queues(struct vtnet_softc *sc)
 {
 	device_t dev;
+	struct ifnet *ifp;
 	struct vtnet_rxq *rxq;
-	int i, clsize, error;
+	int i, clustersz, error;
 
 	dev = sc->vtnet_dev;
+	ifp = sc->vtnet_ifp;
 
-	/*
-	 * Use the new cluster size if one has been set (via a MTU
-	 * change). Otherwise, use the standard 2K clusters.
-	 *
-	 * BMV: It might make sense to use page sized clusters as
-	 * the default (depending on the features negotiated).
-	 */
-	if (sc->vtnet_rx_new_clsize != 0) {
-		clsize = sc->vtnet_rx_new_clsize;
-		sc->vtnet_rx_new_clsize = 0;
+	clustersz = vtnet_rx_cluster_size(sc, ifp->if_mtu);
+	sc->vtnet_rx_clustersz = clustersz;
+
+	if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) {
+		sc->vtnet_rx_nmbufs = howmany(sizeof(struct vtnet_rx_header) +
+		    VTNET_MAX_RX_SIZE, clustersz);
+		KASSERT(sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
+		    ("%s: too many rx mbufs %d for %d segments", __func__,
+		    sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
 	} else
-		clsize = MCLBYTES;
+		sc->vtnet_rx_nmbufs = 1;
 
-	sc->vtnet_rx_clsize = clsize;
-	sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize);
+#ifdef DEV_NETMAP
+	if (vtnet_netmap_init_rx_buffers(sc))
+		return (0);
+#endif
 
-	KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS ||
-	    sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
-	    ("%s: too many rx mbufs %d for %d segments", __func__,
-	    sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
-
 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
 		rxq = &sc->vtnet_rxqs[i];
 
Lines 2952-2959
 		VTNET_RXQ_UNLOCK(rxq);
 
 		if (error) {
-			device_printf(dev,
-			    "cannot allocate mbufs for Rx queue %d\n", i);
+			device_printf(dev, "cannot populate Rx queue %d\n", i);
 			return (error);
 		}
 	}
Lines 2970-2975
 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
 		txq = &sc->vtnet_txqs[i];
 		txq->vtntx_watchdog = 0;
+		txq->vtntx_intr_threshold = vtnet_txq_intr_threshold(txq);
 	}
 
 	return (0);
Lines 2999-3034
 
 	dev = sc->vtnet_dev;
 
-	if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) {
+	if ((sc->vtnet_flags & VTNET_FLAG_MQ) == 0) {
 		sc->vtnet_act_vq_pairs = 1;
 		return;
 	}
 
-	npairs = sc->vtnet_requested_vq_pairs;
+	npairs = sc->vtnet_req_vq_pairs;
 
 	if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
-		device_printf(dev,
-		    "cannot set active queue pairs to %d\n", npairs);
+		device_printf(dev, "cannot set active queue pairs to %d, "
+		    "falling back to 1 queue pair\n", npairs);
 		npairs = 1;
 	}
 
 	sc->vtnet_act_vq_pairs = npairs;
 }
 
+static void
+vtnet_update_rx_offloads(struct vtnet_softc *sc)
+{
+	struct ifnet *ifp;
+	uint64_t features;
+	int error;
+
+	ifp = sc->vtnet_ifp;
+	features = sc->vtnet_features;
+
+	VTNET_CORE_LOCK_ASSERT(sc);
+
+	if (ifp->if_capabilities & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
+		if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
+			features |= VIRTIO_NET_F_GUEST_CSUM;
+		else
+			features &= ~VIRTIO_NET_F_GUEST_CSUM;
+	}
+
+	if (ifp->if_capabilities & IFCAP_LRO && !vtnet_software_lro(sc)) {
+		if (ifp->if_capenable & IFCAP_LRO)
+			features |= VTNET_LRO_FEATURES;
+		else
+			features &= ~VTNET_LRO_FEATURES;
+	}
+
+	error = vtnet_ctrl_guest_offloads(sc,
+	    features & (VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 |
+		        VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN  |
+			VIRTIO_NET_F_GUEST_UFO));
+	if (error) {
+		device_printf(sc->vtnet_dev,
+		    "%s: cannot update Rx features\n", __func__);
+		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+			vtnet_init_locked(sc);
+		}
+	} else
+		sc->vtnet_features = features;
+}
+
 static int
 vtnet_reinit(struct vtnet_softc *sc)
 {
+	device_t dev;
 	struct ifnet *ifp;
 	int error;
 
+	dev = sc->vtnet_dev;
 	ifp = sc->vtnet_ifp;
 
-	/* Use the current MAC address. */
 	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
-	vtnet_set_hwaddr(sc);
 
+	error = vtnet_virtio_reinit(sc);
+	if (error)
+		return (error);
+
+	vtnet_set_macaddr(sc);
 	vtnet_set_active_vq_pairs(sc);
 
+	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
+		vtnet_init_rx_filters(sc);
+
 	ifp->if_hwassist = 0;
 	if (ifp->if_capenable & IFCAP_TXCSUM)
 		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
Lines 3068-3089
 
 	vtnet_stop(sc);
 
-	/* Reinitialize with the host. */
-	if (vtnet_virtio_reinit(sc) != 0)
-		goto fail;
+	if (vtnet_reinit(sc) != 0) {
+		vtnet_stop(sc);
+		return;
+	}
 
-	if (vtnet_reinit(sc) != 0)
-		goto fail;
-
-	virtio_reinit_complete(dev);
-
+	ifp->if_drv_flags |= IFF_DRV_RUNNING;
 	vtnet_update_link_status(sc);
+	vtnet_enable_interrupts(sc);
 	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
-
-	return;
-
-fail:
-	vtnet_stop(sc);
 }
 
 static void
Lines 3093-3098
 
 	sc = xsc;
 
+#ifdef DEV_NETMAP
+	if (!NA(sc->vtnet_ifp)) {
+		D("try to attach again");
+		vtnet_netmap_attach(sc);
+	}
+#endif
+
 	VTNET_CORE_LOCK(sc);
 	vtnet_init_locked(sc);
 	VTNET_CORE_UNLOCK(sc);
Lines 3101-3116
 static void
 vtnet_free_ctrl_vq(struct vtnet_softc *sc)
 {
-	struct virtqueue *vq;
 
-	vq = sc->vtnet_ctrl_vq;
-
 	/*
 	 * The control virtqueue is only polled and therefore it should
 	 * already be empty.
 	 */
-	KASSERT(virtqueue_empty(vq),
-	    ("%s: ctrl vq %p not empty", __func__, vq));
+	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
+	    ("%s: ctrl vq %p not empty", __func__, sc->vtnet_ctrl_vq));
 }
 
 static void
Lines 3121-3167
 
 	vq = sc->vtnet_ctrl_vq;
 
+	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ);
 	VTNET_CORE_LOCK_ASSERT(sc);
-	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
-	    ("%s: CTRL_VQ feature not negotiated", __func__));
 
 	if (!virtqueue_empty(vq))
 		return;
-	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
-		return;
 
 	/*
-	 * Poll for the response, but the command is likely already
-	 * done when we return from the notify.
+	 * Poll for the response, but the command is likely completed before
+	 * returning from the notify.
 	 */
-	virtqueue_notify(vq);
-	virtqueue_poll(vq, NULL);
+	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) == 0)  {
+		virtqueue_notify(vq);
+		virtqueue_poll(vq, NULL);
+	}
 }
 
 static int
 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
 {
-	struct virtio_net_ctrl_hdr hdr __aligned(2);
 	struct sglist_seg segs[3];
 	struct sglist sg;
-	uint8_t ack;
+	struct {
+		struct virtio_net_ctrl_hdr hdr __aligned(2);
+		uint8_t pad1;
+		uint8_t addr[ETHER_ADDR_LEN] __aligned(8);
+		uint8_t pad2;
+		uint8_t ack;
+	} s;
 	int error;
 
-	hdr.class = VIRTIO_NET_CTRL_MAC;
-	hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
-	ack = VIRTIO_NET_ERR;
+	error = 0;
+	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_MAC);
 
-	sglist_init(&sg, 3, segs);
-	error = 0;
-	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
-	error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN);
-	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
-	KASSERT(error == 0 && sg.sg_nseg == 3,
-	    ("%s: error %d adding set MAC msg to sglist", __func__, error));
+	s.hdr.class = VIRTIO_NET_CTRL_MAC;
+	s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
+	bcopy(hwaddr, &s.addr[0], ETHER_ADDR_LEN);
+	s.ack = VIRTIO_NET_ERR;
 
-	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
+	sglist_init(&sg, nitems(segs), segs);
+	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
+	error |= sglist_append(&sg, &s.addr[0], ETHER_ADDR_LEN);
+	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
+	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
 
-	return (ack == VIRTIO_NET_OK ? 0 : EIO);
+	if (error == 0)
+		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
+
+	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
+}
+
+static int
+vtnet_ctrl_guest_offloads(struct vtnet_softc *sc, uint64_t offloads)
+{
+	struct sglist_seg segs[3];
+	struct sglist sg;
+	struct {
+		struct virtio_net_ctrl_hdr hdr __aligned(2);
+		uint8_t pad1;
+		uint64_t offloads __aligned(8);
+		uint8_t pad2;
+		uint8_t ack;
+	} s;
+	int error;
+
+	error = 0;
+	MPASS(sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
+
+	s.hdr.class = VIRTIO_NET_CTRL_GUEST_OFFLOADS;
+	s.hdr.cmd = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET;
+	s.offloads = vtnet_gtoh64(sc, offloads);
+	s.ack = VIRTIO_NET_ERR;
+
+	sglist_init(&sg, nitems(segs), segs);
+	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
+	error |= sglist_append(&sg, &s.offloads, sizeof(uint64_t));
+	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
+	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
+
+	if (error == 0)
+		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
+
+	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
 }
 
 static int
Lines 3170-3232
 	struct sglist_seg segs[3];
 	struct sglist sg;
 	struct {
-		struct virtio_net_ctrl_hdr hdr;
+		struct virtio_net_ctrl_hdr hdr __aligned(2);
 		uint8_t pad1;
-		struct virtio_net_ctrl_mq mq;
+		struct virtio_net_ctrl_mq mq __aligned(2);
 		uint8_t pad2;
 		uint8_t ack;
-	} s __aligned(2);
+	} s;
 	int error;
 
+	error = 0;
+	MPASS(sc->vtnet_flags & VTNET_FLAG_MQ);
+
 	s.hdr.class = VIRTIO_NET_CTRL_MQ;
 	s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
-	s.mq.virtqueue_pairs = npairs;
+	s.mq.virtqueue_pairs = vtnet_gtoh16(sc, npairs);
 	s.ack = VIRTIO_NET_ERR;
 
-	sglist_init(&sg, 3, segs);
-	error = 0;
+	sglist_init(&sg, nitems(segs), segs);
 	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
 	error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
-	KASSERT(error == 0 && sg.sg_nseg == 3,
-	    ("%s: error %d adding MQ message to sglist", __func__, error));
+	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
 
-	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
+	if (error == 0)
+		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
 
 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
 }
 
 static int
-vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
+vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, uint8_t cmd, int on)
 {
 	struct sglist_seg segs[3];
 	struct sglist sg;
 	struct {
-		struct virtio_net_ctrl_hdr hdr;
+		struct virtio_net_ctrl_hdr hdr __aligned(2);
 		uint8_t pad1;
 		uint8_t onoff;
 		uint8_t pad2;
 		uint8_t ack;
-	} s __aligned(2);
+	} s;
 	int error;
 
-	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
-	    ("%s: CTRL_RX feature not negotiated", __func__));
+	error = 0;
+	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX);
 
 	s.hdr.class = VIRTIO_NET_CTRL_RX;
 	s.hdr.cmd = cmd;
 	s.onoff = !!on;
 	s.ack = VIRTIO_NET_ERR;
 
-	sglist_init(&sg, 3, segs);
-	error = 0;
+	sglist_init(&sg, nitems(segs), segs);
 	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
 	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
-	KASSERT(error == 0 && sg.sg_nseg == 3,
-	    ("%s: error %d adding Rx message to sglist", __func__, error));
+	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
 
-	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
+	if (error == 0)
+		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
 
 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
 }
Lines 3234-3273
 static int
 vtnet_set_promisc(struct vtnet_softc *sc, int on)
 {
-
 	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
 }
 
 static int
 vtnet_set_allmulti(struct vtnet_softc *sc, int on)
 {
-
 	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
 }
 
-/*
- * The device defaults to promiscuous mode for backwards compatibility.
- * Turn it off at attach time if possible.
- */
 static void
-vtnet_attach_disable_promisc(struct vtnet_softc *sc)
-{
-	struct ifnet *ifp;
-
-	ifp = sc->vtnet_ifp;
-
-	VTNET_CORE_LOCK(sc);
-	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) {
-		ifp->if_flags |= IFF_PROMISC;
-	} else if (vtnet_set_promisc(sc, 0) != 0) {
-		ifp->if_flags |= IFF_PROMISC;
-		device_printf(sc->vtnet_dev,
-		    "cannot disable default promiscuous mode\n");
-	}
-	VTNET_CORE_UNLOCK(sc);
-}
-
-static void
 vtnet_rx_filter(struct vtnet_softc *sc)
 {
 	device_t dev;
Lines 3278-3290
 
 	VTNET_CORE_LOCK_ASSERT(sc);
 
-	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
+	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) {
 		device_printf(dev, "cannot %s promiscuous mode\n",
 		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
+	}
 
-	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
+	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) {
 		device_printf(dev, "cannot %s all-multicast mode\n",
 		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
+	}
 }
 
 static void
Lines 3302-3315
 
 	ifp = sc->vtnet_ifp;
 	filter = sc->vtnet_mac_filter;
+
 	ucnt = 0;
 	mcnt = 0;
 	promisc = 0;
 	allmulti = 0;
+	error = 0;
 
+	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX);
 	VTNET_CORE_LOCK_ASSERT(sc);
-	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
-	    ("%s: CTRL_RX feature not negotiated", __func__));
 
 	/* Unicast MAC addresses: */
 	if_addr_rlock(ifp);
3330
	}
3668
	}
3331
	if_addr_runlock(ifp);
3669
	if_addr_runlock(ifp);
3332
3670
3333
	if (promisc != 0) {
3334
		filter->vmf_unicast.nentries = 0;
3335
		if_printf(ifp, "more than %d MAC addresses assigned, "
3336
		    "falling back to promiscuous mode\n",
3337
		    VTNET_MAX_MAC_ENTRIES);
3338
	} else
3339
		filter->vmf_unicast.nentries = ucnt;
3340
3341
	/* Multicast MAC addresses: */
3671
	/* Multicast MAC addresses: */
3342
	if_maddr_rlock(ifp);
3672
	if_maddr_rlock(ifp);
3343
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3673
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
Lines 3354-3387
 	}
 	if_maddr_runlock(ifp);
 
+	if (promisc != 0) {
+		if_printf(ifp, "cannot filter more than %d MAC addresses, "
+		    "falling back to promiscuous mode\n",
+		    VTNET_MAX_MAC_ENTRIES);
+		ucnt = 0;
+	}
 	if (allmulti != 0) {
-		filter->vmf_multicast.nentries = 0;
-		if_printf(ifp, "more than %d multicast MAC addresses "
-		    "assigned, falling back to all-multicast mode\n",
+		if_printf(ifp, "cannot filter more than %d multicast MAC "
+		    "addresses, falling back to all-multicast mode\n",
 		    VTNET_MAX_MAC_ENTRIES);
-	} else
-		filter->vmf_multicast.nentries = mcnt;
+		mcnt = 0;
+	}
 
 	if (promisc != 0 && allmulti != 0)
 		goto out;
 
+	filter->vmf_unicast.nentries = vtnet_gtoh32(sc, ucnt);
+	filter->vmf_multicast.nentries = vtnet_gtoh32(sc, mcnt);
+
 	hdr.class = VIRTIO_NET_CTRL_MAC;
 	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
 	ack = VIRTIO_NET_ERR;
 
-	sglist_init(&sg, 4, segs);
-	error = 0;
+	sglist_init(&sg, nitems(segs), segs);
 	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
 	error |= sglist_append(&sg, &filter->vmf_unicast,
-	    sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
+	    sizeof(uint32_t) + ucnt * ETHER_ADDR_LEN);
 	error |= sglist_append(&sg, &filter->vmf_multicast,
-	    sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
+	    sizeof(uint32_t) + mcnt * ETHER_ADDR_LEN);
 	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
-	KASSERT(error == 0 && sg.sg_nseg == 4,
-	    ("%s: error %d adding MAC filter msg to sglist", __func__, error));
+	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
 
-	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
-
+	if (error == 0)
+		vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
 	if (ack != VIRTIO_NET_OK)
 		if_printf(ifp, "error setting host MAC filter table\n");
 
Lines 3398-3425
 	struct sglist_seg segs[3];
 	struct sglist sg;
 	struct {
-		struct virtio_net_ctrl_hdr hdr;
+		struct virtio_net_ctrl_hdr hdr __aligned(2);
 		uint8_t pad1;
-		uint16_t tag;
+		uint16_t tag __aligned(2);
 		uint8_t pad2;
 		uint8_t ack;
-	} s __aligned(2);
+	} s;
 	int error;
 
+	error = 0;
+	MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER);
+
 	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
 	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
-	s.tag = tag;
+	s.tag = vtnet_gtoh16(sc, tag);
 	s.ack = VIRTIO_NET_ERR;
 
-	sglist_init(&sg, 3, segs);
-	error = 0;
+	sglist_init(&sg, nitems(segs), segs);
 	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
 	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
-	KASSERT(error == 0 && sg.sg_nseg == 3,
-	    ("%s: error %d adding VLAN message to sglist", __func__, error));
+	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
 
-	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
+	if (error == 0)
+		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
 
 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
 }
Lines 3427-3439
 static void
 vtnet_rx_filter_vlan(struct vtnet_softc *sc)
 {
+	int i, bit;
 	uint32_t w;
 	uint16_t tag;
-	int i, bit;
 
+	MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER);
 	VTNET_CORE_LOCK_ASSERT(sc);
-	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
-	    ("%s: VLAN_FILTER feature not negotiated", __func__));
 
 	/* Enable the filter for each configured VLAN. */
 	for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
Lines 3502-3522
 	vtnet_update_vlan_filter(arg, 0, tag);
 }
 
+static void
+vtnet_update_speed_duplex(struct vtnet_softc *sc)
+{
+	struct ifnet *ifp;
+	uint32_t speed;
+
+	ifp = sc->vtnet_ifp;
+
+	if ((sc->vtnet_features & VIRTIO_NET_F_SPEED_DUPLEX) == 0)
+		return;
+
+	/* BMV: Ignore duplex. */
+	speed = virtio_read_dev_config_4(sc->vtnet_dev,
+	    offsetof(struct virtio_net_config, speed));
+	if (speed != -1)
+		ifp->if_baudrate = IF_Mbps(speed);
+}
+
 static int
 vtnet_is_link_up(struct vtnet_softc *sc)
 {
-	device_t dev;
-	struct ifnet *ifp;
 	uint16_t status;
 
-	dev = sc->vtnet_dev;
-	ifp = sc->vtnet_ifp;
+	if ((sc->vtnet_features & VIRTIO_NET_F_STATUS) == 0)
+		return (1);
 
-	if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0)
-		status = VIRTIO_NET_S_LINK_UP;
-	else
-		status = virtio_read_dev_config_2(dev,
-		    offsetof(struct virtio_net_config, status));
+	status = virtio_read_dev_config_2(sc->vtnet_dev,
+	    offsetof(struct virtio_net_config, status));
 
 	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
 }
Lines 3528-3539
 	int link;
 
 	ifp = sc->vtnet_ifp;
-
 	VTNET_CORE_LOCK_ASSERT(sc);
 	link = vtnet_is_link_up(sc);
 
 	/* Notify if the link status has changed. */
 	if (link != 0 && sc->vtnet_link_active == 0) {
+		vtnet_update_speed_duplex(sc);
 		sc->vtnet_link_active = 1;
 		if_link_state_change(ifp, LINK_STATE_UP);
 	} else if (link == 0 && sc->vtnet_link_active != 0) {
Lines 3545-3560
 static int
 vtnet_ifmedia_upd(struct ifnet *ifp)
 {
-	struct vtnet_softc *sc;
-	struct ifmedia *ifm;
-
-	sc = ifp->if_softc;
-	ifm = &sc->vtnet_media;
-
-	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
-		return (EINVAL);
-
-	return (0);
+	return (EOPNOTSUPP);
 }
 
 static void
Lines 3570-3594
 	VTNET_CORE_LOCK(sc);
 	if (vtnet_is_link_up(sc) != 0) {
 		ifmr->ifm_status |= IFM_ACTIVE;
-		ifmr->ifm_active |= VTNET_MEDIATYPE;
+		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
 	} else
 		ifmr->ifm_active |= IFM_NONE;
 	VTNET_CORE_UNLOCK(sc);
 }
 
 static void
-vtnet_set_hwaddr(struct vtnet_softc *sc)
+vtnet_get_macaddr(struct vtnet_softc *sc)
 {
+
+	if (sc->vtnet_flags & VTNET_FLAG_MAC) {
+		virtio_read_device_config_array(sc->vtnet_dev,
+		    offsetof(struct virtio_net_config, mac),
+		    &sc->vtnet_hwaddr[0], sizeof(uint8_t), ETHER_ADDR_LEN);
+	} else {
+		/* Generate a random locally administered unicast address. */
+		sc->vtnet_hwaddr[0] = 0xB2;
+		arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
+	}
+}
+
+static void
+vtnet_set_macaddr(struct vtnet_softc *sc)
+{
 	device_t dev;
-	int i;
+	int error;
 
 	dev = sc->vtnet_dev;
 
 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
-		if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
+		error = vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr);
+		if (error)
 			device_printf(dev, "unable to set MAC address\n");
-	} else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
-		for (i = 0; i < ETHER_ADDR_LEN; i++) {
+		return;
+	}
+
+	/* MAC in config is read-only in modern VirtIO. */
+	if (!vtnet_modern(sc) && sc->vtnet_flags & VTNET_FLAG_MAC) {
+		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
 			virtio_write_dev_config_1(dev,
 			    offsetof(struct virtio_net_config, mac) + i,
 			    sc->vtnet_hwaddr[i]);
Lines 3597-3627
 }
 
 static void
-vtnet_get_hwaddr(struct vtnet_softc *sc)
+vtnet_attached_set_macaddr(struct vtnet_softc *sc)
 {
-	device_t dev;
-	int i;
 
-	dev = sc->vtnet_dev;
-
-	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
-		/*
-		 * Generate a random locally administered unicast address.
-		 *
-		 * It would be nice to generate the same MAC address across
-		 * reboots, but it seems all the hosts currently available
-		 * support the MAC feature, so this isn't too important.
-		 */
-		sc->vtnet_hwaddr[0] = 0xB2;
-		arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
-		vtnet_set_hwaddr(sc);
-		return;
-	}
-
-	for (i = 0; i < ETHER_ADDR_LEN; i++) {
-		sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev,
-		    offsetof(struct virtio_net_config, mac) + i);
-	}
+	/* Assign MAC address if it was generated. */
+	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0)
+		vtnet_set_macaddr(sc);
 }
 
 static void
Lines 3652-3687 Link Here
3652
}
3994
}
3653
3995
3654
static void
3996
static void
3655
vtnet_set_tx_intr_threshold(struct vtnet_softc *sc)
3656
{
3657
	int size, thresh;
3658
3659
	size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq);
3660
3661
	/*
3662
	 * The Tx interrupt is disabled until the queue free count falls
3663
	 * below our threshold. Completed frames are drained from the Tx
3664
	 * virtqueue before transmitting new frames and in the watchdog
3665
	 * callout, so the frequency of Tx interrupts is greatly reduced,
3666
	 * at the cost of not freeing mbufs as quickly as they otherwise
3667
	 * would be.
3668
	 *
3669
	 * N.B. We assume all the Tx queues are the same size.
3670
	 */
3671
	thresh = size / 4;
3672
3673
	/*
3674
	 * Without indirect descriptors, leave enough room for the most
3675
	 * segments we handle.
3676
	 */
3677
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
3678
	    thresh < sc->vtnet_tx_nsegs)
3679
		thresh = sc->vtnet_tx_nsegs;
3680
3681
	sc->vtnet_tx_intr_thresh = thresh;
3682
}
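For illustration only, not part of the patch: the threshold computed by the helper removed above moves to a per-queue field (vtntx_intr_threshold, added to struct vtnet_txq later in this patch). A queue would re-arm its interrupt only once free descriptors run low, along these lines (assuming the usual virtqueue_nfree() semantics of returning the current free slot count):

static int
example_txq_needs_intr(struct vtnet_txq *txq)
{
	return (virtqueue_nfree(txq->vtntx_vq) <= txq->vtntx_intr_threshold);
}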
3683
3684
static void
3685
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
3997
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
3686
    struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
3998
    struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
3687
{
3999
{
Lines 3709-3714 Link Here
3709
	    &stats->vrxs_csum, "Receive checksum offloaded");
4021
	    &stats->vrxs_csum, "Receive checksum offloaded");
3710
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
4022
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
3711
	    &stats->vrxs_csum_failed, "Receive checksum offload failed");
4023
	    &stats->vrxs_csum_failed, "Receive checksum offload failed");
4024
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", CTLFLAG_RD,
4025
	    &stats->vrxs_host_lro, "Receive host segmentation offloaded");
3712
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
4026
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3713
	    &stats->vrxs_rescheduled,
4027
	    &stats->vrxs_rescheduled,
3714
	    "Receive interrupt handler rescheduled");
4028
	    "Receive interrupt handler rescheduled");
Lines 3739-3745 Link Here
3739
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
4053
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3740
	    &stats->vtxs_csum, "Transmit checksum offloaded");
4054
	    &stats->vtxs_csum, "Transmit checksum offloaded");
3741
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
4055
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
3742
	    &stats->vtxs_tso, "Transmit segmentation offloaded");
4056
	    &stats->vtxs_tso, "Transmit TCP segmentation offloaded");
3743
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
4057
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3744
	    &stats->vtxs_rescheduled,
4058
	    &stats->vtxs_rescheduled,
3745
	    "Transmit interrupt handler rescheduled");
4059
	    "Transmit interrupt handler rescheduled");
Lines 3759-3765 Link Here
3759
	tree = device_get_sysctl_tree(dev);
4073
	tree = device_get_sysctl_tree(dev);
3760
	child = SYSCTL_CHILDREN(tree);
4074
	child = SYSCTL_CHILDREN(tree);
3761
4075
3762
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
4076
	for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
3763
		vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
4077
		vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
3764
		vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
4078
		vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
3765
	}
4079
	}
Lines 3819-3834 Link Here
3819
	    CTLFLAG_RD, &stats->rx_task_rescheduled,
4133
	    CTLFLAG_RD, &stats->rx_task_rescheduled,
3820
	    "Times the receive interrupt task rescheduled itself");
4134
	    "Times the receive interrupt task rescheduled itself");
3821
4135
3822
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
4136
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype",
3823
	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
4137
	    CTLFLAG_RD, &stats->tx_csum_unknown_ethtype,
3824
	    "Aborted transmit of checksum offloaded buffer with unknown "
4138
	    "Aborted transmit of checksum offloaded buffer with unknown "
3825
	    "Ethernet type");
4139
	    "Ethernet type");
3826
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
4140
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch",
3827
	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
4141
	    CTLFLAG_RD, &stats->tx_csum_proto_mismatch,
3828
	    "Aborted transmit of TSO buffer with unknown Ethernet type");
4142
	    "Aborted transmit of checksum offloaded buffer because mismatched "
4143
	    "protocols");
3829
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
4144
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
3830
	    CTLFLAG_RD, &stats->tx_tso_not_tcp,
4145
	    CTLFLAG_RD, &stats->tx_tso_not_tcp,
3831
	    "Aborted transmit of TSO buffer with non TCP protocol");
4146
	    "Aborted transmit of TSO buffer with non TCP protocol");
4147
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum",
4148
	    CTLFLAG_RD, &stats->tx_tso_without_csum,
4149
	    "Aborted transmit of TSO buffer without TCP checksum offload");
3832
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
4150
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
3833
	    CTLFLAG_RD, &stats->tx_defragged,
4151
	    CTLFLAG_RD, &stats->tx_defragged,
3834
	    "Transmit mbufs defragged");
4152
	    "Transmit mbufs defragged");
Lines 3861-3870 Link Here
3861
4179
3862
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
4180
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
3863
	    CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
4181
	    CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
3864
	    "Maximum number of supported virtqueue pairs");
4182
	    "Number of maximum supported virtqueue pairs");
3865
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "requested_vq_pairs",
4183
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "req_vq_pairs",
3866
	    CTLFLAG_RD, &sc->vtnet_requested_vq_pairs, 0,
4184
	    CTLFLAG_RD, &sc->vtnet_req_vq_pairs, 0,
3867
	    "Requested number of virtqueue pairs");
4185
	    "Number of requested virtqueue pairs");
3868
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
4186
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
3869
	    CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
4187
	    CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
3870
	    "Number of active virtqueue pairs");
4188
	    "Number of active virtqueue pairs");
Lines 3872-3877 Link Here
3872
	vtnet_setup_stat_sysctl(ctx, child, sc);
4190
	vtnet_setup_stat_sysctl(ctx, child, sc);
3873
}
4191
}
3874
4192
4193
static void
4194
vtnet_load_tunables(struct vtnet_softc *sc)
4195
{
4196
4197
	sc->vtnet_lro_entry_count = vtnet_tunable_int(sc,
4198
	    "lro_entry_count", vtnet_lro_entry_count);
4199
	if (sc->vtnet_lro_entry_count < TCP_LRO_ENTRIES)
4200
		sc->vtnet_lro_entry_count = TCP_LRO_ENTRIES;
4201
4202
	sc->vtnet_lro_mbufq_depth = vtnet_tunable_int(sc,
4203
	    "lro_mbufq_depeth", vtnet_lro_mbufq_depth);
4204
}
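For illustration only, not part of the patch: vtnet_tunable_int() is the driver's existing helper that consults a per-device tunable before a global one, so the new knobs would be set from loader.conf(5) along these (hypothetical) lines:

hw.vtnet.lro_entry_count="128"		# all vtnet devices
hw.vtnet.0.lro_mbufq_depth="64"		# vtnet0 only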
4205
3875
static int
4206
static int
3876
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
4207
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
3877
{
4208
{
Lines 3913-3922 Link Here
3913
static void
4244
static void
3914
vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
4245
vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
3915
{
4246
{
4247
	struct vtnet_rxq *rxq;
3916
	int i;
4248
	int i;
3917
4249
3918
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4250
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3919
		vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]);
4251
		rxq = &sc->vtnet_rxqs[i];
4252
		if (vtnet_rxq_enable_intr(rxq) != 0)
4253
			taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
4254
	}
3920
}
4255
}
3921
4256
3922
static void
4257
static void
Lines 3941-3947 Link Here
3941
{
4276
{
3942
	int i;
4277
	int i;
3943
4278
3944
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4279
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
3945
		vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
4280
		vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
3946
}
4281
}
3947
4282
Lines 3950-3956 Link Here
3950
{
4285
{
3951
	int i;
4286
	int i;
3952
4287
3953
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4288
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
3954
		vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
4289
		vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
3955
}
4290
}
3956
4291
Lines 3983-3991 Link Here
3983
	sc = if_getsoftc(ifp);
4318
	sc = if_getsoftc(ifp);
3984
4319
3985
	VTNET_CORE_LOCK(sc);
4320
	VTNET_CORE_LOCK(sc);
3986
	*nrxr = sc->vtnet_max_vq_pairs;
4321
	*nrxr = sc->vtnet_req_vq_pairs;
3987
	*ncl = NETDUMP_MAX_IN_FLIGHT;
4322
	*ncl = NETDUMP_MAX_IN_FLIGHT;
3988
	*clsize = sc->vtnet_rx_clsize;
4323
	*clsize = sc->vtnet_rx_clustersz;
3989
	VTNET_CORE_UNLOCK(sc);
4324
	VTNET_CORE_UNLOCK(sc);
3990
4325
3991
	/*
4326
	/*
Lines 4034-4040 Link Here
4034
		return (EBUSY);
4369
		return (EBUSY);
4035
4370
4036
	(void)vtnet_txq_eof(&sc->vtnet_txqs[0]);
4371
	(void)vtnet_txq_eof(&sc->vtnet_txqs[0]);
4037
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
4372
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4038
		(void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]);
4373
		(void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]);
4039
	return (0);
4374
	return (0);
4040
}
4375
}
(-)sys/dev/virtio/network/if_vtnetvar.h (-51 / +73 lines)
Lines 43-51 Link Here
43
	uint64_t	rx_csum_bad_ipproto;
43
	uint64_t	rx_csum_bad_ipproto;
44
	uint64_t	rx_csum_bad_offset;
44
	uint64_t	rx_csum_bad_offset;
45
	uint64_t	rx_csum_bad_proto;
45
	uint64_t	rx_csum_bad_proto;
46
	uint64_t	tx_csum_bad_ethtype;
46
	uint64_t	tx_csum_unknown_ethtype;
47
	uint64_t	tx_tso_bad_ethtype;
47
	uint64_t	tx_csum_proto_mismatch;
48
	uint64_t	tx_tso_not_tcp;
48
	uint64_t	tx_tso_not_tcp;
49
	uint64_t	tx_tso_without_csum;
49
	uint64_t	tx_defragged;
50
	uint64_t	tx_defragged;
50
	uint64_t	tx_defrag_failed;
51
	uint64_t	tx_defrag_failed;
51
52
Lines 67-72 Link Here
67
	uint64_t	vrxs_ierrors;	/* if_ierrors */
68
	uint64_t	vrxs_ierrors;	/* if_ierrors */
68
	uint64_t	vrxs_csum;
69
	uint64_t	vrxs_csum;
69
	uint64_t	vrxs_csum_failed;
70
	uint64_t	vrxs_csum_failed;
71
	uint64_t	vrxs_host_lro;
70
	uint64_t	vrxs_rescheduled;
72
	uint64_t	vrxs_rescheduled;
71
};
73
};
72
74
Lines 79-84 Link Here
79
	struct vtnet_rxq_stats	 vtnrx_stats;
81
	struct vtnet_rxq_stats	 vtnrx_stats;
80
	struct taskqueue	*vtnrx_tq;
82
	struct taskqueue	*vtnrx_tq;
81
	struct task		 vtnrx_intrtask;
83
	struct task		 vtnrx_intrtask;
84
	struct lro_ctrl		 vtnrx_lro;
82
#ifdef DEV_NETMAP
85
#ifdef DEV_NETMAP
83
	struct virtio_net_hdr_mrg_rxbuf vtnrx_shrhdr;
86
	struct virtio_net_hdr_mrg_rxbuf vtnrx_shrhdr;
84
#endif  /* DEV_NETMAP */
87
#endif  /* DEV_NETMAP */
Lines 111-116 Link Here
111
#endif
114
#endif
112
	int			 vtntx_id;
115
	int			 vtntx_id;
113
	int			 vtntx_watchdog;
116
	int			 vtntx_watchdog;
117
	int			 vtntx_intr_threshold;
114
	struct vtnet_txq_stats	 vtntx_stats;
118
	struct vtnet_txq_stats	 vtntx_stats;
115
	struct taskqueue	*vtntx_tq;
119
	struct taskqueue	*vtntx_tq;
116
	struct task		 vtntx_intrtask;
120
	struct task		 vtntx_intrtask;
Lines 136-144 Link Here
136
	struct ifnet		*vtnet_ifp;
140
	struct ifnet		*vtnet_ifp;
137
	struct vtnet_rxq	*vtnet_rxqs;
141
	struct vtnet_rxq	*vtnet_rxqs;
138
	struct vtnet_txq	*vtnet_txqs;
142
	struct vtnet_txq	*vtnet_txqs;
143
	uint64_t		 vtnet_features;
139
144
140
	uint32_t		 vtnet_flags;
145
	uint32_t		 vtnet_flags;
141
#define VTNET_FLAG_SUSPENDED	 0x0001
146
#define VTNET_FLAG_MODERN	 0x0001
142
#define VTNET_FLAG_MAC		 0x0002
147
#define VTNET_FLAG_MAC		 0x0002
143
#define VTNET_FLAG_CTRL_VQ	 0x0004
148
#define VTNET_FLAG_CTRL_VQ	 0x0004
144
#define VTNET_FLAG_CTRL_RX	 0x0008
149
#define VTNET_FLAG_CTRL_RX	 0x0008
Lines 147-175 Link Here
147
#define VTNET_FLAG_TSO_ECN	 0x0040
152
#define VTNET_FLAG_TSO_ECN	 0x0040
148
#define VTNET_FLAG_MRG_RXBUFS	 0x0080
153
#define VTNET_FLAG_MRG_RXBUFS	 0x0080
149
#define VTNET_FLAG_LRO_NOMRG	 0x0100
154
#define VTNET_FLAG_LRO_NOMRG	 0x0100
150
#define VTNET_FLAG_MULTIQ	 0x0200
155
#define VTNET_FLAG_MQ		 0x0200
151
#define VTNET_FLAG_INDIRECT	 0x0400
156
#define VTNET_FLAG_INDIRECT	 0x0400
152
#define VTNET_FLAG_EVENT_IDX	 0x0800
157
#define VTNET_FLAG_EVENT_IDX	 0x0800
158
#define VTNET_FLAG_SUSPENDED	 0x1000
159
#define VTNET_FLAG_FIXUP_NEEDS_CSUM 0x2000
160
#define VTNET_FLAG_SW_LRO	 0x4000
153
161
154
	int			 vtnet_link_active;
155
	int			 vtnet_hdr_size;
162
	int			 vtnet_hdr_size;
156
	int			 vtnet_rx_process_limit;
157
	int			 vtnet_rx_nsegs;
158
	int			 vtnet_rx_nmbufs;
163
	int			 vtnet_rx_nmbufs;
159
	int			 vtnet_rx_clsize;
164
	int			 vtnet_rx_clustersz;
160
	int			 vtnet_rx_new_clsize;
165
	int			 vtnet_rx_nsegs;
161
	int			 vtnet_tx_intr_thresh;
166
	int			 vtnet_rx_process_limit;
162
	int			 vtnet_tx_nsegs;
167
	int			 vtnet_link_active;
163
	int			 vtnet_if_flags;
164
	int			 vtnet_act_vq_pairs;
168
	int			 vtnet_act_vq_pairs;
169
	int			 vtnet_req_vq_pairs;
165
	int			 vtnet_max_vq_pairs;
170
	int			 vtnet_max_vq_pairs;
166
	int			 vtnet_requested_vq_pairs;
171
	int			 vtnet_tx_nsegs;
172
	int			 vtnet_if_flags;
173
	int			 vtnet_max_mtu;
174
	int			 vtnet_lro_entry_count;
175
	int			 vtnet_lro_mbufq_depth;
167
176
168
	struct virtqueue	*vtnet_ctrl_vq;
177
	struct virtqueue	*vtnet_ctrl_vq;
169
	struct vtnet_mac_filter	*vtnet_mac_filter;
178
	struct vtnet_mac_filter	*vtnet_mac_filter;
170
	uint32_t		*vtnet_vlan_filter;
179
	uint32_t		*vtnet_vlan_filter;
171
180
172
	uint64_t		 vtnet_features;
181
	uint64_t		 vtnet_negotiated_features;
173
	struct vtnet_statistics	 vtnet_stats;
182
	struct vtnet_statistics	 vtnet_stats;
174
	struct callout		 vtnet_tick_ch;
183
	struct callout		 vtnet_tick_ch;
175
	struct ifmedia		 vtnet_media;
184
	struct ifmedia		 vtnet_media;
Lines 181-190 Link Here
181
	char			 vtnet_hwaddr[ETHER_ADDR_LEN];
190
	char			 vtnet_hwaddr[ETHER_ADDR_LEN];
182
};
191
};
183
192
193
static bool
194
vtnet_modern(struct vtnet_softc *sc)
195
{
196
	return ((sc->vtnet_flags & VTNET_FLAG_MODERN) != 0);
197
}
198
199
static bool
200
vtnet_software_lro(struct vtnet_softc *sc)
201
{
202
	return ((sc->vtnet_flags & VTNET_FLAG_SW_LRO) != 0);
203
}
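For illustration only, not part of the patch: how the two accessors above would typically steer the receive path, assuming the stock tcp_lro(4) API that the removed comment in this header used to reference:

static void
example_rx_input(struct vtnet_softc *sc, struct vtnet_rxq *rxq,
    struct ifnet *ifp, struct mbuf *m)
{
	if (vtnet_software_lro(sc) && tcp_lro_rx(&rxq->vtnrx_lro, m, 0) == 0)
		return;		/* consumed by software LRO */
	(*ifp->if_input)(ifp, m);
}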
204
184
/*
205
/*
185
 * Maximum number of queue pairs we will autoconfigure to.
206
 * Maximum number of queue pairs we will autoconfigure to.
186
 */
207
 */
187
#define VTNET_MAX_QUEUE_PAIRS	8
208
#define VTNET_MAX_QUEUE_PAIRS	32
188
209
189
/*
210
/*
190
 * Additional completed entries can appear in a virtqueue before we can
211
 * Additional completed entries can appear in a virtqueue before we can
Lines 202-226 Link Here
202
#define VTNET_NOTIFY_RETRIES		4
223
#define VTNET_NOTIFY_RETRIES		4
203
224
204
/*
225
/*
205
 * Fake the media type. The host does not provide us with any real media
206
 * information.
207
 */
208
#define VTNET_MEDIATYPE		 (IFM_ETHER | IFM_10G_T | IFM_FDX)
209
210
/*
211
 * Number of words to allocate for the VLAN shadow table. There is one
226
 * Number of words to allocate for the VLAN shadow table. There is one
212
 * bit for each VLAN.
227
 * bit for each VLAN.
213
 */
228
 */
214
#define VTNET_VLAN_FILTER_NWORDS	(4096 / 32)
229
#define VTNET_VLAN_FILTER_NWORDS	(4096 / 32)
215
230
231
/* We depend on these being the same size (and same layout). */
232
CTASSERT(sizeof(struct virtio_net_hdr_mrg_rxbuf) ==
233
    sizeof(struct virtio_net_hdr_v1));
234
216
/*
235
/*
217
 * When mergeable buffers are not negotiated, the vtnet_rx_header structure
236
 * In legacy VirtIO when mergeable buffers are not negotiated, this structure
218
 * below is placed at the beginning of the mbuf data. Use 4 bytes of pad to
237
 * is placed at the beginning of the mbuf data. Use 4 bytes of pad to keep
219
 * both keep the VirtIO header and the data non-contiguous and to keep the
238
 * both the VirtIO header and the data non-contiguous and the frame's payload
220
 * frame's payload 4 byte aligned.
239
 * 4 byte aligned. Note this padding would not be necessary if the
240
 * VIRTIO_F_ANY_LAYOUT feature was negotiated (but we don't support that yet).
221
 *
241
 *
222
 * When mergeable buffers are negotiated, the host puts the VirtIO header in
242
 * In modern VirtIO or when mergeable buffers are negotiated, the host puts
223
 * the beginning of the first mbuf's data.
243
 * the VirtIO header in the beginning of the first mbuf's data.
224
 */
244
 */
225
#define VTNET_RX_HEADER_PAD	4
245
#define VTNET_RX_HEADER_PAD	4
226
struct vtnet_rx_header {
246
struct vtnet_rx_header {
Lines 236-241 Link Here
236
	union {
256
	union {
237
		struct virtio_net_hdr		hdr;
257
		struct virtio_net_hdr		hdr;
238
		struct virtio_net_hdr_mrg_rxbuf	mhdr;
258
		struct virtio_net_hdr_mrg_rxbuf	mhdr;
259
		struct virtio_net_hdr_v1	v1hdr;
239
	} vth_uhdr;
260
	} vth_uhdr;
240
261
241
	struct mbuf *vth_mbuf;
262
	struct mbuf *vth_mbuf;
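For illustration only, not part of the patch: per the comment above, the driver would size the receive header roughly as follows (the v1 and mergeable headers are the same size, see the CTASSERT above):

static int
example_rx_hdr_size(struct vtnet_softc *sc)
{
	if (vtnet_modern(sc) || (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS))
		return (sizeof(struct virtio_net_hdr_mrg_rxbuf));
	return (sizeof(struct virtio_net_hdr));
}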
Lines 250-255 Link Here
250
 */
271
 */
251
#define VTNET_MAX_MAC_ENTRIES	128
272
#define VTNET_MAX_MAC_ENTRIES	128
252
273
274
/*
275
 * The driver version of struct virtio_net_ctrl_mac but with our predefined
276
 * number of MAC addresses allocated. This structure is shared with the host,
277
 * so the nentries field is in the correct VirtIO endianness.
278
 */
253
struct vtnet_mac_table {
279
struct vtnet_mac_table {
254
	uint32_t	nentries;
280
	uint32_t	nentries;
255
	uint8_t		macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
281
	uint8_t		macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
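For illustration only, not part of the patch: modern VirtIO is always little-endian while legacy VirtIO uses guest-native byte order, so filling nentries would look roughly like:

static void
example_set_nentries(struct vtnet_softc *sc, struct vtnet_mac_table *tbl,
    uint32_t cnt)
{
	tbl->nentries = vtnet_modern(sc) ? htole32(cnt) : cnt;
}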
Lines 275-289 Link Here
275
    (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO)
301
    (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO)
276
302
277
/* Features desired/implemented by this driver. */
303
/* Features desired/implemented by this driver. */
278
#define VTNET_FEATURES \
304
#define VTNET_COMMON_FEATURES \
279
    (VIRTIO_NET_F_MAC			| \
305
    (VIRTIO_NET_F_MAC			| \
280
     VIRTIO_NET_F_STATUS		| \
306
     VIRTIO_NET_F_STATUS		| \
307
     VIRTIO_NET_F_CTRL_GUEST_OFFLOADS	| \
308
     VIRTIO_NET_F_MTU			| \
281
     VIRTIO_NET_F_CTRL_VQ		| \
309
     VIRTIO_NET_F_CTRL_VQ		| \
282
     VIRTIO_NET_F_CTRL_RX		| \
310
     VIRTIO_NET_F_CTRL_RX		| \
283
     VIRTIO_NET_F_CTRL_MAC_ADDR		| \
311
     VIRTIO_NET_F_CTRL_MAC_ADDR		| \
284
     VIRTIO_NET_F_CTRL_VLAN		| \
312
     VIRTIO_NET_F_CTRL_VLAN		| \
285
     VIRTIO_NET_F_CSUM			| \
313
     VIRTIO_NET_F_CSUM			| \
286
     VIRTIO_NET_F_GSO			| \
287
     VIRTIO_NET_F_HOST_TSO4		| \
314
     VIRTIO_NET_F_HOST_TSO4		| \
288
     VIRTIO_NET_F_HOST_TSO6		| \
315
     VIRTIO_NET_F_HOST_TSO6		| \
289
     VIRTIO_NET_F_HOST_ECN		| \
316
     VIRTIO_NET_F_HOST_ECN		| \
Lines 293-301 Link Here
293
     VIRTIO_NET_F_GUEST_ECN		| \
320
     VIRTIO_NET_F_GUEST_ECN		| \
294
     VIRTIO_NET_F_MRG_RXBUF		| \
321
     VIRTIO_NET_F_MRG_RXBUF		| \
295
     VIRTIO_NET_F_MQ			| \
322
     VIRTIO_NET_F_MQ			| \
323
     VIRTIO_NET_F_SPEED_DUPLEX		| \
296
     VIRTIO_RING_F_EVENT_IDX		| \
324
     VIRTIO_RING_F_EVENT_IDX		| \
297
     VIRTIO_RING_F_INDIRECT_DESC)
325
     VIRTIO_RING_F_INDIRECT_DESC)
298
326
327
#define VTNET_MODERN_FEATURES (VTNET_COMMON_FEATURES)
328
#define VTNET_LEGACY_FEATURES (VTNET_COMMON_FEATURES | VIRTIO_NET_F_GSO)
329
299
/*
330
/*
300
 * The VIRTIO_NET_F_HOST_TSO[46] features permit us to send the host
331
 * The VIRTIO_NET_F_HOST_TSO[46] features permit us to send the host
301
 * frames larger than 1514 bytes.
332
 * frames larger than 1514 bytes.
Lines 305-352 Link Here
305
336
306
/*
337
/*
307
 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
338
 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
308
 * frames larger than 1514 bytes. We do not yet support software LRO
339
 * frames larger than 1514 bytes.
309
 * via tcp_lro_rx().
340
310
 */
341
 */
311
#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
342
#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
312
    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)
343
    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)
313
344
345
#define VTNET_MIN_MTU		68
314
#define VTNET_MAX_MTU		65536
346
#define VTNET_MAX_MTU		65536
315
#define VTNET_MAX_RX_SIZE	65550
347
#define VTNET_MAX_RX_SIZE	65550
316
348
317
/*
349
/*
318
 * Used to preallocate the Vq indirect descriptors. The first segment
350
 * Used to preallocate the VQ indirect descriptors. Modern and mergeable
319
 * is reserved for the header, except for mergeable buffers since the
351
 * buffers do not required one segment for the VirtIO header since it is
320
 * header is placed inline with the data.
352
 * placed inline at the beginning of the receive buffer.
321
 */
353
 */
322
#define VTNET_MRG_RX_SEGS	1
354
#define VTNET_RX_SEGS_HDR_INLINE	1
323
#define VTNET_MIN_RX_SEGS	2
355
#define VTNET_RX_SEGS_HDR_SEPARATE	2
324
#define VTNET_MAX_RX_SEGS	34
356
#define VTNET_RX_SEGS_LRO_NOMRG		34
325
#define VTNET_MIN_TX_SEGS	32
357
#define VTNET_TX_SEGS_MIN		32
326
#define VTNET_MAX_TX_SEGS	64
358
#define VTNET_TX_SEGS_MAX		64
327
359
328
/*
360
/*
329
 * Assert we can receive and transmit the maximum with regular
361
 * Assert we can receive and transmit the maximum with regular
330
 * size clusters.
362
 * size clusters.
331
 */
363
 */
332
CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
364
CTASSERT(((VTNET_RX_SEGS_LRO_NOMRG - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
333
CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU);
365
CTASSERT(((VTNET_TX_SEGS_MAX - 1) * MCLBYTES) >= VTNET_MAX_MTU);
334
366
335
/*
367
/*
336
 * Number of slots in the Tx bufrings. This value matches most other
368
 * Number of slots in the Tx bufrings. This value matches most other
337
 * multiqueue drivers.
369
 * multiqueue drivers.
338
 */
370
 */
339
#define VTNET_DEFAULT_BUFRING_SIZE	4096
371
#define VTNET_DEFAULT_BUFRING_SIZE	4096
340
341
/*
342
 * Determine how many mbufs are in each receive buffer. For LRO without
343
 * mergeable buffers, we must allocate an mbuf chain large enough to
344
 * hold both the vtnet_rx_header and the maximum receivable data.
345
 */
346
#define VTNET_NEEDED_RX_MBUFS(_sc, _clsize)				\
347
	((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 :		\
348
	    howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE,	\
349
	        (_clsize))
350
372
351
#define VTNET_CORE_MTX(_sc)		&(_sc)->vtnet_mtx
373
#define VTNET_CORE_MTX(_sc)		&(_sc)->vtnet_mtx
352
#define VTNET_CORE_LOCK(_sc)		mtx_lock(VTNET_CORE_MTX((_sc)))
374
#define VTNET_CORE_LOCK(_sc)		mtx_lock(VTNET_CORE_MTX((_sc)))
(-)sys/dev/virtio/network/virtio_net.h (-25 / +78 lines)
Lines 34-62 Link Here
34
#define _VIRTIO_NET_H
34
#define _VIRTIO_NET_H
35
35
36
/* The feature bitmap for virtio net */
36
/* The feature bitmap for virtio net */
37
#define VIRTIO_NET_F_CSUM	0x00001 /* Host handles pkts w/ partial csum */
37
#define VIRTIO_NET_F_CSUM		 0x000001 /* Host handles pkts w/ partial csum */
38
#define VIRTIO_NET_F_GUEST_CSUM 0x00002 /* Guest handles pkts w/ partial csum*/
38
#define VIRTIO_NET_F_GUEST_CSUM		 0x000002 /* Guest handles pkts w/ partial csum */
39
#define VIRTIO_NET_F_MAC	0x00020 /* Host has given MAC address. */
39
#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 0x000004 /* Dynamic offload configuration. */
40
#define VIRTIO_NET_F_GSO	0x00040 /* Host handles pkts w/ any GSO type */
40
#define VIRTIO_NET_F_MTU		 0x000008 /* Initial MTU advice */
41
#define VIRTIO_NET_F_GUEST_TSO4	0x00080 /* Guest can handle TSOv4 in. */
41
#define VIRTIO_NET_F_MAC		 0x000020 /* Host has given MAC address. */
42
#define VIRTIO_NET_F_GUEST_TSO6	0x00100 /* Guest can handle TSOv6 in. */
42
#define VIRTIO_NET_F_GSO		 0x000040 /* Host handles pkts w/ any GSO type */
43
#define VIRTIO_NET_F_GUEST_ECN	0x00200 /* Guest can handle TSO[6] w/ ECN in.*/
43
#define VIRTIO_NET_F_GUEST_TSO4		 0x000080 /* Guest can handle TSOv4 in. */
44
#define VIRTIO_NET_F_GUEST_UFO	0x00400 /* Guest can handle UFO in. */
44
#define VIRTIO_NET_F_GUEST_TSO6		 0x000100 /* Guest can handle TSOv6 in. */
45
#define VIRTIO_NET_F_HOST_TSO4	0x00800 /* Host can handle TSOv4 in. */
45
#define VIRTIO_NET_F_GUEST_ECN		 0x000200 /* Guest can handle TSO[6] w/ ECN in. */
46
#define VIRTIO_NET_F_HOST_TSO6	0x01000 /* Host can handle TSOv6 in. */
46
#define VIRTIO_NET_F_GUEST_UFO		 0x000400 /* Guest can handle UFO in. */
47
#define VIRTIO_NET_F_HOST_ECN	0x02000 /* Host can handle TSO[6] w/ ECN in. */
47
#define VIRTIO_NET_F_HOST_TSO4		 0x000800 /* Host can handle TSOv4 in. */
48
#define VIRTIO_NET_F_HOST_UFO	0x04000 /* Host can handle UFO in. */
48
#define VIRTIO_NET_F_HOST_TSO6		 0x001000 /* Host can handle TSOv6 in. */
49
#define VIRTIO_NET_F_MRG_RXBUF	0x08000 /* Host can merge receive buffers. */
49
#define VIRTIO_NET_F_HOST_ECN		 0x002000 /* Host can handle TSO[6] w/ ECN in. */
50
#define VIRTIO_NET_F_STATUS	0x10000 /* virtio_net_config.status available*/
50
#define VIRTIO_NET_F_HOST_UFO		 0x004000 /* Host can handle UFO in. */
51
#define VIRTIO_NET_F_CTRL_VQ	0x20000 /* Control channel available */
51
#define VIRTIO_NET_F_MRG_RXBUF		 0x008000 /* Host can merge receive buffers. */
52
#define VIRTIO_NET_F_CTRL_RX	0x40000 /* Control channel RX mode support */
52
#define VIRTIO_NET_F_STATUS		 0x010000 /* virtio_net_config.status available */
53
#define VIRTIO_NET_F_CTRL_VLAN	0x80000 /* Control channel VLAN filtering */
53
#define VIRTIO_NET_F_CTRL_VQ		 0x020000 /* Control channel available */
54
#define VIRTIO_NET_F_CTRL_RX_EXTRA 0x100000 /* Extra RX mode control support */
54
#define VIRTIO_NET_F_CTRL_RX		 0x040000 /* Control channel RX mode support */
55
#define VIRTIO_NET_F_GUEST_ANNOUNCE 0x200000 /* Announce device on network */
55
#define VIRTIO_NET_F_CTRL_VLAN		 0x080000 /* Control channel VLAN filtering */
56
#define VIRTIO_NET_F_MQ		0x400000 /* Device supports RFS */
56
#define VIRTIO_NET_F_CTRL_RX_EXTRA	 0x100000 /* Extra RX mode control support */
57
#define VIRTIO_NET_F_CTRL_MAC_ADDR 0x800000 /* Set MAC address */
57
#define VIRTIO_NET_F_GUEST_ANNOUNCE	 0x200000 /* Announce device on network */
58
#define VIRTIO_NET_F_MQ			 0x400000 /* Device supports Receive Flow Steering */
59
#define VIRTIO_NET_F_CTRL_MAC_ADDR	 0x800000 /* Set MAC address */
60
#define VIRTIO_NET_F_SPEED_DUPLEX	 (1ULL << 63) /* Device sets link speed and duplex */
58
61
59
#define VIRTIO_NET_S_LINK_UP	1	/* Link is up */
62
#define VIRTIO_NET_S_LINK_UP	1	/* Link is up */
63
#define VIRTIO_NET_S_ANNOUNCE	2	/* Announcement is needed */
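For illustration only, not part of the patch: drivers test these bits after negotiation through the generic VirtIO bus helper, e.g.:

static void
example_check_mrg_rxbuf(struct vtnet_softc *sc)
{
	if (virtio_with_feature(sc->vtnet_dev, VIRTIO_NET_F_MRG_RXBUF))
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
}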
60
64
61
struct virtio_net_config {
65
struct virtio_net_config {
62
	/* The config defining mac address (if VIRTIO_NET_F_MAC) */
66
	/* The config defining mac address (if VIRTIO_NET_F_MAC) */
Lines 68-83 Link Here
68
	 * Legal values are between 1 and 0x8000.
72
	 * Legal values are between 1 and 0x8000.
69
	 */
73
	 */
70
	uint16_t	max_virtqueue_pairs;
74
	uint16_t	max_virtqueue_pairs;
75
	/* Default maximum transmission unit advice */
76
	uint16_t	mtu;
77
	/*
78
	 * speed, in units of 1Mb. All values 0 to INT_MAX are legal.
79
	 * Any other value stands for unknown.
80
	 */
81
	uint32_t	speed;
82
	/*
83
	 * 0x00 - half duplex
84
	 * 0x01 - full duplex
85
	 * Any other value stands for unknown.
86
	 */
87
	uint8_t		duplex;
71
} __packed;
88
} __packed;
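For illustration only, not part of the patch: the new config fields are only valid once the corresponding feature was negotiated, e.g.:

static void
example_read_speed_duplex(device_t dev, uint32_t *speed, uint8_t *duplex)
{
	/* Meaningful only with VIRTIO_NET_F_SPEED_DUPLEX negotiated. */
	*speed = virtio_read_dev_config_4(dev,
	    offsetof(struct virtio_net_config, speed));
	*duplex = virtio_read_dev_config_1(dev,
	    offsetof(struct virtio_net_config, duplex));
}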
72
89
73
/*
90
/*
74
 * This is the first element of the scatter-gather list.  If you don't
91
 * This header comes first in the scatter-gather list.  If you don't
75
 * specify GSO or CSUM features, you can simply ignore the header.
92
 * specify GSO or CSUM features, you can simply ignore the header.
93
 *
94
 * This is bitwise-equivalent to the legacy struct virtio_net_hdr_mrg_rxbuf,
95
 * only flattened.
76
 */
96
 */
77
struct virtio_net_hdr {
97
struct virtio_net_hdr_v1 {
78
#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* Use csum_start,csum_offset*/
98
#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* Use csum_start, csum_offset */
79
#define VIRTIO_NET_HDR_F_DATA_VALID	2	/* Csum is valid */
99
#define VIRTIO_NET_HDR_F_DATA_VALID	2	/* Csum is valid */
80
	uint8_t	flags;
100
	uint8_t flags;
81
#define VIRTIO_NET_HDR_GSO_NONE		0	/* Not a GSO frame */
101
#define VIRTIO_NET_HDR_GSO_NONE		0	/* Not a GSO frame */
82
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* GSO frame, IPv4 TCP (TSO) */
102
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* GSO frame, IPv4 TCP (TSO) */
83
#define VIRTIO_NET_HDR_GSO_UDP		3	/* GSO frame, IPv4 UDP (UFO) */
103
#define VIRTIO_NET_HDR_GSO_UDP		3	/* GSO frame, IPv4 UDP (UFO) */
Lines 88-96 Link Here
88
	uint16_t gso_size;	/* Bytes to append to hdr_len per frame */
108
	uint16_t gso_size;	/* Bytes to append to hdr_len per frame */
89
	uint16_t csum_start;	/* Position to start checksumming from */
109
	uint16_t csum_start;	/* Position to start checksumming from */
90
	uint16_t csum_offset;	/* Offset after that to place checksum */
110
	uint16_t csum_offset;	/* Offset after that to place checksum */
111
	uint16_t num_buffers;	/* Number of merged rx buffers */
91
};
112
};
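For illustration only, not part of the patch: the "bitwise-equivalent" claim above can be checked at compile time; num_buffers lands at the same offset in both layouts because the legacy header is 10 bytes (struct virtio_net_hdr_mrg_rxbuf is declared further down in this file):

CTASSERT(offsetof(struct virtio_net_hdr_v1, num_buffers) ==
    offsetof(struct virtio_net_hdr_mrg_rxbuf, num_buffers));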
92
113
93
/*
114
/*
115
 * This header comes first in the scatter-gather list.
116
 * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must
117
 * be the first element of the scatter-gather list.  If you don't
118
 * specify GSO or CSUM features, you can simply ignore the header.
119
 */
120
struct virtio_net_hdr {
121
	/* See VIRTIO_NET_HDR_F_* */
122
	uint8_t	flags;
123
	/* See VIRTIO_NET_HDR_GSO_* */
124
	uint8_t gso_type;
125
	uint16_t hdr_len;	/* Ethernet + IP + tcp/udp hdrs */
126
	uint16_t gso_size;	/* Bytes to append to hdr_len per frame */
127
	uint16_t csum_start;	/* Position to start checksumming from */
128
	uint16_t csum_offset;	/* Offset after that to place checksum */
129
};
130
131
/*
94
 * This is the version of the header to use when the MRG_RXBUF
132
 * This is the version of the header to use when the MRG_RXBUF
95
 * feature has been negotiated.
133
 * feature has been negotiated.
96
 */
134
 */
Lines 200-204 Link Here
200
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET		0
238
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET		0
201
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN		1
239
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN		1
202
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX		0x8000
240
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX		0x8000
241
242
/*
243
 * Control network offloads
244
 *
245
 * Reconfigures the network offloads that Guest can handle.
246
 *
247
 * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
248
 *
249
 * Command data format matches the feature bit mask exactly.
250
 *
251
 * See VIRTIO_NET_F_GUEST_* for the list of offloads
252
 * that can be enabled/disabled.
253
 */
254
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS		5
255
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET	0
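For illustration only, not part of the patch: as the comment above says, the command payload is simply the 64-bit mask of VIRTIO_NET_F_GUEST_* bits that should remain enabled. vtnet_ctrl_cmd() here is a hypothetical stand-in for the driver's control virtqueue helper:

static int
example_set_guest_offloads(struct vtnet_softc *sc, uint64_t offloads)
{
	return (vtnet_ctrl_cmd(sc, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
	    VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &offloads,
	    sizeof(offloads)));
}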
203
256
204
#endif /* _VIRTIO_NET_H */
257
#endif /* _VIRTIO_NET_H */
(-)sys/dev/virtio/pci/virtio_pci.c (-838 / +508 lines)
Lines 1-7 Link Here
1
/*-
1
/*-
2
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
2
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3
 *
3
 *
4
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
4
 * Copyright (c) 2017, Bryan Venteicher <bryanv@FreeBSD.org>
5
 * All rights reserved.
5
 * All rights reserved.
6
 *
6
 *
7
 * Redistribution and use in source and binary forms, with or without
7
 * Redistribution and use in source and binary forms, with or without
Lines 35-40 Link Here
35
#include <sys/systm.h>
35
#include <sys/systm.h>
36
#include <sys/bus.h>
36
#include <sys/bus.h>
37
#include <sys/kernel.h>
37
#include <sys/kernel.h>
38
#include <sys/sbuf.h>
39
#include <sys/sysctl.h>
38
#include <sys/module.h>
40
#include <sys/module.h>
39
#include <sys/malloc.h>
41
#include <sys/malloc.h>
40
42
Lines 49-403 Link Here
49
#include <dev/virtio/virtio.h>
51
#include <dev/virtio/virtio.h>
50
#include <dev/virtio/virtqueue.h>
52
#include <dev/virtio/virtqueue.h>
51
#include <dev/virtio/pci/virtio_pci.h>
53
#include <dev/virtio/pci/virtio_pci.h>
54
#include <dev/virtio/pci/virtio_pci_var.h>
52
55
53
#include "virtio_bus_if.h"
56
#include "virtio_pci_if.h"
54
#include "virtio_if.h"
57
#include "virtio_if.h"
55
58
56
struct vtpci_interrupt {
59
static void	vtpci_describe_features(struct vtpci_common *, const char *,
57
	struct resource		*vti_irq;
58
	int			 vti_rid;
59
	void			*vti_handler;
60
};
61
62
struct vtpci_virtqueue {
63
	struct virtqueue	*vtv_vq;
64
	int			 vtv_no_intr;
65
};
66
67
struct vtpci_softc {
68
	device_t			 vtpci_dev;
69
	struct resource			*vtpci_res;
70
	struct resource			*vtpci_msix_res;
71
	uint64_t			 vtpci_features;
72
	uint32_t			 vtpci_flags;
73
#define VTPCI_FLAG_NO_MSI		0x0001
74
#define VTPCI_FLAG_NO_MSIX		0x0002
75
#define VTPCI_FLAG_LEGACY		0x1000
76
#define VTPCI_FLAG_MSI			0x2000
77
#define VTPCI_FLAG_MSIX			0x4000
78
#define VTPCI_FLAG_SHARED_MSIX		0x8000
79
#define VTPCI_FLAG_ITYPE_MASK		0xF000
80
81
	/* This "bus" will only ever have one child. */
82
	device_t			 vtpci_child_dev;
83
	struct virtio_feature_desc	*vtpci_child_feat_desc;
84
85
	int				 vtpci_nvqs;
86
	struct vtpci_virtqueue		*vtpci_vqs;
87
88
	/*
89
	 * Ideally, each virtqueue that the driver provides a callback for will
90
	 * receive its own MSIX vector. If there are not sufficient vectors
91
	 * available, then attempt to have all the VQs share one vector. For
92
	 * MSIX, the configuration changed notifications must be on their own
93
	 * vector.
94
	 *
95
	 * If MSIX is not available, we will attempt to have the whole device
96
	 * share one MSI vector, and then, finally, one legacy interrupt.
97
	 */
98
	struct vtpci_interrupt		 vtpci_device_interrupt;
99
	struct vtpci_interrupt		*vtpci_msix_vq_interrupts;
100
	int				 vtpci_nmsix_resources;
101
};
102
103
static int	vtpci_probe(device_t);
104
static int	vtpci_attach(device_t);
105
static int	vtpci_detach(device_t);
106
static int	vtpci_suspend(device_t);
107
static int	vtpci_resume(device_t);
108
static int	vtpci_shutdown(device_t);
109
static void	vtpci_driver_added(device_t, driver_t *);
110
static void	vtpci_child_detached(device_t, device_t);
111
static int	vtpci_read_ivar(device_t, device_t, int, uintptr_t *);
112
static int	vtpci_write_ivar(device_t, device_t, int, uintptr_t);
113
114
static uint64_t	vtpci_negotiate_features(device_t, uint64_t);
115
static int	vtpci_with_feature(device_t, uint64_t);
116
static int	vtpci_alloc_virtqueues(device_t, int, int,
117
		    struct vq_alloc_info *);
118
static int	vtpci_setup_intr(device_t, enum intr_type);
119
static void	vtpci_stop(device_t);
120
static int	vtpci_reinit(device_t, uint64_t);
121
static void	vtpci_reinit_complete(device_t);
122
static void	vtpci_notify_virtqueue(device_t, uint16_t);
123
static uint8_t	vtpci_get_status(device_t);
124
static void	vtpci_set_status(device_t, uint8_t);
125
static void	vtpci_read_dev_config(device_t, bus_size_t, void *, int);
126
static void	vtpci_write_dev_config(device_t, bus_size_t, void *, int);
127
128
static void	vtpci_describe_features(struct vtpci_softc *, const char *,
129
		    uint64_t);
60
		    uint64_t);
130
static void	vtpci_probe_and_attach_child(struct vtpci_softc *);
61
static int	vtpci_alloc_msix(struct vtpci_common *, int);
131
62
static int	vtpci_alloc_msi(struct vtpci_common *);
132
static int	vtpci_alloc_msix(struct vtpci_softc *, int);
63
static int	vtpci_alloc_intr_msix_pervq(struct vtpci_common *);
133
static int	vtpci_alloc_msi(struct vtpci_softc *);
64
static int	vtpci_alloc_intr_msix_shared(struct vtpci_common *);
134
static int	vtpci_alloc_intr_msix_pervq(struct vtpci_softc *);
65
static int	vtpci_alloc_intr_msi(struct vtpci_common *);
135
static int	vtpci_alloc_intr_msix_shared(struct vtpci_softc *);
66
static int	vtpci_alloc_intr_intx(struct vtpci_common *);
136
static int	vtpci_alloc_intr_msi(struct vtpci_softc *);
67
static int	vtpci_alloc_interrupt(struct vtpci_common *, int, int,
137
static int	vtpci_alloc_intr_legacy(struct vtpci_softc *);
138
static int	vtpci_alloc_interrupt(struct vtpci_softc *, int, int,
139
		    struct vtpci_interrupt *);
68
		    struct vtpci_interrupt *);
140
static int	vtpci_alloc_intr_resources(struct vtpci_softc *);
69
static void	vtpci_free_interrupt(struct vtpci_common *,
70
		    struct vtpci_interrupt *);
141
71
142
static int	vtpci_setup_legacy_interrupt(struct vtpci_softc *,
72
static void	vtpci_free_interrupts(struct vtpci_common *);
73
static void	vtpci_free_virtqueues(struct vtpci_common *);
74
static void	vtpci_cleanup_setup_intr_attempt(struct vtpci_common *);
75
static int	vtpci_alloc_intr_resources(struct vtpci_common *);
76
static int	vtpci_setup_intx_interrupt(struct vtpci_common *,
143
		    enum intr_type);
77
		    enum intr_type);
144
static int	vtpci_setup_pervq_msix_interrupts(struct vtpci_softc *,
78
static int	vtpci_setup_pervq_msix_interrupts(struct vtpci_common *,
145
		    enum intr_type);
79
		    enum intr_type);
146
static int	vtpci_setup_msix_interrupts(struct vtpci_softc *,
80
static int	vtpci_set_host_msix_vectors(struct vtpci_common *);
81
static int	vtpci_setup_msix_interrupts(struct vtpci_common *,
147
		    enum intr_type);
82
		    enum intr_type);
148
static int	vtpci_setup_interrupts(struct vtpci_softc *, enum intr_type);
83
static int	vtpci_setup_intrs(struct vtpci_common *, enum intr_type);
149
84
static int	vtpci_reinit_virtqueue(struct vtpci_common *, int);
150
static int	vtpci_register_msix_vector(struct vtpci_softc *, int,
85
static void	vtpci_intx_intr(void *);
151
		    struct vtpci_interrupt *);
152
static int	vtpci_set_host_msix_vectors(struct vtpci_softc *);
153
static int	vtpci_reinit_virtqueue(struct vtpci_softc *, int);
154
155
static void	vtpci_free_interrupt(struct vtpci_softc *,
156
		    struct vtpci_interrupt *);
157
static void	vtpci_free_interrupts(struct vtpci_softc *);
158
static void	vtpci_free_virtqueues(struct vtpci_softc *);
159
static void	vtpci_release_child_resources(struct vtpci_softc *);
160
static void	vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *);
161
static void	vtpci_reset(struct vtpci_softc *);
162
163
static void	vtpci_select_virtqueue(struct vtpci_softc *, int);
164
165
static void	vtpci_legacy_intr(void *);
166
static int	vtpci_vq_shared_intr_filter(void *);
86
static int	vtpci_vq_shared_intr_filter(void *);
167
static void	vtpci_vq_shared_intr(void *);
87
static void	vtpci_vq_shared_intr(void *);
168
static int	vtpci_vq_intr_filter(void *);
88
static int	vtpci_vq_intr_filter(void *);
169
static void	vtpci_vq_intr(void *);
89
static void	vtpci_vq_intr(void *);
170
static void	vtpci_config_intr(void *);
90
static void	vtpci_config_intr(void *);
91
static void	vtpci_setup_sysctl(struct vtpci_common *);
171
92
172
#define vtpci_setup_msi_interrupt vtpci_setup_legacy_interrupt
93
#define vtpci_setup_msi_interrupt vtpci_setup_intx_interrupt
173
94
174
#define VIRTIO_PCI_CONFIG(_sc) \
175
    VIRTIO_PCI_CONFIG_OFF((((_sc)->vtpci_flags & VTPCI_FLAG_MSIX)) != 0)
176
177
/*
95
/*
178
 * I/O port read/write wrappers.
96
 * This module contains two drivers:
97
 *   - virtio_pci_legacy (vtpcil) for pre-V1 support
98
 *   - virtio_pci_modern (vtpcim) for V1 support
179
 */
99
 */
180
#define vtpci_read_config_1(sc, o)	bus_read_1((sc)->vtpci_res, (o))
181
#define vtpci_read_config_2(sc, o)	bus_read_2((sc)->vtpci_res, (o))
182
#define vtpci_read_config_4(sc, o)	bus_read_4((sc)->vtpci_res, (o))
183
#define vtpci_write_config_1(sc, o, v)	bus_write_1((sc)->vtpci_res, (o), (v))
184
#define vtpci_write_config_2(sc, o, v)	bus_write_2((sc)->vtpci_res, (o), (v))
185
#define vtpci_write_config_4(sc, o, v)	bus_write_4((sc)->vtpci_res, (o), (v))
186
187
/* Tunables. */
188
static int vtpci_disable_msix = 0;
189
TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix);
190
191
static device_method_t vtpci_methods[] = {
192
	/* Device interface. */
193
	DEVMETHOD(device_probe,			  vtpci_probe),
194
	DEVMETHOD(device_attach,		  vtpci_attach),
195
	DEVMETHOD(device_detach,		  vtpci_detach),
196
	DEVMETHOD(device_suspend,		  vtpci_suspend),
197
	DEVMETHOD(device_resume,		  vtpci_resume),
198
	DEVMETHOD(device_shutdown,		  vtpci_shutdown),
199
200
	/* Bus interface. */
201
	DEVMETHOD(bus_driver_added,		  vtpci_driver_added),
202
	DEVMETHOD(bus_child_detached,		  vtpci_child_detached),
203
	DEVMETHOD(bus_read_ivar,		  vtpci_read_ivar),
204
	DEVMETHOD(bus_write_ivar,		  vtpci_write_ivar),
205
206
	/* VirtIO bus interface. */
207
	DEVMETHOD(virtio_bus_negotiate_features,  vtpci_negotiate_features),
208
	DEVMETHOD(virtio_bus_with_feature,	  vtpci_with_feature),
209
	DEVMETHOD(virtio_bus_alloc_virtqueues,	  vtpci_alloc_virtqueues),
210
	DEVMETHOD(virtio_bus_setup_intr,	  vtpci_setup_intr),
211
	DEVMETHOD(virtio_bus_stop,		  vtpci_stop),
212
	DEVMETHOD(virtio_bus_reinit,		  vtpci_reinit),
213
	DEVMETHOD(virtio_bus_reinit_complete,	  vtpci_reinit_complete),
214
	DEVMETHOD(virtio_bus_notify_vq,		  vtpci_notify_virtqueue),
215
	DEVMETHOD(virtio_bus_read_device_config,  vtpci_read_dev_config),
216
	DEVMETHOD(virtio_bus_write_device_config, vtpci_write_dev_config),
217
218
	DEVMETHOD_END
219
};
220
221
static driver_t vtpci_driver = {
222
	"virtio_pci",
223
	vtpci_methods,
224
	sizeof(struct vtpci_softc)
225
};
226
227
devclass_t vtpci_devclass;
228
229
DRIVER_MODULE(virtio_pci, pci, vtpci_driver, vtpci_devclass, 0, 0);
230
MODULE_VERSION(virtio_pci, 1);
100
MODULE_VERSION(virtio_pci, 1);
231
MODULE_DEPEND(virtio_pci, pci, 1, 1, 1);
101
MODULE_DEPEND(virtio_pci, pci, 1, 1, 1);
232
MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1);
102
MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1);
233
103
234
static int
104
int vtpci_disable_msix = 0;
235
vtpci_probe(device_t dev)
105
TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix);
106
107
static uint8_t
108
vtpci_read_isr(struct vtpci_common *cn)
236
{
109
{
237
	char desc[36];
110
	return (VIRTIO_PCI_READ_ISR(cn->vtpci_dev));
238
	const char *name;
111
}
239
112
240
	if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
113
static uint16_t
241
		return (ENXIO);
114
vtpci_get_vq_size(struct vtpci_common *cn, int idx)
115
{
116
	return (VIRTIO_PCI_GET_VQ_SIZE(cn->vtpci_dev, idx));
117
}
242
118
243
	if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN ||
119
static bus_size_t
244
	    pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MAX)
120
vtpci_get_vq_notify_off(struct vtpci_common *cn, int idx)
245
		return (ENXIO);
121
{
122
	return (VIRTIO_PCI_GET_VQ_NOTIFY_OFF(cn->vtpci_dev, idx));
123
}
246
124
247
	if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION)
125
static void
248
		return (ENXIO);
126
vtpci_set_vq(struct vtpci_common *cn, struct virtqueue *vq)
127
{
128
	VIRTIO_PCI_SET_VQ(cn->vtpci_dev, vq);
129
}
249
130
250
	name = virtio_device_name(pci_get_subdevice(dev));
131
static void
251
	if (name == NULL)
132
vtpci_disable_vq(struct vtpci_common *cn, int idx)
252
		name = "Unknown";
133
{
134
	VIRTIO_PCI_DISABLE_VQ(cn->vtpci_dev, idx);
135
}
253
136
254
	snprintf(desc, sizeof(desc), "VirtIO PCI %s adapter", name);
137
static int
255
	device_set_desc_copy(dev, desc);
138
vtpci_register_cfg_msix(struct vtpci_common *cn, struct vtpci_interrupt *intr)
256
139
{
257
	return (BUS_PROBE_DEFAULT);
140
	return (VIRTIO_PCI_REGISTER_CFG_MSIX(cn->vtpci_dev, intr));
258
}
141
}
259
142
260
static int
143
static int
261
vtpci_attach(device_t dev)
144
vtpci_register_vq_msix(struct vtpci_common *cn, int idx,
145
    struct vtpci_interrupt *intr)
262
{
146
{
263
	struct vtpci_softc *sc;
147
	return (VIRTIO_PCI_REGISTER_VQ_MSIX(cn->vtpci_dev, idx, intr));
264
	device_t child;
148
}
265
	int rid;
266
149
267
	sc = device_get_softc(dev);
150
void
268
	sc->vtpci_dev = dev;
151
vtpci_init(struct vtpci_common *cn, device_t dev, bool modern)
152
{
269
153
154
	cn->vtpci_dev = dev;
155
270
	pci_enable_busmaster(dev);
156
	pci_enable_busmaster(dev);
271
157
272
	rid = PCIR_BAR(0);
158
	if (modern)
273
	sc->vtpci_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
159
		cn->vtpci_flags |= VTPCI_FLAG_MODERN;
274
	    RF_ACTIVE);
275
	if (sc->vtpci_res == NULL) {
276
		device_printf(dev, "cannot map I/O space\n");
277
		return (ENXIO);
278
	}
279
280
	if (pci_find_cap(dev, PCIY_MSI, NULL) != 0)
160
	if (pci_find_cap(dev, PCIY_MSI, NULL) != 0)
281
		sc->vtpci_flags |= VTPCI_FLAG_NO_MSI;
161
		cn->vtpci_flags |= VTPCI_FLAG_NO_MSI;
162
	if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0)
163
		cn->vtpci_flags |= VTPCI_FLAG_NO_MSIX;
282
164
283
	if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
165
	vtpci_setup_sysctl(cn);
284
		rid = PCIR_BAR(1);
166
}
285
		sc->vtpci_msix_res = bus_alloc_resource_any(dev,
286
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
287
	}
288
167
289
	if (sc->vtpci_msix_res == NULL)
168
int
290
		sc->vtpci_flags |= VTPCI_FLAG_NO_MSIX;
169
vtpci_add_child(struct vtpci_common *cn)
170
{
171
	device_t dev, child;
291
172
292
	vtpci_reset(sc);
173
	dev = cn->vtpci_dev;
293
174
294
	/* Tell the host we've noticed this device. */
175
	child = device_add_child(dev, NULL, -1);
295
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
176
	if (child == NULL) {
296
297
	if ((child = device_add_child(dev, NULL, -1)) == NULL) {
298
		device_printf(dev, "cannot create child device\n");
177
		device_printf(dev, "cannot create child device\n");
299
		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
300
		vtpci_detach(dev);
301
		return (ENOMEM);
178
		return (ENOMEM);
302
	}
179
	}
303
180
304
	sc->vtpci_child_dev = child;
181
	cn->vtpci_child_dev = child;
305
	vtpci_probe_and_attach_child(sc);
306
182
307
	return (0);
183
	return (0);
308
}
184
}
309
185
310
static int
186
int
311
vtpci_detach(device_t dev)
187
vtpci_delete_child(struct vtpci_common *cn)
312
{
188
{
313
	struct vtpci_softc *sc;
189
	device_t dev, child;
314
	device_t child;
315
	int error;
190
	int error;
316
191
317
	sc = device_get_softc(dev);
192
	dev = cn->vtpci_dev;
318
193
319
	if ((child = sc->vtpci_child_dev) != NULL) {
194
	child = cn->vtpci_child_dev;
195
	if (child != NULL) {
320
		error = device_delete_child(dev, child);
196
		error = device_delete_child(dev, child);
321
		if (error)
197
		if (error)
322
			return (error);
198
			return (error);
323
		sc->vtpci_child_dev = NULL;
199
		cn->vtpci_child_dev = NULL;
324
	}
200
	}
325
201
326
	vtpci_reset(sc);
327
328
	if (sc->vtpci_msix_res != NULL) {
329
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1),
330
		    sc->vtpci_msix_res);
331
		sc->vtpci_msix_res = NULL;
332
	}
333
334
	if (sc->vtpci_res != NULL) {
335
		bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0),
336
		    sc->vtpci_res);
337
		sc->vtpci_res = NULL;
338
	}
339
340
	return (0);
202
	return (0);
341
}
203
}
342
204
343
static int
205
void
344
vtpci_suspend(device_t dev)
206
vtpci_child_detached(struct vtpci_common *cn)
345
{
207
{
346
208
347
	return (bus_generic_suspend(dev));
209
	vtpci_release_child_resources(cn);
348
}
349
210
350
static int
211
	cn->vtpci_child_feat_desc = NULL;
351
vtpci_resume(device_t dev)
212
	cn->vtpci_host_features = 0;
352
{
213
	cn->vtpci_features = 0;
353
354
	return (bus_generic_resume(dev));
355
}
214
}
356
215
357
static int
216
int
358
vtpci_shutdown(device_t dev)
217
vtpci_reinit(struct vtpci_common *cn)
359
{
218
{
219
	int idx, error;
360
220
361
	(void) bus_generic_shutdown(dev);
221
	for (idx = 0; idx < cn->vtpci_nvqs; idx++) {
362
	/* Forcibly stop the host device. */
222
		error = vtpci_reinit_virtqueue(cn, idx);
363
	vtpci_stop(dev);
223
		if (error)
224
			return (error);
225
	}
364
226
227
	if (vtpci_is_msix_enabled(cn)) {
228
		error = vtpci_set_host_msix_vectors(cn);
229
		if (error)
230
			return (error);
231
	}
232
365
	return (0);
233
	return (0);
366
}
234
}
367
235
368
static void
236
static void
369
vtpci_driver_added(device_t dev, driver_t *driver)
237
vtpci_describe_features(struct vtpci_common *cn, const char *msg,
238
    uint64_t features)
370
{
239
{
371
	struct vtpci_softc *sc;
240
	device_t dev, child;
372
241
373
	sc = device_get_softc(dev);
242
	dev = cn->vtpci_dev;
243
	child = cn->vtpci_child_dev;
374
244
375
	vtpci_probe_and_attach_child(sc);
245
	if (device_is_attached(child) || bootverbose == 0)
246
		return;
247
248
	virtio_describe(dev, msg, features, cn->vtpci_child_feat_desc);
376
}
249
}
377
250
378
static void
251
uint64_t
379
vtpci_child_detached(device_t dev, device_t child)
252
vtpci_negotiate_features(struct vtpci_common *cn,
253
    uint64_t child_features, uint64_t host_features)
380
{
254
{
381
	struct vtpci_softc *sc;
255
	uint64_t features;
382
256
383
	sc = device_get_softc(dev);
257
	cn->vtpci_host_features = host_features;
258
	vtpci_describe_features(cn, "host", host_features);
384
259
385
	vtpci_reset(sc);
260
	/*
386
	vtpci_release_child_resources(sc);
261
	 * Limit negotiated features to what the driver, virtqueue, and
262
	 * host all support.
263
	 */
264
	features = host_features & child_features;
265
	features = virtio_filter_transport_features(features);
266
267
	cn->vtpci_features = features;
268
	vtpci_describe_features(cn, "negotiated", features);
269
270
	return (features);
387
}
271
}
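For illustration only, not part of the patch: a worked example of the intersection above. If the host offers MAC|MRG_RXBUF and the child driver requests MAC|CSUM, only the common bit survives before the transport bits are filtered:

uint64_t host  = VIRTIO_NET_F_MAC | VIRTIO_NET_F_MRG_RXBUF;
uint64_t child = VIRTIO_NET_F_MAC | VIRTIO_NET_F_CSUM;
uint64_t negotiated = host & child;	/* == VIRTIO_NET_F_MAC */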
388
272
389
static int
273
int
390
vtpci_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
274
vtpci_with_feature(struct vtpci_common *cn, uint64_t feature)
391
{
275
{
392
	struct vtpci_softc *sc;
276
	return ((cn->vtpci_features & feature) != 0);
277
}
393
278
394
	sc = device_get_softc(dev);
279
int
280
vtpci_read_ivar(struct vtpci_common *cn, int index, uintptr_t *result)
281
{
282
	device_t dev;
283
	int error;
395
284
396
	if (sc->vtpci_child_dev != child)
285
	dev = cn->vtpci_dev;
397
		return (ENOENT);
286
	error = 0;
398
287
399
	switch (index) {
288
	switch (index) {
400
	case VIRTIO_IVAR_DEVTYPE:
401
	case VIRTIO_IVAR_SUBDEVICE:
289
	case VIRTIO_IVAR_SUBDEVICE:
402
		*result = pci_get_subdevice(dev);
290
		*result = pci_get_subdevice(dev);
403
		break;
291
		break;
Lines 410-509 Link Here
410
	case VIRTIO_IVAR_SUBVENDOR:
298
	case VIRTIO_IVAR_SUBVENDOR:
411
		*result = pci_get_subdevice(dev);
299
		*result = pci_get_subdevice(dev);
412
		break;
300
		break;
301
	case VIRTIO_IVAR_MODERN:
302
		*result = vtpci_is_modern(cn);
303
		break;
413
	default:
304
	default:
414
		return (ENOENT);
305
		error = ENOENT;
415
	}
306
	}
416
307
417
	return (0);
308
	return (error);
418
}
309
}
419
310
420
static int
311
int
421
vtpci_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
312
vtpci_write_ivar(struct vtpci_common *cn, int index, uintptr_t value)
422
{
313
{
423
	struct vtpci_softc *sc;
314
	int error;
424
315
425
	sc = device_get_softc(dev);
316
	error = 0;
426
317
427
	if (sc->vtpci_child_dev != child)
428
		return (ENOENT);
429
430
	switch (index) {
318
	switch (index) {
431
	case VIRTIO_IVAR_FEATURE_DESC:
319
	case VIRTIO_IVAR_FEATURE_DESC:
432
		sc->vtpci_child_feat_desc = (void *) value;
320
		cn->vtpci_child_feat_desc = (void *) value;
433
		break;
321
		break;
434
	default:
322
	default:
435
		return (ENOENT);
323
		error = ENOENT;
436
	}
324
	}
437
325
438
	return (0);
326
	return (error);
439
}
327
}
440
328
441
static uint64_t
329
int
442
vtpci_negotiate_features(device_t dev, uint64_t child_features)
330
vtpci_alloc_virtqueues(struct vtpci_common *cn, int flags, int nvqs,
331
    struct vq_alloc_info *vq_info)
443
{
332
{
444
	struct vtpci_softc *sc;
333
	device_t dev;
445
	uint64_t host_features, features;
334
	int idx, align, error;
446
335
447
	sc = device_get_softc(dev);
336
	dev = cn->vtpci_dev;
448
337
449
	host_features = vtpci_read_config_4(sc, VIRTIO_PCI_HOST_FEATURES);
450
	vtpci_describe_features(sc, "host", host_features);
451
452
	/*
338
	/*
453
	 * Limit negotiated features to what the driver, virtqueue, and
339
	 * This is VIRTIO_PCI_VRING_ALIGN from legacy VirtIO. In modern VirtIO,
454
	 * host all support.
340
	 * the tables do not have to be allocated contiguously, but we do so
341
	 * anyways.
455
	 */
342
	 */
456
	features = host_features & child_features;
343
	align = 4096;
457
	features = virtqueue_filter_features(features);
458
	sc->vtpci_features = features;
459
344
460
	vtpci_describe_features(sc, "negotiated", features);
345
	if (cn->vtpci_nvqs != 0)
461
	vtpci_write_config_4(sc, VIRTIO_PCI_GUEST_FEATURES, features);
462
463
	return (features);
464
}
465
466
static int
467
vtpci_with_feature(device_t dev, uint64_t feature)
468
{
469
	struct vtpci_softc *sc;
470
471
	sc = device_get_softc(dev);
472
473
	return ((sc->vtpci_features & feature) != 0);
474
}
475
476
static int
477
vtpci_alloc_virtqueues(device_t dev, int flags, int nvqs,
478
    struct vq_alloc_info *vq_info)
479
{
480
	struct vtpci_softc *sc;
481
	struct virtqueue *vq;
482
	struct vtpci_virtqueue *vqx;
483
	struct vq_alloc_info *info;
484
	int idx, error;
485
	uint16_t size;
486
487
	sc = device_get_softc(dev);
488
489
	if (sc->vtpci_nvqs != 0)
490
		return (EALREADY);
346
		return (EALREADY);
491
	if (nvqs <= 0)
347
	if (nvqs <= 0)
492
		return (EINVAL);
348
		return (EINVAL);
493
349
494
	sc->vtpci_vqs = malloc(nvqs * sizeof(struct vtpci_virtqueue),
350
	cn->vtpci_vqs = malloc(nvqs * sizeof(struct vtpci_virtqueue),
495
	    M_DEVBUF, M_NOWAIT | M_ZERO);
351
	    M_DEVBUF, M_NOWAIT | M_ZERO);
496
	if (sc->vtpci_vqs == NULL)
352
	if (cn->vtpci_vqs == NULL)
497
		return (ENOMEM);
353
		return (ENOMEM);
498
354
499
	for (idx = 0; idx < nvqs; idx++) {
355
	for (idx = 0; idx < nvqs; idx++) {
500
		vqx = &sc->vtpci_vqs[idx];
356
		struct vtpci_virtqueue *vqx;
357
		struct vq_alloc_info *info;
358
		struct virtqueue *vq;
359
		bus_size_t notify_offset;
360
		uint16_t size;
361
362
		vqx = &cn->vtpci_vqs[idx];
501
		info = &vq_info[idx];
363
		info = &vq_info[idx];
502
364
503
		vtpci_select_virtqueue(sc, idx);
365
		size = vtpci_get_vq_size(cn, idx);
504
		size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);
366
		notify_offset = vtpci_get_vq_notify_off(cn, idx);
505
367
506
		error = virtqueue_alloc(dev, idx, size, VIRTIO_PCI_VRING_ALIGN,
368
		error = virtqueue_alloc(dev, idx, size, notify_offset, align,
507
		    0xFFFFFFFFUL, info, &vq);
369
		    0xFFFFFFFFUL, info, &vq);
508
		if (error) {
370
		if (error) {
509
			device_printf(dev,
371
			device_printf(dev,
Lines 511-780 Link Here
511
			break;
373
			break;
512
		}
374
		}
513
375
514
		vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
376
		vtpci_set_vq(cn, vq);
515
		    virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
516
377
517
		vqx->vtv_vq = *info->vqai_vq = vq;
378
		vqx->vtv_vq = *info->vqai_vq = vq;
518
		vqx->vtv_no_intr = info->vqai_intr == NULL;
379
		vqx->vtv_no_intr = info->vqai_intr == NULL;
519
380
520
		sc->vtpci_nvqs++;
381
		cn->vtpci_nvqs++;
521
	}
382
	}
522
383
523
	if (error)
384
	if (error)
524
		vtpci_free_virtqueues(sc);
385
		vtpci_free_virtqueues(cn);
525
386
526
	return (error);
387
	return (error);
527
}
388
}
528
389
529
static int
390
static int
530
vtpci_setup_intr(device_t dev, enum intr_type type)
391
vtpci_alloc_msix(struct vtpci_common *cn, int nvectors)
531
{
392
{
532
	struct vtpci_softc *sc;
533
	int attempt, error;
534
535
	sc = device_get_softc(dev);
536
537
	for (attempt = 0; attempt < 5; attempt++) {
538
		/*
539
		 * Start with the most desirable interrupt configuration and
540
		 * fallback towards less desirable ones.
541
		 */
542
		switch (attempt) {
543
		case 0:
544
			error = vtpci_alloc_intr_msix_pervq(sc);
545
			break;
546
		case 1:
547
			error = vtpci_alloc_intr_msix_shared(sc);
548
			break;
549
		case 2:
550
			error = vtpci_alloc_intr_msi(sc);
551
			break;
552
		case 3:
553
			error = vtpci_alloc_intr_legacy(sc);
554
			break;
555
		default:
556
			device_printf(dev,
557
			    "exhausted all interrupt allocation attempts\n");
558
			return (ENXIO);
559
		}
560
561
		if (error == 0 && vtpci_setup_interrupts(sc, type) == 0)
562
			break;
563
564
		vtpci_cleanup_setup_intr_attempt(sc);
565
	}
566
567
	if (bootverbose) {
568
		if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
569
			device_printf(dev, "using legacy interrupt\n");
570
		else if (sc->vtpci_flags & VTPCI_FLAG_MSI)
571
			device_printf(dev, "using MSI interrupt\n");
572
		else if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX)
573
			device_printf(dev, "using shared MSIX interrupts\n");
574
		else
575
			device_printf(dev, "using per VQ MSIX interrupts\n");
576
	}
577
578
	return (0);
579
}
-
-static void
-vtpci_stop(device_t dev)
-{
-
-	vtpci_reset(device_get_softc(dev));
-}
-
-static int
-vtpci_reinit(device_t dev, uint64_t features)
-{
-	struct vtpci_softc *sc;
-	int idx, error;
-
-	sc = device_get_softc(dev);
-
-	/*
-	 * Redrive the device initialization. This is a bit of an abuse of
-	 * the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to
-	 * play nice.
-	 *
-	 * We do not allow the host device to change from what was originally
-	 * negotiated beyond what the guest driver changed. MSIX state should
-	 * not change, number of virtqueues and their size remain the same, etc.
-	 * This will need to be rethought when we want to support migration.
-	 */
-
-	if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
-		vtpci_stop(dev);
-
-	/*
-	 * Quickly drive the status through ACK and DRIVER. The device
-	 * does not become usable again until vtpci_reinit_complete().
-	 */
-	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
-	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
-
-	vtpci_negotiate_features(dev, features);
-
-	for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
-		error = vtpci_reinit_virtqueue(sc, idx);
-		if (error)
-			return (error);
-	}
-
-	if (sc->vtpci_flags & VTPCI_FLAG_MSIX) {
-		error = vtpci_set_host_msix_vectors(sc);
-		if (error)
-			return (error);
-	}
-
-	return (0);
-}
-
-static void
-vtpci_reinit_complete(device_t dev)
-{
-
-	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
-}
-
-static void
-vtpci_notify_virtqueue(device_t dev, uint16_t queue)
-{
-	struct vtpci_softc *sc;
-
-	sc = device_get_softc(dev);
-
-	vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_NOTIFY, queue);
-}
-
-static uint8_t
-vtpci_get_status(device_t dev)
-{
-	struct vtpci_softc *sc;
-
-	sc = device_get_softc(dev);
-
-	return (vtpci_read_config_1(sc, VIRTIO_PCI_STATUS));
-}
-
-static void
-vtpci_set_status(device_t dev, uint8_t status)
-{
-	struct vtpci_softc *sc;
-
-	sc = device_get_softc(dev);
-
-	if (status != VIRTIO_CONFIG_STATUS_RESET)
-		status |= vtpci_get_status(dev);
-
-	vtpci_write_config_1(sc, VIRTIO_PCI_STATUS, status);
-}
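Reviewer note: vtpci_set_status() moves to the common code unchanged in spirit. The virtio status byte is cumulative, so every write except RESET (zero) ORs in the bits already set on the device. A userspace model of that read-modify-write convention (constants mirror VIRTIO_CONFIG_STATUS_* values but are illustrative here):

#include <stdint.h>
#include <stdio.h>

#define STATUS_RESET		0x00
#define STATUS_ACK		0x01
#define STATUS_DRIVER		0x02
#define STATUS_DRIVER_OK	0x04

static uint8_t status_reg;	/* stands in for the device register */

static void
set_status(uint8_t status)
{
	if (status != STATUS_RESET)
		status |= status_reg;	/* accumulate, never drop bits */
	status_reg = status;
}

int
main(void)
{
	set_status(STATUS_ACK);
	set_status(STATUS_DRIVER);
	set_status(STATUS_DRIVER_OK);
	printf("status after init: 0x%02x\n", status_reg);	/* 0x07 */
	set_status(STATUS_RESET);
	printf("status after reset: 0x%02x\n", status_reg);	/* 0x00 */
	return (0);
}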
-
-static void
-vtpci_read_dev_config(device_t dev, bus_size_t offset,
-    void *dst, int length)
-{
-	struct vtpci_softc *sc;
-	bus_size_t off;
-	uint8_t *d;
-	int size;
-
-	sc = device_get_softc(dev);
-	off = VIRTIO_PCI_CONFIG(sc) + offset;
-
-	for (d = dst; length > 0; d += size, off += size, length -= size) {
-		if (length >= 4) {
-			size = 4;
-			*(uint32_t *)d = vtpci_read_config_4(sc, off);
-		} else if (length >= 2) {
-			size = 2;
-			*(uint16_t *)d = vtpci_read_config_2(sc, off);
-		} else {
-			size = 1;
-			*d = vtpci_read_config_1(sc, off);
-		}
-	}
-}
-
-static void
-vtpci_write_dev_config(device_t dev, bus_size_t offset,
-    void *src, int length)
-{
-	struct vtpci_softc *sc;
-	bus_size_t off;
-	uint8_t *s;
-	int size;
-
-	sc = device_get_softc(dev);
-	off = VIRTIO_PCI_CONFIG(sc) + offset;
-
-	for (s = src; length > 0; s += size, off += size, length -= size) {
-		if (length >= 4) {
-			size = 4;
-			vtpci_write_config_4(sc, off, *(uint32_t *)s);
-		} else if (length >= 2) {
-			size = 2;
-			vtpci_write_config_2(sc, off, *(uint16_t *)s);
-		} else {
-			size = 1;
-			vtpci_write_config_1(sc, off, *s);
-		}
-	}
-}
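Reviewer note: both config accessors use the same loop, copying with the widest access that still fits (4, then 2, then 1 bytes) and advancing the offset by the size actually used, since the device config window is accessed in 1/2/4-byte units. A self-contained model of that chunking over a plain byte array (names are illustrative, not the kernel API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t window[16] =
    { 1, 2, 3, 4, 5, 6, 7, 8 };	/* fake device config window */

static void
read_dev_config(unsigned int off, void *dst, int length)
{
	uint8_t *d;
	int size;

	for (d = dst; length > 0; d += size, off += size, length -= size) {
		if (length >= 4)
			size = 4;	/* models a 4-byte register read */
		else if (length >= 2)
			size = 2;	/* 2-byte read */
		else
			size = 1;	/* 1-byte read */
		memcpy(d, &window[off], (size_t)size);
	}
}

int
main(void)
{
	uint8_t buf[7];

	/* 7 bytes arrive as one 4-byte, one 2-byte, one 1-byte access. */
	read_dev_config(0, buf, (int)sizeof(buf));
	printf("first=%u last=%u\n", buf[0], buf[6]);
	return (0);
}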
-
-static void
-vtpci_describe_features(struct vtpci_softc *sc, const char *msg,
-    uint64_t features)
-{
-	device_t dev, child;
-
-	dev = sc->vtpci_dev;
-	child = sc->vtpci_child_dev;
-
-	if (device_is_attached(child) || bootverbose == 0)
-		return;
-
-	virtio_describe(dev, msg, features, sc->vtpci_child_feat_desc);
-}
-
-static void
-vtpci_probe_and_attach_child(struct vtpci_softc *sc)
-{
-	device_t dev, child;
-
-	dev = sc->vtpci_dev;
-	child = sc->vtpci_child_dev;
-
-	if (child == NULL)
-		return;
-
-	if (device_get_state(child) != DS_NOTPRESENT)
-		return;
-
-	if (device_probe(child) != 0)
-		return;
-
-	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
-	if (device_attach(child) != 0) {
-		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
-		vtpci_reset(sc);
-		vtpci_release_child_resources(sc);
-		/* Reset status for future attempt. */
-		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
-	} else {
-		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
-		VIRTIO_ATTACH_COMPLETED(child);
-	}
-}
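Reviewer note: the removed probe/attach dance encodes the status protocol of the spec: DRIVER before the child driver attaches, DRIVER_OK only on success, and on failure FAILED followed by a reset and a drop back to ACK so a later attach can start over. A compressed, purely illustrative model of those transitions:

#include <stdint.h>
#include <stdio.h>

#define STATUS_ACK		0x01
#define STATUS_DRIVER		0x02
#define STATUS_DRIVER_OK	0x04
#define STATUS_FAILED		0x80

static uint8_t status = STATUS_ACK;	/* device acknowledged earlier */

static int
attach_child(int probe_ok)
{
	status |= STATUS_DRIVER;	/* a driver is about to take over */
	if (!probe_ok) {
		status |= STATUS_FAILED;	/* tell the host we gave up */
		status = 0;			/* models the device reset */
		status = STATUS_ACK;		/* re-ACK for a later retry */
		return (-1);
	}
	status |= STATUS_DRIVER_OK;	/* device is live */
	return (0);
}

int
main(void)
{
	attach_child(0);
	printf("failed attach -> status 0x%02x\n", status);
	attach_child(1);
	printf("good attach   -> status 0x%02x\n", status);
	return (0);
}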
-
-static int
-vtpci_alloc_msix(struct vtpci_softc *sc, int nvectors)
-{
	device_t dev;
	int nmsix, cnt, required;

-	dev = sc->vtpci_dev;
+	dev = cn->vtpci_dev;

	/* Allocate an additional vector for the config changes. */
	required = nvectors + 1;
Lines 785-791 Link Here

	cnt = required;
	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
-		sc->vtpci_nmsix_resources = required;
+		cn->vtpci_nmsix_resources = required;
		return (0);
	}
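Reviewer note: the vector arithmetic survives the refactor: the transport asks for one MSI-X message per interrupt-driven virtqueue plus one dedicated message for configuration-change notifications, and gives up if the device cannot supply that many. A toy calculation (the queue counts are made up for the example):

#include <stdio.h>

int
main(void)
{
	int nvqs = 3;			/* e.g. a NIC: RX, TX, control */
	int polled_vqs = 1;		/* queues that take no interrupt */
	int nvectors = nvqs - polled_vqs;
	int required = nvectors + 1;	/* extra vector for config changes */

	printf("MSI-X messages required: %d\n", required);
	return (0);
}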
Lines 795-806 Link Here

}

static int
-vtpci_alloc_msi(struct vtpci_softc *sc)
+vtpci_alloc_msi(struct vtpci_common *cn)
{
	device_t dev;
	int nmsi, cnt, required;

-	dev = sc->vtpci_dev;
+	dev = cn->vtpci_dev;
	required = 1;

	nmsi = pci_msi_count(dev);
Lines 817-896 Link Here

}

static int
-vtpci_alloc_intr_msix_pervq(struct vtpci_softc *sc)
+vtpci_alloc_intr_msix_pervq(struct vtpci_common *cn)
{
	int i, nvectors, error;

-	if (vtpci_disable_msix != 0 ||
-	    sc->vtpci_flags & VTPCI_FLAG_NO_MSIX)
+	if (vtpci_disable_msix != 0 || cn->vtpci_flags & VTPCI_FLAG_NO_MSIX)
		return (ENOTSUP);

-	for (nvectors = 0, i = 0; i < sc->vtpci_nvqs; i++) {
-		if (sc->vtpci_vqs[i].vtv_no_intr == 0)
+	for (nvectors = 0, i = 0; i < cn->vtpci_nvqs; i++) {
+		if (cn->vtpci_vqs[i].vtv_no_intr == 0)
			nvectors++;
	}

-	error = vtpci_alloc_msix(sc, nvectors);
+	error = vtpci_alloc_msix(cn, nvectors);
	if (error)
		return (error);

-	sc->vtpci_flags |= VTPCI_FLAG_MSIX;
+	cn->vtpci_flags |= VTPCI_FLAG_MSIX;

	return (0);
}

static int
-vtpci_alloc_intr_msix_shared(struct vtpci_softc *sc)
+vtpci_alloc_intr_msix_shared(struct vtpci_common *cn)
{
	int error;

-	if (vtpci_disable_msix != 0 ||
-	    sc->vtpci_flags & VTPCI_FLAG_NO_MSIX)
+	if (vtpci_disable_msix != 0 || cn->vtpci_flags & VTPCI_FLAG_NO_MSIX)
		return (ENOTSUP);

-	error = vtpci_alloc_msix(sc, 1);
+	error = vtpci_alloc_msix(cn, 1);
	if (error)
		return (error);

-	sc->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX;
+	cn->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX;

	return (0);
}

static int
-vtpci_alloc_intr_msi(struct vtpci_softc *sc)
+vtpci_alloc_intr_msi(struct vtpci_common *cn)
{
	int error;

	/* Only BHyVe supports MSI. */
-	if (sc->vtpci_flags & VTPCI_FLAG_NO_MSI)
+	if (cn->vtpci_flags & VTPCI_FLAG_NO_MSI)
		return (ENOTSUP);

-	error = vtpci_alloc_msi(sc);
+	error = vtpci_alloc_msi(cn);
	if (error)
		return (error);

-	sc->vtpci_flags |= VTPCI_FLAG_MSI;
+	cn->vtpci_flags |= VTPCI_FLAG_MSI;

	return (0);
}

static int
-vtpci_alloc_intr_legacy(struct vtpci_softc *sc)
+vtpci_alloc_intr_intx(struct vtpci_common *cn)
{

-	sc->vtpci_flags |= VTPCI_FLAG_LEGACY;
+	cn->vtpci_flags |= VTPCI_FLAG_INTX;

	return (0);
}

static int
-vtpci_alloc_interrupt(struct vtpci_softc *sc, int rid, int flags,
+vtpci_alloc_interrupt(struct vtpci_common *cn, int rid, int flags,
    struct vtpci_interrupt *intr)
{
	struct resource *irq;

-	irq = bus_alloc_resource_any(sc->vtpci_dev, SYS_RES_IRQ, &rid, flags);
+	irq = bus_alloc_resource_any(cn->vtpci_dev, SYS_RES_IRQ, &rid, flags);
	if (irq == NULL)
		return (ENXIO);
Lines 900-939 Link Here

	return (0);
}

+static void
+vtpci_free_interrupt(struct vtpci_common *cn, struct vtpci_interrupt *intr)
+{
+	device_t dev;
+
+	dev = cn->vtpci_dev;
+
+	if (intr->vti_handler != NULL) {
+		bus_teardown_intr(dev, intr->vti_irq, intr->vti_handler);
+		intr->vti_handler = NULL;
+	}
+
+	if (intr->vti_irq != NULL) {
+		bus_release_resource(dev, SYS_RES_IRQ, intr->vti_rid,
+		    intr->vti_irq);
+		intr->vti_irq = NULL;
+		intr->vti_rid = -1;
+	}
+}
+
+static void
+vtpci_free_interrupts(struct vtpci_common *cn)
+{
+	struct vtpci_interrupt *intr;
+	int i, nvq_intrs;
+
+	vtpci_free_interrupt(cn, &cn->vtpci_device_interrupt);
+
+	if (cn->vtpci_nmsix_resources != 0) {
+		nvq_intrs = cn->vtpci_nmsix_resources - 1;
+		cn->vtpci_nmsix_resources = 0;
+
+		if ((intr = cn->vtpci_msix_vq_interrupts) != NULL) {
+			for (i = 0; i < nvq_intrs; i++, intr++)
+				vtpci_free_interrupt(cn, intr);
+
+			free(cn->vtpci_msix_vq_interrupts, M_DEVBUF);
+			cn->vtpci_msix_vq_interrupts = NULL;
+		}
+	}
+
+	if (cn->vtpci_flags & (VTPCI_FLAG_MSI | VTPCI_FLAG_MSIX))
+		pci_release_msi(cn->vtpci_dev);
+
+	cn->vtpci_flags &= ~VTPCI_FLAG_ITYPE_MASK;
+}
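Reviewer note: the teardown discipline in the new helpers is deliberate: every release NULLs the pointer it freed, so vtpci_free_interrupt() is safe on a partially set-up or already-freed slot, and vtpci_free_interrupts() recovers the per-VQ vector count as nmsix_resources - 1 because one allocated message belongs to the config-change interrupt. A small userspace model of the idempotent-release pattern (illustrative types, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct slot {
	void *irq;	/* resource handle, NULL when free */
};

static void
free_slot(struct slot *s)
{
	if (s->irq != NULL) {	/* NULL check makes this idempotent */
		free(s->irq);
		s->irq = NULL;
	}
}

int
main(void)
{
	struct slot s = { malloc(16) };

	free_slot(&s);
	free_slot(&s);		/* second call is a harmless no-op */
	printf("slot released: %s\n", s.irq == NULL ? "yes" : "no");
	return (0);
}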
+static void
+vtpci_free_virtqueues(struct vtpci_common *cn)
+{
+	struct vtpci_virtqueue *vqx;
+	int idx;
+
+	for (idx = 0; idx < cn->vtpci_nvqs; idx++) {
+		vtpci_disable_vq(cn, idx);
+
+		vqx = &cn->vtpci_vqs[idx];